`;return o},/* NOTE(review): the tokens before this comment are the tail of a definition that starts before this chunk — left untouched. */
/* i: bot-metadata cache module (IIFE). Exposes {cache, getBots, getBot}; backed by an in-memory map plus a localStorage cache with a 10-minute TTL. */
i=(function(){/* g: in-memory cache of bot records keyed by stringified bot id, pre-seeded with the static definitions below. NOTE: the "desc_short" values contain literal newlines, so this one object literal spans many physical lines — its string contents are runtime data and must not be reformatted. */let g={"365":{"id":365,"name":"Grok 2","model_name":"Grok 2","desc":"Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.","desc_short":"
Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.
","desc_more":"","link":"grok-2","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/365.jpg","model":153,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":147344,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Grok","Unmodified","Image Input","Advanced Reasoning","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2","set":{"temperature":1}},"854":{"id":854,"name":"Meta Llama 4 Maverick","model_name":"Meta Llama 4 Maverick","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.","desc_short":"
The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.
","desc_more":"","link":"meta-llama-4-maverick","provider":"Official","developer":"Meta","tpm":13,"image":"https://hbcdn01.hotbot.com/avatar/854.jpg","model":216,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","128x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Maverick","set":{"temperature":1}},"851":{"id":851,"name":"Google Gemini 2.0 Flash","model_name":"Google Gemini 2.0 Flash","desc":"Popular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.","desc_short":"
Popular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.
","desc_more":"","link":"google-gemini-2.0-flash","provider":"Official","developer":"Google","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/851.jpg","model":212,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Google","Gemini","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Google Gemini 2.0 Flash","set":{"temperature":1}},"169":{"id":169,"name":"Gemma2 9b","model_name":"Gemma2 9b","desc":"Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.","desc_short":"
Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.
","desc_more":"","link":"gemma2-9b","provider":"Official","developer":"Google","tpm":4,"image":"https://hbcdn01.hotbot.com/avatar/169.jpg","model":37,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Gemma","Google","Unmodified","9B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Gemma2 9b","set":{"temperature":1}},"789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"
Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.
","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":90880,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},"293":{"id":293,"name":"WizardLM-2 8x22B","model_name":"WizardLM-2 8x22B","desc":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.","desc_short":"
WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.
","desc_more":"","link":"wizardlm-2-8x22b","provider":"Official","developer":"Microsoft","tpm":10,"image":"https://hbcdn01.hotbot.com/avatar/293.jpg","model":121,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","8x22B Parameters","Mixture Of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"WizardLM-2 8x22B","set":{"temperature":1}},"849":{"id":849,"name":"DeepSeek R1","model_name":"DeepSeek R1","desc":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_short":"
First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.
","desc_more":"","link":"deepseek-r1","provider":"Official","developer":"DeepSeek","tpm":28,"image":"https://hbcdn01.hotbot.com/avatar/849.jpg","model":209,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":78679,"level":1,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","DeepSeek","Unmodified","Coding","Advanced Reasoning","671B Parameters"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"DeepSeek R1","set":{"temperature":1}},"369":{"id":369,"name":"Grok 2 Mini","model_name":"Grok 2 Mini","desc":"Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.","desc_short":"
Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.
","desc_more":"","link":"grok-2-mini","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/369.jpg","model":157,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":87728,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Grok","Unmodified","Image Input","Creative Writing","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2 Mini","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},/* b: in-flight lookup promises keyed by bot id — dedupes concurrent requests for the same id. */b={},/* d: localStorage handle; n: localStorage key for the persisted bot cache. */d=localStorage,n="hba-botcache",/* l(): read the persisted cache from localStorage, keeping only entries whose cacheTS is within the last 6e5 ms (10 minutes). Malformed/absent JSON yields an empty map (errors deliberately swallowed). */l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},/* a(bots): merge an array of bot records into the persisted cache, stamping cacheTS on records that lack one, then write back to localStorage. */a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};/* Seed the in-memory cache from localStorage without overwriting the static entries above. */$.each(l(),(b,d)=>{if(!g[b])g[b]=d});/* e(ids): resolve a single id or an array of ids to a {id: bot} map. Non-numeric ids are skipped (parseInt). If an id already has an in-flight request, await it; cached ids are served from g; the remaining ids are fetched in ONE POST to /lookup-bots.json (code==0 means success). Ids the server does not return are silently omitted from the result. */async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}/* Public API. getBot(id) resolves to the bot record, or null for a falsy id (an unknown id resolves to undefined). */return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();/* NOTE(review): the next statement continues past the end of this chunk — left untouched. */$('