`;return o},/* bot-metadata cache module (IIFE): seeded bot records + localStorage-backed, TTL'd async lookup via /lookup-bots.json */i=(function(){let g=/* in-memory cache: bot id -> bot record, pre-seeded with generated data below */{"793":{"id":793,"name":"Jamba 1.5 Mini","model_name":"Jamba 1.5 Mini","desc":"Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.","desc_short":"
Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.
","desc_more":"","link":"jamba-1.5-mini","provider":"Official","developer":"AI21","tpm":7,"image":"https://hbcdn01.hotbot.com/avatar/793.jpg","model":181,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Jamba 1.5 Mini","set":{"temperature":1}},"369":{"id":369,"name":"Grok 2 Mini","model_name":"Grok 2 Mini","desc":"Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.","desc_short":"
Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.
","desc_more":"","link":"grok-2-mini","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/369.jpg","model":157,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":50636,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Grok","Unmodified","Image Input","Creative Writing","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2 Mini","set":{"temperature":1}},"309":{"id":309,"name":"Mistral Nemo","model_name":"Mistral Nemo","desc":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.","desc_short":"
A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.
","desc_more":"","link":"mistral-nemo","provider":"Official","developer":"Mistral AI","tpm":3,"image":"https://hbcdn01.hotbot.com/avatar/309.jpg","model":137,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","12B Parameters","Multilingual","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral Nemo","set":{"temperature":1}},"301":{"id":301,"name":"Hermes 3 405B Instruct","model_name":"Hermes 3 405B Instruct","desc":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.","desc_short":"
Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.
","desc_more":"","link":"hermes-3-405b-instruct","provider":"Official","developer":"Nous","tpm":90,"image":"https://hbcdn01.hotbot.com/avatar/301.jpg","model":129,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":43291,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","405B Parameters","Advanced Reasoning","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Hermes 3 405B Instruct","set":{"temperature":1}},"313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"
A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},"377":{"id":377,"name":"Mistral Large","model_name":"Mistral Large","desc":"Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.","desc_short":"
Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.
","desc_more":"","link":"mistral-large","provider":"Official","developer":"Mistral","tpm":80,"image":"https://hbcdn01.hotbot.com/avatar/377.jpg","model":173,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":184106,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Mistral","Unmodified","Multilingual","Coding","Advanced Reasoning"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Mistral Large","set":{"temperature":1}},"849":{"id":849,"name":"DeepSeek R1","model_name":"DeepSeek R1","desc":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_short":"
First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.
","desc_more":"","link":"deepseek-r1","provider":"Official","developer":"DeepSeek","tpm":28,"image":"https://hbcdn01.hotbot.com/avatar/849.jpg","model":209,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":59640,"level":1,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","DeepSeek","Unmodified","Coding","Advanced Reasoning","671B Parameters"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"DeepSeek R1","set":{"temperature":1}},"293":{"id":293,"name":"WizardLM-2 8x22B","model_name":"WizardLM-2 8x22B","desc":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.","desc_short":"
WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.
","desc_more":"","link":"wizardlm-2-8x22b","provider":"Official","developer":"Microsoft","tpm":10,"image":"https://hbcdn01.hotbot.com/avatar/293.jpg","model":121,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","8x22B Parameters","Mixture Of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"WizardLM-2 8x22B","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}}/* end of seeded data */,/* b: bot id -> in-flight lookup Promise, so concurrent requests for the same id are awaited rather than re-fetched */b={},d=localStorage,/* n: localStorage key under which the cache is persisted */n="hba-botcache",/* l(): read the persisted cache from localStorage, keeping only entries whose cacheTS is fresher than 10 minutes (6e5 ms); deliberately best-effort — a missing key or JSON parse failure yields {}. NOTE: locals g/b/l here shadow the outer bindings. */l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},/* a(bots): merge an array of freshly fetched bot records into the persisted cache (stamping cacheTS on records that lack one) and write it back to localStorage */a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};/* module init: seed the in-memory cache with still-fresh persisted entries; hard-coded seed data wins over persisted copies */$.each(l(),(b,d)=>{if(!g[b])g[b]=d});/* e(ids): resolve a single id or an array of ids to a map {id: record}. Ids are coerced with parseInt and falsy results (0/NaN) skipped; cached ids are served from g (awaiting any in-flight request first), and the remaining ids are deduped and fetched in one POST to /lookup-bots.json, whose shared Promise is parked in b for the duration. NOTE: locals d/n/l/i here shadow the outer bindings. */async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}/* public API: raw cache object, batch lookup, and single-bot convenience wrapper (null for falsy id) */return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('