`;return o},
i=(function(){
// g: in-memory catalog of the default bots, keyed by bot id
let g={
"181":{"id":181,"name":"Claude 3 Sonnet","model_name":"Claude 3 Sonnet","desc":"Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.","desc_short":"Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.","desc_more":"","link":"claude-3-sonnet","provider":"Official","developer":"Anthropic","tpm":180,"image":"https://hbcdn01.hotbot.com/avatar/181.jpg","model":65,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":201572,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3 Sonnet","set":{"temperature":1}},
"789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":61264,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},
"845":{"id":845,"name":"DeepSeek V3 Chat","model_name":"DeepSeek V3 Chat","desc":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the model outperforms other open-source models and rivals leading closed-source models in reported evaluations.","desc_short":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the model outperforms other open-source models and rivals leading closed-source models in reported evaluations.","desc_more":"","link":"deepseek-v3-chat","provider":"Official","developer":"DeepSeek","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/845.jpg","model":205,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","DeepSeek","Unmodified","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"DeepSeek V3 Chat","set":{"temperature":1}},
"377":{"id":377,"name":"Mistral Large","model_name":"Mistral Large","desc":"Mistral Large 2 is the new generation of Mistral's flagship model. It is highly capable in code generation, mathematics, and reasoning.","desc_short":"Mistral Large 2 is the new generation of Mistral's flagship model. It is highly capable in code generation, mathematics, and reasoning.","desc_more":"","link":"mistral-large","provider":"Official","developer":"Mistral","tpm":80,"image":"https://hbcdn01.hotbot.com/avatar/377.jpg","model":173,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":163006,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Mistral","Unmodified","Multilingual","Coding","Advanced Reasoning"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Mistral Large","set":{"temperature":1}},
"293":{"id":293,"name":"WizardLM-2 8x22B","model_name":"WizardLM-2 8x22B","desc":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art open-source models. It is an instruct finetune of Mixtral 8x22B.","desc_short":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art open-source models. It is an instruct finetune of Mixtral 8x22B.","desc_more":"","link":"wizardlm-2-8x22b","provider":"Official","developer":"Microsoft","tpm":10,"image":"https://hbcdn01.hotbot.com/avatar/293.jpg","model":121,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","8x22B Parameters","Mixture Of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"WizardLM-2 8x22B","set":{"temperature":1}},
"177":{"id":177,"name":"Claude 3 Haiku","model_name":"Claude 3 Haiku","desc":"Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. Its focus is on quick and accurate targeted performance.","desc_short":"Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. Its focus is on quick and accurate targeted performance.","desc_more":"","link":"claude-3-haiku","provider":"Official","developer":"Anthropic","tpm":15,"image":"https://hbcdn01.hotbot.com/avatar/177.jpg","model":61,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Claude 3 Haiku","set":{"temperature":1}},
"785":{"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture-of-experts model is built for general-purpose AI, with an excellent handle on sequential data and context.","desc_short":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture-of-experts model is built for general-purpose AI, with an excellent handle on sequential data and context.","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},
"137":{"id":137,"name":"OpenAI GPT-3.5 Turbo","model_name":"OpenAI GPT-3.5 Turbo","desc":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model developed by OpenAI for simple tasks. Capable of understanding and generating natural language or code, it has been optimized for chat.","desc_short":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model developed by OpenAI for simple tasks. Capable of understanding and generating natural language or code, it has been optimized for chat.","desc_more":"","link":"openai-gpt-3.5-turbo","provider":"Official","developer":"OpenAI","tpm":20,"image":"https://hbcdn01.hotbot.com/avatar/137.jpg","model":5,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":9132,"level":1,"ph":"","labels_full":["Official","Premium","ChatGPT","OpenAI","Unmodified","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-3.5 Turbo","set":{"temperature":1}},
"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it."
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":25,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","70B Parameters","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('