`;return o},i=(function(){
  // Built-in bot definitions shipped with the page, keyed by bot id.
  let bots={
    "365":{"id":365,"name":"Grok 2","model_name":"Grok 2","desc":"Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.","desc_short":"\nGrok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.\n","desc_more":"","link":"grok-2","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/365.jpg","model":153,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":104998,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Grok","Unmodified","Image Input","Advanced Reasoning","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2","set":{"temperature":1}},
    "785":{"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_short":"\nLiquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.\n","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},
    "845":{"id":845,"name":"DeepSeek V3 Chat","model_name":"DeepSeek V3 Chat","desc":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.","desc_short":"\nDeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.\n","desc_more":"","link":"deepseek-v3-chat","provider":"Official","developer":"DeepSeek","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/845.jpg","model":205,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","DeepSeek","Unmodified","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"DeepSeek V3 Chat","set":{"temperature":1}},
    "313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"\nA high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},
    "793":{"id":793,"name":"Jamba 1.5 Mini","model_name":"Jamba 1.5 Mini","desc":"Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.","desc_short":"\nJamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.\n","desc_more":"","link":"jamba-1.5-mini","provider":"Official","developer":"AI21","tpm":7,"image":"https://hbcdn01.hotbot.com/avatar/793.jpg","model":181,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Jamba 1.5 Mini","set":{"temperature":1}},
    "789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"\nJamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.\n","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":72691,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},
    "829":{"id":829,"name":"EVA Qwen2.5 14B","model_name":"EVA Qwen2.5 14B","desc":"A model specializing in RP and creative writing, this model is based on Qwen2.5-14B, fine-tuned with a mixture of synthetic and natural data. It is trained on 1.5M tokens of role-play data, and fine-tuned on 1.5M tokens of synthetic data.","desc_short":"\nA model specializing in RP and creative writing, this model is based on Qwen2.5-14B, fine-tuned with a mixture of synthetic and natural data. It is trained on 1.5M tokens of role-play data, and fine-tuned on 1.5M tokens of synthetic data.\n","desc_more":"","link":"eva-qwen2.5-14b","provider":"Official","developer":"HotBot","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/829.jpg","model":193,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Creative Writing","14B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"EVA Qwen2.5 14B","set":{"temperature":1}},
    "385":{"id":385,"name":"Qwen 2 VL 72B","model_name":"Qwen 2 VL 72B","desc":"Qwen2 VL 72B is a multimodal LLM from the Qwen Team with impressive multimedia and automations support.","desc_short":"\nQwen2 VL 72B is a multimodal LLM from the Qwen Team with impressive multimedia and automations support.\n","desc_more":"","link":"qwen-2-vl-72b","provider":"Official","developer":"Qwen","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/385.jpg","model":169,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Creative Writing","Qwen","Unmodified","Image Input","72B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 72B","set":{"temperature":1}},
    "1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"\nHotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.\n","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":25,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","70B Parameters","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}
  };

  let pending = {},            // in-flight lookup requests, keyed by bot id
      storage = localStorage,
      cacheKey = "hba-botcache";

  // Read the persisted cache, keeping only entries newer than ten minutes (6e5 ms).
  let readCache = () => {
    let fresh = {}, raw = storage[cacheKey];
    try {
      let cutoff = Date.now() - 6e5;
      raw && $.each(JSON.parse(raw), (id, bot) => {
        if (bot.cacheTS > cutoff) fresh[id] = bot;
      });
    } catch (err) {}
    return fresh;
  };

  // Merge freshly fetched bots into the persisted cache, timestamping new entries.
  let writeCache = (fetched) => {
    let merged = readCache();
    for (let bot of fetched) {
      merged[bot.id] = bot;
      if (!bot.cacheTS) bot.cacheTS = Date.now();
    }
    storage[cacheKey] = JSON.stringify(merged);
  };

  // Seed the in-memory table from localStorage without overwriting the built-ins.
  $.each(readCache(), (id, bot) => {
    if (!bots[id]) bots[id] = bot;
  });

  // Resolve a bot id, or an array of ids, to bot records. Cache misses are
  // batched into a single POST to /lookup-bots.json (which replies with
  // {code, data: [...]}), and concurrent callers asking for the same id share
  // the in-flight request instead of issuing duplicates.
  async function getBots(ids) {
    let missing = [], found = {};
    for (let id of (ids instanceof Array) ? ids : [ids])
      if (id = parseInt(id)) {   // assignment intended: skips non-numeric ids
        if (pending[id] && !bots[id]) await pending[id];
        if (bots[id]) found[id] = bots[id];
        else if (missing.indexOf(id) < 0) missing.push(id);
      }
    if (missing.length) {
      let request = $.post("/lookup-bots.json", { bot_id: missing.join(",") });
      for (let id of missing) pending[id] = request;
      let res = await request;
      if (res.code == 0) {
        writeCache(res.data);
        for (let bot of res.data) bots[bot.id] = bot, found[bot.id] = bot;
      }
      for (let id of missing) delete pending[id];
    }
    return found;
  }

  return { cache: bots, getBots: getBots, getBot: async id => id ? (await getBots(id))[id] : null };
})();
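// Usage sketch of the module above (illustrative only — assumes jQuery is
// loaded and `i` is still the binding this bundle assigns; the ids come from
// the built-in table, so these calls resolve locally without a network hit):
i.getBot(365).then(bot => console.log(bot.name)); // "Grok 2"
i.getBots([785, 845]).then(found => {
  // `found` maps each resolved id to its record; unresolvable ids are simply
  // absent. Any cache misses would go out as one batched POST to /lookup-bots.json.
  console.log(Object.keys(found)); // ["785", "845"]
});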
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":25,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","70B Parameters","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('