`;return o},i=(function(){/* Bot-metadata cache module (IIFE). g: built-in bot records keyed by string id; merged below with a localStorage-backed cache and lazily extended via POST /lookup-bots.json. Exposes {cache, getBots, getBot}. NOTE(review): the record objects' field semantics (tpm, prem, level, ...) are not defined in this chunk — treat as opaque server data. */let g={"845":{"id":845,"name":"DeepSeek V3 Chat","model_name":"DeepSeek V3 Chat","desc":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.","desc_short":"
DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.
","desc_more":"","link":"deepseek-v3-chat","provider":"Official","developer":"DeepSeek","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/845.jpg","model":205,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","DeepSeek","Unmodified","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"DeepSeek V3 Chat","set":{"temperature":1}},"289":{"id":289,"name":"MythoMax 13B","model_name":"MythoMax 13B","desc":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","desc_short":"
One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.
","desc_more":"","link":"mythomax-13b","provider":"Official","developer":"gryphe","tpm":2,"image":"hotbot.png","model":117,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Unmodified","13B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"MythoMax 13B","set":{"temperature":1}},"785":{"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_short":"
Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.
","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},"309":{"id":309,"name":"Mistral Nemo","model_name":"Mistral Nemo","desc":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.","desc_short":"
A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.
","desc_more":"","link":"mistral-nemo","provider":"Official","developer":"Mistral AI","tpm":3,"image":"https://hbcdn01.hotbot.com/avatar/309.jpg","model":137,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","12B Parameters","Multilingual","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral Nemo","set":{"temperature":1}},"381":{"id":381,"name":"Qwen 2 VL 7B","model_name":"Qwen 2 VL 7B","desc":"Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.","desc_short":"
Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.
","desc_more":"","link":"qwen-2-vl-7b","provider":"Official","developer":"Qwen","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/381.jpg","model":165,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Image Input","7B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 7B","set":{"temperature":1}},"369":{"id":369,"name":"Grok 2 Mini","model_name":"Grok 2 Mini","desc":"Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.","desc_short":"
Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.
","desc_more":"","link":"grok-2-mini","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/369.jpg","model":157,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":90530,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Grok","Unmodified","Image Input","Creative Writing","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2 Mini","set":{"temperature":1}},"849":{"id":849,"name":"DeepSeek R1","model_name":"DeepSeek R1","desc":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_short":"
First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.
","desc_more":"","link":"deepseek-r1","provider":"Official","developer":"DeepSeek","tpm":28,"image":"https://hbcdn01.hotbot.com/avatar/849.jpg","model":209,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":144131,"level":1,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","DeepSeek","Unmodified","Coding","Advanced Reasoning","671B Parameters"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"DeepSeek R1","set":{"temperature":1}},"793":{"id":793,"name":"Jamba 1.5 Mini","model_name":"Jamba 1.5 Mini","desc":"Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.","desc_short":"
Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.
","desc_more":"","link":"jamba-1.5-mini","provider":"Official","developer":"AI21","tpm":7,"image":"https://hbcdn01.hotbot.com/avatar/793.jpg","model":181,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Jamba 1.5 Mini","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},/* b: in-flight lookup promises keyed by bot id (dedupes concurrent requests). d: the localStorage object; n: the localStorage key for the bot cache. */b={},d=localStorage,n="hba-botcache",/* l(): load cached bot records from localStorage, keeping only entries whose cacheTS is younger than 10 minutes (6e5 ms); returns {} if the key is absent or JSON.parse fails (errors deliberately swallowed — best-effort cache read). */l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},/* a(bots): merge an array of bot records into the localStorage cache, stamping cacheTS on any record that lacks one, then persist the merged map. */a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};/* Seed the in-memory map g from the localStorage cache without overwriting the built-in records above. */$.each(l(),(b,d)=>{if(!g[b])g[b]=d});/* e(ids): resolve bot records for a single id or an array of ids. For each numeric id: await any in-flight request if the id is not yet in g, serve hits from g, and collect remaining misses. Misses are batched into one POST /lookup-bots.json whose promise is registered in b for each id; on success (code==0) results are written back to g and to localStorage via a(). Returns a map of id -> record (unresolvable ids are simply absent). */async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}/* Public API: raw cache map, batch getter, and single-record getter (resolves to null for a falsy id). */return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('