`;return o},i=(function(){let g={"365":{"id":365,"name":"Grok 2","model_name":"Grok 2","desc":"Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.","desc_short":"
Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.
","desc_more":"","link":"grok-2","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/365.jpg","model":153,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":152541,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Grok","Unmodified","Image Input","Advanced Reasoning","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2","set":{"temperature":1}},"853":{"id":853,"name":"Meta Llama 4 Scout","model_name":"Meta Llama 4 Scout","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Scout is a 17 billion parameter model with 16 experts. Fast MoE with the equivalent of 109B parameters, and a greatly expanded context window.","desc_short":"
The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Scout is a 17 billion parameter model with 16 experts. Fast MoE with the equivalent of 109B parameters, and a greatly expanded context window.
","desc_more":"","link":"meta-llama-4-scout","provider":"Official","developer":"Meta","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/853.jpg","model":215,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","16x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Scout","set":{"temperature":1}},"785":{"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_short":"
Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.
","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},"141":{"id":141,"name":"OpenAI GPT-3.5 Turbo 16k","model_name":"OpenAI GPT-3.5 Turbo 16k","desc":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.","desc_short":"
OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.
","desc_more":"","link":"openai-gpt-3.5-turbo-16k","provider":"Official","developer":"OpenAI","tpm":70,"image":"https://hbcdn01.hotbot.com/avatar/141.jpg","model":9,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":57635,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-3.5 Turbo 16k","set":{"temperature":1}},"205":{"id":205,"name":"Meta Llama3.1 8b","model_name":"Meta Llama3.1 8b","desc":"Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"
Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.
","desc_more":"","link":"meta-llama3.1-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/205.jpg","model":93,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3.1 8b","set":{"temperature":1}},"845":{"id":845,"name":"DeepSeek V3 Chat","model_name":"DeepSeek V3 Chat","desc":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.","desc_short":"
DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.
","desc_more":"","link":"deepseek-v3-chat","provider":"Official","developer":"DeepSeek","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/845.jpg","model":205,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","DeepSeek","Unmodified","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"DeepSeek V3 Chat","set":{"temperature":1}},"309":{"id":309,"name":"Mistral Nemo","model_name":"Mistral Nemo","desc":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.","desc_short":"
A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.
","desc_more":"","link":"mistral-nemo","provider":"Official","developer":"Mistral AI","tpm":3,"image":"https://hbcdn01.hotbot.com/avatar/309.jpg","model":137,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","12B Parameters","Multilingual","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral Nemo","set":{"temperature":1}},"841":{"id":841,"name":"Meta Llama 3.3 70b","model_name":"Meta Llama 3.3 70b","desc":"The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.","desc_short":"
The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
","desc_more":"","link":"meta-llama-3.3-70b","provider":"Official","developer":"Meta","tpm":14,"image":"hotbot.png","model":201,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 3.3 70b","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('