`;return o},
// Bot catalogue cache: seeds known bot definitions, persists lookups in
// localStorage for ten minutes, and fetches unknown ids from the server.
i = (function () {
    // Seed definitions keyed by bot id, one compact record per line.
    let g = {
        "289": {"id":289,"name":"MythoMax 13B","model_name":"MythoMax 13B","desc":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","desc_short":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","desc_more":"","link":"mythomax-13b","provider":"Official","developer":"gryphe","tpm":2,"image":"hotbot.png","model":117,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Unmodified","13B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"MythoMax 13B","set":{"temperature":1}},
        "854": {"id":854,"name":"Meta Llama 4 Maverick","model_name":"Meta Llama 4 Maverick","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.","desc_short":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.","desc_more":"","link":"meta-llama-4-maverick","provider":"Official","developer":"Meta","tpm":13,"image":"https://hbcdn01.hotbot.com/avatar/854.jpg","model":216,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","128x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Maverick","set":{"temperature":1}},
        "785": {"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_short":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},
        "141": {"id":141,"name":"OpenAI GPT-3.5 Turbo 16k","model_name":"OpenAI GPT-3.5 Turbo 16k","desc":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.","desc_short":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.","desc_more":"","link":"openai-gpt-3.5-turbo-16k","provider":"Official","developer":"OpenAI","tpm":70,"image":"https://hbcdn01.hotbot.com/avatar/141.jpg","model":9,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":72062,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-3.5 Turbo 16k","set":{"temperature":1}},
        "385": {"id":385,"name":"Qwen 2 VL 72B","model_name":"Qwen 2 VL 72B","desc":"Qwen2 VL 72B is a multimodal LLM from the Qwen Team with impressive multimedia and automations support.","desc_short":"Qwen2 VL 72B is a multimodal LLM from the Qwen Team with impressive multimedia and automations support.","desc_more":"","link":"qwen-2-vl-72b","provider":"Official","developer":"Qwen","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/385.jpg","model":169,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Creative Writing","Qwen","Unmodified","Image Input","72B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 72B","set":{"temperature":1}},
        "185": {"id":185,"name":"Claude 3 Opus","model_name":"Claude 3 Opus","desc":"Claude 3 Opus is a powerful model for highly complex tasks. Developed by Anthropic for top-level performance, intelligence, fluency, and understanding.","desc_short":"Claude 3 Opus is a powerful model for highly complex tasks. Developed by Anthropic for top-level performance, intelligence, fluency, and understanding.","desc_more":"","link":"claude-3-opus","provider":"Official","developer":"Anthropic","tpm":900,"image":"https://hbcdn01.hotbot.com/avatar/185.jpg","model":69,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":357281,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Image Input","Coding","Advanced Reasoning","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3 Opus","set":{"temperature":1}},
        "789": {"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":90880,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},
        "205": {"id":205,"name":"Meta Llama3.1 8b","model_name":"Meta Llama3.1 8b","desc":"Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_more":"","link":"meta-llama3.1-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/205.jpg","model":93,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3.1 8b","set":{"temperature":1}},
        "1": {"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}
    },
    // In-flight lookup promises keyed by bot id, so concurrent callers share
    // a single request per id.
    b = {},
    d = localStorage,
    // localStorage key holding the persisted bot cache.
    n = "hba-botcache",
    // Read the persisted cache, keeping only entries cached within the last
    // ten minutes (6e5 ms).
    l = () => {
        let g = {}, b = d[n];
        try {
            let l = Date.now() - 6e5;
            b && $.each(JSON.parse(b), (b, d) => {
                if (d.cacheTS > l) g[b] = d;
            });
        } catch (a) {}
        return g;
    },
    // Merge freshly fetched bot records into the persisted cache, stamping
    // each record with a cache timestamp.
    a = g => {
        let b = l();
        for (let a of g) {
            b[a.id] = a;
            if (!a.cacheTS) a.cacheTS = Date.now();
        }
        d[n] = JSON.stringify(b);
    };

    // Warm the in-memory table with anything still valid in localStorage.
    $.each(l(), (b, d) => {
        if (!g[b]) g[b] = d;
    });

    // Resolve one id or an array of ids to bot records. Cached ids are
    // returned directly; unknown ids are fetched in a single POST to
    // /lookup-bots.json, persisted, and shared with concurrent callers.
    async function e(d) {
        let n = [], l = {};
        for (let h of (d instanceof Array) ? d : [d])
            if (h = parseInt(h)) {
                if (b[h] && !g[h]) await b[h];
                if (g[h]) l[h] = g[h];
                else if (n.indexOf(h) < 0) n.push(h);
            }
        if (n.length) {
            let i = $.post("/lookup-bots.json", { bot_id: n.join(",") });
            for (let h of n) b[h] = i;
            let o = await i;
            if (o.code == 0) {
                a(o.data);
                for (let h of o.data) g[h.id] = h, l[h.id] = h;
            }
            for (let h of n) delete b[h];
        }
        return l;
    }

    return {
        cache: g,
        getBots: e,
        getBot: async g => g ? (await e(g))[g] : null
    };
})();

$('