`;return o},
i = (function () {
  // Seed bot/model definitions shipped with the page, keyed by bot id.
  let g = {
    "851": {
      "id": 851, "name": "Google Gemini 2.0 Flash", "model_name": "Google Gemini 2.0 Flash",
      "desc": "Popular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.",
      "desc_short": "\nPopular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.\n",
      "desc_more": "", "link": "google-gemini-2.0-flash", "provider": "Official", "developer": "Google", "tpm": 5,
      "image": "https://hbcdn01.hotbot.com/avatar/851.jpg", "model": 212, "sys": "", "read_image": 1, "prem": false, "can_at": 1,
      "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Large Context", "Google", "Gemini", "Unmodified", "Image Input", "Coding", "Creative Writing"], "labels": ["Official"],
      "wel": "", "asktext": "", "short_name": "Google Gemini 2.0 Flash", "set": { "temperature": 1 }
    },
","desc_more":"","link":"google-gemini-2.0-flash","provider":"Official","developer":"Google","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/851.jpg","model":212,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Google","Gemini","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Google Gemini 2.0 Flash","set":{"temperature":1}},"181":{"id":181,"name":"Claude 3 Sonnet","model_name":"Claude 3 Sonnet","desc":"Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.","desc_short":"
Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.
","desc_more":"","link":"claude-3-sonnet","provider":"Official","developer":"Anthropic","tpm":180,"image":"https://hbcdn01.hotbot.com/avatar/181.jpg","model":65,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":132128,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3 Sonnet","set":{"temperature":1}},"309":{"id":309,"name":"Mistral Nemo","model_name":"Mistral Nemo","desc":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.","desc_short":"
A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.
","desc_more":"","link":"mistral-nemo","provider":"Official","developer":"Mistral AI","tpm":3,"image":"https://hbcdn01.hotbot.com/avatar/309.jpg","model":137,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","12B Parameters","Multilingual","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral Nemo","set":{"temperature":1}},"856":{"id":856,"name":"OpenAI o3","model_name":"OpenAI o3","desc":"o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images.","desc_short":"
o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images.
","desc_more":"","link":"openai-o3","provider":"Official","developer":"OpenAI","tpm":500,"image":"hotbot.png","model":219,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":2481155,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","Unmodified","Coding","Advanced Reasoning"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI o3","set":{"temperature":1}},"317":{"id":317,"name":"Llama 3 Euryale v2.1 70B","model_name":"Llama 3 Euryale 70B v2.1","desc":"Euryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.","desc_short":"
Euryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.
","desc_more":"","link":"llama-3-euryale-v2.1-70b","provider":"Official","developer":"Sao10k","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/317.jpg","model":145,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Llama","Unmodified","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Llama 3 Euryale v2.1 70B","set":{"temperature":1}},"789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"
Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.
","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":90880,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},"793":{"id":793,"name":"Jamba 1.5 Mini","model_name":"Jamba 1.5 Mini","desc":"Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.","desc_short":"
Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency. It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.
","desc_more":"","link":"jamba-1.5-mini","provider":"Official","developer":"AI21","tpm":7,"image":"https://hbcdn01.hotbot.com/avatar/793.jpg","model":181,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Jamba 1.5 Mini","set":{"temperature":1}},"785":{"id":785,"name":"Liquid LFM 40B","model_name":"Liquid LFM 40B","desc":"Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.","desc_short":"
Liquid Foundation Models (LFMs) are large neural networks built with computational units rooted in dynamic systems. This mixture of experts model is built for general purpose AI, with an excellent handle on sequential data and context.
","desc_more":"","link":"liquid-lfm-40b","provider":"Official","developer":"Liquid","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/785.jpg","model":177,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","Mixture of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Liquid LFM 40B","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
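  // All entries in g share the same shape. The typedef below is an
  // illustrative sketch inferred only from the seed data above: the field
  // names and value types come from those records, while the semantics of
  // the numeric flags are not documented here and are left unstated.
  /**
   * @typedef {Object} BotRecord
   * @property {number}   id
   * @property {string}   name
   * @property {string}   model_name
   * @property {string}   desc
   * @property {string}   desc_short
   * @property {string}   desc_more
   * @property {string}   link
   * @property {string}   provider
   * @property {string}   developer
   * @property {number}   tpm
   * @property {string}   image
   * @property {number}   model
   * @property {string}   sys
   * @property {0|1}      read_image
   * @property {boolean}  prem
   * @property {0|1}      can_at
   * @property {0|1}      no_comp
   * @property {0|1}      search
   * @property {0|1}      no_search
   * @property {0|1}      no_audio
   * @property {0|1}      no_attach
   * @property {Array}    wizard
   * @property {number}   repeat_wizard
   * @property {number}   per_message
   * @property {number}   level
   * @property {string}   ph
   * @property {string[]} labels_full
   * @property {string[]} labels
   * @property {string}   wel
   * @property {string}   asktext
   * @property {string}   short_name
   * @property {{temperature: number}} set
   * @property {number}   [cacheTS] - timestamp added by the caching code below when a record is persisted
   */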
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('