`;return o},i=(function(){let g={"809":{"id":809,"name":"Claude 3.5 Haiku","model_name":"Claude 3.5 Haiku","desc":"This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.","desc_short":"
This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.
","desc_more":"","link":"claude-3.5-haiku","provider":"Official","developer":"Anthropic","tpm":60,"image":"https://hbcdn01.hotbot.com/avatar/809.jpg","model":189,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":47140,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3.5 Haiku","set":{"temperature":1}},"177":{"id":177,"name":"Claude 3 Haiku","model_name":"Claude 3 Haiku","desc":"Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. It's focus is on quick and accurate targeted performance.","desc_short":"
Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. It's focus is on quick and accurate targeted performance.
","desc_more":"","link":"claude-3-haiku","provider":"Official","developer":"Anthropic","tpm":15,"image":"https://hbcdn01.hotbot.com/avatar/177.jpg","model":61,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Claude 3 Haiku","set":{"temperature":1}},"854":{"id":854,"name":"Meta Llama 4 Maverick","model_name":"Meta Llama 4 Maverick","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.","desc_short":"
The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.
","desc_more":"","link":"meta-llama-4-maverick","provider":"Official","developer":"Meta","tpm":13,"image":"https://hbcdn01.hotbot.com/avatar/854.jpg","model":216,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","128x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Maverick","set":{"temperature":1}},"853":{"id":853,"name":"Meta Llama 4 Scout","model_name":"Meta Llama 4 Scout","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Scout is a 17 billion parameter model with 16 experts. Fast MoE with the equivalent of 109B parameters, and a greatly expanded context window.","desc_short":"
The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Scout is a 17 billion parameter model with 16 experts. Fast MoE with the equivalent of 109B parameters, and a greatly expanded context window.
","desc_more":"","link":"meta-llama-4-scout","provider":"Official","developer":"Meta","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/853.jpg","model":215,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","16x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Scout","set":{"temperature":1}},"145":{"id":145,"name":"OpenAI GPT-4 Turbo","model_name":"OpenAI GPT-4 Turbo","desc":"GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.","desc_short":"
GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.
","desc_more":"","link":"openai-gpt-4-turbo","provider":"Official","developer":"OpenAI","tpm":20,"image":"https://hbcdn01.hotbot.com/avatar/145.jpg","model":13,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":56194,"level":1,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-4 Turbo","set":{"temperature":1}},"173":{"id":173,"name":"Claude 3.5 Sonnet","model_name":"Claude 3.5 Sonnet","desc":"Claude 3.5 Sonnet sets new industry benchmarks for graduate-level reasoning (GPQA), undergraduate-level knowledge (MMLU), and coding proficiency (HumanEval). Excelling in grasping nuance, humor, and complex instructions, and writes high-quality content with a natural, relatable tone.","desc_short":"
Claude 3.5 Sonnet sets new industry benchmarks for graduate-level reasoning (GPQA), undergraduate-level knowledge (MMLU), and coding proficiency (HumanEval). Excelling in grasping nuance, humor, and complex instructions, and writes high-quality content with a natural, relatable tone.
","desc_more":"","link":"claude-3.5-sonnet","provider":"Official","developer":"Anthropic","tpm":180,"image":"https://hbcdn01.hotbot.com/avatar/173.jpg","model":57,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":1614106,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3.5 Sonnet","set":{"temperature":1}},"849":{"id":849,"name":"DeepSeek R1","model_name":"DeepSeek R1","desc":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_short":"
First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.
","desc_more":"","link":"deepseek-r1","provider":"Official","developer":"DeepSeek","tpm":28,"image":"https://hbcdn01.hotbot.com/avatar/849.jpg","model":209,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":79483,"level":1,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","DeepSeek","Unmodified","Coding","Advanced Reasoning","671B Parameters"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"DeepSeek R1","set":{"temperature":1}},"373":{"id":373,"name":"Llama 3.2 3B Instruct","model_name":"Llama 3.2 3B Instruct","desc":"Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.","desc_short":"
Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it supports eight languages, including English, Spanish, and Hindi, and is adaptable for additional languages.
","desc_more":"","link":"llama-3.2-3b-instruct","provider":"Official","developer":"Meta","tpm":1,"image":"https://hbcdn01.hotbot.com/avatar/373.jpg","model":161,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Llama","Meta","Unmodified","3B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Llama 3.2 3B Instruct","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('