`;return o},i=(function(){let g={"169":{"id":169,"name":"Gemma2 9b","model_name":"Gemma2 9b","desc":"Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.","desc_short":"
Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.
","desc_more":"","link":"gemma2-9b","provider":"Official","developer":"Google","tpm":4,"image":"https://hbcdn01.hotbot.com/avatar/169.jpg","model":37,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Gemma","Google","Unmodified","9B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Gemma2 9b","set":{"temperature":1}},"173":{"id":173,"name":"Claude 3.5 Sonnet","model_name":"Claude 3.5 Sonnet","desc":"Claude 3.5 Sonnet sets new industry benchmarks for graduate-level reasoning (GPQA), undergraduate-level knowledge (MMLU), and coding proficiency (HumanEval). Excelling in grasping nuance, humor, and complex instructions, and writes high-quality content with a natural, relatable tone.","desc_short":"
Claude 3.5 Sonnet sets new industry benchmarks for graduate-level reasoning (GPQA), undergraduate-level knowledge (MMLU), and coding proficiency (HumanEval). Excelling in grasping nuance, humor, and complex instructions, and writes high-quality content with a natural, relatable tone.
","desc_more":"","link":"claude-3.5-sonnet","provider":"Official","developer":"Anthropic","tpm":180,"image":"https://hbcdn01.hotbot.com/avatar/173.jpg","model":57,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":1614736,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3.5 Sonnet","set":{"temperature":1}},"841":{"id":841,"name":"Meta Llama 3.3 70b","model_name":"Meta Llama 3.3 70b","desc":"The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.","desc_short":"
The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
","desc_more":"","link":"meta-llama-3.3-70b","provider":"Official","developer":"Meta","tpm":14,"image":"hotbot.png","model":201,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 3.3 70b","set":{"temperature":1}},"365":{"id":365,"name":"Grok 2","model_name":"Grok 2","desc":"Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.","desc_short":"
Grok 2 is xAI's frontier language model with state-of-the-art reasoning capabilities, best for complex and multi-step use cases.
","desc_more":"","link":"grok-2","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/365.jpg","model":153,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":139480,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Grok","Unmodified","Image Input","Advanced Reasoning","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2","set":{"temperature":1}},"301":{"id":301,"name":"Hermes 3 405B Instruct","model_name":"Hermes 3 405B Instruct","desc":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.","desc_short":"
Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.
","desc_more":"","link":"hermes-3-405b-instruct","provider":"Official","developer":"Nous","tpm":90,"image":"https://hbcdn01.hotbot.com/avatar/301.jpg","model":129,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":63421,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","405B Parameters","Advanced Reasoning","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Hermes 3 405B Instruct","set":{"temperature":1}},"854":{"id":854,"name":"Meta Llama 4 Maverick","model_name":"Meta Llama 4 Maverick","desc":"The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.","desc_short":"
The Llama 4 collection of models are natively multimodal AI models that enable text and multimodal experiences. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding. These Llama 4 models mark the beginning of a new era for the Llama ecosystem. Launching alongside the rest of the Llama 4 series, Llama 4 Maverick is a 17 billion parameter model with 128 experts. Massive MoE with the equivalent of 400B parameters, and a greatly expanded context window.
","desc_more":"","link":"meta-llama-4-maverick","provider":"Official","developer":"Meta","tpm":13,"image":"https://hbcdn01.hotbot.com/avatar/854.jpg","model":216,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Meta","Llama","Unmodified","Image Input","Coding","Creative Writing","128x17B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 4 Maverick","set":{"temperature":1}},"177":{"id":177,"name":"Claude 3 Haiku","model_name":"Claude 3 Haiku","desc":"Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. It's focus is on quick and accurate targeted performance.","desc_short":"
Claude 3 Haiku is a fast and compact model built by Anthropic for near-instant responsiveness. It's focus is on quick and accurate targeted performance.
","desc_more":"","link":"claude-3-haiku","provider":"Official","developer":"Anthropic","tpm":15,"image":"https://hbcdn01.hotbot.com/avatar/177.jpg","model":61,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Claude","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Claude 3 Haiku","set":{"temperature":1}},"809":{"id":809,"name":"Claude 3.5 Haiku","model_name":"Claude 3.5 Haiku","desc":"This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.","desc_short":"
This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.
","desc_more":"","link":"claude-3.5-haiku","provider":"Official","developer":"Anthropic","tpm":60,"image":"https://hbcdn01.hotbot.com/avatar/809.jpg","model":189,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":47466,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3.5 Haiku","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('