`;return o},

// Bot catalogue module: an in-memory map of bot definitions (seeded below),
// backed by a ten-minute localStorage cache and a batched lookup endpoint.
i = (function () {
  // Seed definitions, keyed by bot id.
  let cache = {
    "809": {
      "id": 809, "name": "Claude 3.5 Haiku", "model_name": "Claude 3.5 Haiku",
      "desc": "This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.",
      "desc_short": "\nThis is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.\n",
      "desc_more": "", "link": "claude-3.5-haiku", "provider": "Official", "developer": "Anthropic",
      "tpm": 60, "image": "https://hbcdn01.hotbot.com/avatar/809.jpg", "model": 189, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 48490,
      "level": 2, "ph": "",
      "labels_full": ["Official", "Premium", "Large Context", "Claude", "Unmodified", "Coding", "Creative Writing"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "",
      "short_name": "Claude 3.5 Haiku", "set": { "temperature": 1 }
    },
    "169": {
      "id": 169, "name": "Gemma2 9b", "model_name": "Gemma2 9b",
      "desc": "Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.",
      "desc_short": "\nGemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.\n",
      "desc_more": "", "link": "gemma2-9b", "provider": "Official", "developer": "Google",
      "tpm": 4, "image": "https://hbcdn01.hotbot.com/avatar/169.jpg", "model": 37, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Gemma", "Google", "Unmodified", "9B Parameters", "Coding"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Gemma2 9b", "set": { "temperature": 1 }
    },
    "852": {
      "id": 852, "name": "Google Gemini 2.0 Flash-Lite", "model_name": "Google Gemini 2.0 Flash-Lite",
      "desc": "2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.",
      "desc_short": "\n2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.\n",
      "desc_more": "", "link": "google-gemini-2.0-flash-lite", "provider": "Official", "developer": "Google",
      "tpm": 4, "image": "https://hbcdn01.hotbot.com/avatar/852.jpg", "model": 213, "sys": "",
      "read_image": 1, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Large Context", "Google", "Gemini", "Unmodified", "Image Input", "Coding", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Google Gemini 2.0 Flash-Lite", "set": { "temperature": 1 }
    },
    "313": {
      "id": 313, "name": "Mistral 7B Instruct", "model_name": "Mistral 7B Instruct",
      "desc": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.",
      "desc_short": "\nA high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n",
      "desc_more": "", "link": "mistral-7b-instruct", "provider": "Official", "developer": "Mistral AI",
      "tpm": 2, "image": "https://hbcdn01.hotbot.com/avatar/313.jpg", "model": 141, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "Large Context", "Mistral", "Unmodified", "7B Parameters", "Coding"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Mistral 7B Instruct", "set": { "temperature": 1 }
    },
    "317": {
      "id": 317, "name": "Llama 3 Euryale v2.1 70B", "model_name": "Llama 3 Euryale 70B v2.1",
      "desc": "Euryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.",
      "desc_short": "\nEuryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.\n",
      "desc_more": "", "link": "llama-3-euryale-v2.1-70b", "provider": "Official", "developer": "Sao10k",
      "tpm": 8, "image": "https://hbcdn01.hotbot.com/avatar/317.jpg", "model": 145, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "Llama", "Unmodified", "70B Parameters", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Llama 3 Euryale v2.1 70B", "set": { "temperature": 1 }
    },
    "309": {
      "id": 309, "name": "Mistral Nemo", "model_name": "Mistral Nemo",
      "desc": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.",
      "desc_short": "\nA 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n",
      "desc_more": "", "link": "mistral-nemo", "provider": "Official", "developer": "Mistral AI",
      "tpm": 3, "image": "https://hbcdn01.hotbot.com/avatar/309.jpg", "model": 137, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "Large Context", "Mistral", "Unmodified", "12B Parameters", "Multilingual", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Mistral Nemo", "set": { "temperature": 1 }
    },
    "369": {
      "id": 369, "name": "Grok 2 Mini", "model_name": "Grok 2 Mini",
      "desc": "Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.",
      "desc_short": "\nGrok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.\n",
      "desc_more": "", "link": "grok-2-mini", "provider": "Official", "developer": "xAI",
      "tpm": 112, "image": "https://hbcdn01.hotbot.com/avatar/369.jpg", "model": 157, "sys": "",
      "read_image": 1, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 80031,
      "level": 2, "ph": "",
      "labels_full": ["Official", "Premium", "Uncensored", "Large Context", "Grok", "Unmodified", "Image Input", "Creative Writing", "Coding"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "",
      "short_name": "Grok 2 Mini", "set": { "temperature": 1 }
    },
    "850": {
      "id": 850, "name": "OpenAI o3 Mini", "model_name": "OpenAI o3 Mini",
      "desc": "o3-mini is a small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features. Like other models in the o-series, it is designed to excel at science, math, and coding tasks.",
      "desc_short": "\no3-mini is a small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features. Like other models in the o-series, it is designed to excel at science, math, and coding tasks.\n",
      "desc_more": "", "link": "openai-o3-mini", "provider": "Official", "developer": "OpenAI",
      "tpm": 55, "image": "https://hbcdn01.hotbot.com/avatar/850.jpg", "model": 211, "sys": "",
      "read_image": 0, "prem": false, "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 151129,
      "level": 2, "ph": "",
      "labels_full": ["Official", "Premium", "Large Context", "OpenAI", "Unmodified", "Coding", "Advanced Reasoning"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "",
      "short_name": "OpenAI o3 Mini", "set": { "temperature": 1 }
    },
    "1": {
      "id": 1, "name": "HotBot Assistant", "model_name": "HotBot Assistant",
      "desc": "HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.",
      "desc_short": "\nHotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.\n",
      "desc_more": "", "link": "hotbot-assistant", "provider": "Official", "developer": "HotBot",
      "tpm": 14, "image": "https://hbcdn01.hotbot.com/avatar/1.jpg", "model": 201, "sys": "",
      "read_image": 1, "prem": false, "can_at": 0, "no_comp": 1, "search": 0, "no_search": 0,
      "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0,
      "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Image Generation", "Web Search", "Coding", "70B Parameters", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "",
      "short_name": "Assistant", "set": { "temperature": 1 }
    }
  };

  let pending = {},              // in-flight lookup requests, keyed by bot id
      store = localStorage,
      cacheKey = "hba-botcache";

  // Read the persisted cache, keeping only entries stamped within the last
  // ten minutes (6e5 ms).
  let readStored = () => {
    let fresh = {},
        raw = store[cacheKey];
    try {
      let cutoff = Date.now() - 6e5;
      raw && $.each(JSON.parse(raw), (id, bot) => {
        if (bot.cacheTS > cutoff) fresh[id] = bot;
      });
    } catch (err) {}
    return fresh;
  };

  // Merge freshly fetched bots into the persisted cache, stamping new entries.
  let writeStored = bots => {
    let stored = readStored();
    for (let bot of bots) {
      stored[bot.id] = bot;
      if (!bot.cacheTS) bot.cacheTS = Date.now();
    }
    store[cacheKey] = JSON.stringify(stored);
  };

  // Warm the in-memory cache from any still-fresh persisted entries.
  $.each(readStored(), (id, bot) => {
    if (!cache[id]) cache[id] = bot;
  });

  // Resolve one id or an array of ids to bot records. Cached ids are returned
  // directly; the rest are fetched in a single batched POST to /lookup-bots.json.
  async function getBots(ids) {
    let missing = [],
        found = {};
    for (let raw of (ids instanceof Array) ? ids : [ids]) {
      let id = parseInt(raw);
      if (!id) continue;
      if (pending[id] && !cache[id]) await pending[id];   // share any in-flight lookup
      if (cache[id]) found[id] = cache[id];
      else if (missing.indexOf(id) < 0) missing.push(id);
    }
    if (missing.length) {
      let request = $.post("/lookup-bots.json", { bot_id: missing.join(",") });
      for (let id of missing) pending[id] = request;
      let response = await request;
      if (response.code == 0) {
        writeStored(response.data);
        for (let bot of response.data) {
          cache[bot.id] = bot;
          found[bot.id] = bot;
        }
      }
      for (let id of missing) delete pending[id];
    }
    return found;
  }

  return {
    cache: cache,
    getBots: getBots,
    getBot: async id => id ? (await getBots(id))[id] : null
  };
})();
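/*
 * A minimal usage sketch for the module above. It assumes the module is reachable
 * through the binding `i` in this scope and that /lookup-bots.json answers with
 * { code: 0, data: [ ...bot records... ] }, as the code above implies; the ids and
 * console output are illustrative only.
 */
// Single lookup: resolved from the in-memory seed, from localStorage entries
// younger than ten minutes, or, failing both, from the server.
i.getBot(809).then(bot => {
  if (bot) console.log(bot.name, "by", bot.developer, "-", bot.labels_full.join(", "));
});

// Batched lookup: cached ids are served locally; only the misses are sent,
// as a single comma-separated bot_id parameter.
i.getBots([169, 852, 313]).then(bots => {
  $.each(bots, (id, bot) => console.log(id, bot.short_name, "tpm:", bot.tpm));
});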
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('