`;return o},

i = (function () {
  let cache = {
    "313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},
    "852":{"id":852,"name":"Google Gemini 2.0 Flash-Lite","model_name":"Google Gemini 2.0 Flash-Lite","desc":"2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.","desc_short":"2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.","desc_more":"","link":"google-gemini-2.0-flash-lite","provider":"Official","developer":"Google","tpm":4,"image":"https://hbcdn01.hotbot.com/avatar/852.jpg","model":213,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Google","Gemini","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Google Gemini 2.0 Flash-Lite","set":{"temperature":1}},
    "301":{"id":301,"name":"Hermes 3 405B Instruct","model_name":"Hermes 3 405B Instruct","desc":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.","desc_short":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.","desc_more":"","link":"hermes-3-405b-instruct","provider":"Official","developer":"Nous","tpm":90,"image":"https://hbcdn01.hotbot.com/avatar/301.jpg","model":129,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":64184,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","405B Parameters","Advanced Reasoning","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Hermes 3 405B Instruct","set":{"temperature":1}},
    "789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":91730,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},
    "849":{"id":849,"name":"DeepSeek R1","model_name":"DeepSeek R1","desc":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_short":"First-generation reasoning model from DeepSeek. Open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.","desc_more":"","link":"deepseek-r1","provider":"Official","developer":"DeepSeek","tpm":28,"image":"https://hbcdn01.hotbot.com/avatar/849.jpg","model":209,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":81125,"level":1,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","DeepSeek","Unmodified","Coding","Advanced Reasoning","671B Parameters"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"DeepSeek R1","set":{"temperature":1}},
    "145":{"id":145,"name":"OpenAI GPT-4 Turbo","model_name":"OpenAI GPT-4 Turbo","desc":"GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.","desc_short":"GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.","desc_more":"","link":"openai-gpt-4-turbo","provider":"Official","developer":"OpenAI","tpm":20,"image":"https://hbcdn01.hotbot.com/avatar/145.jpg","model":13,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":54247,"level":1,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-4 Turbo","set":{"temperature":1}},
    "377":{"id":377,"name":"Mistral Large","model_name":"Mistral Large","desc":"Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.","desc_short":"Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.","desc_more":"","link":"mistral-large","provider":"Official","developer":"Mistral","tpm":80,"image":"https://hbcdn01.hotbot.com/avatar/377.jpg","model":173,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":161678,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Creative Writing","Mistral","Unmodified","Multilingual","Coding","Advanced Reasoning"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Mistral Large","set":{"temperature":1}},
    "369":{"id":369,"name":"Grok 2 Mini","model_name":"Grok 2 Mini","desc":"Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.","desc_short":"Grok 2 Mini is xAI's fast, lightweight language model that offers a balance between speed and answer quality.","desc_more":"","link":"grok-2-mini","provider":"Official","developer":"xAI","tpm":112,"image":"https://hbcdn01.hotbot.com/avatar/369.jpg","model":157,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":91555,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Grok","Unmodified","Image Input","Creative Writing","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Grok 2 Mini","set":{"temperature":1}},
    "1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}
  };
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('