`;return o},
i = (function () {
  // Built-in bot catalog, keyed by bot id.
  let g = {
    "851": {
      "id": 851, "name": "Google Gemini 2.0 Flash", "model_name": "Google Gemini 2.0 Flash",
      "desc": "Popular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.",
      "desc_short": "\nPopular with developers as a powerful workhorse model, optimal for high-volume, high-frequency tasks at scale and highly capable of multimodal reasoning across vast amounts of information with a context window of 1 million tokens. 2.0 is Google's new flagship series of Gemini.\n",
      "desc_more": "", "link": "google-gemini-2.0-flash", "provider": "Official", "developer": "Google", "tpm": 5,
      "image": "https://hbcdn01.hotbot.com/avatar/851.jpg", "model": 212, "sys": "", "read_image": 1, "prem": false, "can_at": 1,
      "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Large Context", "Google", "Gemini", "Unmodified", "Image Input", "Coding", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "Google Gemini 2.0 Flash",
      "set": {"temperature": 1}
    },
    "201": {
      "id": 201, "name": "OpenAI GPT-4o Mini", "model_name": "OpenAI GPT-4o Mini",
      "desc": "GPT-4o Mini is OpenAI's most advanced model in the small models category. It is multimodal (accepting text or image inputs and outputting text), and has higher intelligence than gpt-3.5-turbo but is just as fast. It is meant to be used for smaller tasks, including vision tasks.",
      "desc_short": "\nGPT-4o Mini is OpenAI's most advanced model in the small models category. It is multimodal (accepting text or image inputs and outputting text), and has higher intelligence than gpt-3.5-turbo but is just as fast. It is meant to be used for smaller tasks, including vision tasks.\n",
      "desc_more": "", "link": "openai-gpt-4o-mini", "provider": "Official", "developer": "OpenAI", "tpm": 8,
      "image": "https://hbcdn01.hotbot.com/avatar/201.jpg", "model": 89, "sys": "", "read_image": 1, "prem": false, "can_at": 1,
      "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0, "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Large Context", "OpenAI", "ChatGPT", "Unmodified", "Image Input", "Coding", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "Free ChatGPT",
      "set": {"temperature": 1}
    },
","desc_more":"","link":"openai-gpt-4o-mini","provider":"Official","developer":"OpenAI","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/201.jpg","model":89,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","OpenAI","ChatGPT","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Free ChatGPT","set":{"temperature":1}},"205":{"id":205,"name":"Meta Llama3.1 8b","model_name":"Meta Llama3.1 8b","desc":"Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"
Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.
","desc_more":"","link":"meta-llama3.1-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/205.jpg","model":93,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3.1 8b","set":{"temperature":1}},"149":{"id":149,"name":"OpenAI GPT-4 Omni","model_name":"OpenAI GPT-4 Omni","desc":"GPT-4 Omni is OpenAI's most advanced model. It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient, generating text 2x faster.","desc_short":"
GPT-4 Omni is OpenAI's most advanced model. It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient, generating text 2x faster.
","desc_more":"","link":"openai-gpt-4-omni","provider":"Official","developer":"OpenAI","tpm":200,"image":"https://hbcdn01.hotbot.com/avatar/149.jpg","model":17,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":200084,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-4 Omni","set":{"temperature":1}},"841":{"id":841,"name":"Meta Llama 3.3 70b","model_name":"Meta Llama 3.3 70b","desc":"The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.","desc_short":"
The Meta Llama 3.3 multilingual large language model (LLM) is an instruction tuned generative model in 70B (text in/text out). It is optimized for multilingual dialogue use cases and outperforms many of the available open source and closed chat models on common industry benchmarks. Llama 3.3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
","desc_more":"","link":"meta-llama-3.3-70b","provider":"Official","developer":"Meta","tpm":14,"image":"hotbot.png","model":201,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama 3.3 70b","set":{"temperature":1}},"313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"
A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},"281":{"id":281,"name":"OpenAI o1","model_name":"OpenAI o1","desc":"The o1 series of large language models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user. o1 is a reasoning model designed to solve hard problems across domains.","desc_short":"
The o1 series of large language models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user. o1 is a reasoning model designed to solve hard problems across domains.
","desc_more":"","link":"openai-o1","provider":"Official","developer":"OpenAI","tpm":750,"image":"https://hbcdn01.hotbot.com/avatar/281.jpg","model":109,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":5688027,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","Unmodified","Coding","Advanced Reasoning"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI o1","set":{"temperature":1}},"153":{"id":153,"name":"Meta Llama3 8b","model_name":"Meta Llama3 8b","desc":"Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"
Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 8B variant is a light-weight, ultra-fast model that can run anywhere.
","desc_more":"","link":"meta-llama3-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/153.jpg","model":21,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3 8b","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('