`;return o},i=(function(){
  // Bot/model definitions bundled with the page, keyed by bot id.
  let cache = {
    "281": {
      "id": 281, "name": "OpenAI o1", "model_name": "OpenAI o1",
      "desc": "The o1 series of large language models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user. o1 is a reasoning model designed to solve hard problems across domains.",
      "desc_short": "The o1 series of large language models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user. o1 is a reasoning model designed to solve hard problems across domains.",
      "desc_more": "", "link": "openai-o1", "provider": "Official", "developer": "OpenAI", "tpm": 750,
      "image": "https://hbcdn01.hotbot.com/avatar/281.jpg", "model": 109, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 852845, "level": 3, "ph": "",
      "labels_full": ["Official", "Premium", "Large Context", "OpenAI", "Unmodified", "Coding", "Advanced Reasoning"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "", "short_name": "OpenAI o1",
      "set": { "temperature": 1 }
    },
    "377": {
      "id": 377, "name": "Mistral Large", "model_name": "Mistral Large",
      "desc": "Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.",
      "desc_short": "Mistral Large 2 is the new generation of Mistral's flagship model. It is significantly capable in code generation, mathematics, and reasoning.",
      "desc_more": "", "link": "mistral-large", "provider": "Official", "developer": "Mistral", "tpm": 80,
      "image": "https://hbcdn01.hotbot.com/avatar/377.jpg", "model": 173, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 121400, "level": 2, "ph": "",
      "labels_full": ["Official", "Premium", "Uncensored", "Large Context", "Creative Writing", "Mistral", "Unmodified", "Multilingual", "Coding", "Advanced Reasoning"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "", "short_name": "Mistral Large",
      "set": { "temperature": 1 }
    },
    "293": {
      "id": 293, "name": "WizardLM-2 8x22B", "model_name": "WizardLM-2 8x22B",
      "desc": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.",
      "desc_short": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.",
      "desc_more": "", "link": "wizardlm-2-8x22b", "provider": "Official", "developer": "Microsoft", "tpm": 10,
      "image": "https://hbcdn01.hotbot.com/avatar/293.jpg", "model": 121, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "Large Context", "Unmodified", "8x22B Parameters", "Mixture Of Experts", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "WizardLM-2 8x22B",
      "set": { "temperature": 1 }
    },
    "850": {
      "id": 850, "name": "OpenAI o3 Mini", "model_name": "OpenAI o3 Mini",
      "desc": "o3-mini is a small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features. Like other models in the o-series, it is designed to excel at science, math, and coding tasks.",
      "desc_short": "o3-mini is a small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features. Like other models in the o-series, it is designed to excel at science, math, and coding tasks.",
      "desc_more": "", "link": "openai-o3-mini", "provider": "Official", "developer": "OpenAI", "tpm": 55,
      "image": "https://hbcdn01.hotbot.com/avatar/850.jpg", "model": 211, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 96816, "level": 2, "ph": "",
      "labels_full": ["Official", "Premium", "Large Context", "OpenAI", "Unmodified", "Coding", "Advanced Reasoning"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "", "short_name": "OpenAI o3 Mini",
      "set": { "temperature": 1 }
    },
    "321": {
      "id": 321, "name": "OpenChat 3.5 7B", "model_name": "OpenChat 3.5 7B",
      "desc": "OpenChat 7B is a library of open-source language models, fine-tuned with C-RLFT (Conditioned Reinforcement Learning Fine-Tuning) - a strategy inspired by offline reinforcement learning. It has been trained on mixed-quality data without preference labels.",
      "desc_short": "OpenChat 7B is a library of open-source language models, fine-tuned with C-RLFT (Conditioned Reinforcement Learning Fine-Tuning) - a strategy inspired by offline reinforcement learning. It has been trained on mixed-quality data without preference labels.",
      "desc_more": "", "link": "openchat-3.5-7b", "provider": "Official", "developer": "OpenChat", "tpm": 2,
      "image": "https://hbcdn01.hotbot.com/avatar/321.jpg", "model": 149, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "OpenChat", "Unmodified", "7B Parameters"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "OpenChat 3.5 7B",
      "set": { "temperature": 1 }
    },
    "309": {
      "id": 309, "name": "Mistral Nemo", "model_name": "Mistral Nemo",
      "desc": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.",
      "desc_short": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.",
      "desc_more": "", "link": "mistral-nemo", "provider": "Official", "developer": "Mistral AI", "tpm": 3,
      "image": "https://hbcdn01.hotbot.com/avatar/309.jpg", "model": 137, "sys": "", "read_image": 0, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Uncensored", "Large Context", "Mistral", "Unmodified", "12B Parameters", "Multilingual", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "Mistral Nemo",
      "set": { "temperature": 1 }
    },
    "181": {
      "id": 181, "name": "Claude 3 Sonnet", "model_name": "Claude 3 Sonnet",
      "desc": "Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.",
      "desc_short": "Claude 3 Sonnet has a balance of intelligence and speed. Anthropic's balanced and scalable model delivers strong utility.",
      "desc_more": "", "link": "claude-3-sonnet", "provider": "Official", "developer": "Anthropic", "tpm": 180,
      "image": "https://hbcdn01.hotbot.com/avatar/181.jpg", "model": 65, "sys": "", "read_image": 1, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 170507, "level": 3, "ph": "",
      "labels_full": ["Official", "Premium", "Large Context", "Claude", "Unmodified", "Image Input", "Coding", "Creative Writing"],
      "labels": ["Official", "Premium"], "wel": "", "asktext": "", "short_name": "Claude 3 Sonnet",
      "set": { "temperature": 1 }
    },
    "852": {
      "id": 852, "name": "Google Gemini 2.0 Flash-Lite", "model_name": "Google Gemini 2.0 Flash-Lite",
      "desc": "2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.",
      "desc_short": "2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.",
      "desc_more": "", "link": "google-gemini-2.0-flash-lite", "provider": "Official", "developer": "Google", "tpm": 4,
      "image": "https://hbcdn01.hotbot.com/avatar/852.jpg", "model": 213, "sys": "", "read_image": 1, "prem": false,
      "can_at": 1, "no_comp": 0, "search": 0, "no_search": 0, "no_audio": 0, "no_attach": 0,
      "wizard": [], "repeat_wizard": 0, "per_message": 0, "level": 0, "ph": "",
      "labels_full": ["Official", "Unlimited", "Large Context", "Google", "Gemini", "Unmodified", "Image Input", "Coding", "Creative Writing"],
      "labels": ["Official"], "wel": "", "asktext": "", "short_name": "Google Gemini 2.0 Flash-Lite",
      "set": { "temperature": 1 }
    },
    "1": {
      "id": 1, "name": "HotBot Assistant", "model_name": "HotBot Assistant",
      "desc": "HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.",
      "desc_short": "HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.",
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('