`;return o},i=(function(){/* Bot-metadata cache module (IIFE). Returns {cache, getBots, getBot}. g: in-memory bot map keyed by (stringified) bot id, pre-seeded with the static definitions below and, further down, with any still-fresh entries persisted in localStorage. NOTE(review): the desc_short string values below contain raw newlines exactly as generated — do not reflow them. */let g={"153":{"id":153,"name":"Meta Llama3 8b","model_name":"Meta Llama3 8b","desc":"Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"
Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 8B variant is a light-weight, ultra-fast model that can run anywhere.
","desc_more":"","link":"meta-llama3-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/153.jpg","model":21,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3 8b","set":{"temperature":1}},"157":{"id":157,"name":"Meta Llama3 70b","model_name":"Meta Llama3 70b","desc":"Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 70B variant is a highly performant model that enables diverse use cases.","desc_short":"
Llama 3 is a group of foundation models from Meta. It natively supports multilinguality, coding, reasoning, and tool usage. The 70B variant is a highly performant model that enables diverse use cases.
","desc_more":"","link":"meta-llama3-70b","provider":"Official","developer":"Meta","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/157.jpg","model":25,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","70B Parameters","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3 70b","set":{"temperature":1}},"289":{"id":289,"name":"MythoMax 13B","model_name":"MythoMax 13B","desc":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","desc_short":"
One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.
","desc_more":"","link":"mythomax-13b","provider":"Official","developer":"gryphe","tpm":2,"image":"hotbot.png","model":117,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Unmodified","13B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"MythoMax 13B","set":{"temperature":1}},"809":{"id":809,"name":"Claude 3.5 Haiku","model_name":"Claude 3.5 Haiku","desc":"This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.","desc_short":"
This is the next generation of Anthropic's fastest model. For a similar speed to Claude 3 Haiku, Claude 3.5 Haiku improves across every skill set and surpasses even Claude 3 Opus, the largest model in their previous generation, on many intelligence benchmarks. Claude 3.5 Haiku is particularly strong on coding tasks. It also features low latency, improved instruction following, and more accurate tool use.
","desc_more":"","link":"claude-3.5-haiku","provider":"Official","developer":"Anthropic","tpm":60,"image":"https://hbcdn01.hotbot.com/avatar/809.jpg","model":189,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":67800,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","Claude","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Claude 3.5 Haiku","set":{"temperature":1}},"161":{"id":161,"name":"Mixtral 8x7b","model_name":"Mixtral 8x7b","desc":"Mixtral 8x7B, a high-quality sparse mixture of experts model (SMoE) with open weights from Mistral AI. It is a decoder-only model that picks from a set of 8 distinct groups of parameters. At every layer, for every token, a router network chooses two of these groups (the \"experts\") to process the token and combine their output.","desc_short":"
Mixtral 8x7B, a high-quality sparse mixture of experts model (SMoE) with open weights from Mistral AI. It is a decoder-only model that picks from a set of 8 distinct groups of parameters. At every layer, for every token, a router network chooses two of these groups (the \"experts\") to process the token and combine their output.
","desc_more":"","link":"mixtral-8x7b","provider":"Official","developer":"Mistral AI","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/161.jpg","model":29,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Mixtral","Unmodified","8x7B Parameters","Coding","Mixture Of Experts"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mixtral 8x7b","set":{"temperature":1}},"293":{"id":293,"name":"WizardLM-2 8x22B","model_name":"WizardLM-2 8x22B","desc":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.","desc_short":"
WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is an instruct finetune of Mixtral 8x22B.
","desc_more":"","link":"wizardlm-2-8x22b","provider":"Official","developer":"Microsoft","tpm":10,"image":"https://hbcdn01.hotbot.com/avatar/293.jpg","model":121,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Unmodified","8x22B Parameters","Mixture Of Experts","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"WizardLM-2 8x22B","set":{"temperature":1}},"381":{"id":381,"name":"Qwen 2 VL 7B","model_name":"Qwen 2 VL 7B","desc":"Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.","desc_short":"
Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.
","desc_more":"","link":"qwen-2-vl-7b","provider":"Official","developer":"Qwen","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/381.jpg","model":165,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Image Input","7B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 7B","set":{"temperature":1}},"829":{"id":829,"name":"EVA Qwen2.5 14B","model_name":"EVA Qwen2.5 14B","desc":"A model specializing in RP and creative writing, this model is based on Qwen2.5-14B, fine-tuned with a mixture of synthetic and natural data. It is trained on 1.5M tokens of role-play data, and fine-tuned on 1.5M tokens of synthetic data.","desc_short":"
A model specializing in RP and creative writing, this model is based on Qwen2.5-14B, fine-tuned with a mixture of synthetic and natural data. It is trained on 1.5M tokens of role-play data, and fine-tuned on 1.5M tokens of synthetic data.
","desc_more":"","link":"eva-qwen2.5-14b","provider":"Official","developer":"HotBot","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/829.jpg","model":193,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Creative Writing","14B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"EVA Qwen2.5 14B","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":25,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","70B Parameters","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},/* b: in-flight lookup promises keyed by bot id — lets concurrent getBots() calls for the same id share one request */b={},/* d: shorthand for localStorage */d=localStorage,/* n: localStorage key under which the bot cache is persisted */n="hba-botcache",/* l(): load the persisted cache, keeping only entries whose cacheTS is newer than 10 minutes ago (6e5 ms); returns {} when the stored value is missing or unparseable (errors deliberately swallowed) */l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},/* a(bots): merge an array of bot records into the persisted cache, stamping cacheTS on records that lack one, then write back to localStorage */a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};/* seed the in-memory cache from still-fresh persisted entries, never overwriting the static definitions above */$.each(l(),(b,d)=>{if(!g[b])g[b]=d});/* e (getBots): resolve a single id or an array of ids to a map {id: bot}. Serves from the in-memory cache when possible, awaits any in-flight request covering a missing id, and batches the rest into one POST to /lookup-bots.json. NOTE(review): parseInt is called without a radix (assignment-in-condition also skips id 0/NaN) and o.code==0 uses loose equality — preserved as-is. */async function e(d){let n=[],l={};/* n: ids still to fetch; l: result map (both shadow the outer n/l) */for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){/* batch-fetch missing ids; publish the shared promise in b before awaiting so concurrent callers piggyback on it */let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}/* public API: raw cache map, batched lookup, and a single-bot convenience wrapper */return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('