`;return o},i=(function(){let g={"852":{"id":852,"name":"Google Gemini 2.0 Flash-Lite","model_name":"Google Gemini 2.0 Flash-Lite","desc":"2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.","desc_short":"
2.0 is the new flagship series of Gemini. Wanting to continue offering a model at the price and speed of 1.5 Flash, Google has created 2.0 Flash-Lite. A quick but powerful model updated to one million tokens for 2.0.
","desc_more":"","link":"google-gemini-2.0-flash-lite","provider":"Official","developer":"Google","tpm":4,"image":"https://hbcdn01.hotbot.com/avatar/852.jpg","model":213,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Large Context","Google","Gemini","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Google Gemini 2.0 Flash-Lite","set":{"temperature":1}},"317":{"id":317,"name":"Llama 3 Euryale v2.1 70B","model_name":"Llama 3 Euryale 70B v2.1","desc":"Euryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.","desc_short":"
Euryale 70B v2.1 is a model focused on creative roleplay. It has improved prompt adherence and spatial awareness, and adapts quickly to custom roleplay and formatting.
","desc_more":"","link":"llama-3-euryale-v2.1-70b","provider":"Official","developer":"Sao10k","tpm":8,"image":"https://hbcdn01.hotbot.com/avatar/317.jpg","model":145,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Llama","Unmodified","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Llama 3 Euryale v2.1 70B","set":{"temperature":1}},"141":{"id":141,"name":"OpenAI GPT-3.5 Turbo 16k","model_name":"OpenAI GPT-3.5 Turbo 16k","desc":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.","desc_short":"
OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.
","desc_more":"","link":"openai-gpt-3.5-turbo-16k","provider":"Official","developer":"OpenAI","tpm":70,"image":"https://hbcdn01.hotbot.com/avatar/141.jpg","model":9,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":71568,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-3.5 Turbo 16k","set":{"temperature":1}},"145":{"id":145,"name":"OpenAI GPT-4 Turbo","model_name":"OpenAI GPT-4 Turbo","desc":"GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.","desc_short":"
GPT-4 is a large multimodal model (accepting text or image inputs and outputting text) that can solve difficult problems with greater accuracy than any of OpenAI's previous models, and the turbo variant is optimized for quick responses and natural language chat.
","desc_more":"","link":"openai-gpt-4-turbo","provider":"Official","developer":"OpenAI","tpm":20,"image":"https://hbcdn01.hotbot.com/avatar/145.jpg","model":13,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":56803,"level":1,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-4 Turbo","set":{"temperature":1}},"149":{"id":149,"name":"OpenAI GPT-4 Omni","model_name":"OpenAI GPT-4 Omni","desc":"GPT-4 Omni is OpenAI's most advanced model. It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient, generating text 2x faster.","desc_short":"
GPT-4 Omni is OpenAI's most advanced model. It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient, generating text 2x faster.
","desc_more":"","link":"openai-gpt-4-omni","provider":"Official","developer":"OpenAI","tpm":200,"image":"https://hbcdn01.hotbot.com/avatar/149.jpg","model":17,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":201765,"level":3,"ph":"","labels_full":["Official","Premium","Large Context","Unmodified","Image Input","Coding","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-4 Omni","set":{"temperature":1}},"313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"
A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},"789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"
Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.
","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":90880,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},"381":{"id":381,"name":"Qwen 2 VL 7B","model_name":"Qwen 2 VL 7B","desc":"Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.","desc_short":"
Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.
","desc_more":"","link":"qwen-2-vl-7b","provider":"Official","developer":"Qwen","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/381.jpg","model":165,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Image Input","7B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 7B","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},b={},d=localStorage,n="hba-botcache",l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};$.each(l(),(b,d)=>{if(!g[b])g[b]=d});async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('