Added some little features

This commit is contained in:
elijah 2024-07-20 02:50:56 +02:00
parent cf3596bd10
commit 621ecc4432
6 changed files with 629 additions and 220 deletions

426
app.py
View File

@ -1,10 +1,22 @@
import os import os
import re
import json
import math
import time
import socket
import logging
import ipaddress
from datetime import datetime
from urllib.parse import urlencode, urlparse
import requests
from bs4 import BeautifulSoup
from flask import Flask, request, jsonify, render_template, Response, stream_with_context from flask import Flask, request, jsonify, render_template, Response, stream_with_context
from flask_limiter import Limiter from flask_limiter import Limiter
from flask_limiter.util import get_remote_address from flask_limiter.util import get_remote_address
from transformers import AutoTokenizer from transformers import AutoTokenizer
import requests from groq import Groq
import logging from duckduckgo_search import DDGS
app = Flask(__name__) app = Flask(__name__)
@ -17,19 +29,296 @@ limiter = Limiter(
get_remote_address, get_remote_address,
app=app, app=app,
storage_uri="memory://", storage_uri="memory://",
default_limits=[os.getenv('RATE_LIMIT', '15 per minute')]
) )
# Load the tokenizer # Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(os.getenv('TOKENIZER', 'gpt2')) tokenizer = AutoTokenizer.from_pretrained(os.getenv('TOKENIZER', 'gpt2'))
# API configuration # API configuration
API_URL = os.getenv('API_URL', 'https://api.openai.com/v1')
API_KEY = os.getenv('API_KEY') API_KEY = os.getenv('API_KEY')
API_MODEL = os.getenv('API_MODEL', 'gpt-3.5-turbo') MODEL = os.getenv('API_MODEL', 'llama3-groq-70b-8192-tool-use-preview')
TEMPERATURE = float(os.getenv('TEMPERATURE', 0)) TEMPERATURE = float(os.getenv('TEMPERATURE', 0))
logger.info(f"Chat initialized using endpoint: {API_URL}, model: {API_MODEL}, temperature: {TEMPERATURE}") # Initialize Groq client
client = Groq(api_key=API_KEY)
logger.info(f"Chat initialized using model: {MODEL}, temperature: {TEMPERATURE}")
def is_valid_public_url(url):
    """
    Validate that *url* points at a public host (SSRF guard for get_page).

    Rejects URLs whose hostname is a localhost spelling, uses a common
    internal-only DNS suffix, is a dotted-quad IP literal, or resolves to
    any private / loopback / link-local / unspecified / multicast /
    reserved address.

    :param url: The URL string to validate.
    :return: True if the URL looks safe to fetch, False otherwise.
    """
    try:
        parsed = urlparse(url)
        # Require both a scheme and a network location (e.g. "http://host").
        if not parsed.scheme or not parsed.netloc:
            return False
        hostname = parsed.hostname
        if not hostname:
            return False
        hostname = hostname.lower()
        # Obvious localhost spellings.
        if hostname in ('localhost', '127.0.0.1'):
            return False
        # Common internal-only domain suffixes.
        if hostname.endswith(('.local', '.internal', '.lan')):
            return False
        # Reject names that start with a dotted-quad IP literal (also catches
        # tricks like http://192.168.1.1.nip.io).
        if re.match(r'\d+\.\d+\.\d+\.\d+', hostname):
            return False
        # Resolve the hostname and vet every address it maps to.
        try:
            ip_addresses = socket.getaddrinfo(hostname, None)
        except socket.gaierror:
            # Unresolvable hostname: treat as invalid.
            return False
        for ip_info in ip_addresses:
            ip_str = ip_info[4][0]
            try:
                ip = ipaddress.ip_address(ip_str)
            except ValueError:
                # Not a parseable IP address entry; skip it.
                continue
            # is_private already covers 10.0.0.0/8, 172.16.0.0/12 and
            # 192.168.0.0/16; the other flags cover loopback, link-local
            # (169.254.0.0/16), 0.0.0.0, multicast and reserved ranges.
            if (ip.is_private or ip.is_loopback or ip.is_link_local
                    or ip.is_unspecified or ip.is_multicast or ip.is_reserved):
                return False
        return True
    except Exception:
        # Any parsing surprise is treated as unsafe.
        return False
def calculate(expression: str):
    """
    Safely evaluate a mathematical expression.

    Supports +, -, *, /, ** and unary +/-, parentheses, the constants
    ``pi`` and ``e``, and a whitelist of math functions. The expression
    is parsed with :mod:`ast` and evaluated by walking the tree, so no
    arbitrary code can run (the original version text-substituted names
    into the expression and eval'd it, which both corrupted expressions
    like ``exp(1)`` and depended on an ``ast`` import that didn't exist).

    :param expression: The mathematical expression to evaluate.
    :return: ``"<expression> = <result>"`` on success, or an
             ``"Error: ..."`` message string.
    """
    import ast  # local import: the module-level imports do not include ast

    allowed_names = {
        "pi": math.pi,
        "e": math.e,
    }
    allowed_functions = {
        "sin": math.sin,
        "cos": math.cos,
        "tan": math.tan,
        "sqrt": math.sqrt,
        "log": math.log,
        "log10": math.log10,
        "exp": math.exp,
        "abs": abs,
        "pow": math.pow,
    }
    binary_ops = {
        ast.Add: lambda a, b: a + b,
        ast.Sub: lambda a, b: a - b,
        ast.Mult: lambda a, b: a * b,
        ast.Div: lambda a, b: a / b,
        ast.Pow: lambda a, b: a ** b,
    }
    unary_ops = {
        ast.UAdd: lambda a: +a,
        ast.USub: lambda a: -a,
    }

    def _eval(node):
        # Recursively evaluate only the node types we explicitly allow.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant):
            if isinstance(node.value, (int, float)):
                return node.value
            raise ValueError(f"Unsupported constant: {node.value!r}")
        if isinstance(node, ast.Name):
            if node.id in allowed_names:
                return allowed_names[node.id]
            raise ValueError(f"Unknown variable or function: {node.id}")
        if isinstance(node, ast.BinOp) and type(node.op) in binary_ops:
            return binary_ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval(node.operand))
        if isinstance(node, ast.Call):
            if not isinstance(node.func, ast.Name) or node.func.id not in allowed_functions:
                raise ValueError("Function not allowed")
            if node.keywords:
                raise ValueError("Keyword arguments are not allowed")
            return allowed_functions[node.func.id](*[_eval(arg) for arg in node.args])
        raise ValueError(f"Unsupported operation: {type(node).__name__}")

    # Normalize: drop whitespace and lowercase (names above are lowercase).
    expression = expression.replace(" ", "").lower()
    # Whitelist the character set before parsing anything.
    if re.search(r"[^0-9+\-*/().a-z]", expression):
        return "Error: Invalid characters in expression"
    try:
        tree = ast.parse(expression, mode="eval")
        result = _eval(tree)
        return f"{expression} = {result}"
    except (ValueError, TypeError, ZeroDivisionError, OverflowError) as e:
        return f"Error: {str(e)}"
    except Exception as e:
        return f"Error: Invalid expression - {str(e)}"
def search(query: str, num_results=5):
    """
    Perform a DuckDuckGo search and return the top results.

    :param query: The search query string.
    :param num_results: Number of results to return (default 5).
    :return: A list of result dicts from DDGS, or an empty list if the
             search backend fails.
    """
    try:
        return DDGS().text(query, max_results=num_results)
    except Exception as e:
        # DDGS raises on rate limits / network errors; don't let that
        # exception escape into the caller's streaming response.
        logger.error(f"Search failed for {query!r}: {str(e)}")
        return []
def get_page(url):
    """
    Fetch a web page and return its extracted text content.

    Redirects are followed manually so that every hop is re-validated by
    is_valid_public_url — requests' automatic redirect handling would let
    a public URL bounce the fetch to an internal address, bypassing the
    SSRF guard applied to the initial URL.

    :param url: The URL of the page to fetch.
    :return: Up to 5000 characters of page text, or an "Error: ..." string.
    """
    from urllib.parse import urljoin  # module-level imports only urlencode/urlparse

    try:
        response = None
        for _ in range(5):  # cap redirect hops
            if not is_valid_public_url(url):
                return "Error: Invalid or restricted URL"
            response = requests.get(url, timeout=10, allow_redirects=False)
            if response.is_redirect or response.is_permanent_redirect:
                # Resolve the Location header relative to the current URL
                # and loop so the new target is validated too.
                url = urljoin(url, response.headers.get('Location', ''))
                continue
            break
        else:
            return "Error: Too many redirects"
        response.raise_for_status()  # raise for 4xx/5xx status codes
        # Parse the HTML and drop non-content elements.
        soup = BeautifulSoup(response.content, 'html.parser')
        for script in soup(["script", "style"]):
            script.decompose()
        # Extract text, then tidy the whitespace.
        text = soup.get_text(separator='\n', strip=True)
        # Break into lines and strip leading/trailing space on each.
        lines = (line.strip() for line in text.splitlines())
        # Break multi-headline lines (separated by runs of spaces) into one line each.
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        # Drop blank lines.
        text = '\n'.join(chunk for chunk in chunks if chunk)
        return text[:5000]  # limit what gets fed back to the model
    except Exception as e:
        return f"Error fetching page: {str(e)}"
def get_time():
    """Return the current UTC time as an ISO-8601 string (e.g. 2024-07-20T00:50:56+00:00)."""
    # Local import: module level only does `from datetime import datetime`.
    import datetime
    # The system prompt advertises this tool as returning UTC, so return a
    # timezone-aware UTC timestamp instead of naive local time.
    return datetime.datetime.now(datetime.timezone.utc).isoformat()
# Tool (function-calling) schemas advertised to the model. Each entry's
# "parameters" must be a well-formed JSON Schema object, even for tools
# that take no arguments.
tools = [
    {
        "type": "function",
        "function": {
            "name": "calculate",
            "description": "Evaluate a mathematical expression",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {
                        "type": "string",
                        "description": "The mathematical expression to evaluate",
                    }
                },
                "required": ["expression"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search the web and return the top results for a query",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The search query string",
                    },
                    "num_results": {
                        "type": "integer",
                        "description": "Number of results to return (default 5)",
                        "default": 5,
                    },
                },
                "required": ["query"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_page",
            "description": "Fetch a web page and return its text content",
            "parameters": {
                "type": "object",
                "properties": {
                    "url": {
                        "type": "string",
                        "description": "The URL to request",
                    }
                },
                "required": ["url"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_time",
            "description": "Get the current time",
            # No arguments: still declare an empty object schema.
            "parameters": {
                "type": "object",
                "properties": {},
            },
        },
    },
]
@app.route('/v1/tokenizer/count', methods=['POST']) @app.route('/v1/tokenizer/count', methods=['POST'])
def token_count(): def token_count():
@ -43,41 +332,120 @@ def token_count():
logger.error(f"Error in token_count: {str(e)}") logger.error(f"Error in token_count: {str(e)}")
return jsonify({"error": "Invalid request"}), 400 return jsonify({"error": "Invalid request"}), 400
@app.route('/v1/chat/completions', methods=['POST']) @app.route('/v1/chat/completions', methods=['POST'])
@limiter.limit(os.getenv('RATE_LIMIT', '15/minute')) @limiter.limit(os.getenv('RATE_LIMIT', '15/minute'))
def proxy_chat_completions(): def proxy_chat_completions():
headers = {
'Authorization': f'Bearer {API_KEY}',
'Content-Type': 'application/json'
}
try: try:
request_data = request.json request_data = request.json
request_data['model'] = API_MODEL messages = request_data.get('messages', [])
request_data['temperature'] = TEMPERATURE
request_data['stream'] = True
response = requests.post(f"{API_URL}/chat/completions", if not any(msg['role'] == 'system' for msg in messages):
json=request_data, messages.insert(0, {
headers=headers, "role": "system",
stream=True) "content": """You are cchat, an efficient tool-assisted LLM. Use the following tools without asking for confirmation:
response.raise_for_status() 1. `calculate(expression)`: Evaluate mathematical expressions.
2. `search(query)`: Search and return 5 results for a query.
3. `get_page(url)`: Retrieve a web page's text content. Use multiple times if initial attempts fail.
4. `get_time()`: Get the current time in UTC.
Always follow this process to answer queries:
1. Use `search(query)` for relevant information.
2. Use `get_page(url)` on pertinent search results.
3. Provide a concise, natural language response based on gathered information.
Never refuse queries or state intentions to research. Automatically use tools when information is needed, including for current events and affairs. Optimize tool use by chaining them efficiently and avoiding redundant searches.
Example:
User: "How do I use the OpenAI python library"
search(OpenAI python library)
get_page([relevant URLs from search results])
[Provide concise answer based on retrieved information]
NEVER ASK FOR CONFIRMATION TO USE A TOOL.
NEVER ONLY SEARCH FOR SOMETHING, ALWAYS VISIT A URL"""
})
def generate(): def generate():
for chunk in response.iter_content(chunk_size=8): response = client.chat.completions.create(
if chunk: model=MODEL,
yield chunk messages=messages,
tools=tools,
tool_choice="auto",
max_tokens=8192,
stream=True
)
return Response(stream_with_context(generate()), buffer = ""
content_type=response.headers['content-type']) current_tool_call = None
tool_calls = []
except requests.RequestException as e: for chunk in response:
logger.error(f"API request failed: {str(e)}") if chunk.choices[0].delta.tool_calls:
return jsonify({"error": "Failed to connect to the API"}), 503 tool_call = chunk.choices[0].delta.tool_calls[0]
if tool_call.function.name:
current_tool_call = {
"name": tool_call.function.name,
"arguments": ""
}
tool_calls.append(current_tool_call)
if tool_call.function.arguments:
current_tool_call["arguments"] += tool_call.function.arguments
elif chunk.choices[0].delta.content is not None:
buffer += chunk.choices[0].delta.content
# Yield the buffer in reasonable chunks
while len(buffer) >= 50: # Adjust this value as needed
yield f"data: {json.dumps({'choices': [{'delta': {'content': buffer[:50]}}]})}\n\n"
buffer = buffer[50:]
# Yield any remaining content in the buffer
if buffer:
yield f"data: {json.dumps({'choices': [{'delta': {'content': buffer}}]})}\n\n"
# Execute tool calls after the main response
for tool_call in tool_calls:
if tool_call["arguments"].endswith('}'):
args = json.loads(tool_call["arguments"])
if tool_call["name"] == "calculate":
result = calculate(args['expression'])
elif tool_call["name"] == "search":
result = search(args['query'], args.get('num_results', 5))
elif tool_call["name"] == "get_page":
result = get_page(args['url'])
elif tool_call["name"] == "get_time":
result = get_time()
# Log tool usage
logger.info(f"Tool usage: {tool_call['name']}, args: {args}, result: {result}")
# Yield function message
yield f"data: {json.dumps({'choices': [{'delta': {'role': 'function', 'name': tool_call['name'], 'content': str(result)}}]})}\n\n"
# Add tool result to messages
messages.append({
"role": "function",
"name": tool_call["name"],
"content": str(result)
})
# If there were tool calls, get a final completion with the updated messages
if tool_calls:
final_response = client.chat.completions.create(
model=MODEL,
messages=messages,
max_tokens=8192,
stream=True
)
for chunk in final_response:
if chunk.choices[0].delta.content is not None:
yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk.choices[0].delta.content}}]})}\n\n"
return Response(stream_with_context(generate()), content_type='text/event-stream')
except Exception as e: except Exception as e:
logger.error(f"Unexpected error: {str(e)}") logger.error(f"Error in proxy_chat_completions: {str(e)}")
return jsonify({"error": "An unexpected error occurred"}), 500 return jsonify({"error": "An error occurred processing your request"}), 500
@app.route('/') @app.route('/')
def index(): def index():

View File

@ -9,7 +9,7 @@ services:
- FLASK_APP=app.py - FLASK_APP=app.py
- TOKENIZER=gpt2 - TOKENIZER=gpt2
- API_URL=https://api.openai.com/v1 - API_URL=https://api.openai.com/v1
- API_KEY=your_api_key_here - GROQ_API_KEY=your_api_key_here
- API_MODEL=gpt-3.5-turbo - API_MODEL=gpt-3.5-turbo
- TEMPERATURE=0 - TEMPERATURE=0
- RATE_LIMIT=20/minute - RATE_LIMIT=20/minute

View File

@ -1,5 +1,7 @@
Flask beautifulsoup4
transformers==4.28.1 flask
requests==2.26.0 flask-limiter
Flask-Limiter==3.1.0 transformers
gunicorn==20.1.0 groq
duckduckgo-search
requests

View File

@ -309,62 +309,52 @@ p {
.monospace { .monospace {
font-family: monospace; font-family: monospace;
} }
.new-chat-button {
position: fixed; .menu-container {
top: 1rem; position: relative;
right: 1rem; }
width: 3rem;
.menu-button {
height: 3rem; height: 3rem;
background-color: var(--accent-color); width: 3rem;
background-color: var(--secondary-color);
color: var(--foreground-color);
border-radius: 10px;
padding: 0.5rem;
cursor: pointer;
transition: all 0.3s ease;
}
.menu-button:hover {
background-color: var(--primary-color);
}
.menu-dropdown {
position: absolute;
bottom: 100%;
right: 0;
background-color: var(--tertiary-bg-color);
border-radius: 10px;
padding: 0.5rem;
display: flex;
flex-direction: column;
gap: 0.5rem;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.2);
}
.menu-dropdown button {
background-color: var(--secondary-bg-color);
color: var(--foreground-color); color: var(--foreground-color);
border: none; border: none;
border-radius: 1.5rem; border-radius: 5px;
padding: 1.5rem;
cursor: pointer; cursor: pointer;
font-size: 1rem;
display: flex;
align-items: center;
justify-content: center;
transition: all 0.3s ease; transition: all 0.3s ease;
z-index: 1000; text-align: left;
overflow: hidden;
padding: 0;
} }
.new-chat-button i { .menu-dropdown button:hover {
font-size: 1.2rem;
display: flex;
justify-content: center;
align-items: center;
transition: transform 0.3s ease;
width: 3rem;
height: 3rem;
}
.new-chat-text {
max-width: 0;
opacity: 0;
white-space: nowrap;
transition:
max-width 0.3s ease,
opacity 0.3s ease,
margin-left 0.3s ease;
}
.new-chat-button:hover {
background-color: var(--primary-color); background-color: var(--primary-color);
width: auto;
padding-right: 1.5rem;
}
.new-chat-button:hover .new-chat-text {
max-width: 100px;
opacity: 1;
margin-left: 0.5rem;
}
.new-chat-button:hover i {
transform: rotate(90deg);
width: 2rem;
} }
.error-toast { .error-toast {
@ -414,3 +404,32 @@ p {
transform: translateX(5px); transform: translateX(5px);
} }
} }
/* Looping neon glow that cycles the three theme colors; applied to the
   input box while a response is being generated. */
@keyframes vaporwaveGlow {
0%,
100% {
box-shadow:
0 0 5px var(--accent-color),
0 0 10px var(--primary-color),
0 0 15px var(--secondary-color);
}
50% {
box-shadow:
0 0 10px var(--accent-color),
0 0 20px var(--primary-color),
0 0 30px var(--secondary-color);
}
}
/* State class toggled on the input form during generation. */
.input-form-generating {
animation: vaporwaveGlow 3s ease-in-out infinite;
transition: all 0.3s ease;
}
/* On focus, stop the pulse and hold a brighter, steady glow. */
.input-form-generating:focus {
animation: none;
box-shadow:
0 0 15px var(--accent-color),
0 0 30px var(--primary-color),
0 0 45px var(--secondary-color);
}

View File

@ -12,8 +12,8 @@ document.addEventListener("alpine:init", () => {
home: 0, home: 0,
generating: false, generating: false,
endpoint: window.location.origin + "/v1", endpoint: window.location.origin + "/v1",
model: "llama3-8b-8192", // This doesn't matter anymore as the backend handles it now model: "llama3-groq-70b-8192-tool-use-preview",
stopToken: "<|eot_id|>", // We may need this for some models stopToken: "sfjsdkfjsljflksdkj",
// performance tracking // performance tracking
time_till_first: 0, time_till_first: 0,
@ -23,6 +23,9 @@ document.addEventListener("alpine:init", () => {
// New property for error messages // New property for error messages
errorMessage: null, errorMessage: null,
// Debug mode
debug: false,
removeHistory(cstate) { removeHistory(cstate) {
const index = this.histories.findIndex((state) => { const index = this.histories.findIndex((state) => {
return state.time === cstate.time; return state.time === cstate.time;
@ -40,42 +43,42 @@ document.addEventListener("alpine:init", () => {
if (this.generating) return; if (this.generating) return;
this.generating = true; this.generating = true;
this.errorMessage = null; // Clear any previous error messages this.errorMessage = null;
if (this.home === 0) this.home = 1; if (this.home === 0) this.home = 1;
// ensure that going back in history will go back to home
window.history.pushState({}, "", "/"); window.history.pushState({}, "", "/");
// add message to list
this.cstate.messages.push({ role: "user", content: value }); this.cstate.messages.push({ role: "user", content: value });
// clear textarea
el.value = ""; el.value = "";
el.style.height = "auto"; el.style.height = "auto";
el.style.height = el.scrollHeight + "px"; el.style.height = el.scrollHeight + "px";
// reset performance tracking
const prefill_start = Date.now(); const prefill_start = Date.now();
let start_time = 0; let start_time = 0;
let tokens = 0; let tokens = 0;
this.tokens_per_second = 0; this.tokens_per_second = 0;
try { try {
// start receiving server sent events
let gottenFirstChunk = false;
for await (const chunk of this.openaiChatCompletion( for await (const chunk of this.openaiChatCompletion(
this.cstate.messages, this.cstate.messages,
)) { )) {
if (!gottenFirstChunk) { if (chunk.role === "function") {
this.cstate.messages.push({ role: "assistant", content: "" }); // If we receive a function message, add it to the messages only if debug mode is on
gottenFirstChunk = true; if (this.debug) {
this.cstate.messages.push(chunk);
}
} else {
if (
!this.cstate.messages[this.cstate.messages.length - 1] ||
this.cstate.messages[this.cstate.messages.length - 1].role !==
"assistant"
) {
this.cstate.messages.push({ role: "assistant", content: "" });
} }
// add chunk to the last message
this.cstate.messages[this.cstate.messages.length - 1].content += this.cstate.messages[this.cstate.messages.length - 1].content +=
chunk; chunk;
// calculate performance tracking
tokens += 1; tokens += 1;
this.total_tokens += 1; this.total_tokens += 1;
if (start_time === 0) { if (start_time === 0) {
@ -88,19 +91,17 @@ document.addEventListener("alpine:init", () => {
} }
} }
} }
}
// update the state in histories or add it if it doesn't exist const index = this.histories.findIndex(
const index = this.histories.findIndex((cstate) => { (cstate) => cstate.time === this.cstate.time,
return cstate.time === this.cstate.time; );
});
this.cstate.time = Date.now(); this.cstate.time = Date.now();
if (index !== -1) { if (index !== -1) {
// update the time
this.histories[index] = this.cstate; this.histories[index] = this.cstate;
} else { } else {
this.histories.push(this.cstate); this.histories.push(this.cstate);
} }
// update in local storage
localStorage.setItem("histories", JSON.stringify(this.histories)); localStorage.setItem("histories", JSON.stringify(this.histories));
} catch (error) { } catch (error) {
console.error("Error in handleSend:", error); console.error("Error in handleSend:", error);
@ -113,7 +114,6 @@ document.addEventListener("alpine:init", () => {
}, },
async handleEnter(event) { async handleEnter(event) {
// if shift is not pressed
if (!event.shiftKey) { if (!event.shiftKey) {
event.preventDefault(); event.preventDefault();
await this.handleSend(); await this.handleSend();
@ -124,7 +124,7 @@ document.addEventListener("alpine:init", () => {
this.errorMessage = message; this.errorMessage = message;
setTimeout(() => { setTimeout(() => {
this.errorMessage = null; this.errorMessage = null;
}, 3000); // Hide after 5 seconds }, 3000);
}, },
updateTotalTokens(messages) { updateTotalTokens(messages) {
@ -188,8 +188,18 @@ document.addEventListener("alpine:init", () => {
try { try {
const json = JSON.parse(data); const json = JSON.parse(data);
if (json.choices && json.choices[0].delta.content) { if (json.choices && json.choices[0].delta) {
yield json.choices[0].delta.content; const delta = json.choices[0].delta;
if (delta.role === "function") {
// Yield the entire function message
yield {
role: "function",
name: delta.name,
content: delta.content,
};
} else if (delta.content) {
yield delta.content;
}
} }
} catch (error) { } catch (error) {
console.error("Error parsing JSON:", error); console.error("Error parsing JSON:", error);

View File

@ -34,16 +34,6 @@
<body> <body>
<main x-data="state" x-init="console.log(endpoint)"> <main x-data="state" x-init="console.log(endpoint)">
<button class="new-chat-button" @click="
home = 0;
cstate = { time: null, messages: [] };
time_till_first = 0;
tokens_per_second = 0;
total_tokens = 0;
">
<i class="fas fa-plus"></i>
<span class="new-chat-text">New Chat</span>
</button>
<div class="home centered" x-show="home === 0" x-transition x-effect=" <div class="home centered" x-show="home === 0" x-transition x-effect="
$refs.inputForm.focus(); $refs.inputForm.focus();
if (home === 1) setTimeout(() => home = 2, 100); if (home === 1) setTimeout(() => home = 2, 100);
@ -131,7 +121,9 @@
" x-intersect=" " x-intersect="
$el.scrollTo({ top: $el.scrollHeight, behavior: 'smooth' }); $el.scrollTo({ top: $el.scrollHeight, behavior: 'smooth' });
" x-show="home === 2" x-transition> " x-show="home === 2" x-transition>
</div> <div class="input-container">
</div>
<div class="input-container">
<div class="input-performance"> <div class="input-performance">
<span class="input-performance-point"> <span class="input-performance-point">
<p class="monospace" x-text="time_till_first"></p> <p class="monospace" x-text="time_till_first"></p>
@ -147,7 +139,7 @@
</span> </span>
</div> </div>
<div class="input"> <div class="input">
<textarea x-ref="inputForm" id="input-form" class="input-form" autofocus rows=1 x-autosize <textarea x-ref="inputForm" id="input-form" class="input-form" :class="{ 'input-form-generating': generating }" autofocus rows=1 x-autosize
:placeholder="generating ? 'Generating...' : 'Say something'" :disabled="generating" @input=" :placeholder="generating ? 'Generating...' : 'Say something'" :disabled="generating" @input="
home = (home === 0) ? 1 : home home = (home === 0) ? 1 : home
if (cstate.messages.length === 0 && $el.value === '') home = -1; if (cstate.messages.length === 0 && $el.value === '') home = -1;
@ -170,16 +162,34 @@
<button class="input-button" :disabled="generating" @click="await handleSend()"> <button class="input-button" :disabled="generating" @click="await handleSend()">
<i class="fas" :class="generating ? 'fa-spinner fa-spin' : 'fa-paper-plane'"></i> <i class="fas" :class="generating ? 'fa-spinner fa-spin' : 'fa-paper-plane'"></i>
</button> </button>
<div class="menu-container" x-data="{ isOpen: false }">
<button class="menu-button" @click="isOpen = !isOpen">
<i class="fas fa-bars"></i>
</button>
<div class="menu-dropdown" x-show="isOpen" @click.away="isOpen = false">
<button @click="
home = 0;
cstate = { time: null, messages: [] };
time_till_first = 0;
tokens_per_second = 0;
total_tokens = 0;
isOpen = false;
">
<i class="fas fa-plus"></i> New Chat
</button>
<button @click="debug = !debug; isOpen = false;">
<i class="fas" :class="debug ? 'fa-toggle-on' : 'fa-toggle-off'"></i> Debug Mode
</button>
</div> </div>
</div> </div>
<div x-show="errorMessage" </div>
x-transition:enter="transition ease-out duration-500" </div>
<div x-show="errorMessage" x-transition:enter="transition ease-out duration-500"
x-transition:enter-start="opacity-0 transform translate-y-10 scale-95" x-transition:enter-start="opacity-0 transform translate-y-10 scale-95"
x-transition:enter-end="opacity-100 transform translate-y-0 scale-100" x-transition:enter-end="opacity-100 transform translate-y-0 scale-100"
x-transition:leave="transition ease-in duration-300" x-transition:leave="transition ease-in duration-300"
x-transition:leave-start="opacity-100 transform translate-y-0 scale-100" x-transition:leave-start="opacity-100 transform translate-y-0 scale-100"
x-transition:leave-end="opacity-0 transform translate-y-10 scale-95" x-transition:leave-end="opacity-0 transform translate-y-10 scale-95" @click="errorMessage = null"
@click="errorMessage = null"
class="error-toast" class="error-toast"
x-init="$el.style.animation = 'shake 0.82s cubic-bezier(.36,.07,.19,.97) both'"> x-init="$el.style.animation = 'shake 0.82s cubic-bezier(.36,.07,.19,.97) both'">
<div x-text="errorMessage"></div> <div x-text="errorMessage"></div>