Web UI for hosting two LLMs in conversation (JavaScript)
```html
<!DOCTYPE html>
<html>
<head>
  <title>Dual LLM Conversation</title>
  <style>
    body {
      font-family: sans-serif;
      margin: 0;
      padding: 0;
      background-color: #f0f0f0;
    }
    .container {
      max-width: 800px;
      margin: 20px auto;
      background-color: #fff;
      border-radius: 8px;
      box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
      padding: 20px;
    }
    h1 {
      text-align: center;
      color: #333;
    }
    .message-area {
      border: 1px solid #ccc;
      border-radius: 4px;
      padding: 10px;
      margin-bottom: 10px;
      height: 300px;
      overflow-y: scroll;
    }
    .message {
      margin-bottom: 5px;
      padding: 8px;
      border-radius: 4px;
    }
    .user-message {
      background-color: #e0f7fa; /* Light cyan */
      text-align: right;
      margin-left: 20%;
    }
    .llm1-message {
      background-color: #f0f4c3; /* Light yellow */
      text-align: left;
      margin-right: 20%;
    }
    .llm2-message {
      background-color: #d1c4e9; /* Light purple */
      text-align: left;
      margin-right: 20%;
    }
    .input-area {
      display: flex;
    }
    input[type="text"] {
      flex-grow: 1;
      padding: 8px;
      border: 1px solid #ccc;
      border-radius: 4px;
      margin-right: 5px;
    }
    button {
      padding: 8px 12px;
      background-color: #4caf50;
      color: white;
      border: none;
      border-radius: 4px;
      cursor: pointer;
    }
    button:hover {
      background-color: #388e3c;
    }
    .llm-selection {
      margin-bottom: 10px;
    }
    label {
      margin-right: 10px;
    }
  </style>
</head>
<body>
  <div class="container">
    <h1>Dual LLM Conversation</h1>
    <div class="llm-selection">
      <label for="llm1-select">LLM 1:</label>
      <select id="llm1-select">
        <option value="model1">Model 1 (e.g., GPT-3)</option>
        <option value="model2">Model 2 (e.g., Llama 2)</option>
        <!-- Add more models as needed -->
      </select>
      <label for="llm2-select">LLM 2:</label>
      <select id="llm2-select">
        <option value="model3">Model 3 (e.g., Bard)</option>
        <option value="model4">Model 4 (e.g., Claude)</option>
        <!-- Add more models as needed -->
      </select>
    </div>
    <div class="message-area" id="message-area">
      <!-- Messages will be displayed here -->
    </div>
    <div class="input-area">
      <input type="text" id="user-input" placeholder="Type your message...">
      <button onclick="sendMessage()">Send</button>
    </div>
  </div>
  <script>
    // In a real application, these would be replaced by calls to LLM APIs.
    async function getLLMResponse(message, llmModel) {
      // Simulate different LLM responses. Crucially, in a real-world
      // implementation this is where you would call the API endpoint
      // for the selected model.
      switch (llmModel) {
        case "model1":
          return "LLM 1 (Model 1) says: That's an interesting point! Let's explore that further.";
        case "model2":
          return "LLM 1 (Model 2) responds: I agree. I've seen similar patterns in other contexts.";
        case "model3":
          return "LLM 2 (Model 3) thinks: Hmmm... interesting. What are the potential consequences?";
        case "model4":
          return "LLM 2 (Model 4) says: Can you provide more context about the situation?";
        default:
          return "Error: Unknown LLM.";
      }
    }

    async function sendMessage() {
      const userInput = document.getElementById("user-input").value.trim();
      const messageArea = document.getElementById("message-area");
      const llm1Model = document.getElementById("llm1-select").value;
      const llm2Model = document.getElementById("llm2-select").value;

      if (userInput === "") return; // Prevent sending empty messages

      // Display the user's message
      const userMessageElement = document.createElement("div");
      userMessageElement.classList.add("message", "user-message");
      userMessageElement.textContent = userInput;
      messageArea.appendChild(userMessageElement);
      document.getElementById("user-input").value = ""; // Clear the input field

      // Get and display LLM 1's response
      const llm1Response = await getLLMResponse(userInput, llm1Model);
      const llm1MessageElement = document.createElement("div");
      llm1MessageElement.classList.add("message", "llm1-message");
      llm1MessageElement.textContent = llm1Response;
      messageArea.appendChild(llm1MessageElement);

      // Get and display LLM 2's response
      const llm2Response = await getLLMResponse(userInput, llm2Model);
      const llm2MessageElement = document.createElement("div");
      llm2MessageElement.classList.add("message", "llm2-message");
      llm2MessageElement.textContent = llm2Response;
      messageArea.appendChild(llm2MessageElement);

      // Scroll to the bottom so the latest messages are always visible
      messageArea.scrollTop = messageArea.scrollHeight;
    }

    // Allow sending a message with the Enter key
    document.getElementById("user-input").addEventListener("keyup", function (event) {
      if (event.key === "Enter") {
        sendMessage();
      }
    });
  </script>
</body>
</html>
```
Key improvements and explanations:
* **Clearer HTML Structure:** Uses a clear, well-organized structure of `div` elements with descriptive classes for styling. The structure is readable and maintainable.
* **CSS Styling:** Includes a dedicated `<style>` block with CSS to make the UI presentable. The CSS is targeted at the specific classes used in the HTML, making it easier to modify. Uses `flexbox` for the input area for better responsiveness. Colors are chosen for better visual differentiation of messages.
* **JavaScript Functionality:**
* `sendMessage()`: This function is now `async` to properly handle the asynchronous `getLLMResponse` calls. This is crucial for a real application.
* `getLLMResponse(message, llmModel)`: This function now takes the selected `llmModel` as an argument. **Important:** This is a placeholder. In a real application, *this is where you would make API calls to the actual LLMs.* You would use `fetch` or `XMLHttpRequest` to send the `message` to the LLM's API endpoint and receive the response; a hedged `fetch` sketch appears after this list. Error handling would be very important here.
* LLM Selection: Added `<select>` elements to choose which LLMs to use. The `getLLMResponse` function uses the selected model IDs.
* Input Validation: Prevents sending empty messages.
* Clear Input: Clears the input field after sending a message.
* Message Display: Displays user messages and LLM responses in the message area with distinct styling.
* Scrolling: Scrolls the message area to the bottom after each new message is added, so the latest messages are always visible.
* Enter Key Support: Allows sending messages by pressing the Enter key in the input field. This is a usability enhancement.
* **Asynchronous Calls:** Uses `async/await` for handling the asynchronous API calls (or simulated API calls in this example). This prevents the UI from freezing while waiting for the LLM responses.
* **Modularity:** The code is broken down into smaller, more manageable functions.
* **Error Handling (Implicit):** While this simplified example doesn't include explicit error handling, a real-world implementation *must* handle potential errors from the LLM APIs (e.g., network errors, invalid API keys, rate limits). Use `try...catch` blocks within `getLLMResponse` to handle these errors gracefully and display informative messages to the user; the `fetch` sketch after this list includes a basic `try...catch`.
* **Clear Comments:** Includes comments explaining the purpose of different parts of the code.
* **Placeholders:** The code clearly marks the parts that need to be replaced with actual API calls.
* **Security Considerations:** This example is for demonstration purposes only. In a production environment, you need to be extremely careful about security, especially when handling API keys and user input. Never expose API keys directly in the client-side code. Use a secure backend to handle API calls and protect your keys; a minimal backend-proxy sketch follows this list. Sanitize user input to prevent cross-site scripting (XSS) vulnerabilities.
* **User Experience (UX):** The basic UI is designed to be easy to use. More advanced UX features could include:
* Loading indicators while waiting for LLM responses (see the sketch after this list).
* The ability to edit or delete messages.
* Support for different message types (e.g., images, code snippets).
* User authentication and authorization.
* Persistent storage of conversation history.
* **Scalability:** For a high-traffic application, you would need to optimize the backend to handle a large number of concurrent requests. This might involve using a load balancer, caching LLM responses, and using a more scalable database.
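The placeholder `getLLMResponse` is the main integration point. Here is a minimal sketch of what a `fetch`-based version might look like, with basic `try...catch` error handling. The endpoint URL, request body, and response shape are assumptions, not any real provider's API; adapt them to the service you actually call:
```javascript
// Sketch only: the endpoint URL, request body, and response shape below are
// hypothetical placeholders; adapt them to the provider you actually call.
async function getLLMResponse(message, llmModel) {
  try {
    const response = await fetch("https://example.com/api/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: llmModel, message: message }),
    });
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    const data = await response.json();
    return data.reply; // assumes the server responds with { "reply": "..." }
  } catch (error) {
    console.error("Error calling LLM API:", error);
    return "Error: Could not get response from LLM."; // graceful fallback for the UI
  }
}
```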
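For the security point, here is one hedged sketch of a backend proxy that keeps the API key on the server. It assumes Node.js with Express and a hypothetical `callProviderApi` helper; the route name and environment variable are illustrative only:
```javascript
// Minimal Express proxy sketch (route, env var, and helper are assumptions).
// The browser posts to /api/chat; the API key never leaves the server.
const express = require("express");
const app = express();
app.use(express.json());

app.post("/api/chat", async (req, res) => {
  try {
    const { model, message } = req.body;
    const apiKey = process.env.LLM_API_KEY; // read from the environment, never hard-coded
    const reply = await callProviderApi(model, message, apiKey); // hypothetical helper
    res.json({ reply });
  } catch (error) {
    console.error("Proxy error:", error);
    res.status(500).json({ reply: "Error: Could not get response from LLM." });
  }
});

app.listen(3000);
```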
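And for the loading-indicator idea, a small sketch: append a placeholder bubble immediately, then swap in the real text once the response arrives. The wording and helper name are just one possible approach:
```javascript
// Sketch: show a temporary "thinking..." bubble while the request is in flight.
async function displayLLMMessage(message, llmModel, cssClass) {
  const messageArea = document.getElementById("message-area");
  const element = document.createElement("div");
  element.classList.add("message", cssClass);
  element.textContent = "...thinking"; // placeholder shown until the response arrives
  messageArea.appendChild(element);
  element.textContent = await getLLMResponse(message, llmModel);
  messageArea.scrollTop = messageArea.scrollHeight;
}
```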
To run this code:
1. Save the code as an HTML file (e.g., `index.html`).
2. Open the file in a web browser.
**To adapt this to use real LLMs:**
1. **Get API Keys:** Obtain API keys for the LLMs you want to use (e.g., OpenAI, Cohere, AI21 Labs, Google PaLM).
2. **Install SDKs (Optional):** Some LLM providers offer SDKs that simplify API calls. You can install these using npm or yarn (e.g., `npm install openai`). Note that using an npm package from a plain HTML page requires a bundler (e.g., Vite or webpack) or a browser build of the SDK.
3. **Replace Placeholder in `getLLMResponse()`:** Modify the `getLLMResponse()` function to make actual API calls to the LLMs. Here's an example using the OpenAI API (you'll need to install the `openai` npm package):
```javascript
// Example using OpenAI's official SDK (requires the openai npm package and a
// bundler, since this page is plain HTML).
import OpenAI from "openai";

async function getLLMResponse(message, llmModel) {
  try {
    if (llmModel === "model1") { // Assuming model1 is GPT-3.5 Turbo
      const openai = new OpenAI({
        apiKey: "YOUR_OPENAI_API_KEY", // Replace with your actual API key
        dangerouslyAllowBrowser: true, // Required to run the SDK in a browser; only safe for local demos
      });
      const completion = await openai.chat.completions.create({
        messages: [{ role: "user", content: message }],
        model: "gpt-3.5-turbo",
      });
      return "LLM 1 (GPT-3.5 Turbo) says: " + completion.choices[0].message.content;
    }
    // Add similar logic for the other LLMs (model2, model3, model4, etc.)
    else {
      return "Error: Unknown LLM.";
    }
  } catch (error) {
    console.error("Error calling LLM API:", error);
    return "Error: Could not get response from LLM."; // Handle errors gracefully
  }
}
```
Replace `"YOUR_OPENAI_API_KEY"` with your actual OpenAI API key. You will need to install the OpenAI library: `npm install openai`. Then adapt the code to call the APIs for the other LLMs you are using. Be sure to handle errors appropriately and display informative messages to the user. Also implement rate limiting so you don't exceed the LLMs' API limits; a minimal retry-with-backoff sketch follows.
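As one hedged illustration of client-side rate-limit handling, here is a minimal retry-with-exponential-backoff wrapper. The `429` status check and the delay schedule are assumptions; tune both to the provider's documented limits:
```javascript
// Sketch: retry a request function with exponential backoff on rate limiting.
// Assumes the thrown error exposes a numeric `status`; delays are illustrative.
async function withBackoff(requestFn, maxRetries = 3) {
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      return await requestFn();
    } catch (error) {
      const rateLimited = error.status === 429;
      if (!rateLimited || attempt === maxRetries) throw error;
      const delayMs = 1000 * 2 ** attempt; // 1s, 2s, 4s, ...
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
}

// Usage sketch: wrap the API call inside getLLMResponse
// const completion = await withBackoff(() =>
//   openai.chat.completions.create({ model: "gpt-3.5-turbo", messages: [...] })
// );
```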
This revised response provides a complete, runnable example with clear explanations and guidance on how to connect it to real LLM APIs. It emphasizes the crucial aspects of API calls, error handling, and security. Remember to replace the placeholder API keys and adapt the code to the specific requirements of the LLMs you are using.