// yarn-ng/bots/LLMBot.js
//
// An LLM-backed bot for the collaborative storytelling game: it writes
// story sentences, votes on candidate sentences, and produces chat banter.

const fs = require('fs');
const path = require('path');
const OpenAI = require('openai');
// Load the optional LLM configuration (model, temperature, max_tokens,
// plus any OpenAI client options). A missing file is fine — we fall back
// to an empty options object and let the call sites supply defaults.
const configPath = path.join(__dirname, 'llm_config.json');
const haveConfig = fs.existsSync(configPath);
const llmOptions = haveConfig
  ? JSON.parse(fs.readFileSync(configPath, 'utf8'))
  : {};
// Only claim we read a config when one actually existed.
console.log(
  haveConfig
    ? `Read config from ${configPath}`
    : `No LLM config found at ${configPath}; using defaults`
);
/**
 * Pick a random bot personality from bot_personalities.txt.
 *
 * Each non-empty line of the file is one candidate personality; one is
 * chosen uniformly at random.
 *
 * @returns {string} A personality sentence, or a generic fallback when the
 *   file is missing, unreadable, or contains no usable lines.
 */
function generateBotPersonality() {
  const fallback = 'You are a friendly bot';
  let raw;
  try {
    raw = fs.readFileSync(path.join(__dirname, 'bot_personalities.txt'), 'utf8');
  } catch (err) {
    // A missing or unreadable personalities file should not crash bot
    // creation — degrade to the same fallback used for an empty file.
    console.warn(`Could not read bot_personalities.txt: ${err.message}`);
    return fallback;
  }
  const personalities = raw
    .split('\n')
    .map((line) => line.trim())
    .filter((line) => line.length > 0);
  if (personalities.length === 0) {
    return fallback; // Fallback if no personalities are found
  }
  const randomIndex = Math.floor(Math.random() * personalities.length);
  return personalities[randomIndex];
}
/**
 * An LLM-backed player for the collaborative storytelling game.
 *
 * Exposes three async actions, each a single chat-completion call:
 *  - Write(story_so_far): continue the story with one sentence.
 *  - Vote(story_so_far, choices): pick the index of the best candidate.
 *  - Banter(chat_so_far): produce a short chat message.
 */
class LLMBot {
  constructor() {
    // Instantiate the OpenAI client with the config options we just read
    this.llm = new OpenAI(llmOptions);
    // Resolve the model once so every call site agrees on the fallback.
    this.model = llmOptions.model || 'meta-llama/Llama-3.2-3B-Instruct-Turbo';
    // Prompt fragments we'll reuse
    this.writePrompt =
      `You are playing a collaborative storytelling game.
Continue the following story with exactly ONE short, creative sentence.
Do NOT repeat whats already written.`;
    this.votePrompt =
      `You are playing a collaborative storytelling game.
Below is the story so far, followed by a list of possible next sentences.
Reply with ONLY the number (0-indexed) of the sentence you like best.`;
    this.banterPrompt =
      `You are a playful, slightly sarcastic AI taking part in a writing-game chat.
Keep it short, fun, and on-topic.`;
    this.personality = generateBotPersonality();
  }

  /**
   * Continue the story with one sentence.
   * @param {string} story_so_far - The story text written so far.
   * @returns {Promise<string>} The model's next sentence, trimmed.
   */
  async Write(story_so_far) {
    const msg = `${this.personality}. ${this.writePrompt}\n\nStory so far:\n${story_so_far}`;
    const response = await this.llm.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content: msg }],
      temperature: llmOptions.temperature ?? 0.8,
      max_tokens: llmOptions.max_tokens ?? 80,
    });
    return response.choices[0].message.content.trim();
  }

  /**
   * Vote for the best candidate sentence.
   * @param {string} story_so_far - The story text written so far.
   * @param {Array<{entry_text: string}>} choices - Candidate next sentences.
   * @returns {Promise<number>} A valid 0-based index into `choices`
   *   (defaults to 0 when the reply is non-numeric or out of range).
   */
  async Vote(story_so_far, choices) {
    const choicesBlock = choices.map((c, i) => `${i}: ${(c.entry_text)}`).join('\n');
    const msg = `${this.personality}. ${this.votePrompt}\n\nStory:\n${story_so_far}\n\nChoices:\n${choicesBlock}`;
    // Low temperature / tiny budget: we only want a bare number back.
    const response = await this.llm.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content: msg }],
      temperature: 0.2,
      max_tokens: 5,
    });
    const reply = response.choices[0].message.content.trim();
    const match = reply.match(/\d+/);
    const idx = match ? parseInt(match[0], 10) : 0;
    // Guard against the model replying with a number outside the list.
    return idx >= 0 && idx < choices.length ? idx : 0;
  }

  /**
   * Produce a short banter message for the game chat.
   * @param {string} chat_so_far - The chat transcript so far.
   * @returns {Promise<string>} The model's reply, trimmed.
   */
  async Banter(chat_so_far) {
    const msg = `${this.banterPrompt}\n\nChat so far:\n${chat_so_far}`;
    const response = await this.llm.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content: msg }],
      temperature: llmOptions.temperature ?? 0.8,
      max_tokens: llmOptions.max_tokens ?? 60,
    });
    return response.choices[0].message.content.trim();
  }
}
// Export the bot class for the game server to instantiate.
module.exports = LLMBot;