// LLM-backed player bot for a collaborative storytelling game.
const fs = require('fs');
const path = require('path');
const OpenAI = require('openai');

// Optional JSON config file: API credentials / base URL for the OpenAI
// client, plus model / temperature / max_tokens overrides used by LLMBot.
const configPath = path.join(__dirname, 'llm_config.json');

// Load the config when present; otherwise fall back to client defaults.
// BUGFIX: previously "Read config from ..." was logged unconditionally even
// when no file existed, and a malformed file crashed with a bare SyntaxError.
let llmOptions = {};
if (fs.existsSync(configPath)) {
  try {
    llmOptions = JSON.parse(fs.readFileSync(configPath, 'utf8'));
    console.log(`Read config from ${configPath}`);
  } catch (err) {
    throw new Error(`Failed to parse LLM config at ${configPath}`, { cause: err });
  }
} else {
  console.log(`No config file at ${configPath}; using client defaults`);
}
/**
 * LLM-backed participant in the storytelling game. Exposes three actions:
 * Write (continue the story), Vote (pick the best candidate sentence), and
 * Banter (chat small talk). All calls go through one chat-completions helper.
 */
class LLMBot {
  constructor() {
    // OpenAI-compatible client configured from the optional llm_config.json.
    this.llm = new OpenAI(llmOptions);

    // Resolve the model once instead of repeating the fallback in every call.
    this.model = llmOptions.model || 'meta-llama/Llama-3.2-3B-Instruct-Turbo';

    // Prompt fragments we’ll reuse
    this.writePrompt =
      `You are playing a collaborative storytelling game.
Continue the following story with exactly ONE short, creative sentence.
Do NOT repeat what’s already written.`;

    this.votePrompt =
      `You are playing a collaborative storytelling game.
Below is the story so far, followed by a list of possible next sentences.
Reply with ONLY the number (0-indexed) of the sentence you like best.`;

    this.banterPrompt =
      `You are a playful, slightly sarcastic AI taking part in a writing-game chat.
Keep it short, fun, and on-topic.`;
  }

  /**
   * Send a single user message to the chat-completions endpoint.
   * @param {string} content - Full prompt text for the one user message.
   * @param {number} temperature - Sampling temperature for this call.
   * @param {number} maxTokens - Token budget for the completion.
   * @returns {Promise<string>} Trimmed text of the first choice.
   */
  async #complete(content, temperature, maxTokens) {
    const response = await this.llm.chat.completions.create({
      model: this.model,
      messages: [{ role: 'user', content }],
      temperature,
      max_tokens: maxTokens,
    });
    return response.choices[0].message.content.trim();
  }

  /**
   * Continue the story with one short sentence.
   * @param {string} story_so_far - The story text written so far.
   * @returns {Promise<string>} The model's next sentence, trimmed.
   */
  async Write(story_so_far) {
    const msg = `${this.writePrompt}\n\nStory so far:\n${story_so_far}`;
    const reply = await this.#complete(
      msg,
      llmOptions.temperature ?? 0.8,
      llmOptions.max_tokens ?? 80,
    );
    console.log(`Received response from LLM: ${reply}`);
    return reply;
  }

  /**
   * Vote for the best candidate next sentence.
   * @param {string} story_so_far - The story text written so far.
   * @param {string[]} choices - Candidate sentences, referenced by 0-based index.
   * @returns {Promise<number>} A valid index into `choices`; 0 when the reply
   *   contains no number or an out-of-range one.
   */
  async Vote(story_so_far, choices) {
    const choicesBlock = choices.map((c, i) => `${i}: ${c}`).join('\n');
    const msg = `${this.votePrompt}\n\nStory:\n${story_so_far}\n\nChoices:\n${choicesBlock}`;
    // Low temperature and a tiny token budget: we only want a single number.
    const reply = await this.#complete(msg, 0.2, 5);
    const match = reply.match(/\d+/);
    const idx = match ? Number.parseInt(match[0], 10) : 0;
    // BUGFIX: the model may echo a number outside the list (e.g. "7" for 3
    // choices); previously that invalid index was returned to the caller.
    return idx < choices.length ? idx : 0;
  }

  /**
   * Produce a short, playful chat message for the game lobby.
   * @param {string} chat_so_far - Transcript of the chat so far.
   * @returns {Promise<string>} The model's reply, trimmed.
   */
  async Banter(chat_so_far) {
    const msg = `${this.banterPrompt}\n\nChat so far:\n${chat_so_far}`;
    return this.#complete(
      msg,
      llmOptions.temperature ?? 0.8,
      llmOptions.max_tokens ?? 60,
    );
  }
}
// Export the bot class (CommonJS) so the game runner can require() it.
module.exports = LLMBot;