# Plugin for bot LLM chat

from discord.ext import commands
import discord
import yaml
import random
import os
import llm

# Filesystem layout: prompt templates live in a "prompts" subfolder next to
# this plugin file, and runtime settings come from settings.yaml alongside it.
plugin_folder = os.path.dirname(os.path.realpath(__file__))
prompts_folder = os.path.join(plugin_folder, 'prompts')
default_prompt = "default.txt"
config_filename = os.path.join(plugin_folder, 'settings.yaml')

# Shared module state: populated by setup() with the configured model,
# read by the llm_response command.
llm_data = {}


|
async def get_chat_history(ctx, limit=20):
    """Collect the last *limit* messages from ctx's channel.

    Returns a list of "author: content" strings in chronological order
    (oldest message first).
    """
    lines = []
    async for msg in ctx.channel.history(limit=limit):
        lines.append(f"{msg.author.name}: {msg.content}")
    # channel.history yields newest-first; flip to oldest-first.
    lines.reverse()
    return lines


|
@commands.command(name='llm')
async def llm_response(ctx):
    """Bot command `llm`: feed recent channel history to the configured LLM.

    Reads the default prompt template from the prompts folder, substitutes
    the <CONVHISTORY> placeholder with the channel's recent messages, and
    sends the result to the model that setup() stored in llm_data.
    """
    prompt_path = os.path.join(prompts_folder, default_prompt)
    # Fix: the original reused the name `prompt_file` for both the path and
    # the open handle; also read with an explicit encoding and close the
    # file before awaiting anything.
    with open(prompt_path, 'r', encoding='utf-8') as fh:
        prompt_template = fh.read()
    history_arr = await get_chat_history(ctx)
    history_str = '\n'.join(history_arr)
    full_prompt = prompt_template.replace("<CONVHISTORY>", history_str)
    response = llm_data["model"].prompt(full_prompt)
    # NOTE(review): the response is only printed to the bot process's stdout,
    # never sent back to the Discord channel — confirm this is intentional.
    print(response)


|
async def setup(bot):
    """discord.py extension entry point: configure the LLM and register commands.

    Loads API credentials from settings.yaml (keys: api_key, api_base),
    builds a completion-mode model via the `llm` library, stashes it in the
    module-level llm_data dict for llm_response to use, and registers the
    `llm` command on the bot.
    """
    # Explicit encoding: the platform default may not be UTF-8.
    with open(config_filename, 'r', encoding='utf-8') as conf_file:
        yaml_config = yaml.safe_load(conf_file)

    model = llm.get_model("gpt-3.5-turbo-instruct")
    model.key = yaml_config["api_key"]
    model.api_base = yaml_config["api_base"]
    # Use the raw completion endpoint rather than chat-style messages.
    model.completion = True
    llm_data["model"] = model

    bot.add_command(llm_response)
    print("LLM interface initialized")