# dank-bot-py/plugins/botchat/plugin.py
# Plugin for bot LLM chat
from discord.ext import commands
import discord
import io
import aiohttp
import yaml
import random
import os
# Absolute path to this plugin's directory (realpath resolves symlinks).
plugin_folder=os.path.dirname(os.path.realpath(__file__))
# Directory containing prompt-template text files.
prompts_folder=os.path.join(plugin_folder, 'prompts')
# Prompt template used for responses; expected to contain a <CONVHISTORY> placeholder.
default_prompt="default.txt"
# Plugin settings file; read by setup() (expects api_base and response_probability keys).
config_filename=os.path.join(plugin_folder, 'settings.yaml')
# Shared mutable plugin state, populated in setup(): api_base, response_probability, bot.
llm_data = {}
async def prompt_llm(prompt):
    """
    Ask the upstream LLM server for a completion of the given prompt.

    Posts to the /completion endpoint of the server at llm_data["api_base"]
    and returns the generated text.
    :param prompt: prompt to complete
    :return: returns a string consisting of completion text
    """
    print("Prompting LLM")
    print(f"PROMPT DATA\n{prompt}")
    payload = {"prompt": prompt, "n_predict": 250, "mirostat": 2}
    async with aiohttp.ClientSession(llm_data["api_base"]) as session:
        async with session.post("/completion", json=payload) as resp:
            print(f"LLM response status {resp.status}")
            body = await resp.json()
    return body["content"]
def get_message_contents(msg):
    """
    Render a Discord message object in an IRC-like "user: message" format.
    :param msg: discord.Message to get the contents of
    :return: returns a string in the format "user: message"
    """
    formatted = "{}: {}".format(msg.author.name, msg.clean_content)
    print(f"Message contents -- {formatted}")
    return formatted
async def get_chat_history(ctx, limit=20):
    """
    Collect up to {limit} previous messages from the channel referenced by
    chat context {ctx}, oldest first.
    :param ctx: Chat context to get messages from
    :param limit: Maximum number of messages to get
    :return: A list of strings representing the messages
    """
    history = []
    # channel.history yields newest-first; format each message as "user: text".
    async for message in ctx.channel.history(limit=limit):
        history.append(get_message_contents(message))
    # Flip to chronological order for the prompt.
    return history[::-1]
async def log_history(ctx, history):
    """
    Persist recent chat history for later ingestion by the bot.

    Currently a no-op: per-channel file logging is not implemented yet.
    :param ctx: Chat context for message history (required for channel info)
    :param history: List of chat history strings in IRC-style format
    """
    # TODO: write {history} to a per-channel log file under plugin_folder/logs,
    # keyed by channel id/name, once the log format is settled.
    return None
@commands.command(name='llm')
async def llm_response(ctx):
    """
    Sends a response from the bot to the chat context in {ctx}.

    Reads the default prompt template, injects recent channel history at the
    <CONVHISTORY> placeholder, queries the LLM, and posts the reply.
    :param ctx: Chat context to send message to
    """
    await ctx.channel.typing()
    prompt_path = os.path.join(prompts_folder, default_prompt)
    # Fix: the original re-bound the path variable to the file handle inside
    # the with-statement; also pin the encoding so the template reads the
    # same on every platform.
    with open(prompt_path, 'r', encoding='utf-8') as prompt_file:
        prompt_template = prompt_file.read()
    history_arr = await get_chat_history(ctx)
    history_str = '\n'.join(history_arr)
    # Inject the recent conversation into the prompt template.
    full_prompt = prompt_template.replace("<CONVHISTORY>", history_str)
    response = await prompt_llm(full_prompt)
    await send_chat_responses(ctx, response)
    await log_history(ctx, history_str)
async def send_chat_responses(ctx, response_text):
    """
    Helper function for sending out the text in {response_text} to the discord
    server context in {ctx}, splitting it into one message per line and
    discarding text the LLM should not have generated (lines that look like
    another user speaking, e.g. "someuser: ...").
    :param ctx: Message context that we're replying to
    :param response_text: String containing message we want to send
    """
    print("Processing chat response")
    bot_prefix = "dank-bot:"
    full_response_log = bot_prefix + response_text  # first response won't include the user
    output_strs = []
    for line in full_response_log.splitlines():
        if line.startswith(bot_prefix):
            # Fix: strip only the LEADING speaker tag. The original used
            # line.replace(), which also deleted "dank-bot:" occurring
            # anywhere later in the line.
            output_strs.append(line[len(bot_prefix):])
        elif 0 < line.find(":") < 20:
            # Looks like the LLM started speaking as another user -- the
            # bot's own turn is over, stop emitting.
            break
        else:
            output_strs.append(line.strip())
    for outs in output_strs:
        final_output_str = await fixup_mentions(ctx, outs)
        await ctx.channel.send(final_output_str)
async def fixup_mentions(ctx, text):
    """
    Converts all user/role/etc mentions in {text} to the proper format
    so the bot can mention them properly.
    :param ctx: Message context that we're replying to
    :param text: String containing message we want to send
    :return: A string with all @User/@Role mentions changed to <@12345> format
    """
    newtext = text
    if isinstance(ctx.channel, discord.DMChannel):
        # DMs only have the one human recipient worth mentioning.
        newtext = newtext.replace(f"@{ctx.author.name}", ctx.author.mention)
    elif isinstance(ctx.channel, discord.GroupChannel):
        for user in ctx.channel.recipients:
            newtext = newtext.replace(f"@{user.name}", user.mention)
    elif isinstance(ctx.channel, discord.Thread):
        # Thread members are partial objects; fetch the full guild member to
        # get a usable name/mention pair. NOTE(review): this is one API call
        # per member -- fine for small threads, slow for big ones.
        for user in await ctx.channel.fetch_members():
            member_info = await ctx.channel.guild.fetch_member(user.id)
            newtext = newtext.replace(f"@{member_info.name}", member_info.mention)
    else:
        for user in ctx.channel.members:
            newtext = newtext.replace(f"@{user.name}", user.mention)
    # Fix: identity comparison is the idiomatic None test (was `!= None`).
    if ctx.guild is not None:
        for role in ctx.guild.roles:
            newtext = newtext.replace(f"@{role.name}", role.mention)
    return newtext
async def handle_message(ctx):
    """
    Function that hooks on_message and watches for/responds to incoming
    messages. Replies when DMed, when mentioned, or at random with
    probability llm_data['response_probability'].
    :param ctx: Message context
    """
    print("Dank-bot received message")
    print(f"Dank-bot ID is {llm_data['bot'].user.id}")
    bot_id = llm_data['bot'].user.id
    # Fix: never react to our own messages. The original only checked this in
    # the DM branch, so the random-roll branch could make the bot reply to
    # itself indefinitely.
    if ctx.author.id == bot_id:
        return
    # First case, bot DMed
    if isinstance(ctx.channel, discord.DMChannel):
        print("Dank-bot DMed, responding")
        await llm_response(ctx)
        return
    # Second case, bot mentioned
    if any(mention.id == bot_id for mention in ctx.mentions):
        print("Dank-bot mentioned, responding")
        await llm_response(ctx)
        return
    # Other case, random response
    random_roll = random.random()
    print(f"Dank-bot rolled {random_roll}")
    if random_roll < llm_data['response_probability']:
        print(f"{random_roll} < {llm_data['response_probability']}, responding")
        await llm_response(ctx)
        return
async def setup(bot):
    """
    Bot plugin initialization: loads settings.yaml, then registers the !llm
    command and the on_message listener.
    :param bot: Discord bot object
    """
    # Pin the encoding so the config parses identically on every platform.
    with open(config_filename, 'r', encoding='utf-8') as conf_file:
        yaml_config = yaml.safe_load(conf_file)
    llm_data["api_base"] = yaml_config["api_base"]
    llm_data["response_probability"] = yaml_config["response_probability"]
    # Fix: populate llm_data["bot"] BEFORE registering the listener. The
    # original assigned it afterwards, so a message arriving in between would
    # KeyError inside handle_message.
    llm_data["bot"] = bot
    bot.add_command(llm_response)
    bot.add_listener(handle_message, "on_message")
    print("LLM interface initialized")