Update API keys in config.json

This commit is contained in:
ShadowVirtual 2024-04-30 18:38:42 -08:00
parent 77e36eee64
commit 96603eda55

View file

@ -253,120 +253,150 @@ module.exports = async (client, message) => {
// ChatBot: reply in allowed channels, skipping users who opted out.
if (chatbot.State && chatbot.AllowedChannels.includes(message.channel.name) && !chatbot.IgnoredUsers.includes(message.author.id)) {
    if (message.channel.type === 'DM') {
        // Direct messages get their own conversation flow.
        handleDirectMessage(message);
    } else if (shouldRespond(message)) {
        // Show a typing indicator while the completion streams in.
        await message.channel.sendTyping();
        const openai = new openAI.OpenAI({ apiKey: config.OpenAIapiKey });
        const question = message.content;
        // System prompt template lives on disk; inject the bot's username.
        const completionPrompt = fs.readFileSync("./utils/prompts/completion.txt", "utf-8");
        const prompt = completionPrompt.replaceAll('{botUsername}', client.user.username);
        let messages = [{
            "role": "system",
            "content": prompt
        }];
        let oldMessages;
        if (conversations.has(message.author.id)) oldMessages = conversations.get(message.author.id);
        if (oldMessages) {
            // Trim history until it fits the 512-token budget: drop the oldest
            // half, rounded to an even count so user/assistant pairs stay paired.
            while (func.tokenizer('gpt-3.5-turbo-0125', oldMessages).tokens >= 512) {
                let sliceLength = oldMessages.length * -0.5;
                if (sliceLength % 2 !== 0) sliceLength--;
                oldMessages = oldMessages.slice(sliceLength);
                // Persist the trimmed history so it stays bounded next turn too.
                conversations.set(message.author.id, oldMessages);
            }
            messages = messages.concat(oldMessages);
        }
        messages.push({
            "role": "user",
            "content": question
        });
        openai.chat.completions.create({
            model: 'gpt-3.5-turbo-0125',
            messages: messages,
            max_tokens: func.tokenizer('gpt-3.5-turbo-0125', messages).maxTokens,
            // NOTE(review): settings key is spelled "temprature" in config — confirm before renaming.
            temperature: settings.completion.temprature,
            top_p: settings.completion.top_p,
            frequency_penalty: settings.completion.frequency_penalty,
            presence_penalty: settings.completion.presence_penalty,
            stream: true
        }).then(async (response) => {
            // Accumulate the streamed delta chunks into one answer string.
            const responseParts = [];
            for await (const part of response) {
                responseParts.push(part.choices[0]?.delta?.content || '');
            }
            let fullAnswer = responseParts.join('');
            // Discord embed descriptions max out at 4096 characters.
            if (fullAnswer.length > 4096) {
                fullAnswer = fullAnswer.slice(0, 4093) + '...';
            }
            const embed = {
                color: 0x0099ff,
                title: 'Assisto',
                description: fullAnswer
            };
            await message.channel.send({ embeds: [embed] });
            // Record the exchange (including the assistant reply) for the next turn.
            conversations.set(message.author.id, messages.concat([{ "role": "assistant", "content": fullAnswer }]));
        }).catch(async (error) => {
            console.error(chalk.bold.redBright(error));
            if (error.response) await message.reply({ content: error.response.error.message.length > 4000 ? error.response.error.message.substring(0, 3097) + "..." : error.response.error.message });
            else if (error.message) await message.reply({ content: error.message.length > 4000 ? error.message.substring(0, 3097) + "..." : error.message });
        });
    }
}
// Command Handler
if (message.content.toLowerCase().startsWith(config.Prefix)) {
// Command Handler
if (message.content.toLowerCase().startsWith(config.Prefix)) {
const neededPermissions = [
"ViewChannel",
"SendMessages",
"EmbedLinks",
"ReadMessageHistory"
];
const neededPermissions = [
"ViewChannel",
"SendMessages",
"EmbedLinks",
"ReadMessageHistory"
];
if (!message.channel.permissionsFor(message.guild.members.me).has(neededPermissions)) return;
if (!message.channel.permissionsFor(message.guild.members.me).has(neededPermissions)) return;
const args = message.content.slice(config.Prefix.length).split(/ +/);
const cmd = args.shift().toLowerCase();
const command = client.MessageCommands.get(cmd) || client.MessageCommands.find(c => c.aliases && c.aliases.map(a => a.toLowerCase()).includes(cmd));
const args = message.content.slice(config.Prefix.length).split(/ +/);
const cmd = args.shift().toLowerCase();
const command = client.MessageCommands.get(cmd) || client.MessageCommands.find(c => c.aliases && c.aliases.map(a => a.toLowerCase()).includes(cmd));
if (command) {
try {
command.execute(client, message, args, cmd);
} catch (error) {
console.error(chalk.bold.redBright(error));
}
}
}
// Handle a DM conversation: continue an existing history, or greet on first contact.
// NOTE(review): `completionPrompt` and `openai` are not declared in this function's
// scope (they are created inside the guild-message branch) — confirm they exist at
// module level, otherwise this throws a ReferenceError on an ongoing DM.
async function handleDirectMessage(message) {
    if (conversations.has(message.author.id)) {
        const oldMessages = conversations.get(message.author.id);
        // Seed with the system prompt, then replay the stored history.
        let messages = [{
            "role": "system",
            "content": completionPrompt
        }];
        messages = messages.concat(oldMessages);
        messages.push({
            "role": "user",
            "content": message.content
        });
        openai.chat.completions.create({
            model: 'gpt-3.5-turbo-0125',
            messages: messages,
            max_tokens: func.tokenizer('gpt-3.5-turbo-0125', messages).maxTokens,
            temperature: 0.8,
            top_p: 1,
            frequency_penalty: 0.5,
            presence_penalty: 0.5,
            stream: true
        }).then(async (response) => {
            // Collect streamed chunks into the final reply text.
            let fullAnswer = '';
            for await (const part of response) {
                fullAnswer += part.choices[0]?.delta?.content || '';
            }
            await message.author.send(fullAnswer);
            // Persist the exchange so the next DM continues the thread.
            conversations.set(message.author.id, messages.concat([{ "role": "assistant", "content": fullAnswer }]));
        }).catch(async (error) => {
            console.error(chalk.bold.redBright(error));
            // Plain Discord messages cap at 2000 characters.
            if (error.response) await message.author.send(error.response.error.message.substring(0, 2000));
            else if (error.message) await message.author.send(error.message.substring(0, 2000));
        });
    } else {
        // First contact: greet and start an empty history.
        await message.author.send("Hey there! What's up? Feel free to chat with me about anything!");
        conversations.set(message.author.id, []);
    }
}
// Function to determine if the bot should respond based on its personality and surroundings
// Placeholder predicate: currently always responds to eligible guild messages.
// TODO: implement personality/context-aware gating (e.g. mentions, topic relevance).
function shouldRespond(message) {
return true;
}}