Update Discord client initialization in index.js
This commit is contained in:
parent
e4bee9f501
commit
98d846a11b
1 changed files with 136 additions and 101 deletions
|
@ -12,7 +12,7 @@ const conversations = new Map();
|
|||
|
||||
module.exports = async (client, message) => {
|
||||
|
||||
if (message.channel.type === Discord.ChannelType.DM || message.author.bot || message.system) return;
|
||||
if (message.author.bot || message.system) return;
|
||||
|
||||
// Auto Moderation
|
||||
if (moderation.State && !moderation.IgnoredChannels.includes(message.channelId) && !moderation.IgnoredUsers.includes(message.author.id)) {
|
||||
|
@ -254,32 +254,39 @@ module.exports = async (client, message) => {
|
|||
// ChatBot
|
||||
if (chatbot.State && chatbot.AllowedChannels.includes(message.channel.name) && !chatbot.IgnoredUsers.includes(message.author.id)) {
|
||||
|
||||
// Check if the message is a direct message
|
||||
if (message.channel.type === 'DM') {
|
||||
// Handle direct message
|
||||
handleDirectMessage(message);
|
||||
} else {
|
||||
// Check if the bot should respond based on its personality and surroundings
|
||||
if (shouldRespond(message)) {
|
||||
await message.channel.sendTyping();
|
||||
|
||||
const openai = new openAI.OpenAI({ apiKey: config.OpenAIapiKey });
|
||||
|
||||
const question = message.content;
|
||||
|
||||
const completionPrompt = fs.readFileSync("./utils/prompts/completion.txt", "utf-8");
|
||||
const prompt = completionPrompt.replaceAll('{botUsername}', client.user.username);
|
||||
const completionPrompt = `
|
||||
Act as a unique user in a Discord server. Engage in conversations naturally, considering the context and your surroundings.
|
||||
Share your thoughts, opinions, and experiences as if you were a real person. Use emojis, slang, and a casual tone to blend in with other users.
|
||||
Avoid sounding robotic or generic, and aim to create a distinct personality that others can relate to and enjoy interacting with.
|
||||
`;
|
||||
|
||||
let messages = [{
|
||||
"role": "system",
|
||||
"content": prompt
|
||||
"content": completionPrompt
|
||||
}];
|
||||
|
||||
let oldMessages;
|
||||
if (conversations.has(message.author.id)) oldMessages = conversations.get(message.author.id);
|
||||
if (oldMessages) {
|
||||
// If there are old messages, check if they exceed token limit
|
||||
while (func.tokenizer('gpt-3.5-turbo-0125', oldMessages).tokens >= 512) {
|
||||
let sliceLength = oldMessages.length * -0.5;
|
||||
if (sliceLength % 2 !== 0) sliceLength--;
|
||||
oldMessages = oldMessages.slice(sliceLength);
|
||||
// Update the conversation history in the map
|
||||
conversations.set(message.author.id, oldMessages);
|
||||
}
|
||||
// Concatenate old messages with the current message
|
||||
messages = messages.concat(oldMessages);
|
||||
}
|
||||
|
||||
|
@ -289,61 +296,36 @@ if (chatbot.State && chatbot.AllowedChannels.includes(message.channel.name) && !
|
|||
});
|
||||
|
||||
openai.chat.completions.create({
|
||||
|
||||
model: 'gpt-3.5-turbo-0125',
|
||||
messages: messages,
|
||||
max_tokens: func.tokenizer('gpt-3.5-turbo-0125', messages).maxTokens,
|
||||
temperature: settings.completion.temprature,
|
||||
top_p: settings.completion.top_p,
|
||||
frequency_penalty: settings.completion.frequency_penalty,
|
||||
presence_penalty: settings.completion.presence_penalty,
|
||||
temperature: 0.8,
|
||||
top_p: 1,
|
||||
frequency_penalty: 0.5,
|
||||
presence_penalty: 0.5,
|
||||
stream: true
|
||||
|
||||
}).then(async (response) => {
|
||||
|
||||
// Initialize an array to hold all response parts
|
||||
let responseParts = [];
|
||||
let fullAnswer = '';
|
||||
|
||||
for await (const part of response) {
|
||||
// Accumulate response parts
|
||||
responseParts.push(part.choices[0]?.delta?.content || '');
|
||||
fullAnswer += part.choices[0]?.delta?.content || '';
|
||||
}
|
||||
|
||||
// Combine all response parts into a single string
|
||||
let fullAnswer = responseParts.join('');
|
||||
await message.channel.send(fullAnswer);
|
||||
|
||||
// Trim the response content to fit within the maximum embed description length
|
||||
if (fullAnswer.length > 4096) {
|
||||
fullAnswer = fullAnswer.slice(0, 4093) + '...';
|
||||
}
|
||||
|
||||
// Send the combined response as an embed
|
||||
const embed = {
|
||||
color: 0x0099ff,
|
||||
title: 'Assisto',
|
||||
description: fullAnswer
|
||||
};
|
||||
|
||||
// Send the embed
|
||||
await message.channel.send({ embeds: [embed] });
|
||||
|
||||
// Update the conversation history in the map with the new message
|
||||
conversations.set(message.author.id, messages.concat([{ "role": "assistant", "content": fullAnswer }]));
|
||||
|
||||
}).catch(async (error) => {
|
||||
|
||||
console.error(chalk.bold.redBright(error));
|
||||
|
||||
if (error.response) await message.reply({ content: error.response.error.message.length > 4000 ? error.response.error.message.substring(0, 3097) + "..." : error.response.error.message });
|
||||
else if (error.message) await message.reply({ content: error.message.length > 4000 ? error.message.substring(0, 3097) + "..." : error.message });
|
||||
|
||||
if (error.response) await message.reply(error.response.error.message.substring(0, 2000));
|
||||
else if (error.message) await message.reply(error.message.substring(0, 2000));
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Command Handler
|
||||
if (message.content.toLowerCase().startsWith(config.Prefix)) {
|
||||
|
||||
const neededPermissions = [
|
||||
"ViewChannel",
|
||||
"SendMessages",
|
||||
|
@ -358,15 +340,68 @@ if (chatbot.State && chatbot.AllowedChannels.includes(message.channel.name) && !
|
|||
const command = client.MessageCommands.get(cmd) || client.MessageCommands.find(c => c.aliases && c.aliases.map(a => a.toLowerCase()).includes(cmd));
|
||||
|
||||
if (command) {
|
||||
|
||||
try {
|
||||
command.execute(client, message, args, cmd);
|
||||
} catch (error) {
|
||||
console.error(chalk.bold.redBright(error));
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
// Function to handle direct messages
|
||||
async function handleDirectMessage(message) {
|
||||
if (conversations.has(message.author.id)) {
|
||||
const oldMessages = conversations.get(message.author.id);
|
||||
|
||||
};
|
||||
let messages = [{
|
||||
"role": "system",
|
||||
"content": completionPrompt
|
||||
}];
|
||||
|
||||
};
|
||||
messages = messages.concat(oldMessages);
|
||||
|
||||
messages.push({
|
||||
"role": "user",
|
||||
"content": message.content
|
||||
});
|
||||
|
||||
openai.chat.completions.create({
|
||||
model: 'gpt-3.5-turbo-0125',
|
||||
messages: messages,
|
||||
max_tokens: func.tokenizer('gpt-3.5-turbo-0125', messages).maxTokens,
|
||||
temperature: 0.8,
|
||||
top_p: 1,
|
||||
frequency_penalty: 0.5,
|
||||
presence_penalty: 0.5,
|
||||
stream: true
|
||||
}).then(async (response) => {
|
||||
let fullAnswer = '';
|
||||
|
||||
for await (const part of response) {
|
||||
fullAnswer += part.choices[0]?.delta?.content || '';
|
||||
}
|
||||
|
||||
await message.author.send(fullAnswer);
|
||||
|
||||
conversations.set(message.author.id, messages.concat([{ "role": "assistant", "content": fullAnswer }]));
|
||||
}).catch(async (error) => {
|
||||
console.error(chalk.bold.redBright(error));
|
||||
|
||||
if (error.response) await message.author.send(error.response.error.message.substring(0, 2000));
|
||||
else if (error.message) await message.author.send(error.message.substring(0, 2000));
|
||||
});
|
||||
} else {
|
||||
await message.author.send("Hey there! What's up? Feel free to chat with me about anything!");
|
||||
|
||||
conversations.set(message.author.id, []);
|
||||
}
|
||||
}
|
||||
|
||||
// Function to determine if the bot should respond based on its personality and surroundings
|
||||
function shouldRespond(message) {
|
||||
const botMentioned = message.mentions.has(client.user.id);
|
||||
const isQuestion = message.content.includes('?');
|
||||
const containsKeywords = ['hello', 'hi', 'hey', 'what\'s up', 'how are you'].some(keyword => message.content.toLowerCase().includes(keyword));
|
||||
|
||||
return botMentioned || isQuestion || containsKeywords;
|
||||
}
|
Loading…
Reference in a new issue