improve the gpt command: split each model into its own dedicated subcommand

This commit is contained in:
derpystuff 2023-04-20 00:31:30 +02:00
parent dfc8d20c84
commit 3aeb9bfdc7
7 changed files with 276 additions and 58 deletions

View file

@ -1,57 +1,11 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODELS, AI_GPT_MODEL_CONFIG } = require("../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../labscore/utils/embed');
const { icon, highlight, codeblock } = require('../../../labscore/utils/markdown');
module.exports = {
description: 'Access a wide selection of Large Language Models.',
description: 'Generate text via Large Language Models',
name: 'gpt',
options: [
{
name: 'model',
description: 'LLM to use',
choices: AI_GPT_MODELS,
required: true,
},
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: args.model,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[args.model].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[args.model].name}${context.application.name}`,
},
description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
require('../subcommands/gpt/chatgpt'),
require('../subcommands/gpt/davinci3'),
require('../subcommands/gpt/claude'),
require('../subcommands/gpt/claude-instant'),
require('../subcommands/gpt/alpaca')
]
};

View file

@ -0,0 +1,54 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../../labscore/utils/embed');
const { codeblock } = require('../../../../labscore/utils/markdown');
const { format } = require('../../../../labscore/utils/ansi');
module.exports = {
description: 'Alpaca-7b (Replicate alpaca-7b)',
name: 'alpaca',
type: ApplicationCommandOptionTypes.SUB_COMMAND,
options: [
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
const MODEL = "replicate:alpaca-7b"
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: MODEL,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[MODEL].name}${context.application.name}`,
},
description: codeblock("ansi", [format(args.prompt, "cyan") + res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
};

View file

@ -0,0 +1,53 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../../labscore/utils/embed');
const { codeblock } = require('../../../../labscore/utils/markdown');
module.exports = {
description: 'ChatGPT (OpenAI gpt-3.5-turbo)',
name: 'chatgpt',
type: ApplicationCommandOptionTypes.SUB_COMMAND,
options: [
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
const MODEL = "openai:gpt-3.5-turbo"
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: MODEL,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[MODEL].name}${context.application.name}`,
},
description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
};

View file

@ -0,0 +1,53 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../../labscore/utils/embed');
const { codeblock } = require('../../../../labscore/utils/markdown');
module.exports = {
description: 'Claude Instant (Anthropic claude-instant-v1)',
name: 'claude-instant',
type: ApplicationCommandOptionTypes.SUB_COMMAND,
options: [
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
const MODEL = "anthropic:claude-instant-v1"
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: MODEL,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[MODEL].name}${context.application.name}`,
},
description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
};

View file

@ -0,0 +1,53 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../../labscore/utils/embed');
const { codeblock } = require('../../../../labscore/utils/markdown');
module.exports = {
description: 'Claude (Anthropic claude-v1)',
name: 'claude',
type: ApplicationCommandOptionTypes.SUB_COMMAND,
options: [
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
const MODEL = "anthropic:claude-v1"
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: MODEL,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[MODEL].name}${context.application.name}`,
},
description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
};

View file

@ -0,0 +1,53 @@
const { Constants } = require('detritus-client');
const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
const superagent = require('superagent')
const { createEmbed } = require('../../../../labscore/utils/embed');
const { codeblock } = require('../../../../labscore/utils/markdown');
module.exports = {
description: 'GPT-3 (OpenAI text-davinci-003)',
name: 'gpt3',
type: ApplicationCommandOptionTypes.SUB_COMMAND,
options: [
{
name: 'prompt',
description: 'Prompt',
type: ApplicationCommandOptionTypes.STRING,
required: true,
maxLength: 256
}
],
run: async (context, args) => {
const MODEL = "openai: text-davinci-003"
try{
let s = Date.now()
await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
await context.editOrRespond({
embeds: [createEmbed("loading_ai", context)]
})
let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
.query({
model: MODEL,
prompt: args.prompt
})
await context.editOrRespond({
embeds: [createEmbed("default", context, {
footer: {
iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
text: `${AI_GPT_MODEL_CONFIG[MODEL].name}${context.application.name}`,
},
description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
})]
})
}catch(e){
console.log(e)
await context.editOrRespond({
embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
})
}
},
};

View file

@ -316,17 +316,15 @@ module.exports.AI_GPT_MODEL_CONFIG = {
}
module.exports.AI_GPT_MODELS = [
/*
{ name: "OpenAI gpt-3.5-turbo (ChatGPT)", value: "openai:gpt-3.5-turbo" },
{ name: "Anthropic claude-instant-v1", value: "anthropic:claude-instant-v1" },
{ name: "Anthropic claude-v1", value: "anthropic:claude-v1" },
{ name: "Replicate alpaca-7b", value: "replicate:alpaca-7b" },
{ name: "HuggingFace bigscience/bloomz", value: "huggingface:bigscience/bloomz" },
{ name: "HuggingFace google/flan-t5-xxl", value: "huggingface:google/flan-t5-xxl" },
{ name: "HuggingFace google/flan-ul2", value: "huggingface:google/flan-ul2" },
{ name: "Cohere command-medium-nightly", value: "cohere:command-medium-nightly" },
{ name: "Cohere command-medium-nightly", value: "cohere:command-xlarge-nightly" },
*/
{ name: "OpenAI gpt-3.5-turbo (ChatGPT)", value: "openai:gpt-3.5-turbo" },
{ name: "Anthropic claude-v1", value: "anthropic:claude-v1" },
{ name: "Replicate alpaca-7b", value: "replicate:alpaca-7b" },
{ name: "OpenAI text-ada-001", value: "openai:text-ada-001" },
{ name: "OpenAI text-babbage-001", value: "openai:text-babbage-001" },
{ name: "OpenAI text-curie-001", value: "openai:text-curie-001" },