diff --git a/commands/interaction/slash/gpt.js b/commands/interaction/slash/gpt.js
index 0a0f060..bf98fdd 100644
--- a/commands/interaction/slash/gpt.js
+++ b/commands/interaction/slash/gpt.js
@@ -1,57 +1,11 @@
-const { Constants } = require('detritus-client');
-const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
-
-const { AI_GPT_MODELS, AI_GPT_MODEL_CONFIG } = require("../../../labscore/constants");
-const superagent = require('superagent')
-
-const { createEmbed } = require('../../../labscore/utils/embed');
-const { icon, highlight, codeblock } = require('../../../labscore/utils/markdown');
-
 module.exports = {
-  description: 'Access a wide selection of Large Language Models.',
+  description: 'Generate text via Large Language Models',
   name: 'gpt',
   options: [
-    {
-      name: 'model',
-      description: 'LLM to use',
-      choices: AI_GPT_MODELS,
-      required: true,
-    },
-    {
-      name: 'prompt',
-      description: 'Prompt',
-      type: ApplicationCommandOptionTypes.STRING,
-      required: true,
-      maxLength: 256
-    }
-  ],
-  run: async (context, args) => {
-    try{
-      let s = Date.now()
-      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
-      await context.editOrRespond({
-        embeds: [createEmbed("loading_ai", context)]
-      })
-      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
-        .query({
-          model: args.model,
-          prompt: args.prompt
-        })
-
-      await context.editOrRespond({
-        embeds: [createEmbed("default", context, {
-          footer: {
-            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[args.model].icon}.png`,
-            text: `${AI_GPT_MODEL_CONFIG[args.model].name} • ${context.application.name}`,
-          },
-          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
-        })]
-      })
-    }catch(e){
-      console.log(e)
-      await context.editOrRespond({
-        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
-      })
-    }
-  },
+    require('../subcommands/gpt/chatgpt'),
+    require('../subcommands/gpt/davinci3'),
+    require('../subcommands/gpt/claude'),
+    require('../subcommands/gpt/claude-instant'),
+    require('../subcommands/gpt/alpaca')
+  ]
 };
\ No newline at end of file
diff --git a/commands/interaction/subcommands/gpt/alpaca.js b/commands/interaction/subcommands/gpt/alpaca.js
new file mode 100644
index 0000000..db3c27a
--- /dev/null
+++ b/commands/interaction/subcommands/gpt/alpaca.js
@@ -0,0 +1,54 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../../labscore/utils/embed');
+const { codeblock } = require('../../../../labscore/utils/markdown');
+const { format } = require('../../../../labscore/utils/ansi');
+
+module.exports = {
+  description: 'Alpaca-7b (Replicate alpaca-7b)',
+  name: 'alpaca',
+  type: ApplicationCommandOptionTypes.SUB_COMMAND,
+  options: [
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    const MODEL = "replicate:alpaca-7b"
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: MODEL,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[MODEL].name} • ${context.application.name}`,
+          },
+          description: codeblock("ansi", [format(args.prompt, "cyan") + res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
diff --git a/commands/interaction/subcommands/gpt/chatgpt.js b/commands/interaction/subcommands/gpt/chatgpt.js
new file mode 100644
index 0000000..a4062d1
--- /dev/null
+++ b/commands/interaction/subcommands/gpt/chatgpt.js
@@ -0,0 +1,53 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../../labscore/utils/embed');
+const { codeblock } = require('../../../../labscore/utils/markdown');
+
+module.exports = {
+  description: 'ChatGPT (OpenAI gpt-3.5-turbo)',
+  name: 'chatgpt',
+  type: ApplicationCommandOptionTypes.SUB_COMMAND,
+  options: [
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    const MODEL = "openai:gpt-3.5-turbo"
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: MODEL,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[MODEL].name} • ${context.application.name}`,
+          },
+          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
diff --git a/commands/interaction/subcommands/gpt/claude-instant.js b/commands/interaction/subcommands/gpt/claude-instant.js
new file mode 100644
index 0000000..9e6d73b
--- /dev/null
+++ b/commands/interaction/subcommands/gpt/claude-instant.js
@@ -0,0 +1,53 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../../labscore/utils/embed');
+const { codeblock } = require('../../../../labscore/utils/markdown');
+
+module.exports = {
+  description: 'Claude Instant (Anthropic claude-instant-v1)',
+  name: 'claude-instant',
+  type: ApplicationCommandOptionTypes.SUB_COMMAND,
+  options: [
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    const MODEL = "anthropic:claude-instant-v1"
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: MODEL,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[MODEL].name} • ${context.application.name}`,
+          },
+          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
diff --git a/commands/interaction/subcommands/gpt/claude.js b/commands/interaction/subcommands/gpt/claude.js
new file mode 100644
index 0000000..f4b71f6
--- /dev/null
+++ b/commands/interaction/subcommands/gpt/claude.js
@@ -0,0 +1,53 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../../labscore/utils/embed');
+const { codeblock } = require('../../../../labscore/utils/markdown');
+
+module.exports = {
+  description: 'Claude (Anthropic claude-v1)',
+  name: 'claude',
+  type: ApplicationCommandOptionTypes.SUB_COMMAND,
+  options: [
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    const MODEL = "anthropic:claude-v1"
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: MODEL,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[MODEL].name} • ${context.application.name}`,
+          },
+          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
diff --git a/commands/interaction/subcommands/gpt/davinci3.js b/commands/interaction/subcommands/gpt/davinci3.js
new file mode 100644
index 0000000..80d6630
--- /dev/null
+++ b/commands/interaction/subcommands/gpt/davinci3.js
@@ -0,0 +1,53 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODEL_CONFIG } = require("../../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../../labscore/utils/embed');
+const { codeblock } = require('../../../../labscore/utils/markdown');
+
+module.exports = {
+  description: 'GPT-3 (OpenAI text-davinci-003)',
+  name: 'gpt3',
+  type: ApplicationCommandOptionTypes.SUB_COMMAND,
+  options: [
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    const MODEL = "openai:text-davinci-003"
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: MODEL,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[MODEL].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[MODEL].name} • ${context.application.name}`,
+          },
+          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
diff --git a/labscore/constants.js b/labscore/constants.js
index bccacbc..364ff41 100644
--- a/labscore/constants.js
+++ b/labscore/constants.js
@@ -316,17 +316,15 @@ module.exports.AI_GPT_MODEL_CONFIG = {
 }
 
 module.exports.AI_GPT_MODELS = [
-  /*
+  { name: "OpenAI gpt-3.5-turbo (ChatGPT)", value: "openai:gpt-3.5-turbo" },
   { name: "Anthropic claude-instant-v1", value: "anthropic:claude-instant-v1" },
+  { name: "Anthropic claude-v1", value: "anthropic:claude-v1" },
+  { name: "Replicate alpaca-7b", value: "replicate:alpaca-7b" },
   { name: "HuggingFace bigscience/bloomz", value: "huggingface:bigscience/bloomz" },
   { name: "HuggingFace google/flan-t5-xxl", value: "huggingface:google/flan-t5-xxl" },
   { name: "HuggingFace google/flan-ul2", value: "huggingface:google/flan-ul2" },
   { name: "Cohere command-medium-nightly", value: "cohere:command-medium-nightly" },
   { name: "Cohere command-medium-nightly", value: "cohere:command-xlarge-nightly" },
-  */
-  { name: "OpenAI gpt-3.5-turbo (ChatGPT)", value: "openai:gpt-3.5-turbo" },
-  { name: "Anthropic claude-v1", value: "anthropic:claude-v1" },
-  { name: "Replicate alpaca-7b", value: "replicate:alpaca-7b" },
   { name: "OpenAI text-ada-001", value: "openai:text-ada-001" },
   { name: "OpenAI text-babbage-001", value: "openai:text-babbage-001" },
   { name: "OpenAI text-curie-001", value: "openai:text-curie-001" },