diff --git a/commands/interaction/slash/gpt.js b/commands/interaction/slash/gpt.js
new file mode 100644
index 0000000..0a0f060
--- /dev/null
+++ b/commands/interaction/slash/gpt.js
@@ -0,0 +1,57 @@
+const { Constants } = require('detritus-client');
+const { InteractionCallbackTypes, ApplicationCommandOptionTypes } = Constants;
+
+const { AI_GPT_MODELS, AI_GPT_MODEL_CONFIG } = require("../../../labscore/constants");
+const superagent = require('superagent')
+
+const { createEmbed } = require('../../../labscore/utils/embed');
+const { icon, highlight, codeblock } = require('../../../labscore/utils/markdown');
+
+module.exports = {
+  description: 'Access a wide selection of Large Language Models.',
+  name: 'gpt',
+  options: [
+    {
+      name: 'model',
+      description: 'LLM to use',
+      choices: AI_GPT_MODELS,
+      required: true,
+    },
+    {
+      name: 'prompt',
+      description: 'Prompt',
+      type: ApplicationCommandOptionTypes.STRING,
+      required: true,
+      maxLength: 256
+    }
+  ],
+  run: async (context, args) => {
+    try{
+      let s = Date.now()
+      await context.respond({data: {}, type: InteractionCallbackTypes.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE})
+      await context.editOrRespond({
+        embeds: [createEmbed("loading_ai", context)]
+      })
+      let res = await superagent.get(`${process.env.AI_SERVER}/gpt`)
+        .query({
+          model: args.model,
+          prompt: args.prompt
+        })
+
+      await context.editOrRespond({
+        embeds: [createEmbed("default", context, {
+          footer: {
+            iconUrl: `https://derpystuff.gitlab.io/webstorage4/v2/assets/icons/ai/ico_ai_${AI_GPT_MODEL_CONFIG[args.model].icon}.png`,
+            text: `${AI_GPT_MODEL_CONFIG[args.model].name} β€’ ${context.application.name}`,
+          },
+          description: codeblock("ansi", [res.body.response.substr(0, 1024).replace(/\\n/g,'\n')])
+        })]
+      })
+    }catch(e){
+      console.log(e)
+      await context.editOrRespond({
+        embeds: [createEmbed("error", context, "Unable to generate response. Try again in a bit.")]
+      })
+    }
+  },
+};
\ No newline at end of file
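For context: gpt.js defers the interaction, shows the loading_ai embed, then issues a single GET against `${process.env.AI_SERVER}/gpt` and only relies on the reply being JSON with a string `response` field. A minimal local stand-in for that backend (a sketch under that assumption, not the real AI_SERVER implementation) could look like this:

```js
// Hypothetical stand-in for the service behind process.env.AI_SERVER.
// Assumption: GET /gpt?model=...&prompt=... answers with { response: "<text>" },
// which is the only contract gpt.js depends on.
const http = require('http');

http.createServer((req, res) => {
  const url = new URL(req.url, 'http://localhost');
  if (url.pathname !== '/gpt') {
    res.writeHead(404);
    return res.end();
  }
  const model = url.searchParams.get('model');
  const prompt = url.searchParams.get('prompt');
  // A real server would route { model, prompt } to the selected provider here.
  res.writeHead(200, { 'Content-Type': 'application/json' });
  res.end(JSON.stringify({ response: `(${model}) echo: ${prompt}` }));
}).listen(8080);
```

Pointing AI_SERVER at http://localhost:8080 would then exercise the command end to end without any provider credentials.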
Try again in a bit.")] + }) + } + }, +}; \ No newline at end of file diff --git a/labscore/constants.js b/labscore/constants.js index 3b112fa..bccacbc 100644 --- a/labscore/constants.js +++ b/labscore/constants.js @@ -298,6 +298,42 @@ module.exports.MICROSOFT_VOICE_CONFIG = { "Male Whisper": { pitch: 113, speed: 170 } } +module.exports.AI_GPT_MODEL_CONFIG = { + "anthropic:claude-instant-v1": { name: "Anthropic claude-instant-v1", icon: "anthropic" }, + "anthropic:claude-v1": { name: "Anthropic claude-v1", icon: "anthropic" }, + "replicate:alpaca-7b": { name: "Replicate alpaca-7b", icon: "replicate" }, + "huggingface:bigscience/bloomz": { name: "HuggingFace bigscience/bloomz", icon: "huggingface" }, + "huggingface:google/flan-t5-xxl": { name: "HuggingFace google/flan-t5-xxl", icon: "huggingface" }, + "huggingface:google/flan-ul2": { name: "HuggingFace google/flan-ul2", icon: "huggingface" }, + "cohere:command-medium-nightly": { name: "Cohere command-medium-nightly", icon: "cohere" }, + "cohere:command-xlarge-nightly": { name: "Cohere command-medium-nightly", icon: "cohere" }, + "openai:gpt-3.5-turbo": { name: "OpenAI gpt-3.5-turbo", icon: "openai" }, + "openai:text-ada-001": { name: "OpenAI text-ada-001", icon: "openai" }, + "openai:text-babbage-001": { name: "OpenAI text-babbage-001", icon: "openai" }, + "openai:text-curie-001": { name: "OpenAI text-curie-001", icon: "openai" }, + "openai:text-davinci-002": { name: "OpenAI text-davinci-002", icon: "openai" }, + "openai:text-davinci-003": { name: "OpenAI text-davinci-003", icon: "openai" } +} + +module.exports.AI_GPT_MODELS = [ + /* + { name: "Anthropic claude-instant-v1", value: "anthropic:claude-instant-v1" }, + { name: "HuggingFace bigscience/bloomz", value: "huggingface:bigscience/bloomz" }, + { name: "HuggingFace google/flan-t5-xxl", value: "huggingface:google/flan-t5-xxl" }, + { name: "HuggingFace google/flan-ul2", value: "huggingface:google/flan-ul2" }, + { name: "Cohere command-medium-nightly", value: "cohere:command-medium-nightly" }, + { name: "Cohere command-medium-nightly", value: "cohere:command-xlarge-nightly" }, + */ + { name: "OpenAI gpt-3.5-turbo (ChatGPT)", value: "openai:gpt-3.5-turbo" }, + { name: "Anthropic claude-v1", value: "anthropic:claude-v1" }, + { name: "Replicate alpaca-7b", value: "replicate:alpaca-7b" }, + { name: "OpenAI text-ada-001", value: "openai:text-ada-001" }, + { name: "OpenAI text-babbage-001", value: "openai:text-babbage-001" }, + { name: "OpenAI text-curie-001", value: "openai:text-curie-001" }, + { name: "OpenAI text-davinci-002", value: "openai:text-davinci-002" }, + { name: "OpenAI text-davinci-003", value: "openai:text-davinci-003" } +] + module.exports.TRANSLATE_LANGUAGE_MAPPINGS = Object.freeze({ "af": "πŸ‡ΏπŸ‡¦", "sq": "πŸ‡¦πŸ‡±", diff --git a/labscore/utils/embed.js b/labscore/utils/embed.js index 235902c..40a5867 100644 --- a/labscore/utils/embed.js +++ b/labscore/utils/embed.js @@ -69,6 +69,15 @@ const embedTypes = Object.freeze({ }, color: COLORS.embed } + }, + "loading_ai": (context) => { + return { + author: { + iconUrl: STATIC_ICONS.loading_ai, + name: `Generating...` + }, + color: COLORS.brand + } } }) diff --git a/labscore/utils/statics.js b/labscore/utils/statics.js index 1f534be..a9100d5 100644 --- a/labscore/utils/statics.js +++ b/labscore/utils/statics.js @@ -82,6 +82,10 @@ const Statics = Object.freeze({ file: "icons/core/ico_notice_loading.gif", revision: 0 }, + loading_ai: { + file: "icons/ai/ico_ai_generating.gif", + revision: 0 + }, warning: { file: 
"icons/core/ico_notice_warning.png", revision: 0 @@ -120,5 +124,6 @@ module.exports.STATIC_ICONS = Object.freeze({ adult: staticAsset(Statics.icons.adult), error: staticAsset(Statics.icons.error), loading: staticAsset(Statics.icons.loading), + loading_ai: staticAsset(Statics.icons.loading_ai), warning: staticAsset(Statics.icons.warning) }) \ No newline at end of file