diff --git a/commands/message/genai/gemini-pro.js b/commands/message/genai/gemini-pro.js
index 9cfb626..a4e69b5 100644
--- a/commands/message/genai/gemini-pro.js
+++ b/commands/message/genai/gemini-pro.js
@@ -1,44 +1,63 @@
+const { googleGenaiGeminiApi } = require("#api");
 const { PERMISSION_GROUPS } = require("#constants");
-const { LlmModelsGenerate } = require("#obelisk");
 const { createEmbed } = require("#utils/embed");
 const { acknowledge } = require("#utils/interactions");
 const { stringwrap, iconPill, smallIconPill } = require("#utils/markdown");
 const { editOrReply } = require("#utils/message");
-const { STATIC_ICONS } = require("#utils/statics");
+const { STATIC_ICONS, STATICS, STATIC_ASSETS } = require("#utils/statics");
 const { hasFeature } = require("#utils/testing");
 
 module.exports = {
   name: 'gemini-pro',
   label: 'text',
-  aliases: ['gpro'],
+  aliases: ['gpro','gempro','gem-pro'],
   metadata: {
-    description: `${iconPill("generative_ai", "LIMITED TESTING")}\n${smallIconPill("reply", "Supports Replies")}\n\nRun Gemini 1.0 Pro with a custom prompt.`,
-    description_short: 'Gemini-1.0-Pro',
+    description: `${iconPill("generative_ai", "LIMITED TESTING")}\n${smallIconPill("reply", "Supports Replies")}\n\nRun Gemini 2.5 Pro with a custom prompt.`,
+    description_short: 'Gemini 2.5 Pro',
     examples: ['gem why do they call it oven when you of in the cold food of out hot eat the food'],
     category: 'limited',
-    usage: 'gemini-pro '
+    usage: 'gemini-pro []'
   },
+  args: [
+    { name: 'prompt', default: '', required: false, help: "The starting system prompt." },
+    { name: 'model', default: 'gemini-2.5-pro-preview-05-06', required: false, help: "The model." },
+//    { name: 'temperature', default: 0.25, required: false, help: "Model temperature." },
+  ],
   permissionsClient: [...PERMISSION_GROUPS.baseline, ...PERMISSION_GROUPS.attachments],
   run: async (context, args) => {
     if(!await hasFeature(context, "ai/gemini/text")) return;
     await acknowledge(context);
 
     if(!args.text) return editOrReply(context, createEmbed("warning", context, `Missing Parameter (text).`))
+
+    let model = "gemini-2.5-pro-preview-05-06"
+    if(args.model && await hasFeature(context, "ai/gpt/model-selection")) model = args.model;
 
     let input = args.text;
 
-    try{
-      await editOrReply(context, createEmbed("ai_custom", context, STATIC_ICONS.ai_gemini))
+    let prompt = `You are a friendly assistant designed to help people.\n- Today\'s date is ${new Date().toLocaleDateString('en-us', { weekday:"long", year:"numeric", month:"long", day:"numeric"})}\n- You should always use gender neutral pronouns when possible.\n- When answering a question, be concise and to the point.\n- Try to keep responses below 1000 characters. This does not apply to subjects that require more exhaustive or in-depth explanation.\n- Respond in a natural way, using Markdown formatting.`
+    if(args.prompt !== "") prompt = args.prompt
 
-    let res = await LlmModelsGenerate(context, "gemini-1.5-pro", input, "BLOCK_NONE")
+    try{
+      await editOrReply(context, createEmbed("defaultNoFooter", context, {
+        author: {
+          iconUrl: STATIC_ICONS.ai_gemini,
+          name: `​`
+        },
+        image: {
+          url: STATIC_ASSETS.chat_loading_small
+        }
+      }))
+
+      let res = await googleGenaiGeminiApi(context, model, input, prompt)
 
     let description = []
     let files = [];
 
     if(res.response.body.message) return editOrReply(context, createEmbed("error", context, e.response.body.message))
 
-    let output = res.response.body.candidates[0]?.output
+    let output = res.response.body.output
 
     if(!output) return editOrReply(context, createEmbed("error", context, `Gemini returned an error. Try again later.`))
 
     if(output.length <= 4000) description.push(output)
@@ -57,7 +76,7 @@ module.exports = {
         },
         description: description.join('\n'),
         footer: {
-          text: `Generative AI is experimental • Data submitted to Gemini may be used by Google for training.`
+          text: `${model} • Data submitted to Gemini may be used by Google for training.`
         }
       })],
       files
@@ -65,7 +84,7 @@ module.exports = {
     } catch(e){
       console.log(e)
       if(e.response?.body?.message) return editOrReply(context, createEmbed("error", context, e.response.body.message))
-      return editOrReply(context, createEmbed("error", context, `Unable to generate response.`))
+      return editOrReply(context, createEmbed("error", context, `Gemini API failed.`))
     }
   }
 };
\ No newline at end of file
diff --git a/labscore/api/endpoints.js b/labscore/api/endpoints.js
index 9327d47..318462f 100644
--- a/labscore/api/endpoints.js
+++ b/labscore/api/endpoints.js
@@ -8,6 +8,7 @@ const Api = Object.freeze({
   HOST: Hosts.prod,
 
   GOOGLE_GENERATIVEAI_EDIT_IMAGE: '/google/generativeai/edit-image',
+  GOOGLE_GENERATIVEAI_GEMINI_API: '/google/generativeai/gemini',
   GOOGLE_GENERATIVEAI_IMAGEN: '/google/generativeai/imagen',
   GOOGLE_PERSPECTIVE: '/google/perspective/analyze',
   GOOGLE_SPEECH_RECOGNIZE: '/google/speech/recognize',
diff --git a/labscore/api/index.js b/labscore/api/index.js
index da8f646..3e45b20 100644
--- a/labscore/api/index.js
+++ b/labscore/api/index.js
@@ -49,6 +49,14 @@ module.exports.googleGenaiEditImage = async function(context, prompt, url){
   })
 }
 
+module.exports.googleGenaiGeminiApi = async function(context, model, input, prompt){
+  return await request(Api.GOOGLE_GENERATIVEAI_GEMINI_API, "GET", {}, {
+    prompt,
+    input,
+    model
+  })
+}
+
 module.exports.googleGenaiImagen = async function(context, prompt, imageCount = 2){
   return await request(Api.GOOGLE_GENERATIVEAI_IMAGEN, "GET", {}, {
     prompt: prompt,
diff --git a/labscore/utils/statics.js b/labscore/utils/statics.js
index 9466768..78b4c5d 100644
--- a/labscore/utils/statics.js
+++ b/labscore/utils/statics.js
@@ -215,7 +215,7 @@ const Statics = Object.freeze({
     revision: 0
   },
   ai_gemini: {
-    file: "icons/aiv2/gemini_spark.png",
+    file: "icons/aiv2/gemini_spark_v2.png",
     revision: 0
   },
   ai_palm_idle: {