use the new endpoint

This commit is contained in:
derpystuff 2023-12-23 15:44:07 +01:00
parent 5517223eea
commit de1c17625c
3 changed files with 9 additions and 2 deletions

View file

@@ -1,7 +1,7 @@
const { createEmbed } = require('../../../labscore/utils/embed')
const { editOrReply } = require('../../../labscore/utils/message')
const { googleSpeechRecognition } = require('../../../labscore/api');
const { googleSpeechRecognitionWithLabels } = require('../../../labscore/api');
const { STATICS } = require('../../../labscore/utils/statics');
const { codeblock, icon } = require('../../../labscore/utils/markdown');
@@ -32,7 +32,7 @@ module.exports = {
if(!msg.attachments.first()) return editOrReply(context, createEmbed("warning", context, "No voice message found."))
if(!msg.attachments.first().url.split('?')[0].endsWith('voice-message.ogg')) return editOrReply(context, createEmbed("warning", context, "No voice message found."))
const recog = await googleSpeechRecognition(context, msg.attachments.first().url)
const recog = await googleSpeechRecognitionWithLabels(context, msg.attachments.first().url)
return editOrReply(context, createEmbed("default", context, {
description: codeblock("md", [ recog.response.body.transcription_with_speakers ]),

View file

@@ -9,6 +9,7 @@ const Api = Object.freeze({
GOOGLE_PERSPECTIVE: '/google/perspective/analyze',
GOOGLE_SPEECH_RECOGNIZE: '/google/speech/recognize',
GOOGLE_SPEECH_RECOGNIZE_LABELS: '/google/speech/multirecognize',
GOOGLE_TRANSLATE: '/google/translate/text',
GOOGLE_VISION_COLORS: '/google/vision/colors',
GOOGLE_VISION_FACES: '/google/vision/faces',

View file

@@ -54,6 +54,12 @@ module.exports.googleSpeechRecognition = async function(context, url){
})
}
module.exports.googleSpeechRecognitionWithLabels = async function(context, url){
  // Speech recognition with speaker labels: forwards the audio URL to the
  // multirecognize endpoint and returns the raw API response.
  const query = { url };
  return await request(Api.GOOGLE_SPEECH_RECOGNIZE_LABELS, "GET", {}, query);
}
module.exports.googleTranslate = async function(context, text, to, from){
return await request(Api.GOOGLE_TRANSLATE, "GET", {}, {
text: text,