| | |
| | | ><span :style="{ 'animation-play-state': animationPlayState }"></span> |
| | | </div> |
| | | </div> |
| | | <div class="mt-5">请开始说话</div> |
| | | <div class="mt-5" :class="{ 'cursor-pointer': currentVoiceType === VoiceTipType.Speak }" @click="voiceTipClick"> |
| | | {{ voiceTipMap[currentVoiceType] }} |
| | | </div> |
| | | |
| | | <div class="flex items-center justify-between bottom-16 absolute left-1/2 -translate-x-1/2 space-x-16"> |
| | | <div class="size-[35px] flex items-center justify-center bg-[#292929] rounded-full cursor-pointer" @click="togglePlayClick"> |
| | |
| | | </template> |
| | | |
| | | <script setup lang="ts"> |
import { computed, nextTick, ref, watch } from 'vue';
import type { ChatContent } from '../../../model/types';
import { AnswerType } from '../../../model/types';
import { VoiceRecognitionErrorType, VoiceTipType, voiceTipMap } from './types';
import router from '/@/router';
import { setRoomConfig } from '/@/stores/chatRoom';
| | | |
// CSS animation state for the pulsing voice indicator in the template;
// toggled by the play/pause button (togglePlayClick).
const animationPlayState = ref<'paused' | 'running'>('running');

// Icon for the play/pause button: "pause" glyph while running, "play" while paused.
const playIcon = computed(() => (animationPlayState.value === 'running' ? 'icon-zanting' : 'icon-bofang'));

// True while text-to-speech playback is active (i.e. not user-paused).
const isSpeak = ref(false);
| | | const togglePlayClick = () => { |
| | | animationPlayState.value = animationPlayState.value === 'running' ? 'paused' : 'running'; |
| | | if (currentVoiceType.value === VoiceTipType.Speak) { |
| | | if (isSpeak.value) { |
| | | window.speechSynthesis.pause(); |
| | | } else { |
| | | window.speechSynthesis.resume(); |
| | | } |
| | | isSpeak.value = !isSpeak.value; |
| | | } |
| | | }; |
| | | |
// isHome: whether this panel is mounted on the home page — it changes how the
// answer callback is delivered on submit (see audioChangeWord).
const props = defineProps(['isHome']);
// 'submit': ask the parent to send the recognized text; 'updateInputValue':
// mirror the recognized text into the parent's input box.
const emit = defineEmits(['submit', 'updateInputValue']);
// v-model:isShow — visibility of the voice dialog.
const isShow = defineModel('isShow', {
  type: Boolean,
});
| | | |
// Return to the "waiting for speech" state and restart recognition.
const resetToListenVoice = () => {
  currentVoiceType.value = VoiceTipType.NoSpeech;
  audioChangeWord();
};
// NOTE(review): written in recognition.onerror but never read back — looks vestigial.
const isListening = ref(false);

// Latest recognized text; mirrored to the parent via 'updateInputValue'.
const inputValue = ref('');
// Close-button handler: hides the dialog (the watch below then tears everything down).
const closeClick = () => {
  isShow.value = false;
};

// Active SpeechRecognition session (browser Web Speech API); created in audioChangeWord.
let recognition = null;
// Handle for the active SpeechSynthesisUtterance (see handleAnswerRes).
let speech = null;
// Current tip state driving the UI text (keys of voiceTipMap).
const currentVoiceType = ref<VoiceTipType>(VoiceTipType.NoSpeech);
| | | const handleAnswerRes = (res: ChatContent) => { |
| | | if (!res) { |
| | | return; |
| | | } |
| | | let text = ''; |
| | | |
| | | if (res.type === AnswerType.Text || res.type === AnswerType.Knowledge) { |
| | | if (res.type === AnswerType.Knowledge) { |
| | | text = res.values?.map((item) => item.answer) ?? ''; |
| | | } else { |
| | | text = res.values; |
| | | } |
| | | } else { |
| | | text = '抱歉,我无法口述回答此问题的,需要查看请关闭此语音对话界面'; |
| | | } |
| | | |
| | | currentVoiceType.value = VoiceTipType.Speak; |
| | | isSpeak.value = true; |
| | | var speech = new SpeechSynthesisUtterance(); |
| | | speech.text = text; // 内容 |
| | | speech.lang = 'zh-cn'; // 语言 |
| | | speech.voiceURI = 'Microsoft Huihui - Chinese (Simplified, PRC)'; // 声音和服务 |
| | | // eslint-disable-next-line no-irregular-whitespace |
| | | speech.volume = 0.7; // 声音的音量区间范围是0到1默认是1 |
| | | // eslint-disable-next-line no-irregular-whitespace |
| | | speech.rate = 1; // 语速,数值,默认值是1,范围是0.1到10,表示语速的倍数,例如2表示正常语速的两倍 |
| | | // eslint-disable-next-line no-irregular-whitespace |
| | | speech.pitch = 1; // 表示说话的音高,数值,范围从0(最小)到2(最大)。默认值为1。 |
| | | |
| | | speech.onend = () => { |
| | | resetToListenVoice(); |
| | | }; |
| | | window.speechSynthesis.speak(speech); |
| | | }; |
| | | |
| | | const voiceTipClick = () => { |
| | | switch (currentVoiceType.value) { |
| | | case VoiceTipType.Speak: |
| | | window.speechSynthesis.cancel(); |
| | | setTimeout(() => { |
| | | resetToListenVoice(); |
| | | }, 0); |
| | | |
| | | break; |
| | | default: |
| | | break; |
| | | } |
| | | window.speechSynthesis.cancel(); |
| | | }; |
| | | const audioChangeWord = () => { |
| | | inputValue.value = ''; |
| | | emit('updateInputValue', ''); |
| | | // 创建SpeechRecognition对象 |
| | | // eslint-disable-next-line no-undef |
| | | var recognition = new webkitSpeechRecognition(); |
| | | recognition = new webkitSpeechRecognition(); |
| | | if (!recognition) { |
| | | // eslint-disable-next-line no-undef |
| | | recognition = new SpeechRecognition(); |
| | |
| | | recognition.onresult = function (event) { |
| | | var result = event.results[0][0].transcript; |
| | | console.log('监听结果:', result); |
| | | inputValue.value = result; |
| | | |
| | | emit('updateInputValue', result); |
| | | currentVoiceType.value = VoiceTipType.Think; |
| | | if (!props.isHome) { |
| | | emit('submit', handleAnswerRes); |
| | | } else { |
| | | setRoomConfig(router.currentRoute.value.query.id as string, 'firstResCb', handleAnswerRes); |
| | | emit('submit'); |
| | | } |
| | | }; |
| | | recognition.onspeechstart = (event) => { |
| | | currentVoiceType.value = VoiceTipType.Speech; |
| | | }; |
| | | |
| | | // 监听错误事件 |
| | | recognition.onerror = function (event) { |
| | | isListening.value = false; |
| | | ElMessage.error('监听语音失败'); |
| | | // ElMessage.error('监听语音失败'); |
| | | console.error(event.error); |
| | | switch (event.error) { |
| | | case VoiceRecognitionErrorType.NoSpeech: |
| | | if (isShow.value) { |
| | | resetToListenVoice(); |
| | | } |
| | | break; |
| | | |
| | | default: |
| | | break; |
| | | } |
| | | }; |
| | | // 监听结束事件(包括识别成功、识别错误和用户停止) |
| | | recognition.onend = function () { |
| | |
| | | }; |
| | | |
// Full teardown when the dialog closes: reset the tip state, restart the
// indicator animation, abort any in-flight recognition, and stop playback.
const resetStatus = () => {
  currentVoiceType.value = VoiceTipType.NoSpeech;
  animationPlayState.value = 'running';
  recognition?.abort();
  window.speechSynthesis.cancel();
};
| | | |
| | | watch( |
| | |
| | | (val) => { |
| | | if (!val) { |
| | | resetStatus(); |
| | | } else { |
| | | resetToListenVoice(); |
| | | } |
| | | } |
| | | ); |