
Implementing web-based speech recognition and semantic understanding in JavaScript with the olami SDK (with speex compression)

Posted: 2017-07-06 18:44:07

Tags: speech recognition, semantic understanding, semantic parsing, JS cross-origin, olami

Please credit the original article when reposting: http://blog.csdn.net/ls0609/article/details/73920229

Beyond speech recognition, the real strength of the olami open platform SDK is its semantic understanding, and sample demos are available for download on both Android and iOS.

Voice-controlled online audiobook demo: http://blog.csdn.net/ls0609/article/details/71519203

Voice bookkeeping demo: http://blog.csdn.net/ls0609/article/details/72765789

On the web, the olami open platform SDK can also provide speech recognition and semantic understanding from JavaScript. This article implements such a small program: the web client records locally through the microphone, compresses the recorded data with speex, sends a cross-origin request to the server, and then displays the recognized speech and the semantic string that come back.

Screenshots first.

The interface right after the page loads, before any recording:

[Screenshot: initial interface before recording]

 

Click the Start Recording button:

[Screenshot: recording in progress]

When the sentence is finished, the end of speech is detected automatically, and the audio is compressed and uploaded to the server for recognition:

[Screenshot: recognition in progress]

The recognition result returned by the server is displayed on the page:

 

 

[Screenshot: recognition result displayed on the page]

 

The utterance in this example is "我要听三国演义这本书" ("I want to listen to the book Romance of the Three Kingdoms"), using the grammar built for the Android audiobook app.
The returned JSON string is as follows:

{
    "data": {
        "asr": {
            "result": "我要听三国演义这本书",
            "speech_status": 0,
            "final": true,
            "status": 0
        },
        "nli": [
            {
                "desc_obj": {
                    "result": "正在努力搜索中,请稍等",
                    "status": 0
                },
                "semantic": [
                    {
                        "app": "musiccontrol",
                        "input": "我要听三国演义这本书",
                        "slots": [
                            {
                                "name": "songname",
                                "value": "三国演义"
                            }
                        ],
                        "modifier": [
                            "play"
                        ],
                        "customer": "58df512384ae11f0bb7b487e"
                    }
                ],
                "type": "musiccontrol"
            }
        ]
    },
    "status": "ok"
}

By parsing this JSON we obtain the app type, the songname (used to look up the book title), and the modifier play, which indicates that the action is playback.
The grammar behind this JSON is of course user defined; once you have the JSON string, you can parse out whatever fields the program needs and act on them, which is what the semantic understanding feature amounts to. An introduction to writing grammars on the olami open platform: http://blog.csdn.net/ls0609/article/details/71624340
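For example, a few lines of JavaScript are enough to pull those fields out of the response shown above (the variable names here are only for illustration):

    // "response" is the parsed JSON object shown above.
    var nli = response.data.nli[0];
    var app = nli.semantic[0].app;               // "musiccontrol"
    var action = nli.semantic[0].modifier[0];    // "play" -> the action is playback
    var songName = null;
    nli.semantic[0].slots.forEach(function(slot) {
        if (slot.name === "songname") {
            songName = slot.value;               // "三国演义" -> used to look up the book
        }
    });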

Now let's look at the implementation code. Create a J2EE project named WebVoiceRecognize in Eclipse.
If this is your first time setting up such a project, you can follow this guide:
http://jingyan.baidu.com/article/1709ad808caf9d4634c4f0f8.html

Below is the resulting project directory structure. Once deployed, the page runs in either Chrome or the QQ browser.

[Screenshot: project directory structure]

The following walks through voiceRecognize.html; the other files are minified .js libraries, so you only need to know how to call them.

 

[Screenshot: HTML markup of voiceRecognize.html]
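The markup itself is only shown in the screenshot above. Judging from the element ids and global functions referenced in the script below, it looks roughly like the following sketch; the min.js file names are placeholders, not the actual bundle names:

    <html>
      <head>
        <meta charset="utf-8">
        <!-- the bundled min.js libraries; the exact file names are placeholders -->
        <script src="recorder.min.js"></script>
        <script src="speexEncoder.min.js"></script>
        <script src="olamiClient.min.js"></script>
      </head>
      <body onload="load()">
        <input type="button" id="recordbutton" value="开始录音" onclick="record()">
        <input type="button" id="speexEncodebutton" value="停止录音" onclick="speexEncode()">
        <p id="result"></p>
        <!-- the inline <script> element follows; its contents are listed below -->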

 

        window.AudioContext = window.AudioContext || window.webkitAudioContext;

        var audioContext = new AudioContext();
        var audioInput = null,
            realAudioInput = null,
            inputPoint = null,
            audioRecorder = null;
        var rafID = null;
        var analyserContext = null;
        var recIndex = 0;

        var recording = false;
        var bRecorded = false;

        function load() {
            initAudio(); // initialize the recorder
            setAuthorization("http://portal.olavoice.com/cloudservice/api", "51a4bb56ba954655a4fc834bfdc46af1", "asr", "68bff251789b426896e70e888f919a6d", "nli");
            setCallBackFromServerResult(getResultFromServer);
        }

 

        function initAudio() {
            if (!navigator.getUserMedia)
                navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
            if (!navigator.cancelAnimationFrame)
                navigator.cancelAnimationFrame = navigator.webkitCancelAnimationFrame || navigator.mozCancelAnimationFrame;
            if (!navigator.requestAnimationFrame)
                navigator.requestAnimationFrame = navigator.webkitRequestAnimationFrame || navigator.mozRequestAnimationFrame;

            navigator.getUserMedia({audio: true}, gotStream, function(e) {
                alert('Error getting audio');
                console.log(e);
            });
        }

        function gotStream(stream) {
            inputPoint = audioContext.createGain();

            // Create an AudioNode from the stream.
            realAudioInput = audioContext.createMediaStreamSource(stream);
            audioInput = realAudioInput;
            audioInput.connect(inputPoint);
            audioRecorder = new Recorder(inputPoint);
        }

 

        function StartRecording()
        {
            // start recording
            if (audioRecorder == null)
            {
                initAudio();
                alert("need initialize media");
                return; // wait until the microphone has been initialized
            }
            audioRecorder.clear();
            audioRecorder.record();
            recording = true;
            bRecorded = false;
            ToggleLabels();
            RegisterCallBackToRecorder();
        }

        function StopRecording()
        {
            audioRecorder.stop();
            audioRecorder.getBuffers(gotBuffers);
        }

        function RegisterCallBackToRecorder()
        {   // callback invoked after end-of-speech is detected
            audioRecorder.setCallBack(speexEncode);
        }

 

        function ToggleLabels()
        {
            if (recording)
            {
                document.getElementById("recordbutton").value = "录音中";
                document.getElementById("speexEncodebutton").value = "停止录音";
            } else {
                document.getElementById("speexEncodebutton").value = "识别中";
                document.getElementById("recordbutton").value = "停止录音";
            }
        }

 

        window.record = function(e)
        {
            if (!recording)
            {
                StartRecording();
                recording = true;
                bRecorded = false;
            }
            else
            {
                StopRecording();
                recording = false;
                bRecorded = true;
            }

            ToggleLabels();
        };

        window.speexEncode = function()
        {
            exportSpeex();
        };

        function exportSpeex()
        {
            recording = false;
            bRecorded = true;
            ToggleLabels();
            audioRecorder.stop();
            audioRecorder.exportPCM(uploadSpeexData);
        }

 

        function getResultFromServer(result)
        {
            document.getElementById('result').innerText = JSON.stringify(result);
            document.getElementById("speexEncodebutton").value = "停止录音";
            document.getElementById("recordbutton").value = "开始录音";
        }
      </script>

    </body>

 

</html>

When the browser loads the page, load() is called first to do the initialization:

function load(){
    initAudio();                                       // initialize the recorder
    setAuthorization(
        "http://portal.olavoice.com/cloudservice/api", // server url
        "51a4bb56ba954655a4fc834bfdc46af1",            // appkey
        "asr",                                         // api type
        "68bff251789b426896e70e888f919a6d",            // appSecret
        "nli");                                        // seq
    setCallBackFromServerResult(getResultFromServer);
}

initAudio() initializes the recorder, which acquires the microphone resource used for recording.

The parameters of setAuthorization are:

url: the server address
appkey: the appkey obtained after registering the application on the open platform
api: the api type; "asr" is used for speech
appSecret: the appSecret obtained after registering the application on the open platform
seq: "nli" means the response contains both the speech transcript and the semantics, while "stt" means speech only (see the sketch below)
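For instance, a speech-only configuration changes nothing but the seq argument; this sketch simply reuses the demo credentials from the listing above:

    // Speech-to-text only: the server returns the transcript without semantic parsing.
    setAuthorization("http://portal.olavoice.com/cloudservice/api",
                     "51a4bb56ba954655a4fc834bfdc46af1",  // appkey
                     "asr",                               // api type
                     "68bff251789b426896e70e888f919a6d",  // appSecret
                     "stt");                              // seq: speech only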

setCallBackFromServerResult(getResultFromServer) registers the callback that fires once recording ends and the recognition result comes back; inside the callback the result can be written to the page.

When the Start Recording button is clicked, the following is called:

function StartRecording()
{
    // start recording
    if (audioRecorder == null)
    {
        initAudio();
        alert("need initialize media");
        return; // wait until the microphone has been initialized
    }
    audioRecorder.clear();
    audioRecorder.record();
    recording = true;
    bRecorded = false;
    ToggleLabels();
    RegisterCallBackToRecorder(); // register the recorder callback
}

The recording JS code automatically detects the end of speech and then invokes the registered callback speexEncode(); clicking the Stop Recording button calls the same function:

window.speexEncode = function()
{
    exportSpeex();
};

function exportSpeex()
{
    recording = false;
    bRecorded = true;
    ToggleLabels();                 // update the button labels on the page
    audioRecorder.stop();
    audioRecorder.exportPCM(uploadSpeexData);
}

audioRecorder.exportPCM(uploadSpeexData) compresses the recorded 16 kHz PCM audio into the speex format and uploads it to the server. When the result comes back from the server, the callback registered with setCallBackFromServerResult(getResultFromServer) is invoked, and getResultFromServer outputs and displays the result.
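The cross-origin upload itself is handled inside the bundled min.js files, so the page never deals with it directly. Purely as an illustration of what that step involves, here is a minimal sketch that posts an encoded audio buffer across origins with fetch and hands the parsed JSON to a callback such as getResultFromServer; the endpoint and query parameters are placeholders and do not reflect the actual olami wire protocol:

    // Illustrative only: upload an encoded audio buffer cross-origin and
    // forward the parsed JSON response to a result callback.
    function uploadEncodedAudio(speexBuffer, onResult) {
        fetch("https://example.com/asr?appkey=YOUR_APPKEY&seq=nli", {   // placeholder endpoint
            method: "POST",
            mode: "cors",                                               // cross-origin request
            headers: { "Content-Type": "application/octet-stream" },
            body: speexBuffer                                           // e.g. an ArrayBuffer or Blob
        })
        .then(function(response) { return response.json(); })
        .then(function(json) { onResult(json); })
        .catch(function(err) { console.log("upload failed", err); });
    }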

Code download:

Web speech recognition and semantic understanding with the olami SDK

Related links:

Official olami open platform grammar documentation: https://cn.olami.ai/wiki/?mp=nli&content=nli2.html

Introduction to writing olami open platform grammars: http://blog.csdn.net/ls0609/article/details/71624340

This article is from the "ls0609" blog; please keep this attribution when reposting: http://ls0609.blog.51cto.com/12943469/1945069
