
Humility - Diligence - Pragmatism
ROS + iFlytek Voice = Let Your Robot Listen and Speak --- Part 3: Recognition
Install the ALSA sound-card driver development library:
sudo apt-get install libasound2-dev
Copy call.bnf from the SDK demo to /Robot/voice/call.bnf.
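If you have not seen one before, an iFlytek offline grammar is a plain-text BNF file. Below is a minimal sketch of the shape such a file takes; the rule names, slot names, and sample phrases are illustrative only, so use the call.bnf that actually ships with the SDK demo:

#BNF+IAT 1.0 UTF-8;
!grammar call;
!slot <contact>;
!start <callStart>;
<callStart>:<dial><contact>;
<dial>:打电话给|呼叫;
<contact>:张三|李四;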
Code for the speech-recognition node
Create a new file xf_asr.cpp under xf_voice/src and paste in the following code.
Note: change the appid to the one you applied for on the iFlytek website.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <iostream>
#include <sstream>
#include "ros/ros.h"
#include "std_msgs/String.h"
#include "pthread.h"
#include "/Robot/voice/inc/qisr.h"
#include "/Robot/voice/inc/msp_cmn.h"
#include "/Robot/voice/inc/msp_errors.h"
#include <alsa/asoundlib.h>

#define SAMPLE_RATE 16000
#define CHANNLE     1
#define FRAMES_SIZE 3200
#define FORMAT      SND_PCM_FORMAT_S16_LE
#define PER_SAMPLE  2          /* bytes per sample (16-bit PCM) */
#define DEVICE      "default"  /* ALSA capture device; the original value was lost, "default" is an assumed safe choice */

char data[10000];
int flag_begin = 0;
int flag_understand = 0;
int flag_unknow = 0;

/* Standard 44-byte WAV header; field list reconstructed from the iFlytek SDK demo */
typedef struct _wave_pcm_hdr
{
    char  riff[4];            /* = "RIFF" */
    int   size_8;             /* = FILE SIZE - 8 */
    char  wave[4];            /* = "WAVE" */
    char  fmt[4];             /* = "fmt " */
    int   fmt_size;           /* = 16 */
    short format_tag;         /* = 1 (PCM) */
    short channels;           /* = 1 */
    int   samples_per_sec;    /* = 16000 */
    int   avg_bytes_per_sec;  /* = samples_per_sec * bits_per_sample / 8 */
    short block_align;        /* = 2 */
    short bits_per_sample;    /* = 16 */
    char  data[4];            /* = "data" */
    int   data_size;          /* = FILE SIZE - 44 */
} wave_pcm_hdr;

wave_pcm_hdr default_wav_hdr =
{
    { 'R', 'I', 'F', 'F' },
    0,
    { 'W', 'A', 'V', 'E' },
    { 'f', 'm', 't', ' ' },
    16,
    1,
    1,
    16000,
    32000,
    2,
    16,
    { 'd', 'a', 't', 'a' },
    0
};

#define SAMPLE_RATE_16K   (16000)
#define SAMPLE_RATE_8K    (8000)
#define MAX_GRAMMARID_LEN (32)
#define MAX_PARAMS_LEN    (1024)

const char * ASR_RES_PATH   = "fo|/Robot/voice/bin/msc/res/asr/common.jet"; /* offline recognition resource path */
const char * GRM_BUILD_PATH = "/Robot/voice/bin/msc/res/asr/GrmBuilld";     /* grammar build output path */
const char * GRM_FILE       = "/Robot/voice/call.bnf";                      /* BNF grammar file */
const char * listened_file  = "/Robot/voice/wav/ddhgdw.pcm";

typedef struct _UserData {
    int  build_fini;                     /* set to 1 when the grammar build finishes */
    int  errcode;                        /* error code reported by the build callback */
    char grammar_id[MAX_GRAMMARID_LEN];  /* grammar ID returned by the build */
} UserData;

UserData asr_data;

int build_grammar(UserData *udata);
int recode_asr(void *ptr);
int build_grm_cb(int ecode, const char *info, void *udata)
{
    UserData *grm_data = (UserData *)udata;

    if (NULL != grm_data) {
        grm_data->build_fini = 1;
        grm_data->errcode = ecode;
    }

    if (MSP_SUCCESS == ecode && NULL != info) {
        printf("构建语法成功! 语法ID:%s\n", info);
        if (NULL != grm_data)
            snprintf(grm_data->grammar_id, MAX_GRAMMARID_LEN - 1, "%s", info);
    }
    else
        printf("构建语法失败!%d\n", ecode);

    return 0;
}
int build_grammar(UserData *udata)
{
    FILE *grm_file = NULL;
    char *grm_content = NULL;
    unsigned int grm_cnt_len = 0;
    char grm_build_params[MAX_PARAMS_LEN] = {0};
    int ret = 0;

    grm_file = fopen(GRM_FILE, "rb");
    if (NULL == grm_file) {
        printf("打开\"%s\"文件失败![%s]\n", GRM_FILE, strerror(errno));
        return -1;
    }

    fseek(grm_file, 0, SEEK_END);
    grm_cnt_len = ftell(grm_file);
    fseek(grm_file, 0, SEEK_SET);

    grm_content = (char *)malloc(grm_cnt_len + 1);
    if (NULL == grm_content)
    {
        printf("内存分配失败!\n");
        fclose(grm_file);
        grm_file = NULL;
        return -1;
    }
    fread((void*)grm_content, 1, grm_cnt_len, grm_file);
    grm_content[grm_cnt_len] = '\0';
    fclose(grm_file);
    grm_file = NULL;

    snprintf(grm_build_params, MAX_PARAMS_LEN - 1,
        "engine_type = local, \
        asr_res_path = %s, sample_rate = %d, \
        grm_build_path = %s, ",
        ASR_RES_PATH,
        SAMPLE_RATE_16K,
        GRM_BUILD_PATH
        );
    ret = QISRBuildGrammar("bnf", grm_content, grm_cnt_len, grm_build_params, build_grm_cb, udata);

    free(grm_content);
    grm_content = NULL;

    return ret;
}
int recode_asr(void *ptr)
{
    char asr_params[MAX_PARAMS_LEN] = {0};
    const char *rec_rslt = NULL;
    const char *session_id = NULL;
    char *pcm_data = NULL;
    long pcm_count = 0;
    int aud_stat = MSP_AUDIO_SAMPLE_CONTINUE;
    int ep_status = MSP_EP_LOOKING_FOR_SPEECH;
    int rec_status = MSP_REC_STATUS_INCOMPLETE;
    int rss_status = MSP_REC_STATUS_INCOMPLETE;
    int errcode = -1;
    float time = 5;  /* record at most 5 seconds per wakeup */

    int rc, ret, size, loops;
    snd_pcm_t *handle;
    snd_pcm_hw_params_t *params;
    snd_pcm_uframes_t frames;
    char *buffer, *ptr_buffer;

    FILE *fp = fopen("/Robot/voice/wav/listened.wav", "wb");

    /* mixed mode: try the local grammar first, fall back to the cloud after a delay */
    snprintf(asr_params, MAX_PARAMS_LEN - 1,
        "engine_type = cloud,\
        domain = iat,\
        nlp_version = 2.0,\
        mixed_type = delay,\
        mixed_threshold = 50,\
        mixed_timeout = 3000,\
        asr_res_path = %s, sample_rate = %d, \
        grm_build_path = %s, local_grammar = %s, \
        result_type = json, result_encoding = UTF-8 ",
        ASR_RES_PATH,
        SAMPLE_RATE_16K,
        GRM_BUILD_PATH,
        asr_data.grammar_id
        );
    session_id = QISRSessionBegin(NULL, asr_params, &errcode);
    printf("开始识别...\n");

    if (fp == NULL) {
        printf("open file failed\n");
        return -1;
    } else {
        printf("open file success");
    }

    /* open and configure the ALSA capture device */
    rc = snd_pcm_open(&handle, DEVICE, SND_PCM_STREAM_CAPTURE, 0);
    if (rc < 0) {
        printf("unable to open pcm device: %s\n", snd_strerror(rc));
        return -1;
    }
    printf("OK:before alloca\n");
    snd_pcm_hw_params_alloca(&params);
    printf("OK:after alloca\n");

    rc = snd_pcm_hw_params_any(handle, params);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_params_an()\n");

    rc = snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_INTERLEAVED);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_params_set_access()\n");

    rc = snd_pcm_hw_params_set_format(handle, params, FORMAT);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_params_set_format()\n");

    rc = snd_pcm_hw_params_set_channels(handle, params, CHANNLE);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_params_set_channels()\n");

    rc = snd_pcm_hw_params_set_rate(handle, params, SAMPLE_RATE, 0);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_params_set_rate()\n");

    frames = FRAMES_SIZE;
    rc = snd_pcm_hw_params(handle, params);
    if (rc < 0)
        return -1;
    printf("OK:snd_pcm_hw_paraams()\n");

    size = frames * PER_SAMPLE * CHANNLE;  /* bytes per read: 3200 frames * 2 bytes */
    ptr_buffer = buffer = (char *) malloc(size);
    if (buffer == NULL) {
        printf("malloc failed\n");
        return -1;
    }
    printf("OK:malloc()\n");

    loops = SAMPLE_RATE / frames * time;   /* number of reads needed for `time` seconds */
    fwrite(&default_wav_hdr, sizeof(default_wav_hdr), 1, fp);

    while (loops > 0)
    {
        loops--;
        ret = snd_pcm_readi(handle, ptr_buffer, frames);
        if (ret == -EPIPE) {
            printf("overrun occurred\n");
            snd_pcm_prepare(handle);
        } else if (ret < 0) {
            printf("error from read: %s\n", snd_strerror(ret));
        } else if (ret != (int)frames) {
            printf("short read, read %d frames\n", ret);
        }

        rc = fwrite(ptr_buffer, size, 1, fp);
        if (rc < 0)
            printf("error in write\n");
        if (rc != 1)
            printf("failed to write %d bytes\n", size);
        printf(">");
        fflush(stdout);

        /* feed the captured audio to the recognizer */
        if (0 == pcm_count)
            aud_stat = MSP_AUDIO_SAMPLE_FIRST;
        else
            aud_stat = MSP_AUDIO_SAMPLE_CONTINUE;
        pcm_count += size;
        errcode = QISRAudioWrite(session_id, (const void *)ptr_buffer, size, aud_stat, &ep_status, &rec_status);
        if (MSP_EP_AFTER_SPEECH == ep_status)
            break;  /* endpoint detected: the speaker has finished */
    }
    QISRAudioWrite(session_id, (const void *)NULL, 0, MSP_AUDIO_SAMPLE_LAST, &ep_status, &rec_status);

    if (buffer != NULL)
        free(buffer);
    if (fp != NULL)
        fclose(fp);
    snd_pcm_drain(handle);
    snd_pcm_close(handle);

    /* poll until the recognizer returns the final result */
    while (MSP_REC_STATUS_COMPLETE != rss_status && MSP_SUCCESS == errcode) {
        rec_rslt = QISRGetResult(session_id, &rss_status, 0, &errcode);
        printf(".");
        usleep(150 * 1000);
    }
    printf("\n识别结束%d:\n", loops);
    printf("=============================================================\n");
    if (NULL != rec_rslt) {
        printf("%s\n", rec_rslt);
        sprintf(data, "%s\n", rec_rslt);
        flag_understand = 1;
    } else {
        printf("没有识别结果!\n");
        flag_unknow = 1;
    }
    printf("=============================================================\n");

    if (NULL != pcm_data) {
        free(pcm_data);
        pcm_data = NULL;
    }
    printf("exit with code :%d..\n", errcode);
    QISRSessionEnd(session_id, NULL);
    return errcode;
}
void wakeupcallback(const std_msgs::String::ConstPtr& msg)
{
    std::cout << "waking up" << std::endl;
    usleep(700 * 1000);
    flag_begin = 1;  /* any message on /xfwakeup starts one recognition pass */
}

int main(int argc, char **argv)
{
    int ret = MSP_SUCCESS;
    const char *login_config = "appid = 573bdbff";  /* change to your own appid */

    ret = MSPLogin(NULL, NULL, login_config);
    if (MSP_SUCCESS != ret) {
        printf("登录失败:%d\n", ret);
        return -1;
    }

    printf("构建离线识别语法网络...\n");
    ret = build_grammar(&asr_data);
    if (MSP_SUCCESS != ret) {
        printf("构建语法调用失败!\n");
        return -1;
    }
    while (1 != asr_data.build_fini)
        usleep(300 * 1000);
    if (MSP_SUCCESS != asr_data.errcode)
        return -1;

    ros::init(argc, argv, "xf_asr");
    ros::NodeHandle n;
    ros::Rate loop_rate(10);
    ros::Subscriber sub = n.subscribe("xfwakeup", 1000, wakeupcallback);
    ros::Publisher pub = n.advertise<std_msgs::String>("xfsaywords", 1000);
    ros::Publisher pub2 = n.advertise<std_msgs::String>("xfunderstand", 10000);

    while (ros::ok())
    {
        if (flag_begin) {
            ret = recode_asr(&asr_data);
            flag_begin = 0;
        }
        if (flag_unknow) {
            std_msgs::String msg;
            std::stringstream ss;
            flag_unknow = 0;
            ss << "对不起,我好像不明白! ";
            msg.data = ss.str();
            pub.publish(msg);  /* have the TTS node speak the apology */
        }
        if (flag_understand) {
            flag_understand = 0;
            std_msgs::String msg;
            std::stringstream ss2;
            ss2 << data;       /* forward the raw recognition result */
            msg.data = ss2.str();
            pub2.publish(msg);
        }
        ros::spinOnce();
        loop_rate.sleep();
    }

    MSPLogout();
    printf("请按任意键退出...\n");
    getchar();
    return 0;
}
Configure CMakeLists.txt
Append the following to the end of the CMakeLists.txt in the xf_voice package:
add_executable(xf_asr src/xf_asr.cpp)
target_link_libraries(xf_asr ${catkin_LIBRARIES} /Robot/voice/lib/libmsc.so
-ldl -lpthread -lm -lrt -lasound)
add_dependencies(xf_asr xf_voice_generate_messages_cpp)
Go back to the catkin_ws folder and run catkin_make:
$ catkin_make
In the first terminal, start the ROS master:
$ roscore
In the second terminal, start the speech-synthesis node:
$ cd catkin_ws/
$ source devel/setup.sh
$ rosrun xf_voice xf_tts
In the third terminal, start the speech-recognition node:
$ cd catkin_ws/
$ source devel/setup.sh
$ rosrun xf_voice xf_asr
In the fourth terminal, send a wakeup signal:
$ cd catkin_ws/
$ source devel/setup.sh
$ rostopic pub /xfwakeup std_msgs/String "start"
Each time a wakeup message is published from the fourth terminal (the string content does not matter, wakeupcallback ignores it), the recognition node in the third terminal runs one recognition pass.
If nothing is recognized before the recording window runs out, the node reports that there is no recognition result, the second terminal prints "对不起,我好像不明白", and you will hear the corresponding prompt spoken.
If there is a result, recognition stops and the result is returned; the result is also published on the xfunderstand topic.
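What gets published on xfunderstand is the raw JSON string returned by the recognizer, as in the sample run below. A downstream node has to pull the interesting fields out of it itself; the following is a minimal sketch that does so with plain string searching. The node name understand_listener and the helper extract_field are made up for illustration, and a real node should use a proper JSON library:

#include <string>
#include "ros/ros.h"
#include "std_msgs/String.h"

// Hypothetical helper: return the value of the first "key":"value" pair in the raw JSON.
static std::string extract_field(const std::string &json, const std::string &key)
{
    std::string pattern = "\"" + key + "\":\"";
    size_t start = json.find(pattern);
    if (start == std::string::npos) return "";
    start += pattern.size();
    size_t end = json.find('"', start);
    if (end == std::string::npos) return "";
    return json.substr(start, end - start);
}

void understandCallback(const std_msgs::String::ConstPtr &msg)
{
    // In the sample output below, the first "text" field is the answer text inside "answer".
    ROS_INFO("answer text: %s", extract_field(msg->data, "text").c_str());
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "understand_listener");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("xfunderstand", 10, understandCallback);
    ros::spin();
    return 0;
}

Sample output of the speech-recognition node in the third terminal: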
构建离线识别语法网络...
构建语法成功! 语法ID:call
开始识别...
open file successOK:before alloca
OK:after alloca
OK:snd_pcm_hw_params_an()
OK:snd_pcm_hw_params_set_access()
OK:snd_pcm_hw_params_set_format()
OK:snd_pcm_hw_params_set_channels()
OK:snd_pcm_hw_params_set_rate()
OK:snd_pcm_hw_paraams()
OK:malloc()
>>>>>>>>>>>>>>>>>>>>>>>>>..
识别结束0:
=============================================================
没有识别结果!
=============================================================
exit with code :0..
开始识别...
open file successOK:before alloca
OK:after alloca
OK:snd_pcm_hw_params_an()
OK:snd_pcm_hw_params_set_access()
OK:snd_pcm_hw_params_set_format()
OK:snd_pcm_hw_params_set_channels()
OK:snd_pcm_hw_params_set_rate()
OK:snd_pcm_hw_paraams()
OK:malloc()
>>>>>>>>>>>>>>>>>>>>>>>>>....
识别结束0:
=============================================================
{"rc":0,"operation":"ANSWER","service":"chat","answer":{"text":"医院门口,一老大爷很伤心。好心人上前询问,老大爷答道:孙子生病,拉了坨便便想带来化验。用报纸包了好几层放口袋里,结果在车上被偷了……","type":"T"},"text":"给我讲一个笑话。"}
=============================================================
exit with code :0..
Output of the speech-synthesis node in the second terminal:
I heard,I will say:对不起,我好像不明白!
开始合成 ...
正在合成 ...
iFlytek Real-Time Voice Wakeup + Offline Command-Word Recognition on Linux and ROS
0. Foreword
GitHub address: (give it a star if you like it; it has both a command-line branch and a ROS branch, pick the one you need)
The demos iFlytek provides only cover "record audio for a while, then recognize command words" and "judge whether a recording contains the wake word"; neither runs command-word recognition or wakeup around the clock. So I designed a program that records continuously, checks in real time for the wake word, and switches to a timed command-word recognition pass whenever the wake word appears. (iFlytek's documentation says the wakeup function QIVWSessionBegin can take a parameter that enables wakeup plus command-word recognition in one step, but after many attempts I still don't know what format the bnf there should take.)
This article assumes some familiarity with iFlytek's speech development platform, so downloading and installation are not covered. I have only tested on Linux, Ubuntu 14.04 (64-bit) with ROS indigo, and no bugs have shown up so far. The recording and recognition code mostly follows the iFlytek SDK; I added the command-parsing code and designed the overall program structure. My own code is fully open source with no rights reserved; for the iFlytek portions, please contact iFlytek.
1. Overall framework

int main(int argc, char **argv)  // main body only
{
    // init iflytek
    int ret = 0;
    ret = MSPLogin(NULL, NULL, lgi_param);
    memset(&asr_data, 0, sizeof(UserData));
    ret = build_grammar(&asr_data);
    // The first time a grammar is used for recognition it must be built to get its
    // grammar ID; after that the same grammar can be reused without rebuilding.
    while (1)
    {
        run_ivw(NULL, ssb_param);
        if (g_is_awaken_succeed) {
            run_asr(&asr_data);
            g_is_awaken_succeed = FALSE;
        }
        if (g_is_order_publiced == FALSE) {
            if (g_order == ORDER_BACK_TO_CHARGE) {
                play_wav((char*)concat(PACKAGE_PATH, "audios/back_to_charge.wav"));
            }
            if (g_order == ORDER_FACE_DETECTION) {
                play_wav((char*)concat(PACKAGE_PATH, "audios/operating_face_rec.wav"));
            }
            g_is_order_publiced = TRUE;
        }
    }
    MSPLogout();
}
The above is the body of main: log in -> build the grammar -> enter the while(1) loop.
2. Voice wakeup
Inside the while(1) loop, run_ivw(NULL, ssb_param) starts recording and spawns a new thread that streams the audio to the server and waits for the wakeup result. run_ivw blocks until the wakeup result arrives, then stops recording and ends the wakeup session.

void run_ivw(const char *grammar_list, const char *session_begin_params)  // main body only
{
    // start QIVW
    session_id = QIVWSessionBegin(grammar_list, session_begin_params, &err_code);
    err_code = QIVWRegisterNotify(session_id, cb_ivw_msg_proc, NULL);

    // start record
    err_code = create_recorder(&recorder, iat_cb, (void*)session_id);
    err_code = open_recorder(recorder, get_default_input_dev(), &wavfmt);
    err_code = start_record(recorder);

    record_state = MSP_AUDIO_SAMPLE_FIRST;
    while (record_state != MSP_AUDIO_SAMPLE_LAST)
    {
        sleep_ms(200);  // block until the wakeup result arrives
        printf("waiting for awaken%d\n", record_state);
    }

    if (recorder) {
        if (!is_record_stopped(recorder))
            stop_record(recorder);
        close_recorder(recorder);
        destroy_recorder(recorder);
        recorder = NULL;
    }
    if (NULL != session_id)
        QIVWSessionEnd(session_id, sse_hints);
}
3. Offline command-word recognition
run_asr(&asr_data) only runs when the wakeup succeeded. It builds the offline command-word recognition parameters and calls demo_mic, which behaves much like run_ivw: initialize recognition, start listening, and close the recording after 15 seconds or once recognition completes.

static void demo_mic(const char* session_begin_params)  // main body only
{
    struct speech_rec_notifier recnotifier = {
        on_result,
        on_speech_begin,
        on_speech_end
    };
    int errcode, i = 0;

    errcode = sr_init(&iat, session_begin_params, SR_MIC, &recnotifier);
    errcode = sr_start_listening(&iat);
    /* demo 15 seconds recording */
    while (i++ < 15 && iat.session_id != NULL)
        sleep(1);  /* one-second wait per loop (reconstructed) */
    errcode = sr_stop_listening(&iat);
    sr_uninit(&iat);
}
4. Recognition results
The wakeup callback is:

int cb_ivw_msg_proc(const char *sessionID, int msg, int param1, int param2, const void *info, void *userData)  // main body only
{
    if (MSP_IVW_MSG_ERROR == msg)          // wakeup failed
    {
        g_is_awaken_succeed = FALSE;
        record_state = MSP_AUDIO_SAMPLE_LAST;
    }
    else if (MSP_IVW_MSG_WAKEUP == msg)    // wakeup succeeded
    {
        g_is_awaken_succeed = TRUE;
        record_state = MSP_AUDIO_SAMPLE_LAST;
        int ret = stop_record(recorder);
    }
    return 0;
}

This callback judges the wakeup result from the message the server returns; global variables pass the result to the main function and the recording thread so they can react in time.
The offline command-word recognition callback:

void on_result(const char *result, char is_last)
{
    if (result) {
        size_t left = g_buffersize - 1 - strlen(g_result);
        size_t size = strlen(result);
        if (left < size) {
            g_result = (char*)realloc(g_result, g_buffersize + BUFFER_SIZE);
            if (g_result)
                g_buffersize += BUFFER_SIZE;
            else {
                printf("mem alloc failed\n");
                return;
            }
        }
        strncat(g_result, result, size);
        show_result(g_result, is_last);
        g_order = get_order(g_result);
        if (g_order > ORDER_NONE) {  /* '>' reconstructed: any real order should rank above ORDER_NONE */
            g_is_order_publiced = FALSE;
        }
    }
}
It keeps accumulating results and then calls get_order, which parses the result and stores it in the global variable g_order. I wrote get_order around my own grammar, so adapt it to your situation. My grammar is essentially of the form todo+order, where the todo part and the order part each carry an id; that makes it easy to tell todo and order apart in the result and recover the semantics.
5. Use under ROS
This wakeup-plus-command-word recognition was written mainly for voice control of robots under ROS, and an indigo implementation is provided. In the package I define an sr_order msg and an awaken_asr node that publishes a message whenever a recognition result arrives, plus a listener example that receives the messages; a minimal sketch of such a listener follows below. Anyone familiar with ROS will know what I mean, so I won't go into detail; see the ROS branch in my GitHub repository.
Reference: the iFlytek SDK documentation.
Closing words
The attitude of iFlytek's staff is really great: I found a bug while debugging and reported it, and although it is not fixed yet, they have been actively following up. I also hope iFlytek lowers its minimum purchase quantity. Students really want to use the service but don't need that many device licenses. After pleading in their official group for quite a while, their staff suggested I verify my student identity, then applied for a few academic-use licenses on my behalf. The support lady worked hard on it for a long time and finally got them approved. My thanks to iFlytek for the chance to learn all this.
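For reference, a listener along those lines could look like the sketch below. The author's actual sr_order message definition lives in his repository and is not reproduced in this article, so the sketch assumes a message with a single int32 order field generated into <awaken_asr/sr_order.h>; the header path, field name, and node name order_listener are all assumptions for illustration:

#include "ros/ros.h"
// Hypothetical include: the real message lives in the author's package.
#include "awaken_asr/sr_order.h"

void orderCallback(const awaken_asr::sr_order::ConstPtr &msg)
{
    ROS_INFO("received order id: %d", msg->order);
    // Dispatch on the order id here, e.g. back-to-charge or face detection.
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "order_listener");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("sr_order", 10, orderCallback);
    ros::spin();
    return 0;
}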
ROS: Speech Recognition and Synthesis, Making Robots Talk
No intelligent robot is complete without speech. This article introduces speech recognition and synthesis in ROS.
Reference: ROS By Example.
1. Run the following commands:
sudo apt-get install gstreamer0.10-pocketsphinx
sudo apt-get install ros-indigo-pocketsphinx
sudo apt-get install ros-indigo-audio-common
sudo apt-get install libasound2
sudo apt-get install gstreamer0.10-gconf (required on indigo)
2. Download the pocketsphinx code from https://github.com/mikeferguson/pocketsphinx and remember to build it:
$cd catkin_ws/src
$git clone https://github.com/mikeferguson/pocketsphinx
$catkin_make
3. Understanding the topic /recognizer/output
The pocketsphinx package includes recognizer.py, a script that processes the computer's audio input stream and matches voice commands against the current vocabulary; successful matches are published on the /recognizer/output topic. Other programs can subscribe to this topic to obtain the recognized speech.
It is best to use a headset with a microphone. Connect it to the computer and select it as the audio input device: on Ubuntu 14.04 or later, open System Settings, click the Sound control panel, pick your headset in the device list under Input, and say a few words to check that the volume meter responds. Then switch to the Output tab, choose the desired output device, and adjust the volume slider.
Run the launch file:
$roslaunch pocketsphinx robocup.launch
You should see the launch output, and then you can start speaking. To watch the recognized text as you talk, run:
$rostopic echo /recognizer/output
Every matched phrase is printed there. Other programs can obtain the recognized speech by subscribing to this topic, as in the sketch below. Note that pocketsphinx can only recognize the words and phrases in its vocabulary, which limits it.
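A minimal C++ subscriber of that kind might look like this; the node name speech_listener and the "stop" phrase it checks for are illustrative:

#include "ros/ros.h"
#include "std_msgs/String.h"

// React to phrases published by recognizer.py on /recognizer/output.
void speechCallback(const std_msgs::String::ConstPtr &msg)
{
    ROS_INFO("recognized: %s", msg->data.c_str());
    if (msg->data == "stop") {
        // e.g. command the robot base to halt here
        ROS_INFO("stop command received");
    }
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "speech_listener");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("/recognizer/output", 10, speechCallback);
    ros::spin();
    return 0;
}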
Making the robot speak
$sudo apt-get install ros-indigo-audio-common
$sudo apt-get install libasound2
$rosrun sound_play soundplay_node.py
$rosrun sound_play say.py 'Greetings Humans. Take me to your leader.'
You will hear the computer say: Greetings Humans. Take me to your leader.
The default voice is kal_diphone, but other voices can be used instead. To list the voices on your machine, run:
ls /usr/share/festival/voices/english
You should also see don_diphone (installed earlier). To switch to it, run the following command:
rosrun sound_play say.py 'Welcome to the future' voice_don_diphone
To see all installable voices, run the following command:
$sudo apt-cache search --names-only festvox-*
You can install any voice from that list, e.g. festvox-don:
$sudo apt-get install festvox-don
To install more voices:
$ sudo apt-get install festlex-cmu
$ cd /usr/share/festival/voices/english/
$ sudo wget -c http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_clb_arctic-0.95-release.tar.bz2
$ sudo wget -c http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_bdl_arctic-0.95-release.tar.bz2
$ sudo tar jxfv cmu_us_clb_arctic-0.95-release.tar.bz2
$ sudo tar jxfv cmu_us_bdl_arctic-0.95-release.tar.bz2
$ sudo rm cmu_us_clb_arctic-0.95-release.tar.bz2
$ sudo rm cmu_us_bdl_arctic-0.95-release.tar.bz2
$ sudo ln -s cmu_us_clb_arctic cmu_us_clb_arctic_clunits
$ sudo ln -s cmu_us_bdl_arctic cmu_us_bdl_arctic_clunits
$ rosrun sound_play say.py 'I am speaking with a female C M U voice' voice_cmu_us_clb_arctic_clunits
$ rosrun sound_play say.py 'I am speaking with a male C M U voice' voice_cmu_us_bdl_arctic_clunits
sound_play can also play wav files and a few built-in sounds.
rosrun sound_play play.py `rospack find rbx1_speech`/sounds/R2D2a.wav
Note: play.py needs the absolute path to the wav file, which is why rospack find is used above; you can also just type out the full path yourself.
To play a built-in sound, pass an integer from 1 to 5 (friendly tip: turn your volume down first).
rosrun sound_play playbuiltin.py 4
pocketsphinx only recognizes English, and getting it to recognize Chinese takes considerable effort. Instead, we can use iFlytek speech to recognize Chinese, and the results are quite good. https://github.com/ncnynl/xf-ros is recommended and can be used directly; thanks to its author ncnynl!