您的位置:首页 > 移动开发 > Android开发

Android初学----自己封装集成讯飞语音和人脸识别

2018-01-16 10:42 507 查看
讯飞语音,对于程序员来说,听到这个名字并不陌生,很多APP项目都集成了它的SDK,最近公司的项目需要语音识别、合成及人脸识别的功能。由此将所学所用做一下笔记。

集成环境:集成讯飞语音相对来说比较简单,按着官方给的集成文档(http://doc.xfyun.cn/msc_android/299548)一步一步操作就行,导入下载好的jar包和jniLibs文件夹的.so包即可。

项目中记得添加权限和初始化SpeechUtility

在工程 AndroidManifest.xml 文件中添加如下权限


<!--连接网络权限,用于执行云端语音能力 -->
<uses-permission android:name="android.permission.INTERNET"/>
<!--获取手机录音机使用权限,听写、识别、语义理解需要用到此权限 -->
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<!--读取网络信息状态 -->
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
<!--获取当前wifi状态 -->
<uses-permission android:name="android.permission.ACCESS_WIFI_STATE"/>
<!--允许程序改变网络连接状态 -->
<uses-permission android:name="android.permission.CHANGE_NETWORK_STATE"/>
<!--读取手机信息权限 -->
<uses-permission android:name="android.permission.READ_PHONE_STATE"/>
<!--读取联系人权限,上传联系人需要用到此权限 -->
<uses-permission android:name="android.permission.READ_CONTACTS"/>
<!--外存储写权限,构建语法需要用到此权限 -->
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<!--外存储读权限,构建语法需要用到此权限 -->
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
<!--配置权限,用来记录应用配置信息 -->
<uses-permission android:name="android.permission.WRITE_SETTINGS"/>
<!--手机定位信息,用来为语义等功能提供定位,提供更精准的服务-->
<!--定位信息是敏感信息,可通过Setting.setLocationEnable(false)关闭定位请求 -->
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION"/>
<!--如需使用人脸识别,还要添加:摄像头权限,拍照需要用到 -->
<uses-permission android:name="android.permission.CAMERA" />


封装语音识别与合成,代码如下:


package com.face.speech;

import android.content.Context;
import android.media.MediaRecorder;
import android.os.Bundle;
import android.os.Environment;
import android.text.TextUtils;
import android.util.Log;

import com.iflytek.cloud.ErrorCode;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.RecognizerListener;
import com.iflytek.cloud.RecognizerResult;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechRecognizer;
import com.iflytek.cloud.SpeechSynthesizer;
import com.iflytek.cloud.SpeechUtility;
import com.iflytek.cloud.SynthesizerListener;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;

/**
* Created by AA on 2018/1/2.
*/

public class XunfeiSpeech extends Speech {
    private Context context;
    // Plain-text transcript accumulated across the partial results of one utterance.
    private String result = "";
    // Raw JSON result strings collected for the current utterance.
    private List<String> jsonResults;
    // Text-to-speech engine.
    private SpeechSynthesizer mTts;
    // Dictation / speech-recognition engine.
    private SpeechRecognizer mIat;
    // Engine type: cloud recognition.
    private String mEngineType = SpeechConstant.TYPE_CLOUD;
    // Default online TTS voice.
    private String voicer = "xiaoqi";
    RecognizerListener listener;
    // volatile is required for double-checked locking in getInstance() to be safe
    // under the Java Memory Model.
    private static volatile XunfeiSpeech speech;

    /**
     * Lazily creates the process-wide singleton (double-checked locking).
     *
     * @param context any Context; only its application Context is retained so an
     *                Activity is not leaked by the static singleton
     * @param appId   iFlytek APPID used to initialize {@link SpeechUtility}
     * @return the shared XunfeiSpeech instance
     */
    public static XunfeiSpeech getInstance(Context context, String appId) {
        if (speech == null) {
            synchronized (XunfeiSpeech.class) {
                if (speech == null) {
                    speech = new XunfeiSpeech(context, appId);
                }
            }
        }
        return speech;
    }

    /**
     * Private constructor: initializes the iFlytek utility, then the recognizer
     * and the synthesizer.
     *
     * @param context caller's Context
     * @param appId   iFlytek APPID
     */
    private XunfeiSpeech(Context context, String appId) {
        // Keep only the application Context so the singleton cannot leak an Activity.
        this.context = context.getApplicationContext();
        SpeechUtility.createUtility(this.context, SpeechConstant.APPID + "=" + appId);
        initSpeech(this.context);
        initSpeechSynthesizer(this.context);
        jsonResults = new ArrayList<>();
    }

    /**
     * Creates and configures the text-to-speech engine.
     *
     * @param context Context used to create the synthesizer
     */
    private void initSpeechSynthesizer(Context context) {
        mTts = SpeechSynthesizer.createSynthesizer(context, mTtsInitListener);
        // Clear any previously set parameters.
        mTts.setParameter(SpeechConstant.PARAMS, null);
        mTts.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
        // Online voice to synthesize with.
        mTts.setParameter(SpeechConstant.VOICE_NAME, voicer);
        // Speed, pitch and volume all use the SDK's 0-100 scale; 50 is the midpoint.
        mTts.setParameter(SpeechConstant.SPEED, "50");
        mTts.setParameter(SpeechConstant.PITCH, "50");
        mTts.setParameter(SpeechConstant.VOLUME, "50");
    }

    /**
     * Creates and configures the speech recognizer (dictation).
     *
     * @param context Context used to create the recognizer
     */
    void initSpeech(Context context) {
        // SpeechRecognizer delivers results through callbacks; the UI is up to the caller.
        mIat = SpeechRecognizer.createRecognizer(context, mInitListener);
        // Clear any previously set parameters.
        mIat.setParameter(SpeechConstant.PARAMS, null);
        mIat.setParameter(SpeechConstant.ENGINE_TYPE, mEngineType);
        // Results are returned as JSON strings (parsed by XunfeiResult).
        mIat.setParameter(SpeechConstant.RESULT_TYPE, "json");
        // Leading silence timeout (ms): how long the user may stay silent before a timeout.
        mIat.setParameter(SpeechConstant.VAD_BOS, "4000");
        // Trailing silence (ms): how long after the user stops talking recording auto-stops.
        mIat.setParameter(SpeechConstant.VAD_EOS, "1500");
        // "1" = results include punctuation, "0" = no punctuation.
        mIat.setParameter(SpeechConstant.ASR_PTT, "1");
        // Save the captured audio (pcm/wav supported); writing to external storage
        // requires the WRITE_EXTERNAL_STORAGE permission.
        mIat.setParameter(SpeechConstant.AUDIO_FORMAT, "wav");
        mIat.setParameter(SpeechConstant.ASR_AUDIO_PATH,
                Environment.getExternalStorageDirectory() + "/msc/iat.wav");
    }

    /**
     * Init listener for the synthesizer. Per the iFlytek docs, startSpeaking must
     * not be called before this reports success.
     */
    private InitListener mTtsInitListener = new InitListener() {
        @Override
        public void onInit(int code) {
            if (code != ErrorCode.SUCCESS) {
                // Initialization failed; error code available in `code`.
            } else {
                // Initialization succeeded; startSpeaking may be called from here on.
            }
        }
    };

    /**
     * Init listener for the recognizer.
     */
    private InitListener mInitListener = new InitListener() {
        @Override
        public void onInit(int code) {
            Log.d("msg", "SpeechRecognizer init() code = " + code);
            if (code != ErrorCode.SUCCESS) {
                // Initialization failed; error code available in `code`.
            }
        }
    };

    /**
     * Starts continuous speech recognition.
     *
     * @param callback receives volume changes, the final transcript (plus the raw
     *                 JSON results), and error codes
     */
    @Override
    public void startListening(final ListenResultCallback callback) {
        listener = new RecognizerListener() {
            @Override
            public void onVolumeChanged(int i, byte[] bytes) {
                callback.onVolumeChanged(i, bytes);
            }

            @Override
            public void onBeginOfSpeech() {
            }

            @Override
            public void onEndOfSpeech() {
            }

            @Override
            public void onResult(RecognizerResult recognizerResult, boolean b) {
                // The SDK returns a JSON string; XunfeiResult parses it.
                XunfeiResult re = XunfeiResult.getResult(recognizerResult.getResultString());
                if (!re.isLs()) { // not yet the last segment of the utterance
                    // Only the first candidate (cw[0]) of each word is kept.
                    List<XunfeiResult.WsBean> ws = re.getWs();
                    for (XunfeiResult.WsBean w : ws) {
                        result += w.getCw().get(0).getW();
                    }
                    jsonResults.add(recognizerResult.getResultString());
                } else {
                    if (!TextUtils.isEmpty(result)) {
                        // Hand the caller a copy: jsonResults is cleared just below,
                        // so passing the live list would give the caller an empty list.
                        callback.result(result, new ArrayList<>(jsonResults));
                    }
                    result = "";
                    jsonResults.clear();
                    // Restart the session so recognition keeps running continuously.
                    try {
                        mIat.cancel();
                        Thread.sleep(100);
                        mIat.startListening(this);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                }
            }

            @Override
            public void onError(SpeechError speechError) {
                callback.error(speechError.getErrorCode() + "");
                // 10118 = "no speech detected"; restart listening instead of stopping.
                if (speechError.getErrorCode() == 10118) {
                    Log.i("mic", "getErrorCode=" + speechError.getErrorCode());
                    mIat.cancel();
                    mIat.startListening(this);
                }
            }

            @Override
            public void onEvent(int i, int i1, int i2, Bundle bundle) {
            }
        };
        mIat.startListening(listener);
        // NOTE(review): `listenSpeechTimer`/`listenSpeechTimertask` come from the
        // base class Speech (not visible here). This guard replaces an existing
        // timer without cancelling it and never starts one when the field is null —
        // confirm against the base class whether `== null` was intended.
        if (listenSpeechTimer != null) {
            listenSpeechTimer = new Timer();
            listenSpeechTimer.schedule(listenSpeechTimertask, 3000, 1000 * 60 * 60);
        }
    }

    /**
     * Speaks the given text with the cloud TTS engine. Recognition is cancelled
     * while speaking so the device does not transcribe its own output.
     *
     * @param speakContent text to synthesize; ignored when empty
     * @param callback     notified when synthesis completes (with the error code, if any)
     */
    @Override
    public void startSpeaking(String speakContent, final SpeakResultCallback callback) {
        if (TextUtils.isEmpty(speakContent)) return;
        mTts.startSpeaking(speakContent, new SynthesizerListener() {
            /** Cancel recognition so TTS output is not picked up by the microphone. */
            @Override
            public void onSpeakBegin() {
                if (mIat != null) {
                    mIat.cancel();
                }
            }

            @Override
            public void onBufferProgress(int i, int i1, int i2, String s) {
            }

            @Override
            public void onSpeakPaused() {
            }

            @Override
            public void onSpeakResumed() {
            }

            @Override
            public void onSpeakProgress(int i, int i1, int i2) {
            }

            /**
             * Synthesis finished.
             *
             * @param speechError error info; its code is forwarded to the caller
             */
            @Override
            public void onCompleted(SpeechError speechError) {
                callback.onCompleted(speechError.getErrorCode() + "");
            }

            @Override
            public void onEvent(int i, int i1, int i2, Bundle bundle) {
            }
        });
    }

    /**
     * Releases both engines and stops the periodic timer. Note the singleton
     * instance itself is kept; the engines are destroyed and would need
     * re-initialization before further use.
     */
    @Override
    public void cancel() {
        if (mTts != null) {
            mTts.destroy();
        }
        if (mIat != null) {
            mIat.cancel();
            mIat.destroy();
        }
        if (listenSpeechTimer != null) {
            listenSpeechTimer.cancel();
            listenSpeechTimer = null;
        }
    }
}


用于解析语音识别的bean类:


package com.face.speech;

import com.google.gson.Gson;

import java.util.List;

/**
* Created by AA on 2018/1/3.
*/

/**
 * Bean mapped (via Gson) onto one iFlytek dictation JSON result.
 * Field names must match the JSON keys exactly (sn, ls, bg, ed, ws, cw, w, sc).
 *
 * Example payload:
 * {"sn":1,"ls":true,"bg":0,"ed":0,
 *  "ws":[{"bg":0,"cw":[{"w":"今天","sc":0}]},{"bg":0,"cw":[{"w":"的","sc":0}]}]}
 */
public class XunfeiResult {
    // Gson is thread-safe and relatively expensive to build; reuse one instance
    // instead of constructing a new parser for every recognition result.
    private static final Gson GSON = new Gson();

    /**
     * Parses one dictation JSON string into a XunfeiResult.
     *
     * @param json raw result string from RecognizerResult.getResultString()
     * @return parsed bean (may be null if json is "null")
     */
    public static XunfeiResult getResult(String json) {
        return GSON.fromJson(json, XunfeiResult.class);
    }

    private int sn;            // sentence sequence number
    private boolean ls;        // true when this is the last segment of the utterance
    private int bg;            // begin offset
    private int ed;            // end offset
    private List<WsBean> ws;   // recognized words

    public int getSn() {
        return sn;
    }

    public void setSn(int sn) {
        this.sn = sn;
    }

    public boolean isLs() {
        return ls;
    }

    public void setLs(boolean ls) {
        this.ls = ls;
    }

    public int getBg() {
        return bg;
    }

    public void setBg(int bg) {
        this.bg = bg;
    }

    public int getEd() {
        return ed;
    }

    public void setEd(int ed) {
        this.ed = ed;
    }

    public List<WsBean> getWs() {
        return ws;
    }

    public void setWs(List<WsBean> ws) {
        this.ws = ws;
    }

    /** One word slot: {"bg":0,"cw":[{"w":"今天","sc":0}]} */
    public static class WsBean {
        private int bg;              // begin offset of the word
        private List<CwBean> cw;     // candidate words, best first

        public int getBg() {
            return bg;
        }

        public void setBg(int bg) {
            this.bg = bg;
        }

        public List<CwBean> getCw() {
            return cw;
        }

        public void setCw(List<CwBean> cw) {
            this.cw = cw;
        }

        /** One candidate word: {"w":"今天","sc":0} */
        public static class CwBean {
            private String w;  // the word text
            private int sc;    // confidence score

            public String getW() {
                return w;
            }

            public void setW(String w) {
                this.w = w;
            }

            public int getSc() {
                return sc;
            }

            public void setSc(int sc) {
                this.sc = sc;
            }
        }
    }
}


这里用到的是gson解析,在build.gradle文件中添加


compile 'com.google.code.gson:gson:2.2.4'


即可。

封装人脸识别,包括组操作创建和删除等等,因为要加入人脸搜索功能,只有将识别注册的人脸加入到已创建的组中,才能根据组id对人脸信息进行搜索,代码如下:


package com.face.facerecognition;

import android.content.Context;
import android.os.Bundle;
import android.text.TextUtils;
import android.util.Log;

import com.iflytek.cloud.IdentityListener;
import com.iflytek.cloud.IdentityResult;
import com.iflytek.cloud.IdentityVerifier;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechUtility;

import java.util.ArrayList;
import java.util.List;

/**
* Created by AA on 2018/1/9.
*/

public class XunfeiFaceRecognition extends FaceRecognition {
    private Context context;
    private String appId;
    private IdentityVerifier verifier;
    private IdentityListener identityListener;
    // Operation currently in flight; reported back to the caller on error.
    private int actionType = -1;
    // volatile is required for double-checked locking in getInstance() to be safe
    // under the Java Memory Model.
    private static volatile XunfeiFaceRecognition recognition;

    /**
     * Lazily creates the process-wide singleton (double-checked locking).
     *
     * @param context any Context; only its application Context is retained so an
     *                Activity is not leaked by the static singleton
     * @param appId   iFlytek APPID
     * @return the shared instance
     */
    public static XunfeiFaceRecognition getInstance(Context context, String appId) {
        if (recognition == null) {
            synchronized (XunfeiFaceRecognition.class) {
                if (recognition == null) {
                    recognition = new XunfeiFaceRecognition(context, appId);
                }
            }
        }
        return recognition;
    }

    /**
     * Private constructor: stores the application Context and initializes the SDK.
     *
     * @param context caller's Context
     * @param appId   iFlytek APPID
     */
    private XunfeiFaceRecognition(Context context, String appId) {
        this.context = context.getApplicationContext();
        this.appId = appId;
        init();
    }

    /** Initializes SpeechUtility and creates the IdentityVerifier once. */
    private void init() {
        SpeechUtility.createUtility(context, SpeechConstant.APPID + "=" + appId);
        if (verifier == null) {
            verifier = IdentityVerifier.createVerifier(context, new InitListener() {
                @Override
                public void onInit(int i) {
                    Log.i("verifier", "i==" + i);
                }
            });
        }
    }

    public void setParams() {

    }

    /**
     * Group management: create/delete a group, add/remove a member, list members.
     *
     * @param groupId  group id (used as the group name on creation)
     * @param userId   user id (used for member add/remove)
     * @param type     one of the FaceRecognition.GROUP_* operation constants
     * @param callback result callback
     */
    @Override
    public void groupSetting(String groupId, String userId, int type, RecognitionCallback callback) {
        actionType = type;
        initListener(callback);
        // "ipt" is the identity-group-management scene of the MFV API.
        verifier.setParameter(SpeechConstant.MFV_SCENES, "ipt");
        verifier.setParameter(SpeechConstant.AUTH_ID, userId);
        String params = "";
        String cmd = "";
        switch (type) {
            case FaceRecognition.GROUP_CREATE:
                // Create a group; groupId doubles as its display name here.
                params = "scope=group,group_name=" + groupId;
                cmd = "add";
                break;
            case FaceRecognition.GROUP_DELETE:
                params = "scope=group,group_id=" + groupId;
                cmd = "delete";
                break;
            case FaceRecognition.GROUP_ADD_PERSON:
                params = "scope=person,group_id=" + groupId + ",auth_id=" + userId;
                cmd = "add";
                break;
            case FaceRecognition.GROUP_DELETE_PERSON:
                params = "scope=person,group_id=" + groupId + ",auth_id=" + userId;
                cmd = "delete";
                break;
            case FaceRecognition.GROUP_FIND_ALL:
                // Query all members of the group.
                params = "scope=group,group_id=" + groupId;
                cmd = "query";
                break;
            default:
                break;
        }
        verifier.execute("ipt", cmd, params, identityListener);
    }

    /**
     * Enrolls a face image for a user.
     *
     * @param groupId   group id (see note below)
     * @param userId    user id to enroll under
     * @param imageData raw image bytes (the API limits uploads to ~200 KB)
     * @param callback  result callback
     */
    @Override
    public void registerFace(String groupId, String userId, byte[] imageData, RecognitionCallback callback) {
        actionType = FaceRecognition.RECOGNITION_REGISTER;
        initListener(callback);
        // "ifr" = face-recognition scene; "enroll" = registration session.
        verifier.setParameter(SpeechConstant.MFV_SCENES, "ifr");
        verifier.setParameter(SpeechConstant.MFV_SST, "enroll");
        verifier.setParameter(SpeechConstant.AUTH_ID, userId);
        verifier.startWorking(identityListener);
        // NOTE(review): params is intentionally empty here; group membership is
        // handled separately via groupSetting(GROUP_ADD_PERSON). Confirm that the
        // groupId parameter is really unused for enrollment.
        String params = "";
        verifier.writeData("ifr", params, imageData, 0, imageData.length);
        verifier.stopWrite("ifr");
    }

    /**
     * Searches a group for the person matching the given face image.
     *
     * @param groupId   group id to search in
     * @param imageData raw image bytes
     * @param callback  result callback (findFace)
     */
    @Override
    public void findFace(String groupId, byte[] imageData, RecognitionCallback callback) {
        actionType = FaceRecognition.RECOGNITION_FIND;
        initListener(callback);
        verifier.setParameter(SpeechConstant.MFV_SCENES, "ifr");
        // "identify" = 1:N search within a group.
        verifier.setParameter(SpeechConstant.MFV_SST, "identify");
        verifier.startWorking(identityListener);
        String params = "scope=person,group_id=" + groupId;
        verifier.writeData("ifr", params, imageData, 0, imageData.length);
        verifier.stopWrite("ifr");
    }

    /**
     * Verifies that a face image belongs to a specific user (1:1).
     *
     * @param groupId   group id
     * @param userId    user id to verify against
     * @param imageData raw image bytes
     * @param callback  result callback (identify)
     */
    @Override
    public void validationFace(String groupId, String userId, byte[] imageData, RecognitionCallback callback) {
        actionType = FaceRecognition.RECOGNITION_IDENTIFY;
        initListener(callback);
        verifier.setParameter(SpeechConstant.MFV_SCENES, "ifr");
        // "verify" = 1:1 verification; "sin" = single-modality verification mode.
        verifier.setParameter(SpeechConstant.MFV_SST, "verify");
        verifier.setParameter(SpeechConstant.MFV_VCM, "sin");
        verifier.setParameter(SpeechConstant.AUTH_ID, userId);
        verifier.startWorking(identityListener);
        String params = "group_id=" + groupId + ",auth_id=" + userId;
        verifier.writeData("ifr", params, imageData, 0, imageData.length);
        verifier.stopWrite("ifr");
    }

    /**
     * Deletes an enrolled face for the given user.
     *
     * @param userId   user id whose face model is removed
     * @param callback result callback (deleteFace)
     */
    @Override
    public void deleteFace(String userId, RecognitionCallback callback) {
        actionType = FaceRecognition.RECOGNITION_DELETE;
        initListener(callback);
        verifier.setParameter(SpeechConstant.MFV_SCENES, "ifr");
        verifier.setParameter(SpeechConstant.AUTH_ID, userId);
        verifier.execute("ifr", "delete", "", identityListener);
    }

    /**
     * Cancels any running session and releases the verifier.
     */
    @Override
    public void cancel() {
        if (verifier != null) {
            verifier.cancel();
            verifier.destroy();
            verifier = null;
        }
    }

    /**
     * (Re)builds the identity listener bound to the given callback.
     * The original built the listener only on the first call, which meant every
     * callback passed afterwards was silently ignored; rebuilding it each time
     * guarantees results go to the most recent caller.
     *
     * @param callback result callback to dispatch SDK results to
     */
    public void initListener(final RecognitionCallback callback) {
        identityListener = new IdentityListener() {
            @Override
            public void onResult(IdentityResult identityResult, boolean b) {
                Log.i("new--", identityResult.getResultString());
                if (TextUtils.isEmpty(identityResult.getResultString())) return;
                XunfeiFaceResult result = XunfeiFaceResult.getResult(identityResult.getResultString());
                if (result == null) {
                    return;
                }
                if (result.getSsub().equalsIgnoreCase("ipt")) {
                    handleGroupResult(result, callback);
                } else if (result.getSsub().equalsIgnoreCase("ifr")) {
                    handleFaceResult(result, callback);
                }
            }

            @Override
            public void onError(SpeechError speechError) {
                Log.i("new--", speechError.getErrorCode() + ":" + speechError.getErrorDescription());
                callback.fail(speechError.getErrorCode() + ":" + speechError.getErrorDescription(), actionType);
            }

            @Override
            public void onEvent(int i, int i1, int i2, Bundle bundle) {

            }
        };
    }

    /** Dispatches a group-management ("ipt") result to the matching callback method. */
    private void handleGroupResult(XunfeiFaceResult result, RecognitionCallback callback) {
        boolean ok = result.getRet() == 0;
        if (result.getSst().equalsIgnoreCase("query")) {
            // Listing all members of a group.
            findAllPerson(result, callback);
        } else if (result.getSst().equalsIgnoreCase("add")) {
            if (TextUtils.isEmpty(result.getUser())) {
                // No user field in the result -> this was a "create group" request.
                callback.createGroup(ok, result.getGroup_id());
            } else {
                callback.addPersonToGroup(ok, result.getUser(), result.getGroup_id());
            }
        } else if (result.getSst().equalsIgnoreCase("delete")) {
            if (!TextUtils.isEmpty(result.getUser()) && !TextUtils.isEmpty(result.getGroup_id())) {
                callback.deletePersonFromGroup(ok, result.getUser(), result.getGroup_id());
            } else if (!TextUtils.isEmpty(result.getGroup_id()) && TextUtils.isEmpty(result.getUser())) {
                callback.deleteGroup(ok, result.getGroup_id());
            }
        }
    }

    /** Dispatches a face-operation ("ifr") result to the matching callback method. */
    private void handleFaceResult(XunfeiFaceResult result, RecognitionCallback callback) {
        if (result.getSst().equalsIgnoreCase("verify")) {
            // 1:1 verification: accepted/rejected plus the match score.
            callback.identify(result.getDecision().equalsIgnoreCase("accepted"), result.getFace_score());
        } else if (result.getSst().equalsIgnoreCase("enroll")) {
            callback.register(result.getRet() == 0);
        } else if (result.getSst().equalsIgnoreCase("identify")) {
            // 1:N search: pick the best accepted candidate.
            findFaceNext(result, callback);
        } else if (result.getSst().equalsIgnoreCase("delete")) {
            callback.deleteFace(result.getRet() == 0);
        }
    }

    /**
     * Logs the members of a group returned by a "query" operation.
     * NOTE(review): the callback parameter is never invoked here (as in the
     * original); callers of GROUP_FIND_ALL currently only get a log line —
     * confirm whether RecognitionCallback has a member-list method to call.
     */
    private void findAllPerson(XunfeiFaceResult result, RecognitionCallback callback) {
        String ss = "";
        for (XunfeiFaceResult.PersonBean b : result.getPerson()) {
            ss += "用户:" + b.getUser() + ", ";
        }
        Log.i("new--", ss);
    }

    /**
     * Picks the highest-scoring "accepted" candidate from a face-search result.
     * The original removed rejected candidates from the list while iterating it
     * with a for-each loop, which throws ConcurrentModificationException; a
     * filtered copy is built instead.
     */
    private void findFaceNext(XunfeiFaceResult result, RecognitionCallback callback) {
        List<XunfeiFaceResult.IfvResultBean.CandidatesBean> candidates =
                result.getIfv_result().getCandidates();
        if (candidates == null || candidates.isEmpty()) {
            callback.findFace(false, null, 0);
            return;
        }
        // Keep only the candidates the engine accepted.
        List<XunfeiFaceResult.IfvResultBean.CandidatesBean> accepted = new ArrayList<>();
        for (XunfeiFaceResult.IfvResultBean.CandidatesBean c : candidates) {
            if (c.getDecision().equalsIgnoreCase("accepted")) {
                accepted.add(c);
            }
        }
        if (accepted.isEmpty()) {
            callback.findFace(false, null, 0);
            return;
        }
        // Highest score wins.
        XunfeiFaceResult.IfvResultBean.CandidatesBean best = accepted.get(0);
        for (XunfeiFaceResult.IfvResultBean.CandidatesBean c : accepted) {
            if (c.getScore() > best.getScore()) {
                best = c;
            }
        }
        Person person = new Person();
        person.setGorupId(result.getGroup_id() + ""); // (sic) setter name comes from Person
        person.setUserId(best.getUser());
        callback.findFace(true, person, best.getScore());
    }
}


这里需要注意的是,人脸识别都是上传图片的二进制数据,官方有大小限制,要求在 200KB 以内,因此大图片就要压缩以后再上传:


/**
* 压缩图片
* @param imgPath 图片路径
* @return byte[]
*/
/**
 * Decodes an image file and re-encodes it as a downscaled JPEG so the byte
 * array stays small (the face API limits uploads to about 200 KB).
 *
 * @param imgPath path of the image file to compress
 * @return JPEG bytes, or an empty array if the file cannot be decoded
 *         (the original returned the raw decode result and crashed with an
 *         NPE on undecodable files)
 */
private byte[] getImgData(String imgPath) {
    BitmapFactory.Options options = new BitmapFactory.Options();
    // First pass: read only the image bounds to choose a sample size.
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeFile(imgPath, options);
    if (options.outWidth <= 0 || options.outHeight <= 0) {
        return new byte[0]; // file missing or not a decodable image
    }
    // Downsample so the longer edge ends up around 1024 px.
    options.inSampleSize = Math.max(1, (int) Math.ceil(Math.max(
            (double) options.outWidth / 1024f,
            (double) options.outHeight / 1024f)));
    options.inJustDecodeBounds = false;
    Bitmap bitmap = BitmapFactory.decodeFile(imgPath, options);
    if (bitmap == null) {
        return new byte[0]; // decode failed on the second pass
    }
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    bitmap.compress(Bitmap.CompressFormat.JPEG, 80, baos);
    // Free the native pixel memory promptly instead of waiting for GC.
    bitmap.recycle();
    return baos.toByteArray();
}


ok,第一篇文章完成!
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: