I am trying to call a Java method from an Android device. The Java method is exposed through an axis2 web service.
Here is my complete Java class. I wrote two methods that are called from the Android device. When I call them, I get the following error:
at java.lang.Thread.run(Thread.java:662)
Caused by: java.lang.NoClassDefFoundError: edu/cmu/sphinx/util/props/ConfigurationManager
ConfigurationManager is a class from an external jar library, which I use in the recognize_wave(String wavePath) method. I have also verified that edu/cmu/sphinx/util/props/ConfigurationManager is present in the jar file.
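To make that check reproducible at runtime, here is a small diagnostic sketch I can call from the service (checkSphinxOnClasspath is a hypothetical helper name of mine; it only uses the standard Class.forName API). It reports whether the class is visible to the classloader that actually runs the web service:

// Diagnostic sketch: reports whether the Sphinx class named in the error
// can be loaded by the web service's own classloader at runtime.
public static String checkSphinxOnClasspath() {
    try {
        Class.forName("edu.cmu.sphinx.util.props.ConfigurationManager");
        return "ConfigurationManager IS visible to the service classloader";
    } catch (ClassNotFoundException e) {
        return "ConfigurationManager is NOT visible: " + e;
    }
}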
Java method:
package edu.cmu.sphinx.demo.transcriber;
import edu.cmu.sphinx.frontend.util.AudioFileDataSource;
import edu.cmu.sphinx.recognizer.Recognizer;
import edu.cmu.sphinx.result.Result;
import edu.cmu.sphinx.util.props.ConfigurationManager;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
/** A simple example that shows how to transcribe a continuous audio file that has multiple utterances in it. */
public class Transcriber {
@SuppressWarnings("null")
public static String recognize_wave(String wavePath) throws MalformedURLException{
String resultText="";
URL audioURL;
audioURL = new URL(wavePath);
URL configURL = Transcriber.class.getResource("config.xml");
// config.xml must be packaged next to Transcriber.class; otherwise configURL is null
ConfigurationManager cm = new ConfigurationManager(configURL);
Recognizer recognizer = (Recognizer) cm.lookup("recognizer");
/* allocate the resource necessary for the recognizer */
recognizer.allocate();
// configure the audio input for the recognizer
AudioFileDataSource dataSource = (AudioFileDataSource) cm.lookup("audioFileDataSource");
dataSource.setAudioFile(audioURL, null);
// Loop until last utterance in the audio file has been decoded, in which case the recognizer will return null.
Result result;
StringBuilder transcript = new StringBuilder();
while ((result = recognizer.recognize()) != null) {
resultText = result.getBestResultNoFiller();
System.out.println(resultText);
transcript.append(resultText).append(' '); // keep every utterance, not just the last one
}
recognizer.deallocate(); // release the recognizer's resources
return transcript.toString().trim();
}
public String get_wav_byte(byte[] wavbite,String path) throws IOException
{
String result1="null";
try
{
File dstFile = new File(path);
FileOutputStream out = new FileOutputStream(dstFile);
out.write(wavbite, 0, wavbite.length);
out.close();
}
catch (IOException e)
{
System.out.println("IOException : " + e);
}
try {
result1=recognize_wave(path);
} catch (MalformedURLException e) {
e.printStackTrace();
}
return result1;
}
}
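To rule out a broken jar (as opposed to a jar that axis2 cannot see), a small local test harness like the one below can be run in a plain JVM on the server with the Sphinx jar on the classpath. This is only a sketch of mine; TranscriberLocalTest and the wav path are placeholders:

import edu.cmu.sphinx.demo.transcriber.Transcriber;

// Hypothetical local test: if this works but the axis2 call still throws
// NoClassDefFoundError, the jar is simply not on the service's classpath.
public class TranscriberLocalTest {
    public static void main(String[] args) throws Exception {
        // Placeholder path; point it at any wav file on the server.
        String text = Transcriber.recognize_wave("file:///D:/Sound/test.wav");
        System.out.println("Transcript: " + text);
    }
}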
Here is my Android code that calls that method using ksoap2:
package com.varma.samples.audiorecorder;
import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import org.ksoap2.SoapEnvelope;
import org.ksoap2.serialization.MarshalBase64;
import org.ksoap2.serialization.SoapObject;
import org.ksoap2.serialization.SoapSerializationEnvelope;
import org.ksoap2.transport.HttpTransportSE;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.os.Bundle;
import android.os.Environment;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
public class RecorderActivity extends Activity {
private static final int RECORDER_BPP =16;
private static final String AUDIO_RECORDER_FILE_EXT_WAV = ".wav";
private static final String AUDIO_RECORDER_FOLDER = "AudioRecorder";
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";
private static String AUDIO_WAV_FILE = "";
private static final int RECORDER_SAMPLERATE = 16000;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO; // CHANNEL_CONFIGURATION_MONO is deprecated
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private AudioRecord recorder = null;
private int bufferSize = 0;
private Thread recordingThread = null;
private boolean isRecording = false;
@SuppressLint("NewApi")
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
setButtonHandlers();
enableButtons(false);
bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE,RECORDER_CHANNELS,RECORDER_AUDIO_ENCODING);
}
private void setButtonHandlers() {
((Button)findViewById(R.id.btnStart)).setOnClickListener(btnClick);
((Button)findViewById(R.id.btnStop)).setOnClickListener(btnClick);
}
private void enableButton(int id,boolean isEnable){
((Button)findViewById(id)).setEnabled(isEnable);
}
private void enableButtons(boolean isRecording) {
enableButton(R.id.btnStart,!isRecording);
enableButton(R.id.btnStop,isRecording);
}
private String getFilename(){
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath,AUDIO_RECORDER_FOLDER);
if(!file.exists()){
file.mkdirs();
}
return (file.getAbsolutePath() + "/" + System.currentTimeMillis() + AUDIO_RECORDER_FILE_EXT_WAV);
}
private String getTempFilename(){
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath,AUDIO_RECORDER_FOLDER);
if(!file.exists()){
file.mkdirs();
}
File tempFile = new File(file,AUDIO_RECORDER_TEMP_FILE); // check the same path that is returned below
if(tempFile.exists())
tempFile.delete();
return (file.getAbsolutePath() + "/" + AUDIO_RECORDER_TEMP_FILE);
}
@SuppressLint({ "NewApi", "NewApi" })
private void startRecording(){
recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
RECORDER_SAMPLERATE, RECORDER_CHANNELS,RECORDER_AUDIO_ENCODING, bufferSize);
recorder.startRecording();
isRecording = true;
recordingThread = new Thread(new Runnable() {
@Override
public void run() {
writeAudioDataToFile();
}
},"AudioRecorder Thread");
recordingThread.start();
}
@SuppressLint({ "NewApi", "NewApi", "NewApi" })
private void writeAudioDataToFile(){
byte data[] = new byte[bufferSize];
String filename = getTempFilename();
FileOutputStream os = null;
try {
os = new FileOutputStream(filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
int read = 0;
if(null != os){
while(isRecording){
read = recorder.read(data, 0, bufferSize);
if(AudioRecord.ERROR_INVALID_OPERATION != read){
try {
os.write(data, 0, read); // write only the bytes actually read, not the whole buffer
} catch (IOException e) {
e.printStackTrace();
}
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
@SuppressLint({ "NewApi", "NewApi" })
private void stopRecording(){
if(null != recorder){
isRecording = false;
recorder.stop();
recorder.release();
recorder = null;
recordingThread = null;
}
copyWaveFile(getTempFilename(),getFilename());
deleteTempFile();
}
private void deleteTempFile() {
File file = new File(getTempFilename());
file.delete();
}
@SuppressLint("NewApi")
private void copyWaveFile(String inFilename,String outFilename){
FileInputStream in = null;
FileOutputStream out = null;
long totalAudioLen = 0;
long totalDataLen = totalAudioLen + 36;
long longSampleRate = RECORDER_SAMPLERATE;
int channels = 1;
long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels/8;
byte[] data = new byte[bufferSize];
try {
in = new FileInputStream(inFilename);
out = new FileOutputStream(outFilename);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
AppLog.logString("File size: " + totalDataLen);
WriteWaveFileHeader(out, totalAudioLen, totalDataLen,
longSampleRate, channels, byteRate);
int read;
while((read = in.read(data)) != -1){
out.write(data, 0, read); // write only what was read so the wav is not padded with stale bytes
}
in.close();
out.close();
AUDIO_WAV_FILE = outFilename; // remember the finished wav so the stop handler can upload it
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
// Read a wav file into a byte array.
public static byte[] getBytesFromFile(File file) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
BufferedInputStream in = new BufferedInputStream(new FileInputStream(file));
int read;
byte[] buff = new byte[1024];
while ((read = in.read(buff)) > 0)
{
out.write(buff, 0, read);
}
out.flush();
byte[] audioBytes = out.toByteArray();
return audioBytes;
}
private void WriteWaveFileHeader(
FileOutputStream out, long totalAudioLen,
long totalDataLen, long longSampleRate, int channels,
long byteRate) throws IOException {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1; // format = 1
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) (channels * RECORDER_BPP / 8); // block align = channels * bitsPerSample / 8 (2 for 16-bit mono)
header[33] = 0;
header[34] = RECORDER_BPP; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
private View.OnClickListener btnClick = new View.OnClickListener() {
@Override
public void onClick(View v) {
switch(v.getId()){
case R.id.btnStart:{
AppLog.logString("Start Recording");
enableButtons(true);
startRecording();
break;
}
case R.id.btnStop:{
AppLog.logString("Start Recording");
enableButtons(false);
stopRecording();
File source_for_byte = new File(AUDIO_WAV_FILE);
byte[] temp = null;
try {
temp = getBytesFromFile(source_for_byte);
} catch (IOException e) {
e.printStackTrace();
}
// Web service call
String NAMESPACE = "http://test.com";
String METHOD_NAME = "get_wav_byte"; // our web service method name
// Build SOAP_ACTION after METHOD_NAME is set; the original code concatenated
// them while METHOD_NAME was still empty, so the action was just the namespace.
String SOAP_ACTION = NAMESPACE + METHOD_NAME;
final String URL = "http://192.168.3.106:8080/axis2/services/VoiceService?wsdl";
// Note: this SOAP call runs on the UI thread; on Android 3.0+ it must be moved
// to a background thread or it will throw NetworkOnMainThreadException.
try {
SoapObject request = new SoapObject(NAMESPACE, METHOD_NAME);
request.addProperty("wavbite", temp);
request.addProperty("path", "D:\\sound\\latest_recognizer.wav");
SoapSerializationEnvelope envelope = new SoapSerializationEnvelope(
SoapEnvelope.VER11);
new MarshalBase64().register(envelope); // serialization
envelope.encodingStyle = SoapEnvelope.ENC;
envelope.dotNet = false; // the service is axis2 (Java), not .NET
envelope.setOutputSoapObject(request);
HttpTransportSE androidHttpTransport = new HttpTransportSE(URL);
androidHttpTransport.call(SOAP_ACTION, envelope);
Object result = envelope.getResponse();
// Object result = (SoapObject) envelope.bodyIn;
((TextView) findViewById(R.id.gettext1)).setText("NUMBER IS :-> "
+ result.toString());
} catch (Exception e) {
e.printStackTrace();
((TextView) findViewById(R.id.gettext1)).setText("ERROR:"
+ e.getClass().getName() + ":" + e.getMessage());
}
break;
}
}
}
};
}
All I can figure out is that it fails because the public static String recognize_wave(String wavePath) method uses classes from the external jar file. I have searched a lot, but I still haven't found good guidance.
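If it really is a server-side classpath problem, the only fix I can think of (an assumption based on the standard axis2 deployment layout; the jar and archive names here are hypothetical) is to bundle the Sphinx jar with the service, or put it where the whole axis2 webapp can see it:

# Option 1: inside the service archive; axis2 loads jars from a lib/ folder in the .aar
VoiceService.aar
    META-INF/services.xml
    edu/cmu/sphinx/demo/transcriber/Transcriber.class
    lib/sphinx4.jar

# Option 2: the axis2 webapp's shared library folder, visible to every deployed service
<TOMCAT_HOME>/webapps/axis2/WEB-INF/lib/sphinx4.jar

After copying the jar, the servlet container has to be restarted so the new library is picked up.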
I hope someone can help me. Thanks in advance.