using Godot;
using System;
using System.IO;
using Syn.Speech.Api;
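// Records microphone input through an AudioEffectRecord on the "Record" audio
// bus, saves it to user://record.wav, and then runs offline speech recognition
// on the saved file with Syn.Speech (a C# port of CMUSphinx). The recognized
// text is broadcast through the "result" signal.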
public class test : Node2D
{
    [Signal]
    delegate void result(string output);
    [Signal]
    delegate void delete_audio();
    [Export]
    private bool _use_keyword = true;
    [Export]
    private string _save_path = "user://record.wav";
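    // The two handlers below follow Godot's auto-connect naming, so they are
    // presumably wired to the button_down/button_up signals of a Button node
    // named "Record_button" (hold to record, release to recognize).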
    private void _on_Record_button_button_down()
    {
        GD.Print("Voice started recording");
        _effect.SetRecordingActive(true);
        EmitSignal("delete_audio");
    }
    private void _on_Record_button_button_up()
    {
        GD.Print("Voice stopped recording");
        _recording = _effect.GetRecording();
        _effect.SetRecordingActive(false);
        _recording.SaveToWav(_save_path); // Save the recording to a .wav file
        Main(); // Run the conversion immediately after saving
    }
    // Handles audio recording
    private AudioEffectRecord _effect;
    private AudioStreamSample _recording;
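    // Recording requires a "Record" audio bus with an AudioEffectRecord as its
    // first effect, and microphone capture enabled in the project settings
    // (audio/enable_audio_input in Godot 3.x).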
    public override void _Ready()
    {
        // We get the index of the "Record" bus.
        int idx = AudioServer.GetBusIndex("Record");
        // And use it to retrieve its first effect, which has been defined
        // as an "AudioEffectRecord" resource.
        _effect = (AudioEffectRecord)AudioServer.GetBusEffect(idx, 0);
        //GD.Print(Godot.OS.GetUserDataDir() + "/record.wav");
    }
    // Handles speech recognition
    private static Configuration _configuration;
    private static StreamSpeechRecognizer _speechRecognizer;
    private string _audio_file = Godot.OS.GetUserDataDir() + "/record.wav";
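    // The "Models" folder under the current working directory is expected to
    // hold the CMUSphinx-style data used by Syn.Speech: the acoustic model
    // files, cmudict-en-us.dict and en-us.lm.dmp, plus (presumably) a
    // "Hello.gram" JSGF grammar when _use_keyword is enabled.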
    public void Main()
    {
        var modelsDirectory = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "Models");
        _configuration = new Configuration
        {
            AcousticModelPath = modelsDirectory,
            DictionaryPath = System.IO.Path.Combine(modelsDirectory, "cmudict-en-us.dict"),
            LanguageModelPath = System.IO.Path.Combine(modelsDirectory, "en-us.lm.dmp"),
            UseGrammar = _use_keyword,
            GrammarPath = modelsDirectory,
            GrammarName = "Hello"
        };
        // The using block disposes of the file stream even when recognition
        // returns no result.
        using (FileStream fs = new FileStream(_audio_file, FileMode.Open))
        {
            _speechRecognizer = new StreamSpeechRecognizer(_configuration);
            _speechRecognizer.StartRecognition(fs);
            var result = _speechRecognizer.GetResult();
            _speechRecognizer.StopRecognition();
            if (result != null)
            {
                GD.Print("Speech Recognized: " + result.GetHypothesis());
                EmitSignal("result", "Speech Recognized: " + result.GetHypothesis());
            }
        }
    }
}
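// A minimal sketch of a consumer node (hypothetical, not part of this file),
// assuming this script is attached to a node reachable at the path used below:
//
//     public class SubtitleLabel : Godot.Label
//     {
//         public override void _Ready()
//         {
//             // "../test" is an assumed scene path to the node running test.cs.
//             var recognizer = GetNode("../test");
//             recognizer.Connect("result", this, nameof(OnResult));
//         }
//
//         private void OnResult(string output)
//         {
//             Text = output; // e.g. "Speech Recognized: hello"
//         }
//     }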