// line.js
const fs = require("fs");
const path = require("path");
const line = require("@line/bot-sdk");
const azureTTS = require("./azure-tts");
const azureSpeech = require("./azure-speech");
const openai = require("./openai");

// LINE channel credentials are read from the environment.
const config = {
  channelAccessToken: process.env.channelAccessToken,
  channelSecret: process.env.channelSecret
};
const client = new line.Client(config);
const middleware = line.middleware(config);
// Builds the reply shared by the text and audio paths: the "ryan" role
// replies with plain text; every other role replies with synthesized speech,
// optionally followed by the text transcript.
const buildReply = async (textResponse, currentRole, transcriptStatus) => {
  const textMessage = { type: "text", text: textResponse };
  if (currentRole === "ryan") {
    return textMessage;
  }
  console.log(`Current role: ${currentRole}`);
  // azure-tts is expected to return a public HTTPS URL: LINE fetches
  // originalContentUrl itself, so a local file path would not work here.
  const audioUrl = await azureTTS.textToSpeech(textResponse, currentRole);
  const audioMessage = {
    type: "audio",
    originalContentUrl: audioUrl,
    // Upper-bound placeholder in milliseconds; LINE requires a duration.
    duration: 60000
  };
  return transcriptStatus ? [audioMessage, textMessage] : audioMessage;
};

// Handles an incoming LINE Messaging API event
const handleEvent = async (event) => {
  // Ignore anything that is not a message event.
  if (event.type !== "message") return null;

  if (event.message.type === "text") {
    const textResponse = await openai.chatGPT(event.message.text, event.source.userId);
    const currentRole = openai.getCurrentRole(event.source.userId);
    const transcriptStatus = openai.getTranscriptStatus(event.source.userId);
    return client.replyMessage(
      event.replyToken,
      await buildReply(textResponse, currentRole, transcriptStatus)
    );
  }
  if (event.message.type === "audio") {
    // Download the voice message to a temp file for Azure speech-to-text,
    // keyed by message id so concurrent requests cannot clobber each other.
    const audioFilePath = path.join(__dirname, `${event.message.id}.wav`);
    try {
      const audioStream = await client.getMessageContent(event.message.id);
      // Wait for the download to finish before transcribing; replying inside
      // a stream callback would let handleEvent resolve before the reply is sent.
      await new Promise((resolve, reject) => {
        const writeStream = fs.createWriteStream(audioFilePath);
        audioStream.on("error", reject);
        writeStream.on("error", reject);
        writeStream.on("finish", resolve);
        audioStream.pipe(writeStream);
      });
      const recognizedText = await azureSpeech.speechToText(audioFilePath);
      const textResponse = await openai.chatGPT(recognizedText, event.source.userId);
      const currentRole = openai.getCurrentRole(event.source.userId);
      const transcriptStatus = openai.getTranscriptStatus(event.source.userId);
      return client.replyMessage(
        event.replyToken,
        await buildReply(textResponse, currentRole, transcriptStatus)
      );
    } catch (error) {
      console.error("Error processing audio:", error);
    } finally {
      // Clean up the temp file whether or not recognition succeeded.
      if (fs.existsSync(audioFilePath)) fs.unlinkSync(audioFilePath);
    }
  }

  // Unsupported message types (stickers, images, ...) are ignored.
  return null;
};
module.exports = {
  middleware,
  handleEvent
};
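
// Minimal wiring sketch, run only when this file is executed directly. This
// is an assumption about how the module is mounted (the repo's real entry
// point, e.g. an app.js, may differ): @line/bot-sdk's middleware targets
// Express/Connect, and it must see the raw request body to verify the
// X-Line-Signature header, so no body parser should run before it.
if (require.main === module) {
  const express = require("express");
  const app = express();
  app.post("/webhook", middleware, (req, res) => {
    // The middleware has validated the signature and parsed req.body.events.
    Promise.all(req.body.events.map(handleEvent))
      .then((results) => res.json(results))
      .catch((err) => {
        console.error(err);
        res.status(500).end();
      });
  });
  app.listen(process.env.PORT || 3000);
}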