-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.js
More file actions
266 lines (227 loc) · 8.84 KB
/
app.js
File metadata and controls
266 lines (227 loc) · 8.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
// Cached DOM nodes: the mic/voice indicator ring and the element whose
// background-image shows the generated picture.
const visualization = document.getElementById('visualization');
const background = document.getElementById('background');
// Pollinations image model used for generation ('flux', 'turbo' or 'kontext');
// switched by voice command in handleVoiceCommand.
let currentImageModel = 'flux';
// Rolling conversation history sent with every text request (trimmed to the
// last 12 messages before each call).
let chatHistory = [];
// Loaded from ai-instruct.txt on window load; a default string is used as
// fallback if the fetch fails.
let systemPrompt = "";
// --- App Initialization ---
// On load: fetch the system prompt, then start listening for voice input.
window.onload = async () => {
  // Fetch the system prompt. A non-OK response (e.g. 404) must also fall
  // back, otherwise the server's error page would become the system prompt.
  try {
    const response = await fetch('ai-instruct.txt');
    if (!response.ok) {
      throw new Error(`Failed to load ai-instruct.txt (status ${response.status})`);
    }
    systemPrompt = await response.text();
  } catch (error) {
    console.error('Error fetching system prompt:', error);
    systemPrompt = "You are Unity, a helpful AI assistant."; // Fallback prompt
  }
  // Start listening for voice input (recognition is undefined when the
  // browser lacks the Web Speech API).
  if (recognition) {
    recognition.start();
  }
};
// --- Speech Recognition ---
// Chrome exposes the Web Speech API under the webkit prefix.
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
let recognition; // stays undefined when the API is unsupported (guarded in window.onload)
if (SpeechRecognition) {
  recognition = new SpeechRecognition();
  recognition.continuous = true;      // keep listening across utterances
  recognition.lang = 'en-US';
  recognition.interimResults = false; // only deliver final transcripts
  recognition.maxAlternatives = 1;
  recognition.onstart = () => {
    console.log('Voice recognition started.');
    visualization.style.borderColor = '#ff0000'; // Red when listening
  };
  recognition.onend = () => {
    console.log('Voice recognition stopped.');
    visualization.style.borderColor = '#ffffff'; // White when not listening
    // Restart recognition if it stops unexpectedly; skip the restart while
    // the user has voice-muted the mic (isMuted is set in handleVoiceCommand).
    if (!isMuted) {
      recognition.start();
    }
  };
  recognition.onresult = (event) => {
    // Only the newest result matters; take its top alternative.
    const transcript = event.results[event.results.length - 1][0].transcript.trim();
    console.log('User said:', transcript);
    const isLocalCommand = handleVoiceCommand(transcript);
    // If it's not a local command, send it to the AI model
    if (!isLocalCommand) {
      getAIResponse(transcript);
    }
  };
  recognition.onerror = (event) => {
    console.error('Speech recognition error:', event.error);
  };
} else {
  console.error('Speech recognition not supported in this browser.');
  alert('Speech recognition not supported in this browser.');
}
// --- Speech Synthesis ---
const synth = window.speechSynthesis;
// True while the user has voice-muted the mic; also prevents the
// recognition onend handler from auto-restarting.
let isMuted = false;
/**
 * Speak `text` aloud, preferring a UK English female voice when available,
 * and pulse the visualization ring while the utterance is playing.
 * Does nothing for empty text; refuses to queue while already speaking
 * (keeps replies from overlapping).
 * @param {string} text - The text to vocalize.
 */
function speak(text) {
  if (synth.speaking) {
    console.error('Speech synthesis is already speaking.');
    return;
  }
  if (text !== '') {
    const utterance = new SpeechSynthesisUtterance(text);
    const voices = synth.getVoices();
    // BUG FIX: SpeechSynthesisVoice has no `gender` property, so the old
    // `voice.gender === 'female'` clause could never match. Select by voice
    // name first, then fall back to any en-GB voice.
    const ukFemaleVoice =
      voices.find(voice => voice.name.includes('Google UK English Female')) ||
      voices.find(voice => voice.lang === 'en-GB' && voice.name.toLowerCase().includes('female')) ||
      voices.find(voice => voice.lang === 'en-GB');
    if (ukFemaleVoice) {
      utterance.voice = ukFemaleVoice;
    } else {
      // Fallback to default voice if UK female voice is not found
      console.warn("UK English female voice not found, using default.");
    }
    utterance.onstart = () => {
      console.log('AI is speaking...');
      visualization.style.animation = 'pulse 1s infinite';
    };
    utterance.onend = () => {
      console.log('AI finished speaking.');
      visualization.style.animation = '';
    };
    synth.speak(utterance);
  }
}
// --- Voice Commands ---
/**
 * Execute locally-handled voice commands (mute, image actions, model
 * switching, history clearing).
 * @param {string} command - Raw transcript from speech recognition.
 * @returns {boolean} true if the command was handled locally (caller then
 *   skips the AI round-trip), false otherwise.
 */
function handleVoiceCommand(command) {
  const lowerCaseCommand = command.toLowerCase();
  // BUG FIX: the unmute checks must run BEFORE the mute checks, because
  // 'unmute my mic' contains the substring 'mute my mic' and was previously
  // matched by the mute branch, muting the mic instead of unmuting it.
  if (lowerCaseCommand.includes('unmute my mic') || lowerCaseCommand.includes('unmute microphone')) {
    isMuted = false;
    recognition.start();
    speak("Microphone unmuted.");
    return true;
  } else if (lowerCaseCommand.includes('mute my mic') || lowerCaseCommand.includes('mute microphone')) {
    isMuted = true;
    recognition.stop();
    speak("Microphone muted.");
    return true;
  } else if (lowerCaseCommand.includes('shut up') || lowerCaseCommand.includes('be quiet')) {
    synth.cancel(); // stop current speech output immediately
    return true;
  } else if (lowerCaseCommand.includes('copy image') || lowerCaseCommand.includes('copy this image')) {
    copyImageToClipboard();
    return true;
  } else if (lowerCaseCommand.includes('save image') || lowerCaseCommand.includes('download image')) {
    saveImage();
    return true;
  } else if (lowerCaseCommand.includes('open image') || lowerCaseCommand.includes('open this image')) {
    openImageInNewTab();
    return true;
  } else if (lowerCaseCommand.includes('use flux model') || lowerCaseCommand.includes('switch to flux')) {
    currentImageModel = 'flux';
    speak("Image model set to flux.");
    return true;
  } else if (lowerCaseCommand.includes('use turbo model') || lowerCaseCommand.includes('switch to turbo')) {
    currentImageModel = 'turbo';
    speak("Image model set to turbo.");
    return true;
  } else if (lowerCaseCommand.includes('use kontext model') || lowerCaseCommand.includes('switch to kontext')) {
    currentImageModel = 'kontext';
    speak("Image model set to kontext.");
    return true;
  } else if (lowerCaseCommand.includes('clear history') || lowerCaseCommand.includes('delete history') || lowerCaseCommand.includes('clear chat')) {
    chatHistory = [];
    speak("Chat history cleared.");
    return true;
  }
  return false;
}
// --- AI Model Interaction ---
/**
 * Send the user's utterance to the Pollinations text API (unity model),
 * speak the reply, and set the page background to an image generated from
 * the same prompt with the currently selected image model.
 * @param {string} userInput - The transcript to send to the AI.
 */
async function getAIResponse(userInput) {
  console.log(`Sending to AI: ${userInput}`);
  // Record the user's message, then trim to the last 12 messages
  // (the system prompt is prepended separately on every request).
  chatHistory.push({ role: "user", content: userInput });
  if (chatHistory.length > 12) {
    chatHistory.splice(0, chatHistory.length - 12);
  }
  // 1. Get text response from Pollinations AI (unity model)
  try {
    const messages = [
      { role: "system", content: systemPrompt },
      ...chatHistory
    ];
    const textResponse = await fetch('https://text.pollinations.ai/openai', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        "messages": messages,
        "model": "unity"
      })
    });
    // Treat HTTP errors and malformed payloads like network failures so the
    // catch below speaks the apology instead of crashing on undefined.
    if (!textResponse.ok) {
      throw new Error(`Text API responded with status ${textResponse.status}`);
    }
    const data = await textResponse.json();
    if (!data.choices || data.choices.length === 0) {
      throw new Error('Text API returned no choices');
    }
    const aiText = data.choices[0].message.content;
    // Add AI response to chat history
    chatHistory.push({ role: "assistant", content: aiText });
    // Speak the AI's text response
    speak(aiText);
  } catch (error) {
    console.error('Error getting text from Pollinations AI:', error);
    speak("Sorry, I couldn't get a text response.");
  }
  // 2. Get image from Pollinations AI (using the current image model);
  // the user's input doubles as the image-generation prompt.
  try {
    const imageUrl = `https://image.pollinations.ai/prompt/${encodeURIComponent(userInput)}?model=${currentImageModel}`;
    background.style.backgroundImage = `url(${imageUrl})`;
  } catch (error) {
    console.error('Error getting image from Pollinations AI:', error);
    // You might want to set a default background image here
  }
}
// --- Image Actions (Voice Controlled) ---
/**
 * Extract the URL of the current background image from the computed style.
 * @returns {string} The image URL, or '' when no background image is set
 *   (computed 'background-image' is 'none' before the first generation).
 */
function getImageUrl() {
  const style = window.getComputedStyle(background);
  const backgroundImage = style.getPropertyValue('background-image');
  // BUG FIX: the old `slice(5, -2)` assumed exactly url("...") and returned
  // garbage for 'none' or unquoted/single-quoted url() values. Parse with a
  // regex that handles url(...), url('...') and url("...").
  const match = backgroundImage.match(/url\((['"]?)(.*?)\1\)/);
  return match ? match[2] : '';
}
/**
 * Fetch the current background image and place it on the system clipboard.
 * Speaks a confirmation on success or an apology on failure; silently does
 * nothing when no image has been generated yet.
 */
async function copyImageToClipboard() {
  const imageUrl = getImageUrl();
  if (!imageUrl) {
    return; // no background image yet
  }
  try {
    const response = await fetch(imageUrl);
    const blob = await response.blob();
    const item = new ClipboardItem({ [blob.type]: blob });
    await navigator.clipboard.write([item]);
    speak('Image copied to clipboard.');
  } catch (err) {
    console.error('Failed to copy image: ', err);
    speak('Sorry, I could not copy the image. This might be due to browser limitations.');
  }
}
/**
 * Download the current background image as 'pollination_image.png' by
 * fetching it into a blob and clicking a hidden temporary anchor.
 * Speaks a confirmation on success or an apology on failure; silently does
 * nothing when no image has been generated yet.
 */
async function saveImage() {
  const imageUrl = getImageUrl();
  if (!imageUrl) {
    return; // nothing to save yet
  }
  try {
    const response = await fetch(imageUrl);
    const blob = await response.blob();
    const objectUrl = window.URL.createObjectURL(blob);
    const link = document.createElement('a');
    link.style.display = 'none';
    link.href = objectUrl;
    link.download = 'pollination_image.png'; // Or generate a more descriptive name
    document.body.appendChild(link);
    link.click();
    window.URL.revokeObjectURL(objectUrl);
    document.body.removeChild(link);
    speak('Image saved.');
  } catch (err) {
    console.error('Failed to save image: ', err);
    speak('Sorry, I could not save the image.');
  }
}
/**
 * Open the current background image in a new browser tab and announce it.
 * Silently does nothing when no image has been generated yet.
 */
function openImageInNewTab() {
  const imageUrl = getImageUrl();
  if (!imageUrl) {
    return; // no background image yet
  }
  window.open(imageUrl, '_blank');
  speak('Image opened in new tab.');
}