2018-12-14 19:35:49 +00:00
|
|
|
#[cfg(target_os = "linux")]
|
2021-11-19 15:22:05 +00:00
|
|
|
use std::{collections::HashMap, sync::Mutex};
|
2020-08-20 02:28:30 +00:00
|
|
|
|
|
|
|
use lazy_static::*;
|
2018-12-14 19:35:49 +00:00
|
|
|
use log::{info, trace};
|
|
|
|
use speech_dispatcher::*;
|
|
|
|
|
2020-09-23 15:12:51 +00:00
|
|
|
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
|
2018-12-14 19:35:49 +00:00
|
|
|
|
2020-11-03 03:44:47 +00:00
|
|
|
/// Linux TTS backend backed by a Speech Dispatcher [`Connection`].
///
/// The wrapped connection's client id doubles as this backend's identity
/// (see `Backend::id`) and keys the global `SPEAKING` map.
#[derive(Clone, Debug)]
pub(crate) struct SpeechDispatcher(Connection);
|
2018-12-14 19:35:49 +00:00
|
|
|
|
2020-08-20 02:28:30 +00:00
|
|
|
lazy_static! {
    // Per speech-dispatcher client id: is that client currently speaking?
    // Written from the begin/end/cancel/pause/resume callbacks registered in
    // `SpeechDispatcher::new`, read by `Backend::is_speaking`, and the entry
    // is removed again in `Drop`.
    static ref SPEAKING: Mutex<HashMap<u64, bool>> = Mutex::new(HashMap::new());
}
|
|
|
|
|
2018-12-14 19:35:49 +00:00
|
|
|
impl SpeechDispatcher {
    /// Opens a Speech Dispatcher connection and registers the lifecycle
    /// callbacks that keep the global `SPEAKING` map and the crate-wide
    /// `CALLBACKS` table in sync.
    ///
    /// # Errors
    ///
    /// Returns an `Error` if the connection to Speech Dispatcher cannot be
    /// opened.
    pub(crate) fn new() -> std::result::Result<Self, Error> {
        info!("Initializing SpeechDispatcher backend");
        // `Mode::Threaded`: speech-dispatcher invokes the callbacks below on
        // its own thread, hence the mutex-guarded shared state throughout.
        let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded)?;
        let sd = SpeechDispatcher(connection);
        let mut speaking = SPEAKING.lock().unwrap();
        // Seed this client's speaking flag; the entry is removed in `Drop`.
        // NOTE(review): this guard stays live until the end of `new`; the
        // callbacks run on a different thread, so at worst they block briefly
        // if fired during registration.
        speaking.insert(sd.0.client_id(), false);
        // An utterance started: mark the client as speaking and forward to the
        // user-registered `utterance_begin` callback, if any.
        sd.0.on_begin(Some(Box::new(|msg_id, client_id| {
            let mut speaking = SPEAKING.lock().unwrap();
            speaking.insert(client_id, true);
            let mut callbacks = CALLBACKS.lock().unwrap();
            let backend_id = BackendId::SpeechDispatcher(client_id);
            let cb = callbacks.get_mut(&backend_id).unwrap();
            let utterance_id = UtteranceId::SpeechDispatcher(msg_id);
            if let Some(f) = cb.utterance_begin.as_mut() {
                f(utterance_id);
            }
        })));
        // An utterance finished normally: clear the flag and notify
        // `utterance_end`.
        sd.0.on_end(Some(Box::new(|msg_id, client_id| {
            let mut speaking = SPEAKING.lock().unwrap();
            speaking.insert(client_id, false);
            let mut callbacks = CALLBACKS.lock().unwrap();
            let backend_id = BackendId::SpeechDispatcher(client_id);
            let cb = callbacks.get_mut(&backend_id).unwrap();
            let utterance_id = UtteranceId::SpeechDispatcher(msg_id);
            if let Some(f) = cb.utterance_end.as_mut() {
                f(utterance_id);
            }
        })));
        // An utterance was cancelled (e.g. via `stop`): clear the flag and
        // notify `utterance_stop`.
        sd.0.on_cancel(Some(Box::new(|msg_id, client_id| {
            let mut speaking = SPEAKING.lock().unwrap();
            speaking.insert(client_id, false);
            let mut callbacks = CALLBACKS.lock().unwrap();
            let backend_id = BackendId::SpeechDispatcher(client_id);
            let cb = callbacks.get_mut(&backend_id).unwrap();
            let utterance_id = UtteranceId::SpeechDispatcher(msg_id);
            if let Some(f) = cb.utterance_stop.as_mut() {
                f(utterance_id);
            }
        })));
        // Pause/resume only toggle the speaking flag here; no user callback
        // is invoked for them.
        sd.0.on_pause(Some(Box::new(|_msg_id, client_id| {
            let mut speaking = SPEAKING.lock().unwrap();
            speaking.insert(client_id, false);
        })));
        sd.0.on_resume(Some(Box::new(|_msg_id, client_id| {
            let mut speaking = SPEAKING.lock().unwrap();
            speaking.insert(client_id, true);
        })));
        Ok(sd)
    }
}
|
|
|
|
|
|
|
|
impl Backend for SpeechDispatcher {
|
2020-09-23 15:12:51 +00:00
|
|
|
fn id(&self) -> Option<BackendId> {
|
|
|
|
Some(BackendId::SpeechDispatcher(self.0.client_id()))
|
|
|
|
}
|
|
|
|
|
2019-03-24 21:30:45 +00:00
|
|
|
fn supported_features(&self) -> Features {
|
|
|
|
Features {
|
|
|
|
stop: true,
|
|
|
|
rate: true,
|
|
|
|
pitch: true,
|
|
|
|
volume: true,
|
2020-08-20 02:28:30 +00:00
|
|
|
is_speaking: true,
|
2020-09-23 15:12:51 +00:00
|
|
|
utterance_callbacks: true,
|
2019-03-24 21:30:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-22 17:40:03 +00:00
|
|
|
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
|
2018-12-14 19:35:49 +00:00
|
|
|
trace!("speak({}, {})", text, interrupt);
|
|
|
|
if interrupt {
|
2018-12-30 17:13:48 +00:00
|
|
|
self.stop()?;
|
2018-12-14 19:35:49 +00:00
|
|
|
}
|
2019-09-30 15:36:51 +00:00
|
|
|
let single_char = text.to_string().capacity() == 1;
|
|
|
|
if single_char {
|
|
|
|
self.0.set_punctuation(Punctuation::All);
|
|
|
|
}
|
2020-09-22 17:40:03 +00:00
|
|
|
let id = self.0.say(Priority::Important, text);
|
2019-09-30 15:36:51 +00:00
|
|
|
if single_char {
|
|
|
|
self.0.set_punctuation(Punctuation::None);
|
|
|
|
}
|
2020-09-22 17:40:03 +00:00
|
|
|
if let Some(id) = id {
|
2020-09-26 17:47:18 +00:00
|
|
|
Ok(Some(UtteranceId::SpeechDispatcher(id)))
|
2020-09-22 17:40:03 +00:00
|
|
|
} else {
|
|
|
|
Err(Error::NoneError)
|
|
|
|
}
|
2018-12-14 19:35:49 +00:00
|
|
|
}
|
|
|
|
|
2020-07-06 17:52:18 +00:00
|
|
|
fn stop(&mut self) -> Result<(), Error> {
|
2018-12-28 14:49:02 +00:00
|
|
|
trace!("stop()");
|
|
|
|
self.0.cancel();
|
2018-12-30 17:13:48 +00:00
|
|
|
Ok(())
|
2018-12-28 14:49:02 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn min_rate(&self) -> f32 {
|
|
|
|
-100.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn max_rate(&self) -> f32 {
|
|
|
|
100.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn normal_rate(&self) -> f32 {
|
|
|
|
0.
|
2018-12-14 19:35:49 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn get_rate(&self) -> Result<f32, Error> {
|
|
|
|
Ok(self.0.get_voice_rate() as f32)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
|
|
|
|
self.0.set_voice_rate(rate as i32);
|
2018-12-30 17:13:48 +00:00
|
|
|
Ok(())
|
2018-12-15 15:56:13 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn min_pitch(&self) -> f32 {
|
|
|
|
-100.
|
2018-12-15 15:56:13 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn max_pitch(&self) -> f32 {
|
|
|
|
100.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn normal_pitch(&self) -> f32 {
|
|
|
|
0.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_pitch(&self) -> Result<f32, Error> {
|
|
|
|
Ok(self.0.get_voice_pitch() as f32)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
|
|
|
|
self.0.set_voice_pitch(pitch as i32);
|
2018-12-30 17:13:48 +00:00
|
|
|
Ok(())
|
2018-12-15 15:56:13 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn min_volume(&self) -> f32 {
|
|
|
|
-100.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn max_volume(&self) -> f32 {
|
|
|
|
100.
|
|
|
|
}
|
|
|
|
|
|
|
|
fn normal_volume(&self) -> f32 {
|
2020-08-18 20:19:34 +00:00
|
|
|
100.
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn get_volume(&self) -> Result<f32, Error> {
|
|
|
|
Ok(self.0.get_volume() as f32)
|
2018-12-15 15:56:13 +00:00
|
|
|
}
|
|
|
|
|
Clean up speech synthesis properties, and implement everything for WinRT.
I'd previously attempted to normalize everything to `u8`, but this had some drawbacks:
* It failed to account for some synthesis drivers defining normal as mid-range, while most define it very low.
* It didn't track the normal value for a given synthesizer.
* There was no clean way to map a curve between the minimum, normal, and maximum rates.
Here we track the minimum, normal, and maximum values of rate, pitch, and volume. Sanity checks are done on set.
Also, as a further proof-of-concept, all properties are now implemented for the WinRT driver.
2020-05-18 23:12:59 +00:00
|
|
|
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
|
|
|
|
self.0.set_volume(volume as i32);
|
2018-12-30 17:13:48 +00:00
|
|
|
Ok(())
|
2018-12-14 19:35:49 +00:00
|
|
|
}
|
2020-06-02 19:53:14 +00:00
|
|
|
|
|
|
|
fn is_speaking(&self) -> Result<bool, Error> {
|
2020-08-20 02:28:30 +00:00
|
|
|
let speaking = SPEAKING.lock().unwrap();
|
|
|
|
let is_speaking = speaking.get(&self.0.client_id()).unwrap();
|
|
|
|
Ok(*is_speaking)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for SpeechDispatcher {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
let mut speaking = SPEAKING.lock().unwrap();
|
|
|
|
speaking.remove(&self.0.client_id());
|
2020-06-02 19:53:14 +00:00
|
|
|
}
|
2018-12-14 19:35:49 +00:00
|
|
|
}
|