2020-12-07 20:58:59 +00:00
|
|
|
#[cfg(windows)]
|
2020-12-08 03:35:07 +00:00
|
|
|
use std::collections::{HashMap, VecDeque};
|
|
|
|
use std::sync::Mutex;
|
2020-09-23 15:31:21 +00:00
|
|
|
|
|
|
|
use lazy_static::lazy_static;
|
2020-05-18 20:01:28 +00:00
|
|
|
use log::{info, trace};
|
2020-06-17 21:46:42 +00:00
|
|
|
|
2020-06-17 22:25:43 +00:00
|
|
|
use tts_winrt_bindings::windows::media::playback::{
|
2020-11-25 16:07:28 +00:00
|
|
|
MediaPlaybackState, MediaPlayer, MediaPlayerAudioCategory,
|
2020-06-14 23:56:01 +00:00
|
|
|
};
|
2020-06-17 22:25:43 +00:00
|
|
|
use tts_winrt_bindings::windows::media::speech_synthesis::SpeechSynthesizer;
|
2020-09-24 22:56:46 +00:00
|
|
|
use tts_winrt_bindings::windows::{foundation::TypedEventHandler, media::core::MediaSource};
|
2020-05-18 20:01:28 +00:00
|
|
|
|
2020-09-24 22:56:46 +00:00
|
|
|
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
|
2020-05-18 20:01:28 +00:00
|
|
|
|
|
|
|
impl From<winrt::Error> for Error {
|
|
|
|
fn from(e: winrt::Error) -> Self {
|
|
|
|
Error::WinRT(e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-03 03:44:47 +00:00
|
|
|
/// Text-to-speech backend built on the WinRT speech-synthesis and
/// media-playback APIs.
#[derive(Clone, Debug)]
pub struct WinRT {
    /// Unique identifier for this backend instance.
    id: BackendId,
    /// WinRT synthesizer that renders text into an audio stream.
    synth: SpeechSynthesizer,
    /// Media player that plays back the synthesized audio.
    player: MediaPlayer,
    /// Speaking rate applied to utterances queued by this backend.
    rate: f32,
    /// Audio pitch applied to utterances queued by this backend.
    pitch: f32,
    /// Audio volume applied to utterances queued by this backend.
    volume: f32,
}
|
|
|
|
|
|
|
|
/// A single queued speech request: the text to speak plus the synthesis
/// parameters that were in effect when it was queued, so later changes to the
/// backend's settings do not retroactively alter pending speech.
struct Utterance {
    /// Identifier reported to utterance begin/end/stop callbacks.
    id: UtteranceId,
    /// The text to synthesize.
    text: String,
    /// Speaking rate captured at queue time.
    rate: f32,
    /// Audio pitch captured at queue time.
    pitch: f32,
    /// Audio volume captured at queue time.
    volume: f32,
}
|
|
|
|
|
2020-09-23 15:31:21 +00:00
|
|
|
lazy_static! {
    /// Monotonic counter used to mint unique backend identifiers.
    static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
    /// Monotonic counter used to mint unique utterance identifiers.
    static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
    /// Registry mapping each backend to its synthesizer so the `media_ended`
    /// event handler can locate the right synthesizer for the next utterance.
    static ref BACKEND_TO_SPEECH_SYNTHESIZER: Mutex<HashMap<BackendId, SpeechSynthesizer>> =
        Mutex::new(HashMap::new());
    /// Registry mapping each backend to its media player, used to resolve the
    /// backend id from the player instance inside event handlers.
    static ref BACKEND_TO_MEDIA_PLAYER: Mutex<HashMap<BackendId, MediaPlayer>> =
        Mutex::new(HashMap::new());
    /// Per-backend FIFO of pending utterances awaiting playback.
    static ref UTTERANCES: Mutex<HashMap<BackendId, VecDeque<Utterance>>> =
        Mutex::new(HashMap::new());
}
|
|
|
|
|
2020-05-18 20:01:28 +00:00
|
|
|
impl WinRT {
    /// Creates a new WinRT backend: constructs the synthesizer and media
    /// player, assigns a fresh `BackendId`, registers both objects in the
    /// global registries, and installs a `media_ended` handler that reports
    /// utterance completion and starts the next queued utterance.
    pub fn new() -> std::result::Result<Self, Error> {
        info!("Initializing WinRT backend");
        let synth = SpeechSynthesizer::new()?;
        let player = MediaPlayer::new()?;
        // Low-latency playback tuned for speech output.
        player.set_real_time_playback(true)?;
        player.set_audio_category(MediaPlayerAudioCategory::Speech)?;
        // Mint a unique backend id; release the counter lock immediately so
        // the registry insertions below cannot hold two locks at once.
        let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
        let bid = BackendId::WinRT(*backend_id);
        *backend_id += 1;
        drop(backend_id);
        {
            // Start this backend with an empty utterance queue.
            let mut utterances = UTTERANCES.lock().unwrap();
            utterances.insert(bid, VecDeque::new());
        }
        // Register player and synthesizer so event handlers can map a
        // MediaPlayer instance back to this backend. Each guard is dropped
        // before the next lock is taken to keep lock scopes disjoint.
        let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
        backend_to_media_player.insert(bid, player.clone());
        drop(backend_to_media_player);
        let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
        backend_to_speech_synthesizer.insert(bid, synth.clone());
        drop(backend_to_speech_synthesizer);
        // Copy of the id moved into the event-handler closure below.
        let bid_clone = bid;
        player.media_ended(TypedEventHandler::new(
            move |sender: &MediaPlayer, _args| {
                // Resolve which backend owns the player that just finished.
                let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
                let id = backend_to_media_player.iter().find(|v| v.1 == sender);
                if let Some((id, _)) = id {
                    let mut utterances = UTTERANCES.lock().unwrap();
                    if let Some(utterances) = utterances.get_mut(id) {
                        // The utterance at the front is the one that just
                        // finished playing; pop it and fire its end callback.
                        if let Some(utterance) = utterances.pop_front() {
                            let mut callbacks = CALLBACKS.lock().unwrap();
                            let callbacks = callbacks.get_mut(id).unwrap();
                            if let Some(callback) = callbacks.utterance_end.as_mut() {
                                callback(utterance.id);
                            }
                            // If more speech is queued, synthesize and play the
                            // next utterance with the parameters it was queued
                            // with, then fire its begin callback.
                            if let Some(utterance) = utterances.front() {
                                let backend_to_speech_synthesizer =
                                    BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
                                let id = backend_to_speech_synthesizer
                                    .iter()
                                    .find(|v| *v.0 == bid_clone);
                                if let Some((_, tts)) = id {
                                    tts.options()?.set_speaking_rate(utterance.rate.into())?;
                                    tts.options()?.set_audio_pitch(utterance.pitch.into())?;
                                    tts.options()?.set_audio_volume(utterance.volume.into())?;
                                    // Blocking `get()` on the async synthesis;
                                    // NOTE(review): this runs on the media
                                    // event thread — presumably acceptable for
                                    // short utterances.
                                    let stream = tts
                                        .synthesize_text_to_stream_async(utterance.text.as_str())?
                                        .get()?;
                                    let content_type = stream.content_type()?;
                                    let source =
                                        MediaSource::create_from_stream(stream, content_type)?;
                                    sender.set_source(source)?;
                                    sender.play()?;
                                    if let Some(callback) = callbacks.utterance_begin.as_mut() {
                                        callback(utterance.id);
                                    }
                                }
                            }
                        }
                    }
                }
                /*let source = sender.source()?;
                let source: MediaPlaybackList = source.try_into()?;
                source.items()?.clear()?;
                let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
                let id = backend_to_media_player.iter().find(|v| v.1 == sender);
                if let Some(id) = id {
                    let id = id.0;
                    let mut callbacks = CALLBACKS.lock().unwrap();
                    let callbacks = callbacks.get_mut(&id).unwrap();
                    if let Some(callback) = callbacks.utterance_end.as_mut() {
                        let last_spoken_utterance = LAST_SPOKEN_UTTERANCE.lock().unwrap();
                        if let Some(utterance_id) = last_spoken_utterance.get(&id) {
                            callback(*utterance_id);
                        }
                    }
                }*/
                Ok(())
            },
        ))?;
        Ok(Self {
            id: bid,
            synth,
            player,
            // Default synthesis parameters: normal rate/pitch/volume.
            rate: 1.,
            pitch: 1.,
            volume: 1.,
        })
    }
}
|
|
|
|
|
|
|
|
impl Backend for WinRT {
    /// Returns this backend's identifier.
    fn id(&self) -> Option<BackendId> {
        Some(self.id)
    }

    /// The WinRT backend supports every optional feature.
    fn supported_features(&self) -> Features {
        Features {
            stop: true,
            rate: true,
            pitch: true,
            volume: true,
            is_speaking: true,
            utterance_callbacks: true,
        }
    }

    /// Queues `text` for speech. If `interrupt` is set and speech is in
    /// progress, pending utterances are stopped first. When the queue was
    /// empty and the player is idle, synthesis and playback start
    /// immediately; otherwise the `media_ended` handler (see `WinRT::new`)
    /// picks the utterance up when the current one finishes.
    fn speak(
        &mut self,
        text: &str,
        interrupt: bool,
    ) -> std::result::Result<Option<UtteranceId>, Error> {
        if interrupt && self.is_speaking()? {
            self.stop()?;
        }
        // Mint a unique utterance id; the scope releases the counter lock.
        let utterance_id = {
            let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
            let utterance_id = UtteranceId::WinRT(*uid);
            *uid += 1;
            utterance_id
        };
        let mut no_utterances = false;
        {
            let mut utterances = UTTERANCES.lock().unwrap();
            if let Some(utterances) = utterances.get_mut(&self.id) {
                // Remember whether the queue was empty BEFORE pushing: an
                // empty queue means nothing is in flight and we must start
                // playback ourselves rather than rely on the event handler.
                no_utterances = utterances.is_empty();
                let utterance = Utterance {
                    id: utterance_id,
                    text: text.into(),
                    rate: self.rate,
                    pitch: self.pitch,
                    volume: self.volume,
                };
                utterances.push_back(utterance);
            }
        }
        if no_utterances
            && self.player.playback_session()?.playback_state()? != MediaPlaybackState::Playing
        {
            // Apply the current parameters, synthesize, and start playback.
            self.synth.options()?.set_speaking_rate(self.rate.into())?;
            self.synth.options()?.set_audio_pitch(self.pitch.into())?;
            self.synth.options()?.set_audio_volume(self.volume.into())?;
            let stream = self.synth.synthesize_text_to_stream_async(text)?.get()?;
            let content_type = stream.content_type()?;
            let source = MediaSource::create_from_stream(stream, content_type)?;
            self.player.set_source(source)?;
            self.player.play()?;
            let mut callbacks = CALLBACKS.lock().unwrap();
            let callbacks = callbacks.get_mut(&self.id).unwrap();
            if let Some(callback) = callbacks.utterance_begin.as_mut() {
                callback(utterance_id);
            }
        }
        Ok(Some(utterance_id))
    }

    /// Stops speech: fires the `utterance_stop` callback for every queued
    /// utterance, clears the queue, and pauses the player. No-op when
    /// nothing is speaking.
    fn stop(&mut self) -> std::result::Result<(), Error> {
        trace!("stop()");
        if !self.is_speaking()? {
            return Ok(());
        }
        let mut utterances = UTTERANCES.lock().unwrap();
        if let Some(utterances) = utterances.get(&self.id) {
            let mut callbacks = CALLBACKS.lock().unwrap();
            let callbacks = callbacks.get_mut(&self.id).unwrap();
            if let Some(callback) = callbacks.utterance_stop.as_mut() {
                // Report every pending utterance as stopped, in queue order.
                for utterance in utterances {
                    callback(utterance.id);
                }
            }
        }
        if let Some(utterances) = utterances.get_mut(&self.id) {
            utterances.clear();
        }
        self.player.pause()?;
        Ok(())
    }

    /// Minimum speaking rate accepted by the WinRT synthesizer.
    fn min_rate(&self) -> f32 {
        0.5
    }

    /// Maximum speaking rate accepted by the WinRT synthesizer.
    fn max_rate(&self) -> f32 {
        6.0
    }

    /// The synthesizer's default speaking rate.
    fn normal_rate(&self) -> f32 {
        1.
    }

    /// Reads the speaking rate currently set on the synthesizer.
    fn get_rate(&self) -> std::result::Result<f32, Error> {
        let rate = self.synth.options()?.speaking_rate()?;
        Ok(rate as f32)
    }

    /// Stores the rate for future utterances; it is applied to the
    /// synthesizer when each utterance is actually played.
    fn set_rate(&mut self, rate: f32) -> std::result::Result<(), Error> {
        self.rate = rate;
        Ok(())
    }

    /// Minimum audio pitch accepted by the WinRT synthesizer.
    fn min_pitch(&self) -> f32 {
        0.
    }

    /// Maximum audio pitch accepted by the WinRT synthesizer.
    fn max_pitch(&self) -> f32 {
        2.
    }

    /// The synthesizer's default audio pitch.
    fn normal_pitch(&self) -> f32 {
        1.
    }

    /// Reads the audio pitch currently set on the synthesizer.
    fn get_pitch(&self) -> std::result::Result<f32, Error> {
        let pitch = self.synth.options()?.audio_pitch()?;
        Ok(pitch as f32)
    }

    /// Stores the pitch for future utterances; applied at playback time.
    fn set_pitch(&mut self, pitch: f32) -> std::result::Result<(), Error> {
        self.pitch = pitch;
        Ok(())
    }

    /// Minimum audio volume accepted by the WinRT synthesizer.
    fn min_volume(&self) -> f32 {
        0.
    }

    /// Maximum audio volume accepted by the WinRT synthesizer.
    fn max_volume(&self) -> f32 {
        1.
    }

    /// The synthesizer's default audio volume.
    fn normal_volume(&self) -> f32 {
        1.
    }

    /// Reads the audio volume currently set on the synthesizer.
    fn get_volume(&self) -> std::result::Result<f32, Error> {
        let volume = self.synth.options()?.audio_volume()?;
        Ok(volume as f32)
    }

    /// Stores the volume for future utterances; applied at playback time.
    fn set_volume(&mut self, volume: f32) -> std::result::Result<(), Error> {
        self.volume = volume;
        Ok(())
    }

    /// Speech is in progress whenever this backend's utterance queue is
    /// non-empty. The `unwrap` is safe because `WinRT::new` always inserts
    /// a queue for the backend's id.
    fn is_speaking(&self) -> std::result::Result<bool, Error> {
        let utterances = UTTERANCES.lock().unwrap();
        let utterances = utterances.get(&self.id).unwrap();
        Ok(!utterances.is_empty())
    }
}
|
2020-09-24 22:56:46 +00:00
|
|
|
|
|
|
|
impl Drop for WinRT {
|
|
|
|
fn drop(&mut self) {
|
2020-10-08 12:16:10 +00:00
|
|
|
let id = self.id;
|
2020-09-24 22:56:46 +00:00
|
|
|
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
|
|
|
|
backend_to_media_player.remove(&id);
|
2020-12-08 03:35:07 +00:00
|
|
|
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
|
|
|
|
backend_to_speech_synthesizer.remove(&id);
|
|
|
|
let mut utterances = UTTERANCES.lock().unwrap();
|
|
|
|
utterances.remove(&id);
|
2020-09-24 22:56:46 +00:00
|
|
|
}
|
|
|
|
}
|