Merge remote-tracking branch 'upstream/master' into c-ffi

Michael Connor Buchan 2022-07-22 21:42:59 +01:00
commit 1597797c57
20 changed files with 811 additions and 257 deletions

View File

@ -27,14 +27,26 @@ jobs:
with:
command: check
args: --all-features --examples
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --examples
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --no-default-features
if: ${{ runner.os == 'Linux' }}
check_web:
name: Check Web
@ -88,4 +100,4 @@ jobs:
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo login $CARGO_TOKEN
cargo publish
cargo publish --no-default-features

View File

@ -26,14 +26,26 @@ jobs:
with:
command: check
args: --all-features --examples
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --examples
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --no-default-features
if: ${{ runner.os == 'Linux' }}
check_web:
name: Check Web
@ -55,7 +67,7 @@ jobs:
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,19 +1,20 @@
[package]
name = "tts"
version = "0.17.3"
version = "0.23.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
repository = "https://github.com/ndarilek/tts-rs"
description = "High-level Text-To-Speech (TTS) interface"
license = "MIT"
exclude = ["*.cfg", "*.yml"]
edition = "2018"
edition = "2021"
[lib]
crate-type = ["lib", "cdylib", "staticlib"]
[features]
use_tolk = ["tolk"]
ffi = ["cbindgen"]
speech_dispatcher_0_10 = ["speech-dispatcher/0_10"]
default = ["speech_dispatcher_0_10"]
[dependencies]
dyn-clonable = "0.9"
@ -21,32 +22,35 @@ lazy_static = "1"
libc = {version = "0.2", optional = true}
log = "0.4"
thiserror = "1"
unic-langid = "0.9.0"
serde = { version = "1.0", optional = true, features = ["derive"] }
[dev-dependencies]
env_logger = "0.8"
env_logger = "0.9"
[build-dependencies]
cbindgen = {version = "0.18.0", optional = true}
[target.'cfg(windows)'.dependencies]
tolk = { version = "0.5", optional = true }
windows = "0.9"
[target.'cfg(windows)'.build-dependencies]
windows = "0.9"
windows = { version = "0.39", features = ["Foundation", "Foundation_Collections", "Media_Core", "Media_Playback", "Media_SpeechSynthesis", "Storage_Streams"] }
[target.'cfg(target_os = "linux")'.dependencies]
speech-dispatcher = "0.7"
speech-dispatcher = { version = "0.13", default-features = false }
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
cocoa-foundation = "0.1"
core-foundation = "0.9"
libc = "0.2"
objc = { version = "0.2", features = ["exception"] }
[target.wasm32-unknown-unknown.dependencies]
wasm-bindgen = "0.2"
web-sys = { version = "0.3", features = ["EventTarget", "SpeechSynthesis", "SpeechSynthesisErrorCode", "SpeechSynthesisErrorEvent", "SpeechSynthesisEvent", "SpeechSynthesisUtterance", "Window", ] }
web-sys = { version = "0.3", features = ["EventTarget", "SpeechSynthesis", "SpeechSynthesisErrorCode", "SpeechSynthesisErrorEvent", "SpeechSynthesisEvent", "SpeechSynthesisUtterance", "SpeechSynthesisVoice", "Window", ] }
[target.'cfg(target_os="android")'.dependencies]
jni = "0.19"
ndk-glue = "0.3"
ndk-glue = "0.6"
[package.metadata.docs.rs]
no-default-features = true

View File

@ -1,14 +1,4 @@
fn main() {
#[cfg(windows)]
if std::env::var("TARGET").unwrap().contains("windows") {
windows::build!(
Windows::Foundation::{EventRegistrationToken, IAsyncOperation, TypedEventHandler},
Windows::Media::Core::MediaSource,
Windows::Media::Playback::{MediaPlaybackSession, MediaPlaybackState, MediaPlayer, MediaPlayerAudioCategory},
Windows::Media::SpeechSynthesis::{SpeechSynthesisStream, SpeechSynthesizer, SpeechSynthesizerOptions},
Windows::Storage::Streams::IRandomAccessStream,
);
}
if std::env::var("TARGET").unwrap().contains("-apple") {
println!("cargo:rustc-link-lib=framework=AVFoundation");
if !std::env::var("CARGO_CFG_TARGET_OS")

View File

@ -11,7 +11,7 @@ buildscript {
dependencies {
classpath "com.android.tools.build:gradle:4.1.1"
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
classpath "gradle.plugin.com.github.willir.rust:plugin:0.3.3"
classpath "gradle.plugin.com.github.willir.rust:plugin:0.3.4"
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
}

View File

@ -10,5 +10,5 @@ edition = "2018"
crate-type = ["dylib"]
[dependencies]
ndk-glue = "0.2"
ndk-glue = "0.6"
tts = { path = "../.." }

examples/clone_drop.rs Normal file
View File

@ -0,0 +1,89 @@
use std::io;
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let mut tts_clone = tts.clone();
drop(tts);
let Features { is_speaking, .. } = tts_clone.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts_clone.is_speaking()?);
}
tts_clone.speak("Hello, world.", false)?;
let Features { rate, .. } = tts_clone.supported_features();
if rate {
let original_rate = tts_clone.get_rate()?;
tts_clone.speak(format!("Current rate: {}", original_rate), false)?;
tts_clone.set_rate(tts_clone.max_rate())?;
tts_clone.speak("This is very fast.", false)?;
tts_clone.set_rate(tts_clone.min_rate())?;
tts_clone.speak("This is very slow.", false)?;
tts_clone.set_rate(tts_clone.normal_rate())?;
tts_clone.speak("This is the normal rate.", false)?;
tts_clone.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts_clone.supported_features();
if pitch {
let original_pitch = tts_clone.get_pitch()?;
tts_clone.set_pitch(tts_clone.max_pitch())?;
tts_clone.speak("This is high-pitch.", false)?;
tts_clone.set_pitch(tts_clone.min_pitch())?;
tts_clone.speak("This is low pitch.", false)?;
tts_clone.set_pitch(tts_clone.normal_pitch())?;
tts_clone.speak("This is normal pitch.", false)?;
tts_clone.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts_clone.supported_features();
if volume {
let original_volume = tts_clone.get_volume()?;
tts_clone.set_volume(tts_clone.max_volume())?;
tts_clone.speak("This is loud!", false)?;
tts_clone.set_volume(tts_clone.min_volume())?;
tts_clone.speak("This is quiet.", false)?;
tts_clone.set_volume(tts_clone.normal_volume())?;
tts_clone.speak("This is normal volume.", false)?;
tts_clone.set_volume(original_volume)?;
}
tts_clone.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

View File

@ -71,6 +71,23 @@ fn main() -> Result<(), Error> {
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
let Features { voice, .. } = tts.supported_features();
if voice {
let voices = tts.voices()?;
println!("Available voices:\n===");
for v in &voices {
println!("{:?}", v);
}
let Features { get_voice, .. } = tts.supported_features();
let original_voice = if get_voice { tts.voice()? } else { None };
for v in &voices {
tts.set_voice(v)?;
tts.speak(format!("This is {}.", v.name()), false)?;
}
if let Some(original_voice) = original_voice {
tts.set_voice(&original_voice)?;
}
}
tts.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.

View File

@ -7,5 +7,8 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
seed = "0.8"
tts = { path = "../.." }
console_log = "0.2"
log = "0.4"
seed = "0.9"
tts = { path = "../.." }
wasm-bindgen = "= 0.2.80"

View File

@ -1,5 +1,5 @@
<!DOCTYPE html>
<html>
<html lang="en">
<head>
<title>Example</title>

View File

@ -15,13 +15,20 @@ enum Msg {
RateChanged(String),
PitchChanged(String),
VolumeChanged(String),
VoiceChanged(String),
Speak,
}
fn init(_: Url, _: &mut impl Orders<Msg>) -> Model {
let tts = Tts::default().unwrap();
let mut tts = Tts::default().unwrap();
if tts.voices().unwrap().iter().len() > 0 {
if tts.voice().unwrap().is_none() {
tts.set_voice(tts.voices().unwrap().first().unwrap())
.expect("Failed to set voice");
}
}
Model {
text: Default::default(),
text: "Hello, world. This is a test of the current text-to-speech values.".into(),
tts,
}
}
@ -42,6 +49,13 @@ fn update(msg: Msg, model: &mut Model, _: &mut impl Orders<Msg>) {
let volume = volume.parse::<f32>().unwrap();
model.tts.set_volume(volume).unwrap();
}
VoiceChanged(voice) => {
for v in model.tts.voices().unwrap() {
if v.id() == voice {
model.tts.set_voice(&v).unwrap();
}
}
}
Speak => {
model.tts.speak(&model.text, false).unwrap();
}
@ -49,6 +63,7 @@ fn update(msg: Msg, model: &mut Model, _: &mut impl Orders<Msg>) {
}
fn view(model: &Model) -> Node<Msg> {
let should_show_voices = model.tts.voices().unwrap().iter().len() > 0;
form![
div![label![
"Text to speak",
@ -96,6 +111,36 @@ fn view(model: &Model) -> Node<Msg> {
input_ev(Ev::Input, Msg::VolumeChanged)
],
],],
if should_show_voices {
div![
label!["Voice"],
select![
model.tts.voices().unwrap().iter().map(|v| {
let selected = if let Some(voice) = model.tts.voice().unwrap() {
voice.id() == v.id()
} else {
false
};
option![
attrs! {
At::Value => v.id()
},
if selected {
attrs! {
At::Selected => selected
}
} else {
attrs! {}
},
v.name()
]
}),
input_ev(Ev::Change, Msg::VoiceChanged)
]
]
} else {
div!["Your browser does not seem to support selecting voices."]
},
button![
"Speak",
ev(Ev::Click, |e| {
@ -107,5 +152,6 @@ fn view(model: &Model) -> Node<Msg> {
}
fn main() {
console_log::init().expect("Error initializing logger");
App::start("app", init, update, view);
}

View File

@ -1,18 +1,22 @@
#[cfg(target_os = "android")]
use std::collections::HashSet;
use std::ffi::{CStr, CString};
use std::os::raw::c_void;
use std::sync::{Mutex, RwLock};
use std::thread;
use std::time::Duration;
use std::{
collections::HashSet,
ffi::{CStr, CString},
os::raw::c_void,
sync::{Mutex, RwLock},
thread,
time::{Duration, Instant},
};
use jni::objects::{GlobalRef, JObject, JString};
use jni::sys::{jfloat, jint, JNI_VERSION_1_6};
use jni::{JNIEnv, JavaVM};
use jni::{
objects::{GlobalRef, JObject, JString},
sys::{jfloat, jint, JNI_VERSION_1_6},
JNIEnv, JavaVM,
};
use lazy_static::lazy_static;
use log::{error, info};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
lazy_static! {
static ref BRIDGE: Mutex<Option<GlobalRef>> = Mutex::new(None);
@ -198,12 +202,18 @@ impl Android {
}
let tts = env.new_global_ref(tts)?;
// This hack makes my brain bleed.
const MAX_WAIT_TIME: Duration = Duration::from_millis(500);
let start = Instant::now();
// Wait a max of 500ms for initialization, then return an error to avoid hanging.
loop {
{
let pending = PENDING_INITIALIZATIONS.read().unwrap();
if !(*pending).contains(&bid) {
break;
}
if start.elapsed() > MAX_WAIT_TIME {
return Err(Error::OperationFailed);
}
}
thread::sleep(Duration::from_millis(5));
}
@ -238,6 +248,8 @@ impl Backend for Android {
volume: false,
is_speaking: true,
utterance_callbacks: true,
voice: false,
get_voice: false,
}
}
@ -375,4 +387,16 @@ impl Backend for Android {
let rv = rv.z()?;
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}

View File

@ -1,5 +1,4 @@
#[cfg(target_os = "macos")]
#[link(name = "AppKit", kind = "framework")]
use cocoa_foundation::base::{id, nil};
use cocoa_foundation::foundation::NSString;
use log::{info, trace};
@ -7,18 +6,18 @@ use objc::declare::ClassDecl;
use objc::runtime::*;
use objc::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
#[derive(Clone, Debug)]
pub(crate) struct AppKit(*mut Object, *mut Object);
impl AppKit {
pub(crate) fn new() -> Self {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing AppKit backend");
unsafe {
let obj: *mut Object = msg_send![class!(NSSpeechSynthesizer), new];
let mut decl =
ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject)).unwrap();
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
decl.add_ivar::<id>("synth");
decl.add_ivar::<id>("strings");
@ -82,11 +81,17 @@ impl AppKit {
let delegate_class = decl.register();
let delegate_obj: *mut Object = msg_send![delegate_class, new];
delegate_obj.as_mut().unwrap().set_ivar("synth", obj);
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("synth", obj);
let strings: id = msg_send![class!(NSMutableArray), new];
delegate_obj.as_mut().unwrap().set_ivar("strings", strings);
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("strings", strings);
let _: Object = msg_send![obj, setDelegate: delegate_obj];
AppKit(obj, delegate_obj)
Ok(AppKit(obj, delegate_obj))
}
}
}
@ -200,6 +205,18 @@ impl Backend for AppKit {
let is_speaking: i8 = unsafe { msg_send![self.0, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}
impl Drop for AppKit {

View File

@ -1,15 +1,17 @@
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[link(name = "AVFoundation", kind = "framework")]
use std::sync::Mutex;
use std::{str::FromStr, sync::Mutex};
use cocoa_foundation::base::{id, nil, NO};
use cocoa_foundation::foundation::NSString;
use core_foundation::array::CFArray;
use core_foundation::string::CFString;
use lazy_static::lazy_static;
use log::{info, trace};
use objc::runtime::{Object, Sel};
use objc::{class, declare::ClassDecl, msg_send, sel, sel_impl};
use unic_langid::LanguageIdentifier;
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
#[derive(Clone, Debug)]
pub(crate) struct AvFoundation {
@ -19,6 +21,7 @@ pub(crate) struct AvFoundation {
rate: f32,
volume: f32,
pitch: f32,
voice: Option<Voice>,
}
lazy_static! {
@ -26,9 +29,10 @@ lazy_static! {
}
impl AvFoundation {
pub(crate) fn new() -> Self {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing AVFoundation backend");
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject)).unwrap();
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
decl.add_ivar::<u64>("backend_id");
extern "C" fn speech_synthesizer_did_start_speech_utterance(
@ -142,10 +146,11 @@ impl AvFoundation {
rate: 0.5,
volume: 1.,
pitch: 1.,
voice: None,
}
};
*backend_id += 1;
rv
Ok(rv)
}
}
@ -161,6 +166,8 @@ impl Backend for AvFoundation {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
}
}
@ -185,6 +192,12 @@ impl Backend for AvFoundation {
let _: () = msg_send![utterance, setVolume: self.volume];
trace!("Setting pitch to {}", self.pitch);
let _: () = msg_send![utterance, setPitchMultiplier: self.pitch];
if let Some(voice) = &self.voice {
let mut vid = NSString::alloc(nil);
vid = vid.init_str(&voice.id());
let v: id = msg_send![class!(AVSpeechSynthesisVoice), voiceWithIdentifier: vid];
let _: () = msg_send![utterance, setVoice: v];
}
trace!("Enqueuing");
let _: () = msg_send![self.synth, speakUtterance: utterance];
trace!("Done queuing");
@ -271,6 +284,42 @@ impl Backend for AvFoundation {
let is_speaking: i8 = unsafe { msg_send![self.synth, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let voices: CFArray = unsafe { msg_send![class!(AVSpeechSynthesisVoice), speechVoices] };
let rv = voices
.iter()
.map(|v| {
let id: CFString = unsafe { msg_send![*v as *const Object, identifier] };
let name: CFString = unsafe { msg_send![*v as *const Object, name] };
let gender: i64 = unsafe { msg_send![*v as *const Object, gender] };
let gender = match gender {
1 => Some(Gender::Male),
2 => Some(Gender::Female),
_ => None,
};
let language: CFString = unsafe { msg_send![*v as *const Object, language] };
let language = language.to_string();
let language = LanguageIdentifier::from_str(&language).unwrap();
Voice {
id: id.to_string(),
name: name.to_string(),
gender,
language,
}
})
.collect();
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
self.voice = Some(voice.clone());
Ok(())
}
}
impl Drop for AvFoundation {

View File

@ -1,12 +1,12 @@
#[cfg(target_os = "linux")]
use std::collections::HashMap;
use std::sync::Mutex;
use std::{collections::HashMap, str::FromStr, sync::Mutex};
use lazy_static::*;
use log::{info, trace};
use speech_dispatcher::*;
use unic_langid::LanguageIdentifier;
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
#[derive(Clone, Debug)]
pub(crate) struct SpeechDispatcher(Connection);
@ -19,9 +19,9 @@ lazy_static! {
}
impl SpeechDispatcher {
pub(crate) fn new() -> Self {
pub(crate) fn new() -> std::result::Result<Self, Error> {
info!("Initializing SpeechDispatcher backend");
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded);
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded)?;
let sd = SpeechDispatcher(connection);
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(sd.0.client_id(), false);
@ -66,7 +66,7 @@ impl SpeechDispatcher {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
})));
sd
Ok(sd)
}
}
@ -82,6 +82,8 @@ impl Backend for SpeechDispatcher {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
}
}
@ -93,11 +95,11 @@ impl Backend for SpeechDispatcher {
}
let single_char = text.to_string().capacity() == 1;
if single_char {
self.0.set_punctuation(Punctuation::All);
self.0.set_punctuation(Punctuation::All)?;
}
let id = self.0.say(Priority::Important, text);
if single_char {
self.0.set_punctuation(Punctuation::None);
self.0.set_punctuation(Punctuation::None)?;
}
if let Some(id) = id {
Ok(Some(UtteranceId::SpeechDispatcher(id)))
@ -108,7 +110,7 @@ impl Backend for SpeechDispatcher {
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
self.0.cancel();
self.0.cancel()?;
Ok(())
}
@ -129,7 +131,7 @@ impl Backend for SpeechDispatcher {
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
self.0.set_voice_rate(rate as i32);
self.0.set_voice_rate(rate as i32)?;
Ok(())
}
@ -150,7 +152,7 @@ impl Backend for SpeechDispatcher {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
self.0.set_voice_pitch(pitch as i32);
self.0.set_voice_pitch(pitch as i32)?;
Ok(())
}
@ -171,7 +173,7 @@ impl Backend for SpeechDispatcher {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
self.0.set_volume(volume as i32);
self.0.set_volume(volume as i32)?;
Ok(())
}
@ -180,6 +182,35 @@ impl Backend for SpeechDispatcher {
let is_speaking = speaking.get(&self.0.client_id()).unwrap();
Ok(*is_speaking)
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let rv = self
.0
.list_synthesis_voices()?
.iter()
.map(|v| Voice {
id: v.name.clone(),
name: v.name.clone(),
gender: None,
language: LanguageIdentifier::from_str(&v.language).unwrap(),
})
.collect::<Vec<Voice>>();
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in self.0.list_synthesis_voices()? {
if v.name == voice.name {
self.0.set_synthesis_voice(&v)?;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for SpeechDispatcher {

View File

@ -4,7 +4,7 @@ use std::sync::Arc;
use log::{info, trace};
use tolk::Tolk as TolkPtr;
use crate::{Backend, BackendId, Error, Features, UtteranceId};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
#[derive(Clone, Debug)]
pub(crate) struct Tolk(Arc<TolkPtr>);
@ -108,4 +108,16 @@ impl Backend for Tolk {
fn is_speaking(&self) -> Result<bool, Error> {
unimplemented!()
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}

View File

@ -1,16 +1,17 @@
#[cfg(target_arch = "wasm32")]
use std::sync::Mutex;
use std::{str::FromStr, sync::Mutex};
use lazy_static::lazy_static;
use log::{info, trace};
use unic_langid::LanguageIdentifier;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{
SpeechSynthesisErrorCode, SpeechSynthesisErrorEvent, SpeechSynthesisEvent,
SpeechSynthesisUtterance,
SpeechSynthesisUtterance, SpeechSynthesisVoice,
};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
#[derive(Clone, Debug)]
pub struct Web {
@ -18,6 +19,7 @@ pub struct Web {
rate: f32,
pitch: f32,
volume: f32,
voice: Option<SpeechSynthesisVoice>,
}
lazy_static! {
@ -35,6 +37,7 @@ impl Web {
rate: 1.,
pitch: 1.,
volume: 1.,
voice: None,
};
*backend_id += 1;
Ok(rv)
@ -53,6 +56,8 @@ impl Backend for Web {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
@ -63,6 +68,9 @@ impl Backend for Web {
utterance.set_rate(self.rate);
utterance.set_pitch(self.pitch);
utterance.set_volume(self.volume);
if self.voice.is_some() {
utterance.set_voice(self.voice.as_ref());
}
let id = self.id().unwrap();
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::Web(*uid);
@ -196,6 +204,55 @@ impl Backend for Web {
Err(Error::NoneError)
}
}
fn voice(&self) -> Result<Option<Voice>, Error> {
if let Some(voice) = &self.voice {
Ok(Some(voice.clone().into()))
} else {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for voice in speech_synthesis.get_voices().iter() {
let voice: SpeechSynthesisVoice = voice.into();
if voice.default() {
return Ok(Some(voice.into()));
}
}
} else {
return Err(Error::NoneError);
}
Ok(None)
}
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
let mut rv: Vec<Voice> = vec![];
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
rv.push(v.into());
}
Ok(rv)
} else {
Err(Error::NoneError)
}
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
if v.voice_uri() == voice.id {
self.voice = Some(v);
return Ok(());
}
}
return Err(Error::OperationFailed);
} else {
Err(Error::NoneError)
}
}
}
impl Drop for Web {
@ -204,3 +261,15 @@ impl Drop for Web {
mappings.retain(|v| v.0 != self.id);
}
}
impl From<SpeechSynthesisVoice> for Voice {
fn from(other: SpeechSynthesisVoice) -> Self {
let language = LanguageIdentifier::from_str(&other.lang()).unwrap();
Voice {
id: other.voice_uri(),
name: other.name(),
gender: None,
language,
}
}
}

View File

@ -1,30 +1,31 @@
#[cfg(windows)]
use std::collections::{HashMap, VecDeque};
use std::sync::Mutex;
use std::{
collections::{HashMap, VecDeque},
str::FromStr,
sync::Mutex,
};
use lazy_static::lazy_static;
use log::{info, trace};
mod bindings;
use bindings::Windows::{
use unic_langid::LanguageIdentifier;
use windows::{
Foundation::TypedEventHandler,
Media::{
Core::MediaSource,
Playback::{MediaPlayer, MediaPlayerAudioCategory},
SpeechSynthesis::SpeechSynthesizer,
SpeechSynthesis::{SpeechSynthesizer, VoiceGender, VoiceInformation},
},
};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
impl From<windows::Error> for Error {
fn from(e: windows::Error) -> Self {
impl From<windows::core::Error> for Error {
fn from(e: windows::core::Error) -> Self {
Error::WinRt(e)
}
}
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct WinRt {
id: BackendId,
synth: SpeechSynthesizer,
@ -32,6 +33,7 @@ pub struct WinRt {
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
}
struct Utterance {
@ -40,6 +42,7 @@ struct Utterance {
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
}
lazy_static! {
@ -81,7 +84,7 @@ impl WinRt {
backend_to_speech_synthesizer.insert(bid, synth.clone());
drop(backend_to_speech_synthesizer);
let bid_clone = bid;
player.MediaEnded(TypedEventHandler::new(
player.MediaEnded(&TypedEventHandler::new(
move |sender: &Option<MediaPlayer>, _args| {
if let Some(sender) = sender {
let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
@ -105,13 +108,14 @@ impl WinRt {
tts.Options()?.SetSpeakingRate(utterance.rate.into())?;
tts.Options()?.SetAudioPitch(utterance.pitch.into())?;
tts.Options()?.SetAudioVolume(utterance.volume.into())?;
let stream = tts
.SynthesizeTextToStreamAsync(utterance.text.as_str())?
.get()?;
tts.SetVoice(&utterance.voice)?;
let text = &utterance.text;
let stream =
tts.SynthesizeTextToStreamAsync(&text.into())?.get()?;
let content_type = stream.ContentType()?;
let source =
MediaSource::CreateFromStream(stream, content_type)?;
sender.SetSource(source)?;
MediaSource::CreateFromStream(&stream, &content_type)?;
sender.SetSource(&source)?;
sender.Play()?;
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance.id);
@ -132,6 +136,7 @@ impl WinRt {
rate: 1.,
pitch: 1.,
volume: 1.,
voice: SpeechSynthesizer::DefaultVoice()?,
})
}
}
@ -148,6 +153,8 @@ impl Backend for WinRt {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
@ -177,6 +184,7 @@ impl Backend for WinRt {
rate: self.rate,
pitch: self.pitch,
volume: self.volume,
voice: self.voice.clone(),
};
utterances.push_back(utterance);
}
@ -185,10 +193,14 @@ impl Backend for WinRt {
self.synth.Options()?.SetSpeakingRate(self.rate.into())?;
self.synth.Options()?.SetAudioPitch(self.pitch.into())?;
self.synth.Options()?.SetAudioVolume(self.volume.into())?;
let stream = self.synth.SynthesizeTextToStreamAsync(text)?.get()?;
self.synth.SetVoice(&self.voice)?;
let stream = self
.synth
.SynthesizeTextToStreamAsync(&text.into())?
.get()?;
let content_type = stream.ContentType()?;
let source = MediaSource::CreateFromStream(stream, content_type)?;
self.player.SetSource(source)?;
let source = MediaSource::CreateFromStream(&stream, &content_type)?;
self.player.SetSource(&source)?;
self.player.Play()?;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
@ -292,6 +304,31 @@ impl Backend for WinRt {
let utterances = utterances.get(&self.id).unwrap();
Ok(!utterances.is_empty())
}
fn voice(&self) -> Result<Option<Voice>, Error> {
let voice = self.synth.Voice()?;
let voice = voice.try_into()?;
Ok(Some(voice))
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let mut rv: Vec<Voice> = vec![];
for voice in SpeechSynthesizer::AllVoices()? {
rv.push(voice.try_into()?);
}
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in SpeechSynthesizer::AllVoices()? {
let vid: String = v.Id()?.try_into()?;
if vid == voice.id {
self.voice = v;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for WinRt {
@ -305,3 +342,24 @@ impl Drop for WinRt {
utterances.remove(&id);
}
}
impl TryInto<Voice> for VoiceInformation {
type Error = Error;
fn try_into(self) -> Result<Voice, Self::Error> {
let gender = self.Gender()?;
let gender = if gender == VoiceGender::Male {
Gender::Male
} else {
Gender::Female
};
let language: String = self.Language()?.try_into()?;
let language = LanguageIdentifier::from_str(&language).unwrap();
Ok(Voice {
id: self.Id()?.try_into()?,
name: self.DisplayName()?.try_into()?,
gender: Some(gender),
language,
})
}
}

View File

@ -1 +0,0 @@
::windows::include_bindings!();

View File

@ -1,22 +1,23 @@
/*!
* a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
* Currently supported backends are:
* * Windows
* * Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
* * WinRT
* * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* * MacOS/iOS
* * AppKit on MacOS 10.13 and below
* * AVFoundation on MacOS 10.14 and above, and iOS
* * Android
* * WebAssembly
*/
//! * a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
//! * Currently supported backends are:
//! * * Windows
//! * * Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
//! * * WinRT
//! * * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
//! * * MacOS/iOS
//! * * AppKit on MacOS 10.13 and below
//! * * AVFoundation on MacOS 10.14 and above, and iOS
//! * * Android
//! * * WebAssembly
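
As a rough illustration of the high-level interface these crate docs describe, here is a minimal usage sketch (not part of this diff) that mirrors the examples shown earlier; it assumes a platform backend is available via Tts::default():

use tts::{Error, Tts};

fn main() -> Result<(), Error> {
    // Tts::default() selects the platform's default backend
    // (Speech Dispatcher, WinRT, AVFoundation, Web, Android, ...).
    let mut tts = Tts::default()?;
    // The second argument controls whether speech already in progress is interrupted.
    tts.speak("Hello, world.", false)?;
    Ok(())
}
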
use std::boxed::Box;
use std::collections::HashMap;
#[cfg(target_os = "macos")]
use std::ffi::CStr;
use std::sync::Mutex;
use std::fmt;
#[cfg(windows)]
use std::string::FromUtf16Error;
use std::sync::{Arc, Mutex};
use std::{boxed::Box, sync::RwLock};
#[cfg(any(target_os = "macos", target_os = "ios"))]
use cocoa_foundation::base::id;
@ -26,59 +27,134 @@ use lazy_static::lazy_static;
use libc::c_char;
#[cfg(target_os = "macos")]
use objc::{class, msg_send, sel, sel_impl};
#[cfg(target_os = "linux")]
use speech_dispatcher::Error as SpeechDispatcherError;
use thiserror::Error;
#[cfg(all(windows, feature = "tolk"))]
use tolk::Tolk;
pub use unic_langid::LanguageIdentifier;
mod backends;
#[cfg(feature = "ffi")]
pub mod ffi;
#[repr(C)]
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Backends {
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(all(windows, feature = "tolk"))]
Tolk,
#[cfg(windows)]
WinRt,
#[cfg(target_os = "android")]
Android,
#[cfg(target_os = "macos")]
AppKit,
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation,
#[cfg(target_os = "android")]
Android,
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(all(windows, feature = "tolk"))]
Tolk,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
WinRt,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
impl fmt::Display for Backends {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
Backends::Android => writeln!(f, "Android"),
#[cfg(target_os = "macos")]
Backends::AppKit => writeln!(f, "AppKit"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => writeln!(f, "AVFoundation"),
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => writeln!(f, "Speech Dispatcher"),
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => writeln!(f, "Tolk"),
#[cfg(target_arch = "wasm32")]
Backends::Web => writeln!(f, "Web"),
#[cfg(windows)]
Backends::WinRt => writeln!(f, "Windows Runtime"),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum BackendId {
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(u64),
#[cfg(target_os = "android")]
Android(u64),
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum UtteranceId {
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
}
impl fmt::Display for BackendId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
BackendId::Android(id) => writeln!(f, "{}", id),
#[cfg(any(target_os = "macos", target_os = "ios"))]
BackendId::AvFoundation(id) => writeln!(f, "{}", id),
#[cfg(target_os = "linux")]
BackendId::SpeechDispatcher(id) => writeln!(f, "{}", id),
#[cfg(target_arch = "wasm32")]
BackendId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
BackendId::WinRt(id) => writeln!(f, "{}", id),
}
}
}
// # Note
//
// Most trait implementations are blocked by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[derive(Debug)]
#[cfg_attr(
not(any(target_os = "macos", target_os = "ios")),
derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)
)]
#[cfg_attr(
all(feature = "serde", not(any(target_os = "macos", target_os = "ios"))),
derive(serde::Serialize, serde::Deserialize)
)]
pub enum UtteranceId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
}
// # Note
//
// Display is not implemented by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
impl fmt::Display for UtteranceId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
UtteranceId::Android(id) => writeln!(f, "{}", id),
#[cfg(target_os = "linux")]
UtteranceId::SpeechDispatcher(id) => writeln!(f, "{}", id),
#[cfg(target_arch = "wasm32")]
UtteranceId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
UtteranceId::WinRt(id) => writeln!(f, "{}", id),
}
}
}
unsafe impl Send for UtteranceId {}
@ -86,25 +162,28 @@ unsafe impl Send for UtteranceId {}
unsafe impl Sync for UtteranceId {}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Features {
pub stop: bool,
pub rate: bool,
pub pitch: bool,
pub volume: bool,
pub is_speaking: bool,
pub pitch: bool,
pub rate: bool,
pub stop: bool,
pub utterance_callbacks: bool,
pub voice: bool,
pub get_voice: bool,
pub volume: bool,
}
impl Default for Features {
fn default() -> Self {
Self {
stop: false,
rate: false,
pitch: false,
volume: false,
is_speaking: false,
utterance_callbacks: false,
}
impl fmt::Display for Features {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
writeln!(f, "{:#?}", self)
}
}
impl Features {
pub fn new() -> Self {
Self::default()
}
}
@ -117,11 +196,17 @@ pub enum Error {
#[error("Operation failed")]
OperationFailed,
#[cfg(target_arch = "wasm32")]
#[error("JavaScript error: [0])]")]
#[error("JavaScript error: [0]")]
JavaScriptError(wasm_bindgen::JsValue),
#[cfg(target_os = "linux")]
#[error("Speech Dispatcher error: {0}")]
SpeechDispatcher(#[from] SpeechDispatcherError),
#[cfg(windows)]
#[error("WinRT error")]
WinRt(windows::Error),
WinRt(windows::core::Error),
#[cfg(windows)]
#[error("UTF string conversion failed")]
UtfStringConversionFailed(#[from] FromUtf16Error),
#[error("Unsupported feature")]
UnsupportedFeature,
#[error("Out of range")]
@ -132,7 +217,7 @@ pub enum Error {
}
#[clonable]
trait Backend: Clone {
pub trait Backend: Clone {
fn id(&self) -> Option<BackendId>;
fn supported_features(&self) -> Features;
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error>;
@ -153,6 +238,9 @@ trait Backend: Clone {
fn get_volume(&self) -> Result<f32, Error>;
fn set_volume(&mut self, volume: f32) -> Result<(), Error>;
fn is_speaking(&self) -> Result<bool, Error>;
fn voices(&self) -> Result<Vec<Voice>, Error>;
fn voice(&self) -> Result<Option<Voice>, Error>;
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error>;
}
#[derive(Default)]
@ -174,30 +262,31 @@ lazy_static! {
}
#[derive(Clone)]
pub struct Tts(Box<dyn Backend>);
pub struct Tts(Arc<RwLock<Box<dyn Backend>>>);
unsafe impl Send for Tts {}
unsafe impl Sync for Tts {}
impl Tts {
/**
* Create a new `TTS` instance with the specified backend.
*/
/// Create a new `TTS` instance with the specified backend.
pub fn new(backend: Backends) -> Result<Tts, Error> {
let backend = match backend {
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => Ok(Tts(Box::new(backends::SpeechDispatcher::new()))),
Backends::SpeechDispatcher => {
let tts = backends::SpeechDispatcher::new()?;
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_arch = "wasm32")]
Backends::Web => {
let tts = backends::Web::new()?;
Ok(Tts(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => {
let tts = backends::Tolk::new();
if let Some(tts) = tts {
Ok(Tts(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
} else {
Err(Error::NoneError)
}
@ -205,20 +294,24 @@ impl Tts {
#[cfg(windows)]
Backends::WinRt => {
let tts = backends::WinRt::new()?;
Ok(Tts(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_os = "macos")]
Backends::AppKit => Ok(Tts(Box::new(backends::AppKit::new()))),
Backends::AppKit => Ok(Tts(Arc::new(RwLock::new(Box::new(
backends::AppKit::new()?
))))),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => Ok(Tts(Box::new(backends::AvFoundation::new()))),
Backends::AvFoundation => Ok(Tts(Arc::new(RwLock::new(Box::new(
backends::AvFoundation::new()?,
))))),
#[cfg(target_os = "android")]
Backends::Android => {
let tts = backends::Android::new()?;
Ok(Tts(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
};
if let Ok(backend) = backend {
if let Some(id) = backend.0.id() {
if let Some(id) = backend.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.insert(id, Callbacks::default());
}
@ -267,82 +360,70 @@ impl Tts {
tts
}
/**
* Returns the features supported by this TTS engine
*/
/// Returns the features supported by this TTS engine
pub fn supported_features(&self) -> Features {
self.0.supported_features()
self.0.read().unwrap().supported_features()
}
/**
* Speaks the specified text, optionally interrupting current speech.
*/
/// Speaks the specified text, optionally interrupting current speech.
pub fn speak<S: Into<String>>(
&mut self,
text: S,
interrupt: bool,
) -> Result<Option<UtteranceId>, Error> {
self.0.speak(text.into().as_str(), interrupt)
self.0
.write()
.unwrap()
.speak(text.into().as_str(), interrupt)
}
/**
* Stops current speech.
*/
/// Stops current speech.
pub fn stop(&mut self) -> Result<&Self, Error> {
let Features { stop, .. } = self.supported_features();
if stop {
self.0.stop()?;
self.0.write().unwrap().stop()?;
Ok(self)
} else {
Err(Error::UnsupportedFeature)
}
}
/**
* Returns the minimum rate for this speech synthesizer.
*/
/// Returns the minimum rate for this speech synthesizer.
pub fn min_rate(&self) -> f32 {
self.0.min_rate()
self.0.read().unwrap().min_rate()
}
/**
* Returns the maximum rate for this speech synthesizer.
*/
/// Returns the maximum rate for this speech synthesizer.
pub fn max_rate(&self) -> f32 {
self.0.max_rate()
self.0.read().unwrap().max_rate()
}
/**
* Returns the normal rate for this speech synthesizer.
*/
/// Returns the normal rate for this speech synthesizer.
pub fn normal_rate(&self) -> f32 {
self.0.normal_rate()
self.0.read().unwrap().normal_rate()
}
/**
* Gets the current speech rate.
*/
/// Gets the current speech rate.
pub fn get_rate(&self) -> Result<f32, Error> {
let Features { rate, .. } = self.supported_features();
if rate {
self.0.get_rate()
self.0.read().unwrap().get_rate()
} else {
Err(Error::UnsupportedFeature)
}
}
/**
* Sets the desired speech rate.
*/
/// Sets the desired speech rate.
pub fn set_rate(&mut self, rate: f32) -> Result<&Self, Error> {
let Features {
rate: rate_feature, ..
} = self.supported_features();
if rate_feature {
if rate < self.0.min_rate() || rate > self.0.max_rate() {
let mut backend = self.0.write().unwrap();
if rate < backend.min_rate() || rate > backend.max_rate() {
Err(Error::OutOfRange)
} else {
self.0.set_rate(rate)?;
backend.set_rate(rate)?;
Ok(self)
}
} else {
@ -350,52 +431,43 @@ impl Tts {
}
}
/**
* Returns the minimum pitch for this speech synthesizer.
*/
/// Returns the minimum pitch for this speech synthesizer.
pub fn min_pitch(&self) -> f32 {
self.0.min_pitch()
self.0.read().unwrap().min_pitch()
}
/**
* Returns the maximum pitch for this speech synthesizer.
*/
/// Returns the maximum pitch for this speech synthesizer.
pub fn max_pitch(&self) -> f32 {
self.0.max_pitch()
self.0.read().unwrap().max_pitch()
}
/**
* Returns the normal pitch for this speech synthesizer.
*/
/// Returns the normal pitch for this speech synthesizer.
pub fn normal_pitch(&self) -> f32 {
self.0.normal_pitch()
self.0.read().unwrap().normal_pitch()
}
/**
* Gets the current speech pitch.
*/
/// Gets the current speech pitch.
pub fn get_pitch(&self) -> Result<f32, Error> {
let Features { pitch, .. } = self.supported_features();
if pitch {
self.0.get_pitch()
self.0.read().unwrap().get_pitch()
} else {
Err(Error::UnsupportedFeature)
}
}
/**
* Sets the desired speech pitch.
*/
/// Sets the desired speech pitch.
pub fn set_pitch(&mut self, pitch: f32) -> Result<&Self, Error> {
let Features {
pitch: pitch_feature,
..
} = self.supported_features();
if pitch_feature {
if pitch < self.0.min_pitch() || pitch > self.0.max_pitch() {
let mut backend = self.0.write().unwrap();
if pitch < backend.min_pitch() || pitch > backend.max_pitch() {
Err(Error::OutOfRange)
} else {
self.0.set_pitch(pitch)?;
backend.set_pitch(pitch)?;
Ok(self)
}
} else {
@ -403,52 +475,43 @@ impl Tts {
}
}
/**
* Returns the minimum volume for this speech synthesizer.
*/
/// Returns the minimum volume for this speech synthesizer.
pub fn min_volume(&self) -> f32 {
self.0.min_volume()
self.0.read().unwrap().min_volume()
}
/**
* Returns the maximum volume for this speech synthesizer.
*/
/// Returns the maximum volume for this speech synthesizer.
pub fn max_volume(&self) -> f32 {
self.0.max_volume()
self.0.read().unwrap().max_volume()
}
/**
* Returns the normal volume for this speech synthesizer.
*/
/// Returns the normal volume for this speech synthesizer.
pub fn normal_volume(&self) -> f32 {
self.0.normal_volume()
self.0.read().unwrap().normal_volume()
}
/**
* Gets the current speech volume.
*/
/// Gets the current speech volume.
pub fn get_volume(&self) -> Result<f32, Error> {
let Features { volume, .. } = self.supported_features();
if volume {
self.0.get_volume()
self.0.read().unwrap().get_volume()
} else {
Err(Error::UnsupportedFeature)
}
}
/**
* Sets the desired speech volume.
*/
/// Sets the desired speech volume.
pub fn set_volume(&mut self, volume: f32) -> Result<&Self, Error> {
let Features {
volume: volume_feature,
..
} = self.supported_features();
if volume_feature {
if volume < self.0.min_volume() || volume > self.0.max_volume() {
let mut backend = self.0.write().unwrap();
if volume < backend.min_volume() || volume > backend.max_volume() {
Err(Error::OutOfRange)
} else {
self.0.set_volume(volume)?;
backend.set_volume(volume)?;
Ok(self)
}
} else {
@ -456,21 +519,50 @@ impl Tts {
}
}
/**
* Returns whether this speech synthesizer is speaking.
*/
/// Returns whether this speech synthesizer is speaking.
pub fn is_speaking(&self) -> Result<bool, Error> {
let Features { is_speaking, .. } = self.supported_features();
if is_speaking {
self.0.is_speaking()
self.0.read().unwrap().is_speaking()
} else {
Err(Error::UnsupportedFeature)
}
}
/**
* Called when this speech synthesizer begins speaking an utterance.
*/
/// Returns list of available voices.
pub fn voices(&self) -> Result<Vec<Voice>, Error> {
let Features { voice, .. } = self.supported_features();
if voice {
self.0.read().unwrap().voices()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Return the current speaking voice.
pub fn voice(&self) -> Result<Option<Voice>, Error> {
let Features { get_voice, .. } = self.supported_features();
if get_voice {
self.0.read().unwrap().voice()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Set speaking voice.
pub fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
let Features {
voice: voice_feature,
..
} = self.supported_features();
if voice_feature {
self.0.write().unwrap().set_voice(voice)
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer begins speaking an utterance.
pub fn on_utterance_begin(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
@ -481,7 +573,7 @@ impl Tts {
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.id().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_begin = callback;
Ok(())
@ -490,9 +582,7 @@ impl Tts {
}
}
/**
* Called when this speech synthesizer finishes speaking an utterance.
*/
/// Called when this speech synthesizer finishes speaking an utterance.
pub fn on_utterance_end(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
@ -503,7 +593,7 @@ impl Tts {
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.id().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_end = callback;
Ok(())
@ -512,9 +602,7 @@ impl Tts {
}
}
/**
* Called when this speech synthesizer is stopped and still has utterances in its queue.
*/
/// Called when this speech synthesizer is stopped and still has utterances in its queue.
pub fn on_utterance_stop(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
@ -525,7 +613,7 @@ impl Tts {
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.id().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_stop = callback;
Ok(())
@ -555,9 +643,43 @@ impl Tts {
impl Drop for Tts {
fn drop(&mut self) {
if let Some(id) = self.0.id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
if Arc::strong_count(&self.0) <= 1 {
if let Some(id) = self.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
}
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Gender {
Male,
Female,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Voice {
pub(crate) id: String,
pub(crate) name: String,
pub(crate) gender: Option<Gender>,
pub(crate) language: LanguageIdentifier,
}
impl Voice {
pub fn id(&self) -> String {
self.id.clone()
}
pub fn name(&self) -> String {
self.name.clone()
}
pub fn gender(&self) -> Option<Gender> {
self.gender
}
pub fn language(&self) -> LanguageIdentifier {
self.language.clone()
}
}
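
To tie the new pieces together, here is a short sketch of the voice-selection API added above. It follows the pattern of the hello_world example earlier in this diff; whether voices()/voice() are usable is gated on the voice and get_voice feature flags, and Tts::default() is assumed to pick a backend that supports them:

use tts::{Error, Features, Tts};

fn main() -> Result<(), Error> {
    let mut tts = Tts::default()?;
    let Features {
        voice, get_voice, ..
    } = tts.supported_features();
    if voice {
        let voices = tts.voices()?;
        // Remember the current voice, where the backend can report it, so it can be restored.
        let original = if get_voice { tts.voice()? } else { None };
        for v in &voices {
            tts.set_voice(v)?;
            tts.speak(format!("This is {}.", v.name()), false)?;
        }
        if let Some(original) = original {
            tts.set_voice(&original)?;
        }
    }
    Ok(())
}
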