Merge branch 'master' into feature/voices

francois-caddet 2022-03-20 13:02:37 +01:00
commit 88f4598ec6
54 changed files with 2181 additions and 445 deletions

View File

@ -1,81 +1,103 @@
name: Release
on:
push:
tags:
- "v*"
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
rustup update
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
build_windows:
name: Build Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
rustup update
cargo build --release
build_macos:
name: Build MacOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
rustup update
cargo build --release
build_ios:
name: Build iOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
rustup update
rustup target add aarch64-apple-ios x86_64-apple-ios
cargo install cargo-lipo
cargo lipo --release
publish_winrt_bindings:
name: Publish winrt_bindings
runs-on: windows-latest
needs: [build_windows]
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
rustup update
cargo login $CARGO_TOKEN
cd winrt_bindings
cargo package
cargo publish || true
publish:
name: Publish
runs-on: ubuntu-latest
needs: [build_linux, build_windows, build_macos, build_ios]
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
rustup update
cargo login $CARGO_TOKEN
cargo publish
name: Release
on:
push:
tags:
- "v*"
jobs:
check:
name: Check
strategy:
matrix:
os: [windows-latest, ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- run: sudo apt-get update; sudo apt-get install -y libspeechd-dev
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/cargo@v1
with:
command: check
args: --all-features --examples
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --examples
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --no-default-features
if: ${{ runner.os == 'Linux' }}
check_web:
name: Check Web
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- uses: actions-rs/toolchain@v1
with:
target: wasm32-unknown-unknown
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/cargo@v1
with:
command: check
args: --all-features --examples --target wasm32-unknown-unknown
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --target wasm32-unknown-unknown
- uses: actions-rs/install@v0.1
with:
crate: cargo-make
- uses: actions-rs/cargo@v1
with:
command: make
args: build-web-example
publish:
name: Publish
runs-on: ubuntu-latest
needs: [check, check_web]
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- uses: actions-rs/toolchain@v1
with:
target: wasm32-unknown-unknown
profile: minimal
toolchain: stable
override: true
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo login $CARGO_TOKEN
cargo publish --no-default-features

View File

@ -1,49 +1,117 @@
name: Test
on:
push:
pull_request:
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
rustup update
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
build_windows:
name: Build Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
rustup update
cargo build --release
build_macos:
name: Build MacOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
rustup update
cargo build --release
build_ios:
name: Build iOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
rustup update
rustup target add aarch64-apple-ios x86_64-apple-ios
cargo install cargo-lipo
cargo lipo --release
name: Test
on:
push:
pull_request:
jobs:
check:
name: Check
strategy:
matrix:
os: [windows-latest, ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- run: sudo apt-get update; sudo apt-get install -y libspeechd-dev
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/cargo@v1
with:
command: check
args: --all-features --examples
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --examples
if: ${{ runner.os == 'Linux' }}
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features
if: ${{ runner.os != 'Linux' }}
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --no-default-features
if: ${{ runner.os == 'Linux' }}
check_web:
name: Check Web
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- uses: actions-rs/toolchain@v1
with:
target: wasm32-unknown-unknown
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/cargo@v1
with:
command: check
args: --all-features --examples --target wasm32-unknown-unknown
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all --check
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --target wasm32-unknown-unknown
check_android:
name: Check Android
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/install@v0.1
with:
crate: cargo-apk
# use-tool-cache: true
- run: rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android
- uses: actions-rs/cargo@v1
with:
command: apk
args: build
check_web_example:
name: Check Web Example
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: Swatinem/rust-cache@v1
- uses: actions-rs/toolchain@v1
with:
target: wasm32-unknown-unknown
profile: minimal
toolchain: stable
components: rustfmt, clippy
override: true
- uses: actions-rs/install@v0.1
with:
crate: cargo-make
- uses: actions-rs/cargo@v1
with:
command: make
args: build-web-example

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
Cargo.lock
target
*.dll

View File

@ -1,48 +1,57 @@
[package]
name = "tts"
version = "0.8.0"
version = "0.21.1"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
repository = "https://github.com/ndarilek/tts-rs"
description = "High-level Text-To-Speech (TTS) interface"
license = "MIT"
exclude = ["*.cfg", "*.yml"]
edition = "2018"
edition = "2021"
[package.metadata.patch.speech-dispatcher]
version = "0.7.0"
patches = [
"speech-dispatcher.patch"
]
#patches = [
# "speech-dispatcher.patch"
#]
[patch.crates-io]
speech-dispatcher = { path = './target/patch/speech-dispatcher-0.7.0'}
#[patch.crates-io]
#speech-dispatcher = { path = './target/patch/speech-dispatcher-0.7.0'}
[lib]
crate-type = ["lib", "cdylib", "staticlib"]
[features]
speech_dispatcher_0_10 = ["speech-dispatcher/0_10"]
default = ["speech_dispatcher_0_10"]
[dependencies]
dyn-clonable = "0.9"
lazy_static = "1"
log = "0.4"
thiserror = "1"
unic-langid = "0.9.0"
serde = { version = "1.0", optional = true, features = ["derive"] }
[dev-dependencies]
env_logger = "0.7"
env_logger = "0.9"
[target.'cfg(windows)'.dependencies]
tolk = ">= 0.2.1"
winrt = "0.7"
tts_winrt_bindings = { version = "0.1", path="winrt_bindings" }
tolk = { version = "0.5", optional = true }
windows = { version = "0.34", features = ["alloc", "Foundation", "Media_Core", "Media_Playback", "Media_SpeechSynthesis", "Storage_Streams"] }
[target.'cfg(target_os = "linux")'.dependencies]
speech-dispatcher = "0.7"
speech-dispatcher = { version = "0.13", default-features = false }
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
cocoa-foundation = "0.1"
core-foundation = "0.9"
libc = "0.2"
objc = "0.2"
objc = { version = "0.2", features = ["exception"] }
[target.wasm32-unknown-unknown.dependencies]
wasm-bindgen = "0.2"
web-sys = { version = "0.3", features = ["EventTarget", "SpeechSynthesis", "SpeechSynthesisEvent", "SpeechSynthesisUtterance", "Window", ] }
web-sys = { version = "0.3", features = ["EventTarget", "SpeechSynthesis", "SpeechSynthesisErrorCode", "SpeechSynthesisErrorEvent", "SpeechSynthesisEvent", "SpeechSynthesisUtterance", "Window", ] }
[target.'cfg(target_os="android")'.dependencies]
jni = "0.19"
ndk-glue = "0.6"

33
Makefile.toml Normal file
View File

@ -0,0 +1,33 @@
[tasks.build-android-example]
script = [
"cd examples/android",
"./gradlew assembleDebug",
]
[tasks.run-android-example]
script = [
"cd examples/android",
"./gradlew runDebug",
]
[tasks.log-android]
command = "adb"
args = ["logcat", "RustStdoutStderr:D", "*:S"]
[tasks.install-trunk]
install_crate = { crate_name = "trunk", binary = "trunk", test_arg = "--help" }
[tasks.install-wasm-bindgen-cli]
install_crate = { crate_name = "wasm-bindgen-cli", binary = "wasm-bindgen", test_arg = "--help" }
[tasks.build-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["build"]
[tasks.run-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["serve"]

View File

@ -3,10 +3,23 @@
This library provides a high-level Text-To-Speech (TTS) interface supporting various backends. Currently supported backends are:
* Windows
* Screen readers/SAPI via Tolk
* Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
* WinRT
* Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* MacOS
* MacOS/iOS
* AppKit on MacOS 10.13 and below
* AVFoundation on MacOS 10.14 and above, and iOS
* Android
* WebAssembly
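
Regardless of backend, the API is the same. The following is a minimal sketch adapted from the `hello_world` example added elsewhere in this commit; it assumes only the crate's own `Tts`, `Features`, and `Error` types (on MacOS a run loop may additionally be required, as the examples show):

```rust
use tts::*;

fn main() -> Result<(), Error> {
    // Picks whichever backend is available on the current platform.
    let mut tts = Tts::default()?;
    // Not every backend supports every feature, so query before using one.
    let Features { rate, .. } = tts.supported_features();
    if rate {
        tts.set_rate(tts.normal_rate())?;
    }
    // `false` queues the utterance instead of interrupting current speech.
    tts.speak("Hello, world.", false)?;
    Ok(())
}
```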
## Android Setup
On most platforms, this library is plug-and-play. Because of JNI's complexity, Android setup is a bit more involved. In general, look to the Android example for guidance. Here are some rough steps to get going:
* Set up _Cargo.toml_ as the example does. Be sure to depend on `ndk-glue`.
* Place _Bridge.java_ appropriately in your app. This is needed to support various Android TTS callbacks.
* Create a main activity similar to _MainActivity.kt_. In particular, you need to derive `android.app.NativeActivity`, and you need a `System.loadLibrary(...)` call appropriate for your app. `System.loadLibrary(...)` is needed to trigger `JNI_OnLoad`.
  * Even though you've loaded the library in your main activity, add a metadata tag to your activity in _AndroidManifest.xml_ referencing it. Yes, this is redundant but necessary.
* Set up your various build.gradle scripts to reference the plugins, dependencies, etc. from the example. In particular, you'll want to set up [cargo-ndk-android-gradle](https://github.com/willir/cargo-ndk-android-gradle/) and either [depend on androidx.annotation](https://developer.android.com/reference/androidx/annotation/package-summary) or otherwise configure your app to keep the class _rs.tts.Bridge_.
And I think that should about do it. Good luck!
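
For orientation, a rough sketch of the Rust entry point is below. It mirrors the `hello_world` example and `MainActivity.kt` added in this commit, not a drop-in file; the library name is whatever your own crate is called, matching the `System.loadLibrary(...)` call and the manifest metadata tag:

```rust
use tts::*;

// ndk-glue provides the native entry point that the NativeActivity
// (see MainActivity.kt) reaches after System.loadLibrary(...).
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
    run().expect("Failed to run");
}

fn run() -> Result<(), Error> {
    let mut tts = Tts::default()?;
    tts.speak("Hello from Android.", false)?;
    // Keep the activity alive so the utterance callbacks can fire.
    loop {}
}
```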

View File

@ -1,7 +1,10 @@
fn main() {
if std::env::var("TARGET").unwrap().contains("-apple") {
println!("cargo:rustc-link-lib=framework=AVFoundation");
if !std::env::var("CARGO_CFG_TARGET_OS").unwrap().contains("ios") {
if !std::env::var("CARGO_CFG_TARGET_OS")
.unwrap()
.contains("ios")
{
println!("cargo:rustc-link-lib=framework=AppKit");
}
}

39
examples/99bottles.rs Normal file
View File

@ -0,0 +1,39 @@
use std::io;
use std::{thread, time};
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut bottles = 99;
while bottles > 0 {
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
tts.speak(format!("{} bottles of beer,", bottles), false)?;
tts.speak("Take one down, pass it around", false)?;
tts.speak("Give us a bit to drink this...", false)?;
let time = time::Duration::from_secs(15);
thread::sleep(time);
bottles -= 1;
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
}
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

16
examples/android/.gitignore vendored Normal file
View File

@ -0,0 +1,16 @@
*.iml
.gradle
/local.properties
/.idea/caches
/.idea/libraries
/.idea/modules.xml
/.idea/workspace.xml
/.idea/navEditor.xml
/.idea/assetWizardSettings.xml
.DS_Store
/build
/captures
.externalNativeBuild
.cxx
local.properties
Cargo.lock

3
examples/android/.idea/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<bytecodeTargetLevel target="1.6" />
</component>
</project>

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GradleMigrationSettings" migrationVersion="1" />
<component name="GradleSettings">
<option name="linkedExternalProjectsSettings">
<GradleProjectSettings>
<option name="testRunner" value="PLATFORM" />
<option name="distributionType" value="DEFAULT_WRAPPED" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="gradleJvm" value="11" />
<option name="modules">
<set>
<option value="$PROJECT_DIR$" />
</set>
</option>
<option name="resolveModulePerSourceSet" value="false" />
<option name="useQualifiedModuleNames" value="true" />
</GradleProjectSettings>
</option>
</component>
</project>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteRepositoriesConfiguration">
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Maven Central repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="jboss.community" />
<option name="name" value="JBoss Community repository" />
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
</remote-repository>
<remote-repository>
<option name="id" value="BintrayJCenter" />
<option name="name" value="BintrayJCenter" />
<option name="url" value="https://jcenter.bintray.com/" />
</remote-repository>
<remote-repository>
<option name="id" value="Google" />
<option name="name" value="Google" />
<option name="url" value="https://dl.google.com/dl/android/maven2/" />
</remote-repository>
</component>
</project>

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_6" default="false" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/build/classes" />
</component>
<component name="ProjectType">
<option name="id" value="Android" />
</component>
</project>

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
</component>
</project>

1
examples/android/app/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/build

View File

@ -0,0 +1,56 @@
plugins {
id "com.android.application"
id "kotlin-android"
}
android {
compileSdkVersion 30
buildToolsVersion "30.0.3"
defaultConfig {
applicationId "rs.tts"
minSdkVersion 21
targetSdkVersion 30
versionCode 1
versionName "1.0"
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro"
}
}
compileOptions {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
}
kotlinOptions {
jvmTarget = "1.8"
}
}
dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
implementation "androidx.core:core-ktx:1.2.0"
implementation "androidx.annotation:annotation:1.1.0"
implementation "com.google.android.material:material:1.1.0"
implementation "androidx.constraintlayout:constraintlayout:1.1.3"
}
apply plugin: "com.github.willir.rust.cargo-ndk-android"
cargoNdk {
module = "."
}
project.afterEvaluate {
android.applicationVariants.all { variant ->
task "run${variant.name.capitalize()}"(type: Exec, dependsOn: "install${variant.name.capitalize()}", group: "run") {
commandLine = ["adb", "shell", "monkey", "-p", variant.applicationId + " 1"]
doLast {
println "Launching ${variant.applicationId}"
}
}
}
}

21
examples/android/app/proguard-rules.pro vendored Normal file
View File

@ -0,0 +1,21 @@
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile

View File

@ -0,0 +1 @@
jniLibs

View File

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android" package="rs.tts">
<application android:allowBackup="true" android:label="@string/app_name">
<activity android:name=".MainActivity">
<meta-data android:name="android.app.lib_name" android:value="hello_world" />
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>

View File

@ -0,0 +1,24 @@
package rs.tts;
import android.speech.tts.TextToSpeech;
import android.speech.tts.UtteranceProgressListener;
@androidx.annotation.Keep
public class Bridge extends UtteranceProgressListener implements TextToSpeech.OnInitListener {
public int backendId;
public Bridge(int backendId) {
this.backendId = backendId;
}
public native void onInit(int status);
public native void onStart(String utteranceId);
public native void onStop(String utteranceId, Boolean interrupted);
public native void onDone(String utteranceId);
public native void onError(String utteranceId) ;
}

View File

@ -0,0 +1,11 @@
package rs.tts
import android.app.NativeActivity
class MainActivity : NativeActivity() {
companion object {
init {
System.loadLibrary("hello_world")
}
}
}

View File

@ -0,0 +1,3 @@
<resources>
<string name="app_name">TTS-RS</string>
</resources>

View File

@ -0,0 +1,29 @@
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
ext.kotlin_version = "1.3.72"
repositories {
google()
jcenter()
maven {
url "https://plugins.gradle.org/m2/"
}
}
dependencies {
classpath "com.android.tools.build:gradle:4.1.1"
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
classpath "gradle.plugin.com.github.willir.rust:plugin:0.3.4"
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
}
}
allprojects {
repositories {
google()
jcenter()
}
}
task clean(type: Delete) {
delete rootProject.buildDir
}

View File

@ -0,0 +1,14 @@
[package]
name = "hello_world"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["dylib"]
[dependencies]
ndk-glue = "0.6"
tts = { path = "../.." }

View File

@ -0,0 +1,21 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
# AndroidX package structure to make it clearer which packages are bundled with the
# Android operating system, and which are packaged with your app's APK
# https://developer.android.com/topic/libraries/support-library/androidx-rn
android.useAndroidX=true
# Automatically convert third-party libraries to use AndroidX
android.enableJetifier=true
# Kotlin code style for this project: "official" or "obsolete":
kotlin.code.style=official

Binary file not shown.

View File

@ -0,0 +1,6 @@
#Mon Dec 28 17:32:22 CST 2020
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-6.5-bin.zip

172
examples/android/gradlew vendored Executable file
View File

@ -0,0 +1,172 @@
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
echo "$*"
}
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "$@"

84
examples/android/gradlew.bat vendored Normal file
View File

@ -0,0 +1,84 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

View File

@ -0,0 +1 @@
include ":app"

View File

@ -0,0 +1,70 @@
use tts::*;
// The `loop {}` below only simulates an app loop.
// Without it, the `TTS` instance gets dropped before callbacks can run.
#[allow(unreachable_code)]
fn run() -> Result<(), Error> {
let mut tts = Tts::default()?;
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();
if rate {
let original_rate = tts.get_rate()?;
tts.speak(format!("Current rate: {}", original_rate), false)?;
tts.set_rate(tts.max_rate())?;
tts.speak("This is very fast.", false)?;
tts.set_rate(tts.min_rate())?;
tts.speak("This is very slow.", false)?;
tts.set_rate(tts.normal_rate())?;
tts.speak("This is the normal rate.", false)?;
tts.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts.supported_features();
if pitch {
let original_pitch = tts.get_pitch()?;
tts.set_pitch(tts.max_pitch())?;
tts.speak("This is high-pitch.", false)?;
tts.set_pitch(tts.min_pitch())?;
tts.speak("This is low pitch.", false)?;
tts.set_pitch(tts.normal_pitch())?;
tts.speak("This is normal pitch.", false)?;
tts.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts.supported_features();
if volume {
let original_volume = tts.get_volume()?;
tts.set_volume(tts.max_volume())?;
tts.speak("This is loud!", false)?;
tts.set_volume(tts.min_volume())?;
tts.speak("This is quiet.", false)?;
tts.set_volume(tts.normal_volume())?;
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
tts.speak("Goodbye.", false)?;
loop {}
Ok(())
}
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
run().expect("Failed to run");
}

89
examples/clone_drop.rs Normal file
View File

@ -0,0 +1,89 @@
use std::io;
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let mut tts_clone = tts.clone();
drop(tts);
let Features { is_speaking, .. } = tts_clone.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts_clone.is_speaking()?);
}
tts_clone.speak("Hello, world.", false)?;
let Features { rate, .. } = tts_clone.supported_features();
if rate {
let original_rate = tts_clone.get_rate()?;
tts_clone.speak(format!("Current rate: {}", original_rate), false)?;
tts_clone.set_rate(tts_clone.max_rate())?;
tts_clone.speak("This is very fast.", false)?;
tts_clone.set_rate(tts_clone.min_rate())?;
tts_clone.speak("This is very slow.", false)?;
tts_clone.set_rate(tts_clone.normal_rate())?;
tts_clone.speak("This is the normal rate.", false)?;
tts_clone.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts_clone.supported_features();
if pitch {
let original_pitch = tts_clone.get_pitch()?;
tts_clone.set_pitch(tts_clone.max_pitch())?;
tts_clone.speak("This is high-pitch.", false)?;
tts_clone.set_pitch(tts_clone.min_pitch())?;
tts_clone.speak("This is low pitch.", false)?;
tts_clone.set_pitch(tts_clone.normal_pitch())?;
tts_clone.speak("This is normal pitch.", false)?;
tts_clone.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts_clone.supported_features();
if volume {
let original_volume = tts_clone.get_volume()?;
tts_clone.set_volume(tts_clone.max_volume())?;
tts_clone.speak("This is loud!", false)?;
tts_clone.set_volume(tts_clone.min_volume())?;
tts_clone.speak("This is quiet.", false)?;
tts_clone.set_volume(tts_clone.normal_volume())?;
tts_clone.speak("This is normal volume.", false)?;
tts_clone.set_volume(original_volume)?;
}
tts_clone.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

View File

@ -11,7 +11,12 @@ use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = TTS::default()?;
let mut tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
@ -23,6 +28,13 @@ fn main() -> Result<(), Error> {
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();

14
examples/latency.rs Normal file
View File

@ -0,0 +1,14 @@
use std::io;
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
println!("Press Enter and wait for speech.");
loop {
let mut _input = String::new();
io::stdin().read_line(&mut _input)?;
tts.speak("Hello, world.", true)?;
}
}

15
examples/ramble.rs Normal file
View File

@ -0,0 +1,15 @@
use std::{thread, time};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut phrase = 1;
loop {
tts.speak(format!("Phrase {}", phrase), false)?;
let time = time::Duration::from_secs(5);
thread::sleep(time);
phrase += 1;
}
}

View File

@ -0,0 +1,2 @@
[build]
target = "wasm32-unknown-unknown"

1
examples/web/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dist

11
examples/web/Cargo.toml Normal file
View File

@ -0,0 +1,11 @@
[package]
name = "web"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
seed = "0.8"
tts = { path = "../.." }

12
examples/web/index.html Normal file
View File

@ -0,0 +1,12 @@
<!DOCTYPE html>
<html>
<head>
<title>Example</title>
</head>
<body>
<div id="app"></div>
</body>
</html>

111
examples/web/src/main.rs Normal file
View File

@ -0,0 +1,111 @@
#![allow(clippy::wildcard_imports)]
use seed::{prelude::*, *};
use tts::Tts;
#[derive(Clone)]
struct Model {
text: String,
tts: Tts,
}
#[derive(Clone)]
enum Msg {
TextChanged(String),
RateChanged(String),
PitchChanged(String),
VolumeChanged(String),
Speak,
}
fn init(_: Url, _: &mut impl Orders<Msg>) -> Model {
let tts = Tts::default().unwrap();
Model {
text: Default::default(),
tts,
}
}
fn update(msg: Msg, model: &mut Model, _: &mut impl Orders<Msg>) {
use Msg::*;
match msg {
TextChanged(text) => model.text = text,
RateChanged(rate) => {
let rate = rate.parse::<f32>().unwrap();
model.tts.set_rate(rate).unwrap();
}
PitchChanged(pitch) => {
let pitch = pitch.parse::<f32>().unwrap();
model.tts.set_pitch(pitch).unwrap();
}
VolumeChanged(volume) => {
let volume = volume.parse::<f32>().unwrap();
model.tts.set_volume(volume).unwrap();
}
Speak => {
model.tts.speak(&model.text, false).unwrap();
}
}
}
fn view(model: &Model) -> Node<Msg> {
form![
div![label![
"Text to speak",
input![
attrs! {
At::Value => model.text,
At::AutoFocus => AtValue::None,
},
input_ev(Ev::Input, Msg::TextChanged)
],
],],
div![label![
"Rate",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_rate().unwrap(),
At::Min => model.tts.min_rate(),
At::Max => model.tts.max_rate()
},
input_ev(Ev::Input, Msg::RateChanged)
],
],],
div![label![
"Pitch",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_pitch().unwrap(),
At::Min => model.tts.min_pitch(),
At::Max => model.tts.max_pitch()
},
input_ev(Ev::Input, Msg::PitchChanged)
],
],],
div![label![
"Volume",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_volume().unwrap(),
At::Min => model.tts.min_volume(),
At::Max => model.tts.max_volume()
},
input_ev(Ev::Input, Msg::VolumeChanged)
],
],],
button![
"Speak",
ev(Ev::Click, |e| {
e.prevent_default();
Msg::Speak
}),
],
]
}
fn main() {
App::start("app", init, update, view);
}

388
src/backends/android.rs Normal file
View File

@ -0,0 +1,388 @@
#[cfg(target_os = "android")]
use std::{
collections::HashSet,
ffi::{CStr, CString},
os::raw::c_void,
sync::{Mutex, RwLock},
thread,
time::{Duration, Instant},
};
use jni::{
objects::{GlobalRef, JObject, JString},
sys::{jfloat, jint, JNI_VERSION_1_6},
JNIEnv, JavaVM,
};
use lazy_static::lazy_static;
use log::{error, info};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
lazy_static! {
static ref BRIDGE: Mutex<Option<GlobalRef>> = Mutex::new(None);
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref PENDING_INITIALIZATIONS: RwLock<HashSet<u64>> = RwLock::new(HashSet::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "system" fn JNI_OnLoad(vm: JavaVM, _: *mut c_void) -> jint {
let env = vm.get_env().expect("Cannot get reference to the JNIEnv");
let b = env
.find_class("rs/tts/Bridge")
.expect("Failed to find `Bridge`");
let b = env
.new_global_ref(b)
.expect("Failed to create `Bridge` `GlobalRef`");
let mut bridge = BRIDGE.lock().unwrap();
*bridge = Some(b);
JNI_VERSION_1_6
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onInit(env: JNIEnv, obj: JObject, status: jint) {
let id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).remove(&id);
if status != 0 {
error!("Failed to initialize TTS engine");
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStart(
env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_begin.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStop(
env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onDone(
env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onError(
env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[derive(Clone)]
pub(crate) struct Android {
id: BackendId,
tts: GlobalRef,
rate: f32,
pitch: f32,
}
impl Android {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing Android backend");
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = *backend_id;
let id = BackendId::Android(bid);
*backend_id += 1;
drop(backend_id);
let native_activity = ndk_glue::native_activity();
let vm = Self::vm()?;
let env = vm.attach_current_thread_permanently()?;
let bridge = BRIDGE.lock().unwrap();
if let Some(bridge) = &*bridge {
let bridge = env.new_object(bridge, "(I)V", &[(bid as jint).into()])?;
let tts = env.new_object(
"android/speech/tts/TextToSpeech",
"(Landroid/content/Context;Landroid/speech/tts/TextToSpeech$OnInitListener;)V",
&[native_activity.activity().into(), bridge.into()],
)?;
env.call_method(
tts,
"setOnUtteranceProgressListener",
"(Landroid/speech/tts/UtteranceProgressListener;)I",
&[bridge.into()],
)?;
{
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).insert(bid);
}
let tts = env.new_global_ref(tts)?;
// This hack makes my brain bleed.
const MAX_WAIT_TIME: Duration = Duration::from_millis(500);
let start = Instant::now();
// Wait a max of 500ms for initialization, then return an error to avoid hanging.
loop {
{
let pending = PENDING_INITIALIZATIONS.read().unwrap();
if !(*pending).contains(&bid) {
break;
}
if start.elapsed() > MAX_WAIT_TIME {
return Err(Error::OperationFailed);
}
}
thread::sleep(Duration::from_millis(5));
}
Ok(Self {
id,
tts,
rate: 1.,
pitch: 1.,
})
} else {
Err(Error::NoneError)
}
}
fn vm() -> Result<JavaVM, jni::errors::Error> {
let native_activity = ndk_glue::native_activity();
let vm_ptr = native_activity.vm();
unsafe { jni::JavaVM::from_raw(vm_ptr) }
}
}
impl Backend for Android {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: false,
is_speaking: true,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
let vm = Self::vm()?;
let env = vm.get_env()?;
let tts = self.tts.as_obj();
let text = env.new_string(text)?;
let queue_mode = if interrupt { 0 } else { 1 };
let mut utterance_id = NEXT_UTTERANCE_ID.lock().unwrap();
let uid = *utterance_id;
*utterance_id += 1;
drop(utterance_id);
let id = UtteranceId::Android(uid);
let uid = env.new_string(uid.to_string())?;
let rv = env.call_method(
tts,
"speak",
"(Ljava/lang/CharSequence;ILandroid/os/Bundle;Ljava/lang/String;)I",
&[
text.into(),
queue_mode.into(),
JObject::null().into(),
uid.into(),
],
)?;
let rv = rv.i()?;
if rv == 0 {
Ok(Some(id))
} else {
Err(Error::OperationFailed)
}
}
fn stop(&mut self) -> Result<(), Error> {
let vm = Self::vm()?;
let env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "stop", "()I", &[])?;
let rv = rv.i()?;
if rv == 0 {
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_rate(&self) -> f32 {
0.1
}
fn max_rate(&self) -> f32 {
10.
}
fn normal_rate(&self) -> f32 {
1.
}
fn get_rate(&self) -> Result<f32, Error> {
Ok(self.rate)
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let env = vm.get_env()?;
let tts = self.tts.as_obj();
let rate = rate as jfloat;
let rv = env.call_method(tts, "setSpeechRate", "(F)I", &[rate.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.rate = rate;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_pitch(&self) -> f32 {
0.1
}
fn max_pitch(&self) -> f32 {
2.
}
fn normal_pitch(&self) -> f32 {
1.
}
fn get_pitch(&self) -> Result<f32, Error> {
Ok(self.pitch)
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let env = vm.get_env()?;
let tts = self.tts.as_obj();
let pitch = pitch as jfloat;
let rv = env.call_method(tts, "setPitch", "(F)I", &[pitch.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.pitch = pitch;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_volume(&self) -> f32 {
todo!()
}
fn max_volume(&self) -> f32 {
todo!()
}
fn normal_volume(&self) -> f32 {
todo!()
}
fn get_volume(&self) -> Result<f32, Error> {
todo!()
}
fn set_volume(&mut self, _volume: f32) -> Result<(), Error> {
todo!()
}
fn is_speaking(&self) -> Result<bool, Error> {
let vm = Self::vm()?;
let env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "isSpeaking", "()Z", &[])?;
let rv = rv.z()?;
Ok(rv)
}
}

View File

@ -9,6 +9,7 @@ use objc::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId};
#[derive(Clone, Debug)]
pub(crate) struct AppKit(*mut Object, *mut Object);
impl AppKit {
@ -197,7 +198,7 @@ impl Backend for AppKit {
fn is_speaking(&self) -> Result<bool, Error> {
let is_speaking: i8 = unsafe { msg_send![self.0, isSpeaking] };
Ok(is_speaking == YES)
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<String,Error> {

View File

@ -2,7 +2,7 @@
#[link(name = "AVFoundation", kind = "framework")]
use std::sync::Mutex;
use cocoa_foundation::base::{id, nil};
use cocoa_foundation::base::{id, nil, NO};
use cocoa_foundation::foundation::NSString;
use lazy_static::lazy_static;
use log::{info, trace};
@ -15,6 +15,7 @@ use crate::voices::Backend as VoiceBackend;
mod voices;
use voices::*;
#[derive(Clone, Debug)]
pub(crate) struct AvFoundation {
id: BackendId,
delegate: *mut Object,
@ -41,16 +42,22 @@ impl AvFoundation {
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_start_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
trace!("Calling utterance_begin");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_start_speech_utterance");
}
extern "C" fn speech_synthesizer_did_finish_speech_utterance(
@ -59,16 +66,46 @@ impl AvFoundation {
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_finish_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
trace!("Calling utterance_end");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_finish_speech_utterance");
}
extern "C" fn speech_synthesizer_did_cancel_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_cancel_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
trace!("Calling utterance_stop");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_cancel_speech_utterance");
}
unsafe {
@ -82,22 +119,31 @@ impl AvFoundation {
speech_synthesizer_did_finish_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
decl.add_method(
sel!(speechSynthesizer:didCancelSpeechUtterance:),
speech_synthesizer_did_cancel_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
}
let delegate_class = decl.register();
let delegate_obj: *mut Object = unsafe { msg_send![delegate_class, new] };
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let rv = unsafe {
trace!("Creating synth");
let synth: *mut Object = msg_send![class!(AVSpeechSynthesizer), new];
trace!("Allocated {:?}", synth);
delegate_obj
.as_mut()
.unwrap()
.set_ivar("backend_id", *backend_id);
trace!("Set backend ID in delegate");
let _: () = msg_send![synth, setDelegate: delegate_obj];
trace!("Assigned delegate: {:?}", delegate_obj);
AvFoundation {
id: BackendId::AvFoundation(*backend_id),
delegate: delegate_obj,
synth: synth,
synth,
rate: 0.5,
volume: 1.,
pitch: 1.,
@ -128,19 +174,28 @@ impl Backend for AvFoundation {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
if interrupt && self.is_speaking()? {
self.stop()?;
}
let utterance: id;
let mut utterance: id;
unsafe {
let str = NSString::alloc(nil).init_str(text);
trace!("Allocating utterance string");
let mut str = NSString::alloc(nil);
str = str.init_str(text);
trace!("Allocating utterance");
utterance = msg_send![class!(AVSpeechUtterance), alloc];
let _: () = msg_send![utterance, initWithString: str];
trace!("Initializing utterance");
utterance = msg_send![utterance, initWithString: str];
trace!("Setting rate to {}", self.rate);
let _: () = msg_send![utterance, setRate: self.rate];
trace!("Setting volume to {}", self.volume);
let _: () = msg_send![utterance, setVolume: self.volume];
trace!("Setting pitch to {}", self.pitch);
let _: () = msg_send![utterance, setPitchMultiplier: self.pitch];
let _: () = msg_send![utterance, setVoice: self.voice];
trace!("Enqueuing");
let _: () = msg_send![self.synth, speakUtterance: utterance];
trace!("Done queuing");
}
Ok(Some(UtteranceId::AvFoundation(utterance)))
}
@ -192,6 +247,7 @@ impl Backend for AvFoundation {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
trace!("set_pitch({})", pitch);
self.pitch = pitch;
Ok(())
}
@ -213,13 +269,15 @@ impl Backend for AvFoundation {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
trace!("set_volume({})", volume);
self.volume = volume;
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
trace!("is_speaking()");
let is_speaking: i8 = unsafe { msg_send![self.synth, isSpeaking] };
Ok(is_speaking == 1)
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<String,Error> {

View File

@ -10,7 +10,7 @@ use crate::backends::AvFoundation;
use crate::voices;
use crate::voices::Gender;
#[derive(Copy,Clone)]
#[derive(Copy,Clone, Debug)]
pub(crate) struct AVSpeechSynthesisVoice(*const Object);
impl AVSpeechSynthesisVoice {

View File

@ -1,11 +1,11 @@
#[cfg(target_os = "linux")]
mod speech_dispatcher;
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
mod tolk;
#[cfg(windows)]
pub(crate) mod winrt;
mod winrt;
#[cfg(target_arch = "wasm32")]
mod web;
@ -16,17 +16,26 @@ mod appkit;
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod av_foundation;
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "linux")]
pub(crate) use self::speech_dispatcher::*;
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
pub(crate) use self::tolk::*;
#[cfg(windows)]
pub(crate) use self::winrt::*;
#[cfg(target_arch = "wasm32")]
pub use self::web::*;
pub(crate) use self::web::*;
#[cfg(target_os = "macos")]
pub(crate) use self::appkit::*;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::av_foundation::*;
#[cfg(target_os = "android")]
pub(crate) use self::android::*;

View File

@ -1,7 +1,5 @@
#[cfg(target_os = "linux")]
use std::collections::HashMap;
use std::convert::TryInto;
use std::sync::Mutex;
use std::{collections::HashMap, sync::Mutex};
use lazy_static::*;
use log::{info, trace};
@ -9,6 +7,7 @@ use speech_dispatcher::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
#[derive(Clone, Debug)]
pub(crate) struct SpeechDispatcher(Connection);
lazy_static! {
@ -19,9 +18,9 @@ lazy_static! {
}
impl SpeechDispatcher {
pub(crate) fn new() -> Self {
pub(crate) fn new() -> std::result::Result<Self, Error> {
info!("Initializing SpeechDispatcher backend");
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded);
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded)?;
let sd = SpeechDispatcher(connection);
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(sd.0.client_id(), false);
@ -47,9 +46,16 @@ impl SpeechDispatcher {
f(utterance_id);
}
})));
sd.0.on_cancel(Some(Box::new(|_msg_id, client_id| {
sd.0.on_cancel(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id);
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
})));
sd.0.on_pause(Some(Box::new(|_msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
@ -59,7 +65,7 @@ impl SpeechDispatcher {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
})));
sd
Ok(sd)
}
}
@ -87,14 +93,14 @@ impl Backend for SpeechDispatcher {
}
let single_char = text.to_string().capacity() == 1;
if single_char {
self.0.set_punctuation(Punctuation::All);
self.0.set_punctuation(Punctuation::All)?;
}
let id = self.0.say(Priority::Important, text);
if single_char {
self.0.set_punctuation(Punctuation::None);
self.0.set_punctuation(Punctuation::None)?;
}
if let Some(id) = id {
Ok(Some(UtteranceId::SpeechDispatcher(id.try_into().unwrap())))
Ok(Some(UtteranceId::SpeechDispatcher(id)))
} else {
Err(Error::NoneError)
}
@ -102,7 +108,7 @@ impl Backend for SpeechDispatcher {
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
self.0.cancel();
self.0.cancel()?;
Ok(())
}
@ -123,7 +129,7 @@ impl Backend for SpeechDispatcher {
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
self.0.set_voice_rate(rate as i32);
self.0.set_voice_rate(rate as i32)?;
Ok(())
}
@ -144,7 +150,7 @@ impl Backend for SpeechDispatcher {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
self.0.set_voice_pitch(pitch as i32);
self.0.set_voice_pitch(pitch as i32)?;
Ok(())
}
@ -165,7 +171,7 @@ impl Backend for SpeechDispatcher {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
self.0.set_volume(volume as i32);
self.0.set_volume(volume as i32)?;
Ok(())
}
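
The constructor change above follows a standard fallible-initialization pattern; a self-contained sketch with stand-in types (not the real speech_dispatcher API):

// Hedged sketch: a constructor that propagates connection errors with `?`
// instead of panicking inside `new()`.
struct Connection;
#[derive(Debug)]
struct InitError;
impl Connection {
    fn open(name: &str) -> Result<Connection, InitError> {
        if name.is_empty() { Err(InitError) } else { Ok(Connection) }
    }
}
struct Backend(Connection);
impl Backend {
    fn new() -> Result<Self, InitError> {
        // Failure bubbles up to the caller rather than aborting the process.
        let connection = Connection::open("tts")?;
        Ok(Backend(connection))
    }
}
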

View File

@ -1,10 +1,13 @@
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
use std::sync::Arc;
use log::{info, trace};
use tolk::Tolk as TolkPtr;
use crate::{Backend, BackendId, Error, Features, UtteranceId};
pub(crate) struct Tolk(TolkPtr);
#[derive(Clone, Debug)]
pub(crate) struct Tolk(Arc<TolkPtr>);
impl Tolk {
pub(crate) fn new() -> Option<Self> {

View File

@ -5,10 +5,14 @@ use lazy_static::lazy_static;
use log::{info, trace};
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{SpeechSynthesisEvent, SpeechSynthesisUtterance};
use web_sys::{
SpeechSynthesisErrorCode, SpeechSynthesisErrorEvent, SpeechSynthesisEvent,
SpeechSynthesisUtterance,
};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
#[derive(Clone, Debug)]
pub struct Web {
id: BackendId,
rate: f32,
@ -18,6 +22,8 @@ pub struct Web {
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref UTTERANCE_MAPPINGS: Mutex<Vec<(BackendId, UtteranceId)>> = Mutex::new(Vec::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
impl Web {
@ -59,25 +65,43 @@ impl Backend for Web {
utterance.set_pitch(self.pitch);
utterance.set_volume(self.volume);
let id = self.id().unwrap();
let utterance_id = UtteranceId::Web(utterance.clone());
let callback = Closure::wrap(Box::new(move |evt: SpeechSynthesisEvent| {
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::Web(*uid);
*uid += 1;
drop(uid);
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.push((self.id, utterance_id));
drop(mappings);
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_begin.as_mut() {
let utterance_id = UtteranceId::Web(evt.utterance());
f(utterance_id);
}
}) as Box<dyn Fn(_)>);
utterance.set_onstart(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |evt: SpeechSynthesisEvent| {
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_end.as_mut() {
let utterance_id = UtteranceId::Web(evt.utterance());
f(utterance_id);
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onend(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |evt: SpeechSynthesisErrorEvent| {
if evt.error() == SpeechSynthesisErrorCode::Canceled {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_stop.as_mut() {
f(utterance_id);
}
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onerror(Some(callback.as_ref().unchecked_ref()));
if interrupt {
self.stop()?;
}
@ -186,3 +210,10 @@ impl Backend for Web {
unimplemented!()
}
}
impl Drop for Web {
fn drop(&mut self) {
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.0 != self.id);
}
}
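
A compact sketch of the ID scheme introduced here (the crate itself keeps the counter in a `lazy_static`): utterance handles become plain `u64` values drawn from a global counter, so `UtteranceId` no longer has to wrap a `SpeechSynthesisUtterance` object.

// Hedged sketch: allocate monotonically increasing utterance IDs from a
// mutex-guarded global counter.
// (A `static Mutex` initializer needs Rust 1.63+; older code uses lazy_static.)
use std::sync::Mutex;

static NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);

fn next_utterance_id() -> u64 {
    let mut next = NEXT_UTTERANCE_ID.lock().unwrap();
    let id = *next;
    *next += 1;
    id
}
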

View File

@ -1,134 +1,139 @@
#[cfg(windows)]
use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};
use std::sync::Mutex;
use lazy_static::lazy_static;
use log::{info, trace};
use winrt::ComInterface;
use tts_winrt_bindings::windows::media::playback::{
CurrentMediaPlaybackItemChangedEventArgs, MediaPlaybackItem, MediaPlaybackList,
MediaPlaybackState, MediaPlayer,
use windows::{
Foundation::TypedEventHandler,
Media::{
Core::MediaSource,
Playback::{MediaPlayer, MediaPlayerAudioCategory},
SpeechSynthesis::SpeechSynthesizer,
},
};
use tts_winrt_bindings::windows::media::speech_synthesis::SpeechSynthesizer;
use tts_winrt_bindings::windows::{foundation::TypedEventHandler, media::core::MediaSource};
use crate::{Backend, BackendId, Error, Features, UtteranceId, CALLBACKS};
impl From<winrt::Error> for Error {
fn from(e: winrt::Error) -> Self {
Error::WinRT(e)
impl From<windows::core::Error> for Error {
fn from(e: windows::core::Error) -> Self {
Error::WinRt(e)
}
}
pub struct WinRT {
#[derive(Clone)]
pub struct WinRt {
id: BackendId,
synth: SpeechSynthesizer,
player: MediaPlayer,
playback_list: MediaPlaybackList,
rate: f32,
pitch: f32,
volume: f32,
}
struct Utterance {
id: UtteranceId,
text: String,
rate: f32,
pitch: f32,
volume: f32,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
static ref BACKEND_TO_SPEECH_SYNTHESIZER: Mutex<HashMap<BackendId, SpeechSynthesizer>> = {
let v: HashMap<BackendId, SpeechSynthesizer> = HashMap::new();
Mutex::new(v)
};
static ref BACKEND_TO_MEDIA_PLAYER: Mutex<HashMap<BackendId, MediaPlayer>> = {
let v: HashMap<BackendId, MediaPlayer> = HashMap::new();
Mutex::new(v)
};
static ref BACKEND_TO_PLAYBACK_LIST: Mutex<HashMap<BackendId, MediaPlaybackList>> = {
let v: HashMap<BackendId, MediaPlaybackList> = HashMap::new();
Mutex::new(v)
};
static ref LAST_SPOKEN_UTTERANCE: Mutex<HashMap<BackendId, UtteranceId>> = {
let v: HashMap<BackendId, UtteranceId> = HashMap::new();
Mutex::new(v)
static ref UTTERANCES: Mutex<HashMap<BackendId, VecDeque<Utterance>>> = {
let utterances: HashMap<BackendId, VecDeque<Utterance>> = HashMap::new();
Mutex::new(utterances)
};
}
impl WinRT {
impl WinRt {
pub fn new() -> std::result::Result<Self, Error> {
info!("Initializing WinRT backend");
let playback_list = MediaPlaybackList::new()?;
let synth = SpeechSynthesizer::new()?;
let player = MediaPlayer::new()?;
player.set_auto_play(true)?;
player.set_source(&playback_list)?;
player.SetRealTimePlayback(true)?;
player.SetAudioCategory(MediaPlayerAudioCategory::Speech)?;
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = BackendId::WinRT(*backend_id);
let mut rv = Self {
id: bid,
synth: SpeechSynthesizer::new()?,
player: player,
playback_list: playback_list,
};
let bid = BackendId::WinRt(*backend_id);
*backend_id += 1;
Self::init_callbacks(&mut rv)?;
Ok(rv)
}
fn reinit_player(&mut self) -> std::result::Result<(), Error> {
self.playback_list = MediaPlaybackList::new()?;
self.player = MediaPlayer::new()?;
self.player.set_auto_play(true)?;
self.player.set_source(&self.playback_list)?;
self.init_callbacks()?;
Ok(())
}
fn init_callbacks(&mut self) -> Result<(), winrt::Error> {
let id = self.id().unwrap();
drop(backend_id);
{
let mut utterances = UTTERANCES.lock().unwrap();
utterances.insert(bid, VecDeque::new());
}
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.insert(id, self.player.clone());
self.player
.media_ended(TypedEventHandler::new(|sender, _args| {
let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
let id = backend_to_media_player.iter().find(|v| v.1 == sender);
if let Some(id) = id {
let id = id.0;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
let last_spoken_utterance = LAST_SPOKEN_UTTERANCE.lock().unwrap();
if let Some(utterance_id) = last_spoken_utterance.get(&id) {
callback(utterance_id.clone());
backend_to_media_player.insert(bid, player.clone());
drop(backend_to_media_player);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.insert(bid, synth.clone());
drop(backend_to_speech_synthesizer);
let bid_clone = bid;
player.MediaEnded(TypedEventHandler::new(
move |sender: &Option<MediaPlayer>, _args| {
if let Some(sender) = sender {
let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
let id = backend_to_media_player.iter().find(|v| v.1 == sender);
if let Some((id, _)) = id {
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(id) {
if let Some(utterance) = utterances.pop_front() {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
callback(utterance.id);
}
if let Some(utterance) = utterances.front() {
let backend_to_speech_synthesizer =
BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
let id = backend_to_speech_synthesizer
.iter()
.find(|v| *v.0 == bid_clone);
if let Some((_, tts)) = id {
tts.Options()?.SetSpeakingRate(utterance.rate.into())?;
tts.Options()?.SetAudioPitch(utterance.pitch.into())?;
tts.Options()?.SetAudioVolume(utterance.volume.into())?;
let stream = tts
.SynthesizeTextToStreamAsync(utterance.text.as_str())?
.get()?;
let content_type = stream.ContentType()?;
let source =
MediaSource::CreateFromStream(stream, content_type)?;
sender.SetSource(source)?;
sender.Play()?;
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance.id);
}
}
}
}
}
}
}
Ok(())
}))?;
let mut backend_to_playback_list = BACKEND_TO_PLAYBACK_LIST.lock().unwrap();
backend_to_playback_list.insert(id, self.playback_list.clone());
self.playback_list
.current_item_changed(TypedEventHandler::new(
|sender: &MediaPlaybackList, args: &CurrentMediaPlaybackItemChangedEventArgs| {
let backend_to_playback_list = BACKEND_TO_PLAYBACK_LIST.lock().unwrap();
let id = backend_to_playback_list.iter().find(|v| v.1 == sender);
if let Some(id) = id {
let id = id.0;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
let old_item = args.old_item()?;
if !old_item.is_null() {
if let Some(callback) = callbacks.utterance_end.as_mut() {
callback(UtteranceId::WinRT(old_item));
}
}
let new_item = args.new_item()?;
if !new_item.is_null() {
let mut last_spoken_utterance = LAST_SPOKEN_UTTERANCE.lock().unwrap();
let utterance_id = UtteranceId::WinRT(new_item);
last_spoken_utterance.insert(*id, utterance_id.clone());
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance_id);
}
}
}
Ok(())
},
))?;
Ok(())
},
))?;
Ok(Self {
id: bid,
synth,
player,
rate: 1.,
pitch: 1.,
volume: 1.,
})
}
}
impl Backend for WinRT {
impl Backend for WinRt {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
@ -150,33 +155,67 @@ impl Backend for WinRT {
text: &str,
interrupt: bool,
) -> std::result::Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
if interrupt && self.is_speaking()? {
self.stop()?;
}
let stream = self.synth.synthesize_text_to_stream_async(text)?.get()?;
let content_type = stream.content_type()?;
let source = MediaSource::create_from_stream(stream, content_type)?;
let item = MediaPlaybackItem::create(source)?;
let state = self.player.playback_session()?.playback_state()?;
if state == MediaPlaybackState::Paused {
let index = self.playback_list.current_item_index()?;
let total = self.playback_list.items()?.size()?;
if total != 0 && index == total - 1 {
self.reinit_player()?;
let utterance_id = {
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::WinRt(*uid);
*uid += 1;
utterance_id
};
let mut no_utterances = false;
{
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(&self.id) {
no_utterances = utterances.is_empty();
let utterance = Utterance {
id: utterance_id,
text: text.into(),
rate: self.rate,
pitch: self.pitch,
volume: self.volume,
};
utterances.push_back(utterance);
}
}
self.playback_list.items()?.append(&item)?;
if !self.is_speaking()? {
self.player.play()?;
if no_utterances {
self.synth.Options()?.SetSpeakingRate(self.rate.into())?;
self.synth.Options()?.SetAudioPitch(self.pitch.into())?;
self.synth.Options()?.SetAudioVolume(self.volume.into())?;
let stream = self.synth.SynthesizeTextToStreamAsync(text)?.get()?;
let content_type = stream.ContentType()?;
let source = MediaSource::CreateFromStream(stream, content_type)?;
self.player.SetSource(source)?;
self.player.Play()?;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance_id);
}
}
let utterance_id = UtteranceId::WinRT(item);
Ok(Some(utterance_id))
}
fn stop(&mut self) -> std::result::Result<(), Error> {
trace!("stop()");
self.reinit_player()?;
if !self.is_speaking()? {
return Ok(());
}
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get(&self.id) {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
for utterance in utterances {
callback(utterance.id);
}
}
}
if let Some(utterances) = utterances.get_mut(&self.id) {
utterances.clear();
}
self.player.Pause()?;
Ok(())
}
@ -193,12 +232,12 @@ impl Backend for WinRT {
}
fn get_rate(&self) -> std::result::Result<f32, Error> {
let rate = self.synth.options()?.speaking_rate()?;
let rate = self.synth.Options()?.SpeakingRate()?;
Ok(rate as f32)
}
fn set_rate(&mut self, rate: f32) -> std::result::Result<(), Error> {
self.synth.options()?.set_speaking_rate(rate.into())?;
self.rate = rate;
Ok(())
}
@ -215,12 +254,12 @@ impl Backend for WinRT {
}
fn get_pitch(&self) -> std::result::Result<f32, Error> {
let pitch = self.synth.options()?.audio_pitch()?;
let pitch = self.synth.Options()?.AudioPitch()?;
Ok(pitch as f32)
}
fn set_pitch(&mut self, pitch: f32) -> std::result::Result<(), Error> {
self.synth.options()?.set_audio_pitch(pitch.into())?;
self.pitch = pitch;
Ok(())
}
@ -237,19 +276,19 @@ impl Backend for WinRT {
}
fn get_volume(&self) -> std::result::Result<f32, Error> {
let volume = self.synth.options()?.audio_volume()?;
let volume = self.synth.Options()?.AudioVolume()?;
Ok(volume as f32)
}
fn set_volume(&mut self, volume: f32) -> std::result::Result<(), Error> {
self.synth.options()?.set_audio_volume(volume.into())?;
self.volume = volume;
Ok(())
}
fn is_speaking(&self) -> std::result::Result<bool, Error> {
let state = self.player.playback_session()?.playback_state()?;
let playing = state == MediaPlaybackState::Opening || state == MediaPlaybackState::Playing;
Ok(playing)
let utterances = UTTERANCES.lock().unwrap();
let utterances = utterances.get(&self.id).unwrap();
Ok(!utterances.is_empty())
}
fn voice(&self) -> Result<String,Error> {
@ -265,14 +304,14 @@ impl Backend for WinRT {
}
}
impl Drop for WinRT {
impl Drop for WinRt {
fn drop(&mut self) {
let id = self.id().unwrap();
let mut backend_to_playback_list = BACKEND_TO_PLAYBACK_LIST.lock().unwrap();
backend_to_playback_list.remove(&id);
let id = self.id;
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.remove(&id);
let mut last_spoken_utterance = LAST_SPOKEN_UTTERANCE.lock().unwrap();
last_spoken_utterance.remove(&id);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.remove(&id);
let mut utterances = UTTERANCES.lock().unwrap();
utterances.remove(&id);
}
}
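
The rewritten backend replaces the MediaPlaybackList with a per-backend queue; a simplified sketch of that flow (the types are stand-ins, not the real WinRT plumbing):

// Hedged sketch: push utterances onto a VecDeque; when playback of the current
// item ends, pop it, fire its callbacks, and start the next one if present.
use std::collections::VecDeque;

struct Utterance { id: u64, text: String }

#[derive(Default)]
struct SpeechQueue(VecDeque<Utterance>);

impl SpeechQueue {
    // Returns true if playback should start now (nothing was queued before).
    fn enqueue(&mut self, id: u64, text: &str) -> bool {
        let was_idle = self.0.is_empty();
        self.0.push_back(Utterance { id, text: text.into() });
        was_idle
    }

    // Called from the media-ended handler: drop the finished utterance and
    // return the next one to synthesize, if any.
    fn on_media_ended(&mut self) -> Option<&Utterance> {
        self.0.pop_front();
        self.0.front()
    }
}
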

View File

@ -2,120 +2,214 @@
* a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
* Currently supported backends are:
* * Windows
* * Screen readers/SAPI via Tolk
* * Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
* * WinRT
* * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* * MacOS
* * MacOS/iOS
* * AppKit on MacOS 10.13 and below
* * AVFoundation on MacOS 10.14 and above, and iOS
* * Android
* * WebAssembly
*/
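
A brief usage sketch of the renamed `Tts` handle defined below (error handling and the supported feature set vary by backend):

// Hedged sketch: construct the platform default backend and speak a phrase.
use tts::Tts;

fn main() -> Result<(), tts::Error> {
    let mut tts = Tts::default()?;
    tts.speak("Hello from the tts crate.", false)?;
    Ok(())
}
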
use std::boxed::Box;
use std::collections::HashMap;
#[cfg(target_os = "macos")]
use std::ffi::CStr;
use std::sync::Mutex;
use std::fmt;
use std::sync::{Arc, Mutex};
use std::{boxed::Box, sync::RwLock};
#[cfg(any(target_os = "macos", target_os = "ios"))]
use cocoa_foundation::base::id;
use dyn_clonable::*;
use lazy_static::lazy_static;
#[cfg(target_os = "macos")]
use libc::c_char;
#[cfg(target_os = "macos")]
use objc::{class, msg_send, sel, sel_impl};
#[cfg(target_os = "linux")]
use speech_dispatcher::Error as SpeechDispatcherError;
use thiserror::Error;
#[cfg(target_arch = "wasm32")]
use web_sys::SpeechSynthesisUtterance;
#[cfg(windows)]
use tts_winrt_bindings::windows::media::playback::MediaPlaybackItem;
#[cfg(all(windows, feature = "tolk"))]
use tolk::Tolk;
mod backends;
mod voices;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Backends {
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
Tolk,
#[cfg(windows)]
WinRT,
#[cfg(target_os = "android")]
Android,
#[cfg(target_os = "macos")]
AppKit,
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation,
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(all(windows, feature = "tolk"))]
Tolk,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
WinRt,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
impl fmt::Display for Backends {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
Backends::Android => writeln!(f, "Android"),
#[cfg(target_os = "macos")]
Backends::AppKit => writeln!(f, "AppKit"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => writeln!(f, "AVFoundation"),
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => writeln!(f, "Speech Dispatcher"),
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => writeln!(f, "Tolk"),
#[cfg(target_arch = "wasm32")]
Backends::Web => writeln!(f, "Web"),
#[cfg(windows)]
Backends::WinRt => writeln!(f, "Windows Runtime"),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum BackendId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(u64),
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRT(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(u64),
WinRt(u64),
}
#[derive(Clone, Debug, PartialEq)]
impl fmt::Display for BackendId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
BackendId::Android(id) => writeln!(f, "{}", id),
#[cfg(any(target_os = "macos", target_os = "ios"))]
BackendId::AvFoundation(id) => writeln!(f, "{}", id),
#[cfg(target_os = "linux")]
BackendId::SpeechDispatcher(id) => writeln!(f, "{}", id),
#[cfg(target_arch = "wasm32")]
BackendId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
BackendId::WinRt(id) => writeln!(f, "{}", id),
}
}
}
// # Note
//
// Most trait implementations are blocked by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[derive(Debug)]
#[cfg_attr(
not(any(target_os = "macos", target_os = "ios")),
derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)
)]
#[cfg_attr(
all(feature = "serde", not(any(target_os = "macos", target_os = "ios"))),
derive(serde::Serialize, serde::Deserialize)
)]
pub enum UtteranceId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(SpeechSynthesisUtterance),
Web(u64),
#[cfg(windows)]
WinRT(MediaPlaybackItem),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
WinRt(u64),
}
pub struct Features {
pub stop: bool,
pub rate: bool,
pub pitch: bool,
pub volume: bool,
pub is_speaking: bool,
pub voices: bool,
pub utterance_callbacks: bool,
}
impl Default for Features {
fn default() -> Self {
Self {
stop: false,
rate: false,
pitch: false,
volume: false,
is_speaking: false,
voices: false,
utterance_callbacks: false,
// # Note
//
// Display is not implemented by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
impl fmt::Display for UtteranceId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
UtteranceId::Android(id) => writeln!(f, "{}", id),
#[cfg(target_os = "linux")]
UtteranceId::SpeechDispatcher(id) => writeln!(f, "{}", id),
#[cfg(target_arch = "wasm32")]
UtteranceId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
UtteranceId::WinRt(id) => writeln!(f, "{}", id),
}
}
}
unsafe impl Send for UtteranceId {}
unsafe impl Sync for UtteranceId {}
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Features {
pub is_speaking: bool,
pub pitch: bool,
pub rate: bool,
pub stop: bool,
pub utterance_callbacks: bool,
pub voices: bool,
pub volume: bool,
}
impl fmt::Display for Features {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
writeln!(f, "{:#?}", self)
}
}
impl Features {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Debug, Error)]
pub enum Error {
#[error("IO error: {0}")]
IO(#[from] std::io::Error),
Io(#[from] std::io::Error),
#[error("Value not received")]
NoneError,
#[error("Operation failed")]
OperationFailed,
#[cfg(target_arch = "wasm32")]
#[error("JavaScript error: [0])]")]
#[error("JavaScript error: [0]")]
JavaScriptError(wasm_bindgen::JsValue),
#[cfg(target_os = "linux")]
#[error("Speech Dispatcher error: {0}")]
SpeechDispatcher(#[from] SpeechDispatcherError),
#[cfg(windows)]
#[error("WinRT error")]
WinRT(winrt::Error),
WinRt(windows::core::Error),
#[error("Unsupported feature")]
UnsupportedFeature,
#[error("Out of range")]
OutOfRange,
#[cfg(target_os = "android")]
#[error("JNI error: [0])]")]
JNI(#[from] jni::errors::Error),
}
pub trait Backend {
#[clonable]
pub trait Backend: Clone {
fn id(&self) -> Option<BackendId>;
fn supported_features(&self) -> Features;
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error>;
@ -145,6 +239,7 @@ pub trait Backend {
struct Callbacks {
utterance_begin: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_end: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_stop: Option<Box<dyn FnMut(UtteranceId)>>,
}
unsafe impl Send for Callbacks {}
@ -158,47 +253,59 @@ lazy_static! {
};
}
pub struct TTS(Box<dyn Backend>);
#[derive(Clone)]
pub struct Tts(Arc<RwLock<Box<dyn Backend>>>);
unsafe impl std::marker::Send for TTS {}
unsafe impl Send for Tts {}
unsafe impl std::marker::Sync for TTS {}
unsafe impl Sync for Tts {}
impl TTS {
impl Tts {
/**
* Create a new `TTS` instance with the specified backend.
*/
pub fn new(backend: Backends) -> Result<TTS, Error> {
pub fn new(backend: Backends) -> Result<Tts, Error> {
let backend = match backend {
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => Ok(TTS(Box::new(backends::SpeechDispatcher::new()))),
Backends::SpeechDispatcher => {
let tts = backends::SpeechDispatcher::new()?;
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_arch = "wasm32")]
Backends::Web => {
let tts = backends::Web::new()?;
Ok(TTS(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => {
let tts = backends::Tolk::new();
if let Some(tts) = tts {
Ok(TTS(Box::new(tts)))
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
} else {
Err(Error::NoneError)
}
}
#[cfg(windows)]
Backends::WinRT => {
let tts = backends::winrt::WinRT::new()?;
Ok(TTS(Box::new(tts)))
Backends::WinRt => {
let tts = backends::WinRt::new()?;
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_os = "macos")]
Backends::AppKit => Ok(TTS(Box::new(backends::AppKit::new()))),
Backends::AppKit => Ok(Tts(Arc::new(RwLock::new(
Box::new(backends::AppKit::new()),
)))),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => Ok(TTS(Box::new(backends::AvFoundation::new()))),
Backends::AvFoundation => Ok(Tts(Arc::new(RwLock::new(Box::new(
backends::AvFoundation::new(),
))))),
#[cfg(target_os = "android")]
Backends::Android => {
let tts = backends::Android::new()?;
Ok(Tts(Arc::new(RwLock::new(Box::new(tts)))))
}
};
if backend.is_ok() {
let backend = backend.unwrap();
if let Some(id) = backend.0.id() {
if let Ok(backend) = backend {
if let Some(id) = backend.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.insert(id, Callbacks::default());
}
@ -208,17 +315,19 @@ impl TTS {
}
}
pub fn default() -> Result<TTS, Error> {
pub fn default() -> Result<Tts, Error> {
#[cfg(target_os = "linux")]
let tts = TTS::new(Backends::SpeechDispatcher);
#[cfg(windows)]
let tts = if let Some(tts) = TTS::new(Backends::Tolk).ok() {
let tts = Tts::new(Backends::SpeechDispatcher);
#[cfg(all(windows, feature = "tolk"))]
let tts = if let Ok(tts) = Tts::new(Backends::Tolk) {
Ok(tts)
} else {
TTS::new(Backends::WinRT)
Tts::new(Backends::WinRt)
};
#[cfg(all(windows, not(feature = "tolk")))]
let tts = Tts::new(Backends::WinRt);
#[cfg(target_arch = "wasm32")]
let tts = TTS::new(Backends::Web);
let tts = Tts::new(Backends::Web);
#[cfg(target_os = "macos")]
let tts = unsafe {
// Needed because the Rust NSProcessInfo structs report bogus values, and I don't want to pull in a full bindgen stack.
@ -227,19 +336,21 @@ impl TTS {
let str: *const c_char = msg_send![version, UTF8String];
let str = CStr::from_ptr(str);
let str = str.to_string_lossy();
let version: Vec<&str> = str.split(" ").collect();
let version: Vec<&str> = str.split(' ').collect();
let version = version[1];
let version_parts: Vec<&str> = version.split(".").collect();
let minor_version: i8 = version_parts[1].parse().unwrap();
let version_parts: Vec<&str> = version.split('.').collect();
let major_version: i8 = version_parts[0].parse().unwrap();
if minor_version >= 14 || major_version >= 11 {
TTS::new(Backends::AvFoundation)
let minor_version: i8 = version_parts[1].parse().unwrap();
if major_version >= 11 || minor_version >= 14 {
Tts::new(Backends::AvFoundation)
} else {
TTS::new(Backends::AppKit)
Tts::new(Backends::AppKit)
}
};
#[cfg(target_os = "ios")]
let tts = TTS::new(Backends::AvFoundation);
let tts = Tts::new(Backends::AvFoundation);
#[cfg(target_os = "android")]
let tts = Tts::new(Backends::Android);
tts
}
@ -247,7 +358,7 @@ impl TTS {
* Returns the features supported by this TTS engine
*/
pub fn supported_features(&self) -> Features {
self.0.supported_features()
self.0.read().unwrap().supported_features()
}
/**
@ -258,7 +369,10 @@ impl TTS {
text: S,
interrupt: bool,
) -> Result<Option<UtteranceId>, Error> {
self.0.speak(text.into().as_str(), interrupt)
self.0
.write()
.unwrap()
.speak(text.into().as_str(), interrupt)
}
/**
@ -267,7 +381,7 @@ impl TTS {
pub fn stop(&mut self) -> Result<&Self, Error> {
let Features { stop, .. } = self.supported_features();
if stop {
self.0.stop()?;
self.0.write().unwrap().stop()?;
Ok(self)
} else {
Err(Error::UnsupportedFeature)
@ -278,21 +392,21 @@ impl TTS {
* Returns the minimum rate for this speech synthesizer.
*/
pub fn min_rate(&self) -> f32 {
self.0.min_rate()
self.0.read().unwrap().min_rate()
}
/**
* Returns the maximum rate for this speech synthesizer.
*/
pub fn max_rate(&self) -> f32 {
self.0.max_rate()
self.0.read().unwrap().max_rate()
}
/**
* Returns the normal rate for this speech synthesizer.
*/
pub fn normal_rate(&self) -> f32 {
self.0.normal_rate()
self.0.read().unwrap().normal_rate()
}
/**
@ -301,7 +415,7 @@ impl TTS {
pub fn get_rate(&self) -> Result<f32, Error> {
let Features { rate, .. } = self.supported_features();
if rate {
self.0.get_rate()
self.0.read().unwrap().get_rate()
} else {
Err(Error::UnsupportedFeature)
}
@ -315,10 +429,11 @@ impl TTS {
rate: rate_feature, ..
} = self.supported_features();
if rate_feature {
if rate < self.0.min_rate() || rate > self.0.max_rate() {
let mut backend = self.0.write().unwrap();
if rate < backend.min_rate() || rate > backend.max_rate() {
Err(Error::OutOfRange)
} else {
self.0.set_rate(rate)?;
backend.set_rate(rate)?;
Ok(self)
}
} else {
@ -330,21 +445,21 @@ impl TTS {
* Returns the minimum pitch for this speech synthesizer.
*/
pub fn min_pitch(&self) -> f32 {
self.0.min_pitch()
self.0.read().unwrap().min_pitch()
}
/**
* Returns the maximum pitch for this speech synthesizer.
*/
pub fn max_pitch(&self) -> f32 {
self.0.max_pitch()
self.0.read().unwrap().max_pitch()
}
/**
* Returns the normal pitch for this speech synthesizer.
*/
pub fn normal_pitch(&self) -> f32 {
self.0.normal_pitch()
self.0.read().unwrap().normal_pitch()
}
/**
@ -353,7 +468,7 @@ impl TTS {
pub fn get_pitch(&self) -> Result<f32, Error> {
let Features { pitch, .. } = self.supported_features();
if pitch {
self.0.get_pitch()
self.0.read().unwrap().get_pitch()
} else {
Err(Error::UnsupportedFeature)
}
@ -368,10 +483,11 @@ impl TTS {
..
} = self.supported_features();
if pitch_feature {
if pitch < self.0.min_pitch() || pitch > self.0.max_pitch() {
let mut backend = self.0.write().unwrap();
if pitch < backend.min_pitch() || pitch > backend.max_pitch() {
Err(Error::OutOfRange)
} else {
self.0.set_pitch(pitch)?;
backend.set_pitch(pitch)?;
Ok(self)
}
} else {
@ -383,21 +499,21 @@ impl TTS {
* Returns the minimum volume for this speech synthesizer.
*/
pub fn min_volume(&self) -> f32 {
self.0.min_volume()
self.0.read().unwrap().min_volume()
}
/**
* Returns the maximum volume for this speech synthesizer.
*/
pub fn max_volume(&self) -> f32 {
self.0.max_volume()
self.0.read().unwrap().max_volume()
}
/**
* Returns the normal volume for this speech synthesizer.
*/
pub fn normal_volume(&self) -> f32 {
self.0.normal_volume()
self.0.read().unwrap().normal_volume()
}
/**
@ -406,7 +522,7 @@ impl TTS {
pub fn get_volume(&self) -> Result<f32, Error> {
let Features { volume, .. } = self.supported_features();
if volume {
self.0.get_volume()
self.0.read().unwrap().get_volume()
} else {
Err(Error::UnsupportedFeature)
}
@ -421,10 +537,11 @@ impl TTS {
..
} = self.supported_features();
if volume_feature {
if volume < self.0.min_volume() || volume > self.0.max_volume() {
let mut backend = self.0.write().unwrap();
if volume < backend.min_volume() || volume > backend.max_volume() {
Err(Error::OutOfRange)
} else {
self.0.set_volume(volume)?;
backend.set_volume(volume)?;
Ok(self)
}
} else {
@ -438,7 +555,7 @@ impl TTS {
pub fn is_speaking(&self) -> Result<bool, Error> {
let Features { is_speaking, .. } = self.supported_features();
if is_speaking {
self.0.is_speaking()
self.0.read().unwrap().is_speaking()
} else {
Err(Error::UnsupportedFeature)
}
@ -448,7 +565,7 @@ impl TTS {
* Returns list of available voices.
*/
pub fn list_voices(&self) -> Vec<String> {
self.0.list_voices()
self.0.read().unwrap().list_voices()
}
/**
@ -457,7 +574,7 @@ impl TTS {
pub fn voice(&self) -> Result<String,Error> {
let Features { voices, .. } = self.supported_features();
if voices {
self.0.voice()
self.0.read().unwrap().voice()
} else {
Err(Error::UnsupportedFeature)
}
@ -470,9 +587,9 @@ impl TTS {
let Features {
voices: voices_feature,
..
} = self.0.supported_features();
} = self.supported_features();
if voices_feature {
self.0.set_voice(voice.into().as_str())
self.0.write().unwrap().set_voice(voice.into().as_str())
} else {
Err(Error::UnsupportedFeature)
}
@ -491,7 +608,7 @@ impl TTS {
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.id().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_begin = callback;
Ok(())
@ -513,7 +630,7 @@ impl TTS {
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.id().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_end = callback;
Ok(())
@ -521,13 +638,55 @@ impl TTS {
Err(Error::UnsupportedFeature)
}
}
/**
* Called when this speech synthesizer is stopped and still has utterances in its queue.
*/
pub fn on_utterance_stop(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let mut callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_stop = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
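
A short sketch of wiring the new stop callback (only meaningful when the backend reports `utterance_callbacks` in its features):

// Hedged sketch: log the ID of any utterance that was cancelled by stop().
fn watch_stops(tts: &tts::Tts) -> Result<(), tts::Error> {
    tts.on_utterance_stop(Some(Box::new(|utterance| {
        println!("utterance stopped: {:?}", utterance);
    })))
}
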
/**
* Returns `true` if a screen reader is available to provide speech.
*/
#[allow(unreachable_code)]
pub fn screen_reader_available() -> bool {
#[cfg(target_os = "windows")]
{
#[cfg(feature = "tolk")]
{
let tolk = Tolk::new();
return tolk.detect_screen_reader().is_some();
}
#[cfg(not(feature = "tolk"))]
return false;
}
false
}
}
impl Drop for TTS {
impl Drop for Tts {
fn drop(&mut self) {
if let Some(id) = self.0.id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
if Arc::strong_count(&self.0) <= 1 {
if let Some(id) = self.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
}
}
}
}
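
Because `Tts` now wraps its backend in `Arc<RwLock<...>>`, clones share a single backend and the per-backend callbacks are only removed once the last clone is dropped; a small sketch of sharing a handle across threads (illustrative only):

// Hedged sketch: clones of Tts are cheap and can move to another thread.
fn speak_in_background(tts: &tts::Tts) {
    let mut handle = tts.clone();
    std::thread::spawn(move || {
        let _ = handle.speak("spoken from a worker thread", false);
    });
}
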

View File

@ -1,13 +0,0 @@
[package]
name = "tts_winrt_bindings"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
description = "Internal crate used by `tts`"
license = "MIT"
edition = "2018"
[dependencies]
winrt = "0.7"
[build-dependencies]
winrt = "0.7"

View File

@ -1,12 +0,0 @@
winrt::build!(
dependencies
os
types
windows::media::core::MediaSource
windows::media::playback::{MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState, MediaPlayer}
windows::media::speech_synthesis::SpeechSynthesizer
);
fn main() {
build();
}

View File

@ -1 +0,0 @@
include!(concat!(env!("OUT_DIR"), "/winrt.rs"));