mirror of https://github.com/ndarilek/tts-rs.git synced 2024-11-13 06:29:36 +00:00

Compare commits


No commits in common. "master" and "v0.6.0" have entirely different histories.

53 changed files with 423 additions and 2891 deletions

View File

@ -1,21 +1,69 @@
name: Release
on:
push:
tags:
- "v*"
jobs:
publish:
name: Publish
runs-on: ubuntu-22.04
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v4
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo login $CARGO_TOKEN
rustup toolchain install stable
cargo publish
name: Release
on:
push:
tags:
- "v*"
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
build_windows:
name: Build Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
cargo build --release
build_macos:
name: Build MacOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
cargo build --release
rustup target add aarch64-apple-ios x86_64-apple-ios
cargo install cargo-lipo
cargo lipo --release
publish_winrt_bindings:
name: Publish winrt_bindings
runs-on: windows-latest
needs: [build_windows]
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
cargo login $CARGO_TOKEN
cd winrt_bindings
cargo package
cargo publish || true
publish:
name: Publish
runs-on: ubuntu-latest
needs: [build_linux, build_windows, build_macos]
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo login $CARGO_TOKEN
cargo publish

View File

@ -1,62 +1,39 @@
name: Test
on:
push:
pull_request:
jobs:
check_formatting:
name: Check Formatting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup toolchain install stable
cargo fmt --all --check
cd examples/web
cargo fmt --all --check
check:
name: Check
strategy:
matrix:
os: [windows-latest, ubuntu-22.04, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- run: sudo apt-get update; sudo apt-get install -y libspeechd-dev
if: ${{ runner.os == 'Linux' }}
- run: |
rustup toolchain install stable
cargo clippy --all-targets
check_web:
name: Check Web
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add wasm32-unknown-unknown
rustup toolchain install stable
cargo clippy --all-targets --target wasm32-unknown-unknown
check_android:
name: Check Android
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add aarch64-linux-android
rustup toolchain install stable
cargo clippy --all-targets --target aarch64-linux-android
check_web_example:
name: Check Web Example
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add wasm32-unknown-unknown
rustup toolchain install stable
cd examples/web
cargo build --target wasm32-unknown-unknown
name: Test
on:
push:
pull_request:
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
build_windows:
name: Build Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
cargo build --release
build_macos:
name: Build MacOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- run: |
cargo build --release
rustup target add aarch64-apple-ios x86_64-apple-ios
cargo install cargo-lipo
cargo lipo --release

.gitignore vendored
View File

@ -1,3 +1,2 @@
Cargo.lock
target
*.dll

View File

@ -1,71 +1,36 @@
[package]
name = "tts"
version = "0.26.3"
version = "0.6.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
repository = "https://github.com/ndarilek/tts-rs"
description = "High-level Text-To-Speech (TTS) interface"
documentation = "https://docs.rs/tts"
license = "MIT"
exclude = ["*.cfg", "*.yml"]
edition = "2021"
edition = "2018"
[lib]
crate-type = ["lib", "cdylib", "staticlib"]
[features]
speech_dispatcher_0_9 = ["speech-dispatcher/0_9"]
speech_dispatcher_0_10 = ["speech-dispatcher/0_10"]
speech_dispatcher_0_11 = ["speech-dispatcher/0_11"]
default = ["speech_dispatcher_0_11"]
crate-type = ["lib", "staticlib"]
[dependencies]
dyn-clonable = "0.9"
oxilangtag = "0.1"
lazy_static = "1"
log = "0.4"
serde = { version = "1", optional = true, features = ["derive"] }
thiserror = "1"
[dev-dependencies]
env_logger = "0.11"
env_logger = "0.7"
[target.'cfg(windows)'.dependencies]
tolk = { version = "0.5", optional = true }
windows = { version = "0.58", features = [
"Foundation",
"Foundation_Collections",
"Media_Core",
"Media_Playback",
"Media_SpeechSynthesis",
"Storage_Streams",
] }
tolk = "0.2"
winrt = "0.7"
tts_winrt_bindings = { version = "0.1", path="winrt_bindings" }
[target.'cfg(target_os = "linux")'.dependencies]
speech-dispatcher = { version = "0.16", default-features = false }
speech-dispatcher = "0.4"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
cocoa-foundation = "0.1"
core-foundation = "0.9"
libc = "0.2"
objc = { version = "0.2", features = ["exception"] }
objc = "0.2"
[target.wasm32-unknown-unknown.dependencies]
wasm-bindgen = "0.2"
web-sys = { version = "0.3", features = [
"EventTarget",
"SpeechSynthesis",
"SpeechSynthesisErrorCode",
"SpeechSynthesisErrorEvent",
"SpeechSynthesisEvent",
"SpeechSynthesisUtterance",
"SpeechSynthesisVoice",
"Window",
] }
[target.'cfg(target_os="android")'.dependencies]
jni = "0.21"
ndk-context = "0.1"
[package.metadata.docs.rs]
no-default-features = true
features = ["speech_dispatcher_0_11"]
web-sys = { version = "0.3", features = ["SpeechSynthesis", "SpeechSynthesisUtterance", "Window", ] }

View File

@ -1,33 +0,0 @@
[tasks.build-android-example]
script = [
"cd examples/android",
"./gradlew assembleDebug",
]
[tasks.run-android-example]
script = [
"cd examples/android",
"./gradlew runDebug",
]
[tasks.log-android]
command = "adb"
args = ["logcat", "RustStdoutStderr:D", "*:S"]
[tasks.install-trunk]
install_crate = { crate_name = "trunk", binary = "trunk", test_arg = "--help" }
[tasks.install-wasm-bindgen-cli]
install_crate = { crate_name = "wasm-bindgen-cli", binary = "wasm-bindgen", test_arg = "--help" }
[tasks.build-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["build"]
[tasks.run-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["serve"]

View File

@ -3,23 +3,10 @@
This library provides a high-level Text-To-Speech (TTS) interface supporting various backends. Currently supported backends are:
* Windows
* Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
* Screen readers/SAPI via Tolk
* WinRT
* Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* MacOS/iOS
* MacOS
* AppKit on MacOS 10.13 and below
* AVFoundation on MacOS 10.14 and above, and iOS
* Android
* WebAssembly
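For orientation, here is a minimal usage sketch modeled on the `hello_world` example that ships with this crate. Exact feature support varies by backend, and, as the examples note, macOS may additionally need a running `NSRunLoop` when there is no app loop:
```rust
use tts::*;

fn main() -> Result<(), Error> {
    // Pick the platform's default backend.
    let mut tts = Tts::default()?;
    // Check what this backend supports before touching optional features.
    let Features { rate, .. } = tts.supported_features();
    if rate {
        tts.set_rate(tts.normal_rate())?;
    }
    // `false` queues the utterance rather than interrupting current speech.
    tts.speak("Hello, world.", false)?;
    // Speech is asynchronous; keep the process alive long enough to hear it
    // (the bundled examples block on stdin or sleep instead).
    std::thread::sleep(std::time::Duration::from_secs(5));
    Ok(())
}
```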
## Android Setup
On most platforms, this library is plug-and-play. Because of JNI's complexity, Android setup is a bit more involved. In general, look to the Android example for guidance. Here are some rough steps to get going:
* Set up _Cargo.toml_ as the example does. Be sure to depend on `ndk-glue`.
* Place _Bridge.java_ appropriately in your app. This is needed to support various Android TTS callbacks.
* Create a main activity similar to _MainActivity.kt_. In particular, you need to derive `android.app.NativeActivity`, and you need a `System.loadLibrary(...)` call appropriate for your app. `System.loadLibrary(...)` is needed to trigger `JNI_OnLoad`.
* Even though you've loaded the library in your main activity, add a metadata tag to your activity in _AndroidManifest.xml_ referencing it. Yes, this is redundant but necessary.
* Set up your various build.gradle scripts to reference the plugins, dependencies, etc. from the example. In particular, you'll want to set up [cargo-ndk-android-gradle](https://github.com/willir/cargo-ndk-android-gradle/) and either [depend on androidx.annotation](https://developer.android.com/reference/androidx/annotation/package-summary) or otherwise configure your app to keep the class _rs.tts.Bridge_.
And I think that should about do it. Good luck!
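The Rust entry point itself stays small. This sketch mirrors the `hello_world` example in this repository: the crate is built as a `dylib`, loaded by the `System.loadLibrary(...)` call described above, and entered through `ndk_glue` (the spoken text is just illustrative):
```rust
use tts::*;

// Entry point modeled on the bundled `hello_world` example. `JNI_OnLoad` is
// triggered by the `System.loadLibrary(...)` call in MainActivity, not by this
// attribute; `ndk_glue::main` only wires up the native-activity entry point.
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
    let mut tts = Tts::default().expect("Failed to initialize TTS");
    tts.speak("Hello from Android.", false)
        .expect("Failed to speak");
}
```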

View File

@ -1,11 +1,15 @@
// Copyright 2013-2015 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
if std::env::var("TARGET").unwrap().contains("-apple") {
println!("cargo:rustc-link-lib=framework=AppKit");
println!("cargo:rustc-link-lib=framework=AVFoundation");
if !std::env::var("CARGO_CFG_TARGET_OS")
.unwrap()
.contains("ios")
{
println!("cargo:rustc-link-lib=framework=AppKit");
}
}
}

View File

@ -1,39 +0,0 @@
use std::io;
use std::{thread, time};
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut bottles = 99;
while bottles > 0 {
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
tts.speak(format!("{} bottles of beer,", bottles), false)?;
tts.speak("Take one down, pass it around", false)?;
tts.speak("Give us a bit to drink this...", false)?;
let time = time::Duration::from_secs(15);
thread::sleep(time);
bottles -= 1;
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
}
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

View File

@ -1,16 +0,0 @@
*.iml
.gradle
/local.properties
/.idea/caches
/.idea/libraries
/.idea/modules.xml
/.idea/workspace.xml
/.idea/navEditor.xml
/.idea/assetWizardSettings.xml
.DS_Store
/build
/captures
.externalNativeBuild
.cxx
local.properties
Cargo.lock

View File

@ -1,3 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<bytecodeTargetLevel target="1.6" />
</component>
</project>

View File

@ -1,21 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GradleMigrationSettings" migrationVersion="1" />
<component name="GradleSettings">
<option name="linkedExternalProjectsSettings">
<GradleProjectSettings>
<option name="testRunner" value="PLATFORM" />
<option name="distributionType" value="DEFAULT_WRAPPED" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="gradleJvm" value="11" />
<option name="modules">
<set>
<option value="$PROJECT_DIR$" />
</set>
</option>
<option name="resolveModulePerSourceSet" value="false" />
<option name="useQualifiedModuleNames" value="true" />
</GradleProjectSettings>
</option>
</component>
</project>

View File

@ -1,25 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteRepositoriesConfiguration">
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Maven Central repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="jboss.community" />
<option name="name" value="JBoss Community repository" />
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
</remote-repository>
<remote-repository>
<option name="id" value="BintrayJCenter" />
<option name="name" value="BintrayJCenter" />
<option name="url" value="https://jcenter.bintray.com/" />
</remote-repository>
<remote-repository>
<option name="id" value="Google" />
<option name="name" value="Google" />
<option name="url" value="https://dl.google.com/dl/android/maven2/" />
</remote-repository>
</component>
</project>

View File

@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_6" default="false" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/build/classes" />
</component>
<component name="ProjectType">
<option name="id" value="Android" />
</component>
</project>

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
</component>
</project>

View File

@ -1 +0,0 @@
/build

View File

@ -1,56 +0,0 @@
plugins {
id "com.android.application"
id "org.mozilla.rust-android-gradle.rust-android"
}
android {
namespace "rs.tts"
compileSdkVersion 33
ndkVersion "25.1.8937393"
defaultConfig {
applicationId "rs.tts"
minSdkVersion 21
targetSdkVersion 33
versionCode 1
versionName "1.0"
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro"
}
}
}
dependencies {
implementation "androidx.core:core-ktx:1.2.0"
implementation "androidx.annotation:annotation:1.1.0"
implementation "com.google.android.material:material:1.1.0"
implementation "androidx.constraintlayout:constraintlayout:1.1.3"
}
apply plugin: "org.mozilla.rust-android-gradle.rust-android"
cargo {
module = "."
libname = "tts"
targets = ["arm", "x86"]
}
tasks.whenTaskAdded { task ->
if ((task.name == 'javaPreCompileDebug' || task.name == 'javaPreCompileRelease')) {
task.dependsOn "cargoBuild"
}
}
project.afterEvaluate {
android.applicationVariants.all { variant ->
task "run${variant.name.capitalize()}"(type: Exec, dependsOn: "install${variant.name.capitalize()}", group: "run") {
commandLine = ["adb", "shell", "monkey", "-p", variant.applicationId + " 1"]
doLast {
println "Launching ${variant.applicationId}"
}
}
}
}

View File

@ -1,21 +0,0 @@
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile

View File

@ -1 +0,0 @@
jniLibs

View File

@ -1,13 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<application android:allowBackup="true" android:label="@string/app_name">
<activity android:name=".MainActivity" android:exported="true">
<meta-data android:name="android.app.lib_name" android:value="hello_world" />
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>

View File

@ -1,24 +0,0 @@
package rs.tts;
import android.speech.tts.TextToSpeech;
import android.speech.tts.UtteranceProgressListener;
@androidx.annotation.Keep
public class Bridge extends UtteranceProgressListener implements TextToSpeech.OnInitListener {
public int backendId;
public Bridge(int backendId) {
this.backendId = backendId;
}
public native void onInit(int status);
public native void onStart(String utteranceId);
public native void onStop(String utteranceId, Boolean interrupted);
public native void onDone(String utteranceId);
public native void onError(String utteranceId) ;
}

View File

@ -1,11 +0,0 @@
package rs.tts
import android.app.NativeActivity
class MainActivity : NativeActivity() {
companion object {
init {
System.loadLibrary("hello_world")
}
}
}

View File

@ -1,3 +0,0 @@
<resources>
<string name="app_name">TTS-RS</string>
</resources>

View File

@ -1,29 +0,0 @@
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
repositories {
google()
mavenCentral()
maven {
url "https://plugins.gradle.org/m2/"
}
}
}
plugins {
id "com.android.application" version "7.3.0" apply false
id "com.android.library" version "7.3.0" apply false
id "org.jetbrains.kotlin.android" version "1.7.21" apply false
id "org.mozilla.rust-android-gradle.rust-android" version "0.9.3" apply false
}
allprojects {
repositories {
google()
mavenCentral()
}
}
task clean(type: Delete) {
delete rootProject.buildDir
}

View File

@ -1,14 +0,0 @@
[package]
name = "hello_world"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["dylib"]
[dependencies]
ndk-glue = "0.7"
tts = { path = "../.." }

View File

@ -1,21 +0,0 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
# AndroidX package structure to make it clearer which packages are bundled with the
# Android operating system, and which are packaged with your app's APK
# https://developer.android.com/topic/libraries/support-library/androidx-rn
android.useAndroidX=true
# Automatically convert third-party libraries to use AndroidX
android.enableJetifier=true
# Kotlin code style for this project: "official" or "obsolete":
kotlin.code.style=official

View File

@ -1,6 +0,0 @@
#Mon Dec 28 17:32:22 CST 2020
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip

View File

@ -1,172 +0,0 @@
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
echo "$*"
}
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "$@"

View File

@ -1,84 +0,0 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

View File

@ -1,8 +0,0 @@
pluginManagement {
repositories {
gradlePluginPortal()
google()
mavenCentral()
}
}
include ":app"

View File

@ -1,70 +0,0 @@
use tts::*;
// The `loop {}` below only simulates an app loop.
// Without it, the `Tts` instance gets dropped before callbacks can run.
#[allow(unreachable_code)]
fn run() -> Result<(), Error> {
let mut tts = Tts::default()?;
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();
if rate {
let original_rate = tts.get_rate()?;
tts.speak(format!("Current rate: {}", original_rate), false)?;
tts.set_rate(tts.max_rate())?;
tts.speak("This is very fast.", false)?;
tts.set_rate(tts.min_rate())?;
tts.speak("This is very slow.", false)?;
tts.set_rate(tts.normal_rate())?;
tts.speak("This is the normal rate.", false)?;
tts.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts.supported_features();
if pitch {
let original_pitch = tts.get_pitch()?;
tts.set_pitch(tts.max_pitch())?;
tts.speak("This is high-pitch.", false)?;
tts.set_pitch(tts.min_pitch())?;
tts.speak("This is low pitch.", false)?;
tts.set_pitch(tts.normal_pitch())?;
tts.speak("This is normal pitch.", false)?;
tts.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts.supported_features();
if volume {
let original_volume = tts.get_volume()?;
tts.set_volume(tts.max_volume())?;
tts.speak("This is loud!", false)?;
tts.set_volume(tts.min_volume())?;
tts.speak("This is quiet.", false)?;
tts.set_volume(tts.normal_volume())?;
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
tts.speak("Goodbye.", false)?;
loop {}
Ok(())
}
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
run().expect("Failed to run");
}

View File

@ -1,89 +0,0 @@
use std::io;
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let mut tts_clone = tts.clone();
drop(tts);
let Features { is_speaking, .. } = tts_clone.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts_clone.is_speaking()?);
}
tts_clone.speak("Hello, world.", false)?;
let Features { rate, .. } = tts_clone.supported_features();
if rate {
let original_rate = tts_clone.get_rate()?;
tts_clone.speak(format!("Current rate: {}", original_rate), false)?;
tts_clone.set_rate(tts_clone.max_rate())?;
tts_clone.speak("This is very fast.", false)?;
tts_clone.set_rate(tts_clone.min_rate())?;
tts_clone.speak("This is very slow.", false)?;
tts_clone.set_rate(tts_clone.normal_rate())?;
tts_clone.speak("This is the normal rate.", false)?;
tts_clone.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts_clone.supported_features();
if pitch {
let original_pitch = tts_clone.get_pitch()?;
tts_clone.set_pitch(tts_clone.max_pitch())?;
tts_clone.speak("This is high-pitch.", false)?;
tts_clone.set_pitch(tts_clone.min_pitch())?;
tts_clone.speak("This is low pitch.", false)?;
tts_clone.set_pitch(tts_clone.normal_pitch())?;
tts_clone.speak("This is normal pitch.", false)?;
tts_clone.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts_clone.supported_features();
if volume {
let original_volume = tts_clone.get_volume()?;
tts_clone.set_volume(tts_clone.max_volume())?;
tts_clone.speak("This is loud!", false)?;
tts_clone.set_volume(tts_clone.min_volume())?;
tts_clone.speak("This is quiet.", false)?;
tts_clone.set_volume(tts_clone.normal_volume())?;
tts_clone.speak("This is normal volume.", false)?;
tts_clone.set_volume(original_volume)?;
}
tts_clone.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

View File

@ -11,31 +11,7 @@ use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
let mut tts = TTS::default()?;
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();
if rate {
@ -71,27 +47,8 @@ fn main() -> Result<(), Error> {
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
let Features { voice, .. } = tts.supported_features();
if voice {
let voices = tts.voices()?;
println!("Available voices:\n===");
for v in &voices {
println!("{:?}", v);
}
let Features { get_voice, .. } = tts.supported_features();
let original_voice = if get_voice { tts.voice()? } else { None };
for v in &voices {
tts.set_voice(v)?;
tts.speak(format!("This is {}.", v.name()), false)?;
}
if let Some(original_voice) = original_voice {
tts.set_voice(&original_voice)?;
}
}
tts.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };

View File

@ -1,14 +0,0 @@
use std::io;
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
println!("Press Enter and wait for speech.");
loop {
let mut _input = String::new();
io::stdin().read_line(&mut _input)?;
tts.speak("Hello, world.", true)?;
}
}

View File

@ -1,32 +0,0 @@
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSDefaultRunLoopMode;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::class;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use std::{thread, time};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut phrase = 1;
loop {
tts.speak(format!("Phrase {}", phrase), false)?;
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let date: id = msg_send![class!(NSDate), distantFuture];
let _: () = msg_send![run_loop, runMode:NSDefaultRunLoopMode beforeDate:date];
}
}
let time = time::Duration::from_secs(5);
thread::sleep(time);
phrase += 1;
}
}

View File

@ -1,2 +0,0 @@
[build]
target = "wasm32-unknown-unknown"

View File

@ -1 +0,0 @@
dist

View File

@ -1,13 +0,0 @@
[package]
name = "web"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
console_log = "0.2"
log = "0.4"
seed = "0.9"
tts = { path = "../.." }

View File

@ -1,12 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Example</title>
</head>
<body>
<div id="app"></div>
</body>
</html>

View File

@ -1,157 +0,0 @@
#![allow(clippy::wildcard_imports)]
use seed::{prelude::*, *};
use tts::Tts;
#[derive(Clone)]
struct Model {
text: String,
tts: Tts,
}
#[derive(Clone)]
enum Msg {
TextChanged(String),
RateChanged(String),
PitchChanged(String),
VolumeChanged(String),
VoiceChanged(String),
Speak,
}
fn init(_: Url, _: &mut impl Orders<Msg>) -> Model {
let mut tts = Tts::default().unwrap();
if tts.voices().unwrap().iter().len() > 0 {
if tts.voice().unwrap().is_none() {
tts.set_voice(tts.voices().unwrap().first().unwrap())
.expect("Failed to set voice");
}
}
Model {
text: "Hello, world. This is a test of the current text-to-speech values.".into(),
tts,
}
}
fn update(msg: Msg, model: &mut Model, _: &mut impl Orders<Msg>) {
use Msg::*;
match msg {
TextChanged(text) => model.text = text,
RateChanged(rate) => {
let rate = rate.parse::<f32>().unwrap();
model.tts.set_rate(rate).unwrap();
}
PitchChanged(pitch) => {
let pitch = pitch.parse::<f32>().unwrap();
model.tts.set_pitch(pitch).unwrap();
}
VolumeChanged(volume) => {
let volume = volume.parse::<f32>().unwrap();
model.tts.set_volume(volume).unwrap();
}
VoiceChanged(voice) => {
for v in model.tts.voices().unwrap() {
if v.id() == voice {
model.tts.set_voice(&v).unwrap();
}
}
}
Speak => {
model.tts.speak(&model.text, false).unwrap();
}
}
}
fn view(model: &Model) -> Node<Msg> {
let should_show_voices = model.tts.voices().unwrap().iter().len() > 0;
form![
div![label![
"Text to speak",
input![
attrs! {
At::Value => model.text,
At::AutoFocus => AtValue::None,
},
input_ev(Ev::Input, Msg::TextChanged)
],
],],
div![label![
"Rate",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_rate().unwrap(),
At::Min => model.tts.min_rate(),
At::Max => model.tts.max_rate()
},
input_ev(Ev::Input, Msg::RateChanged)
],
],],
div![label![
"Pitch",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_pitch().unwrap(),
At::Min => model.tts.min_pitch(),
At::Max => model.tts.max_pitch()
},
input_ev(Ev::Input, Msg::PitchChanged)
],
],],
div![label![
"Volume",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_volume().unwrap(),
At::Min => model.tts.min_volume(),
At::Max => model.tts.max_volume()
},
input_ev(Ev::Input, Msg::VolumeChanged)
],
],],
if should_show_voices {
div![
label!["Voice"],
select![
model.tts.voices().unwrap().iter().map(|v| {
let selected = if let Some(voice) = model.tts.voice().unwrap() {
voice.id() == v.id()
} else {
false
};
option![
attrs! {
At::Value => v.id()
},
if selected {
attrs! {
At::Selected => selected
}
} else {
attrs! {}
},
v.name()
]
}),
input_ev(Ev::Change, Msg::VoiceChanged)
]
]
} else {
div!["Your browser does not seem to support selecting voices."]
},
button![
"Speak",
ev(Ev::Click, |e| {
e.prevent_default();
Msg::Speak
}),
],
]
}
fn main() {
console_log::init().expect("Error initializing logger");
App::start("app", init, update, view);
}

View File

@ -1,402 +0,0 @@
#[cfg(target_os = "android")]
use std::{
collections::HashSet,
ffi::{CStr, CString},
os::raw::c_void,
sync::{Mutex, RwLock},
thread,
time::{Duration, Instant},
};
use jni::{
objects::{GlobalRef, JObject, JString},
sys::{jfloat, jint, JNI_VERSION_1_6},
JNIEnv, JavaVM,
};
use lazy_static::lazy_static;
use log::{error, info};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
lazy_static! {
static ref BRIDGE: Mutex<Option<GlobalRef>> = Mutex::new(None);
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref PENDING_INITIALIZATIONS: RwLock<HashSet<u64>> = RwLock::new(HashSet::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "system" fn JNI_OnLoad(vm: JavaVM, _: *mut c_void) -> jint {
let mut env = vm.get_env().expect("Cannot get reference to the JNIEnv");
let b = env
.find_class("rs/tts/Bridge")
.expect("Failed to find `Bridge`");
let b = env
.new_global_ref(b)
.expect("Failed to create `Bridge` `GlobalRef`");
let mut bridge = BRIDGE.lock().unwrap();
*bridge = Some(b);
JNI_VERSION_1_6
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onInit(mut env: JNIEnv, obj: JObject, status: jint) {
let id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).remove(&id);
if status != 0 {
error!("Failed to initialize TTS engine");
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStart(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_begin.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStop(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onDone(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onError(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[derive(Clone)]
pub(crate) struct Android {
id: BackendId,
tts: GlobalRef,
rate: f32,
pitch: f32,
}
impl Android {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing Android backend");
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = *backend_id;
let id = BackendId::Android(bid);
*backend_id += 1;
drop(backend_id);
let ctx = ndk_context::android_context();
let vm = unsafe { jni::JavaVM::from_raw(ctx.vm().cast()) }?;
let context = unsafe { JObject::from_raw(ctx.context().cast()) };
let mut env = vm.attach_current_thread_permanently()?;
let bridge = BRIDGE.lock().unwrap();
if let Some(bridge) = &*bridge {
let bridge = env.new_object(bridge, "(I)V", &[(bid as jint).into()])?;
let tts = env.new_object(
"android/speech/tts/TextToSpeech",
"(Landroid/content/Context;Landroid/speech/tts/TextToSpeech$OnInitListener;)V",
&[(&context).into(), (&bridge).into()],
)?;
env.call_method(
&tts,
"setOnUtteranceProgressListener",
"(Landroid/speech/tts/UtteranceProgressListener;)I",
&[(&bridge).into()],
)?;
{
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).insert(bid);
}
let tts = env.new_global_ref(tts)?;
// This hack makes my brain bleed.
const MAX_WAIT_TIME: Duration = Duration::from_millis(500);
let start = Instant::now();
// Wait a max of 500ms for initialization, then return an error to avoid hanging.
loop {
{
let pending = PENDING_INITIALIZATIONS.read().unwrap();
if !(*pending).contains(&bid) {
break;
}
if start.elapsed() > MAX_WAIT_TIME {
return Err(Error::OperationFailed);
}
}
thread::sleep(Duration::from_millis(5));
}
Ok(Self {
id,
tts,
rate: 1.,
pitch: 1.,
})
} else {
Err(Error::NoneError)
}
}
fn vm() -> Result<JavaVM, jni::errors::Error> {
let ctx = ndk_context::android_context();
unsafe { jni::JavaVM::from_raw(ctx.vm().cast()) }
}
}
impl Backend for Android {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: false,
is_speaking: true,
utterance_callbacks: true,
voice: false,
get_voice: false,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let text = env.new_string(text)?;
let queue_mode = if interrupt { 0 } else { 1 };
let mut utterance_id = NEXT_UTTERANCE_ID.lock().unwrap();
let uid = *utterance_id;
*utterance_id += 1;
drop(utterance_id);
let id = UtteranceId::Android(uid);
let uid = env.new_string(uid.to_string())?;
let rv = env.call_method(
tts,
"speak",
"(Ljava/lang/CharSequence;ILandroid/os/Bundle;Ljava/lang/String;)I",
&[
(&text).into(),
queue_mode.into(),
(&JObject::null()).into(),
(&uid).into(),
],
)?;
let rv = rv.i()?;
if rv == 0 {
Ok(Some(id))
} else {
Err(Error::OperationFailed)
}
}
fn stop(&mut self) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "stop", "()I", &[])?;
let rv = rv.i()?;
if rv == 0 {
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_rate(&self) -> f32 {
0.1
}
fn max_rate(&self) -> f32 {
10.
}
fn normal_rate(&self) -> f32 {
1.
}
fn get_rate(&self) -> Result<f32, Error> {
Ok(self.rate)
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rate = rate as jfloat;
let rv = env.call_method(tts, "setSpeechRate", "(F)I", &[rate.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.rate = rate;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_pitch(&self) -> f32 {
0.1
}
fn max_pitch(&self) -> f32 {
2.
}
fn normal_pitch(&self) -> f32 {
1.
}
fn get_pitch(&self) -> Result<f32, Error> {
Ok(self.pitch)
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let pitch = pitch as jfloat;
let rv = env.call_method(tts, "setPitch", "(F)I", &[pitch.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.pitch = pitch;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_volume(&self) -> f32 {
todo!()
}
fn max_volume(&self) -> f32 {
todo!()
}
fn normal_volume(&self) -> f32 {
todo!()
}
fn get_volume(&self) -> Result<f32, Error> {
todo!()
}
fn set_volume(&mut self, _volume: f32) -> Result<(), Error> {
todo!()
}
fn is_speaking(&self) -> Result<bool, Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "isSpeaking", "()Z", &[])?;
let rv = rv.z()?;
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}

View File

@ -1,4 +1,5 @@
#[cfg(target_os = "macos")]
#[link(name = "AppKit", kind = "framework")]
use cocoa_foundation::base::{id, nil};
use cocoa_foundation::foundation::NSString;
use log::{info, trace};
@ -6,18 +7,17 @@ use objc::declare::ClassDecl;
use objc::runtime::*;
use objc::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
use crate::{Backend, Error, Features};
#[derive(Clone, Debug)]
pub(crate) struct AppKit(*mut Object, *mut Object);
pub struct AppKit(*mut Object, *mut Object);
impl AppKit {
pub(crate) fn new() -> Result<Self, Error> {
pub fn new() -> Self {
info!("Initializing AppKit backend");
unsafe {
let obj: *mut Object = msg_send![class!(NSSpeechSynthesizer), new];
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
let mut decl =
ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject)).unwrap();
decl.add_ivar::<id>("synth");
decl.add_ivar::<id>("strings");
@ -46,15 +46,13 @@ impl AppKit {
) {
unsafe {
let strings: id = *this.get_ivar("strings");
let str: id = msg_send!(strings, firstObject);
let _: () = msg_send![str, release];
let _: () = msg_send!(strings, removeObjectAtIndex:0);
let count: u32 = msg_send![strings, count];
if count > 0 {
let str: id = msg_send!(strings, firstObject);
let _: () = msg_send![str, release];
let _: () = msg_send!(strings, removeObjectAtIndex:0);
if count > 1 {
let str: id = msg_send!(strings, firstObject);
let _: BOOL = msg_send![synth, startSpeakingString: str];
}
let _: BOOL = msg_send![synth, startSpeakingString: str];
}
}
}
@ -83,37 +81,27 @@ impl AppKit {
let delegate_class = decl.register();
let delegate_obj: *mut Object = msg_send![delegate_class, new];
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("synth", obj);
delegate_obj.as_mut().unwrap().set_ivar("synth", obj);
let strings: id = msg_send![class!(NSMutableArray), new];
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("strings", strings);
delegate_obj.as_mut().unwrap().set_ivar("strings", strings);
let _: Object = msg_send![obj, setDelegate: delegate_obj];
Ok(AppKit(obj, delegate_obj))
AppKit(obj, delegate_obj)
}
}
}
impl Backend for AppKit {
fn id(&self) -> Option<BackendId> {
None
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: false,
volume: true,
is_speaking: true,
..Default::default()
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
self.stop()?;
@ -122,7 +110,7 @@ impl Backend for AppKit {
let str = NSString::alloc(nil).init_str(text);
let _: () = msg_send![self.1, enqueueAndSpeak: str];
}
Ok(None)
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
@ -205,19 +193,7 @@ impl Backend for AppKit {
fn is_speaking(&self) -> Result<bool, Error> {
let is_speaking: i8 = unsafe { msg_send![self.0, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
Ok(is_speaking == YES)
}
}

View File

@ -1,165 +1,36 @@
#[cfg(any(target_os = "macos", target_os = "ios"))]
use std::sync::Mutex;
use cocoa_foundation::base::{id, nil, NO};
#[link(name = "AVFoundation", kind = "framework")]
use cocoa_foundation::base::{id, nil};
use cocoa_foundation::foundation::NSString;
use core_foundation::array::CFArray;
use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use lazy_static::lazy_static;
use log::{info, trace};
use objc::runtime::{Object, Sel};
use objc::{class, declare::ClassDecl, msg_send, sel, sel_impl};
use oxilangtag::LanguageTag;
use objc::runtime::*;
use objc::*;
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
use crate::{Backend, Error, Features};
#[derive(Clone, Debug)]
pub(crate) struct AvFoundation {
id: BackendId,
delegate: *mut Object,
pub struct AvFoundation {
synth: *mut Object,
rate: f32,
volume: f32,
pitch: f32,
voice: Option<Voice>,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
}
impl AvFoundation {
pub(crate) fn new() -> Result<Self, Error> {
pub fn new() -> Self {
info!("Initializing AVFoundation backend");
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
decl.add_ivar::<u64>("backend_id");
extern "C" fn speech_synthesizer_did_start_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_start_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
trace!("Calling utterance_begin");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_start_speech_utterance");
}
extern "C" fn speech_synthesizer_did_finish_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_finish_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
trace!("Calling utterance_end");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_finish_speech_utterance");
}
extern "C" fn speech_synthesizer_did_cancel_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_cancel_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
trace!("Calling utterance_stop");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_cancel_speech_utterance");
}
unsafe {
decl.add_method(
sel!(speechSynthesizer:didStartSpeechUtterance:),
speech_synthesizer_did_start_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
decl.add_method(
sel!(speechSynthesizer:didFinishSpeechUtterance:),
speech_synthesizer_did_finish_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
decl.add_method(
sel!(speechSynthesizer:didCancelSpeechUtterance:),
speech_synthesizer_did_cancel_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
}
let delegate_class = decl.register();
let delegate_obj: *mut Object = unsafe { msg_send![delegate_class, new] };
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let rv = unsafe {
trace!("Creating synth");
let synth: *mut Object = msg_send![class!(AVSpeechSynthesizer), new];
trace!("Allocated {:?}", synth);
delegate_obj
.as_mut()
.unwrap()
.set_ivar("backend_id", *backend_id);
trace!("Set backend ID in delegate");
let _: () = msg_send![synth, setDelegate: delegate_obj];
trace!("Assigned delegate: {:?}", delegate_obj);
AvFoundation {
id: BackendId::AvFoundation(*backend_id),
delegate: delegate_obj,
synth,
synth: synth,
rate: 0.5,
volume: 1.,
pitch: 1.,
voice: None,
}
};
*backend_id += 1;
Ok(rv)
}
}
}
impl Backend for AvFoundation {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
@ -167,43 +38,24 @@ impl Backend for AvFoundation {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt && self.is_speaking()? {
if interrupt {
self.stop()?;
}
let mut utterance: id;
unsafe {
trace!("Allocating utterance string");
let mut str = NSString::alloc(nil);
str = str.init_str(text);
trace!("Allocating utterance");
utterance = msg_send![class!(AVSpeechUtterance), alloc];
trace!("Initializing utterance");
utterance = msg_send![utterance, initWithString: str];
trace!("Setting rate to {}", self.rate);
let str = NSString::alloc(nil).init_str(text);
let utterance: id = msg_send![class!(AVSpeechUtterance), alloc];
let _: () = msg_send![utterance, initWithString: str];
let _: () = msg_send![utterance, setRate: self.rate];
trace!("Setting volume to {}", self.volume);
let _: () = msg_send![utterance, setVolume: self.volume];
trace!("Setting pitch to {}", self.pitch);
let _: () = msg_send![utterance, setPitchMultiplier: self.pitch];
if let Some(voice) = &self.voice {
let mut vid = NSString::alloc(nil);
vid = vid.init_str(&voice.id());
let v: id = msg_send![class!(AVSpeechSynthesisVoice), voiceWithIdentifier: vid];
let _: () = msg_send![utterance, setVoice: v];
}
trace!("Enqueuing");
let _: () = msg_send![self.synth, speakUtterance: utterance];
trace!("Done queuing");
}
Ok(Some(UtteranceId::AvFoundation(utterance)))
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
@ -253,7 +105,6 @@ impl Backend for AvFoundation {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
trace!("set_pitch({})", pitch);
self.pitch = pitch;
Ok(())
}
@ -275,65 +126,19 @@ impl Backend for AvFoundation {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
trace!("set_volume({})", volume);
self.volume = volume;
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
trace!("is_speaking()");
let is_speaking: i8 = unsafe { msg_send![self.synth, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let voices: CFArray = unsafe {
CFArray::wrap_under_get_rule(msg_send![class!(AVSpeechSynthesisVoice), speechVoices])
};
let rv = voices
.iter()
.map(|v| {
let id: CFString = unsafe {
CFString::wrap_under_get_rule(msg_send![*v as *const Object, identifier])
};
let name: CFString =
unsafe { CFString::wrap_under_get_rule(msg_send![*v as *const Object, name]) };
let gender: i64 = unsafe { msg_send![*v as *const Object, gender] };
let gender = match gender {
1 => Some(Gender::Male),
2 => Some(Gender::Female),
_ => None,
};
let language: CFString = unsafe {
CFString::wrap_under_get_rule(msg_send![*v as *const Object, language])
};
let language = language.to_string();
let language = LanguageTag::parse(language).unwrap();
Voice {
id: id.to_string(),
name: name.to_string(),
gender,
language,
}
})
.collect();
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
self.voice = Some(voice.clone());
Ok(())
Ok(is_speaking == 1)
}
}
impl Drop for AvFoundation {
fn drop(&mut self) {
unsafe {
let _: Object = msg_send![self.delegate, release];
let _: Object = msg_send![self.synth, release];
}
}
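For orientation, a condensed, hypothetical sketch (not part of the diff) of the objc pattern the delegate code above relies on: declare a class with a u64 ivar, register an extern "C" function as a selector, then instantiate it. The class name, selector, and handler below are invented.

use objc::declare::ClassDecl;
use objc::runtime::{Object, Sel};
use objc::{class, msg_send, sel, sel_impl};

// Invented handler: reads the ivar the same way the delegate methods above do.
extern "C" fn on_event(this: &Object, _cmd: Sel, _sender: *const Object) {
    let backend_id: u64 = unsafe { *this.get_ivar("backend_id") };
    println!("event for backend {backend_id}");
}

fn make_delegate() -> *mut Object {
    let mut decl = ClassDecl::new("ExampleDelegate", class!(NSObject)).unwrap();
    decl.add_ivar::<u64>("backend_id");
    unsafe {
        decl.add_method(
            sel!(somethingHappened:),
            on_event as extern "C" fn(&Object, Sel, *const Object),
        );
    }
    let delegate_class = decl.register();
    unsafe { msg_send![delegate_class, new] }
}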

View File

@ -1,11 +1,11 @@
#[cfg(target_os = "linux")]
mod speech_dispatcher;
#[cfg(all(windows, feature = "tolk"))]
#[cfg(windows)]
mod tolk;
#[cfg(windows)]
mod winrt;
pub(crate) mod winrt;
#[cfg(target_arch = "wasm32")]
mod web;
@ -16,26 +16,17 @@ mod appkit;
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod av_foundation;
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "linux")]
pub(crate) use self::speech_dispatcher::*;
#[cfg(all(windows, feature = "tolk"))]
pub(crate) use self::tolk::*;
pub use self::speech_dispatcher::*;
#[cfg(windows)]
pub(crate) use self::winrt::*;
pub use self::tolk::*;
#[cfg(target_arch = "wasm32")]
pub(crate) use self::web::*;
pub use self::web::*;
#[cfg(target_os = "macos")]
pub(crate) use self::appkit::*;
pub use self::appkit::*;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::av_foundation::*;
#[cfg(target_os = "android")]
pub(crate) use self::android::*;
pub use self::av_foundation::*;

View File

@ -1,116 +1,49 @@
#[cfg(target_os = "linux")]
use std::{collections::HashMap, sync::Mutex};
use lazy_static::*;
use log::{info, trace};
use oxilangtag::LanguageTag;
use speech_dispatcher::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
use crate::{Backend, Error, Features};
#[derive(Clone, Debug)]
pub(crate) struct SpeechDispatcher(Connection);
lazy_static! {
static ref SPEAKING: Mutex<HashMap<usize, bool>> = {
let m: HashMap<usize, bool> = HashMap::new();
Mutex::new(m)
};
}
pub struct SpeechDispatcher(Connection);
impl SpeechDispatcher {
pub(crate) fn new() -> std::result::Result<Self, Error> {
pub fn new() -> Self {
info!("Initializing SpeechDispatcher backend");
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded)?;
let sd = SpeechDispatcher(connection);
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(sd.0.client_id(), false);
sd.0.on_begin(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_begin.as_mut() {
f(utterance_id);
}
})));
sd.0.on_end(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
})));
sd.0.on_cancel(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
})));
sd.0.on_pause(Some(Box::new(|_msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
})));
sd.0.on_resume(Some(Box::new(|_msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
})));
Ok(sd)
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Single);
SpeechDispatcher(connection)
}
}
impl Backend for SpeechDispatcher {
fn id(&self) -> Option<BackendId> {
Some(BackendId::SpeechDispatcher(self.0.client_id()))
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
is_speaking: false,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
self.stop()?;
}
let single_char = text.to_string().capacity() == 1;
if single_char {
self.0.set_punctuation(Punctuation::All)?;
self.0.set_punctuation(Punctuation::All);
}
let id = self.0.say(Priority::Important, text);
self.0.say(Priority::Important, text);
if single_char {
self.0.set_punctuation(Punctuation::None)?;
}
if let Some(id) = id {
Ok(Some(UtteranceId::SpeechDispatcher(id)))
} else {
Err(Error::NoneError)
self.0.set_punctuation(Punctuation::None);
}
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
self.0.cancel()?;
self.0.cancel();
Ok(())
}
@ -131,7 +64,7 @@ impl Backend for SpeechDispatcher {
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
self.0.set_voice_rate(rate as i32)?;
self.0.set_voice_rate(rate as i32);
Ok(())
}
@ -152,7 +85,7 @@ impl Backend for SpeechDispatcher {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
self.0.set_voice_pitch(pitch as i32)?;
self.0.set_voice_pitch(pitch as i32);
Ok(())
}
@ -173,50 +106,11 @@ impl Backend for SpeechDispatcher {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
self.0.set_volume(volume as i32)?;
self.0.set_volume(volume as i32);
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
let speaking = SPEAKING.lock().unwrap();
let is_speaking = speaking.get(&self.0.client_id()).unwrap();
Ok(*is_speaking)
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let rv = self
.0
.list_synthesis_voices()?
.iter()
.filter(|v| LanguageTag::parse(v.language.clone()).is_ok())
.map(|v| Voice {
id: v.name.clone(),
name: v.name.clone(),
gender: None,
language: LanguageTag::parse(v.language.clone()).unwrap(),
})
.collect::<Vec<Voice>>();
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in self.0.list_synthesis_voices()? {
if v.name == voice.name {
self.0.set_synthesis_voice(&v)?;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for SpeechDispatcher {
fn drop(&mut self) {
let mut speaking = SPEAKING.lock().unwrap();
speaking.remove(&self.0.client_id());
}
}
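A hypothetical Linux-only usage sketch against the master-branch crate API: the on_begin/on_end callbacks above keep the SPEAKING map current, which is what is_speaking() reads, so a caller can simply poll it.

use std::{thread, time::Duration};
use tts::{Backends, Error, Tts};

fn speak_and_wait() -> Result<(), Error> {
    let mut tts = Tts::new(Backends::SpeechDispatcher)?;
    tts.speak("Hello from Speech Dispatcher", false)?;
    // Poll until the on_end/on_cancel callbacks flip the speaking flag.
    while tts.is_speaking()? {
        thread::sleep(Duration::from_millis(50));
    }
    Ok(())
}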

View File

@ -1,16 +1,13 @@
#[cfg(all(windows, feature = "tolk"))]
use std::sync::Arc;
#[cfg(windows)]
use log::{info, trace};
use tolk::Tolk as TolkPtr;
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
use crate::{Backend, Error, Features};
#[derive(Clone, Debug)]
pub(crate) struct Tolk(Arc<TolkPtr>);
pub struct Tolk(TolkPtr);
impl Tolk {
pub(crate) fn new() -> Option<Self> {
pub fn new() -> Option<Self> {
info!("Initializing Tolk backend");
let tolk = TolkPtr::new();
if tolk.detect_screen_reader().is_some() {
@ -22,21 +19,20 @@ impl Tolk {
}
impl Backend for Tolk {
fn id(&self) -> Option<BackendId> {
None
}
fn supported_features(&self) -> Features {
Features {
stop: true,
..Default::default()
rate: false,
pitch: false,
volume: false,
is_speaking: false,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
self.0.speak(text, interrupt);
Ok(None)
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
@ -108,16 +104,4 @@ impl Backend for Tolk {
fn is_speaking(&self) -> Result<bool, Error> {
unimplemented!()
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}
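Because Tolk advertises only stop (the rest comes from ..Default::default()), callers are expected to consult supported_features() before touching optional capabilities. A hypothetical sketch against the master-branch API; the helper name is invented.

use tts::{Error, Features, Tts};

// Invented helper: only adjust the rate when the active backend supports it.
fn maybe_slow_down(tts: &mut Tts) -> Result<(), Error> {
    let Features { rate, .. } = tts.supported_features();
    if rate {
        let min = tts.min_rate();
        tts.set_rate(min)?;
    }
    Ok(())
}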

View File

@ -1,54 +1,27 @@
#[cfg(target_arch = "wasm32")]
use std::sync::Mutex;
use lazy_static::lazy_static;
use log::{info, trace};
use oxilangtag::LanguageTag;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{
SpeechSynthesisErrorCode, SpeechSynthesisErrorEvent, SpeechSynthesisEvent,
SpeechSynthesisUtterance, SpeechSynthesisVoice,
};
use web_sys::SpeechSynthesisUtterance;
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
use crate::{Backend, Error, Features};
#[derive(Clone, Debug)]
pub struct Web {
id: BackendId,
rate: f32,
pitch: f32,
volume: f32,
voice: Option<SpeechSynthesisVoice>,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref UTTERANCE_MAPPINGS: Mutex<Vec<(BackendId, UtteranceId)>> = Mutex::new(Vec::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
impl Web {
pub fn new() -> Result<Self, Error> {
info!("Initializing Web backend");
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let rv = Web {
id: BackendId::Web(*backend_id),
Ok(Web {
rate: 1.,
pitch: 1.,
volume: 1.,
voice: None,
};
*backend_id += 1;
Ok(rv)
})
}
}
impl Backend for Web {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
@ -56,69 +29,23 @@ impl Backend for Web {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
let utterance = SpeechSynthesisUtterance::new_with_text(text).unwrap();
utterance.set_rate(self.rate);
utterance.set_pitch(self.pitch);
utterance.set_volume(self.volume);
if self.voice.is_some() {
utterance.set_voice(self.voice.as_ref());
}
let id = self.id().unwrap();
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::Web(*uid);
*uid += 1;
drop(uid);
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.push((self.id, utterance_id));
drop(mappings);
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_begin.as_mut() {
f(utterance_id);
}
}) as Box<dyn Fn(_)>);
utterance.set_onstart(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_end.as_mut() {
f(utterance_id);
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onend(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |evt: SpeechSynthesisErrorEvent| {
if evt.error() == SpeechSynthesisErrorCode::Canceled {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_stop.as_mut() {
f(utterance_id);
}
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onerror(Some(callback.as_ref().unchecked_ref()));
if interrupt {
self.stop()?;
}
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
speech_synthesis.speak(&utterance);
Ok(Some(utterance_id))
} else {
Err(Error::NoneError)
}
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
@ -204,72 +131,4 @@ impl Backend for Web {
Err(Error::NoneError)
}
}
fn voice(&self) -> Result<Option<Voice>, Error> {
if let Some(voice) = &self.voice {
Ok(Some(voice.clone().into()))
} else {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for voice in speech_synthesis.get_voices().iter() {
let voice: SpeechSynthesisVoice = voice.into();
if voice.default() {
return Ok(Some(voice.into()));
}
}
} else {
return Err(Error::NoneError);
}
Ok(None)
}
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
let mut rv: Vec<Voice> = vec![];
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
rv.push(v.into());
}
Ok(rv)
} else {
Err(Error::NoneError)
}
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
if v.voice_uri() == voice.id {
self.voice = Some(v);
return Ok(());
}
}
Err(Error::OperationFailed)
} else {
Err(Error::NoneError)
}
}
}
impl Drop for Web {
fn drop(&mut self) {
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.0 != self.id);
}
}
impl From<SpeechSynthesisVoice> for Voice {
fn from(other: SpeechSynthesisVoice) -> Self {
let language = LanguageTag::parse(other.lang()).unwrap();
Voice {
id: other.voice_uri(),
name: other.name(),
gender: None,
language,
}
}
}
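The event wiring above uses a standard wasm-bindgen pattern: wrap a Rust closure, hand the browser an unchecked function reference, and keep the Closure alive. A condensed, hypothetical sketch of that pattern (the helper is invented and not part of this crate):

use wasm_bindgen::{closure::Closure, JsCast};
use web_sys::{SpeechSynthesisEvent, SpeechSynthesisUtterance};

// Invented helper: attach a Rust closure as the utterance's onstart handler.
fn attach_onstart(utterance: &SpeechSynthesisUtterance, mut on_start: impl FnMut() + 'static) {
    let cb = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| on_start())
        as Box<dyn FnMut(SpeechSynthesisEvent)>);
    utterance.set_onstart(Some(cb.as_ref().unchecked_ref()));
    // Keep the closure alive for as long as the browser may still call it.
    cb.forget();
}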

View File

@ -1,150 +1,50 @@
#[cfg(windows)]
use std::{
collections::{HashMap, VecDeque},
sync::Mutex,
};
use lazy_static::lazy_static;
use log::{info, trace};
use oxilangtag::LanguageTag;
use windows::{
Foundation::TypedEventHandler,
Media::{
Core::MediaSource,
Playback::{MediaPlayer, MediaPlayerAudioCategory},
SpeechSynthesis::{SpeechSynthesizer, VoiceGender, VoiceInformation},
},
use tts_winrt_bindings::windows::media::core::MediaSource;
use tts_winrt_bindings::windows::media::playback::{
MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState, MediaPlayer,
};
use tts_winrt_bindings::windows::media::speech_synthesis::SpeechSynthesizer;
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
use crate::{Backend, Error, Features};
impl From<windows::core::Error> for Error {
fn from(e: windows::core::Error) -> Self {
Error::WinRt(e)
impl From<winrt::Error> for Error {
fn from(e: winrt::Error) -> Self {
Error::WinRT(e)
}
}
#[derive(Clone)]
pub struct WinRt {
id: BackendId,
pub struct WinRT {
synth: SpeechSynthesizer,
player: MediaPlayer,
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
playback_list: MediaPlaybackList,
}
struct Utterance {
id: UtteranceId,
text: String,
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
static ref BACKEND_TO_SPEECH_SYNTHESIZER: Mutex<HashMap<BackendId, SpeechSynthesizer>> = {
let v: HashMap<BackendId, SpeechSynthesizer> = HashMap::new();
Mutex::new(v)
};
static ref BACKEND_TO_MEDIA_PLAYER: Mutex<HashMap<BackendId, MediaPlayer>> = {
let v: HashMap<BackendId, MediaPlayer> = HashMap::new();
Mutex::new(v)
};
static ref UTTERANCES: Mutex<HashMap<BackendId, VecDeque<Utterance>>> = {
let utterances: HashMap<BackendId, VecDeque<Utterance>> = HashMap::new();
Mutex::new(utterances)
};
}
impl WinRt {
impl WinRT {
pub fn new() -> std::result::Result<Self, Error> {
info!("Initializing WinRT backend");
let synth = SpeechSynthesizer::new()?;
let playback_list = MediaPlaybackList::new()?;
let player = MediaPlayer::new()?;
player.SetRealTimePlayback(true)?;
player.SetAudioCategory(MediaPlayerAudioCategory::Speech)?;
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = BackendId::WinRt(*backend_id);
*backend_id += 1;
drop(backend_id);
{
let mut utterances = UTTERANCES.lock().unwrap();
utterances.insert(bid, VecDeque::new());
}
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.insert(bid, player.clone());
drop(backend_to_media_player);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.insert(bid, synth.clone());
drop(backend_to_speech_synthesizer);
let bid_clone = bid;
player.MediaEnded(&TypedEventHandler::new(
move |sender: &Option<MediaPlayer>, _args| {
if let Some(sender) = sender {
let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
let id = backend_to_media_player.iter().find(|v| v.1 == sender);
if let Some((id, _)) = id {
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(id) {
if let Some(utterance) = utterances.pop_front() {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
callback(utterance.id);
}
if let Some(utterance) = utterances.front() {
let backend_to_speech_synthesizer =
BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
let id = backend_to_speech_synthesizer
.iter()
.find(|v| *v.0 == bid_clone);
if let Some((_, tts)) = id {
tts.Options()?.SetSpeakingRate(utterance.rate.into())?;
tts.Options()?.SetAudioPitch(utterance.pitch.into())?;
tts.Options()?.SetAudioVolume(utterance.volume.into())?;
tts.SetVoice(&utterance.voice)?;
let text = &utterance.text;
let stream =
tts.SynthesizeTextToStreamAsync(&text.into())?.get()?;
let content_type = stream.ContentType()?;
let source =
MediaSource::CreateFromStream(&stream, &content_type)?;
sender.SetSource(&source)?;
sender.Play()?;
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance.id);
}
}
}
}
}
}
}
Ok(())
},
))?;
player.set_auto_play(true)?;
player.set_source(&playback_list)?;
Ok(Self {
id: bid,
synth,
player,
rate: 1.,
pitch: 1.,
volume: 1.,
voice: SpeechSynthesizer::DefaultVoice()?,
synth: SpeechSynthesizer::new()?,
player: player,
playback_list: playback_list,
})
}
fn reinit_player(&mut self) -> std::result::Result<(), Error> {
self.playback_list = MediaPlaybackList::new()?;
self.player = MediaPlayer::new()?;
self.player.set_auto_play(true)?;
self.player.set_source(&self.playback_list)?;
Ok(())
}
}
impl Backend for WinRt {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
impl Backend for WinRT {
fn supported_features(&self) -> Features {
Features {
stop: true,
@ -152,83 +52,36 @@ impl Backend for WinRt {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
fn speak(
&mut self,
text: &str,
interrupt: bool,
) -> std::result::Result<Option<UtteranceId>, Error> {
if interrupt && self.is_speaking()? {
fn speak(&mut self, text: &str, interrupt: bool) -> std::result::Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
self.stop()?;
}
let utterance_id = {
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::WinRt(*uid);
*uid += 1;
utterance_id
};
let mut no_utterances = false;
{
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(&self.id) {
no_utterances = utterances.is_empty();
let utterance = Utterance {
id: utterance_id,
text: text.into(),
rate: self.rate,
pitch: self.pitch,
volume: self.volume,
voice: self.voice.clone(),
};
utterances.push_back(utterance);
let stream = self.synth.synthesize_text_to_stream_async(text)?.get()?;
let content_type = stream.content_type()?;
let source = MediaSource::create_from_stream(stream, content_type)?;
let item = MediaPlaybackItem::create(source)?;
let state = self.player.playback_session()?.playback_state()?;
if state == MediaPlaybackState::Paused {
let index = self.playback_list.current_item_index()?;
let total = self.playback_list.items()?.size()?;
if total != 0 && index == total - 1 {
self.reinit_player()?;
}
}
if no_utterances {
self.synth.Options()?.SetSpeakingRate(self.rate.into())?;
self.synth.Options()?.SetAudioPitch(self.pitch.into())?;
self.synth.Options()?.SetAudioVolume(self.volume.into())?;
self.synth.SetVoice(&self.voice)?;
let stream = self
.synth
.SynthesizeTextToStreamAsync(&text.into())?
.get()?;
let content_type = stream.ContentType()?;
let source = MediaSource::CreateFromStream(&stream, &content_type)?;
self.player.SetSource(&source)?;
self.player.Play()?;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance_id);
}
self.playback_list.items()?.append(item)?;
if !self.is_speaking()? {
self.player.play()?;
}
Ok(Some(utterance_id))
Ok(())
}
fn stop(&mut self) -> std::result::Result<(), Error> {
trace!("stop()");
if !self.is_speaking()? {
return Ok(());
}
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get(&self.id) {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
for utterance in utterances {
callback(utterance.id);
}
}
}
if let Some(utterances) = utterances.get_mut(&self.id) {
utterances.clear();
}
self.player.Pause()?;
self.reinit_player()?;
Ok(())
}
@ -245,12 +98,12 @@ impl Backend for WinRt {
}
fn get_rate(&self) -> std::result::Result<f32, Error> {
let rate = self.synth.Options()?.SpeakingRate()?;
let rate = self.synth.options()?.speaking_rate()?;
Ok(rate as f32)
}
fn set_rate(&mut self, rate: f32) -> std::result::Result<(), Error> {
self.rate = rate;
self.synth.options()?.set_speaking_rate(rate.into())?;
Ok(())
}
@ -267,12 +120,12 @@ impl Backend for WinRt {
}
fn get_pitch(&self) -> std::result::Result<f32, Error> {
let pitch = self.synth.Options()?.AudioPitch()?;
let pitch = self.synth.options()?.audio_pitch()?;
Ok(pitch as f32)
}
fn set_pitch(&mut self, pitch: f32) -> std::result::Result<(), Error> {
self.pitch = pitch;
self.synth.options()?.set_audio_pitch(pitch.into())?;
Ok(())
}
@ -289,76 +142,18 @@ impl Backend for WinRt {
}
fn get_volume(&self) -> std::result::Result<f32, Error> {
let volume = self.synth.Options()?.AudioVolume()?;
let volume = self.synth.options()?.audio_volume()?;
Ok(volume as f32)
}
fn set_volume(&mut self, volume: f32) -> std::result::Result<(), Error> {
self.volume = volume;
self.synth.options()?.set_audio_volume(volume.into())?;
Ok(())
}
fn is_speaking(&self) -> std::result::Result<bool, Error> {
let utterances = UTTERANCES.lock().unwrap();
let utterances = utterances.get(&self.id).unwrap();
Ok(!utterances.is_empty())
}
fn voice(&self) -> Result<Option<Voice>, Error> {
let voice = self.synth.Voice()?;
let voice = voice.try_into()?;
Ok(Some(voice))
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let mut rv: Vec<Voice> = vec![];
for voice in SpeechSynthesizer::AllVoices()? {
rv.push(voice.try_into()?);
}
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in SpeechSynthesizer::AllVoices()? {
let vid: String = v.Id()?.try_into()?;
if vid == voice.id {
self.voice = v;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for WinRt {
fn drop(&mut self) {
let id = self.id;
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.remove(&id);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.remove(&id);
let mut utterances = UTTERANCES.lock().unwrap();
utterances.remove(&id);
}
}
impl TryInto<Voice> for VoiceInformation {
type Error = Error;
fn try_into(self) -> Result<Voice, Self::Error> {
let gender = self.Gender()?;
let gender = if gender == VoiceGender::Male {
Gender::Male
} else {
Gender::Female
};
let language: String = self.Language()?.try_into()?;
let language = LanguageTag::parse(language).unwrap();
Ok(Voice {
id: self.Id()?.try_into()?,
name: self.DisplayName()?.try_into()?,
gender: Some(gender),
language,
})
let state = self.player.playback_session()?.playback_state()?;
let playing = state == MediaPlaybackState::Opening || state == MediaPlaybackState::Playing;
Ok(playing)
}
}
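A hypothetical user-level sketch of what the master-branch queueing above enables on Windows: with interrupt set to false a second speak() call is enqueued, and the MediaEnded handler chains it once the first utterance finishes.

use tts::{Backends, Error, Tts};

fn queue_two() -> Result<(), Error> {
    let mut tts = Tts::new(Backends::WinRt)?;
    let first = tts.speak("First sentence.", false)?;
    let second = tts.speak("Second sentence.", false)?;
    // UtteranceId only derives Debug, so log it with {:?}.
    println!("queued {first:?} then {second:?}");
    Ok(())
}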

View File

@ -1,223 +1,74 @@
//! * a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
//! * Currently supported backends are:
//! * * Windows
//! * * Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
//! * * WinRT
//! * * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
//! * * MacOS/iOS
//! * * AppKit on MacOS 10.13 and below
//! * * AVFoundation on MacOS 10.14 and above, and iOS
//! * * Android
//! * * WebAssembly
/*!
* a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
* Currently supported backends are:
* * Windows
* * Screen readers/SAPI via Tolk
* * WinRT
* * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* * MacOS
* * AppKit on MacOS 10.13 and below
* * AVFoundation on MacOS 10.14 and above, and iOS
* * WebAssembly
*/
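A hypothetical quick-start against the master-branch API defined in this file (the crate is published as tts). On most backends speech is asynchronous, so a real program would keep running, e.g. by polling is_speaking(), until the utterance finishes.

use tts::{Error, Tts};

fn main() -> Result<(), Error> {
    let mut tts = Tts::default()?;
    tts.speak("Hello, world.", false)?;
    Ok(())
}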
use std::collections::HashMap;
use std::boxed::Box;
#[cfg(target_os = "macos")]
use std::ffi::CStr;
use std::fmt;
use std::rc::Rc;
#[cfg(windows)]
use std::string::FromUtf16Error;
use std::sync::Mutex;
use std::{boxed::Box, sync::RwLock};
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
use dyn_clonable::*;
use lazy_static::lazy_static;
#[cfg(target_os = "macos")]
use libc::c_char;
#[cfg(target_os = "macos")]
use objc::{class, msg_send, sel, sel_impl};
pub use oxilangtag::LanguageTag;
#[cfg(target_os = "linux")]
use speech_dispatcher::Error as SpeechDispatcherError;
use thiserror::Error;
#[cfg(all(windows, feature = "tolk"))]
use tolk::Tolk;
mod backends;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Backends {
#[cfg(target_os = "android")]
Android,
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
Tolk,
#[cfg(windows)]
WinRT,
#[cfg(target_os = "macos")]
AppKit,
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation,
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(all(windows, feature = "tolk"))]
Tolk,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
WinRt,
}
impl fmt::Display for Backends {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
Backends::Android => writeln!(f, "Android"),
#[cfg(target_os = "macos")]
Backends::AppKit => writeln!(f, "AppKit"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => writeln!(f, "AVFoundation"),
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => writeln!(f, "Speech Dispatcher"),
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => writeln!(f, "Tolk"),
#[cfg(target_arch = "wasm32")]
Backends::Web => writeln!(f, "Web"),
#[cfg(windows)]
Backends::WinRt => writeln!(f, "Windows Runtime"),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum BackendId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(u64),
#[cfg(target_os = "linux")]
SpeechDispatcher(usize),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
}
impl fmt::Display for BackendId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
BackendId::Android(id) => writeln!(f, "Android({id})"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
BackendId::AvFoundation(id) => writeln!(f, "AvFoundation({id})"),
#[cfg(target_os = "linux")]
BackendId::SpeechDispatcher(id) => writeln!(f, "SpeechDispatcher({id})"),
#[cfg(target_arch = "wasm32")]
BackendId::Web(id) => writeln!(f, "Web({id})"),
#[cfg(windows)]
BackendId::WinRt(id) => writeln!(f, "WinRT({id})"),
}
}
}
// # Note
//
// Most trait implementations are blocked by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[derive(Debug)]
#[cfg_attr(
not(any(target_os = "macos", target_os = "ios")),
derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)
)]
#[cfg_attr(
all(feature = "serde", not(any(target_os = "macos", target_os = "ios"))),
derive(serde::Serialize, serde::Deserialize)
)]
pub enum UtteranceId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
}
// # Note
//
// Display is not implemented by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
impl fmt::Display for UtteranceId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
UtteranceId::Android(id) => writeln!(f, "Android({id})"),
#[cfg(target_os = "linux")]
UtteranceId::SpeechDispatcher(id) => writeln!(f, "SpeechDispatcher({id})"),
#[cfg(target_arch = "wasm32")]
UtteranceId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
UtteranceId::WinRt(id) => writeln!(f, "WinRt({id})"),
}
}
}
unsafe impl Send for UtteranceId {}
unsafe impl Sync for UtteranceId {}
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Features {
pub is_speaking: bool,
pub pitch: bool,
pub rate: bool,
pub stop: bool,
pub utterance_callbacks: bool,
pub voice: bool,
pub get_voice: bool,
pub rate: bool,
pub pitch: bool,
pub volume: bool,
}
impl fmt::Display for Features {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
writeln!(f, "{self:#?}")
}
}
impl Features {
pub fn new() -> Self {
Self::default()
}
pub is_speaking: bool,
}
#[derive(Debug, Error)]
pub enum Error {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
IO(#[from] std::io::Error),
#[error("Value not received")]
NoneError,
#[error("Operation failed")]
OperationFailed,
#[cfg(target_arch = "wasm32")]
#[error("JavaScript error: [0]")]
#[error("JavaScript error: [0])]")]
JavaScriptError(wasm_bindgen::JsValue),
#[cfg(target_os = "linux")]
#[error("Speech Dispatcher error: {0}")]
SpeechDispatcher(#[from] SpeechDispatcherError),
#[cfg(windows)]
#[error("WinRT error")]
WinRt(windows::core::Error),
#[cfg(windows)]
#[error("UTF string conversion failed")]
UtfStringConversionFailed(#[from] FromUtf16Error),
WinRT(winrt::Error),
#[error("Unsupported feature")]
UnsupportedFeature,
#[error("Out of range")]
OutOfRange,
#[cfg(target_os = "android")]
#[error("JNI error: [0])]")]
JNI(#[from] jni::errors::Error),
}
#[clonable]
pub trait Backend: Clone {
fn id(&self) -> Option<BackendId>;
pub trait Backend {
fn supported_features(&self) -> Features;
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error>;
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error>;
fn stop(&mut self) -> Result<(), Error>;
fn min_rate(&self) -> f32;
fn max_rate(&self) -> f32;
@ -235,103 +86,59 @@ pub trait Backend: Clone {
fn get_volume(&self) -> Result<f32, Error>;
fn set_volume(&mut self, volume: f32) -> Result<(), Error>;
fn is_speaking(&self) -> Result<bool, Error>;
fn voices(&self) -> Result<Vec<Voice>, Error>;
fn voice(&self) -> Result<Option<Voice>, Error>;
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error>;
}
#[derive(Default)]
struct Callbacks {
utterance_begin: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_end: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_stop: Option<Box<dyn FnMut(UtteranceId)>>,
}
pub struct TTS(Box<dyn Backend>);
unsafe impl Send for Callbacks {}
unsafe impl std::marker::Send for TTS {}
unsafe impl Sync for Callbacks {}
unsafe impl std::marker::Sync for TTS {}
lazy_static! {
static ref CALLBACKS: Mutex<HashMap<BackendId, Callbacks>> = {
let m: HashMap<BackendId, Callbacks> = HashMap::new();
Mutex::new(m)
};
}
#[derive(Clone)]
pub struct Tts(Rc<RwLock<Box<dyn Backend>>>);
unsafe impl Send for Tts {}
unsafe impl Sync for Tts {}
impl Tts {
/// Create a new `TTS` instance with the specified backend.
pub fn new(backend: Backends) -> Result<Tts, Error> {
let backend = match backend {
impl TTS {
/**
* Create a new `TTS` instance with the specified backend.
*/
pub fn new(backend: Backends) -> Result<TTS, Error> {
match backend {
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => {
let tts = backends::SpeechDispatcher::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
Backends::SpeechDispatcher => Ok(TTS(Box::new(backends::SpeechDispatcher::new()))),
#[cfg(target_arch = "wasm32")]
Backends::Web => {
let tts = backends::Web::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
Ok(TTS(Box::new(tts)))
}
#[cfg(all(windows, feature = "tolk"))]
#[cfg(windows)]
Backends::Tolk => {
let tts = backends::Tolk::new();
if let Some(tts) = tts {
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
Ok(TTS(Box::new(tts)))
} else {
Err(Error::NoneError)
}
}
#[cfg(windows)]
Backends::WinRt => {
let tts = backends::WinRt::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
Backends::WinRT => {
let tts = backends::winrt::WinRT::new()?;
Ok(TTS(Box::new(tts)))
}
#[cfg(target_os = "macos")]
Backends::AppKit => Ok(Tts(Rc::new(RwLock::new(
Box::new(backends::AppKit::new()?),
)))),
Backends::AppKit => Ok(TTS(Box::new(backends::AppKit::new()))),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => Ok(Tts(Rc::new(RwLock::new(Box::new(
backends::AvFoundation::new()?,
))))),
#[cfg(target_os = "android")]
Backends::Android => {
let tts = backends::Android::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
};
if let Ok(backend) = backend {
if let Some(id) = backend.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.insert(id, Callbacks::default());
}
Ok(backend)
} else {
backend
Backends::AvFoundation => Ok(TTS(Box::new(backends::AvFoundation::new()))),
}
}
#[allow(clippy::should_implement_trait)]
pub fn default() -> Result<Tts, Error> {
pub fn default() -> Result<TTS, Error> {
#[cfg(target_os = "linux")]
let tts = Tts::new(Backends::SpeechDispatcher);
#[cfg(all(windows, feature = "tolk"))]
let tts = if let Ok(tts) = Tts::new(Backends::Tolk) {
let tts = TTS::new(Backends::SpeechDispatcher);
#[cfg(windows)]
let tts = if let Some(tts) = TTS::new(Backends::Tolk).ok() {
Ok(tts)
} else {
Tts::new(Backends::WinRt)
TTS::new(Backends::WinRT)
};
#[cfg(all(windows, not(feature = "tolk")))]
let tts = Tts::new(Backends::WinRt);
#[cfg(target_arch = "wasm32")]
let tts = Tts::new(Backends::Web);
let tts = TTS::new(Backends::Web);
#[cfg(target_os = "macos")]
let tts = unsafe {
// Needed because the Rust NSProcessInfo structs report bogus values, and I don't want to pull in a full bindgen stack.
@ -340,88 +147,94 @@ impl Tts {
let str: *const c_char = msg_send![version, UTF8String];
let str = CStr::from_ptr(str);
let str = str.to_string_lossy();
let version: Vec<&str> = str.split(' ').collect();
let version: Vec<&str> = str.split(" ").collect();
let version = version[1];
let version_parts: Vec<&str> = version.split('.').collect();
let major_version: i8 = version_parts[0].parse().unwrap();
let version_parts: Vec<&str> = version.split(".").collect();
let minor_version: i8 = version_parts[1].parse().unwrap();
if major_version >= 11 || minor_version >= 14 {
Tts::new(Backends::AvFoundation)
if minor_version >= 14 {
TTS::new(Backends::AvFoundation)
} else {
Tts::new(Backends::AppKit)
TTS::new(Backends::AppKit)
}
};
#[cfg(target_os = "ios")]
let tts = Tts::new(Backends::AvFoundation);
#[cfg(target_os = "android")]
let tts = Tts::new(Backends::Android);
let tts = TTS::new(Backends::AvFoundation);
tts
}
/// Returns the features supported by this TTS engine
/**
* Returns the features supported by this TTS engine
*/
pub fn supported_features(&self) -> Features {
self.0.read().unwrap().supported_features()
self.0.supported_features()
}
/// Speaks the specified text, optionally interrupting current speech.
pub fn speak<S: Into<String>>(
&mut self,
text: S,
interrupt: bool,
) -> Result<Option<UtteranceId>, Error> {
self.0
.write()
.unwrap()
.speak(text.into().as_str(), interrupt)
/**
* Speaks the specified text, optionally interrupting current speech.
*/
pub fn speak<S: Into<String>>(&mut self, text: S, interrupt: bool) -> Result<&Self, Error> {
self.0.speak(text.into().as_str(), interrupt)?;
Ok(self)
}
/// Stops current speech.
/**
* Stops current speech.
*/
pub fn stop(&mut self) -> Result<&Self, Error> {
let Features { stop, .. } = self.supported_features();
if stop {
self.0.write().unwrap().stop()?;
self.0.stop()?;
Ok(self)
} else {
Err(Error::UnsupportedFeature)
}
}
/// Returns the minimum rate for this speech synthesizer.
/**
* Returns the minimum rate for this speech synthesizer.
*/
pub fn min_rate(&self) -> f32 {
self.0.read().unwrap().min_rate()
self.0.min_rate()
}
/// Returns the maximum rate for this speech synthesizer.
/**
* Returns the maximum rate for this speech synthesizer.
*/
pub fn max_rate(&self) -> f32 {
self.0.read().unwrap().max_rate()
self.0.max_rate()
}
/// Returns the normal rate for this speech synthesizer.
/**
* Returns the normal rate for this speech synthesizer.
*/
pub fn normal_rate(&self) -> f32 {
self.0.read().unwrap().normal_rate()
self.0.normal_rate()
}
/// Gets the current speech rate.
/**
* Gets the current speech rate.
*/
pub fn get_rate(&self) -> Result<f32, Error> {
let Features { rate, .. } = self.supported_features();
if rate {
self.0.read().unwrap().get_rate()
self.0.get_rate()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech rate.
/**
* Sets the desired speech rate.
*/
pub fn set_rate(&mut self, rate: f32) -> Result<&Self, Error> {
let Features {
rate: rate_feature, ..
} = self.supported_features();
if rate_feature {
let mut backend = self.0.write().unwrap();
if rate < backend.min_rate() || rate > backend.max_rate() {
if rate < self.0.min_rate() || rate > self.0.max_rate() {
Err(Error::OutOfRange)
} else {
backend.set_rate(rate)?;
self.0.set_rate(rate)?;
Ok(self)
}
} else {
@ -429,43 +242,52 @@ impl Tts {
}
}
/// Returns the minimum pitch for this speech synthesizer.
/**
* Returns the minimum pitch for this speech synthesizer.
*/
pub fn min_pitch(&self) -> f32 {
self.0.read().unwrap().min_pitch()
self.0.min_pitch()
}
/// Returns the maximum pitch for this speech synthesizer.
/**
* Returns the maximum pitch for this speech synthesizer.
*/
pub fn max_pitch(&self) -> f32 {
self.0.read().unwrap().max_pitch()
self.0.max_pitch()
}
/// Returns the normal pitch for this speech synthesizer.
/**
* Returns the normal pitch for this speech synthesizer.
*/
pub fn normal_pitch(&self) -> f32 {
self.0.read().unwrap().normal_pitch()
self.0.normal_pitch()
}
/// Gets the current speech pitch.
/**
* Gets the current speech pitch.
*/
pub fn get_pitch(&self) -> Result<f32, Error> {
let Features { pitch, .. } = self.supported_features();
if pitch {
self.0.read().unwrap().get_pitch()
self.0.get_pitch()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech pitch.
/**
* Sets the desired speech pitch.
*/
pub fn set_pitch(&mut self, pitch: f32) -> Result<&Self, Error> {
let Features {
pitch: pitch_feature,
..
} = self.supported_features();
if pitch_feature {
let mut backend = self.0.write().unwrap();
if pitch < backend.min_pitch() || pitch > backend.max_pitch() {
if pitch < self.0.min_pitch() || pitch > self.0.max_pitch() {
Err(Error::OutOfRange)
} else {
backend.set_pitch(pitch)?;
self.0.set_pitch(pitch)?;
Ok(self)
}
} else {
@ -473,43 +295,52 @@ impl Tts {
}
}
/// Returns the minimum volume for this speech synthesizer.
/**
* Returns the minimum volume for this speech synthesizer.
*/
pub fn min_volume(&self) -> f32 {
self.0.read().unwrap().min_volume()
self.0.min_volume()
}
/// Returns the maximum volume for this speech synthesizer.
/**
* Returns the maximum volume for this speech synthesizer.
*/
pub fn max_volume(&self) -> f32 {
self.0.read().unwrap().max_volume()
self.0.max_volume()
}
/// Returns the normal volume for this speech synthesizer.
/**
* Returns the normal volume for this speech synthesizer.
*/
pub fn normal_volume(&self) -> f32 {
self.0.read().unwrap().normal_volume()
self.0.normal_volume()
}
/// Gets the current speech volume.
/**
* Gets the current speech volume.
*/
pub fn get_volume(&self) -> Result<f32, Error> {
let Features { volume, .. } = self.supported_features();
if volume {
self.0.read().unwrap().get_volume()
self.0.get_volume()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech volume.
/**
* Sets the desired speech volume.
*/
pub fn set_volume(&mut self, volume: f32) -> Result<&Self, Error> {
let Features {
volume: volume_feature,
..
} = self.supported_features();
if volume_feature {
let mut backend = self.0.write().unwrap();
if volume < backend.min_volume() || volume > backend.max_volume() {
if volume < self.0.min_volume() || volume > self.0.max_volume() {
Err(Error::OutOfRange)
} else {
backend.set_volume(volume)?;
self.0.set_volume(volume)?;
Ok(self)
}
} else {
@ -517,167 +348,15 @@ impl Tts {
}
}
/// Returns whether this speech synthesizer is speaking.
/**
* Returns whether this speech synthesizer is speaking.
*/
pub fn is_speaking(&self) -> Result<bool, Error> {
let Features { is_speaking, .. } = self.supported_features();
if is_speaking {
self.0.read().unwrap().is_speaking()
self.0.is_speaking()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Returns list of available voices.
pub fn voices(&self) -> Result<Vec<Voice>, Error> {
let Features { voice, .. } = self.supported_features();
if voice {
self.0.read().unwrap().voices()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Return the current speaking voice.
pub fn voice(&self) -> Result<Option<Voice>, Error> {
let Features { get_voice, .. } = self.supported_features();
if get_voice {
self.0.read().unwrap().voice()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Set speaking voice.
pub fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
let Features {
voice: voice_feature,
..
} = self.supported_features();
if voice_feature {
self.0.write().unwrap().set_voice(voice)
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer begins speaking an utterance.
pub fn on_utterance_begin(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_begin = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer finishes speaking an utterance.
pub fn on_utterance_end(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_end = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer is stopped and still has utterances in its queue.
pub fn on_utterance_stop(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_stop = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
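A hypothetical sketch of registering the three callbacks defined above, assuming a backend whose Features report utterance_callbacks; UtteranceId is logged with {:?} since it only derives Debug.

use tts::{Error, Tts, UtteranceId};

fn wire_callbacks() -> Result<Tts, Error> {
    let tts = Tts::default()?;
    tts.on_utterance_begin(Some(Box::new(|id: UtteranceId| println!("begin {id:?}"))))?;
    tts.on_utterance_end(Some(Box::new(|id: UtteranceId| println!("end {id:?}"))))?;
    tts.on_utterance_stop(Some(Box::new(|id: UtteranceId| println!("stopped {id:?}"))))?;
    Ok(tts)
}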
/*
* Returns `true` if a screen reader is available to provide speech.
*/
#[allow(unreachable_code)]
pub fn screen_reader_available() -> bool {
#[cfg(target_os = "windows")]
{
#[cfg(feature = "tolk")]
{
let tolk = Tolk::new();
return tolk.detect_screen_reader().is_some();
}
#[cfg(not(feature = "tolk"))]
return false;
}
false
}
}
impl Drop for Tts {
fn drop(&mut self) {
if Rc::strong_count(&self.0) <= 1 {
if let Some(id) = self.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
}
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Gender {
Male,
Female,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Voice {
pub(crate) id: String,
pub(crate) name: String,
pub(crate) gender: Option<Gender>,
pub(crate) language: LanguageTag<String>,
}
impl Voice {
pub fn id(&self) -> String {
self.id.clone()
}
pub fn name(&self) -> String {
self.name.clone()
}
pub fn gender(&self) -> Option<Gender> {
self.gender
}
pub fn language(&self) -> LanguageTag<String> {
self.language.clone()
}
}
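A hypothetical sketch of the voice API above, assuming a backend whose Features report voice support and at least one installed voice.

use tts::{Error, Tts};

fn pick_a_voice() -> Result<(), Error> {
    let mut tts = Tts::default()?;
    let voices = tts.voices()?;
    for v in &voices {
        // Language and gender only derive Debug here, so print them with {:?}.
        println!("{} {:?} {:?}", v.name(), v.language(), v.gender());
    }
    if let Some(v) = voices.first() {
        tts.set_voice(v)?;
    }
    Ok(())
}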

winrt_bindings/Cargo.toml Normal file
View File

@ -0,0 +1,13 @@
[package]
name = "tts_winrt_bindings"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
description = "Internal crate used by `tts`"
license = "MIT"
edition = "2018"
[dependencies]
winrt = "0.7"
[build-dependencies]
winrt = "0.7"

winrt_bindings/build.rs Normal file
View File

@ -0,0 +1,12 @@
winrt::build!(
dependencies
os
types
windows::media::core::MediaSource
windows::media::playback::{MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState, MediaPlayer}
windows::media::speech_synthesis::SpeechSynthesizer
);
fn main() {
build();
}

View File

@ -0,0 +1 @@
include!(concat!(env!("OUT_DIR"), "/winrt.rs"));