mirror of https://github.com/ndarilek/tts-rs.git synced 2024-11-01 04:39:36 +00:00

Compare commits


384 Commits

Author SHA1 Message Date
b31efe752d Bump version and windows dependency. 2024-07-06 18:33:51 -05:00
ae7bf7554c Bump version and windows dependency. 2024-07-03 10:46:43 -05:00
3bc16f0c6f Bump version and windows dependency. 2024-04-19 09:17:08 -05:00
3c8ae0ae42 Bump version. 2024-02-09 11:54:31 -06:00
07edc20861 Switch Arc to Rc to appease Clippy. 2024-02-09 11:34:01 -06:00
96a5209a9f Attempt to simplify CI. 2024-02-09 11:29:00 -06:00
20b18949e2 Just build for target since we don't need an APK. 2024-02-09 11:07:11 -06:00
f29de0aede Switch to actions/checkout@v4. 2024-02-09 11:03:41 -06:00
9e1476fd36
Merge pull request #49 from subalterngames:speech_dispatcher_voices_panic
Fixed a panic in SpeechDispatcher.voices()
2024-02-09 10:59:04 -06:00
3032fe0fb3
Merge pull request #51 from Enyium:patch-1
Added docs.rs link to `Cargo.toml`
2024-02-09 10:54:59 -06:00
edd09c24e7 Switch from cargo-apk to xbuild. 2024-02-09 10:49:08 -06:00
b7b4e7dc85 Bump dependencies. 2024-02-09 10:37:55 -06:00
f593340051
Merge pull request #50 from MarijnS95:drop-ndk-glue
Drop `ndk-glue` dependency from the main crate
2024-02-09 10:31:54 -06:00
Enyium
12d8e1f532
Added docs.rs link to Cargo.toml 2023-12-05 12:28:47 +01:00
Marijn Suijten
2a81dc9b70 Drop ndk-glue dependency from the main crate
Commit d42d201 ("Update Android dependencies and example.") correctly
replaces `ndk-glue` with `ndk-context` as a more generic crate to hold on
to a global `JavaVM` and Android `jobject` `Context`, but didn't drop the
unused `ndk-glue` crate from the list of Android dependencies.  This
crate is only used in the example crate, and [shouldn't clobber
downstream crates].

Besides, `ndk-glue` has been deprecated for some time and should be
replaced by `android-activity` in the example in a followup PR.

[shouldn't clobber downstream crates]: https://github.com/emilk/egui/pull/3606/files#r1401313794
2023-11-22 00:12:48 +01:00
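For context, a minimal sketch (assuming the current `ndk-context` and `jni` crate APIs; not taken from this crate's source) of what `ndk-context` provides to a JNI-based TTS backend once the app's glue layer has populated it: the process-global `JavaVM` pointer plus the Android `Context` object, with no `ndk-glue` dependency.

use jni::objects::JObject;
use jni::JavaVM;

fn obtain_vm_and_context() {
    // ndk-context is populated by the app's glue layer (e.g. android-activity).
    let ctx = ndk_context::android_context();
    // Safety: the glue layer keeps these pointers valid for the life of the process.
    let vm = unsafe { JavaVM::from_raw(ctx.vm().cast()) }.expect("valid JavaVM");
    let env = vm.attach_current_thread().expect("attach current thread");
    let context = unsafe { JObject::from_raw(ctx.context().cast()) };
    // `env` and `context` are all a backend needs to reach
    // android.speech.tts.TextToSpeech over JNI.
    let _ = (env, context);
}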
Esther Alter
a0c6cbaf6a Fixed a panic in SpeechDispatcher.voices() 2023-10-25 11:59:13 -04:00
5c528f1d8e Bump version. 2023-09-09 12:44:17 -05:00
9fb8107acf Eliminate some warnings. 2023-09-09 12:43:43 -05:00
8dabcc99c4 Bump windows dependency. 2023-09-09 12:41:36 -05:00
b369fb5614 docs.rs is actually using speech-dispatcher 0.11 now. 2023-04-06 10:19:21 -05:00
e6e1cd49bf Attempt to fix docs.rs builds and bump version. 2023-04-05 11:27:49 -05:00
e2edc18e6e Bump dependency and version. 2023-04-03 10:08:03 -05:00
3eba940a22 Bump windows dependency and version. 2023-03-28 14:20:23 -05:00
7e761e1267 Complete migration to jni 0.21. 2023-03-06 15:25:49 -06:00
bf8eb07866 Bump version. 2023-03-06 14:36:48 -06:00
f5be2b7657 Bump jni dependency. 2023-03-06 14:34:49 -06:00
b7e7ed46dd Appease Clippy. 2023-03-06 14:34:49 -06:00
69eebf2ffa Bump windows dependency. 2023-03-06 14:34:49 -06:00
6c6089daf9
Merge pull request #41 from ninjaboy:fix-macos-inline-play-example
Add support for inline pronunciation
2023-03-06 14:34:30 -06:00
Alexey Stolybko
c874607afe Add support for inline pronunciation 2022-12-26 20:07:58 +00:00
2667d4e943 Bump version. 2022-12-02 13:06:02 -06:00
8b506a89e0 ... 2022-12-02 13:00:20 -06:00
dcaf5b914d ... 2022-12-02 12:59:33 -06:00
359b1c8053 Obnoxious that I can't just push this directly to the mac and test for some reason... 2022-12-02 12:58:46 -06:00
527b4cd61e Small tweaks. 2022-12-02 12:57:34 -06:00
97fa370dec Refactor AVFoundation to oxilangtag. 2022-12-02 12:52:08 -06:00
915673eec6 Bump dependency. 2022-12-02 12:38:50 -06:00
cf72bad59a cargo fmt 2022-11-22 15:44:09 -06:00
246e587f2d Merge branch 'master' into oxilangtag 2022-11-22 15:38:04 -06:00
d65d79f8fb More tweaks and simplifications to work around ndk-sys checksum failure. 2022-11-22 15:16:18 -06:00
c339d2bee3 Bump editions in examples, and remove unnecessary dependencies. 2022-11-22 14:55:31 -06:00
daaead1dc3 Tweak features used in CI builds. 2022-11-22 14:36:15 -06:00
d547d84af0 Merge branch 'master' into oxilangtag 2022-11-22 14:16:08 -06:00
f6766ec633 Bump speech-dispatcher dependency and tweak docs.rs build configuration. 2022-11-22 14:15:40 -06:00
8102820f86 These checks should be run before the tag is pushed, so remove them from the release workflow. 2022-11-22 14:13:46 -06:00
3c9a78a953 Install necessary targets and simplify further. 2022-11-22 12:44:43 -06:00
5470b9557d Drop old actions-rs actions which seem to no longer be updated. 2022-11-22 12:36:00 -06:00
6770a2ed58 Eliminate caching. 2022-11-22 12:27:40 -06:00
61d84a2120 Manually install toolchain before cache to eliminate a deprecated action and hopefully fix checksum validation failures. 2022-11-22 12:12:57 -06:00
7a91a1e827 Merge branch 'master' into android 2022-11-22 11:26:04 -06:00
e19e5ef0b7 Remove unnecessary return statement. 2022-11-22 11:13:05 -06:00
3e4299d0e6 Update some old actions to newer versions. 2022-11-22 11:04:05 -06:00
d42d20189a Update Android dependencies and example. 2022-11-22 10:44:18 -06:00
22ae0ef5a3 Refactor to oxilangtag for language codes, and bump Windows dependency. 2022-11-21 12:00:35 -06:00
f5716c48f5 Fix type mismatches after speech-dispatcher update. 2022-10-19 10:28:18 -05:00
eb1d13976a Bump version. 2022-10-19 09:58:45 -05:00
259549e21d
Merge pull request #34 from helgoboss:master
#33 Fix AVFoundation crash on macOS when getting voices
2022-10-19 09:58:02 -05:00
3679ad6153
Merge pull request #36 from helgoboss:bug/35-appkit-crash
#35 Fix AppKit crash when interrupting speech
2022-10-19 09:55:15 -05:00
Benjamin Klum
94615a254a #35 Fix AppKit crash when interrupting speech
avoid removing first string when queue already empty
2022-10-16 23:18:19 +02:00
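In the spirit of that fix, a tiny illustrative guard (hypothetical function and field names, not the backend's actual code): only drop the head of the queued-utterance list if it still has entries, because an interruption may already have emptied it.

fn on_utterance_finished(queue: &mut Vec<String>) {
    // Interrupting speech can fire the "finished" callback after the queue
    // has already been cleared, so guard the removal instead of assuming a head.
    if !queue.is_empty() {
        queue.remove(0);
    }
}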
Benjamin Klum
ddf96c10aa #33 Remove unnecessary unsafe keyword 2022-10-16 23:15:52 +02:00
Benjamin Klum
3fdd452646 #33 Fix AVFoundation crash on macOS when getting voices
by preventing manual cleanup of non-owned objects
2022-10-16 22:35:03 +02:00
b4f48fa439 Bump dependency and version. 2022-10-03 16:59:33 -05:00
919bc4249a Bump dependency and version. 2022-09-21 16:38:09 -05:00
79e59d551f Remove unnecessary version pins. 2022-09-07 12:37:29 -05:00
652367fa8a Build on Ubuntu 22.04. 2022-09-07 12:33:45 -05:00
3c38c783b0 Support speech-dispatcher 0.11 and 0.10, dropping support for 0.9. 2022-09-07 12:33:33 -05:00
1eae827ed1 Add support for speech-dispatcher 0.10.2 and bump version. 2022-08-29 16:50:49 -05:00
f404e180e4 Bump version. 2022-07-22 10:13:32 -05:00
7cf80fb64d WinRT: Correctly set voice for case where no utterances are in queue.
Fixes #29
2022-07-22 10:08:13 -05:00
b50c5b6b93
Merge pull request #28 from Bear-03/traits
Derive common traits for Gender and Voice
2022-07-22 09:57:49 -05:00
748f07138d Bump version and dependency. 2022-07-21 18:34:22 -05:00
Bear-03
15f28c9af4
Derive common traits for Gender and Voice 2022-07-21 01:25:14 +02:00
b3d2b788f7 Bump version and dependency. 2022-06-30 18:15:42 -05:00
5feb8e3186 Constrain version so example builds. 2022-06-14 13:57:53 -05:00
238d7e2cb3 Bump version. 2022-06-14 13:13:00 -05:00
507d0b5418 Replace some unwrap calls with ok_or(Error::OperationFailed). 2022-06-14 13:09:50 -05:00
10ac1021ee Switch to line doc comments. 2022-06-13 10:35:32 -05:00
323f129b7b #24: Don't use default features when building on docs.rs. 2022-06-13 10:22:39 -05:00
9b4ae761a0 Bump version and dependency. 2022-05-19 12:03:23 -05:00
40f682080d Bump version. 2022-05-09 08:48:07 -05:00
40e28876b2 Remove unnecessary printlns and link directives. 2022-05-09 08:46:46 -05:00
4079f4b3c4 Fix mismatched gender codes. 2022-05-09 08:44:33 -05:00
4283623723 Bump dependency. 2022-05-07 11:04:22 -05:00
569bb160b8 Try to intercept cases where voice might be nil. 2022-03-31 14:48:03 -05:00
4d01717e75 Fix return type in Tolk backend. 2022-03-31 13:38:39 -05:00
da19d5f16c Restore. 2022-03-31 13:37:42 -05:00
822f770ab8 Finish making gender optional. 2022-03-31 13:25:08 -05:00
9bd767629a Remove unspecified gender in favor of Option. 2022-03-31 13:18:57 -05:00
2b4251f6fa Don't support voices in AppKit for now. 2022-03-31 13:16:35 -05:00
219cfbbe00
Add voices support to AvFoundation backend.
2022-03-31 13:10:38 -05:00
264af78c58 Get example previewing voices even if one can't be gotten. 2022-03-31 13:09:37 -05:00
e3542abd7c Stub out methods for now. 2022-03-31 11:52:30 -05:00
55c0fbbd2b Remove unnecessary patch. 2022-03-31 11:06:23 -05:00
a0945d7ebb Update example for new API. 2022-03-31 11:04:47 -05:00
c627583928 Eliminate a warning. 2022-03-31 11:02:20 -05:00
ec6d1f74a1 Add voice stubs, currently a no-op, on Android. 2022-03-31 10:55:49 -05:00
b9aa36cb3b Update APIs to support case where getting a voice is supported but the value isn't set. 2022-03-31 10:43:07 -05:00
e699f7e5e5 Add voices support to web platform. 2022-03-31 10:39:39 -05:00
3f9e7c22db Restore default features. 2022-03-30 20:24:40 -05:00
e4c6f6f23a Add voice stubs to Tolk backend. 2022-03-30 20:22:37 -05:00
d4b913908c Eliminate a warning. 2022-03-30 20:18:10 -05:00
b1f60811bf Add voice support to WinRT backend. 2022-03-30 20:13:27 -05:00
51cd84a6cd Support setting voice with Speech Dispatcher, and clarify features to indicate where getting current voice isn't supported. 2022-03-30 18:38:25 -05:00
142f2e6b3a Use plain 'ol struct. 2022-03-30 18:07:08 -05:00
1e55c43153 WIP: Use correct type in backend implementation. 2022-03-30 15:13:28 -05:00
e56a0da2e5 WIP: Reorganize, and try to get working with Speech Dispatcher. 2022-03-30 12:07:59 -05:00
55f841d887 Merge extra module into main module. 2022-03-30 10:54:30 -05:00
c222c087b2 cargo fmt 2022-03-30 10:18:22 -05:00
6057d9c968
Merge pull request #2 from francois-caddet/feature/voices
Add voices feature
2022-03-30 10:17:32 -05:00
francois-caddet
88f4598ec6 Merge branch 'master' into feature/voices 2022-03-20 13:02:37 +01:00
acecb1f362 Bump windows dependency and crate version. 2022-03-18 08:47:06 -05:00
fd9f5ae60a Branch not needed. 2022-03-10 14:35:46 -06:00
b435c89239 Bump version. 2022-03-10 14:14:47 -06:00
3366f93e2b Fix release workflow to not build default features. 2022-03-10 14:14:03 -06:00
539003205e Appease Clippy. 2022-03-10 14:08:13 -06:00
5c9c649505 Also disable default features for Linux when running Clippy. 2022-03-10 14:07:18 -06:00
f275e506df Disable default-features on Linux since runners don't have speech-dispatcher 0.10 or greater. 2022-03-10 13:57:05 -06:00
ef0a78c745 Bump speech-dispatcher, and support building with multiple versions. 2022-03-10 13:46:14 -06:00
cc8fd91c86 Set event callbacks on pre-clone value to ensure that they remain alive. 2022-03-10 11:44:59 -06:00
888e6a3dfa Correctly clean up callback references based on whether Arc remains cloned on drop. 2022-03-10 11:12:46 -06:00
5944794980 Add Arc<RwLock<...>> for remaining platforms. 2022-03-07 18:31:25 -06:00
31309553bb Ditto for Linux. 2022-03-07 17:56:28 -06:00
00bd5e62ff Switch TTS to use Arc<RwLock<Box<dyn Backend>>> to address soundness issues. 2022-03-07 17:54:26 -06:00
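The shape of that refactor, as a minimal sketch rather than the crate's real definitions (the `Backend` trait and its method are placeholders): the handle clones cheaply, all mutation goes through the lock, and shared callback state is only torn down when the last clone is dropped.

use std::sync::{Arc, RwLock};

trait Backend {
    fn speak(&mut self, text: &str);
}

#[derive(Clone)]
struct Tts(Arc<RwLock<Box<dyn Backend>>>);

impl Tts {
    fn speak(&self, text: &str) {
        // Writers are serialized by the RwLock, so &self is enough here.
        self.0.write().unwrap().speak(text);
    }
}

impl Drop for Tts {
    fn drop(&mut self) {
        // Mirrors the follow-up commits: only clean up globally registered
        // callbacks once no other clone holds the backend.
        if Arc::strong_count(&self.0) == 1 {
            // remove callback entries keyed by this instance here
        }
    }
}

Deriving `Clone` alongside the manual `Drop` is what makes the strong-count check meaningful: intermediate clones drop silently, and only the final owner cleans up.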
90f7dae6a1 Bump windows dependency and version. 2022-03-07 10:31:37 -06:00
f310306508 Bump version. 2022-02-11 09:20:22 -06:00
9f8b670fe0 Bump dependency. 2022-02-11 09:17:46 -06:00
cd2216390c Bump dependency and version. 2022-02-05 09:48:52 -06:00
4466923620 Bump speech-dispatcher dependency. 2022-01-27 10:56:11 -06:00
660072809d Bump version and windows-rs dependency. 2022-01-13 14:28:22 -06:00
1e1c04d4e5 Fix rustfmt error. 2022-01-10 11:10:18 -06:00
050b97fde1 Bump version. 2022-01-10 11:03:18 -06:00
dc00aa427f Bump speech-dispatcher dependency and update for new return types. 2022-01-10 11:02:12 -06:00
1f466275cf Bump ndk-glue version once more. 2022-01-10 10:52:44 -06:00
9066d2b005 Make formatting more consistent. 2022-01-10 10:51:18 -06:00
cdc225418e Bump Android dependencies. 2022-01-10 10:48:19 -06:00
ad785b6536
Merge pull request #14 from AlyoshaVasilieva:ndk-glue
Bump ndk-glue version in Android example
2022-01-10 10:46:32 -06:00
89708d9ef1
Merge pull request #16 from AlyoshaVasilieva:exit-loop
Exit Android initialization loop with error when stuck
2022-01-10 10:43:57 -06:00
e3f9ebe431
Merge pull request #18 from saona-raimundo:master
Implementing common traits
2022-01-10 10:34:49 -06:00
Raimundo Saona
114fb55fc9 Fixing macos and ios restrictions 2022-01-04 13:43:32 +01:00
2ea472e196 Bump Windows dependency, crate version, and remove Debug derive. 2021-12-28 10:09:54 -06:00
Raimundo Saona
5331bc8daf Undo implementation of serde traits for Error 2021-12-23 11:12:35 +01:00
Raimundo Saona
e20170583d Common traits for other structs 2021-12-22 15:38:11 +01:00
Raimundo Saona
9ed03753c2 Common traits for Features 2021-12-22 13:28:00 +01:00
Malloc Voidstar
bed6cfa206
Exit Android initialization loop with error when stuck
500ms is fairly arbitrary; my emulator took 35 to run that loop.
2021-12-10 10:47:12 -08:00
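A sketch of that kind of bounded wait (the 500 ms figure is the one discussed above; the helper itself is hypothetical, not this crate's code): poll for readiness, but return an error instead of spinning forever.

use std::time::{Duration, Instant};

fn wait_for_init(mut is_ready: impl FnMut() -> bool) -> Result<(), &'static str> {
    let deadline = Instant::now() + Duration::from_millis(500);
    while !is_ready() {
        if Instant::now() > deadline {
            // Surface an error to the caller rather than hanging the app.
            return Err("Android TTS engine did not initialize in time");
        }
        std::thread::sleep(Duration::from_millis(5));
    }
    Ok(())
}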
Malloc Voidstar
5e9c98b063
Also bump gradle.plugin.com.github.willir.rust:plugin
0.3.3 doesn't work with cargo-ndk 2+
2021-12-10 09:31:21 -08:00
Malloc Voidstar
ee8ec97ab4
Bump ndk-glue version in Android example 2021-12-10 09:10:39 -08:00
d24d1a6a15 Bump version and dependencies. 2021-12-02 09:24:22 -06:00
94417b5351 Only import from speech_dispatcher when building for Linux. 2021-11-19 09:25:37 -06:00
89fd14d957 Bump windows crate dependency. 2021-11-19 09:24:58 -06:00
47e164a0c8 Support Speech Dispatcher initialization failures, and bump version. 2021-11-19 09:22:05 -06:00
57ffbf0e4f Make windows dependency platform-specific and add alloc feature. 2021-11-16 11:36:24 -06:00
119678ae55 Update to windows 0.27 and bump version. 2021-11-16 11:13:31 -06:00
d5bdb9f498 Bump version and dependency. 2021-11-15 08:16:00 -06:00
562489e5af Bump version. 2021-11-08 07:28:07 -06:00
c12f328cf2 Bump dependency. 2021-11-08 07:27:35 -06:00
f8dbc04c36 Bump dependencies. 2021-11-01 10:39:58 -05:00
a703e790ec Bump edition and version. 2021-11-01 10:38:26 -05:00
92538fbdb8 Upgrade windows-rs to 0.23. 2021-11-01 10:36:15 -05:00
dc3129b79c Bump version. 2021-05-20 17:08:23 -05:00
c4038149a8 Remove a conditional that blocked playback in some circumstances on the WinRT backend. 2021-05-20 17:07:55 -05:00
ca7789f157 Bump version and Tolk dependency. 2021-05-20 13:59:02 -05:00
d85d56c3ee Bump version to work around Tolk crash. 2021-05-11 23:53:15 -05:00
d67bf8344a Bump dependencies. 2021-05-11 20:21:03 -05:00
8f5f58028a Use attributes instead. 2021-05-11 20:18:14 -05:00
86b2e07f15 Bump version. 2021-05-11 19:38:39 -05:00
4088eb12a1 Add ability to detect screen readers. Windows-only for now, and requires the tolk feature. 2021-05-11 19:37:56 -05:00
7b8da53d81 Args shouldn't be a list. 2021-04-05 07:45:08 -05:00
e4b53d17aa Check web example as part of release process. 2021-04-03 12:59:32 -05:00
316b1bceec Use target as part of toolchain installation. 2021-04-03 12:58:19 -05:00
26d06fc635 No really, build the web example. 2021-04-03 12:11:53 -05:00
a879b3dca3 Get web example compiling. 2021-04-03 11:58:23 -05:00
d5a692008a Add action to ensure that web example compiles. 2021-04-03 11:50:11 -05:00
f7239366f0 Add command to build the web example. 2021-04-03 11:49:44 -05:00
debab7de17 Bump version. 2021-04-03 11:11:37 -05:00
1011704b82 And again, VSCode's find/replace didn't catch this. *grumble* 2021-03-31 11:12:42 -05:00
d9639c049b S/TTS/Tts/ here as well. 2021-03-31 11:03:14 -05:00
6dbf9b7ddc Find/replace is failing me today. 2021-03-31 11:01:26 -05:00
336c266ed4 Missed a few... 2021-03-31 10:53:08 -05:00
57f91105ec s/TTS/Tts/ as per Clippy's acronym warnings. 2021-03-31 10:40:42 -05:00
ef96042b12 Bump Windows dependency and update accordingly. Also, fix an acronym Clippy warning. 2021-03-31 10:38:32 -05:00
acccdfeada Bump version. 2021-03-16 17:36:29 -05:00
153075ebab Add web example
Closes #1
2021-03-16 17:33:01 -05:00
25f8211661 Bump version. 2021-03-16 14:18:49 -05:00
fb7f1dddfc *sigh* Fix stupid M1/ARM casting issue. I hate Apple. 2021-03-15 14:02:05 -05:00
50528ce2d1 Another comparison check fix. 2021-03-15 13:47:41 -05:00
8c2aae7afd Try another initialization fix. 2021-03-15 13:46:22 -05:00
ed2d2e76c3 And this is what happens when I don't test on actual hardware. 2021-03-15 13:06:49 -05:00
45255a8049 Fix another possibly broken comparison. 2021-03-15 13:04:38 -05:00
c65c0022d8 (Hopefully) initialize utterances correctly. 2021-03-15 13:03:41 -05:00
bd8e2ee20a Compare against ObjC NO to ensure correctness. 2021-03-15 13:02:51 -05:00
00485d6cd8 Enable 'exception' feature to hopefully catch and surface ObjC exceptions. 2021-03-12 08:36:52 -06:00
cdfb7ddb77 Even more bloody logging. 2021-03-12 06:59:49 -06:00
290eb06d02 Even more trace logging. 2021-03-12 06:38:46 -06:00
e91637a67c Add even more trace logging. 2021-03-12 06:28:02 -06:00
81eba99594 Add cast to (hopefully) get AppKit compiling on M1 macs. 2021-03-12 06:20:05 -06:00
1f510120a5 Add trace logging in AVFoundation backend. 2021-03-12 05:58:30 -06:00
1d075f7ece When speech is interrupted on AVFoundation, only stop if already speaking. May address a possible deadlock. 2021-03-12 05:50:08 -06:00
a22ee53727 Fix Clippy warnings. 2021-03-12 05:48:14 -06:00
2bd324b08b Bump version. 2021-03-11 13:44:00 -06:00
8ba1f91617 Ignore DLL files. 2021-03-11 13:41:26 -06:00
c9279804b7 Different approach. 2021-03-11 13:41:03 -06:00
6664ca89e3 Revert "Add windows crate to generic build-dependencies so the build script works everywhere."
This reverts commit 2fd98c0a52.
2021-03-11 13:38:44 -06:00
2fd98c0a52 Add windows crate to generic build-dependencies so the build script works everywhere. 2021-03-11 13:28:30 -06:00
6784bb8861 Remove bindings publish from CI. 2021-03-11 13:23:08 -06:00
c21d4a6a38 Eliminate separate winrt_bindings crate since it no longer seems necessary for fast builds. 2021-03-11 13:21:41 -06:00
00a16c5dd5 Add missing types. 2021-03-11 12:54:50 -06:00
d9ca83ca15 WIP: Bump dependencies. Currently broken. 2021-03-11 12:33:31 -06:00
42879dfa1f Refactor to new windows crate. 2021-01-22 11:28:42 -06:00
296fa89f5d Bump version, and use automatically-provided feature provided by optional tolk dependency. 2021-01-21 10:49:11 -06:00
3e1f5af61a Call rustup manually, since the action doesn't seem to work. 2020-12-30 14:50:36 -06:00
15b7b33ed3 Are these CSVs? 2020-12-30 14:31:27 -06:00
22cff2ddd1 Bump version. 2020-12-30 14:23:58 -06:00
06eb32b6d4 Make module imports more consistent. 2020-12-30 14:23:31 -06:00
69af3465b3 We don't need dylib on Android, and it breaks WinRT. Revert. 2020-12-30 14:21:32 -06:00
699d0d23e9 Add necessary targets. 2020-12-30 14:15:14 -06:00
7eb74729fc Use cargo-apk to test Android build. 2020-12-30 14:00:25 -06:00
d806c44c76 My brain hurts and I can't figure out how to set this CI action up right now. But it works, so release. 2020-12-30 13:09:29 -06:00
adfb2146ac Bump version. 2020-12-30 12:53:21 -06:00
914a7a1972 Make script executable. 2020-12-30 12:51:55 -06:00
cf39be85af Looks like the tool cache isn't supported or is failing. Comment out for now. 2020-12-30 12:43:15 -06:00
cee5777556 README/packaging tweaks for Android. 2020-12-30 12:41:28 -06:00
a01fd93502 Build Android example as part of CI. 2020-12-30 12:28:07 -06:00
8d6f40b1a5 Finish callback implementation. 2020-12-30 12:19:44 -06:00
0ea46b29b2 Partially implement callbacks. Unfinished due to lazy_static inconsistencies. 2020-12-30 11:37:46 -06:00
c92b67127c Support is_speaking. 2020-12-30 10:15:37 -06:00
733b17fe2c Log TTS initialization failures. 2020-12-30 10:10:49 -06:00
440154502b Clear some unused variable warnings. 2020-12-30 10:07:27 -06:00
2120de8756 Support pitch. 2020-12-30 10:06:18 -06:00
e1c2171833 Support stopping. 2020-12-30 09:49:13 -06:00
22ee9863d6 Return utterance IDs when speech succeeds. 2020-12-30 09:44:47 -06:00
5634fdb393 Block initialization until TTS finishes initializing from Android. 2020-12-30 09:24:13 -06:00
1ac0b91981 Add ugly hack to prove that speech works. 2020-12-29 19:25:56 -06:00
32f57d8578 Speak calls pass to Java, but don't work since something isn't bound to the engine. 2020-12-29 17:25:58 -06:00
f58f875fdf Guess I don't need a global VM. 2020-12-29 15:48:18 -06:00
84926ea110 Store the TTS object in the struct. 2020-12-29 15:47:11 -06:00
da8260cba8 Store the TTS object in the struct. 2020-12-29 15:45:56 -06:00
965bea0adf TTS seems to initialize now. 2020-12-29 14:10:39 -06:00
fc20431916 Refactor Android example to full, self-contained app. 2020-12-29 11:15:24 -06:00
cb91760468 Set Android API versions, and add Makefile.toml convenience script for getting Android logs. 2020-12-28 05:39:20 -06:00
187cd71eeb Add Android example. 2020-12-27 10:42:41 -06:00
5849e340c9 Add initial Android stubs. 2020-12-27 09:41:34 -06:00
6d17447350 Remove unnecessary dependencies and bump versions. 2020-12-07 22:39:30 -06:00
22007fbf79 Bump version. 2020-12-07 21:46:46 -06:00
d8f2b3fb00 I'll do my own queuing, MediaPlaybackList is either the wrong tool for the job or way too buggy. 2020-12-07 21:35:07 -06:00
a905439d9c Add strategic backoff in WinRT backend to (hopefully) eliminate a deadlock. 2020-12-07 14:58:59 -06:00
49e8c0e5dc Bump version. 2020-12-03 13:31:09 -06:00
ad67682235 Implement Send and Sync for UtteranceId on most platforms. 2020-12-03 13:21:24 -06:00
184becfd1a Bump version. 2020-11-25 10:16:33 -06:00
0bdf0fcfd3 Account for macOS 11. 2020-11-25 10:13:17 -06:00
f4952ad132 UWP tweaks and optimizations.
* Initialized TTS `MediaPlayer` in real-time mode.
* Set media category to speech.
* More aggressively drop locks to prevent deadlocks.
* Remove checks of queued items that are no longer necessary.
* Made `is_speaking` check both media player state and queued item count.
* Return eagerly from `stop` if speech isn't in progress, thus eliminating more locks.
2020-11-25 10:07:28 -06:00
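The first two bullet points translate roughly into the following sketch against the `windows` crate (the `Media_Playback` feature is assumed; this is an illustration, not the backend's verbatim code). The remaining bullets concern the backend's internal lock and queue handling and are not shown.

use windows::core::Result;
use windows::Media::Playback::{MediaPlayer, MediaPlayerAudioCategory};

fn configure_speech_player() -> Result<MediaPlayer> {
    let player = MediaPlayer::new()?;
    // Real-time playback reduces buffering latency for short speech clips.
    player.SetRealTimePlayback(true)?;
    // Tag the output as speech so the OS applies speech-appropriate handling.
    player.SetAudioCategory(MediaPlayerAudioCategory::Speech)?;
    Ok(player)
}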
728c409e25 Add example for unscientifically measuring latency of TTS. 2020-11-25 09:54:46 -06:00
669c94af36 We don't need autoplay. 2020-11-23 17:20:55 -06:00
fa2903606e Bump version. 2020-11-18 08:27:09 -06:00
9fb4f5e71e Refactor release job. 2020-11-17 16:47:46 -06:00
34db699972 Rename build to check. 2020-11-17 16:42:44 -06:00
80d51e1bff Fix cargo fmt CI failure. 2020-11-17 16:36:33 -06:00
f3705a1856 --target doesn't work here. 2020-11-17 16:28:28 -06:00
2f0ced4eaf Install components and integrate rustfmt checks. 2020-11-17 16:22:14 -06:00
d5f92565e5 Integrate Clippy. 2020-11-17 16:16:14 -06:00
3224cbdf5a We don't need iOS-specific builds since the macOS checks already handle this. 2020-11-17 16:10:20 -06:00
be96aacd7a Add --target. 2020-11-17 16:06:18 -06:00
d97796fff7 Eliminate a Clippy warning. 2020-11-17 16:03:03 -06:00
f37133841a Fix warnings. 2020-11-17 16:00:02 -06:00
3157162192 Use Rust actions to (hopefully) speed things up. 2020-11-17 15:50:22 -06:00
2c73c75e00 Use Matrix build. 2020-11-17 15:15:43 -06:00
b6ef11b60f Clean up release job. 2020-11-17 15:00:37 -06:00
10010f9bc9 Add caching to builds, and remove unneeded LLVM install. 2020-11-17 13:15:37 -06:00
3500e88117 Do we need LLVM for this? 2020-11-17 12:22:18 -06:00
1cbeab6ea9 Bump version. 2020-11-17 10:25:45 -06:00
590d6369fb Remove debugging printlns. 2020-11-17 10:25:21 -06:00
80fa5d4583 Bump version. 2020-11-11 10:29:49 -06:00
6a706f36ab Fix double-speaking bug for good, hopefully. 2020-11-11 10:27:03 -06:00
e1791c7046 Bump version. 2020-11-03 12:02:37 -06:00
031e0ff23f Fix more queuing issues under WinRT. 2020-11-03 12:02:16 -06:00
204cd50935 Change example to expose more WinRT breakage. 2020-11-03 11:51:40 -06:00
6b74afe503 Remove unused variable. 2020-11-03 11:24:09 -06:00
4e157b6fb5 Check examples when building. 2020-11-03 11:23:03 -06:00
df4adc81a7 Bump version. 2020-11-03 11:20:29 -06:00
289a35dc83 Don't double-speak previous item when not flushing queue. 2020-11-03 11:20:02 -06:00
9c98026978 Don't re-initialize a player, just clear the item list. 2020-11-03 11:11:49 -06:00
d3e05b5a7a Revert "Pass TTS instance as first argument to utterance callbacks."
This appears to break callbacks, and is of limited utility.

This reverts commit 29c0a8463e.
2020-11-03 11:03:55 -06:00
5a9c96508f Remove unused variable. 2020-11-03 11:00:20 -06:00
2343523bb6 Add example that exposes WinRT issue where speech doubles in some circumstances. 2020-11-03 10:05:17 -06:00
29c0a8463e Pass TTS instance as first argument to utterance callbacks. 2020-11-02 22:40:30 -06:00
cf0ad2221e Derive Debug. 2020-11-02 21:44:47 -06:00
551bb1292e Make TTS clonable.
Also, add other possibly useful derives.
2020-11-02 21:27:13 -06:00
565aa6d654 Fix issue where is_speaking always returns true under WinRT, and bump version. 2020-11-02 13:30:39 -06:00
efdb274eb4 Bump version. 2020-11-02 10:12:25 -06:00
5feede0b8f Remove unnecessary debug logs. 2020-11-02 10:11:53 -06:00
1d48cb93d7 Bump version and dependencies. 2020-10-30 10:28:02 -05:00
0bbda0a90f Remove WinRT code for handling paused player state, which caused issues with queued speech. 2020-10-30 10:23:24 -05:00
e66b8403aa Remove unnecessary full module names. 2020-10-14 03:54:53 -05:00
a281d74e5c Whitespace fix. 2020-10-14 03:51:08 -05:00
5d1625e5e2 Bump version. 2020-10-08 20:14:38 -05:00
ca186671b4 Make similar refactors to release action. 2020-10-08 19:15:12 -05:00
1be226df8a Switch to cargo check and build with all features under Windows. 2020-10-08 19:11:41 -05:00
baa442f136 Separate WASM build into separate job. 2020-10-08 19:10:14 -05:00
51837a51bf Document feature. 2020-10-08 19:08:18 -05:00
fa216a534e Gate Tolk behind use_tolk feature to support compilation on UWP. 2020-10-08 19:07:07 -05:00
6eb03fb1a3 Bump version. 2020-10-08 09:47:28 -05:00
88ec7db075 Eliminate accidental deadlock. 2020-10-08 09:44:46 -05:00
ba90cd66ba Add unused example of setting on_utterance_stop. 2020-10-08 09:06:48 -05:00
0c13c43a77 Fix incorrect error code name. 2020-10-08 08:16:01 -05:00
724dd1214f Bump version. 2020-10-08 08:09:28 -05:00
6f12974ce4 Implement stop callback on MacOS. 2020-10-08 08:07:33 -05:00
8c783205c3 Implement utterance_stop callback on most platforms. 2020-10-08 07:56:45 -05:00
174011bbb4 Make UtteranceId use u64 on most platforms, and add additional derives. 2020-10-08 07:16:10 -05:00
François Caddet
d2c42d97f5 the voices::Backend trait is almost stable 2020-09-28 11:18:54 +02:00
François Caddet
3294a82485 some fixes
now build on macOS
2020-09-27 20:35:40 +02:00
François Caddet
e19eb56169 first implementation of a voice trait for macOS
WARN: not tested
2020-09-27 20:04:12 +02:00
François Caddet
f7297e18fd add condition for macOS 11 and greater for default backend 2020-09-26 23:39:30 +02:00
François Caddet
f78aed211f fix conflicts 2020-09-26 23:36:15 +02:00
Francois Caddet
008662c940 temporary fix to a build issue with the crate speech-dispatcher 2020-09-26 23:16:10 +02:00
Francois Caddet
8c8dc0ae9f add voices value returned by the backends 2020-09-26 23:03:56 +02:00
c2bbc5ac04 Eliminate more Clippy warnings. 2020-09-26 12:47:18 -05:00
dbac8a3fe0 Eliminate some Clippy warnings. 2020-09-26 12:43:16 -05:00
Francois Caddet
47cbb80595 Merge branch 'develop' into feature/voices 2020-09-26 18:20:10 +02:00
ace5d2fd1f Make compatible with newly-released speech-dispatcher-rs. 2020-09-25 11:33:49 -05:00
589c613bbe Implement FnMut callbacks on AV Foundation, and fix warnings. 2020-09-25 11:14:26 -05:00
1f22843086 Refactor Linux, Windows, and Wasm platforms to use FnMut for callbacks, and bump version. 2020-09-25 11:08:19 -05:00
2c70f77a15 Bump version. 2020-09-24 18:21:14 -05:00
96e5d21e24 Implement callbacks for WinRT. 2020-09-24 17:56:46 -05:00
a22242af50 Implement callbacks for web backend. 2020-09-24 14:26:30 -05:00
532d5d9b58 Tighten up access. 2020-09-23 12:23:46 -05:00
251fb8d8c1 Implement callbacks on AVFoundation. 2020-09-23 12:21:05 -05:00
bd57075d53 Implement unused framework for AVFoundation callbacks. 2020-09-23 11:28:56 -05:00
36a12597de Merge branch 'v0.7' of https://github.com/ndarilek/tts-rs into v0.7 2020-09-23 11:00:41 -05:00
c5524113ff Document the fact that we only need an NSRunLoop in the example because there isn't one already. 2020-09-23 10:33:30 -05:00
6788277a4d Implement framework for utterance callbacks in Windows backends, though they aren't currently called. 2020-09-23 10:31:21 -05:00
61522610cd Implement utterance begin/end callback framework, and set up for Speech-Dispatcher. 2020-09-23 10:12:51 -05:00
f5f11b7cdf Switch to using MediaPlaybackItem as WinRT utterance ID. 2020-09-22 14:51:59 -05:00
017aa8863b Remove unused import and fix iOS builds. 2020-09-22 14:30:17 -05:00
6b023c3071 Add AV Foundation support for returning utterance IDs. 2020-09-22 14:08:19 -05:00
4816ec575c Make speak calls return an utterance ID, where possible. 2020-09-22 12:40:03 -05:00
d6508edd12 Remove workaround for incorrect Tolk string handling, pin minimum working version, and bump version. 2020-09-21 15:13:22 -05:00
François Caddet
97f1de5724 Merge branch 'develop' into feature/voices 2020-09-05 12:27:19 +02:00
François Caddet
335ac710a6 add unimplemented functions for the voices feature on every backend 2020-09-05 12:07:51 +02:00
François Caddet
b238c8c938 fix return type of AVSpeechSynthesisVoice:new 2020-09-05 11:30:11 +02:00
François Caddet
1b8809aaeb remove the example changing voice.
the default() voice works properly for av_foundation
2020-09-05 10:55:23 +02:00
François Caddet
0fb6c62d83 fix some parameter types and implement set_voice
We have an illegal hardware instruction in
backend::av_foundation::voices::AVSpeechSynthesisVoice::new(identifier)
when sending voiceWithIdentifier. Is it because the runLoop is not
running when it's called?
2020-09-04 15:48:56 +02:00
François Caddet
6ed94686f3 implement set_voice for AVFoundation backend
- TODO: test the implementation
- fixed: set_voice mutability of self parameter
2020-09-03 18:40:32 +02:00
François Caddet
5b0d1b6621 Add voices feature
Implemented for AVFoundation backend but set_voice has no effect for now
Warning: does not build on Linux or windows for now
2020-09-03 16:50:11 +02:00
14a721c837 Depend on build_ios for releasing. 2020-09-02 17:13:03 -05:00
c8fd02b448 Bump version. 2020-09-02 16:51:25 -05:00
03ea2602bc Don't link against AppKit if building on iOS. 2020-09-02 16:37:07 -05:00
dac58539c9 Merge branch 'master' of https://github.com/ndarilek/tts-rs 2020-09-02 16:03:08 -05:00
0d61dc258f Set up conditional compilation for iOS. 2020-09-02 16:03:04 -05:00
2cfd2ea09e Merge branch 'master' of https://github.com/ndarilek/tts-rs into master 2020-09-02 15:52:43 -05:00
d3ca27c707 Force Rust toolchain update, and separate out iOS build. 2020-09-02 15:52:11 -05:00
81b23330e9 Move iOS build into separate CI run to see if this odd bug is triggered. 2020-09-02 15:37:34 -05:00
665013fdff Split text sent to Tolk backend to account for some sort of length limit.
Tolk seems to fail on strings larger than 325 characters in length. Here we:
* Send any strings with 300 or fewer characters through directly.
* For larger strings, split on whitespace boundaries, then create and send buffers of 300 or fewer characters.

This may not handle internationalized text, and may not handle someone bombarding TTS with a giant word. PRs for either welcome.
2020-09-02 11:40:08 -05:00
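A sketch of that splitting strategy (the 300-character limit comes from the commit above; the helper name is hypothetical): strings at or under the limit pass through untouched, and longer ones are packed into whitespace-delimited chunks that stay within it.

fn chunk_for_tolk(text: &str, limit: usize) -> Vec<String> {
    if text.chars().count() <= limit {
        return vec![text.to_string()];
    }
    let mut chunks = Vec::new();
    let mut current = String::new();
    for word in text.split_whitespace() {
        // +1 accounts for the space that would join `word` onto `current`.
        if !current.is_empty() && current.chars().count() + 1 + word.chars().count() > limit {
            chunks.push(std::mem::take(&mut current));
        }
        if !current.is_empty() {
            current.push(' ');
        }
        current.push_str(word);
    }
    if !current.is_empty() {
        chunks.push(current);
    }
    // A single word longer than `limit` still goes through as one oversized
    // chunk, matching the caveat about giant words in the commit message.
    chunks
}

Each returned chunk would then be handed to Tolk in order, e.g. `chunk_for_tolk(text, 300)`.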
6c091f3284 Switch Speech-dispatcher initialization to threaded mode so callbacks work and is_speaking is correct. 2020-08-25 11:50:25 -05:00
d3ffd5078f cargo fmt 2020-08-24 16:46:57 -05:00
1507527175 Add Default implementation for Features so backends need only specify features they actually support. 2020-08-24 16:44:00 -05:00
951e31b284 Implement is_speaking For Speech-dispatcher. 2020-08-19 21:28:30 -05:00
045b80c921 Don't scream if winrt_bindings fails to build. 2020-08-18 15:27:07 -05:00
907b828315 Add iOS test to release builds. 2020-08-18 15:26:23 -05:00
bdace524b9 Bump version. 2020-08-18 15:22:46 -05:00
7bcbda15b3 Update supported platforms. 2020-08-18 15:22:12 -05:00
a1e4215ea7 Normal volume of speech-dispatcher is 100, not 0. 2020-08-18 15:19:34 -05:00
2f85c3b2bf Add iOS build. 2020-08-18 15:16:30 -05:00
3b3be830c6 Update iOS build targets. 2020-08-18 14:59:48 -05:00
65eeddc1ad Remove missing targets. 2020-08-18 14:24:00 -05:00
27e9aaf034 Add iOS build. 2020-08-18 14:17:06 -05:00
abe5292868 Bump version. 2020-08-13 11:15:52 -05:00
cce1569c72 Sync supported synths in README and lib.rs. 2020-08-13 11:15:23 -05:00
4d980270be Merge branch 'master' of https://github.com/ndarilek/tts-rs 2020-08-13 11:12:25 -05:00
d199a6e8ee Update supported synthesizers. 2020-08-13 11:12:15 -05:00
ff877acd87 Eliminate warning in non-MacOS builds. 2020-08-13 11:11:38 -05:00
c5b1ff1944 Add AVFoundation backend, used automatically on MacOS 10.14 and above. 2020-08-13 11:08:00 -05:00
2d0ab8889a Eliminate a warning. 2020-08-13 06:58:16 -05:00
cc2a4c12f7 Rename ns_speech_synthesizer backend to appkit. 2020-08-13 06:46:16 -05:00
1d7018a558 Build MacOS releases and explicitly specify task dependencies. 2020-08-12 15:56:10 -05:00
d95eed63c5 Add MacOS CI test builds. 2020-08-12 15:48:38 -05:00
af678d76d1 Update documentation with supported backends. 2020-08-12 15:45:16 -05:00
75fd320d3f Implement rate/volume-setting for NSSpeechSynthesizer, along with other tweaks.
Unfortunately, there seems to be a difference in how the `hello_world` example processes rate and volume changes. I'm not sure if it doesn't adjust rate for samples while speaking. In any case, arguably there are just going to be differences in platforms that I can't account for, so this may just have to be. Hopefully it doesn't interfere with actual usage.
2020-08-12 15:41:57 -05:00
dc1c00f446 Good news: NSSpeechSynthesizer speech now queues. Bad news: my brain bleeds. 2020-08-12 15:14:17 -05:00
7eccb9f573 Clean up println! and comparison calls. 2020-08-12 09:54:25 -05:00
427ca027be Add Drop implementation. 2020-08-12 09:52:16 -05:00
47bfe768e6 Get delegates working so speech interruption/queuing should now be possible.
* Fix broken delegate method signature.
* Add `NSRunLoop` into `hello_world` example so delegates are called. Presumably, MacOS apps already run one of these, but the example didn't.
2020-08-12 09:49:51 -05:00
faadc0e3b7 Still doesn't work, but at least it doesn't segfault now. 2020-08-11 14:44:52 -05:00
753f6c5ecd WIP: Initial support for MacOS/NSSpeechSynthesizer.
* Add necessary dependencies, build script, and `NSSpeechSynthesizer` backend.
* Get very basic speech working.

Needs a delegate to handle queued speech, and currently segfaults if one is set.
2020-08-11 12:11:19 -05:00
73786534dc Bump version. 2020-07-07 09:09:18 -05:00
e1bb6741a9 Correctly indicate that WinRT supports detection of speaking. 2020-07-07 09:08:44 -05:00
53 changed files with 3275 additions and 336 deletions


@ -6,50 +6,16 @@ on:
- "v*"
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
build_windows:
name: Build Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
cargo build --release
publish_winrt_bindings:
name: Publish winrt_bindings
runs-on: windows-latest
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- run: |
choco install -y llvm
cargo login $CARGO_TOKEN
cd winrt_bindings
cargo package
cargo publish
publish:
name: Publish
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
env:
CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo login $CARGO_TOKEN
rustup toolchain install stable
cargo publish


@ -5,23 +5,58 @@ on:
pull_request:
jobs:
build_linux:
name: Build Linux
runs-on: ubuntu-latest
check_formatting:
name: Check Formatting
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- run: |
sudo apt-get update
sudo apt-get install -y libspeechd-dev
cargo build --release
rustup target add wasm32-unknown-unknown
cargo build --release --target wasm32-unknown-unknown
rustup toolchain install stable
cargo fmt --all --check
cd examples/web
cargo fmt --all --check
build_windows:
name: Build Windows
runs-on: windows-latest
check:
name: Check
strategy:
matrix:
os: [windows-latest, ubuntu-22.04, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- run: sudo apt-get update; sudo apt-get install -y libspeechd-dev
if: ${{ runner.os == 'Linux' }}
- run: |
choco install -y llvm
cargo build --release
rustup toolchain install stable
cargo clippy --all-targets
check_web:
name: Check Web
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add wasm32-unknown-unknown
rustup toolchain install stable
cargo clippy --all-targets --target wasm32-unknown-unknown
check_android:
name: Check Android
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add aarch64-linux-android
rustup toolchain install stable
cargo clippy --all-targets --target aarch64-linux-android
check_web_example:
name: Check Web Example
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- run: |
rustup target add wasm32-unknown-unknown
rustup toolchain install stable
cd examples/web
cargo build --target wasm32-unknown-unknown

1
.gitignore vendored

@ -1,2 +1,3 @@
Cargo.lock
target
*.dll


@ -1,28 +1,71 @@
[package]
name = "tts"
version = "0.3.8"
version = "0.26.3"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
repository = "https://github.com/ndarilek/tts-rs"
description = "High-level Text-To-Speech (TTS) interface"
documentation = "https://docs.rs/tts"
license = "MIT"
exclude = ["*.cfg", "*.yml"]
edition = "2018"
edition = "2021"
[lib]
crate-type = ["lib", "cdylib", "staticlib"]
[features]
speech_dispatcher_0_9 = ["speech-dispatcher/0_9"]
speech_dispatcher_0_10 = ["speech-dispatcher/0_10"]
speech_dispatcher_0_11 = ["speech-dispatcher/0_11"]
default = ["speech_dispatcher_0_11"]
[dependencies]
dyn-clonable = "0.9"
oxilangtag = "0.1"
lazy_static = "1"
log = "0.4"
serde = { version = "1", optional = true, features = ["derive"] }
thiserror = "1"
[dev-dependencies]
env_logger = "0.7"
env_logger = "0.11"
[target.'cfg(windows)'.dependencies]
tolk = "0.2"
winrt = "0.7"
tts_winrt_bindings = { version = "0.1", path="winrt_bindings" }
tolk = { version = "0.5", optional = true }
windows = { version = "0.58", features = [
"Foundation",
"Foundation_Collections",
"Media_Core",
"Media_Playback",
"Media_SpeechSynthesis",
"Storage_Streams",
] }
[target.'cfg(target_os = "linux")'.dependencies]
speech-dispatcher = "0.4"
speech-dispatcher = { version = "0.16", default-features = false }
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
cocoa-foundation = "0.1"
core-foundation = "0.9"
libc = "0.2"
objc = { version = "0.2", features = ["exception"] }
[target.wasm32-unknown-unknown.dependencies]
wasm-bindgen = "0.2"
web-sys = { version = "0.3", features = ["SpeechSynthesis", "SpeechSynthesisUtterance", "Window", ] }
web-sys = { version = "0.3", features = [
"EventTarget",
"SpeechSynthesis",
"SpeechSynthesisErrorCode",
"SpeechSynthesisErrorEvent",
"SpeechSynthesisEvent",
"SpeechSynthesisUtterance",
"SpeechSynthesisVoice",
"Window",
] }
[target.'cfg(target_os="android")'.dependencies]
jni = "0.21"
ndk-context = "0.1"
[package.metadata.docs.rs]
no-default-features = true
features = ["speech_dispatcher_0_11"]

33
Makefile.toml Normal file

@ -0,0 +1,33 @@
[tasks.build-android-example]
script = [
"cd examples/android",
"./gradlew assembleDebug",
]
[tasks.run-android-example]
script = [
"cd examples/android",
"./gradlew runDebug",
]
[tasks.log-android]
command = "adb"
args = ["logcat", "RustStdoutStderr:D", "*:S"]
[tasks.install-trunk]
install_crate = { crate_name = "trunk", binary = "trunk", test_arg = "--help" }
[tasks.install-wasm-bindgen-cli]
install_crate = { crate_name = "wasm-bindgen-cli", binary = "wasm-bindgen", test_arg = "--help" }
[tasks.build-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["build"]
[tasks.run-web-example]
dependencies = ["install-trunk", "install-wasm-bindgen-cli"]
cwd = "examples/web"
command = "trunk"
args = ["serve"]


@ -2,8 +2,24 @@
This library provides a high-level Text-To-Speech (TTS) interface supporting various backends. Currently supported backends are:
* [Speech Dispatcher](https://freebsoft.org/speechd) (Linux)
* WebAssembly
* Windows
* Screen readers/SAPI via Tolk
* Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
* WinRT
* Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
* MacOS/iOS
* AppKit on MacOS 10.13 and below
* AVFoundation on MacOS 10.14 and above, and iOS
* Android
* WebAssembly
## Android Setup
On most platforms, this library is plug-and-play. Because of JNI's complexity, Android setup is a bit more involved. In general, look to the Android example for guidance. Here are some rough steps to get going:
* Set up _Cargo.toml_ as the example does. Be sure to depend on `ndk-glue`.
* Place _Bridge.java_ appropriately in your app. This is needed to support various Android TTS callbacks.
* Create a main activity similar to _MainActivity.kt_. In particular, you need to derive `android.app.NativeActivity`, and you need a `System.loadLibrary(...)` call appropriate for your app. `System.loadLibrary(...)` is needed to trigger `JNI_OnLoad`.
* Even though you've loaded the library in your main activity, add a metadata tag to your activity in _AndroidManifest.xml_ referencing it. Yes, this is redundant but necessary.
* Set up your various build.gradle scripts to reference the plugins, dependencies, etc. from the example. In particular, you'll want to set up [cargo-ndk-android-gradle](https://github.com/willir/cargo-ndk-android-gradle/) and either [depend on androidx.annotation](https://developer.android.com/reference/androidx/annotation/package-summary) or otherwise configure your app to keep the class _rs.tts.Bridge_.
And I think that should about do it. Good luck!

11
build.rs Normal file

@ -0,0 +1,11 @@
fn main() {
if std::env::var("TARGET").unwrap().contains("-apple") {
println!("cargo:rustc-link-lib=framework=AVFoundation");
if !std::env::var("CARGO_CFG_TARGET_OS")
.unwrap()
.contains("ios")
{
println!("cargo:rustc-link-lib=framework=AppKit");
}
}
}

39
examples/99bottles.rs Normal file

@ -0,0 +1,39 @@
use std::io;
use std::{thread, time};
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut bottles = 99;
while bottles > 0 {
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
tts.speak(format!("{} bottles of beer,", bottles), false)?;
tts.speak("Take one down, pass it around", false)?;
tts.speak("Give us a bit to drink this...", false)?;
let time = time::Duration::from_secs(15);
thread::sleep(time);
bottles -= 1;
tts.speak(format!("{} bottles of beer on the wall,", bottles), false)?;
}
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}

16
examples/android/.gitignore vendored Normal file

@ -0,0 +1,16 @@
*.iml
.gradle
/local.properties
/.idea/caches
/.idea/libraries
/.idea/modules.xml
/.idea/workspace.xml
/.idea/navEditor.xml
/.idea/assetWizardSettings.xml
.DS_Store
/build
/captures
.externalNativeBuild
.cxx
local.properties
Cargo.lock

3
examples/android/.idea/.gitignore vendored Normal file

@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml


@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<bytecodeTargetLevel target="1.6" />
</component>
</project>


@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GradleMigrationSettings" migrationVersion="1" />
<component name="GradleSettings">
<option name="linkedExternalProjectsSettings">
<GradleProjectSettings>
<option name="testRunner" value="PLATFORM" />
<option name="distributionType" value="DEFAULT_WRAPPED" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="gradleJvm" value="11" />
<option name="modules">
<set>
<option value="$PROJECT_DIR$" />
</set>
</option>
<option name="resolveModulePerSourceSet" value="false" />
<option name="useQualifiedModuleNames" value="true" />
</GradleProjectSettings>
</option>
</component>
</project>


@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteRepositoriesConfiguration">
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Maven Central repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="jboss.community" />
<option name="name" value="JBoss Community repository" />
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
</remote-repository>
<remote-repository>
<option name="id" value="BintrayJCenter" />
<option name="name" value="BintrayJCenter" />
<option name="url" value="https://jcenter.bintray.com/" />
</remote-repository>
<remote-repository>
<option name="id" value="Google" />
<option name="name" value="Google" />
<option name="url" value="https://dl.google.com/dl/android/maven2/" />
</remote-repository>
</component>
</project>


@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_6" default="false" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/build/classes" />
</component>
<component name="ProjectType">
<option name="id" value="Android" />
</component>
</project>


@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
</component>
</project>

1
examples/android/app/.gitignore vendored Normal file

@ -0,0 +1 @@
/build


@ -0,0 +1,56 @@
plugins {
id "com.android.application"
id "org.mozilla.rust-android-gradle.rust-android"
}
android {
namespace "rs.tts"
compileSdkVersion 33
ndkVersion "25.1.8937393"
defaultConfig {
applicationId "rs.tts"
minSdkVersion 21
targetSdkVersion 33
versionCode 1
versionName "1.0"
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro"
}
}
}
dependencies {
implementation "androidx.core:core-ktx:1.2.0"
implementation "androidx.annotation:annotation:1.1.0"
implementation "com.google.android.material:material:1.1.0"
implementation "androidx.constraintlayout:constraintlayout:1.1.3"
}
apply plugin: "org.mozilla.rust-android-gradle.rust-android"
cargo {
module = "."
libname = "tts"
targets = ["arm", "x86"]
}
tasks.whenTaskAdded { task ->
if ((task.name == 'javaPreCompileDebug' || task.name == 'javaPreCompileRelease')) {
task.dependsOn "cargoBuild"
}
}
project.afterEvaluate {
android.applicationVariants.all { variant ->
task "run${variant.name.capitalize()}"(type: Exec, dependsOn: "install${variant.name.capitalize()}", group: "run") {
commandLine = ["adb", "shell", "monkey", "-p", variant.applicationId + " 1"]
doLast {
println "Launching ${variant.applicationId}"
}
}
}
}

21
examples/android/app/proguard-rules.pro vendored Normal file

@ -0,0 +1,21 @@
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile


@ -0,0 +1 @@
jniLibs


@ -0,0 +1,13 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<application android:allowBackup="true" android:label="@string/app_name">
<activity android:name=".MainActivity" android:exported="true">
<meta-data android:name="android.app.lib_name" android:value="hello_world" />
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>


@ -0,0 +1,24 @@
package rs.tts;
import android.speech.tts.TextToSpeech;
import android.speech.tts.UtteranceProgressListener;
@androidx.annotation.Keep
public class Bridge extends UtteranceProgressListener implements TextToSpeech.OnInitListener {
public int backendId;
public Bridge(int backendId) {
this.backendId = backendId;
}
public native void onInit(int status);
public native void onStart(String utteranceId);
public native void onStop(String utteranceId, Boolean interrupted);
public native void onDone(String utteranceId);
public native void onError(String utteranceId);
}


@ -0,0 +1,11 @@
package rs.tts
import android.app.NativeActivity
class MainActivity : NativeActivity() {
companion object {
init {
System.loadLibrary("hello_world")
}
}
}


@ -0,0 +1,3 @@
<resources>
<string name="app_name">TTS-RS</string>
</resources>


@ -0,0 +1,29 @@
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
repositories {
google()
mavenCentral()
maven {
url "https://plugins.gradle.org/m2/"
}
}
}
plugins {
id "com.android.application" version "7.3.0" apply false
id "com.android.library" version "7.3.0" apply false
id "org.jetbrains.kotlin.android" version "1.7.21" apply false
id "org.mozilla.rust-android-gradle.rust-android" version "0.9.3" apply false
}
allprojects {
repositories {
google()
mavenCentral()
}
}
task clean(type: Delete) {
delete rootProject.buildDir
}


@ -0,0 +1,14 @@
[package]
name = "hello_world"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["dylib"]
[dependencies]
ndk-glue = "0.7"
tts = { path = "../.." }


@ -0,0 +1,21 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
# AndroidX package structure to make it clearer which packages are bundled with the
# Android operating system, and which are packaged with your app's APK
# https://developer.android.com/topic/libraries/support-library/androidx-rn
android.useAndroidX=true
# Automatically convert third-party libraries to use AndroidX
android.enableJetifier=true
# Kotlin code style for this project: "official" or "obsolete":
kotlin.code.style=official

Binary file not shown.


@ -0,0 +1,6 @@
#Mon Dec 28 17:32:22 CST 2020
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip

172
examples/android/gradlew vendored Executable file

@ -0,0 +1,172 @@
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
echo "$*"
}
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "$@"

examples/android/gradlew.bat vendored Normal file

@ -0,0 +1,84 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega


@ -0,0 +1,8 @@
pluginManagement {
repositories {
gradlePluginPortal()
google()
mavenCentral()
}
}
include ":app"


@ -0,0 +1,70 @@
use tts::*;
// The `loop {}` below only simulates an app loop.
// Without it, the `Tts` instance gets dropped before callbacks can run.
#[allow(unreachable_code)]
fn run() -> Result<(), Error> {
let mut tts = Tts::default()?;
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();
if rate {
let original_rate = tts.get_rate()?;
tts.speak(format!("Current rate: {}", original_rate), false)?;
tts.set_rate(tts.max_rate())?;
tts.speak("This is very fast.", false)?;
tts.set_rate(tts.min_rate())?;
tts.speak("This is very slow.", false)?;
tts.set_rate(tts.normal_rate())?;
tts.speak("This is the normal rate.", false)?;
tts.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts.supported_features();
if pitch {
let original_pitch = tts.get_pitch()?;
tts.set_pitch(tts.max_pitch())?;
tts.speak("This is high-pitch.", false)?;
tts.set_pitch(tts.min_pitch())?;
tts.speak("This is low pitch.", false)?;
tts.set_pitch(tts.normal_pitch())?;
tts.speak("This is normal pitch.", false)?;
tts.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts.supported_features();
if volume {
let original_volume = tts.get_volume()?;
tts.set_volume(tts.max_volume())?;
tts.speak("This is loud!", false)?;
tts.set_volume(tts.min_volume())?;
tts.speak("This is quiet.", false)?;
tts.set_volume(tts.normal_volume())?;
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
tts.speak("Goodbye.", false)?;
loop {}
Ok(())
}
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
run().expect("Failed to run");
}
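The `loop {}` above keeps the `Tts` instance alive so the utterance callbacks can fire, but it busy-spins. On desktop targets the other examples block on standard input instead; a minimal sketch of that variant (editorial, not part of the diff, and without the Android `ndk_glue` attribute):

use std::io;

use tts::*;

fn main() -> Result<(), Error> {
    let mut tts = Tts::default()?;
    let Features {
        utterance_callbacks,
        ..
    } = tts.supported_features();
    if utterance_callbacks {
        tts.on_utterance_end(Some(Box::new(|utterance| {
            println!("Finished speaking {:?}", utterance)
        })))?;
    }
    tts.speak("Hello, world.", false)?;
    // Blocking on stdin keeps `tts` (and its registered callbacks) alive
    // without spinning a core.
    let mut _input = String::new();
    io::stdin().read_line(&mut _input)?;
    Ok(())
}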

examples/clone_drop.rs Normal file

@ -0,0 +1,89 @@
use std::io;
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let mut tts_clone = tts.clone();
drop(tts);
let Features { is_speaking, .. } = tts_clone.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts_clone.is_speaking()?);
}
tts_clone.speak("Hello, world.", false)?;
let Features { rate, .. } = tts_clone.supported_features();
if rate {
let original_rate = tts_clone.get_rate()?;
tts_clone.speak(format!("Current rate: {}", original_rate), false)?;
tts_clone.set_rate(tts_clone.max_rate())?;
tts_clone.speak("This is very fast.", false)?;
tts_clone.set_rate(tts_clone.min_rate())?;
tts_clone.speak("This is very slow.", false)?;
tts_clone.set_rate(tts_clone.normal_rate())?;
tts_clone.speak("This is the normal rate.", false)?;
tts_clone.set_rate(original_rate)?;
}
let Features { pitch, .. } = tts_clone.supported_features();
if pitch {
let original_pitch = tts_clone.get_pitch()?;
tts_clone.set_pitch(tts_clone.max_pitch())?;
tts_clone.speak("This is high-pitch.", false)?;
tts_clone.set_pitch(tts_clone.min_pitch())?;
tts_clone.speak("This is low pitch.", false)?;
tts_clone.set_pitch(tts_clone.normal_pitch())?;
tts_clone.speak("This is normal pitch.", false)?;
tts_clone.set_pitch(original_pitch)?;
}
let Features { volume, .. } = tts_clone.supported_features();
if volume {
let original_volume = tts_clone.get_volume()?;
tts_clone.set_volume(tts_clone.max_volume())?;
tts_clone.speak("This is loud!", false)?;
tts_clone.set_volume(tts_clone.min_volume())?;
tts_clone.speak("This is quiet.", false)?;
tts_clone.set_volume(tts_clone.normal_volume())?;
tts_clone.speak("This is normal volume.", false)?;
tts_clone.set_volume(original_volume)?;
}
tts_clone.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}


@ -1,10 +1,41 @@
use std::io;
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = TTS::default()?;
let mut tts = Tts::default()?;
if Tts::screen_reader_available() {
println!("A screen reader is available on this platform.");
} else {
println!("No screen reader is available on this platform.");
}
let Features {
utterance_callbacks,
..
} = tts.supported_features();
if utterance_callbacks {
tts.on_utterance_begin(Some(Box::new(|utterance| {
println!("Started speaking {:?}", utterance)
})))?;
tts.on_utterance_end(Some(Box::new(|utterance| {
println!("Finished speaking {:?}", utterance)
})))?;
tts.on_utterance_stop(Some(Box::new(|utterance| {
println!("Stopped speaking {:?}", utterance)
})))?;
}
let Features { is_speaking, .. } = tts.supported_features();
if is_speaking {
println!("Are we speaking? {}", tts.is_speaking()?);
}
tts.speak("Hello, world.", false)?;
let Features { rate, .. } = tts.supported_features();
if rate {
@ -40,8 +71,34 @@ fn main() -> Result<(), Error> {
tts.speak("This is normal volume.", false)?;
tts.set_volume(original_volume)?;
}
let Features { voice, .. } = tts.supported_features();
if voice {
let voices = tts.voices()?;
println!("Available voices:\n===");
for v in &voices {
println!("{:?}", v);
}
let Features { get_voice, .. } = tts.supported_features();
let original_voice = if get_voice { tts.voice()? } else { None };
for v in &voices {
tts.set_voice(v)?;
tts.speak(format!("This is {}.", v.name()), false)?;
}
if let Some(original_voice) = original_voice {
tts.set_voice(&original_voice)?;
}
}
tts.speak("Goodbye.", false)?;
let mut _input = String::new();
// The below is only needed to make the example run on MacOS because there is no NSRunLoop in this context.
// It shouldn't be needed in an app or game that almost certainly has one already.
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let _: () = msg_send![run_loop, run];
}
}
io::stdin().read_line(&mut _input)?;
Ok(())
}
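The loop added above speaks every installed voice in turn. When only a particular voice is wanted, the same `voices()`/`set_voice()` calls can drive a small filter; a hedged sketch (the helper name `pick_voice_by_name` is made up for illustration, not part of the examples):

use tts::*;

/// Switch to the first voice whose name contains `needle`, if any.
fn pick_voice_by_name(tts: &mut Tts, needle: &str) -> Result<(), Error> {
    let Features { voice, .. } = tts.supported_features();
    if !voice {
        // This backend can't switch voices; leave the default in place.
        return Ok(());
    }
    for v in tts.voices()? {
        if v.name().contains(needle) {
            tts.set_voice(&v)?;
            break;
        }
    }
    Ok(())
}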

examples/latency.rs Normal file

@ -0,0 +1,14 @@
use std::io;
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
println!("Press Enter and wait for speech.");
loop {
let mut _input = String::new();
io::stdin().read_line(&mut _input)?;
tts.speak("Hello, world.", true)?;
}
}

examples/ramble.rs Normal file

@ -0,0 +1,32 @@
#[cfg(target_os = "macos")]
use cocoa_foundation::base::id;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSDefaultRunLoopMode;
#[cfg(target_os = "macos")]
use cocoa_foundation::foundation::NSRunLoop;
#[cfg(target_os = "macos")]
use objc::class;
#[cfg(target_os = "macos")]
use objc::{msg_send, sel, sel_impl};
use std::{thread, time};
use tts::*;
fn main() -> Result<(), Error> {
env_logger::init();
let mut tts = Tts::default()?;
let mut phrase = 1;
loop {
tts.speak(format!("Phrase {}", phrase), false)?;
#[cfg(target_os = "macos")]
{
let run_loop: id = unsafe { NSRunLoop::currentRunLoop() };
unsafe {
let date: id = msg_send![class!(NSDate), distantFuture];
let _: () = msg_send![run_loop, runMode:NSDefaultRunLoopMode beforeDate:date];
}
}
let time = time::Duration::from_secs(5);
thread::sleep(time);
phrase += 1;
}
}


@ -0,0 +1,2 @@
[build]
target = "wasm32-unknown-unknown"

examples/web/.gitignore vendored Normal file

@ -0,0 +1 @@
dist

examples/web/Cargo.toml Normal file

@ -0,0 +1,13 @@
[package]
name = "web"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
console_log = "0.2"
log = "0.4"
seed = "0.9"
tts = { path = "../.." }

examples/web/index.html Normal file

@ -0,0 +1,12 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Example</title>
</head>
<body>
<div id="app"></div>
</body>
</html>

examples/web/src/main.rs Normal file

@ -0,0 +1,157 @@
#![allow(clippy::wildcard_imports)]
use seed::{prelude::*, *};
use tts::Tts;
#[derive(Clone)]
struct Model {
text: String,
tts: Tts,
}
#[derive(Clone)]
enum Msg {
TextChanged(String),
RateChanged(String),
PitchChanged(String),
VolumeChanged(String),
VoiceChanged(String),
Speak,
}
fn init(_: Url, _: &mut impl Orders<Msg>) -> Model {
let mut tts = Tts::default().unwrap();
if tts.voices().unwrap().iter().len() > 0 {
if tts.voice().unwrap().is_none() {
tts.set_voice(tts.voices().unwrap().first().unwrap())
.expect("Failed to set voice");
}
}
Model {
text: "Hello, world. This is a test of the current text-to-speech values.".into(),
tts,
}
}
fn update(msg: Msg, model: &mut Model, _: &mut impl Orders<Msg>) {
use Msg::*;
match msg {
TextChanged(text) => model.text = text,
RateChanged(rate) => {
let rate = rate.parse::<f32>().unwrap();
model.tts.set_rate(rate).unwrap();
}
PitchChanged(pitch) => {
let pitch = pitch.parse::<f32>().unwrap();
model.tts.set_pitch(pitch).unwrap();
}
VolumeChanged(volume) => {
let volume = volume.parse::<f32>().unwrap();
model.tts.set_volume(volume).unwrap();
}
VoiceChanged(voice) => {
for v in model.tts.voices().unwrap() {
if v.id() == voice {
model.tts.set_voice(&v).unwrap();
}
}
}
Speak => {
model.tts.speak(&model.text, false).unwrap();
}
}
}
fn view(model: &Model) -> Node<Msg> {
let should_show_voices = model.tts.voices().unwrap().iter().len() > 0;
form![
div![label![
"Text to speak",
input![
attrs! {
At::Value => model.text,
At::AutoFocus => AtValue::None,
},
input_ev(Ev::Input, Msg::TextChanged)
],
],],
div![label![
"Rate",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_rate().unwrap(),
At::Min => model.tts.min_rate(),
At::Max => model.tts.max_rate()
},
input_ev(Ev::Input, Msg::RateChanged)
],
],],
div![label![
"Pitch",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_pitch().unwrap(),
At::Min => model.tts.min_pitch(),
At::Max => model.tts.max_pitch()
},
input_ev(Ev::Input, Msg::PitchChanged)
],
],],
div![label![
"Volume",
input![
attrs! {
At::Type => "number",
At::Value => model.tts.get_volume().unwrap(),
At::Min => model.tts.min_volume(),
At::Max => model.tts.max_volume()
},
input_ev(Ev::Input, Msg::VolumeChanged)
],
],],
if should_show_voices {
div![
label!["Voice"],
select![
model.tts.voices().unwrap().iter().map(|v| {
let selected = if let Some(voice) = model.tts.voice().unwrap() {
voice.id() == v.id()
} else {
false
};
option![
attrs! {
At::Value => v.id()
},
if selected {
attrs! {
At::Selected => selected
}
} else {
attrs! {}
},
v.name()
]
}),
input_ev(Ev::Change, Msg::VoiceChanged)
]
]
} else {
div!["Your browser does not seem to support selecting voices."]
},
button![
"Speak",
ev(Ev::Click, |e| {
e.prevent_default();
Msg::Speak
}),
],
]
}
fn main() {
console_log::init().expect("Error initializing logger");
App::start("app", init, update, view);
}

src/backends/android.rs Normal file

@ -0,0 +1,402 @@
#[cfg(target_os = "android")]
use std::{
collections::HashSet,
ffi::{CStr, CString},
os::raw::c_void,
sync::{Mutex, RwLock},
thread,
time::{Duration, Instant},
};
use jni::{
objects::{GlobalRef, JObject, JString},
sys::{jfloat, jint, JNI_VERSION_1_6},
JNIEnv, JavaVM,
};
use lazy_static::lazy_static;
use log::{error, info};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
lazy_static! {
static ref BRIDGE: Mutex<Option<GlobalRef>> = Mutex::new(None);
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref PENDING_INITIALIZATIONS: RwLock<HashSet<u64>> = RwLock::new(HashSet::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "system" fn JNI_OnLoad(vm: JavaVM, _: *mut c_void) -> jint {
let mut env = vm.get_env().expect("Cannot get reference to the JNIEnv");
let b = env
.find_class("rs/tts/Bridge")
.expect("Failed to find `Bridge`");
let b = env
.new_global_ref(b)
.expect("Failed to create `Bridge` `GlobalRef`");
let mut bridge = BRIDGE.lock().unwrap();
*bridge = Some(b);
JNI_VERSION_1_6
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onInit(mut env: JNIEnv, obj: JObject, status: jint) {
let id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).remove(&id);
if status != 0 {
error!("Failed to initialize TTS engine");
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStart(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_begin.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onStop(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onDone(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn Java_rs_tts_Bridge_onError(
mut env: JNIEnv,
obj: JObject,
utterance_id: JString,
) {
let backend_id = env
.get_field(obj, "backendId", "I")
.expect("Failed to get backend ID")
.i()
.expect("Failed to cast to int") as u64;
let backend_id = BackendId::Android(backend_id);
let utterance_id = CString::from(CStr::from_ptr(
env.get_string(&utterance_id).unwrap().as_ptr(),
))
.into_string()
.unwrap();
let utterance_id = utterance_id.parse::<u64>().unwrap();
let utterance_id = UtteranceId::Android(utterance_id);
let mut callbacks = CALLBACKS.lock().unwrap();
let cb = callbacks.get_mut(&backend_id).unwrap();
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
}
#[derive(Clone)]
pub(crate) struct Android {
id: BackendId,
tts: GlobalRef,
rate: f32,
pitch: f32,
}
impl Android {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing Android backend");
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = *backend_id;
let id = BackendId::Android(bid);
*backend_id += 1;
drop(backend_id);
let ctx = ndk_context::android_context();
let vm = unsafe { jni::JavaVM::from_raw(ctx.vm().cast()) }?;
let context = unsafe { JObject::from_raw(ctx.context().cast()) };
let mut env = vm.attach_current_thread_permanently()?;
let bridge = BRIDGE.lock().unwrap();
if let Some(bridge) = &*bridge {
let bridge = env.new_object(bridge, "(I)V", &[(bid as jint).into()])?;
let tts = env.new_object(
"android/speech/tts/TextToSpeech",
"(Landroid/content/Context;Landroid/speech/tts/TextToSpeech$OnInitListener;)V",
&[(&context).into(), (&bridge).into()],
)?;
env.call_method(
&tts,
"setOnUtteranceProgressListener",
"(Landroid/speech/tts/UtteranceProgressListener;)I",
&[(&bridge).into()],
)?;
{
let mut pending = PENDING_INITIALIZATIONS.write().unwrap();
(*pending).insert(bid);
}
let tts = env.new_global_ref(tts)?;
// This hack makes my brain bleed.
const MAX_WAIT_TIME: Duration = Duration::from_millis(500);
let start = Instant::now();
// Wait a max of 500ms for initialization, then return an error to avoid hanging.
loop {
{
let pending = PENDING_INITIALIZATIONS.read().unwrap();
if !(*pending).contains(&bid) {
break;
}
if start.elapsed() > MAX_WAIT_TIME {
return Err(Error::OperationFailed);
}
}
thread::sleep(Duration::from_millis(5));
}
Ok(Self {
id,
tts,
rate: 1.,
pitch: 1.,
})
} else {
Err(Error::NoneError)
}
}
fn vm() -> Result<JavaVM, jni::errors::Error> {
let ctx = ndk_context::android_context();
unsafe { jni::JavaVM::from_raw(ctx.vm().cast()) }
}
}
impl Backend for Android {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: false,
is_speaking: true,
utterance_callbacks: true,
voice: false,
get_voice: false,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let text = env.new_string(text)?;
let queue_mode = if interrupt { 0 } else { 1 };
let mut utterance_id = NEXT_UTTERANCE_ID.lock().unwrap();
let uid = *utterance_id;
*utterance_id += 1;
drop(utterance_id);
let id = UtteranceId::Android(uid);
let uid = env.new_string(uid.to_string())?;
let rv = env.call_method(
tts,
"speak",
"(Ljava/lang/CharSequence;ILandroid/os/Bundle;Ljava/lang/String;)I",
&[
(&text).into(),
queue_mode.into(),
(&JObject::null()).into(),
(&uid).into(),
],
)?;
let rv = rv.i()?;
if rv == 0 {
Ok(Some(id))
} else {
Err(Error::OperationFailed)
}
}
fn stop(&mut self) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "stop", "()I", &[])?;
let rv = rv.i()?;
if rv == 0 {
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_rate(&self) -> f32 {
0.1
}
fn max_rate(&self) -> f32 {
10.
}
fn normal_rate(&self) -> f32 {
1.
}
fn get_rate(&self) -> Result<f32, Error> {
Ok(self.rate)
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rate = rate as jfloat;
let rv = env.call_method(tts, "setSpeechRate", "(F)I", &[rate.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.rate = rate;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_pitch(&self) -> f32 {
0.1
}
fn max_pitch(&self) -> f32 {
2.
}
fn normal_pitch(&self) -> f32 {
1.
}
fn get_pitch(&self) -> Result<f32, Error> {
Ok(self.pitch)
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let pitch = pitch as jfloat;
let rv = env.call_method(tts, "setPitch", "(F)I", &[pitch.into()])?;
let rv = rv.i()?;
if rv == 0 {
self.pitch = pitch;
Ok(())
} else {
Err(Error::OperationFailed)
}
}
fn min_volume(&self) -> f32 {
todo!()
}
fn max_volume(&self) -> f32 {
todo!()
}
fn normal_volume(&self) -> f32 {
todo!()
}
fn get_volume(&self) -> Result<f32, Error> {
todo!()
}
fn set_volume(&mut self, _volume: f32) -> Result<(), Error> {
todo!()
}
fn is_speaking(&self) -> Result<bool, Error> {
let vm = Self::vm()?;
let mut env = vm.get_env()?;
let tts = self.tts.as_obj();
let rv = env.call_method(tts, "isSpeaking", "()Z", &[])?;
let rv = rv.z()?;
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}
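`Android::new` above waits for `onInit` by polling `PENDING_INITIALIZATIONS` every 5 ms for up to 500 ms. The same wait could be expressed with a `Condvar`, so the constructor wakes as soon as the callback fires; a minimal sketch under the assumption that the pending set is kept behind a `Mutex` (a `Condvar` pairs with a `Mutex`, not the `RwLock` used above) and that `wait_for_init` and `ready` are hypothetical names, not part of the crate:

use std::{
    collections::HashSet,
    sync::{Condvar, Mutex},
    time::Duration,
};

/// Hypothetical alternative to the polling loop in `Android::new`: block until
/// `backend_id` leaves the pending set, or until `timeout` elapses.
fn wait_for_init(
    pending: &Mutex<HashSet<u64>>,
    ready: &Condvar,
    backend_id: u64,
    timeout: Duration,
) -> bool {
    let guard = pending.lock().unwrap();
    // Re-checks the predicate on every `notify_all` and returns once the ID
    // is gone or the timeout is hit.
    let (guard, result) = ready
        .wait_timeout_while(guard, timeout, |p| p.contains(&backend_id))
        .unwrap();
    drop(guard);
    !result.timed_out()
}

// The JNI `onInit` callback would then call `ready.notify_all()` right after
// removing the backend ID, instead of relying on the 5 ms sleep.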

src/backends/appkit.rs Normal file

@ -0,0 +1,231 @@
#[cfg(target_os = "macos")]
use cocoa_foundation::base::{id, nil};
use cocoa_foundation::foundation::NSString;
use log::{info, trace};
use objc::declare::ClassDecl;
use objc::runtime::*;
use objc::*;
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
#[derive(Clone, Debug)]
pub(crate) struct AppKit(*mut Object, *mut Object);
impl AppKit {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing AppKit backend");
unsafe {
let obj: *mut Object = msg_send![class!(NSSpeechSynthesizer), new];
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
decl.add_ivar::<id>("synth");
decl.add_ivar::<id>("strings");
extern "C" fn enqueue_and_speak(this: &Object, _: Sel, string: id) {
unsafe {
let strings: id = *this.get_ivar("strings");
let _: () = msg_send![strings, addObject: string];
let count: u32 = msg_send![strings, count];
if count == 1 {
let str: id = msg_send!(strings, firstObject);
let synth: id = *this.get_ivar("synth");
let _: BOOL = msg_send![synth, startSpeakingString: str];
}
}
}
decl.add_method(
sel!(enqueueAndSpeak:),
enqueue_and_speak as extern "C" fn(&Object, Sel, id) -> (),
);
extern "C" fn speech_synthesizer_did_finish_speaking(
this: &Object,
_: Sel,
synth: *const Object,
_: BOOL,
) {
unsafe {
let strings: id = *this.get_ivar("strings");
let count: u32 = msg_send![strings, count];
if count > 0 {
let str: id = msg_send!(strings, firstObject);
let _: () = msg_send![str, release];
let _: () = msg_send!(strings, removeObjectAtIndex:0);
if count > 1 {
let str: id = msg_send!(strings, firstObject);
let _: BOOL = msg_send![synth, startSpeakingString: str];
}
}
}
}
decl.add_method(
sel!(speechSynthesizer:didFinishSpeaking:),
speech_synthesizer_did_finish_speaking
as extern "C" fn(&Object, Sel, *const Object, BOOL) -> (),
);
extern "C" fn clear_queue(this: &Object, _: Sel) {
unsafe {
let strings: id = *this.get_ivar("strings");
let mut count: u32 = msg_send![strings, count];
while count > 0 {
let str: id = msg_send!(strings, firstObject);
let _: () = msg_send![str, release];
let _: () = msg_send!(strings, removeObjectAtIndex:0);
count = msg_send![strings, count];
}
}
}
decl.add_method(
sel!(clearQueue),
clear_queue as extern "C" fn(&Object, Sel) -> (),
);
let delegate_class = decl.register();
let delegate_obj: *mut Object = msg_send![delegate_class, new];
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("synth", obj);
let strings: id = msg_send![class!(NSMutableArray), new];
delegate_obj
.as_mut()
.ok_or(Error::OperationFailed)?
.set_ivar("strings", strings);
let _: Object = msg_send![obj, setDelegate: delegate_obj];
Ok(AppKit(obj, delegate_obj))
}
}
}
impl Backend for AppKit {
fn id(&self) -> Option<BackendId> {
None
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
volume: true,
is_speaking: true,
..Default::default()
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
self.stop()?;
}
unsafe {
let str = NSString::alloc(nil).init_str(text);
let _: () = msg_send![self.1, enqueueAndSpeak: str];
}
Ok(None)
}
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
unsafe {
let _: () = msg_send![self.1, clearQueue];
let _: () = msg_send![self.0, stopSpeaking];
}
Ok(())
}
fn min_rate(&self) -> f32 {
10.
}
fn max_rate(&self) -> f32 {
500.
}
fn normal_rate(&self) -> f32 {
175.
}
fn get_rate(&self) -> Result<f32, Error> {
let rate: f32 = unsafe { msg_send![self.0, rate] };
Ok(rate)
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
trace!("set_rate({})", rate);
unsafe {
let _: () = msg_send![self.0, setRate: rate];
}
Ok(())
}
fn min_pitch(&self) -> f32 {
unimplemented!()
}
fn max_pitch(&self) -> f32 {
unimplemented!()
}
fn normal_pitch(&self) -> f32 {
unimplemented!()
}
fn get_pitch(&self) -> Result<f32, Error> {
unimplemented!()
}
fn set_pitch(&mut self, _pitch: f32) -> Result<(), Error> {
unimplemented!()
}
fn min_volume(&self) -> f32 {
0.
}
fn max_volume(&self) -> f32 {
1.
}
fn normal_volume(&self) -> f32 {
1.
}
fn get_volume(&self) -> Result<f32, Error> {
let volume: f32 = unsafe { msg_send![self.0, volume] };
Ok(volume)
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
unsafe {
let _: () = msg_send![self.0, setVolume: volume];
}
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
let is_speaking: i8 = unsafe { msg_send![self.0, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}
impl Drop for AppKit {
fn drop(&mut self) {
unsafe {
let _: Object = msg_send![self.0, release];
let _: Object = msg_send![self.1, release];
}
}
}


@ -0,0 +1,340 @@
#[cfg(any(target_os = "macos", target_os = "ios"))]
use std::sync::Mutex;
use cocoa_foundation::base::{id, nil, NO};
use cocoa_foundation::foundation::NSString;
use core_foundation::array::CFArray;
use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use lazy_static::lazy_static;
use log::{info, trace};
use objc::runtime::{Object, Sel};
use objc::{class, declare::ClassDecl, msg_send, sel, sel_impl};
use oxilangtag::LanguageTag;
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
#[derive(Clone, Debug)]
pub(crate) struct AvFoundation {
id: BackendId,
delegate: *mut Object,
synth: *mut Object,
rate: f32,
volume: f32,
pitch: f32,
voice: Option<Voice>,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
}
impl AvFoundation {
pub(crate) fn new() -> Result<Self, Error> {
info!("Initializing AVFoundation backend");
let mut decl = ClassDecl::new("MyNSSpeechSynthesizerDelegate", class!(NSObject))
.ok_or(Error::OperationFailed)?;
decl.add_ivar::<u64>("backend_id");
extern "C" fn speech_synthesizer_did_start_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_start_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
trace!("Calling utterance_begin");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_start_speech_utterance");
}
extern "C" fn speech_synthesizer_did_finish_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_finish_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
trace!("Calling utterance_end");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_finish_speech_utterance");
}
extern "C" fn speech_synthesizer_did_cancel_speech_utterance(
this: &Object,
_: Sel,
_synth: *const Object,
utterance: id,
) {
trace!("speech_synthesizer_did_cancel_speech_utterance");
unsafe {
let backend_id: u64 = *this.get_ivar("backend_id");
let backend_id = BackendId::AvFoundation(backend_id);
trace!("Locking callbacks");
let mut callbacks = CALLBACKS.lock().unwrap();
trace!("Locked");
let callbacks = callbacks.get_mut(&backend_id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
trace!("Calling utterance_stop");
let utterance_id = UtteranceId::AvFoundation(utterance);
callback(utterance_id);
trace!("Called");
}
}
trace!("Done speech_synthesizer_did_cancel_speech_utterance");
}
unsafe {
decl.add_method(
sel!(speechSynthesizer:didStartSpeechUtterance:),
speech_synthesizer_did_start_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
decl.add_method(
sel!(speechSynthesizer:didFinishSpeechUtterance:),
speech_synthesizer_did_finish_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
decl.add_method(
sel!(speechSynthesizer:didCancelSpeechUtterance:),
speech_synthesizer_did_cancel_speech_utterance
as extern "C" fn(&Object, Sel, *const Object, id) -> (),
);
}
let delegate_class = decl.register();
let delegate_obj: *mut Object = unsafe { msg_send![delegate_class, new] };
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let rv = unsafe {
trace!("Creating synth");
let synth: *mut Object = msg_send![class!(AVSpeechSynthesizer), new];
trace!("Allocated {:?}", synth);
delegate_obj
.as_mut()
.unwrap()
.set_ivar("backend_id", *backend_id);
trace!("Set backend ID in delegate");
let _: () = msg_send![synth, setDelegate: delegate_obj];
trace!("Assigned delegate: {:?}", delegate_obj);
AvFoundation {
id: BackendId::AvFoundation(*backend_id),
delegate: delegate_obj,
synth,
rate: 0.5,
volume: 1.,
pitch: 1.,
voice: None,
}
};
*backend_id += 1;
Ok(rv)
}
}
impl Backend for AvFoundation {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt && self.is_speaking()? {
self.stop()?;
}
let mut utterance: id;
unsafe {
trace!("Allocating utterance string");
let mut str = NSString::alloc(nil);
str = str.init_str(text);
trace!("Allocating utterance");
utterance = msg_send![class!(AVSpeechUtterance), alloc];
trace!("Initializing utterance");
utterance = msg_send![utterance, initWithString: str];
trace!("Setting rate to {}", self.rate);
let _: () = msg_send![utterance, setRate: self.rate];
trace!("Setting volume to {}", self.volume);
let _: () = msg_send![utterance, setVolume: self.volume];
trace!("Setting pitch to {}", self.pitch);
let _: () = msg_send![utterance, setPitchMultiplier: self.pitch];
if let Some(voice) = &self.voice {
let mut vid = NSString::alloc(nil);
vid = vid.init_str(&voice.id());
let v: id = msg_send![class!(AVSpeechSynthesisVoice), voiceWithIdentifier: vid];
let _: () = msg_send![utterance, setVoice: v];
}
trace!("Enqueuing");
let _: () = msg_send![self.synth, speakUtterance: utterance];
trace!("Done queuing");
}
Ok(Some(UtteranceId::AvFoundation(utterance)))
}
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
unsafe {
let _: () = msg_send![self.synth, stopSpeakingAtBoundary: 0];
}
Ok(())
}
fn min_rate(&self) -> f32 {
0.1
}
fn max_rate(&self) -> f32 {
2.
}
fn normal_rate(&self) -> f32 {
0.5
}
fn get_rate(&self) -> Result<f32, Error> {
Ok(self.rate)
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
trace!("set_rate({})", rate);
self.rate = rate;
Ok(())
}
fn min_pitch(&self) -> f32 {
0.5
}
fn max_pitch(&self) -> f32 {
2.0
}
fn normal_pitch(&self) -> f32 {
1.0
}
fn get_pitch(&self) -> Result<f32, Error> {
Ok(self.pitch)
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
trace!("set_pitch({})", pitch);
self.pitch = pitch;
Ok(())
}
fn min_volume(&self) -> f32 {
0.
}
fn max_volume(&self) -> f32 {
1.
}
fn normal_volume(&self) -> f32 {
1.
}
fn get_volume(&self) -> Result<f32, Error> {
Ok(self.volume)
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
trace!("set_volume({})", volume);
self.volume = volume;
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
trace!("is_speaking()");
let is_speaking: i8 = unsafe { msg_send![self.synth, isSpeaking] };
Ok(is_speaking != NO as i8)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let voices: CFArray = unsafe {
CFArray::wrap_under_get_rule(msg_send![class!(AVSpeechSynthesisVoice), speechVoices])
};
let rv = voices
.iter()
.map(|v| {
let id: CFString = unsafe {
CFString::wrap_under_get_rule(msg_send![*v as *const Object, identifier])
};
let name: CFString =
unsafe { CFString::wrap_under_get_rule(msg_send![*v as *const Object, name]) };
let gender: i64 = unsafe { msg_send![*v as *const Object, gender] };
let gender = match gender {
1 => Some(Gender::Male),
2 => Some(Gender::Female),
_ => None,
};
let language: CFString = unsafe {
CFString::wrap_under_get_rule(msg_send![*v as *const Object, language])
};
let language = language.to_string();
let language = LanguageTag::parse(language).unwrap();
Voice {
id: id.to_string(),
name: name.to_string(),
gender,
language,
}
})
.collect();
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
self.voice = Some(voice.clone());
Ok(())
}
}
impl Drop for AvFoundation {
fn drop(&mut self) {
unsafe {
let _: Object = msg_send![self.delegate, release];
let _: Object = msg_send![self.synth, release];
}
}
}


@ -1,20 +1,41 @@
#[cfg(target_os = "linux")]
mod speech_dispatcher;
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
mod tolk;
#[cfg(windows)]
pub(crate) mod winrt;
mod winrt;
#[cfg(target_arch = "wasm32")]
mod web;
#[cfg(target_os = "macos")]
mod appkit;
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod av_foundation;
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "linux")]
pub use self::speech_dispatcher::*;
pub(crate) use self::speech_dispatcher::*;
#[cfg(all(windows, feature = "tolk"))]
pub(crate) use self::tolk::*;
#[cfg(windows)]
pub use self::tolk::*;
pub(crate) use self::winrt::*;
#[cfg(target_arch = "wasm32")]
pub use self::web::*;
pub(crate) use self::web::*;
#[cfg(target_os = "macos")]
pub(crate) use self::appkit::*;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::av_foundation::*;
#[cfg(target_os = "android")]
pub(crate) use self::android::*;


@ -1,49 +1,116 @@
#[cfg(target_os = "linux")]
use std::{collections::HashMap, sync::Mutex};
use lazy_static::*;
use log::{info, trace};
use oxilangtag::LanguageTag;
use speech_dispatcher::*;
use crate::{Backend, Error, Features};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
pub struct SpeechDispatcher(Connection);
#[derive(Clone, Debug)]
pub(crate) struct SpeechDispatcher(Connection);
lazy_static! {
static ref SPEAKING: Mutex<HashMap<usize, bool>> = {
let m: HashMap<usize, bool> = HashMap::new();
Mutex::new(m)
};
}
impl SpeechDispatcher {
pub fn new() -> Self {
pub(crate) fn new() -> std::result::Result<Self, Error> {
info!("Initializing SpeechDispatcher backend");
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Single);
SpeechDispatcher(connection)
let connection = speech_dispatcher::Connection::open("tts", "tts", "tts", Mode::Threaded)?;
let sd = SpeechDispatcher(connection);
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(sd.0.client_id(), false);
sd.0.on_begin(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_begin.as_mut() {
f(utterance_id);
}
})));
sd.0.on_end(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_end.as_mut() {
f(utterance_id);
}
})));
sd.0.on_cancel(Some(Box::new(|msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
let mut callbacks = CALLBACKS.lock().unwrap();
let backend_id = BackendId::SpeechDispatcher(client_id);
let cb = callbacks.get_mut(&backend_id).unwrap();
let utterance_id = UtteranceId::SpeechDispatcher(msg_id as u64);
if let Some(f) = cb.utterance_stop.as_mut() {
f(utterance_id);
}
})));
sd.0.on_pause(Some(Box::new(|_msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, false);
})));
sd.0.on_resume(Some(Box::new(|_msg_id, client_id| {
let mut speaking = SPEAKING.lock().unwrap();
speaking.insert(client_id, true);
})));
Ok(sd)
}
}
impl Backend for SpeechDispatcher {
fn id(&self) -> Option<BackendId> {
Some(BackendId::SpeechDispatcher(self.0.client_id()))
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: true,
is_speaking: false,
is_speaking: true,
voice: true,
get_voice: false,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
self.stop()?;
}
let single_char = text.to_string().capacity() == 1;
if single_char {
self.0.set_punctuation(Punctuation::All);
self.0.set_punctuation(Punctuation::All)?;
}
self.0.say(Priority::Important, text);
let id = self.0.say(Priority::Important, text);
if single_char {
self.0.set_punctuation(Punctuation::None);
self.0.set_punctuation(Punctuation::None)?;
}
if let Some(id) = id {
Ok(Some(UtteranceId::SpeechDispatcher(id)))
} else {
Err(Error::NoneError)
}
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
trace!("stop()");
self.0.cancel();
self.0.cancel()?;
Ok(())
}
@ -64,7 +131,7 @@ impl Backend for SpeechDispatcher {
}
fn set_rate(&mut self, rate: f32) -> Result<(), Error> {
self.0.set_voice_rate(rate as i32);
self.0.set_voice_rate(rate as i32)?;
Ok(())
}
@ -85,7 +152,7 @@ impl Backend for SpeechDispatcher {
}
fn set_pitch(&mut self, pitch: f32) -> Result<(), Error> {
self.0.set_voice_pitch(pitch as i32);
self.0.set_voice_pitch(pitch as i32)?;
Ok(())
}
@ -98,7 +165,7 @@ impl Backend for SpeechDispatcher {
}
fn normal_volume(&self) -> f32 {
0.
100.
}
fn get_volume(&self) -> Result<f32, Error> {
@ -106,11 +173,50 @@ impl Backend for SpeechDispatcher {
}
fn set_volume(&mut self, volume: f32) -> Result<(), Error> {
self.0.set_volume(volume as i32);
self.0.set_volume(volume as i32)?;
Ok(())
}
fn is_speaking(&self) -> Result<bool, Error> {
let speaking = SPEAKING.lock().unwrap();
let is_speaking = speaking.get(&self.0.client_id()).unwrap();
Ok(*is_speaking)
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let rv = self
.0
.list_synthesis_voices()?
.iter()
.filter(|v| LanguageTag::parse(v.language.clone()).is_ok())
.map(|v| Voice {
id: v.name.clone(),
name: v.name.clone(),
gender: None,
language: LanguageTag::parse(v.language.clone()).unwrap(),
})
.collect::<Vec<Voice>>();
Ok(rv)
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in self.0.list_synthesis_voices()? {
if v.name == voice.name {
self.0.set_synthesis_voice(&v)?;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for SpeechDispatcher {
fn drop(&mut self) {
let mut speaking = SPEAKING.lock().unwrap();
speaking.remove(&self.0.client_id());
}
}


@ -1,13 +1,16 @@
#[cfg(windows)]
#[cfg(all(windows, feature = "tolk"))]
use std::sync::Arc;
use log::{info, trace};
use tolk::Tolk as TolkPtr;
use crate::{Backend, Error, Features};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice};
pub struct Tolk(TolkPtr);
#[derive(Clone, Debug)]
pub(crate) struct Tolk(Arc<TolkPtr>);
impl Tolk {
pub fn new() -> Option<Self> {
pub(crate) fn new() -> Option<Self> {
info!("Initializing Tolk backend");
let tolk = TolkPtr::new();
if tolk.detect_screen_reader().is_some() {
@ -19,20 +22,21 @@ impl Tolk {
}
impl Backend for Tolk {
fn id(&self) -> Option<BackendId> {
None
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: false,
pitch: false,
volume: false,
is_speaking: false,
..Default::default()
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
self.0.speak(text, interrupt);
Ok(())
Ok(None)
}
fn stop(&mut self) -> Result<(), Error> {
@ -104,4 +108,16 @@ impl Backend for Tolk {
fn is_speaking(&self) -> Result<bool, Error> {
unimplemented!()
}
fn voice(&self) -> Result<Option<Voice>, Error> {
unimplemented!()
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
unimplemented!()
}
fn set_voice(&mut self, _voice: &Voice) -> Result<(), Error> {
unimplemented!()
}
}


@ -1,27 +1,54 @@
#[cfg(target_arch = "wasm32")]
use std::sync::Mutex;
use lazy_static::lazy_static;
use log::{info, trace};
use web_sys::SpeechSynthesisUtterance;
use oxilangtag::LanguageTag;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{
SpeechSynthesisErrorCode, SpeechSynthesisErrorEvent, SpeechSynthesisEvent,
SpeechSynthesisUtterance, SpeechSynthesisVoice,
};
use crate::{Backend, Error, Features};
use crate::{Backend, BackendId, Error, Features, UtteranceId, Voice, CALLBACKS};
#[derive(Clone, Debug)]
pub struct Web {
id: BackendId,
rate: f32,
pitch: f32,
volume: f32,
voice: Option<SpeechSynthesisVoice>,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref UTTERANCE_MAPPINGS: Mutex<Vec<(BackendId, UtteranceId)>> = Mutex::new(Vec::new());
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
}
impl Web {
pub fn new() -> Result<Self, Error> {
info!("Initializing Web backend");
Ok(Web {
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let rv = Web {
id: BackendId::Web(*backend_id),
rate: 1.,
pitch: 1.,
volume: 1.,
})
voice: None,
};
*backend_id += 1;
Ok(rv)
}
}
impl Backend for Web {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
@ -29,23 +56,69 @@ impl Backend for Web {
pitch: true,
volume: true,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> Result<(), Error> {
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error> {
trace!("speak({}, {})", text, interrupt);
let utterance = SpeechSynthesisUtterance::new_with_text(text).unwrap();
utterance.set_rate(self.rate);
utterance.set_pitch(self.pitch);
utterance.set_volume(self.volume);
if self.voice.is_some() {
utterance.set_voice(self.voice.as_ref());
}
let id = self.id().unwrap();
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::Web(*uid);
*uid += 1;
drop(uid);
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.push((self.id, utterance_id));
drop(mappings);
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_begin.as_mut() {
f(utterance_id);
}
}) as Box<dyn Fn(_)>);
utterance.set_onstart(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |_evt: SpeechSynthesisEvent| {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_end.as_mut() {
f(utterance_id);
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onend(Some(callback.as_ref().unchecked_ref()));
let callback = Closure::wrap(Box::new(move |evt: SpeechSynthesisErrorEvent| {
if evt.error() == SpeechSynthesisErrorCode::Canceled {
let mut callbacks = CALLBACKS.lock().unwrap();
let callback = callbacks.get_mut(&id).unwrap();
if let Some(f) = callback.utterance_stop.as_mut() {
f(utterance_id);
}
}
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.1 != utterance_id);
}) as Box<dyn Fn(_)>);
utterance.set_onerror(Some(callback.as_ref().unchecked_ref()));
if interrupt {
self.stop()?;
}
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
speech_synthesis.speak(&utterance);
Ok(Some(utterance_id))
} else {
Err(Error::NoneError)
}
Ok(())
}
fn stop(&mut self) -> Result<(), Error> {
@ -131,4 +204,72 @@ impl Backend for Web {
Err(Error::NoneError)
}
}
fn voice(&self) -> Result<Option<Voice>, Error> {
if let Some(voice) = &self.voice {
Ok(Some(voice.clone().into()))
} else {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for voice in speech_synthesis.get_voices().iter() {
let voice: SpeechSynthesisVoice = voice.into();
if voice.default() {
return Ok(Some(voice.into()));
}
}
} else {
return Err(Error::NoneError);
}
Ok(None)
}
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
let mut rv: Vec<Voice> = vec![];
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
rv.push(v.into());
}
Ok(rv)
} else {
Err(Error::NoneError)
}
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
if let Some(window) = web_sys::window() {
let speech_synthesis = window.speech_synthesis().unwrap();
for v in speech_synthesis.get_voices().iter() {
let v: SpeechSynthesisVoice = v.into();
if v.voice_uri() == voice.id {
self.voice = Some(v);
return Ok(());
}
}
Err(Error::OperationFailed)
} else {
Err(Error::NoneError)
}
}
}
impl Drop for Web {
fn drop(&mut self) {
let mut mappings = UTTERANCE_MAPPINGS.lock().unwrap();
mappings.retain(|v| v.0 != self.id);
}
}
impl From<SpeechSynthesisVoice> for Voice {
fn from(other: SpeechSynthesisVoice) -> Self {
let language = LanguageTag::parse(other.lang()).unwrap();
Voice {
id: other.voice_uri(),
name: other.name(),
gender: None,
language,
}
}
}


@ -1,87 +1,234 @@
#[cfg(windows)]
use log::{info, trace};
use tts_winrt_bindings::windows::media::core::MediaSource;
use tts_winrt_bindings::windows::media::playback::{
MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState, MediaPlayer,
use std::{
collections::{HashMap, VecDeque},
sync::Mutex,
};
use tts_winrt_bindings::windows::media::speech_synthesis::SpeechSynthesizer;
use crate::{Backend, Error, Features};
use lazy_static::lazy_static;
use log::{info, trace};
use oxilangtag::LanguageTag;
use windows::{
Foundation::TypedEventHandler,
Media::{
Core::MediaSource,
Playback::{MediaPlayer, MediaPlayerAudioCategory},
SpeechSynthesis::{SpeechSynthesizer, VoiceGender, VoiceInformation},
},
};
impl From<winrt::Error> for Error {
fn from(e: winrt::Error) -> Self {
Error::WinRT(e)
use crate::{Backend, BackendId, Error, Features, Gender, UtteranceId, Voice, CALLBACKS};
impl From<windows::core::Error> for Error {
fn from(e: windows::core::Error) -> Self {
Error::WinRt(e)
}
}
pub struct WinRT {
#[derive(Clone)]
pub struct WinRt {
id: BackendId,
synth: SpeechSynthesizer,
player: MediaPlayer,
playback_list: MediaPlaybackList,
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
}
impl WinRT {
struct Utterance {
id: UtteranceId,
text: String,
rate: f32,
pitch: f32,
volume: f32,
voice: VoiceInformation,
}
lazy_static! {
static ref NEXT_BACKEND_ID: Mutex<u64> = Mutex::new(0);
static ref NEXT_UTTERANCE_ID: Mutex<u64> = Mutex::new(0);
static ref BACKEND_TO_SPEECH_SYNTHESIZER: Mutex<HashMap<BackendId, SpeechSynthesizer>> = {
let v: HashMap<BackendId, SpeechSynthesizer> = HashMap::new();
Mutex::new(v)
};
static ref BACKEND_TO_MEDIA_PLAYER: Mutex<HashMap<BackendId, MediaPlayer>> = {
let v: HashMap<BackendId, MediaPlayer> = HashMap::new();
Mutex::new(v)
};
static ref UTTERANCES: Mutex<HashMap<BackendId, VecDeque<Utterance>>> = {
let utterances: HashMap<BackendId, VecDeque<Utterance>> = HashMap::new();
Mutex::new(utterances)
};
}
impl WinRt {
pub fn new() -> std::result::Result<Self, Error> {
info!("Initializing WinRT backend");
let playback_list = MediaPlaybackList::new()?;
let synth = SpeechSynthesizer::new()?;
let player = MediaPlayer::new()?;
player.set_auto_play(true)?;
player.set_source(&playback_list)?;
player.SetRealTimePlayback(true)?;
player.SetAudioCategory(MediaPlayerAudioCategory::Speech)?;
let mut backend_id = NEXT_BACKEND_ID.lock().unwrap();
let bid = BackendId::WinRt(*backend_id);
*backend_id += 1;
drop(backend_id);
{
let mut utterances = UTTERANCES.lock().unwrap();
utterances.insert(bid, VecDeque::new());
}
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.insert(bid, player.clone());
drop(backend_to_media_player);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.insert(bid, synth.clone());
drop(backend_to_speech_synthesizer);
let bid_clone = bid;
player.MediaEnded(&TypedEventHandler::new(
move |sender: &Option<MediaPlayer>, _args| {
if let Some(sender) = sender {
let backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
let id = backend_to_media_player.iter().find(|v| v.1 == sender);
if let Some((id, _)) = id {
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(id) {
if let Some(utterance) = utterances.pop_front() {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(id).unwrap();
if let Some(callback) = callbacks.utterance_end.as_mut() {
callback(utterance.id);
}
if let Some(utterance) = utterances.front() {
let backend_to_speech_synthesizer =
BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
let id = backend_to_speech_synthesizer
.iter()
.find(|v| *v.0 == bid_clone);
if let Some((_, tts)) = id {
tts.Options()?.SetSpeakingRate(utterance.rate.into())?;
tts.Options()?.SetAudioPitch(utterance.pitch.into())?;
tts.Options()?.SetAudioVolume(utterance.volume.into())?;
tts.SetVoice(&utterance.voice)?;
let text = &utterance.text;
let stream =
tts.SynthesizeTextToStreamAsync(&text.into())?.get()?;
let content_type = stream.ContentType()?;
let source =
MediaSource::CreateFromStream(&stream, &content_type)?;
sender.SetSource(&source)?;
sender.Play()?;
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance.id);
}
}
}
}
}
}
}
Ok(())
},
))?;
Ok(Self {
synth: SpeechSynthesizer::new()?,
player: player,
playback_list: playback_list,
id: bid,
synth,
player,
rate: 1.,
pitch: 1.,
volume: 1.,
voice: SpeechSynthesizer::DefaultVoice()?,
})
}
fn reinit_player(&mut self) -> std::result::Result<(), Error> {
self.playback_list = MediaPlaybackList::new()?;
self.player = MediaPlayer::new()?;
self.player.set_auto_play(true)?;
self.player.set_source(&self.playback_list)?;
Ok(())
}
}
impl Backend for WinRT {
impl Backend for WinRt {
fn id(&self) -> Option<BackendId> {
Some(self.id)
}
fn supported_features(&self) -> Features {
Features {
stop: true,
rate: true,
pitch: true,
volume: true,
is_speaking: false,
is_speaking: true,
voice: true,
get_voice: true,
utterance_callbacks: true,
}
}
fn speak(&mut self, text: &str, interrupt: bool) -> std::result::Result<(), Error> {
trace!("speak({}, {})", text, interrupt);
if interrupt {
fn speak(
&mut self,
text: &str,
interrupt: bool,
) -> std::result::Result<Option<UtteranceId>, Error> {
if interrupt && self.is_speaking()? {
self.stop()?;
}
let stream = self.synth.synthesize_text_to_stream_async(text)?.get()?;
let content_type = stream.content_type()?;
let source = MediaSource::create_from_stream(stream, content_type)?;
let item = MediaPlaybackItem::create(source)?;
let state = self.player.playback_session()?.playback_state()?;
if state == MediaPlaybackState::Paused {
let index = self.playback_list.current_item_index()?;
let total = self.playback_list.items()?.size()?;
if total != 0 && index == total - 1 {
self.reinit_player()?;
let utterance_id = {
let mut uid = NEXT_UTTERANCE_ID.lock().unwrap();
let utterance_id = UtteranceId::WinRt(*uid);
*uid += 1;
utterance_id
};
let mut no_utterances = false;
{
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get_mut(&self.id) {
no_utterances = utterances.is_empty();
let utterance = Utterance {
id: utterance_id,
text: text.into(),
rate: self.rate,
pitch: self.pitch,
volume: self.volume,
voice: self.voice.clone(),
};
utterances.push_back(utterance);
}
}
if no_utterances {
self.synth.Options()?.SetSpeakingRate(self.rate.into())?;
self.synth.Options()?.SetAudioPitch(self.pitch.into())?;
self.synth.Options()?.SetAudioVolume(self.volume.into())?;
self.synth.SetVoice(&self.voice)?;
let stream = self
.synth
.SynthesizeTextToStreamAsync(&text.into())?
.get()?;
let content_type = stream.ContentType()?;
let source = MediaSource::CreateFromStream(&stream, &content_type)?;
self.player.SetSource(&source)?;
self.player.Play()?;
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_begin.as_mut() {
callback(utterance_id);
}
}
Ok(Some(utterance_id))
}
fn stop(&mut self) -> std::result::Result<(), Error> {
trace!("stop()");
if !self.is_speaking()? {
return Ok(());
}
let mut utterances = UTTERANCES.lock().unwrap();
if let Some(utterances) = utterances.get(&self.id) {
let mut callbacks = CALLBACKS.lock().unwrap();
let callbacks = callbacks.get_mut(&self.id).unwrap();
if let Some(callback) = callbacks.utterance_stop.as_mut() {
for utterance in utterances {
callback(utterance.id);
}
}
}
if let Some(utterances) = utterances.get_mut(&self.id) {
utterances.clear();
}
self.player.Pause()?;
Ok(())
}
@@ -98,12 +245,12 @@ impl Backend for WinRT {
}
fn get_rate(&self) -> std::result::Result<f32, Error> {
let rate = self.synth.Options()?.SpeakingRate()?;
Ok(rate as f32)
}
fn set_rate(&mut self, rate: f32) -> std::result::Result<(), Error> {
self.rate = rate;
Ok(())
}
@@ -120,12 +267,12 @@ impl Backend for WinRT {
}
fn get_pitch(&self) -> std::result::Result<f32, Error> {
let pitch = self.synth.Options()?.AudioPitch()?;
Ok(pitch as f32)
}
fn set_pitch(&mut self, pitch: f32) -> std::result::Result<(), Error> {
self.pitch = pitch;
Ok(())
}
@@ -142,18 +289,76 @@ impl Backend for WinRT {
}
fn get_volume(&self) -> std::result::Result<f32, Error> {
let volume = self.synth.Options()?.AudioVolume()?;
Ok(volume as f32)
}
fn set_volume(&mut self, volume: f32) -> std::result::Result<(), Error> {
self.volume = volume;
Ok(())
}
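// Speaking state is derived from the pending-utterance queue rather than from the
// MediaPlayer's playback state.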
fn is_speaking(&self) -> std::result::Result<bool, Error> {
let utterances = UTTERANCES.lock().unwrap();
let utterances = utterances.get(&self.id).unwrap();
Ok(!utterances.is_empty())
}
fn voice(&self) -> Result<Option<Voice>, Error> {
let voice = self.synth.Voice()?;
let voice = voice.try_into()?;
Ok(Some(voice))
}
fn voices(&self) -> Result<Vec<Voice>, Error> {
let mut rv: Vec<Voice> = vec![];
for voice in SpeechSynthesizer::AllVoices()? {
rv.push(voice.try_into()?);
}
Ok(rv)
}
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
for v in SpeechSynthesizer::AllVoices()? {
let vid: String = v.Id()?.try_into()?;
if vid == voice.id {
self.voice = v;
return Ok(());
}
}
Err(Error::OperationFailed)
}
}
impl Drop for WinRt {
fn drop(&mut self) {
let id = self.id;
let mut backend_to_media_player = BACKEND_TO_MEDIA_PLAYER.lock().unwrap();
backend_to_media_player.remove(&id);
let mut backend_to_speech_synthesizer = BACKEND_TO_SPEECH_SYNTHESIZER.lock().unwrap();
backend_to_speech_synthesizer.remove(&id);
let mut utterances = UTTERANCES.lock().unwrap();
utterances.remove(&id);
}
}
impl TryInto<Voice> for VoiceInformation {
type Error = Error;
fn try_into(self) -> Result<Voice, Self::Error> {
let gender = self.Gender()?;
let gender = if gender == VoiceGender::Male {
Gender::Male
} else {
Gender::Female
};
let language: String = self.Language()?.try_into()?;
let language = LanguageTag::parse(language).unwrap();
Ok(Voice {
id: self.Id()?.try_into()?,
name: self.DisplayName()?.try_into()?,
gender: Some(gender),
language,
})
}
}


@@ -1,57 +1,223 @@
//! * a Text-To-Speech (TTS) library providing high-level interfaces to a variety of backends.
//! * Currently supported backends are:
//! * * Windows
//! * * Screen readers/SAPI via Tolk (requires `tolk` Cargo feature)
//! * * WinRT
//! * * Linux via [Speech Dispatcher](https://freebsoft.org/speechd)
//! * * MacOS/iOS
//! * * AppKit on MacOS 10.13 and below
//! * * AVFoundation on MacOS 10.14 and above, and iOS
//! * * Android
//! * * WebAssembly
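//!
//! A minimal usage sketch (using only the `Tts` API defined in this file):
//!
//! ```no_run
//! use tts::Tts;
//!
//! fn main() -> Result<(), tts::Error> {
//!     let mut tts = Tts::default()?;
//!     tts.speak("Hello, world.", false)?;
//!     Ok(())
//! }
//! ```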
use std::collections::HashMap;
#[cfg(target_os = "macos")]
use std::ffi::CStr;
use std::fmt;
use std::rc::Rc;
#[cfg(windows)]
use std::string::FromUtf16Error;
use std::sync::Mutex;
use std::{boxed::Box, sync::RwLock};
#[cfg(any(target_os = "macos", target_os = "ios"))]
use cocoa_foundation::base::id;
use dyn_clonable::*;
use lazy_static::lazy_static;
#[cfg(target_os = "macos")]
use libc::c_char;
#[cfg(target_os = "macos")]
use objc::{class, msg_send, sel, sel_impl};
pub use oxilangtag::LanguageTag;
#[cfg(target_os = "linux")]
use speech_dispatcher::Error as SpeechDispatcherError;
use thiserror::Error;
#[cfg(all(windows, feature = "tolk"))]
use tolk::Tolk;
mod backends;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Backends {
#[cfg(target_os = "android")]
Android,
#[cfg(target_os = "macos")]
AppKit,
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation,
#[cfg(target_os = "linux")]
SpeechDispatcher,
#[cfg(all(windows, feature = "tolk"))]
Tolk,
#[cfg(target_arch = "wasm32")]
Web,
#[cfg(windows)]
WinRt,
}
impl fmt::Display for Backends {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
Backends::Android => writeln!(f, "Android"),
#[cfg(target_os = "macos")]
Backends::AppKit => writeln!(f, "AppKit"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => writeln!(f, "AVFoundation"),
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => writeln!(f, "Speech Dispatcher"),
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => writeln!(f, "Tolk"),
#[cfg(target_arch = "wasm32")]
Backends::Web => writeln!(f, "Web"),
#[cfg(windows)]
Backends::WinRt => writeln!(f, "Windows Runtime"),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum BackendId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(u64),
#[cfg(target_os = "linux")]
SpeechDispatcher(usize),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
}
impl fmt::Display for BackendId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
BackendId::Android(id) => writeln!(f, "Android({id})"),
#[cfg(any(target_os = "macos", target_os = "ios"))]
BackendId::AvFoundation(id) => writeln!(f, "AvFoundation({id})"),
#[cfg(target_os = "linux")]
BackendId::SpeechDispatcher(id) => writeln!(f, "SpeechDispatcher({id})"),
#[cfg(target_arch = "wasm32")]
BackendId::Web(id) => writeln!(f, "Web({id})"),
#[cfg(windows)]
BackendId::WinRt(id) => writeln!(f, "WinRT({id})"),
}
}
}
// # Note
//
// Most trait implementations are blocked by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[derive(Debug)]
#[cfg_attr(
not(any(target_os = "macos", target_os = "ios")),
derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)
)]
#[cfg_attr(
all(feature = "serde", not(any(target_os = "macos", target_os = "ios"))),
derive(serde::Serialize, serde::Deserialize)
)]
pub enum UtteranceId {
#[cfg(target_os = "android")]
Android(u64),
#[cfg(any(target_os = "macos", target_os = "ios"))]
AvFoundation(id),
#[cfg(target_os = "linux")]
SpeechDispatcher(u64),
#[cfg(target_arch = "wasm32")]
Web(u64),
#[cfg(windows)]
WinRt(u64),
}
// # Note
//
// Display is not implemented by cocoa_foundation::base::id;
// which is a type alias for objc::runtime::Object, which only implements Debug.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
impl fmt::Display for UtteranceId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
#[cfg(target_os = "android")]
UtteranceId::Android(id) => writeln!(f, "Android({id})"),
#[cfg(target_os = "linux")]
UtteranceId::SpeechDispatcher(id) => writeln!(f, "SpeechDispatcher({id})"),
#[cfg(target_arch = "wasm32")]
UtteranceId::Web(id) => writeln!(f, "Web({})", id),
#[cfg(windows)]
UtteranceId::WinRt(id) => writeln!(f, "WinRt({id})"),
}
}
}
unsafe impl Send for UtteranceId {}
unsafe impl Sync for UtteranceId {}
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Features {
pub is_speaking: bool,
pub pitch: bool,
pub rate: bool,
pub stop: bool,
pub utterance_callbacks: bool,
pub voice: bool,
pub get_voice: bool,
pub volume: bool,
}
impl fmt::Display for Features {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
writeln!(f, "{self:#?}")
}
}
impl Features {
pub fn new() -> Self {
Self::default()
}
}
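// Sketch: callers can gate optional calls on the reported feature set, e.g.
//     let Features { rate, .. } = tts.supported_features();
//     let normal = tts.normal_rate();
//     if rate { tts.set_rate(normal)?; }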
#[derive(Debug, Error)]
pub enum Error {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Value not received")]
NoneError,
#[error("Operation failed")]
OperationFailed,
#[cfg(target_arch = "wasm32")]
#[error("JavaScript error: [0]")]
JavaScriptError(wasm_bindgen::JsValue),
#[cfg(target_os = "linux")]
#[error("Speech Dispatcher error: {0}")]
SpeechDispatcher(#[from] SpeechDispatcherError),
#[cfg(windows)]
#[error("WinRT error")]
WinRt(windows::core::Error),
#[cfg(windows)]
#[error("UTF string conversion failed")]
UtfStringConversionFailed(#[from] FromUtf16Error),
#[error("Unsupported feature")]
UnsupportedFeature,
#[error("Out of range")]
OutOfRange,
#[cfg(target_os = "android")]
#[error("JNI error: [0]")]
JNI(#[from] jni::errors::Error),
}
#[clonable]
pub trait Backend: Clone {
fn id(&self) -> Option<BackendId>;
fn supported_features(&self) -> Features;
fn speak(&mut self, text: &str, interrupt: bool) -> Result<Option<UtteranceId>, Error>;
fn stop(&mut self) -> Result<(), Error>;
fn min_rate(&self) -> f32;
fn max_rate(&self) -> f32;
@@ -69,131 +235,193 @@ pub trait Backend {
fn get_volume(&self) -> Result<f32, Error>;
fn set_volume(&mut self, volume: f32) -> Result<(), Error>;
fn is_speaking(&self) -> Result<bool, Error>;
fn voices(&self) -> Result<Vec<Voice>, Error>;
fn voice(&self) -> Result<Option<Voice>, Error>;
fn set_voice(&mut self, voice: &Voice) -> Result<(), Error>;
}
#[derive(Default)]
struct Callbacks {
utterance_begin: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_end: Option<Box<dyn FnMut(UtteranceId)>>,
utterance_stop: Option<Box<dyn FnMut(UtteranceId)>>,
}
unsafe impl Send for Callbacks {}
unsafe impl Sync for Callbacks {}
lazy_static! {
static ref CALLBACKS: Mutex<HashMap<BackendId, Callbacks>> = {
let m: HashMap<BackendId, Callbacks> = HashMap::new();
Mutex::new(m)
};
}
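// Callbacks are stored per backend, keyed by BackendId, so cloned Tts handles share
// one callback set; the entry is removed when the last handle is dropped (see Drop below).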
#[derive(Clone)]
pub struct Tts(Rc<RwLock<Box<dyn Backend>>>);
unsafe impl Send for Tts {}
unsafe impl Sync for Tts {}
impl Tts {
/// Create a new `TTS` instance with the specified backend.
pub fn new(backend: Backends) -> Result<Tts, Error> {
let backend = match backend {
#[cfg(target_os = "linux")]
Backends::SpeechDispatcher => {
let tts = backends::SpeechDispatcher::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_arch = "wasm32")]
Backends::Web => {
let tts = backends::Web::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(all(windows, feature = "tolk"))]
Backends::Tolk => {
let tts = backends::Tolk::new();
if let Some(tts) = tts {
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
} else {
Err(Error::NoneError)
}
}
#[cfg(windows)]
Backends::WinRt => {
let tts = backends::WinRt::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
#[cfg(target_os = "macos")]
Backends::AppKit => Ok(Tts(Rc::new(RwLock::new(
Box::new(backends::AppKit::new()?),
)))),
#[cfg(any(target_os = "macos", target_os = "ios"))]
Backends::AvFoundation => Ok(Tts(Rc::new(RwLock::new(Box::new(
backends::AvFoundation::new()?,
))))),
#[cfg(target_os = "android")]
Backends::Android => {
let tts = backends::Android::new()?;
Ok(Tts(Rc::new(RwLock::new(Box::new(tts)))))
}
};
if let Ok(backend) = backend {
if let Some(id) = backend.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.insert(id, Callbacks::default());
}
Ok(backend)
} else {
backend
}
}
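// e.g. `let tts = Tts::new(Backends::WinRt)?;` on Windows, or use `Tts::default()`
// below to pick a platform-appropriate backend automatically.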
#[allow(clippy::should_implement_trait)]
pub fn default() -> Result<Tts, Error> {
#[cfg(target_os = "linux")]
let tts = Tts::new(Backends::SpeechDispatcher);
#[cfg(all(windows, feature = "tolk"))]
let tts = if let Ok(tts) = Tts::new(Backends::Tolk) {
Ok(tts)
} else {
Tts::new(Backends::WinRt)
};
#[cfg(all(windows, not(feature = "tolk")))]
let tts = Tts::new(Backends::WinRt);
#[cfg(target_arch = "wasm32")]
let tts = Tts::new(Backends::Web);
#[cfg(target_os = "macos")]
let tts = unsafe {
// Needed because the Rust NSProcessInfo structs report bogus values, and I don't want to pull in a full bindgen stack.
let pi: id = msg_send![class!(NSProcessInfo), new];
let version: id = msg_send![pi, operatingSystemVersionString];
let str: *const c_char = msg_send![version, UTF8String];
let str = CStr::from_ptr(str);
let str = str.to_string_lossy();
let version: Vec<&str> = str.split(' ').collect();
let version = version[1];
let version_parts: Vec<&str> = version.split('.').collect();
let major_version: i8 = version_parts[0].parse().unwrap();
let minor_version: i8 = version_parts[1].parse().unwrap();
if major_version >= 11 || minor_version >= 14 {
Tts::new(Backends::AvFoundation)
} else {
Tts::new(Backends::AppKit)
}
};
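// The version string parsed above looks like "Version 10.14.6 (Build ...)", so index 1
// after splitting on spaces is the numeric version; 10.14+ (or 11+) selects AVFoundation,
// while older macOS releases fall back to AppKit.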
#[cfg(target_os = "ios")]
let tts = Tts::new(Backends::AvFoundation);
#[cfg(target_os = "android")]
let tts = Tts::new(Backends::Android);
tts
}
/// Returns the features supported by this TTS engine
pub fn supported_features(&self) -> Features {
self.0.read().unwrap().supported_features()
}
/// Speaks the specified text, optionally interrupting current speech.
pub fn speak<S: Into<String>>(
&mut self,
text: S,
interrupt: bool,
) -> Result<Option<UtteranceId>, Error> {
self.0
.write()
.unwrap()
.speak(text.into().as_str(), interrupt)
}
/// Stops current speech.
pub fn stop(&mut self) -> Result<&Self, Error> {
let Features { stop, .. } = self.supported_features();
if stop {
self.0.write().unwrap().stop()?;
Ok(self)
} else {
Err(Error::UnsupportedFeature)
}
}
/// Returns the minimum rate for this speech synthesizer.
pub fn min_rate(&self) -> f32 {
self.0.read().unwrap().min_rate()
}
/// Returns the maximum rate for this speech synthesizer.
pub fn max_rate(&self) -> f32 {
self.0.read().unwrap().max_rate()
}
/// Returns the normal rate for this speech synthesizer.
pub fn normal_rate(&self) -> f32 {
self.0.read().unwrap().normal_rate()
}
/// Gets the current speech rate.
pub fn get_rate(&self) -> Result<f32, Error> {
let Features { rate, .. } = self.supported_features();
if rate {
self.0.read().unwrap().get_rate()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech rate.
pub fn set_rate(&mut self, rate: f32) -> Result<&Self, Error> {
let Features {
rate: rate_feature, ..
} = self.supported_features();
if rate_feature {
let mut backend = self.0.write().unwrap();
if rate < backend.min_rate() || rate > backend.max_rate() {
Err(Error::OutOfRange)
} else {
backend.set_rate(rate)?;
Ok(self)
}
} else {
@@ -201,52 +429,43 @@ impl TTS {
}
}
/// Returns the minimum pitch for this speech synthesizer.
pub fn min_pitch(&self) -> f32 {
self.0.read().unwrap().min_pitch()
}
/// Returns the maximum pitch for this speech synthesizer.
pub fn max_pitch(&self) -> f32 {
self.0.read().unwrap().max_pitch()
}
/// Returns the normal pitch for this speech synthesizer.
pub fn normal_pitch(&self) -> f32 {
self.0.read().unwrap().normal_pitch()
}
/// Gets the current speech pitch.
pub fn get_pitch(&self) -> Result<f32, Error> {
let Features { pitch, .. } = self.supported_features();
if pitch {
self.0.read().unwrap().get_pitch()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech pitch.
pub fn set_pitch(&mut self, pitch: f32) -> Result<&Self, Error> {
let Features {
pitch: pitch_feature,
..
} = self.supported_features();
if pitch_feature {
let mut backend = self.0.write().unwrap();
if pitch < backend.min_pitch() || pitch > backend.max_pitch() {
Err(Error::OutOfRange)
} else {
backend.set_pitch(pitch)?;
Ok(self)
}
} else {
@@ -254,52 +473,43 @@ impl TTS {
}
}
/// Returns the minimum volume for this speech synthesizer.
pub fn min_volume(&self) -> f32 {
self.0.read().unwrap().min_volume()
}
/// Returns the maximum volume for this speech synthesizer.
pub fn max_volume(&self) -> f32 {
self.0.read().unwrap().max_volume()
}
/// Returns the normal volume for this speech synthesizer.
pub fn normal_volume(&self) -> f32 {
self.0.read().unwrap().normal_volume()
}
/// Gets the current speech volume.
pub fn get_volume(&self) -> Result<f32, Error> {
let Features { volume, .. } = self.supported_features();
if volume {
self.0.read().unwrap().get_volume()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Sets the desired speech volume.
pub fn set_volume(&mut self, volume: f32) -> Result<&Self, Error> {
let Features {
volume: volume_feature,
..
} = self.supported_features();
if volume_feature {
let mut backend = self.0.write().unwrap();
if volume < backend.min_volume() || volume > backend.max_volume() {
Err(Error::OutOfRange)
} else {
backend.set_volume(volume)?;
Ok(self)
}
} else {
@@ -307,15 +517,167 @@ impl TTS {
}
}
/// Returns whether this speech synthesizer is speaking.
pub fn is_speaking(&self) -> Result<bool, Error> {
let Features { is_speaking, .. } = self.supported_features();
if is_speaking {
self.0.read().unwrap().is_speaking()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Returns list of available voices.
pub fn voices(&self) -> Result<Vec<Voice>, Error> {
let Features { voice, .. } = self.supported_features();
if voice {
self.0.read().unwrap().voices()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Return the current speaking voice.
pub fn voice(&self) -> Result<Option<Voice>, Error> {
let Features { get_voice, .. } = self.supported_features();
if get_voice {
self.0.read().unwrap().voice()
} else {
Err(Error::UnsupportedFeature)
}
}
/// Set speaking voice.
pub fn set_voice(&mut self, voice: &Voice) -> Result<(), Error> {
let Features {
voice: voice_feature,
..
} = self.supported_features();
if voice_feature {
self.0.write().unwrap().set_voice(voice)
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer begins speaking an utterance.
pub fn on_utterance_begin(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_begin = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer finishes speaking an utterance.
pub fn on_utterance_end(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_end = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
/// Called when this speech synthesizer is stopped and still has utterances in its queue.
pub fn on_utterance_stop(
&self,
callback: Option<Box<dyn FnMut(UtteranceId)>>,
) -> Result<(), Error> {
let Features {
utterance_callbacks,
..
} = self.supported_features();
if utterance_callbacks {
let mut callbacks = CALLBACKS.lock().unwrap();
let id = self.0.read().unwrap().id().unwrap();
let callbacks = callbacks.get_mut(&id).unwrap();
callbacks.utterance_stop = callback;
Ok(())
} else {
Err(Error::UnsupportedFeature)
}
}
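// Sketch: registering a callback takes a boxed closure, e.g.
//     tts.on_utterance_begin(Some(Box::new(|id| println!("started {id:?}"))))?;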
/*
* Returns `true` if a screen reader is available to provide speech.
*/
#[allow(unreachable_code)]
pub fn screen_reader_available() -> bool {
#[cfg(target_os = "windows")]
{
#[cfg(feature = "tolk")]
{
let tolk = Tolk::new();
return tolk.detect_screen_reader().is_some();
}
#[cfg(not(feature = "tolk"))]
return false;
}
false
}
}
impl Drop for Tts {
fn drop(&mut self) {
if Rc::strong_count(&self.0) <= 1 {
if let Some(id) = self.0.read().unwrap().id() {
let mut callbacks = CALLBACKS.lock().unwrap();
callbacks.remove(&id);
}
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Gender {
Male,
Female,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Voice {
pub(crate) id: String,
pub(crate) name: String,
pub(crate) gender: Option<Gender>,
pub(crate) language: LanguageTag<String>,
}
impl Voice {
pub fn id(&self) -> String {
self.id.clone()
}
pub fn name(&self) -> String {
self.name.clone()
}
pub fn gender(&self) -> Option<Gender> {
self.gender
}
pub fn language(&self) -> LanguageTag<String> {
self.language.clone()
}
}
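// Sketch: enumerate the available voices and switch to the first one reported, e.g.
//     let voices = tts.voices()?;
//     if let Some(v) = voices.first() { tts.set_voice(v)?; }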


@@ -1,13 +0,0 @@
[package]
name = "tts_winrt_bindings"
version = "0.1.0"
authors = ["Nolan Darilek <nolan@thewordnerd.info>"]
description = "Internal crate used by `tts`"
license = "MIT"
edition = "2018"
[dependencies]
winrt = "0.7"
[build-dependencies]
winrt = "0.7"


@@ -1,12 +0,0 @@
winrt::build!(
dependencies
os
types
windows::media::core::MediaSource
windows::media::playback::{MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState, MediaPlayer}
windows::media::speech_synthesis::SpeechSynthesizer
);
fn main() {
build();
}


@@ -1 +0,0 @@
include!(concat!(env!("OUT_DIR"), "/winrt.rs"));