diff --git a/.gitignore b/.gitignore index 8521b33af..1d0edde80 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,7 @@ packages/core-bridge/releases packages/*/package-lock.json /sdk-node.iml *~ + +# One test creates persisted SQLite DBs; they should normally be deleted automatically, +# but may be left behind in some error scenarios. +packages/test/temporal-db-*.sqlite diff --git a/packages/common/src/errors.ts b/packages/common/src/errors.ts index d28368289..aa5dd2c59 100644 --- a/packages/common/src/errors.ts +++ b/packages/common/src/errors.ts @@ -20,7 +20,8 @@ export class ValueError extends Error { export class PayloadConverterError extends ValueError {} /** - * Used in different parts of the SDK to note that something unexpected has happened. + * Signals that a requested operation can't be completed because it is illegal given the + * current state of the object; e.g. trying to use a resource after it has been closed. */ @SymbolBasedInstanceOfError('IllegalStateError') export class IllegalStateError extends Error {} diff --git a/packages/core-bridge/Cargo.lock b/packages/core-bridge/Cargo.lock index 2a6782fba..3500e2d98 100644 --- a/packages/core-bridge/Cargo.lock +++ b/packages/core-bridge/Cargo.lock @@ -51,9 +51,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arbitrary" @@ -88,9 +88,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ 
"proc-macro2", "quote", @@ -179,7 +179,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -203,6 +203,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bridge-macros" +version = "0.1.0" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bumpalo" version = "3.17.0" @@ -217,9 +227,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2" @@ -242,9 +252,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.16" +version = "1.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" dependencies = [ "jobserver", "libc", @@ -289,6 +299,15 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.10.0" @@ -340,9 +359,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = 
"82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -374,9 +393,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ "darling_core", "darling_macro", @@ -384,9 +403,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ "fnv", "ident_case", @@ -398,9 +417,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", @@ -429,9 +448,9 @@ checksum = "da692b8d1080ea3045efaab14434d40468c3d8657e42abddfffca87b428f4c1b" [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] @@ -529,9 +548,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = 
"48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "enum-iterator" @@ -583,9 +602,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" dependencies = [ "libc", "windows-sys 0.59.0", @@ -617,9 +636,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -633,9 +652,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "form_urlencoded" @@ -648,9 +667,9 @@ dependencies = [ [[package]] name = "fragile" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" [[package]] name = "futures" @@ -783,16 +802,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = 
"73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", "wasm-bindgen", - "windows-targets", ] [[package]] @@ -818,7 +837,7 @@ dependencies = [ "futures-sink", "futures-timer", "futures-util", - "getrandom 0.3.1", + "getrandom 0.3.2", "no-std-compat", "nonzero_ext", "parking_lot", @@ -832,9 +851,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" dependencies = [ "atomic-waker", "bytes", @@ -842,7 +861,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -889,9 +908,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -910,12 +929,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -987,9 +1006,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = 
"497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -997,6 +1016,7 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", @@ -1045,9 +1065,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -1069,9 +1089,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -1090,9 +1110,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -1161,9 +1181,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -1219,10 +1239,11 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -1244,9 +1265,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" @@ -1255,7 +1276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1271,9 +1292,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" @@ -1299,9 +1320,9 @@ checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e" [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "lru" @@ -1322,6 +1343,17 @@ dependencies = [ "crc", ] +[[package]] +name = "lzma-sys" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "matchers" version = 
"0.1.0" @@ -1351,9 +1383,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ "adler2", ] @@ -1484,9 +1516,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" @@ -1494,20 +1526,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "opentelemetry" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96" -dependencies = [ - "futures-core", - "futures-sink", - "js-sys", - "once_cell", - "pin-project-lite", - "thiserror 1.0.69", -] - [[package]] name = "opentelemetry" version = "0.26.0" @@ -1531,7 +1549,7 @@ dependencies = [ "async-trait", "bytes", "http", - "opentelemetry 0.26.0", + "opentelemetry", "reqwest", ] @@ -1544,7 +1562,7 @@ dependencies = [ "async-trait", "futures-core", "http", - "opentelemetry 0.26.0", + "opentelemetry", "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", @@ -1561,7 +1579,7 @@ version = "0.17.0" source = "git+https://github.com/open-telemetry/opentelemetry-rust.git?rev=e911383#e91138351a689cd21923c15eb48f5fbc95ded807" dependencies = [ "once_cell", - "opentelemetry 0.26.0", + "opentelemetry", "opentelemetry_sdk", "prometheus", "protobuf", @@ -1573,7 +1591,7 @@ 
version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9d3968ce3aefdcca5c27e3c4ea4391b37547726a70893aab52d3de95d5f8b34" dependencies = [ - "opentelemetry 0.26.0", + "opentelemetry", "opentelemetry_sdk", "prost", "tonic", @@ -1591,7 +1609,7 @@ dependencies = [ "futures-util", "glob", "once_cell", - "opentelemetry 0.26.0", + "opentelemetry", "percent-encoding", "rand 0.8.5", "serde_json", @@ -1626,7 +1644,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1652,7 +1670,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.7.1", + "indexmap 2.9.0", ] [[package]] @@ -1708,6 +1726,15 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -1716,11 +1743,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -1751,9 +1778,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.30" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" dependencies = [ "proc-macro2", "syn", @@ -1761,9 +1788,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -1904,11 +1931,12 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", @@ -1918,17 +1946,18 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.2", + "rand 0.9.0", "ring", "rustc-hash", "rustls", @@ -1942,9 +1971,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -1956,13 +1985,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.39" +version = "1.0.40" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -1982,7 +2017,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.21", + "zerocopy", ] [[package]] @@ -2020,23 +2055,23 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", ] [[package]] name = "raw-cpuid" -version = "11.4.0" +version = "11.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "529468c1335c1c03919960dfefdb1b3648858c20d7ec2d0663e728e4a717efbc" +checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" dependencies = [ "bitflags", ] [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" dependencies = [ "bitflags", ] @@ -2087,9 +2122,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.12" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies 
= [ "base64", "bytes", @@ -2133,9 +2168,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.11" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -2147,12 +2182,13 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726bb493fe9cac765e8f96a144c3a8396bdf766dedad22e504b70b908dcbceb4" +checksum = "fe47b720588c8702e34b5979cb3271a8b1842c7cb6f57408efa70c779363488c" dependencies = [ "crossbeam-utils", "portable-atomic", + "portable-atomic-util", ] [[package]] @@ -2192,9 +2228,9 @@ version = "0.1.0" [[package]] name = "rustix" -version = "0.38.44" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" dependencies = [ "bitflags", "errno", @@ -2205,9 +2241,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" dependencies = [ "log", "once_cell", @@ -2250,9 +2286,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "ring", "rustls-pki-types", @@ -2323,18 +2359,18 @@ checksum = 
"cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", @@ -2432,15 +2468,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2475,9 +2511,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.99" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -2541,13 +2577,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.17.1" +version = "3.19.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" +checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 0.3.2", "once_cell", "rustix", "windows-sys 0.59.0", @@ -2606,7 +2641,7 @@ dependencies = [ "itertools", "lru", "mockall", - "opentelemetry 0.26.0", + "opentelemetry", "opentelemetry-otlp", "opentelemetry-prometheus", "opentelemetry_sdk", @@ -2648,7 +2683,7 @@ dependencies = [ "async-trait", "derive_builder", "derive_more", - "opentelemetry 0.26.0", + "opentelemetry", "prost", "serde_json", "temporal-sdk-core-protos", @@ -2683,18 +2718,21 @@ dependencies = [ name = "temporal-sdk-typescript-bridge" version = "0.1.0" dependencies = [ + "anyhow", "async-trait", + "bridge-macros", "futures", "log", "neon", - "once_cell", - "opentelemetry 0.24.0", + "opentelemetry", "parking_lot", "prost", "prost-types", + "serde", "serde_json", "temporal-client", "temporal-sdk-core", + "thiserror 2.0.12", "tokio", "tokio-stream", ] @@ -2757,9 +2795,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "num-conv", @@ -2770,9 +2808,9 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "tinystr" @@ -2801,9 +2839,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = 
"1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", @@ -2851,9 +2889,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -3053,6 +3091,12 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -3090,11 +3134,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.15.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", ] [[package]] @@ -3126,9 +3170,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ 
"wit-bindgen-rt", ] @@ -3266,7 +3310,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ "windows-core", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3278,7 +3322,7 @@ dependencies = [ "windows-implement", "windows-interface", "windows-result 0.1.2", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3303,15 +3347,21 @@ dependencies = [ "syn", ] +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + [[package]] name = "windows-registry" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result 0.2.0", + "windows-result 0.3.2", "windows-strings", - "windows-targets", + "windows-targets 0.53.0", ] [[package]] @@ -3320,26 +3370,25 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" dependencies = [ - "windows-targets", + "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = 
"87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ - "windows-result 0.2.0", - "windows-targets", + "windows-link", ] [[package]] @@ -3348,7 +3397,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3357,7 +3406,7 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3366,14 +3415,30 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -3382,53 +3447,101 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags", ] @@ -3447,15 +3560,23 @@ checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "xattr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" +checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "linux-raw-sys", "rustix", ] +[[package]] +name = "xz2" +version = "0.1.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +dependencies = [ + "lzma-sys", +] + [[package]] name = "yoke" version = "0.7.5" @@ -3482,39 +3603,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.21" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "zerocopy-derive 0.8.21", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", @@ -3586,9 +3686,9 @@ dependencies = [ [[package]] name = "zip" -version = "2.2.3" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b280484c454e74e5fff658bbf7df8fdbe7a07c6b2de4a53def232c15ef138f3a" +checksum = "1dcb24d0152526ae49b9b96c1dcf71850ca1e0b882e4e28ed898a93c41334744" dependencies = [ "aes", "arbitrary", @@ -3597,17 +3697,16 @@ dependencies = [ "crc32fast", "crossbeam-utils", "deflate64", - "displaydoc", "flate2", + "getrandom 0.3.2", "hmac", 
- "indexmap 2.7.1", + "indexmap 2.9.0", "lzma-rs", "memchr", "pbkdf2", - "rand 0.8.5", "sha1", - "thiserror 2.0.12", "time", + "xz2", "zeroize", "zopfli", "zstd", @@ -3638,18 +3737,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ "cc", "pkg-config", diff --git a/packages/core-bridge/Cargo.toml b/packages/core-bridge/Cargo.toml index 83aac77c5..1161774f6 100644 --- a/packages/core-bridge/Cargo.toml +++ b/packages/core-bridge/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "temporal-sdk-typescript-bridge" version = "0.1.0" -authors = ["Roey Berman "] +authors = ["Temporal Technologies Inc. 
"] license = "MIT" edition = "2024" +repository = "https://github.com/temporalio/sdk-typescript" +description = "Core Bridge for the TypeScript Temporal SDK" +publish = false exclude = ["index.node", "sdk-core"] [lib] @@ -17,17 +20,20 @@ lto = true incremental = false [dependencies] +anyhow = "1.0" async-trait = "0.1.83" +bridge-macros = { path = "bridge-macros" } futures = { version = "0.3", features = ["executor"] } log = "0.4" neon = { version = "1.0.0", default-features = false, features = ["napi-6", "futures"] } -opentelemetry = "0.24" +opentelemetry = "0.26" parking_lot = "0.12" prost = "0.13" prost-types = "0.13" +serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -tokio = "1.13" -once_cell = "1.19" temporal-sdk-core = { version = "*", path = "./sdk-core/core", features = ["ephemeral-server"] } temporal-client = { version = "*", path = "./sdk-core/client" } +thiserror = "2" +tokio = "1.13" tokio-stream = "0.1" diff --git a/packages/core-bridge/README.md b/packages/core-bridge/README.md index ae0d9f6ad..e271ae62c 100644 --- a/packages/core-bridge/README.md +++ b/packages/core-bridge/README.md @@ -2,4 +2,9 @@ [![NPM](https://img.shields.io/npm/v/@temporalio/core-bridge?style=for-the-badge)](https://www.npmjs.com/package/@temporalio/core-bridge) + Part of [Temporal](https://temporal.io)'s [TypeScript SDK](https://docs.temporal.io/typescript/introduction/). + +> [!CAUTION] **Important**: This package is not intended to be used directly. Any API + provided by this package is internal and subject to change without notice. 
+ diff --git a/packages/core-bridge/ai-rules.md b/packages/core-bridge/ai-rules.md new file mode 100644 index 000000000..fa8a2be0b --- /dev/null +++ b/packages/core-bridge/ai-rules.md @@ -0,0 +1,418 @@ +# Bridge Layer Coding Norms, Best Practices and Pertinent Knowledge + +This document captures the coding norms, best practices and key knowledge required to efficiently +contribute to development of the Rust ↔ TypeScript bridge layer of the Temporal TypeScript SDK. +This document is meant to guide both human developers and AI tools in maintaining consistency +and quality of the bridge layer. + +## 1. Bridge Layer Design Philosophy + +- **The bridge should be easy to maintain and review** — The bridge's code should clearly + expose its actual business logic and types, notably by applying mechanical and systematic + coding patterns that are easy to understand and verify. + + - The bridge layer is a fragile interface, as it breaks continuity of type safety guarantees + that TypeScript and Rust provide at other levels. The risk of bugs caused by inconsistencies + between the TS and Rust API definitions is real — we have in fact identified several such + bugs in the past — and we are highly reliant on developers' and reviewers' thorough + scrutiny to catch those inconsistencies. + + - By keeping the bridge's code thin and focused, we can significantly improve the + reviewability of API definitions between the TS and Rust sides, and therefore reduce + the risks of such incoherencies. + + - Care must notably be exercised to avoid drowning the bridge's main logic with JS/Rust + object manipulations, task scheduling, and other plumbing concerns that add unreasonable + complexity, hinder evolution and reviewability, and make it hard for new SDK developers + to approach that codebase. + + - Complexity should be moved out of the bridge business logic, preferably to TypeScript + user-facing packages or to the Core SDK project.
Where complexity is unavoidable, it should + be structured in a way that preserves reviewability of the bridge's business logic and + types, for example by extracting it to appropriate abstractions and helpers (i.e. like + we did with type conversions, errors, Futures, Callbacks, etc). + + - Though not always possible, we should generally strive to align the TS SDK's bridge's logic + with that of other official Temporal SDK bridges, as this makes it easier for our engineers + to do cross-SDKs maintenance and development. + + - https://github.com/temporalio/sdk-python + - https://github.com/temporalio/sdk-dotnet + - https://github.com/temporalio/sdk-ruby + +- **Bridge APIs are internal** — The bridge itself is an implementation detail and not meant to + be used directly. SDK features and APIs are exposed to users exclusively through the user-facing + packages: `@temporalio/worker`, `@temporalio/testing` and `@temporalio/common`. + + - The bridge provides absolutely no API stability guarantee, not even across patch-level + releases. It is therefore required that the bridge and all public-facing packages be the + same version number (i.e. combining SDK packages of different versions is not supported). + + - All functions and types exposed by the TypeScript `core-bridge` package are internal. + These should never be imported from user code, nor should they be reexported by our + own user-facing packages. + +- **Objects transferred through the bridge are DTOs** — The bridge must define internal + types specifically for the TS/Rust interface, independently of user-facing APIs defined + by user-facing packages. These internal types follow the key principles of the well-known + Data Transfer Object pattern. + + - As a general rule, bridge transfer types should mirror the corresponding Core SDK types + except when doing so would be impractical. Alignment with user-facing types is not an + objective.
Moreover, transfer types must adhere to some particular constraints that + will be described later in this document. + + - Bridge should only perform the minimal type validations that are strictly required as part + of its type conversion and business logic. More comprehensive validations and transformations, + including filling-in default values, preserving backward compatibility, converting from + user-friendly types and more, belong to user-facing packages. + +- **The bridge should be reliable** — As a critical component of the Temporal SDK, the bridge + should be highly reliable, notably regarding object lifecycle management, memory usage, and + concurrency challenges. Runtime performance should also not be neglected, though this is + arguably less of a concern at the bridge level compared to other parts of the SDK. + +## 2. Project Structure and Code Organization + +- **Module Organization**: + + - On the Rust side, bridge API functions and types are defined in files under + the `core-bridge/src` directory. Each major component has its own file + (`client.rs`, `worker.rs`, etc). + + - Configuration-related types for each major component are grouped in a nested + `config` submodule inside that component's file. + + - Rust side helper functionalities are designed as proper abstractions inside the + `helpers` module. Some abstractions are also provided through Derive Macros, + defined in the `bridge-macros` crate. + + - On the TS side, bridge APIs (including functions and types) are declared in + the `core-bridge/ts/native.ts` file. + + **Bridge API Functions**: + + - Component-specific prefix for API entrypoint functions (e.g., `client_new`, + `worker_poll_activity_task`, etc). + + - Function names are snake-case on the Rust side, but camel-case on the TS side. +- **Bridge API Types**: + + - On the Rust side, bridge types are defined in their respective component files + (e.g. 
client.rs, worker.rs, etc); configuration-related types are contained + inside the `config` submodule of the component file. + + - On the TypeScript side, most types are defined in the `native.ts` file, + collocated with API functions declarations of the same component. + +- **Declaration order**: + + - As a general rule, functions, types and properties of types should have the + same names in Rust and TypeScript (save for camel-case/snake-case equivalence + where appropriate), and should be listed in the same order. This makes review + significantly easier by allowing quick side-to-side comparison. + +## 3. Type System + +- **Type Conversion System**: + + - The bridge provides a type-safe conversion system between Rust and TypeScript + through two main traits: + + - `TryFromJs` for JavaScript to Rust conversions + - `TryIntoJs` for Rust to JavaScript conversions + + - These traits are implemented for primitive types, and can be derived for most struct and enums + through the `#[derive(TryFromJs)]` and `#[derive(TryIntoJs)]` macros. Custom implementations + are also possible to handle special cases. + +- **Option Handling**: + + - The bridge uses the TypeScript value `null` (rather than `undefined`) to represent the `None` + case of a value defined as `Option` on objects that are sent through the bridge; the + TypeScript mapped type `Option = T | null` can be used to model such value. + + - All expected properties must be present and set to a non-`undefined` value on objects + that are sent across the bridge; that notably means that properties on bridge types + should never be declared with the `?` operator in TypeScript (e.g. there should be no + property declared as `someProperty?: number`). 
+ + - This design allows differentiation between "intentionally unspecified" values (`null`, meaning + `None`) and "unintentionally missing" properties (`undefined`), which in turn allows early + detection of some frequent incoherency patterns between type definition in JS and Rust. + + - Key rules: + - Always use `null` for intentionally unspecified optional values + - Never use TypeScript's optional property syntax (`prop?: T`) + - Explicitly set all properties when sending objects to native code + +- **`TryFromJs` and `TryIntoJs` derive macros** + + - When applied to a Rust enum type, the derive macros expects the corresponding JS type + definition to contain a discriminator property named `type`, whose value must match the + name of the desired enum variant in kebab-case. Other fields of the variant are read from the + same JS object (a.k.a. "Internally tagged" in `serde`'s terminology). If an enum variant + accepts a tupple composed of a single field, then that field is de/serialized from/into the + object containing the `type` property. Refer to type `LogExporter` or `EphemeralServerConfig` + for usage examples of these macros with enum types. + + - Note that the `TryFromJs` and `TryIntoJs` derive macros are designed to handle most of the + patterns seen commonly in this code base; they are however far from being comprehensive. For + more complex use cases, it is generally preferable to implement the desired trait manually. + +- **JavaScript Function Exports**: + + - The bridge uses the `#[js_function]` derive macro to simplify the implementation of functions + that are exported to JavaScript through Neon. This macro handles all the boilerplate required + to safely convert JS arguments to Rust types and return values back to JavaScript. + + - A function decorated with `#[js_function]` must: + + - Accept only arguments that implements the `TryFromJs` trait; + - Return a `BridgeResult`, where T implements the `TryIntoJs` trait. 
+ + - The macro actually expands into two functions: + + - The actual JS-facing function, with the original name, which handles JS argument extraction + and conversion (i.e. using the `TryFromJs` trait to convert each argument to the specified + type), as well as result conversion back to JS (i.e. using the `TryIntoJs` trait for the + specified target type). It also ensures outbound conversion of `BridgeError` into Neon's + `Throw` type (by using the `IntoThrow` trait). + + - An implementation function with an `_impl` suffix that contains the original function body + and operates entirely on Rust types. + + - All functions meant to be called from JavaScript should be marked with this macro. The + functions must also be registered on the Neon's `ModuleContext`, using the pattern seen + in each component's `init` function: + + ```rust + fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("functionName", function_name)?; + // More exports... + Ok(()) + } + ``` + + - Each component's `init` functions are called by Neon's bootstrap code, on initialization + of the native module. See `core-bridge/src/lib.rs`. 
+ +- **Common Type Mappings**: + + The bridge supports the following type conversions between TypeScript and Rust: + + **TypeScript → Rust** (using `TryFromJs` trait): + + | TypeScript Type | Rust Type | + | ------------------------------------------- | ----------------------------------------------------------- | --- | + | `number` | `u16`, `i32`, `f32`, `u64`, `f64`, `usize` | x | + | `number` | `Duration` (from ms as `u64`) | x | + | `string` | `String` | x | + | `boolean` | `bool` | x | + | `T[]` or `Array` | `Vec` | x | + | `Record` or `{[key: string]: T}` | `HashMap` | x | + | `string` | `SocketAddr` | x | + | `string` | `Url` | x | + | `T \| null` | `Option` | x | + | `Buffer` | `Vec` | x | + | `unknwon` | `()` (empty tuple) | x | + | `[T1, T2, ...]` - to be confirmed | `(T1, T2, ...)` (tuple) | + | Function - to be reworked | `JsCallback`, `JsAsyncCallback` | + | `OpaqueHandle` - to be reworked | `OpaqueInboundHandle` | + + **Rust → TypeScript** (using `TryIntoJs` trait): + + | Rust Type | TypeScript Type | + | ------------------------------------------ | -------------------------- | --- | + | `String` | `string` | x | + | `bool` | `boolean` | x | + | `Vec` | `T[]` | x | + | `SystemTime` | `bigint` (as nano seconds) | x | + | `()` (unit tuple) | `undefined` | x | + | `(T1, T2, ...)` (tuple) - to be confirmed | `[T1, T2, ...]` | + | `Option` | `T \| null` | x | + | `Vec` | `Buffer` | x | + | `HashMap` - to be confirmed | `Record` | + | `BridgeFuture` - to be reworked | `Promise` | + | `OpaqueOutboundHandle` - to be reworked | `OpaqueHandle` | + + These mappings form the foundation of the interoperability between TypeScript and Rust in the bridge. + + For more complex cases, manual implementations of the `TryFromJs` and `TryIntoJs` traits may be needed. + +## 4. 
Error Handling + +- **Error Types and Results**: + + - The bridge defines the `BridgeError` enum, as well as an associated `BridgeResult` type + alias, which are the preferred types to report and propagate errors in the bridge layer. + The `BridgeError` enum notably provides the following advantages: + + - They can transparently encapsulate an already "thrown" `Throw` object, thus allowing + propagation through intermediate function calls that are not JS `Context` aware, + then rethrow the `Throw` when appropriate. + + - They can be emitted from a non-`Context`-aware function, and may even be sent across + threads to be finally converted to a `Throw` object if/once propagation reaches a + `Context` aware parent. The JS Error type is automatically determined based on the + variant of the `BridgeError`, including the possibility of sending some bridge specific + error types (see Custom JS Error Classes below). + + - They allow enrichment of errors with contextual information that is provided while + propagating the error up through the call stack, either through `anyhow`'s `.context(...)` + method, or the `BridgeError`'s specific `.field(...)` method. + + - Functions that return a `JsResult` or `NeonResult` (which implies they have access to + a live `Context`), such as implementations of the `TryIntoJs` and `js_function` traits, + are responsible for converting back `BridgeResult` into `Throw`, e.g. by calling + `BridgeResult::into_throw(result, cx)?`. + + - Note that Neon's `Throw` objects can't be `Send`, _not even when wrapped in a `BridgeError`_ + (there are anyway rarely use cases that would imply such a thing). With very few exceptions + (noted in Neon's typedocs), once a `Throw` object has been created, the Node's thread is in + a "throwing" state, and any further call to any `Context` object will cause a panic; the + "throwing" state remains until the `Throw` object is finally propagated back to the JS caller.
+ +- **Error Context**: + + - Always use the `.field()` method on `BridgeResult` to add context to errors when accessing + object properties on JS objects; each call to `field()` _prepends_ a path component, resulting + in a complete path to the problematic value once the error has propagated all the way up + (e.g. `fn some_func.args[4].foo.bar: ...`). Note that the `TryFromJs` and `js_function` + derive macros, and most existing helpers already add field context; extra work is only + required when manually implementing the `TryFromJs` trait or when reaching for helpers + that do not add context. + + - Always use the `.context()` method on `BridgeResult` to add context to errors that are + automatically converted from foreign errors or `BridgeResult` that are propagated up + through the caller stack, unless the error itself already provides sufficient context. + +- **Custom JS Error Classes**: + + - Neon's API provides built-in support for the following standard JS errors: `Error`, + `TypeError` and `RangeError`. The bridge also adds support for the following custom JS + errors: `IllegalStateError`, `UnexpectedError`, `TransportError` and `ShutdownError`. + + - Custom JS error classes are defined in `core-bridge/ts/errors.ts` file (for the TS class + definition and name-to-class conversion) and in `core-bridge/src/helpers/errors.rs` (for + the Rust counterpart). Both of these files must be kept in sync. + + - Note that the Rust code does not actually instantiate the proper JS classes, as those are + not accessible from the native code (i.e. they are not exposed as globals or on any object + currently accessible to the native bridge). Instead, the Rust code creates instances of the + `Error` class, then simply overrides the `name` property of the error object. Those error + objects are then replaced with instances of the appropriate Error classes by a pure-JS wrapper + that is added on load of the native library (see `core-bridge/index.js`). 
+ +## FIXME: Anything below this point is incomplete / not reviewed + +## 5. Asynchronous Operations and Thread Safety + +- **Future and Promise Handling**: + + - Consistent use of `future_to_promise()` to convert Rust futures to JS promises + - Extension trait `RuntimeExt` to simplify working with futures + - Clear distinction between sync and async functions + - Proper typing of Promise results + - Careful handling of async error propagation + +- **Thread Safety**: + + - Strict separation between JS and Rust threads + - Careful handling of JS contexts to avoid using them across threads + - Use of the `enter_sync!` macro when entering tokio context + - Proper synchronization of shared resources + - Use of `Arc` and `Mutex` for thread-safe data sharing + - Clear documentation of thread safety requirements + +- **Concurrency Patterns**: + - Consistent patterns for handling concurrent operations + - Clear documentation of thread safety guarantees + - Proper handling of cancellation and timeouts + - Careful management of async resources + +## 6. Resource and Handle Management + +- **Handle Types**: + + - Use of `OpaqueInboundHandle` and `OpaqueOutboundHandle` to safely pass Rust objects to/from JS + - TypeScript interfaces for handles with type discriminators + - Consistent patterns for borrowing (`borrow_inner()`) and taking ownership (`take_inner()`) + - Implementation of `Finalize` trait for proper cleanup + +- **Resource Lifecycle**: + + - Clear patterns for resource creation and destruction + - Proper cleanup in error cases + - Thread-safe resource management + - Documentation of resource ownership and lifetime + +- **Memory Management**: + - Careful management of object lifecycles + - Proper cleanup of resources in error cases + - Efficient memory usage patterns + - Clear documentation of memory management requirements + +## 7. 
Code Style and Naming Conventions + +- **Rust Code Style**: + + - Use of `/////` line separators to visually divide sections of code + - Standard library imports first, external crate imports next, internal imports last + - Organized by category with blank lines between groups + - Consistent parameter ordering (handles first, then configuration) + - Clear module organization and file structure + +- **TypeScript Code Style**: + + - Consistent use of TypeScript features + - Clear type definitions and interfaces + - Proper use of type aliases and enums + - Consistent file organization + +- **Naming Conventions**: + - PascalCase for types (`BridgeWorkerOptions`, `EphemeralServer`) + - snake_case for Rust functions and variables + - camelCase for TypeScript functions and variables + - Consistent naming patterns (`Core` prefix for sdk-core types) + - Clear and descriptive names for all identifiers + +## 8. Documentation, Testing, and Review + +- **Code Documentation**: + + - Doc comments on public functions and types + - Clear explanations of complex patterns + - Comments for non-obvious code sections + - Use of `// FIXME: ...` comments for issues that need attention + - Examples of proper usage + +- **Testing Guidelines**: + + - Unit tests for all bridge functions + - Integration tests for complex interactions + - Tests for error cases and edge conditions + - Performance tests for critical paths + - Clear test organization and naming + +- **Review Guidelines**: + - Function names must match exactly (camelCase in TypeScript, snake_case in Rust) + - Parameter order must be identical on both sides + - Field names must match exactly (camelCase in TypeScript, snake_case in Rust) + - Field order should match to make review easier + - Nested structures must maintain the same hierarchy + - Comments and documentation should be consistent + - Type definitions should be in the same order for easy review + - Thread safety considerations must be documented + - Resource cleanup must 
be verified + +============ + +Extra notes: + +- It is not immediately obvious that implementing Neon's `Finalize` for our types provides + any real advantage over implementing `Drop` (or just doing nothing!). Finalize provides a + `Context` and therefore execute on Node's thread, which could be pertinent for some operations, + but doesn't seem fit our use cases. Implementing `Drop` should be sufficient in most cases, + and may feel more natural to Rust developers. diff --git a/packages/core-bridge/bridge-macros/Cargo.toml b/packages/core-bridge/bridge-macros/Cargo.toml new file mode 100644 index 000000000..795195223 --- /dev/null +++ b/packages/core-bridge/bridge-macros/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "bridge-macros" +version = "0.1.0" +edition = "2024" + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "2.0", features = ["full", "extra-traits"] } +quote = "1.0" +proc-macro2 = "1.0" +convert_case = "0.6" + +[dev-dependencies] +temporal-sdk-typescript-bridge = { path = ".." } diff --git a/packages/core-bridge/bridge-macros/src/derive_js_function.rs b/packages/core-bridge/bridge-macros/src/derive_js_function.rs new file mode 100644 index 000000000..ba9a007ba --- /dev/null +++ b/packages/core-bridge/bridge-macros/src/derive_js_function.rs @@ -0,0 +1,126 @@ +use quote::format_ident; +use quote::quote; +use syn::GenericArgument; +use syn::Pat; +use syn::PathArguments; +use syn::Type; +use syn::{FnArg, ItemFn, PatType, ReturnType, parse_macro_input}; + +pub fn js_function_impl( + _attr: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + let input = parse_macro_input!(item as ItemFn); + + let vis = &input.vis; + let fn_name = &input.sig.ident; + let generics = &input.sig.generics; // Is this still pertinent? + let args = &input.sig.inputs; + let return_type = &input.sig.output; // Can we avoid in some cases? 
+ let fn_block = &input.block; + + let fn_impl_name = format_ident!("{}_impl", fn_name); + + // Extract function arguments + let args = args + .iter() + .filter_map(|arg| { + if let FnArg::Typed(PatType { pat, ty, .. }) = arg { + if let Pat::Ident(pat_ident) = &**pat { + Some((pat_ident.ident.clone(), (*ty).clone())) + } else { + None + } + } else { + None + } + }) + .collect::>(); + + // Extract return type + let (result_return_type, inner_return_type) = match return_type { + ReturnType::Type(_, ty) => match extract_bridge_result_type(ty) { + Some(inner_type) => (ty, inner_type), + None => panic!("Return type must be a BridgeResult"), + }, + ReturnType::Default => panic!("Return type must be a BridgeResult"), + }; + + // Generate argument conversions + let arg_conversions = args.iter().enumerate().map(|(i, (name, ty))| { + quote! { + use crate::helpers::*; + let #name = cx.argument_into::<#ty>(#i).field(format!("fn {}", stringify!(#fn_name)).as_str()).into_throw(&mut cx)?; + } + }); + + // Generate implementation function arguments + let impl_args = args.iter().map(|(name, ty)| { + quote! { #name: #ty } + }); + + // Generate argument names for impl function call + let arg_names = args.iter().map(|(name, _)| quote! { #name }); + + let expanded = if args.is_empty() { + // No arguments case + quote! { + // Bridge function + #[allow(clippy::significant_drop_tightening, clippy::unnecessary_wraps)] + #vis fn #fn_name #generics( + mut cx: FunctionContext #generics + ) -> JsResult<<#inner_return_type as crate::helpers::TryIntoJs>::Output> { + let result = #fn_impl_name().into_throw(&mut cx)?; + result.try_into_js(&mut cx) + } + + // Implementation function + #[allow(clippy::unnecessary_wraps)] + #vis fn #fn_impl_name #generics() -> #result_return_type { + #fn_block + } + } + } else { + // With arguments case + quote! 
{ + // Bridge function + #[allow(clippy::significant_drop_tightening, clippy::unnecessary_wraps)] + #vis fn #fn_name #generics( + mut cx: FunctionContext #generics + ) -> JsResult<<#inner_return_type as crate::helpers::TryIntoJs>::Output> { + #(#arg_conversions)* + + let result = #fn_impl_name(#(#arg_names),*).into_throw(&mut cx)?; + result.try_into_js(&mut cx) + } + + // Implementation function + #[allow(clippy::unnecessary_wraps)] + #vis fn #fn_impl_name #generics(#(#impl_args),*) -> #result_return_type { + #fn_block + } + } + }; + + // Uncomment this line to print the expanded macro + // eprintln!("=== fn {} ===\n{}\n======", fn_name.to_string(), expanded); + + proc_macro::TokenStream::from(expanded) +} + +fn extract_bridge_result_type(ty: &Type) -> Option<&Type> { + if let Type::Path(type_path) = ty { + if let Some(last_segment) = type_path.path.segments.last() { + // Check if it's BridgeResult + if last_segment.ident == "BridgeResult" { + // Extract the type parameter T from BridgeResult + if let PathArguments::AngleBracketed(generic_args) = &last_segment.arguments { + if let Some(GenericArgument::Type(inner_type)) = generic_args.args.first() { + return Some(inner_type); + } + } + } + } + } + None +} diff --git a/packages/core-bridge/bridge-macros/src/derive_tryfromjs.rs b/packages/core-bridge/bridge-macros/src/derive_tryfromjs.rs new file mode 100644 index 000000000..6f0103c74 --- /dev/null +++ b/packages/core-bridge/bridge-macros/src/derive_tryfromjs.rs @@ -0,0 +1,152 @@ +use convert_case::{Case, Casing}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{DeriveInput, FieldsNamed, FieldsUnnamed}; + +pub fn derive_tryfromjs_struct(input: &DeriveInput, data: &syn::DataStruct) -> TokenStream { + let struct_ident = &input.ident; + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let field_conversions = if let syn::Fields::Named(ref fields) = data.fields { + 
field_conversions_for_named_fields(fields) + } else { + panic!("Only named fields are supported") + }; + + let expanded = quote! { + impl #impl_generics crate::helpers::TryFromJs for #struct_ident #ty_generics #where_clause { + fn try_from_js<'cx, 'b>(cx: &mut impl neon::prelude::Context<'cx>, js_value: neon::prelude::Handle<'b, neon::prelude::JsValue>) -> crate::helpers::BridgeResult { + use crate::helpers::*; + + let obj = js_value.downcast::(cx)?; + Ok(Self { + #(#field_conversions),* + }) + } + } + }; + + // Uncomment this line to print the expanded macro + // eprintln!( + // "=== struct {} ===\n{}\n======", + // struct_ident.to_string(), + // expanded + // ); + + TokenStream::from(expanded) +} + +pub fn derive_tryfromjs_enum(input: &DeriveInput, data: &syn::DataEnum) -> TokenStream { + let enum_ident = &input.ident; + let enum_name = enum_ident.to_string(); + let variants = &data.variants; + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let variant_conversions = variants.iter().map(|v| { + let variant_ident = &v.ident; + let discriminant = variant_ident.to_string().to_case(Case::Kebab); + let js_discriminant = variant_ident.to_string().to_case(Case::Camel); + + match &v.fields { + syn::Fields::Unit => { + // e.g. "otel" => Ok(MetricsExporter::Otel) + quote! { + #discriminant => Ok(#enum_ident::#variant_ident), + } + } + syn::Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + if unnamed.len() != 1 { + panic!("Enum variant must have a single unnamed field that implements the TryFromJs trait"); + } + let ty = unnamed.first().map(|f| f.ty.clone()).unwrap(); + match ty { + syn::Type::Path(path) => { + // Example output: + // + // "otel" => { + // ::try_from_js(cx, js_value).field("otel").map(MetricsExporter::Otel) + // } + quote! 
{ + #discriminant => { + <#path>::try_from_js(cx, js_value).field(&#js_discriminant).map(#enum_ident::#variant_ident) + }, + } + }, + _ => panic!("Enum variant must have a single unnamed field that implements the TryFromJs trait"), + } + } + syn::Fields::Named(fields) => { + // Example output: + // + // "console" => Ok((|| { + // Ok::(LogExporter::Console { + // filter: { obj.get_property_into(cx, "filter")? }, + // }) + // })() + // .field(format!("type={}", type_str).as_str())?), + // + // The inner closure is required so that we can use the `field` method on the result. + // An alternative would be to do that at the field level, but then that concern would + // spill into the field_conversions_for_named_fields function, which is used in + // other places. + let field_conversions = field_conversions_for_named_fields(fields); + quote! { + #discriminant => Ok(( || { + Ok::<#enum_ident #ty_generics, crate::helpers::BridgeError>(#enum_ident::#variant_ident { + #(#field_conversions),* + }) + })() + .field(&#js_discriminant)?), + } + } + } + }); + + let expanded = quote! 
{ + impl #impl_generics crate::helpers::TryFromJs for #enum_ident #ty_generics #where_clause { + fn try_from_js<'cx, 'b>(cx: &mut impl neon::prelude::Context<'cx>, js_value: neon::prelude::Handle<'b, neon::prelude::JsValue>) -> crate::helpers::BridgeResult { + use crate::helpers::*; + + let obj = js_value.downcast::(cx)?; + let type_str: String = obj.get_property_into(cx, "type")?; + + match type_str.as_str() { + #(#variant_conversions)* + _ => Err(crate::helpers::BridgeError::InvalidVariant { + enum_name: #enum_name.to_string(), + variant: type_str, + }), + } + } + } + }; + + // Uncomment this line to print the expanded macro + // eprintln!( + // "=== enum {} ===\n{}\n======", + // enum_ident.to_string(), + // expanded + // ); + + TokenStream::from(expanded) +} + +fn field_conversions_for_named_fields( + fields: &FieldsNamed, +) -> impl Iterator + '_ { + fields.named.iter().map(|f| { + let field_ident = f + .ident + .as_ref() + .expect("FieldsNamed.named must have an identifier"); + let js_name = field_ident.to_string().to_case(Case::Camel); + + quote! { + #field_ident: { + obj.get_property_into(cx, #js_name)? 
+ } + } + }) +} diff --git a/packages/core-bridge/bridge-macros/src/derive_tryintojs.rs b/packages/core-bridge/bridge-macros/src/derive_tryintojs.rs new file mode 100644 index 000000000..e3fe94301 --- /dev/null +++ b/packages/core-bridge/bridge-macros/src/derive_tryintojs.rs @@ -0,0 +1,154 @@ +use convert_case::{Case, Casing}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{DeriveInput, FieldsNamed}; + +pub fn derive_tryintojs_struct(input: &DeriveInput, data: &syn::DataStruct) -> TokenStream { + let struct_ident = &input.ident; + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let field_conversions = if let syn::Fields::Named(ref fields) = data.fields { + field_conversions_for_named_fields(fields) + } else { + panic!("Only named fields are supported") + }; + + let expanded = quote! { + impl #impl_generics crate::helpers::TryIntoJs for #struct_ident #ty_generics #where_clause { + type Output = neon::types::JsObject; + + fn try_into_js<'a>(self, cx: &mut impl neon::prelude::Context<'a>) -> neon::result::JsResult<'a, Self::Output> { + let obj = cx.empty_object(); + #(#field_conversions)* + Ok(obj) + } + } + }; + + TokenStream::from(expanded) +} + +pub fn derive_tryintojs_enum(input: &DeriveInput, data: &syn::DataEnum) -> TokenStream { + let enum_ident = &input.ident; + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let variant_conversions = data.variants.iter().map(|v| { + let variant_ident = &v.ident; + let js_discriminant = variant_ident.to_string().to_case(Case::Camel); + + match &v.fields { + syn::Fields::Unit => { + quote! 
{ + #enum_ident::#variant_ident => { + let obj = cx.empty_object(); + let type_str = cx.string(#js_discriminant); + obj.set(cx, "type", type_str)?; + obj + } + } + } + syn::Fields::Unnamed(fields) => { + if fields.unnamed.len() != 1 { + panic!("Enum variants with unnamed fields must have exactly one field"); + } + + quote! { + #enum_ident::#variant_ident(inner) => { + let obj = cx.empty_object(); + let type_str = cx.string(#js_discriminant); + obj.set(cx, "type", type_str)?; + + let inner_js = inner.try_into_js(cx)?; + if inner_js.is_a::(cx) { + // If inner is a JsObject, copy its properties to our object + let inner_obj = inner_js.downcast::(cx).unwrap(); + let prop_names = inner_obj.get_own_property_names(cx)?.to_vec(cx)?; + + for key_handle in prop_names { + let value = inner_obj.get_value(cx, key_handle)?; + obj.set(cx, key_handle, value)?; + } + } + + obj + } + } + } + syn::Fields::Named(fields) => { + let field_names = fields.named.iter().map(|f| { + let field_ident = &f.ident; + let js_name = field_ident.as_ref().unwrap().to_string().to_case(Case::Camel); + (field_ident, js_name) + }); + + let pattern_fields = field_names.clone().map(|(field_ident, _)| { + quote! { #field_ident } + }); + + let field_sets = field_names.map(|(field_ident, js_name)| { + quote! { + let js_value = #field_ident.try_into_js(cx)?; + obj.set(cx, #js_name, js_value)?; + } + }); + + quote! { + #enum_ident::#variant_ident { #(#pattern_fields),* } => { + let obj = cx.empty_object(); + let type_str = cx.string(#js_discriminant); + obj.set(cx, "type", type_str)?; + + #(#field_sets)* + + obj + } + } + } + } + }); + + let expanded = quote! 
{ + impl #impl_generics crate::helpers::TryIntoJs for #enum_ident #ty_generics #where_clause { + type Output = neon::types::JsObject; + + fn try_into_js<'a>(self, cx: &mut impl neon::prelude::Context<'a>) -> neon::result::JsResult<'a, Self::Output> { + Ok(match self { + #(#variant_conversions),* + }) + } + } + }; + + // Uncomment this line to print the expanded macro + // eprintln!("=== enum {} ===\n{}\n======", enum_ident.to_string(), expanded); + + TokenStream::from(expanded) +} + +fn field_conversions_for_named_fields( + fields: &FieldsNamed, +) -> impl Iterator + '_ { + fields.named.iter().map(|f| { + let field_ident = f + .ident + .as_ref() + .expect("FieldsNamed.named must have an identifier"); + let js_name = field_ident.to_string().to_case(Case::Camel); + + // Ignore PhantomData fields + if let syn::Type::Path(path) = &f.ty { + if let Some(segment) = path.path.segments.last() { + if segment.ident == "PhantomData" { + return quote! {}; + } + } + } + + quote! { + let js_value = self.#field_ident.try_into_js(cx)?; + obj.set(cx, #js_name, js_value)?; + } + }) +} diff --git a/packages/core-bridge/bridge-macros/src/lib.rs b/packages/core-bridge/bridge-macros/src/lib.rs new file mode 100644 index 000000000..f3e31a682 --- /dev/null +++ b/packages/core-bridge/bridge-macros/src/lib.rs @@ -0,0 +1,39 @@ +mod derive_js_function; +mod derive_tryfromjs; +mod derive_tryintojs; + +use derive_js_function::js_function_impl; +use derive_tryfromjs::{derive_tryfromjs_enum, derive_tryfromjs_struct}; +use derive_tryintojs::{derive_tryintojs_enum, derive_tryintojs_struct}; +use proc_macro::TokenStream; +use syn::{DeriveInput, parse_macro_input}; + +/// Procedural macro for defining bridge types with compile-time field name conversion +#[proc_macro_derive(TryFromJs)] +pub fn try_from_js(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + match &input.data { + syn::Data::Struct(data) => derive_tryfromjs_struct(&input, data).into(), + 
syn::Data::Enum(data) => derive_tryfromjs_enum(&input, data).into(), + syn::Data::Union(_) => panic!("Unions are not supported"), + } +} + +/// Procedural macro for defining bridge types with compile-time field name conversion +#[proc_macro_derive(TryIntoJs)] +pub fn try_into_js(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + match &input.data { + syn::Data::Struct(data) => derive_tryintojs_struct(&input, data).into(), + syn::Data::Enum(data) => derive_tryintojs_enum(&input, data).into(), + syn::Data::Union(_) => panic!("Unions are not supported"), + } +} + +/// Generates a function that can be called from JavaScript with the given name +#[proc_macro_attribute] +pub fn js_function(attr: TokenStream, input: TokenStream) -> TokenStream { + js_function_impl(attr, input) +} diff --git a/packages/core-bridge/index.js b/packages/core-bridge/index.js index 2c6e92aba..3ca13a77f 100644 --- a/packages/core-bridge/index.js +++ b/packages/core-bridge/index.js @@ -2,30 +2,86 @@ const { getPrebuiltPath } = require('./common'); const typescriptExports = require('./lib/index'); const { convertFromNamedError } = require('./lib/errors'); +/** + * Wraps calls to native functions to convert "named errors" to the correct error types. + */ function wrapErrors(fn) { return (...args) => { try { - // Some of our native functions expect callback functions. When present, these callbacks are - // always the last argument passed to the function, and always adhere to the signature - // `callback(err, result)`. If a callback is present, then make sure that errors sent - // to it are also converted. 
- if (typeof args[args.length - 1] === 'function') { - const callback = args[args.length - 1]; - args[args.length - 1] = (e, x) => callback(convertFromNamedError(e, false), x); + let res = fn(...args); + if (res instanceof Promise) { + return res.catch((e) => { + throw convertFromNamedError(e, false); + }); } - return fn(...args); + return res; } catch (e) { throw convertFromNamedError(e, true); } }; } +let wrapper = (f) => wrapErrors(f); + +/** + * Wraps calls to native functions to add tracing logs. + * + * IMPORTANT: This is meant for internal SDK debugging only. + * Always disable this block before committing back to the repo. + */ +// FIXME(JWH): DO NOT COMMIT THIS LINE +if (process.env.TEMPORAL_TRACE_NATIVE_CALLS?.toLowerCase() === 'true') { + // Generate a random 4-character ID for the current execution + let execId = Math.random().toString(36).substring(2, 6); + let callSequence = 100000; + + function wrapDebug(fn, fnname) { + // Functions names looks like `temporal_sdk_typescript_bridge::logs::get_time_of_day` + // Strip the path to retain only the function name + fnname = fnname.substring(fnname.lastIndexOf('::') + 2); + + // Do not trace these functions, they are way too verbose + const ignored = ['get_time_of_day', 'poll_logs']; + if (ignored.includes(fnname)) return fn; + + return (...args) => { + let callid = `${execId}:${String(callSequence++).substring(1)}`; + + try { + console.log(`${new Date().toISOString()} @@@@ ${callid} ${fnname}() - calling in`); + + let res = fn(...args); + + if (res instanceof Promise) { + console.log(`${new Date().toISOString()} @@@@ ${callid} ${fnname}() - received promise`); + return res.then( + (x) => { + console.log(`${new Date().toISOString()} @@@@ ${callid} ${fnname}() - promise resolved`); + return x; + }, + (e) => { + console.log(`${new Date().toISOString()} @@@@ ${callid} ${fnname}() - promise rejected with ${e}`); + throw convertFromNamedError(e, false); + } + ); + } else { + console.log(`${new 
Date().toISOString()} @@@@ ${callid} ${fnname}() - returned`); + } + + return res; + } catch (e) { + console.log(`${new Date().toISOString()} @@@@ ${callid} ${fnname}() - threw an error ${e}`); + throw convertFromNamedError(e, true); + } + }; + } + + wrapper = (f) => wrapDebug(wrapErrors(f), f.name); +} try { const nativeLibPath = getPrebuiltPath(); - const nativeExports = Object.fromEntries( - Object.entries(require(nativeLibPath)).map(([name, fn]) => [name, wrapErrors(fn)]) - ); - module.exports = { ...typescriptExports, ...nativeExports }; + const native = Object.fromEntries(Object.entries(require(nativeLibPath)).map(([name, fn]) => [name, wrapper(fn)])); + module.exports = { ...typescriptExports, native }; } catch (err) { throw err; } diff --git a/packages/core-bridge/src/client.rs b/packages/core-bridge/src/client.rs new file mode 100644 index 000000000..76bce1dac --- /dev/null +++ b/packages/core-bridge/src/client.rs @@ -0,0 +1,209 @@ +use std::{collections::HashMap, sync::Arc}; + +use neon::prelude::*; + +use temporal_client::{ClientInitError, ConfiguredClient, TemporalServiceClientWithMetrics}; +use temporal_sdk_core::{ClientOptions as CoreClientOptions, CoreRuntime, RetryClient}; + +use bridge_macros::js_function; + +use crate::runtime::Runtime; +use crate::{helpers::*, runtime::RuntimeExt as _}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +type CoreClient = RetryClient>; + +pub struct Client { + // These fields are pub because they are accessed from Worker::new + pub(crate) core_runtime: Arc, + pub(crate) core_client: CoreClient, +} + +impl Finalize for Client {} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("newClient", client_new)?; + cx.export_function("clientUpdateHeaders", client_update_headers)?; + 
cx.export_function("clientUpdateApiKey", client_update_api_key)?; + cx.export_function("clientClose", client_close)?; + + Ok(()) +} + +/// Create a connected gRPC client which can be used to initialize workers. +#[js_function] +pub fn client_new( + runtime: OpaqueInboundHandle, + config: config::BridgeClientOptions, +) -> BridgeResult>> { + let runtime = runtime.borrow_inner()?.core_runtime.clone(); + let config: CoreClientOptions = config.try_into()?; + + runtime.clone().future_to_promise(async move { + let metric_meter = runtime.clone().telemetry().get_temporal_metric_meter(); + + let res = config.connect_no_namespace(metric_meter).await; + + let core_client = match res { + Ok(core_client) => core_client, + Err(ClientInitError::SystemInfoCallError(e)) => Err(BridgeError::TransportError( + format!("Failed to call GetSystemInfo: {e}"), + ))?, + Err(ClientInitError::TonicTransportError(e)) => { + Err(BridgeError::TransportError(format!("{e:?}")))? + } + Err(ClientInitError::InvalidUri(e)) => Err(BridgeError::TypeError { + message: e.to_string(), + field: None, + })?, + }; + + Ok(OpaqueOutboundHandle::new(Client { + core_runtime: runtime, + core_client, + })) + }) +} + +/// Update a Client's HTTP request headers +#[js_function] +pub fn client_update_headers( + client: OpaqueInboundHandle, + headers: HashMap, +) -> BridgeResult<()> { + client + .borrow_inner()? + .core_client + .get_client() + .set_headers(headers); + Ok(()) +} + +/// Update a Client's API key +#[js_function] +pub fn client_update_api_key(client: OpaqueInboundHandle, key: String) -> BridgeResult<()> { + client + .borrow_inner()? + .core_client + .get_client() + .set_api_key(Some(key)); + Ok(()) +} + +#[js_function] +pub fn client_close(client: OpaqueInboundHandle) -> BridgeResult<()> { + // Just drop the client; there's actually no "close" method on Client. 
+ let _client = client.take_inner()?; + Ok(()) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +mod config { + use std::collections::HashMap; + + use anyhow::Context as _; + + use temporal_client::HttpConnectProxyOptions; + use temporal_sdk_core::{ + ClientOptions as CoreClientOptions, ClientOptionsBuilder, ClientTlsConfig, + TlsConfig as CoreTlsConfig, Url, + }; + + use bridge_macros::TryFromJs; + + use crate::helpers::*; + + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct BridgeClientOptions { + url: Url, + sdk_version: String, + tls: Option, + proxy: Option, + metadata: Option>, + api_key: Option, + disable_error_code_metric_tags: bool, + } + + impl TryInto for BridgeClientOptions { + type Error = BridgeError; + + fn try_into(self) -> Result { + let mut builder = ClientOptionsBuilder::default(); + + if let Some(tls) = self.tls { + builder.tls_cfg(tls.try_into()?); + } + + if let Some(proxy) = self.proxy { + builder.http_connect_proxy(Some(proxy.try_into()?)); + } + + let client_options = builder + .target_url(self.url) + .client_name("temporal-typescript".to_string()) + .client_version(self.sdk_version) + .headers(self.metadata) + .api_key(self.api_key) + .disable_error_code_metric_tags(self.disable_error_code_metric_tags) + .build() + .context("Invalid client options")?; + + Ok(client_options) + } + } + + #[derive(Debug, Clone, TryFromJs)] + struct TlsConfig { + server_name_override: Option, + server_root_ca_certificate: Option>, + client_cert_pair: Option, + } + + #[derive(Debug, Clone, TryFromJs)] + struct ClientTlsConfigPair { + crt: Vec, + key: Vec, + } + + impl TryInto for TlsConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + Ok(CoreTlsConfig { + server_root_ca_cert: self.server_root_ca_certificate, + domain: self.server_name_override, + client_tls_config: self.client_cert_pair.map(|pair| ClientTlsConfig { + client_cert: pair.crt, + client_private_key: pair.key, + }), 
+ }) + } + } + + #[derive(Debug, Clone, TryFromJs)] + struct ProxyConfig { + target_host: String, + basic_auth: Option, + } + + #[derive(Debug, Clone, TryFromJs)] + struct ProxyBasicAuth { + username: String, + password: String, + } + + impl TryInto for ProxyConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + Ok(HttpConnectProxyOptions { + target_addr: self.target_host, + basic_auth: self.basic_auth.map(|auth| (auth.username, auth.password)), + }) + } + } +} diff --git a/packages/core-bridge/src/conversions.rs b/packages/core-bridge/src/conversions.rs deleted file mode 100644 index 2c2da9d07..000000000 --- a/packages/core-bridge/src/conversions.rs +++ /dev/null @@ -1,698 +0,0 @@ -use crate::helpers::*; -use neon::{ - context::Context, - handle::Handle, - prelude::*, - types::{JsBoolean, JsNumber, JsString}, -}; -use slot_supplier_bridge::SlotSupplierBridge; -use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; -use temporal_client::HttpConnectProxyOptions; -use temporal_sdk_core::api::{ - telemetry::{HistogramBucketOverrides, OtlpProtocol}, - worker::{PollerBehavior, SlotKind}, -}; -use temporal_sdk_core::{ - ClientOptions, ClientOptionsBuilder, ClientTlsConfig, ResourceBasedSlotsOptions, - ResourceBasedSlotsOptionsBuilder, ResourceSlotOptions, RetryConfig, SlotSupplierOptions, - TlsConfig, TunerHolderOptionsBuilder, Url, - api::telemetry::{Logger, MetricTemporality, TelemetryOptions, TelemetryOptionsBuilder}, - api::{ - telemetry::{ - OtelCollectorOptionsBuilder, PrometheusExporterOptionsBuilder, metrics::CoreMeter, - }, - worker::{WorkerConfig, WorkerConfigBuilder}, - }, - ephemeral_server::{ - TemporalDevServerConfig, TemporalDevServerConfigBuilder, TestServerConfig, - TestServerConfigBuilder, - }, - telemetry::{build_otlp_metric_exporter, start_prometheus_metric_exporter}, -}; - -mod slot_supplier_bridge; - -pub enum EphemeralServerConfig { - TestServer(TestServerConfig), - DevServer(TemporalDevServerConfig), -} - 
-pub trait ArrayHandleConversionsExt { - fn to_vec_of_string(&self, cx: &mut FunctionContext) -> NeonResult>; - fn to_vec_of_float(&self, cx: &mut FunctionContext) -> NeonResult>; -} - -impl ArrayHandleConversionsExt for Handle<'_, JsArray> { - fn to_vec_of_string(&self, cx: &mut FunctionContext) -> NeonResult> { - let js_vec = self.to_vec(cx)?; - let len = js_vec.len(); - let mut ret_vec = Vec::::with_capacity(len); - - for i in js_vec.iter().take(len) { - ret_vec.push(i.downcast_or_throw::(cx)?.value(cx)); - } - Ok(ret_vec) - } - - fn to_vec_of_float(&self, cx: &mut FunctionContext) -> NeonResult> { - let js_vec = self.to_vec(cx)?; - let len = js_vec.len(); - let mut ret_vec = Vec::::with_capacity(len); - - for i in js_vec.iter().take(len) { - ret_vec.push(i.downcast_or_throw::(cx)?.value(cx)); - } - Ok(ret_vec) - } -} - -type BoxedMeterMaker = Box Result, String> + Send + Sync>; - -pub(crate) type TelemOptsRes = (TelemetryOptions, Option); - -pub(crate) trait ObjectHandleConversionsExt { - fn set_default(&self, cx: &mut FunctionContext, key: &str, value: &str) -> NeonResult<()>; - fn as_client_options(&self, ctx: &mut FunctionContext) -> NeonResult; - fn as_telemetry_options(&self, cx: &mut FunctionContext) -> NeonResult; - fn as_worker_config(&self, cx: &mut FunctionContext) -> NeonResult; - fn as_ephemeral_server_config( - &self, - cx: &mut FunctionContext, - sdk_version: String, - ) -> NeonResult; - fn as_hash_map_of_string_to_string( - &self, - cx: &mut FunctionContext, - ) -> NeonResult>; - fn as_hash_map_of_string_to_vec_of_floats( - &self, - cx: &mut FunctionContext, - ) -> NeonResult>>; - fn into_slot_supplier( - self, - cx: &mut FunctionContext, - rbo: &mut Option, - ) -> NeonResult>; -} - -impl ObjectHandleConversionsExt for Handle<'_, JsObject> { - fn as_hash_map_of_string_to_string( - &self, - cx: &mut FunctionContext, - ) -> NeonResult> { - let props = self.get_own_property_names(cx)?; - let props = props.to_vec(cx)?; - let mut map = HashMap::new(); 
- for k in props { - let k = k.to_string(cx)?; - let v = self.get::(cx, k)?.value(cx); - let k = k.value(cx); - map.insert(k, v); - } - Ok(map) - } - - fn as_hash_map_of_string_to_vec_of_floats( - &self, - cx: &mut FunctionContext, - ) -> NeonResult>> { - let props = self.get_own_property_names(cx)?; - let props = props.to_vec(cx)?; - let mut map = HashMap::new(); - for k in props { - let k = k.to_string(cx)?; - let v = self.get::(cx, k)?.to_vec_of_float(cx)?; - let k = k.value(cx); - map.insert(k, v); - } - Ok(map) - } - - fn as_client_options(&self, cx: &mut FunctionContext) -> NeonResult { - let url = match Url::parse(&js_value_getter!(cx, self, "url", JsString)) { - Ok(url) => url, - // Note that address is what's used in the Node side. - Err(_) => cx.throw_type_error("Invalid serverOptions.address")?, - }; - - let tls_cfg = match js_optional_getter!(cx, self, "tls", JsObject) { - None => None, - Some(tls) => { - let domain = js_optional_value_getter!(cx, &tls, "serverNameOverride", JsString); - - let server_root_ca_cert = get_optional_vec(cx, &tls, "serverRootCACertificate")?; - let client_tls_config = - match js_optional_getter!(cx, &tls, "clientCertPair", JsObject) { - None => None, - Some(client_tls_obj) => Some(ClientTlsConfig { - client_cert: get_vec( - cx, - &client_tls_obj, - "crt", - "serverOptions.tls.clientCertPair.crt", - )?, - client_private_key: get_vec( - cx, - &client_tls_obj, - "key", - "serverOptions.tls.clientCertPair.crt", - )?, - }), - }; - - Some(TlsConfig { - server_root_ca_cert, - domain, - client_tls_config, - }) - } - }; - - let proxy_cfg = match js_optional_getter!(cx, self, "proxy", JsObject) { - None => None, - Some(proxy) => { - let target_addr = js_value_getter!(cx, &proxy, "targetHost", JsString); - - let basic_auth = match js_optional_getter!(cx, &proxy, "basicAuth", JsObject) { - None => None, - Some(proxy_obj) => Some(( - js_value_getter!(cx, &proxy_obj, "username", JsString), - js_value_getter!(cx, &proxy_obj, "password", 
JsString), - )), - }; - - Some(HttpConnectProxyOptions { - target_addr, - basic_auth, - }) - } - }; - - let retry_config = match js_optional_getter!(cx, self, "retry", JsObject) { - None => RetryConfig::default(), - Some(ref retry_config) => RetryConfig { - initial_interval: Duration::from_millis(js_value_getter!( - cx, - retry_config, - "initialInterval", - JsNumber - ) as u64), - randomization_factor: js_value_getter!( - cx, - retry_config, - "randomizationFactor", - JsNumber - ), - multiplier: js_value_getter!(cx, retry_config, "multiplier", JsNumber), - max_interval: Duration::from_millis(js_value_getter!( - cx, - retry_config, - "maxInterval", - JsNumber - ) as u64), - max_elapsed_time: js_optional_value_getter!( - cx, - retry_config, - "maxElapsedTime", - JsNumber - ) - .map(|val| Duration::from_millis(val as u64)), - max_retries: js_value_getter!(cx, retry_config, "maxRetries", JsNumber) as usize, - }, - }; - - let mut client_options = ClientOptionsBuilder::default(); - if let Some(tls_cfg) = tls_cfg { - client_options.tls_cfg(tls_cfg); - } - client_options.http_connect_proxy(proxy_cfg); - let headers = match js_optional_getter!(cx, self, "metadata", JsObject) { - None => None, - Some(h) => Some(h.as_hash_map_of_string_to_string(cx).map_err(|reason| { - cx.throw_type_error::<_, HashMap>(format!( - "Invalid metadata: {}", - reason - )) - .unwrap_err() - })?), - }; - client_options.headers(headers); - client_options.api_key(js_optional_value_getter!(cx, self, "apiKey", JsString)); - - Ok(client_options - .client_name("temporal-typescript".to_string()) - .client_version(js_value_getter!(cx, self, "sdkVersion", JsString)) - .target_url(url) - .retry_config(retry_config) - .disable_error_code_metric_tags(js_value_getter!( - cx, - self, - "disableErrorCodeMetricTags", - JsBoolean - )) - .build() - .expect("Core server gateway options must be valid")) - } - - fn as_telemetry_options(&self, cx: &mut FunctionContext) -> NeonResult { - let mut telemetry_opts = 
TelemetryOptionsBuilder::default(); - - if let Some(ref logging) = js_optional_getter!(cx, self, "logging", JsObject) { - let filter = js_value_getter!(cx, logging, "filter", JsString); - if get_optional(cx, logging, "console").is_some() { - telemetry_opts.logging(Logger::Console { filter }); - } else if get_optional(cx, logging, "forward").is_some() { - telemetry_opts.logging(Logger::Forward { filter }); - } else { - cx.throw_type_error( - "Invalid telemetryOptions.logging, expected either 'console' or 'forward' property", - )?; - } - } - - let mut meter_maker = None; - - if let Some(ref metrics) = js_optional_getter!(cx, self, "metrics", JsObject) { - telemetry_opts.metric_prefix(js_value_getter!(cx, metrics, "metricPrefix", JsString)); - - let global_tags = match js_optional_getter!(cx, metrics, "globalTags", JsObject) { - None => None, - Some(global_tags) => Some(global_tags.as_hash_map_of_string_to_string(cx)?), - }; - - telemetry_opts.attach_service_name(js_value_getter!( - cx, - metrics, - "attachServiceName", - JsBoolean - )); - - if let Some(ref prom) = js_optional_getter!(cx, metrics, "prometheus", JsObject) { - if js_optional_getter!(cx, metrics, "otel", JsObject).is_some() { - cx.throw_type_error( - "Invalid telemetryOptions.metrics: can't have both premetheus and otel at the same time", - )? 
- } - - let mut options = PrometheusExporterOptionsBuilder::default(); - - let addr = js_value_getter!(cx, prom, "bindAddress", JsString); - match addr.parse::() { - Ok(addr) => options.socket_addr(addr), - Err(_) => { - return cx.throw_type_error( - "Invalid telemetryOptions.metrics.prometheus.bindAddress", - )?; - } - }; - - options.counters_total_suffix(js_value_getter!( - cx, - prom, - "countersTotalSuffix", - JsBoolean - )); - - options.unit_suffix(js_value_getter!(cx, prom, "unitSuffix", JsBoolean)); - - options.use_seconds_for_durations(js_value_getter!( - cx, - prom, - "useSecondsForDurations", - JsBoolean - )); - - if let Some(global_tags) = global_tags { - options.global_tags(global_tags); - } - - if let Some(histogram_bucket_overrides) = - js_optional_getter!(cx, prom, "histogramBucketOverrides", JsObject) - { - options.histogram_bucket_overrides(HistogramBucketOverrides { - overrides: histogram_bucket_overrides - .as_hash_map_of_string_to_vec_of_floats(cx)?, - }); - } - - let options = options.build().map_err(|e| { - cx.throw_type_error::<_, TelemetryOptions>(format!( - "Failed to build prometheus exporter options: {:?}", - e - )) - .unwrap_err() - })?; - - meter_maker = - Some( - Box::new(move || match start_prometheus_metric_exporter(options) { - Ok(prom_info) => Ok(prom_info.meter as Arc), - Err(e) => Err(format!("Failed to start prometheus exporter: {}", e)), - }) as BoxedMeterMaker, - ); - } else if let Some(ref otel) = js_optional_getter!(cx, metrics, "otel", JsObject) { - let mut options = OtelCollectorOptionsBuilder::default(); - - let url = js_value_getter!(cx, otel, "url", JsString); - match Url::parse(&url) { - Ok(url) => options.url(url), - Err(e) => { - return cx.throw_type_error(format!( - "Invalid telemetryOptions.metrics.otel.url: {}", - e - ))?; - } - }; - - if js_value_getter!(cx, otel, "http", JsBoolean) { - options.protocol(OtlpProtocol::Http); - } else { - options.protocol(OtlpProtocol::Grpc); - } - - if let Some(ref headers) = 
js_optional_getter!(cx, otel, "headers", JsObject) { - options.headers(headers.as_hash_map_of_string_to_string(cx)?); - }; - - if let Some(metric_periodicity) = - js_optional_value_getter!(cx, otel, "metricsExportInterval", JsNumber) - .map(|f| f as u64) - { - options.metric_periodicity(Duration::from_millis(metric_periodicity)); - } - - options.use_seconds_for_durations(js_value_getter!( - cx, - otel, - "useSecondsForDurations", - JsBoolean - )); - - match js_value_getter!(cx, otel, "temporality", JsString).as_str() { - "cumulative" => options.metric_temporality(MetricTemporality::Cumulative), - "delta" => options.metric_temporality(MetricTemporality::Delta), - _ => { - return cx.throw_type_error("Invalid telemetryOptions.metrics.otel.temporality, expected 'cumulative' or 'delta'"); - } - }; - - if let Some(global_tags) = global_tags { - options.global_tags(global_tags); - } - - if let Some(histogram_bucket_overrides) = - js_optional_getter!(cx, otel, "histogramBucketOverrides", JsObject) - { - options.histogram_bucket_overrides(HistogramBucketOverrides { - overrides: histogram_bucket_overrides - .as_hash_map_of_string_to_vec_of_floats(cx)?, - }); - } - - let options = options.build().map_err(|e| { - cx.throw_type_error::<_, TelemetryOptions>(format!( - "Failed to build otlp exporter options: {:?}", - e - )) - .unwrap_err() - })?; - - meter_maker = Some(Box::new(move || match build_otlp_metric_exporter(options) { - Ok(otlp_exporter) => Ok(Arc::new(otlp_exporter) as Arc), - Err(e) => Err(format!("Failed to start otlp exporter: {}", e)), - }) as BoxedMeterMaker); - } else { - cx.throw_type_error( - "Invalid telemetryOptions.metrics, missing `prometheus` or `otel` option", - )? 
- } - } - - Ok(( - telemetry_opts.build().map_err(|reason| { - cx.throw_type_error::<_, TelemetryOptions>(format!("{}", reason)) - .unwrap_err() - })?, - meter_maker, - )) - } - - fn as_worker_config(&self, cx: &mut FunctionContext) -> NeonResult { - let namespace = js_value_getter!(cx, self, "namespace", JsString); - let task_queue = js_value_getter!(cx, self, "taskQueue", JsString); - let enable_remote_activities = - js_value_getter!(cx, self, "enableNonLocalActivities", JsBoolean); - let max_concurrent_wft_polls = - js_value_getter!(cx, self, "maxConcurrentWorkflowTaskPolls", JsNumber) as usize; - let max_concurrent_at_polls = - js_value_getter!(cx, self, "maxConcurrentActivityTaskPolls", JsNumber) as usize; - let sticky_queue_schedule_to_start_timeout = Duration::from_millis(js_value_getter!( - cx, - self, - "stickyQueueScheduleToStartTimeoutMs", - JsNumber - ) as u64); - let max_cached_workflows = - js_value_getter!(cx, self, "maxCachedWorkflows", JsNumber) as usize; - - let max_heartbeat_throttle_interval = Duration::from_millis(js_value_getter!( - cx, - self, - "maxHeartbeatThrottleIntervalMs", - JsNumber - ) as u64); - - let default_heartbeat_throttle_interval = Duration::from_millis(js_value_getter!( - cx, - self, - "defaultHeartbeatThrottleIntervalMs", - JsNumber - ) as u64); - - let max_worker_activities_per_second = - js_optional_getter!(cx, self, "maxActivitiesPerSecond", JsNumber) - .map(|num| num.value(cx)); - let max_task_queue_activities_per_second = - js_optional_getter!(cx, self, "maxTaskQueueActivitiesPerSecond", JsNumber) - .map(|num| num.value(cx)); - - let graceful_shutdown_period = - js_optional_getter!(cx, self, "shutdownGraceTimeMs", JsNumber) - .map(|num| Duration::from_millis(num.value(cx) as u64)); - - let nonsticky_to_sticky_poll_ratio = - js_value_getter!(cx, self, "nonStickyToStickyPollRatio", JsNumber) as f32; - - let tuner = if let Some(tuner) = js_optional_getter!(cx, self, "tuner", JsObject) { - let mut tuner_holder = 
TunerHolderOptionsBuilder::default(); - let mut rbo = None; - - if let Some(wf_slot_supp) = - js_optional_getter!(cx, &tuner, "workflowTaskSlotSupplier", JsObject) - { - tuner_holder.workflow_slot_options(wf_slot_supp.into_slot_supplier(cx, &mut rbo)?); - } - if let Some(act_slot_supp) = - js_optional_getter!(cx, &tuner, "activityTaskSlotSupplier", JsObject) - { - tuner_holder.activity_slot_options(act_slot_supp.into_slot_supplier(cx, &mut rbo)?); - } - if let Some(local_act_slot_supp) = - js_optional_getter!(cx, &tuner, "localActivityTaskSlotSupplier", JsObject) - { - tuner_holder.local_activity_slot_options( - local_act_slot_supp.into_slot_supplier(cx, &mut rbo)?, - ); - } - if let Some(rbo) = rbo { - tuner_holder.resource_based_options(rbo); - } - match tuner_holder.build_tuner_holder() { - Err(e) => { - return cx.throw_error(format!("Invalid tuner options: {:?}", e)); - } - Ok(th) => Arc::new(th), - } - } else { - return cx.throw_error("Missing tuner"); - }; - - match WorkerConfigBuilder::default() - .worker_build_id(js_value_getter!(cx, self, "buildId", JsString)) - .client_identity_override(Some(js_value_getter!(cx, self, "identity", JsString))) - .use_worker_versioning(js_value_getter!(cx, self, "useVersioning", JsBoolean)) - .no_remote_activities(!enable_remote_activities) - .tuner(tuner) - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(max_concurrent_wft_polls)) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(max_concurrent_at_polls)) - .nonsticky_to_sticky_poll_ratio(nonsticky_to_sticky_poll_ratio) - .max_cached_workflows(max_cached_workflows) - .sticky_queue_schedule_to_start_timeout(sticky_queue_schedule_to_start_timeout) - .graceful_shutdown_period(graceful_shutdown_period) - .namespace(namespace) - .task_queue(task_queue) - .max_heartbeat_throttle_interval(max_heartbeat_throttle_interval) - .default_heartbeat_throttle_interval(default_heartbeat_throttle_interval) - 
.max_worker_activities_per_second(max_worker_activities_per_second) - .max_task_queue_activities_per_second(max_task_queue_activities_per_second) - .build() - { - Ok(worker_cfg) => Ok(worker_cfg), - Err(e) => cx.throw_error(format!("Invalid worker config: {:?}", e)), - } - } - - fn set_default(&self, cx: &mut FunctionContext, key: &str, value: &str) -> NeonResult<()> { - let key = cx.string(key); - let existing: Option> = self.get_opt(cx, key)?; - if existing.is_none() { - let value = cx.string(value); - self.set(cx, key, value)?; - } - Ok(()) - } - - fn as_ephemeral_server_config( - &self, - cx: &mut FunctionContext, - sdk_version: String, - ) -> NeonResult { - let js_executable = js_optional_getter!(cx, self, "executable", JsObject) - .unwrap_or_else(|| cx.empty_object()); - js_executable.set_default(cx, "type", "cached-download")?; - - let exec_type = js_value_getter!(cx, &js_executable, "type", JsString); - let executable = match exec_type.as_str() { - "cached-download" => { - let version = js_optional_value_getter!(cx, &js_executable, "version", JsString) - .unwrap_or_else(|| "default".to_owned()); - let dest_dir = - js_optional_value_getter!(cx, &js_executable, "downloadDir", JsString); - let ttl = js_optional_value_getter!(cx, &self, "ttlMs", JsNumber); - - let exec_version = match version.as_str() { - "default" => { - temporal_sdk_core::ephemeral_server::EphemeralExeVersion::SDKDefault { - sdk_name: "sdk-typescript".to_owned(), - sdk_version, - } - } - _ => temporal_sdk_core::ephemeral_server::EphemeralExeVersion::Fixed(version), - }; - temporal_sdk_core::ephemeral_server::EphemeralExe::CachedDownload { - version: exec_version, - dest_dir, - ttl: ttl.map(|ttl| Duration::from_millis(ttl as u64)), - } - } - "existing-path" => { - let path = js_value_getter!(cx, &js_executable, "path", JsString); - temporal_sdk_core::ephemeral_server::EphemeralExe::ExistingPath(path) - } - _ => { - return cx.throw_type_error(format!("Invalid executable type: {}", exec_type))?; 
- } - }; - let port = js_optional_getter!(cx, self, "port", JsNumber).map(|s| s.value(cx) as u16); - - let server_type = js_value_getter!(cx, self, "type", JsString); - match server_type.as_str() { - "dev-server" => { - let mut config = TemporalDevServerConfigBuilder::default(); - config.exe(executable).port(port); - - if let Some(extra_args) = js_optional_getter!(cx, self, "extraArgs", JsArray) { - config.extra_args(extra_args.to_vec_of_string(cx)?); - }; - if let Some(namespace) = js_optional_value_getter!(cx, self, "namespace", JsString) - { - config.namespace(namespace); - } - if let Some(ip) = js_optional_value_getter!(cx, self, "ip", JsString) { - config.ip(ip); - } - config.db_filename(js_optional_value_getter!(cx, self, "dbFilename", JsString)); - config.ui(js_optional_value_getter!(cx, self, "ui", JsBoolean).unwrap_or_default()); - config.ui_port( - js_optional_getter!(cx, self, "uiPort", JsNumber).map(|s| s.value(cx) as u16), - ); - if let Some(log) = js_optional_getter!(cx, self, "log", JsObject) { - let format = js_value_getter!(cx, &log, "format", JsString); - let level = js_value_getter!(cx, &log, "level", JsString); - config.log((format, level)); - } - - match config.build() { - Ok(config) => Ok(EphemeralServerConfig::DevServer(config)), - Err(err) => { - cx.throw_type_error(format!("Invalid dev server config: {:?}", err)) - } - } - } - "time-skipping" => { - let mut config = TestServerConfigBuilder::default(); - config.exe(executable).port(port); - - if let Some(extra_args_js) = js_optional_getter!(cx, self, "extraArgs", JsArray) { - let extra_args = extra_args_js.to_vec_of_string(cx)?; - config.extra_args(extra_args); - }; - - match config.build() { - Ok(config) => Ok(EphemeralServerConfig::TestServer(config)), - Err(err) => { - cx.throw_type_error(format!("Invalid test server config: {:?}", err)) - } - } - } - s => cx.throw_type_error(format!( - "Invalid ephemeral server type: {}, expected 'dev-server' or 'time-skipping'", - s - )), - } - } - - fn 
into_slot_supplier( - self, - cx: &mut FunctionContext, - rbo: &mut Option, - ) -> NeonResult> { - match js_value_getter!(cx, &self, "type", JsString).as_str() { - "fixed-size" => Ok(SlotSupplierOptions::FixedSize { - slots: js_value_getter!(cx, &self, "numSlots", JsNumber) as usize, - }), - "resource-based" => { - let min_slots = js_value_getter!(cx, &self, "minimumSlots", JsNumber); - let max_slots = js_value_getter!(cx, &self, "maximumSlots", JsNumber); - let ramp_throttle = js_value_getter!(cx, &self, "rampThrottleMs", JsNumber) as u64; - if let Some(tuner_opts) = js_optional_getter!(cx, &self, "tunerOptions", JsObject) { - let target_mem = - js_value_getter!(cx, &tuner_opts, "targetMemoryUsage", JsNumber); - let target_cpu = js_value_getter!(cx, &tuner_opts, "targetCpuUsage", JsNumber); - *rbo = Some( - ResourceBasedSlotsOptionsBuilder::default() - .target_cpu_usage(target_cpu) - .target_mem_usage(target_mem) - .build() - .expect("Building ResourceBasedSlotsOptions can't fail"), - ) - } else { - return cx - .throw_type_error("Resource based slot supplier requires tunerOptions"); - }; - Ok(SlotSupplierOptions::ResourceBased( - ResourceSlotOptions::new( - min_slots as usize, - max_slots as usize, - Duration::from_millis(ramp_throttle), - ), - )) - } - "custom" => { - let ssb = SlotSupplierBridge::new(cx, self)?; - Ok(SlotSupplierOptions::Custom(Arc::new(ssb))) - } - _ => cx.throw_type_error("Invalid slot supplier type"), - } - } -} diff --git a/packages/core-bridge/src/conversions/slot_supplier_bridge.rs b/packages/core-bridge/src/conversions/slot_supplier_bridge.rs deleted file mode 100644 index 6b418f8dd..000000000 --- a/packages/core-bridge/src/conversions/slot_supplier_bridge.rs +++ /dev/null @@ -1,288 +0,0 @@ -use crate::helpers::{get_optional, js_getter}; -use log::{error, warn}; -use neon::types::JsNull; -use neon::{ - context::Context, - context::FunctionContext, - event::Channel, - handle::{Handle, Root}, - object::Object, - prelude::{JsBuffer, 
JsFunction, JsObject, JsPromise, JsUndefined, JsValue, NeonResult, Value}, -}; -use prost::Message; -use std::{cell::RefCell, marker::PhantomData, sync::Arc, time::Duration}; -use temporal_sdk_core::api::worker::{ - SlotKind, SlotKindType, SlotMarkUsedContext, SlotReleaseContext, SlotReservationContext, - SlotSupplier, SlotSupplierPermit, -}; -use tokio::sync::oneshot; - -pub struct SlotSupplierBridge { - inner: Arc>, - reserve_cb: Arc>, - try_reserve_cb: Arc>, - mark_used_cb: Arc>, - release_cb: Arc>, - channel: Channel, - _kind: PhantomData, -} - -impl SlotSupplierBridge { - pub(crate) fn new(cx: &mut FunctionContext, obj: Handle<'_, JsObject>) -> NeonResult { - Ok(Self { - inner: Arc::new(obj.root(cx)), - // Callbacks for each function are cached to reduce calling overhead - reserve_cb: Arc::new(js_getter!(cx, &obj, "reserveSlot", JsFunction).root(cx)), - try_reserve_cb: Arc::new(js_getter!(cx, &obj, "tryReserveSlot", JsFunction).root(cx)), - mark_used_cb: Arc::new(js_getter!(cx, &obj, "markSlotUsed", JsFunction).root(cx)), - release_cb: Arc::new(js_getter!(cx, &obj, "releaseSlot", JsFunction).root(cx)), - channel: cx.channel(), - _kind: PhantomData, - }) - } -} - -struct BridgePermitData { - permit: Arc>, -} - -struct CallAbortOnDrop { - chan: Channel, - aborter: oneshot::Receiver>, -} - -impl Drop for CallAbortOnDrop { - fn drop(&mut self) { - if let Ok(aborter) = self.aborter.try_recv() { - let _ = self.chan.try_send(move |mut cx| { - let cb = aborter.to_inner(&mut cx); - let this = cx.undefined(); - let _ = cb.call(&mut cx, this, []); - Ok(()) - }); - } - } -} - -static PERMIT_DATA_FIELD: &str = "permit_data"; - -#[async_trait::async_trait] -impl SlotSupplier for SlotSupplierBridge { - type SlotKind = SK; - - async fn reserve_slot(&self, ctx: &dyn SlotReservationContext) -> SlotSupplierPermit { - loop { - let inner = self.inner.clone(); - let rcb = self.reserve_cb.clone(); - let task_queue = ctx.task_queue().to_string(); - let worker_identity = 
ctx.worker_identity().to_string(); - let worker_build_id = ctx.worker_build_id().to_string(); - let is_sticky = ctx.is_sticky(); - - let (callback_fut, _abort_on_drop) = match self - .channel - .send(move |mut cx| { - let context = Self::mk_reserve_ctx( - task_queue, - worker_identity, - worker_build_id, - is_sticky, - &mut cx, - )?; - let (aborter_tx, aborter) = oneshot::channel(); - let abort_on_drop = CallAbortOnDrop { - chan: cx.channel(), - aborter, - }; - let aborter_tx = RefCell::new(Some(aborter_tx)); - let abort_func = JsFunction::new(&mut cx, move |mut cx| { - let func: Handle = cx.argument(0)?; - if let Some(aborter_tx) = aborter_tx.take() { - let _ = aborter_tx.send(func.root(&mut cx)); - } - Ok(cx.undefined()) - })? - .upcast(); - - let this = (*inner).clone(&mut cx).into_inner(&mut cx); - let val = rcb - .to_inner(&mut cx) - .call(&mut cx, this, [context, abort_func])?; - let as_prom = val.downcast_or_throw::(&mut cx)?; - let fut = as_prom.to_future(&mut cx, |mut cx, result| match result { - Ok(value) => { - let permit_obj = JsObject::new(&mut cx); - permit_obj.set(&mut cx, PERMIT_DATA_FIELD, value)?; - Ok(Ok(permit_obj.root(&mut cx))) - } - Err(_) => Ok(Err(())), - })?; - Ok((fut, abort_on_drop)) - }) - .await - { - Ok(v) => v, - Err(e) => { - warn!("Error reserving slot: {:?}", e); - continue; - } - }; - - match callback_fut.await { - Ok(Ok(res)) => { - let permit = SlotSupplierPermit::with_user_data(BridgePermitData { - permit: Arc::new(res), - }); - return permit; - } - // Error in user function - Ok(Err(())) => { - // Nothing to do here. Error in user's function (or an abort). - // Logging handled on the JS side. 
- } - Err(e) => { - error!( - "There was an error in the rust/node bridge while reserving a slot: {}", - e - ); - } - } - // Wait a beat to avoid spamming errors - tokio::time::sleep(Duration::from_millis(1000)).await; - } - } - - fn try_reserve_slot(&self, ctx: &dyn SlotReservationContext) -> Option { - let inner = self.inner.clone(); - let rcb = self.try_reserve_cb.clone(); - let task_queue = ctx.task_queue().to_string(); - let worker_identity = ctx.worker_identity().to_string(); - let worker_build_id = ctx.worker_build_id().to_string(); - let is_sticky = ctx.is_sticky(); - - // This is... unfortunate but since this method is called from an async context way up - // the stack, but is not async itself AND we need some way to get the result from the JS - // callback, we must use this roundabout way of blocking. Simply calling `join` on the - // channel send won't work - it'll panic because it calls block_on internally. - let runtime_handle = tokio::runtime::Handle::current(); - let _entered = runtime_handle.enter(); - let callback_res = futures::executor::block_on(self.channel.send(move |mut cx| { - let context = Self::mk_reserve_ctx( - task_queue, - worker_identity, - worker_build_id, - is_sticky, - &mut cx, - )?; - - let this = (*inner).clone(&mut cx).into_inner(&mut cx); - let val = rcb.to_inner(&mut cx).call(&mut cx, this, [context])?; - if val.is_a::(&mut cx) || val.is_a::(&mut cx) { - return Ok(None); - } - let permit_obj = JsObject::new(&mut cx); - permit_obj.set(&mut cx, PERMIT_DATA_FIELD, val)?; - Ok(Some(permit_obj.root(&mut cx))) - })); - - // Ignore errors, they'll be logged by JS - callback_res.ok().flatten().map(|res| { - SlotSupplierPermit::with_user_data(BridgePermitData { - permit: Arc::new(res), - }) - }) - } - - fn mark_slot_used(&self, ctx: &dyn SlotMarkUsedContext) { - let inner = self.inner.clone(); - let cb = self.mark_used_cb.clone(); - let permit_data = ctx - .permit() - .user_data::() - .map(|d| d.permit.clone()); - // Get the slot info as 
bytes - let slot_info_bytes = ctx.info().encode_to_vec(); - - self.channel.send(move |mut cx| { - let context = JsObject::new(&mut cx); - if let Some(permit_obj) = permit_data { - let ph: Handle = permit_obj.to_inner(&mut cx); - let pd = ph.get_value(&mut cx, PERMIT_DATA_FIELD)?; - context.set(&mut cx, "permit", pd)?; - } - let slot_info = JsBuffer::from_slice(&mut cx, &slot_info_bytes)?; - context.set(&mut cx, "slotInfo", slot_info)?; - let context = context.as_value(&mut cx); - - let this = (*inner).clone(&mut cx).into_inner(&mut cx); - let val = cb.to_inner(&mut cx).call(&mut cx, this, [context])?; - if val.is_a::(&mut cx) { - return Ok(None); - } - let as_obj = val.downcast_or_throw::(&mut cx)?; - Ok(Some(as_obj.root(&mut cx))) - }); - } - - fn release_slot(&self, ctx: &dyn SlotReleaseContext) { - let inner = self.inner.clone(); - let cb = self.release_cb.clone(); - let permit_data = ctx - .permit() - .user_data::() - .map(|d| d.permit.clone()); - // Get the slot info as bytes - let slot_info_bytes = ctx.info().map(|m| m.encode_to_vec()); - - self.channel.send(move |mut cx| { - let context = JsObject::new(&mut cx); - if let Some(permit_obj) = permit_data { - let ph: Handle = permit_obj.to_inner(&mut cx); - let pd = ph.get_value(&mut cx, PERMIT_DATA_FIELD)?; - context.set(&mut cx, "permit", pd)?; - } - if let Some(slot_info_bytes) = slot_info_bytes { - let slot_info = JsBuffer::from_slice(&mut cx, &slot_info_bytes)?; - context.set(&mut cx, "slotInfo", slot_info)?; - } - let context = context.as_value(&mut cx); - - let this = (*inner).clone(&mut cx).into_inner(&mut cx); - let val = cb.to_inner(&mut cx).call(&mut cx, this, [context])?; - if val.is_a::(&mut cx) { - return Ok(None); - } - let as_obj = val.downcast_or_throw::(&mut cx)?; - Ok(Some(as_obj.root(&mut cx))) - }); - } -} - -impl SlotSupplierBridge { - fn mk_reserve_ctx<'a, C: Context<'a>>( - task_queue: String, - worker_identity: String, - worker_build_id: String, - is_sticky: bool, - cx: &mut C, - ) -> 
NeonResult> { - let context = JsObject::new(cx); - let slottype = cx.string(match SK::kind() { - SlotKindType::Workflow => "workflow", - SlotKindType::Activity => "activity", - SlotKindType::LocalActivity => "local-activity", - SlotKindType::Nexus => "nexus", - }); - context.set(cx, "slotType", slottype)?; - let tq = cx.string(task_queue); - context.set(cx, "taskQueue", tq)?; - let wid = cx.string(worker_identity); - context.set(cx, "workerIdentity", wid)?; - let bid = cx.string(worker_build_id); - context.set(cx, "workerBuildId", bid)?; - let is_sticky = cx.boolean(is_sticky); - context.set(cx, "isSticky", is_sticky)?; - let context = context.as_value(cx); - Ok(context) - } -} diff --git a/packages/core-bridge/src/ephemeral_server.rs b/packages/core-bridge/src/ephemeral_server.rs new file mode 100644 index 000000000..5a86abee5 --- /dev/null +++ b/packages/core-bridge/src/ephemeral_server.rs @@ -0,0 +1,251 @@ +use std::{process::Stdio, sync::Arc}; + +use anyhow::Context as _; +use neon::prelude::*; + +use temporal_sdk_core::ephemeral_server::{ + EphemeralServer as CoreEphemeralServer, TemporalDevServerConfig as CoreTemporalDevServerConfig, + TestServerConfig as CoreTestServerConfig, +}; + +use bridge_macros::js_function; +use temporal_sdk_core::CoreRuntime; + +use crate::helpers::*; +use crate::runtime::{Runtime, RuntimeExt as _}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub struct EphemeralServer { + core_runtime: Arc, + core_server: CoreEphemeralServer, +} + +impl Finalize for EphemeralServer {} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("startEphemeralServer", start_ephemeral_server)?; + cx.export_function("getEphemeralServerTarget", get_ephemeral_server_target)?; + cx.export_function("shutdownEphemeralServer", 
shutdown_ephemeral_server)?; + + Ok(()) +} + +/// start an ephemeral temporal server +#[js_function] +pub fn start_ephemeral_server( + runtime: OpaqueInboundHandle, + config: config::EphemeralServerConfig, +) -> BridgeResult>> { + let runtime = runtime.borrow_inner()?.core_runtime.clone(); + let config: CoreEphemeralServerConfig = config.try_into()?; + + runtime.clone().future_to_promise(async move { + // Node intentionally drops stdout/stderr on process fork for security reasons, + // which is causing various issues with ephemeral servers. To work around that + // behavior, we explicitly force stdout/stderr on the child process. + let stdout = Stdio::from(std::io::stdout()); + let stderr = Stdio::from(std::io::stderr()); + + let core_server = config + .start_server(stdout, stderr) + .await + .context("Failed to start ephemeral server")?; + + Ok(OpaqueOutboundHandle::new(EphemeralServer { + core_runtime: runtime, + core_server, + })) + }) +} + +/// get the ephemeral server "target" (address:port string) +#[js_function] +pub fn get_ephemeral_server_target( + server: OpaqueInboundHandle, +) -> BridgeResult { + Ok(server.borrow_inner()?.core_server.target.clone()) +} + +/// shutdown an ephemeral server - consumes the server +#[js_function] +pub fn shutdown_ephemeral_server( + server: OpaqueInboundHandle, +) -> BridgeResult> { + let mut server = server.take_inner()?; + let runtime = server.core_runtime; + + runtime.future_to_promise(async move { + server + .core_server + .shutdown() + .await + .context("Failed to shutdown ephemeral server")?; + Ok(()) + }) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +enum CoreEphemeralServerConfig { + TimeSkipping(CoreTestServerConfig), + DevServer(CoreTemporalDevServerConfig), +} + +impl CoreEphemeralServerConfig { + async fn start_server( + self, + stdout: Stdio, + stderr: Stdio, + ) -> anyhow::Result { + match self { + Self::TimeSkipping(config) => 
config.start_server_with_output(stdout, stderr).await, + Self::DevServer(config) => config.start_server_with_output(stdout, stderr).await, + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +mod config { + use std::time::Duration; + + use anyhow::Context as _; + + use temporal_sdk_core::ephemeral_server::{ + EphemeralExe, EphemeralExeVersion, TemporalDevServerConfig as CoreTemporalDevServerConfig, + TemporalDevServerConfigBuilder, TestServerConfig as CoreTestServerConfig, + TestServerConfigBuilder, + }; + + use bridge_macros::TryFromJs; + + use crate::helpers::BridgeError; + + #[derive(Debug, Clone, TryFromJs)] + pub(super) enum EphemeralServerConfig { + TimeSkipping(TimeSkippingServerConfig), + DevServer(DevServerConfig), + } + + impl TryInto for EphemeralServerConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + match self { + Self::TimeSkipping(config) => Ok(super::CoreEphemeralServerConfig::TimeSkipping( + config.try_into()?, + )), + Self::DevServer(config) => Ok(super::CoreEphemeralServerConfig::DevServer( + config.try_into()?, + )), + } + } + } + + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct TimeSkippingServerConfig { + executable: EphemeralServerExecutableConfig, + port: Option, + extra_args: Vec, + } + + impl TryInto for TimeSkippingServerConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + let mut config = TestServerConfigBuilder::default(); + let config = config + .exe(self.executable.try_into()?) 
+ .port(self.port) + .extra_args(self.extra_args) + .build() + .context("Bulding Test Server config")?; + + Ok(config) + } + } + + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct DevServerConfig { + executable: EphemeralServerExecutableConfig, + ip: String, + port: Option, + ui: bool, + ui_port: Option, + namespace: String, + db_filename: Option, + log: DevServerLogConfig, + extra_args: Vec, + } + + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct DevServerLogConfig { + format: String, + level: String, + } + + impl TryInto for DevServerConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + let mut config = TemporalDevServerConfigBuilder::default(); + let config = config + .exe(self.executable.try_into()?) + .ip(self.ip) + .port(self.port) + .ui(self.ui) + .ui_port(self.ui_port) + .namespace(self.namespace) + .db_filename(self.db_filename) + .log((self.log.format, self.log.level)) + .extra_args(self.extra_args) + .build() + .context("Bulding Dev Server config")?; + + Ok(config) + } + } + + #[derive(Debug, Clone, TryFromJs)] + enum EphemeralServerExecutableConfig { + CachedDownload(CachedDownloadExecutable), + ExistingPath(ExistingPathExecutable), + } + + #[derive(Debug, Clone, TryFromJs)] + struct CachedDownloadExecutable { + download_dir: Option, + version: String, + ttl: Duration, + sdk_version: String, + } + + #[derive(Debug, Clone, TryFromJs)] + struct ExistingPathExecutable { + path: String, + } + + impl TryInto for EphemeralServerExecutableConfig { + type Error = BridgeError; + + fn try_into(self) -> Result { + match self { + Self::CachedDownload(config) => Ok(EphemeralExe::CachedDownload { + version: match config.version.as_str() { + "default" => EphemeralExeVersion::SDKDefault { + sdk_name: "sdk-typescript".to_owned(), + sdk_version: config.sdk_version, + }, + _ => EphemeralExeVersion::Fixed(config.version), + }, + dest_dir: config.download_dir, + ttl: Some(config.ttl), + }), + Self::ExistingPath(config) => 
Ok(EphemeralExe::ExistingPath(config.path)), + } + } + } +} diff --git a/packages/core-bridge/src/errors.rs b/packages/core-bridge/src/errors.rs deleted file mode 100644 index e92318531..000000000 --- a/packages/core-bridge/src/errors.rs +++ /dev/null @@ -1,38 +0,0 @@ -use neon::prelude::*; - -/// An unhandled error while communicating with the server, considered fatal -pub static TRANSPORT_ERROR: &str = "TransportError"; -/// Thrown after shutdown was requested as a response to a poll function, JS should stop polling -/// once this error is encountered -pub static SHUTDOWN_ERROR: &str = "ShutdownError"; -/// Something unexpected happened, considered fatal -pub static UNEXPECTED_ERROR: &str = "UnexpectedError"; -/// Used in different parts of the project to signal that something unexpected has happened -pub static ILLEGAL_STATE_ERROR: &str = "IllegalStateError"; - -pub fn make_named_error_from_string<'a, C>( - cx: &mut C, - name: &str, - message: impl Into, -) -> JsResult<'a, JsError> -where - C: Context<'a>, -{ - let error = cx.error(message.into()).unwrap(); - let name = cx.string(name); - error.set(cx, "name", name)?; - - Ok(error) -} - -pub fn make_named_error_from_error<'a, C, E>( - cx: &mut C, - name: &str, - err: E, -) -> JsResult<'a, JsError> -where - C: Context<'a>, - E: std::error::Error, -{ - make_named_error_from_string(cx, name, format!("{:?}", err)) -} diff --git a/packages/core-bridge/src/helpers.rs b/packages/core-bridge/src/helpers.rs deleted file mode 100644 index 4054c013c..000000000 --- a/packages/core-bridge/src/helpers.rs +++ /dev/null @@ -1,297 +0,0 @@ -use crate::errors::*; -use neon::{prelude::*, types::buffer::TypedArray}; -use std::{fmt::Display, future::Future, sync::Arc}; - -/// Send a result to JS via callback using a [Channel] -pub fn send_result(channel: Arc, callback: Root, res_fn: F) -where - F: for<'a> FnOnce(&mut TaskContext<'a>) -> NeonResult> + Send + 'static, - T: Value, -{ - channel.send(move |mut cx| { - let callback = 
callback.into_inner(&mut cx); - let this = cx.undefined(); - let error = cx.undefined(); - let result = res_fn(&mut cx)?; - let args: Vec> = vec![error.upcast(), result.upcast()]; - callback.call(&mut cx, this, args)?; - Ok(()) - }); -} - -/// Send an error to JS via callback using a [Channel] -pub fn send_error(channel: Arc, callback: Root, error_ctor: F) -where - E: Object, - F: for<'a> FnOnce(&mut TaskContext<'a>) -> JsResult<'a, E> + Send + 'static, -{ - channel.send(move |mut cx| { - let callback = callback.into_inner(&mut cx); - callback_with_error(&mut cx, callback, error_ctor) - }); -} - -/// Call `callback` with given error -pub fn callback_with_error<'a, C, E, F>( - cx: &mut C, - callback: Handle, - error_ctor: F, -) -> NeonResult<()> -where - C: Context<'a>, - E: Object, - F: FnOnce(&mut C) -> JsResult<'a, E> + Send + 'static, -{ - let this = cx.undefined(); - let error = error_ctor(cx)?; - let result = cx.undefined(); - let args: Vec> = vec![error.upcast(), result.upcast()]; - callback.call(cx, this, args)?; - Ok(()) -} - -/// Call `callback` with an UnexpectedError created from `err` -pub fn callback_with_unexpected_error<'a, C, E>( - cx: &mut C, - callback: Handle, - err: E, -) -> NeonResult<()> -where - C: Context<'a>, - E: Display, -{ - let err_str = format!("{}", err); - callback_with_error(cx, callback, move |cx| { - make_named_error_from_string(cx, UNEXPECTED_ERROR, err_str) - }) -} - -/// When Future completes, call given JS callback using a neon::Channel with either error or -/// undefined -pub async fn void_future_to_js( - channel: Arc, - callback: Root, - f: F, - error_function: EF, -) where - E: Display + Send + 'static, - F: Future> + Send, - ER: Object, - EF: for<'a> FnOnce(&mut TaskContext<'a>, E) -> JsResult<'a, ER> + Send + 'static, -{ - match f.await { - Ok(()) => { - send_result(channel, callback, |cx| Ok(cx.undefined())); - } - Err(err) => { - send_error(channel, callback, |cx| error_function(cx, err)); - } - } -} - -macro_rules! 
js_optional_getter { - ($js_cx:expr, $js_obj:expr, $prop_name:expr, $js_type:ty) => { - match get_optional($js_cx, $js_obj, $prop_name) { - None => None, - Some(val) => { - if val.is_a::<$js_type, _>($js_cx) { - Some(val.downcast_or_throw::<$js_type, _>($js_cx)?) - } else { - Some($js_cx.throw_type_error(format!("Invalid {}", $prop_name))?) - } - } - } - }; -} - -pub(crate) use js_optional_getter; - -macro_rules! js_getter { - ($js_cx:expr, $js_obj:expr, $prop_name:expr, $js_type:ty) => { - match get_optional($js_cx, $js_obj, $prop_name) { - None => $js_cx.throw_type_error(format!("{} must be defined", $prop_name))?, - Some(val) => { - if val.is_a::<$js_type, _>($js_cx) { - val.downcast_or_throw::<$js_type, _>($js_cx)? - } else { - $js_cx.throw_type_error(format!("Invalid {}", $prop_name))? - } - } - } - }; -} - -pub(crate) use js_getter; - -macro_rules! js_optional_value_getter { - ($js_cx:expr, $js_obj:expr, $prop_name:expr, $js_type:ty) => { - js_optional_getter!($js_cx, $js_obj, $prop_name, $js_type).map(|v| v.value($js_cx)) - }; -} - -pub(crate) use js_optional_value_getter; - -macro_rules! js_value_getter { - ($js_cx:expr, $js_obj:expr, $prop_name:expr, $js_type:ty) => { - match js_optional_getter!($js_cx, $js_obj, $prop_name, $js_type) { - Some(val) => val.value($js_cx), - None => $js_cx.throw_type_error(format!("{} must be defined", $prop_name))?, - } - }; -} - -pub(crate) use js_value_getter; - -/// Helper for extracting an optional attribute from [obj]. 
-/// If [obj].[attr] is undefined or not present, None is returned -pub fn get_optional<'a, C, K>( - cx: &mut C, - obj: &Handle, - attr: K, -) -> Option> -where - K: neon::object::PropertyKey, - C: Context<'a>, -{ - match obj.get_value(cx, attr) { - Err(_) => None, - Ok(val) => match val.is_a::(cx) { - true => None, - false => Some(val), - }, - } -} - -/// Helper for extracting a Vec from optional Buffer at [obj].[attr] -pub fn get_optional_vec<'a, C, K>( - cx: &mut C, - obj: &Handle, - attr: K, -) -> Result>, neon::result::Throw> -where - K: neon::object::PropertyKey + Display + Clone, - C: Context<'a>, -{ - if let Some(val) = get_optional(cx, obj, attr.clone()) { - let buf = val.downcast::(cx).map_err(|_| { - cx.throw_type_error::<_, Option>>(format!("Invalid {}", attr)) - .unwrap_err() - })?; - Ok(Some(buf.as_slice(cx).to_vec())) - } else { - Ok(None) - } -} - -/// Helper for extracting a Vec from optional Buffer at [obj].[attr] -pub fn get_vec<'a, C, K>( - cx: &mut C, - obj: &Handle, - attr: K, - full_attr_path: &str, -) -> Result, neon::result::Throw> -where - K: neon::object::PropertyKey + Display + Clone, - C: Context<'a>, -{ - if let Some(val) = get_optional(cx, obj, attr.clone()) { - let buf = val.downcast::(cx).map_err(|_| { - cx.throw_type_error::<_, Option>>(format!("Invalid {}", attr)) - .unwrap_err() - })?; - Ok(buf.as_slice(cx).to_vec()) - } else { - cx.throw_type_error::<_, Vec>(format!("Invalid or missing {}", full_attr_path)) - } -} - -// Recursively convert a Serde value to a JS value -pub fn serde_value_to_js_value<'a>( - cx: &mut impl Context<'a>, - val: serde_json::Value, -) -> JsResult<'a, JsValue> { - match val { - serde_json::Value::String(s) => Ok(cx.string(s).upcast()), - serde_json::Value::Number(n) => Ok(cx.number(n.as_f64().unwrap()).upcast()), - serde_json::Value::Bool(b) => Ok(cx.boolean(b).upcast()), - serde_json::Value::Null => Ok(cx.null().upcast()), - serde_json::Value::Array(vec) => { - let arr: Handle<'a, JsArray> = 
JsArray::new(cx, vec.len()); - for (i, v) in vec.into_iter().enumerate() { - let v = serde_value_to_js_value(cx, v)?; - arr.set(cx, i as u32, v)?; - } - Ok(arr.upcast()) - } - serde_json::Value::Object(map) => hashmap_to_js_value(cx, map).map(|v| v.upcast()), - } -} - -pub fn hashmap_to_js_value<'a>( - cx: &mut impl Context<'a>, - map: impl IntoIterator, -) -> JsResult<'a, JsObject> { - let obj: Handle<'a, JsObject> = cx.empty_object(); - for (k, v) in map { - let k = cx.string(snake_to_camel(k)); - let v = serde_value_to_js_value(cx, v)?; - obj.set(cx, k, v)?; - } - Ok(obj) -} - -fn snake_to_camel(input: String) -> String { - match input.find('_') { - None => input, - Some(first) => { - let mut result = String::with_capacity(input.len()); - if first > 0 { - result.push_str(&input[..first]); - } - let mut capitalize = true; - for c in input[first + 1..].chars() { - if c == '_' { - capitalize = true; - } else if capitalize { - result.push(c.to_ascii_uppercase()); - capitalize = false; - } else { - result.push(c.to_ascii_lowercase()); - } - } - result - } - } -} - -#[allow(dead_code)] -// Useful to help debug JSObject contents -pub fn log_js_object<'a, 'b, C: Context<'b>>(cx: &mut C, js_object: &Handle<'a, JsObject>) { - let global = cx.global_object(); - let console = global - .get::(cx, "console") - .expect("Failed to get console object"); - - let log = console - .get::(cx, "log") - .expect("Failed to get log function"); - - let args = vec![js_object.upcast()]; // Upcast js_object to JsValue - log.call(cx, console, args) - .expect("Failed to call console.log"); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn snake_to_camel_works() { - assert_eq!(snake_to_camel("this_is_a_test".into()), "thisIsATest"); - assert_eq!(snake_to_camel("this___IS_a_TEST".into()), "thisIsATest"); - assert_eq!( - snake_to_camel("éàç_this_is_a_test".into()), - "éàçThisIsATest" - ); - } -} diff --git a/packages/core-bridge/src/helpers/abort_controller.rs 
b/packages/core-bridge/src/helpers/abort_controller.rs new file mode 100644 index 000000000..094fa5a10 --- /dev/null +++ b/packages/core-bridge/src/helpers/abort_controller.rs @@ -0,0 +1,152 @@ +use std::sync::{Arc, OnceLock}; + +use neon::{ + handle::{Handle, Root}, + object::Object, + prelude::{Context, JsResult}, + types::{JsFunction, JsObject, JsValue}, +}; + +use super::{BridgeResult, JsCallback, TryIntoJs, errors::IntoThrow as _}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub type JsAbortSignal = JsValue; + +/// An object that modelizes a JavaScript `AbortController`, and its corresponding `AbortSignal`, +/// allowing the Rust side to fire that signal if/when needed, e.g. when dropped from the Rust side. +/// +/// The JS counterpart objects are lazily intantiated when the signal gets converted to JS; this ensures +/// that the Rust side can be created without waiting for acquisition of the JS execution lock. +pub struct AbortController { + inner: Arc, + drop_abort_reason: String, +} + +impl AbortController { + /// Create a new `AbortController` and `AbortSignal` pair. + /// + /// The `drop_abort_reason` string will be used as the reason for the abort + /// if the controller is dropped from the Rust side. + #[must_use] + pub fn new(drop_abort_reason: String) -> (Self, AbortSignal) { + let inner = AbortControllerInner { + js_counterpart: OnceLock::new(), + aborted: OnceLock::new(), + }; + let inner = Arc::new(inner); + ( + Self { + inner: inner.clone(), + drop_abort_reason, + }, + AbortSignal { inner }, + ) + } + + pub fn abort(&self, reason: impl Into) { + self.inner.abort(reason); + } +} + +impl Drop for AbortController { + fn drop(&mut self) { + self.abort(self.drop_abort_reason.clone()); + } +} + +/// An object that models the signal of a JavaScript `AbortController`. 
+pub struct AbortSignal { + inner: Arc, +} + +impl TryIntoJs for AbortSignal { + type Output = JsAbortSignal; + + fn try_into_js<'cx>(self, cx: &mut impl Context<'cx>) -> JsResult<'cx, JsAbortSignal> { + let signal = self.inner.ensure_js_initialized(cx).into_throw(cx)?; + Ok(signal) + } +} + +/// The inner state of an `AbortController`, shared between the Rust and JS sides. +struct AbortControllerInner { + // The fact that we require a `Context` in `ensure_js_initialized` means that we are running on + // the Node's thread, which guarantees that there can't be multiple threads calling into that + // function concurrently; that should in theory aleviate the need to use a lock on `js_counterpart`. + // + // It is however possible for the rust-side controller to get aborted from a non-Node thread + // while the JS-side controller is being created on the Node thread, in which case we don't + // want the Rust-side thread to get blocked for the JS-side to complete instantiation. + // + // By modelling the "JS initialization" and "is aborted" states as two distinct independant + // structures, we ensure that we're never blocking execution of either thread. This however + // means that either step may happen before the other, so we need to be careful not to miss + // sending the abort signal. The good news is that nothing bad will happen if we call the JS + // abort callback multiple times. + js_counterpart: OnceLock>, + aborted: OnceLock, +} + +struct AbortControllerJsCounterpart { + signal: Root, + abort: JsCallback<(String,), ()>, +} + +impl AbortControllerInner { + /// Create the JS `AbortController` if it hasn't been created yet. + /// Returns a reference to the signal object that can be passed to JS. 
+ fn ensure_js_initialized<'cx, C: Context<'cx>>( + &self, + cx: &mut C, + ) -> BridgeResult> { + if let Some(js_counterpart) = self.js_counterpart.get() { + // Already initialized, return the signal + return Ok(js_counterpart.signal.to_inner(cx).upcast()); + } + + // Not initialized yet, create the JS AbortController + let global = cx.global_object(); + let abort_controller_class = global.get::(cx, "AbortController")?; + + let abort_controller = abort_controller_class.construct(cx, [])?; + let signal = abort_controller.get::(cx, "signal")?; + let abort_fn = abort_controller.get::(cx, "abort")?; + + let abort_cb = JsCallback::new(cx, abort_fn, Some(abort_controller)); + + let js_counterpart = Arc::new(AbortControllerJsCounterpart { + signal: signal.root(cx), + abort: abort_cb, + }); + + let js_counterpart = match self.js_counterpart.set(js_counterpart.clone()) { + Ok(()) => { + // If the Rust controller has already been aborted, call the JS abort callback now + // VALIDATE: Do we need a memory barrier here to ensure that js_counterpart and aborted are coherent? + // I assume that the get() call ensures visibility of the js_counterpart + if let Some(aborted) = self.aborted.get() { + // Fire and forget + let _ = js_counterpart.abort.call_on_js_thread((aborted.clone(),)); + } + js_counterpart + } + Err(js_counterpart) => js_counterpart, + }; + + Ok(js_counterpart.signal.to_inner(cx).upcast()) + } + + /// Immediately abort the `AbortController`, causing the JS side `signal` to fire. + fn abort(&self, reason: impl Into) { + let reason = reason.into(); + if self.aborted.set(reason.clone()) == Ok(()) { + // If we haven't created the JS AbortController yet, there's nothing to abort + // VALIDATE: Do we need a memory barrier here to ensure that js_counterpart and aborted are coherent? 
+ if let Some(js_counterpart) = self.js_counterpart.get() { + // Fire and forget + let _ = js_counterpart.abort.call_on_js_thread((reason,)); + } + } + } +} diff --git a/packages/core-bridge/src/helpers/callbacks.rs b/packages/core-bridge/src/helpers/callbacks.rs new file mode 100644 index 000000000..c67b73756 --- /dev/null +++ b/packages/core-bridge/src/helpers/callbacks.rs @@ -0,0 +1,300 @@ +use std::{marker::PhantomData, sync::Arc}; + +use neon::{ + event::Channel, + handle::{Handle, Root}, + object::Object, + prelude::Context, + types::{JsFunction, JsFuture, JsObject, JsPromise, JsValue, Value as _}, +}; + +use super::{BridgeError, BridgeResult, TryFromJs, TryIntoJs, errors::IntoThrow as _}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// A callback is a JS function that is meant to be called by the Rust side. +/// A `JsCallback` is a callback that returns a value synchronously. +/// +/// Note that at this level, a callback is a pure function, not a method on an object; +/// that is, the callback will be called with `this` set to `undefined`. If the JS side +/// API presents the callback as a method on some object, then the function should be +/// `bind(this)`'d on the JS side before passing it to the Rust side. 
+#[derive(Debug)] +pub struct JsCallback +where + Args: TryIntoJsArgs + Send + Sync, + Ret: TryFromJs + Send + Sync, +{ + inner: Arc>, +} + +impl Clone for JsCallback +where + Args: TryIntoJsArgs + Send + Sync, + Ret: TryFromJs + Send + Sync, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl JsCallback +where + Args: TryIntoJsArgs + Send + Sync + 'static, + Ret: TryFromJs + Send + Sync + 'static, +{ + pub fn new<'cx, C: Context<'cx>>( + cx: &mut C, + func: Handle, + this: Option>, + ) -> Self { + Self { + inner: Arc::new(CallbackInner { + this: this.map(|t| t.root(cx)), + func: func.root(cx), + func_name: func + .to_string(cx) + .map_or_else(|_| "anonymous func".to_owned(), |s| s.value(cx)), + chan: cx.channel(), + _marker: PhantomData, + }), + } + } + + /// Call the callback on the JS thread and return a handle to the result. + pub fn call_on_js_thread(&self, args: Args) -> BridgeResult> { + let inner = self.inner.clone(); + + let ret = inner + .chan + .clone() + .try_send(move |mut cx| inner.call(&mut cx, args).into_throw(&mut cx)) + .map_err(|e| BridgeError::Other(e.into()))?; + + Ok(ret) + } + + pub fn call_and_block(&self, args: Args) -> BridgeResult { + let join_handle = self.call_on_js_thread(args)?; + + // This is... unfortunate but since this method is called from an async context way up + // the stack, but is not async itself AND we need some way to get the result from the JS + // callback, we must use this roundabout way of blocking. Simply calling `join` on the + // channel send won't work - it'll panic because it calls block_on internally. + let callback_res = futures::executor::block_on(join_handle); + + match callback_res { + Ok(x) => Ok(x), + Err(e) => Err(BridgeError::Other(e.into())), + } + } +} + +/// Unfortunately, the `TryFromJS` trait doesn't have access to the containing object, and therefore +/// can't preserve the `this` context. 
If the JS side API presents the callback as a method on some +/// object, then the function should be `bind(this)`'d on the JS side before passing it to Rust. +impl TryFromJs for JsCallback +where + Args: TryIntoJsArgs + Send + Sync + 'static, + Ret: TryFromJs + Send + Sync + 'static, +{ + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let func = js_value.downcast::(cx)?; + Ok(Self::new(cx, func, None)) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub struct JsAsyncCallback +where + Args: TryIntoJsArgs + Send + Sync, + Ret: TryFromJs + Send + Sync + 'static, +{ + inner: Arc>>>, +} + +impl JsAsyncCallback +where + Args: TryIntoJsArgs + Send + Sync + 'static, + Ret: TryFromJs + Send + Sync + 'static, +{ + pub fn new<'cx, C: Context<'cx>>( + cx: &mut C, + func: Handle, + this: Option>, + ) -> Self { + Self { + inner: Arc::new(CallbackInner { + this: this.map(|t| t.root(cx)), + func: func.root(cx), + func_name: func + .to_string(cx) + .map_or_else(|_| "anonymous func".to_owned(), |s| s.value(cx)), + chan: cx.channel(), + _marker: PhantomData, + }), + } + } + + pub async fn call(&self, args: Args) -> BridgeResult { + let inner = self.inner.clone(); + + let join_handle = inner + .chan + .clone() + .try_send(move |mut cx| inner.call(&mut cx, args).into_throw(&mut cx)) + .map_err(|e| BridgeError::Other(e.into()))?; + + // Wait for the JS function to return a Promise... + let res = join_handle.await; + let fut = res.map_err(|e| BridgeError::Other(e.into()))?; + + // ... and then wait for the Promise to resolve + let res = fut.await; + let res = res.map_err(|e| BridgeError::Other(e.into()))??; + Ok(res) + } +} + +/// Unfortunately, the `TryFromJS` trait doesn't have access to the containing object, and therefore +/// can't preserve the `this` context. 
If the JS side API presents the callback as a method on some +/// object, then the function should be `bind(this)`'d on the JS side before passing it to Rust. +impl TryFromJs for JsAsyncCallback +where + Args: TryIntoJsArgs + Send + Sync + 'static, + Ret: TryFromJs + Send + Sync + 'static, +{ + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let func = js_value.downcast::(cx)?; + Ok(Self::new(cx, func, None)) + } +} + +impl TryFromJs for JsFuture> { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let prom = js_value.downcast::(cx)?; + let fut = prom.to_future(cx, |mut cx, result| match result { + // to_future() expects a function that throws a JS , + Ok(value) => { + // Promise resolved, but there might still be an error trying to + // convert the promise's returned value to the desired type. + let val = R::try_from_js(&mut cx, value); + match val { + Ok(val) => Ok(Ok(val)), + Err(e) => Ok(Err(e)), + } + } + Err(e) => { + // Promise failed to resolve + let err_str = e.to_string(&mut cx)?.value(&mut cx); + Ok(Err(BridgeError::UnexpectedError(err_str))) + } + })?; + + Ok(fut) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug)] +struct CallbackInner +where + Args: TryIntoJsArgs + Send + Sync, + Ret: TryFromJs + Send + Sync, +{ + this: Option>, + func: Root, + func_name: String, + chan: Channel, + _marker: PhantomData<(Args, Ret)>, +} + +impl CallbackInner { + fn call<'a, C: Context<'a>>(&self, cx: &mut C, args: Args) -> BridgeResult { + let this: Handle<'a, JsValue> = self + .this + .as_ref() + .map_or(cx.undefined().upcast(), |t| t.to_inner(cx).upcast()); + + // Convert the arguments to a JS array using the new trait + let js_args = args.try_into_js_args(cx)?; + + // Call the function with the JS arguments directly + let ret = cx + .try_catch(|cx| 
self.func.to_inner(cx).call(cx, this, js_args)) + .map_err(|e| { + let err_str = format!( + "Error calling JS callback '{}' on JS thread, {:?}", + self.func_name, e + ); + log::info!("{err_str}"); + BridgeError::UnexpectedError(err_str) + })?; + + ::try_from_js(cx, ret) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// A trait to build an arguments array for a JS function call from a tuple. +pub trait TryIntoJsArgs { + fn try_into_js_args<'cx, 'a>( + self, + cx: &mut impl Context<'cx>, + ) -> BridgeResult>> + where + 'cx: 'a; +} + +impl TryIntoJsArgs for () { + fn try_into_js_args<'cx, 'a>( + self, + _cx: &mut impl Context<'cx>, + ) -> BridgeResult>> + where + 'cx: 'a, + { + Ok(Vec::new()) + } +} + +impl TryIntoJsArgs for (T0,) { + fn try_into_js_args<'cx, 'a>( + self, + cx: &mut impl Context<'cx>, + ) -> BridgeResult>> + where + 'cx: 'a, + { + let js_value = self.0.try_into_js(cx)?; + Ok(vec![js_value.upcast()]) + } +} + +impl TryIntoJsArgs for (T0, T1) { + fn try_into_js_args<'cx, 'a>( + self, + cx: &mut impl Context<'cx>, + ) -> BridgeResult>> + where + 'cx: 'a, + { + let v0 = self.0.try_into_js(cx)?; + let v1 = self.1.try_into_js(cx)?; + Ok(vec![v0.upcast(), v1.upcast()]) + } +} diff --git a/packages/core-bridge/src/helpers/errors.rs b/packages/core-bridge/src/helpers/errors.rs new file mode 100644 index 000000000..327f13eb9 --- /dev/null +++ b/packages/core-bridge/src/helpers/errors.rs @@ -0,0 +1,294 @@ +use std::cell::RefCell; + +use anyhow::Error as AnyhowError; +use neon::{handle::DowncastError, prelude::*, result::Throw}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// A specialized Result type for errors that can be rethrown as a JS error. +pub type BridgeResult = Result; + +/// Errors that can be rethrown by the Bridge as JS errors. 
+#[derive(Debug, thiserror::Error)] +#[allow(clippy::option_if_let_else)] +pub enum BridgeError { + /// General error related to conversion between JS and Rust types. + /// + /// Use `BridgeResult::field()` to prepend a field name to the error while + /// while propagating the error up through call stack. + /// + /// Becomes a JS `TypeError`. + #[error("{}{message}", field_prefix(.field))] + TypeError { + field: Option, + message: String, + }, + + /// A specific variant of type errors that indicates that a JS provided object + /// that is expected to be an enum variant carries an invalid `type` value. + /// + /// Use `BridgeResult::field()` to prepend a field name to the error while + /// while propagating the error up through call stack. Doing so will implicitly + /// convert the error into the more generic `TypeError` variant. + /// + /// Becomes a JS `TypeError`. + #[error("Invalid variant '{variant}' for enum {enum_name}")] + InvalidVariant { enum_name: String, variant: String }, + + /// Error when a transport error occurs. + /// + /// Becomes a JS `TransportError`. + #[error("{0}")] + TransportError(String), + + /// Error when a resource is still in use while it shouldn't be. + /// + /// Becomes a JS `IllegalStateError`. + #[error("{what} still in use{}", details_suffix(.details))] + IllegalStateStillInUse { + what: &'static str, + details: Option, + }, + + /// Error when a resource is already closed while it shouldn't be. + /// + /// Becomes a JS `IllegalStateError`. + #[error("{what} already closed")] + IllegalStateAlreadyClosed { what: &'static str }, + + /// Something unexpected happened, considered fatal. + /// + /// Becomes a JS `UnexpectedError`. + #[error("Unexpected error: {0}")] + UnexpectedError(String), + + /// An error used to inform the JS side that a worker has completed draining + /// pending tasks of a certain type. This is really just a poison pill, not + /// an actual error. + /// + /// Becomes a JS `ShutdownError`. 
+ #[error("Worker has been shutdown")] + WorkerShutdown, + + /// Generic wrapper for other errors. + /// + /// Becomes a JS `Error`. + #[error(transparent)] + Other(#[from] AnyhowError), + + /// Wrapper for errors that have already been _thrown_ from the JS context. This requires + /// special handling because while a JavaScript thread is throwing, its context cannot be used; + /// doing so would result in a panic. That notably means that we cannot use `cx.throw_error` to + /// throw a different error at that point. + /// + /// Though this case is technically unavoidable due to most Neon's APIs returning either + /// `NeonResult` or `JsResult`, and is better than just panicking from `.unwrap()` those, code + /// should strive as much as possible to detect potential errors in a way that will not result + /// in a JS exception being thrown. + /// + /// Unfortunately, it is not possible to extract the message or the type of the thrown error, so + /// this error will always have a message of "Error thrown from JS". + #[error("Error thrown from JS")] + JsThrow { thrown: RefCell> }, +} + +// Append Field Context //////////////////////////////////////////////////////////////////////////// + +pub trait AppendFieldContext { + /// Add context to a `TypeError` by prepending a field name. + /// + /// This is useful when converting from JS to Rust types, where the field name is known and can + /// be added to the error message to guide the user in the right direction. It is expected that + /// field name context will be provided on `BridgeResult` _after_ the conversion that may fail, + /// while propagating the error through the `BridgeResult` chain, hence the fact that fields + /// are _prepended_, not _appended_. 
+ #[must_use] + fn field(self, prepend_field: &str) -> Self; +} + +impl AppendFieldContext for BridgeResult { + fn field(self, prepend_field: &str) -> Self { + match self { + Ok(value) => Ok(value), + Err(e) => Err(e.field(prepend_field)), + } + } +} + +impl AppendFieldContext for BridgeError { + fn field(self, prepend_field: &str) -> Self { + match self { + Self::TypeError { + field: previous_field, + message, + } => Self::TypeError { + field: match previous_field { + Some(previous_field) => Some(format!("{prepend_field}.{previous_field}")), + None => Some(prepend_field.to_string()), + }, + message, + }, + Self::InvalidVariant { enum_name, variant } => Self::TypeError { + field: Some(prepend_field.to_string()), + message: format!("Invalid variant '{variant}' for enum {enum_name}"), + }, + e => e, + } + } +} + +// Conversions from other errors /////////////////////////////////////////////////////////////////// + +impl From> for BridgeError { + fn from(error: DowncastError) -> Self { + Self::TypeError { + field: None, + message: error.to_string(), + } + } +} + +// Conversion to/from Throw //////////////////////////////////////////////////////////////////////// + +impl From for BridgeError { + fn from(throw: Throw) -> Self { + Self::JsThrow { + thrown: RefCell::new(Some(ThrowBox(throw))), + } + } +} + +pub trait IntoThrow { + type Output; + fn into_throw<'a, C: Context<'a>>(self, cx: &mut C) -> NeonResult + where + Self: Sized; +} + +impl IntoThrow for BridgeResult { + type Output = T; + + fn into_throw<'cx, C: Context<'cx>>(self, cx: &mut C) -> NeonResult { + match self { + Ok(value) => Ok(value), + Err(err) => err.into_throw::(cx), + } + } +} + +impl BridgeError { + fn into_throw<'cx, C: Context<'cx>, T>(self, cx: &mut C) -> NeonResult { + match self { + Self::TypeError { .. } => cx.throw_type_error(self.to_string()), + Self::InvalidVariant { .. } => cx.throw_type_error(self.to_string()), + + Self::TransportError(..) 
=> throw_custom_error(cx, TRANSPORT_ERROR, self.to_string()), + + Self::IllegalStateStillInUse { .. } => { + throw_custom_error(cx, ILLEGAL_STATE_ERROR, self.to_string()) + } + + Self::IllegalStateAlreadyClosed { .. } => { + throw_custom_error(cx, ILLEGAL_STATE_ERROR, self.to_string()) + } + + Self::UnexpectedError(..) => throw_custom_error(cx, UNEXPECTED_ERROR, self.to_string()), + + Self::WorkerShutdown => throw_custom_error(cx, SHUTDOWN_ERROR, self.to_string()), + + Self::Other(e) => { + if let Some(thrown) = underlying_js_throw_error(&e) { + // FIXME: Send to logger + eprintln!("Error thrown from JavaScript side: {e:?}"); + Err(thrown) + } else { + cx.throw_error(format!("{e:#}")) + } + } + + Self::JsThrow { thrown } => Err(thrown.take().expect("Throw already consumed").0), + } + } +} + +/// Extracts a `Throw` from an `AnyhowError` chain. +fn underlying_js_throw_error(error: &AnyhowError) -> Option { + for cause in error.chain() { + if let Some(BridgeError::JsThrow { thrown }) = cause.downcast_ref::() { + return Some(thrown.take().expect("Throw already consumed").0); + } + } + None +} + +/// A wrapper around a `Throw` that implements `Send` and `Sync`. This is necessary because we need +/// `BridgeError` to be `Send`, but `BridgeError::JsThrow` contains a `Throw`, which isn't `Send`. +/// +/// We however know for sure that `BridgeError::JsThrow` will never be sent across threads, because +/// a `Throw`'s lifetime is attached to that of the JS `Context`, which itself isn't `Send`. +#[repr(transparent)] +pub struct ThrowBox(Throw); + +/// SAFETY: We know for sure that this type will never actually be sent across threads. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for ThrowBox {} + +/// SAFETY: We know for sure that this type will never actually be sent across threads. 
+unsafe impl Sync for ThrowBox {} + +impl std::fmt::Debug for ThrowBox { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +// Message formatting utilities //////////////////////////////////////////////////////////////////// + +fn field_prefix(field: &Option) -> String { + match field { + Some(field) => format!("{field}: "), + None => String::new(), + } +} + +fn details_suffix(details: &Option) -> String { + match details { + Some(details) => format!(": {details}"), + None => String::new(), + } +} + +// Custom JavaScript errors //////////////////////////////////////////////////////////////////////// + +/// Signals that a requested operation can't be completed because it is illegal given the +/// current state of the object; e.g. trying to use a resource after it has been closed. +const ILLEGAL_STATE_ERROR: &str = "IllegalStateError"; + +/// Something unexpected happened, considered fatal +const UNEXPECTED_ERROR: &str = "UnexpectedError"; + +/// An unhandled error while communicating with the server, considered fatal +const TRANSPORT_ERROR: &str = "TransportError"; + +/// Thrown after shutdown was requested as a response to a poll function, JS should stop polling +const SHUTDOWN_ERROR: &str = "ShutdownError"; + +/// Instantiate and throw a custom JS Error object with a given name and message. +/// +/// Note that this do not actually instantiate the proper JS classes, as those +/// are not accessible from here (i.e. they are not exposed as globals). Instead, +/// we simply override the `name` property of the error object; the bridge adds +/// a JS-side wrapper around native calls that will look at the `name` property +/// of every thrown errors, and replace them by proper instances of the custom +/// Error classes if appropriate. Refer to `core-bridge/ts/errors.ts`. 
+fn throw_custom_error<'cx, C: Context<'cx>, S: AsRef, T>( + cx: &mut C, + name: &str, + message: S, +) -> NeonResult { + let error = cx.error(message)?; + let name = cx.string(name); + error.set(cx, "name", name)?; + + cx.throw(error) +} diff --git a/packages/core-bridge/src/helpers/future.rs b/packages/core-bridge/src/helpers/future.rs new file mode 100644 index 000000000..8c128fdf0 --- /dev/null +++ b/packages/core-bridge/src/helpers/future.rs @@ -0,0 +1,45 @@ +use std::pin::Pin; + +use neon::{prelude::Context, result::JsResult, types::JsPromise}; + +use crate::helpers::{BridgeError, IntoThrow, TryIntoJs}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub struct BridgeFuture { + future: Pin> + Send + 'static>>, + tokio_handle: tokio::runtime::Handle, +} + +impl BridgeFuture { + #[must_use] + pub fn new( + future: Pin> + Send + 'static>>, + ) -> Self { + Self { + future, + tokio_handle: tokio::runtime::Handle::current(), + } + } +} + +impl TryIntoJs for BridgeFuture { + type Output = JsPromise; + + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsPromise> { + let cx_channel = cx.channel(); + let (deferred, promise) = cx.promise(); + + self.tokio_handle.spawn(async move { + let result = self.future.await; + let send_result = deferred.try_settle_with(&cx_channel, move |mut cx| { + result.into_throw(&mut cx)?.try_into_js(&mut cx) + }); + if let Err(err) = send_result { + eprint!("Failed to complete JS Promise: {err:?}"); + } + }); + + Ok(promise) + } +} diff --git a/packages/core-bridge/src/helpers/handles.rs b/packages/core-bridge/src/helpers/handles.rs new file mode 100644 index 000000000..554e99bcd --- /dev/null +++ b/packages/core-bridge/src/helpers/handles.rs @@ -0,0 +1,106 @@ +use std::{ + any, + cell::{Ref, RefCell}, + sync::Arc, +}; + +use neon::{prelude::*, types::JsBox}; + +use super::{BridgeError, BridgeResult, TryFromJs, TryIntoJs}; + 
+//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Opaque Handles are native structures that are sent into the JS side but without exposing +/// their internal structure; i.e. they are only meant to be passed back to the Rust side +/// on appropriate API calls. +/// +/// Due to very different ownership models and usage patterns, and to avoid unnecessary +/// complexity, Opaque Handles are split into two variants: +/// +/// - `OpaqueOutboundHandle` - for sending an opaque handle that was just created by the Rust +/// side to the JS side (e.g. as the return value of an API call). +/// - `OpaqueInboundHandle` - for accepting an opaque handle sent from the JS side to the Rust +/// side (e.g. as an argument to an API call). +pub struct OpaqueOutboundHandle { + inner: T, +} + +impl OpaqueOutboundHandle { + // We late-wrap into Arc>> so that T can be send across threads without mutex. + pub const fn new(inner: T) -> Self { + Self { inner } + } +} + +impl TryIntoJs for OpaqueOutboundHandle { + type Output = JsBox>>>; + fn try_into_js<'cx>(self, cx: &mut impl Context<'cx>) -> JsResult<'cx, Self::Output> { + Ok(cx.boxed(Arc::new(RefCell::new(Some(self.inner))))) + } +} + +/// Opaque Handles are native structures that are sent into the JS side but without exposing +/// their internal structure; i.e. they are only meant to be passed back to the Rust side +/// on appropriate API calls. +/// +/// Due to very different ownership models and usage patterns, and to avoid unnecessary +/// complexity, Opaque Handles are split into two variants: +/// +/// - `OpaqueOutboundHandle` - for sending an opaque handle that was just created by the Rust +/// side to the JS side (e.g. as the return value of an API call). +/// - `OpaqueInboundHandle` - for accepting an opaque handle sent from the JS side to the Rust +/// side (e.g. as an argument to an API call). 
+pub struct OpaqueInboundHandle +where + T: 'static, +{ + inner: Arc>>, +} + +impl TryFromJs for OpaqueInboundHandle +where + T: 'static, +{ + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let boxed = js_value.downcast::>>>, _>(cx)?; + Ok(Self { + inner: Arc::clone(&boxed), + }) + } +} + +impl OpaqueInboundHandle { + pub fn borrow_inner(&self) -> BridgeResult> { + match Ref::filter_map(self.inner.borrow(), std::option::Option::as_ref) { + Ok(inner_ref) => Ok(inner_ref), + Err(_guard) => Err(BridgeError::IllegalStateAlreadyClosed { + what: any::type_name::() + .rsplit("::") + .next() + .unwrap_or("Resource"), + }), + } + } + + pub fn map_inner(&self, f: impl FnOnce(&T) -> U) -> BridgeResult { + Ok(f(&*self.borrow_inner()?)) + } + + pub fn take_inner(&self) -> BridgeResult { + // It is safe to ignore risks of conflicting borrows as OpaqueInbound are only + // accessed while holding a Neon Context (indirectly), which itself guarantees + // we are running synchronously with the single Node thread. + match self.inner.borrow_mut().take() { + Some(x) => Ok(x), + None => Err(BridgeError::IllegalStateAlreadyClosed { + what: any::type_name::() + .rsplit("::") + .next() + .unwrap_or("Resource"), + }), + } + } +} diff --git a/packages/core-bridge/src/helpers/inspect.rs b/packages/core-bridge/src/helpers/inspect.rs new file mode 100644 index 000000000..c76f62b75 --- /dev/null +++ b/packages/core-bridge/src/helpers/inspect.rs @@ -0,0 +1,20 @@ +use neon::{context::Context, handle::Handle, prelude::*}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Print out a JS object to the console. +/// +/// Literally: `console.log(obj)`. 
+pub fn log_js_object<'a, 'b, C: Context<'b>>( + cx: &mut C, + js_object: &Handle<'a, JsValue>, +) -> NeonResult<()> { + let global = cx.global_object(); + let console = global.get::(cx, "console")?; + let log = console.get::(cx, "log")?; + let args = vec![js_object.upcast()]; // Upcast js_object to JsValue + + log.call(cx, console, args)?; + + Ok(()) +} diff --git a/packages/core-bridge/src/helpers/json_string.rs b/packages/core-bridge/src/helpers/json_string.rs new file mode 100644 index 000000000..69c7ceed9 --- /dev/null +++ b/packages/core-bridge/src/helpers/json_string.rs @@ -0,0 +1,47 @@ +use std::marker::PhantomData; + +use neon::{prelude::Context, result::JsResult, types::JsString}; +use serde::Serialize; +use serde_json; + +use super::{BridgeError, TryIntoJs}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// A newtype wrapper for a T serialized as a JSON string. +/// +/// Creating objects through NAPI is incredibly slow in Node due to the fact that object data +/// layout is recalculated every time a property is added. So much that, surprisingly, transferring +/// Rust objects to JS by serializing them to JSON strings then deserializing them back in JS +/// using `JSON.parse` is on average twice as fast as creating those objects using NAPI calls +/// (claim from the Neon's author, circa April 2025). +/// +/// This newtype wrapper allows specifying values that will be serialized to a JSON string +/// when being transferred to JS using the `TryIntoJs` trait. The JSON serialization happens +/// on the caller Rust thread, therefore limiting the time spent in the JS thread. 
+#[derive(Debug, Clone)] +pub struct JsonString { + json: String, + _phantom: PhantomData, +} + +impl JsonString +where + T: Serialize, +{ + pub fn try_from_value(value: T) -> Result { + let json = serde_json::to_string(&value) + .map_err(|e| BridgeError::Other(anyhow::Error::from(e)))?; + Ok(Self { + json, + _phantom: PhantomData, + }) + } +} + +impl TryIntoJs for JsonString { + type Output = JsString; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsString> { + Ok(cx.string(&self.json)) + } +} diff --git a/packages/core-bridge/src/helpers/mod.rs b/packages/core-bridge/src/helpers/mod.rs new file mode 100644 index 000000000..ae959c6c2 --- /dev/null +++ b/packages/core-bridge/src/helpers/mod.rs @@ -0,0 +1,20 @@ +pub mod abort_controller; +pub mod callbacks; +pub mod errors; +pub mod future; +pub mod handles; +pub mod inspect; +pub mod json_string; +pub mod properties; +pub mod try_from_js; +pub mod try_into_js; + +pub use abort_controller::{AbortController, AbortSignal}; +pub use callbacks::{JsAsyncCallback, JsCallback}; +pub use errors::{AppendFieldContext, BridgeError, BridgeResult, IntoThrow}; +pub use future::BridgeFuture; +pub use handles::{OpaqueInboundHandle, OpaqueOutboundHandle}; +pub use json_string::JsonString; +pub use properties::{FunctionContextExt as _, ObjectExt as _}; +pub use try_from_js::TryFromJs; +pub use try_into_js::TryIntoJs; diff --git a/packages/core-bridge/src/helpers/properties.rs b/packages/core-bridge/src/helpers/properties.rs new file mode 100644 index 000000000..1a362fb53 --- /dev/null +++ b/packages/core-bridge/src/helpers/properties.rs @@ -0,0 +1,45 @@ +use neon::prelude::*; + +use super::{AppendFieldContext as _, BridgeError, BridgeResult, TryFromJs}; + +/// Extension trait for `JsObject` that adds a method to get a property and convert +/// it to a value using `TryFromJs`. 
+/// +/// Type errors will be reported with `field` set to the name of the property being +/// accessed; it is expected that caller will prepend any additional path components +/// that led to this object, to help identify the object that failed. +pub trait ObjectExt: Object { + fn get_property_into<'cx, C: Context<'cx>, T: TryFromJs>( + &self, + cx: &mut C, + key: &str, + ) -> BridgeResult { + let value = self.get_value(cx, key)?; + if value.is_a::(cx) { + return Err(BridgeError::TypeError { + message: format!("Missing property '{key}'"), + field: Some(key.to_string()), + }); + } + ::try_from_js(cx, value).field(key) + } +} + +impl ObjectExt for JsObject {} + +/// Extension trait for `FunctionContext` that adds a method to get an argument and +/// convert it to a value using `TryFromJs`. +/// +/// Type errors will be reported with field name `args[index]`; it is expected that +/// caller will prepend the function name to the error message's `field` to help identify +/// the function that failed. 
+pub trait FunctionContextExt { + fn argument_into(&mut self, index: usize) -> BridgeResult; +} + +impl FunctionContextExt for FunctionContext<'_> { + fn argument_into(&mut self, index: usize) -> BridgeResult { + let value = self.argument::(index)?; + ::try_from_js(self, value).field(format!("args[{index}]").as_str()) + } +} diff --git a/packages/core-bridge/src/helpers/try_from_js.rs b/packages/core-bridge/src/helpers/try_from_js.rs new file mode 100644 index 000000000..a3c2ab016 --- /dev/null +++ b/packages/core-bridge/src/helpers/try_from_js.rs @@ -0,0 +1,222 @@ +use std::{collections::HashMap, net::SocketAddr, time::Duration}; + +use neon::{ + handle::Handle, + object::Object, + prelude::Context, + types::{ + JsArray, JsBoolean, JsBuffer, JsNull, JsNumber, JsObject, JsString, JsValue, Value, + buffer::TypedArray, + }, +}; +use temporal_sdk_core::Url; + +use super::{BridgeError, BridgeResult}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Trait for Rust types that can be created from JavaScript values, possibly throwing an error. 
+pub trait TryFromJs: Sized { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult; +} + +// TryFromJs implementations for primitives and other basic types ////////////////////////////////// + +impl TryFromJs for () { + fn try_from_js<'cx, 'b>( + _cx: &mut impl Context<'cx>, + _: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(()) + } +} + +impl TryFromJs for String { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.value(cx)) + } +} + +impl TryFromJs for bool { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.value(cx)) + } +} + +impl TryFromJs for u16 { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + Ok(js_value.downcast::(cx)?.value(cx) as Self) + } +} + +#[allow(clippy::cast_possible_truncation)] +impl TryFromJs for i32 { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.value(cx) as Self) + } +} + +#[allow(clippy::cast_possible_truncation)] +impl TryFromJs for f32 { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.value(cx) as Self) + } +} + +#[allow(clippy::cast_possible_truncation)] +impl TryFromJs for u64 { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + #[allow(clippy::cast_sign_loss)] + Ok(js_value.downcast::(cx)?.value(cx) as Self) + } +} + +impl TryFromJs for f64 { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.value(cx)) + } +} + +// impl 
TryFromJs for u128 { +// fn try_from_js<'cx, 'b>( +// cx: &mut impl Context<'cx>, +// js_value: Handle<'b, JsValue>, +// ) -> BridgeResult { +// Ok(js_value.downcast::(cx)?.value(cx) as Self) +// } +// } + +impl TryFromJs for usize { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + Ok(js_value.downcast::(cx)?.value(cx) as Self) + } +} + +impl TryFromJs for Duration { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + Ok(Self::from_millis( + js_value.downcast::(cx)?.value(cx) as u64, + )) + } +} + +impl TryFromJs for Option { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + if js_value.is_a::(cx) { + Ok(None) + } else { + Ok(Some(T::try_from_js(cx, js_value)?)) + } + } +} + +impl TryFromJs for Vec { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + Ok(js_value.downcast::(cx)?.as_slice(cx).to_vec()) + } +} + +impl TryFromJs for Vec { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let array = js_value.downcast::(cx)?; + let len = array.len(cx); + let mut result = Self::with_capacity(len as usize); + + for i in 0..len { + let value = array.get_value(cx, i)?; + result.push(T::try_from_js(cx, value)?); + } + Ok(result) + } +} + +#[allow(clippy::implicit_hasher)] +impl TryFromJs for HashMap { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let obj = js_value.downcast::(cx)?; + let props = obj.get_own_property_names(cx)?.to_vec(cx)?; + + let mut map = Self::new(); + for key_handle in props { + let key = key_handle.to_string(cx)?.value(cx); + let value = 
obj.get_value(cx, key_handle)?; + map.insert(key, T::try_from_js(cx, value)?); + } + Ok(map) + } +} + +impl TryFromJs for SocketAddr { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let addr = js_value.downcast::(cx)?; + addr.value(cx) + .parse::() + .map_err(|e| BridgeError::TypeError { + field: None, + message: e.to_string(), + }) + } +} + +impl TryFromJs for Url { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let url = js_value.downcast::(cx)?; + url.value(cx) + .parse::() + .map_err(|e| BridgeError::TypeError { + field: None, + message: e.to_string(), + }) + } +} diff --git a/packages/core-bridge/src/helpers/try_into_js.rs b/packages/core-bridge/src/helpers/try_into_js.rs new file mode 100644 index 000000000..ae456f86a --- /dev/null +++ b/packages/core-bridge/src/helpers/try_into_js.rs @@ -0,0 +1,106 @@ +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use neon::{ + object::Object, + prelude::Context, + result::JsResult, + types::{JsArray, JsBigInt, JsBoolean, JsBuffer, JsString, JsUndefined, JsValue, Value}, +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Trait for types that can be converted to JavaScript values, possibly throwing an error. 
+pub trait TryIntoJs { + type Output: Value; + fn try_into_js<'cx>(self, cx: &mut impl Context<'cx>) -> JsResult<'cx, Self::Output>; +} + +// TryIntoJs implementations for primitives and other basic types ////////////////////////////////// + +impl TryIntoJs for bool { + type Output = JsBoolean; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsBoolean> { + Ok(cx.boolean(self)) + } +} + +impl TryIntoJs for String { + type Output = JsString; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsString> { + Ok(cx.string(self.as_str())) + } +} + +impl TryIntoJs for Vec { + type Output = JsArray; + + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsArray> { + let array = cx.empty_array(); + for (i, item) in self.into_iter().enumerate() { + let item = item.try_into_js(cx)?; + #[allow(clippy::cast_possible_truncation)] + array.set(cx, i as u32, item)?; + } + Ok(array) + } +} + +impl TryIntoJs for Vec { + type Output = JsBuffer; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsBuffer> { + JsBuffer::from_slice(cx, &self) + } +} + +impl TryIntoJs for SystemTime { + type Output = JsBigInt; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsBigInt> { + let nanos = self + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_nanos(); + Ok(JsBigInt::from_u128(cx, nanos)) + } +} + +impl TryIntoJs for Option { + // Output really is (T::Output | JsNull), hence JsValue + type Output = JsValue; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsValue> { + if let Some(value) = self { + Ok(value.try_into_js(cx)?.upcast()) + } else { + Ok(cx.null().upcast()) + } + } +} + +impl TryIntoJs for Arc { + type Output = T::Output; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, T::Output> { + self.as_ref().clone().try_into_js(cx) + } +} + +impl TryIntoJs for () { + type Output = JsUndefined; + fn try_into_js<'a>(self, cx: &mut impl 
Context<'a>) -> JsResult<'a, JsUndefined> { + Ok(cx.undefined()) + } +} + +impl TryIntoJs for (T0, T1) { + type Output = JsArray; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsArray> { + let v0 = self.0.try_into_js(cx)?; + let v1 = self.1.try_into_js(cx)?; + + let array = cx.empty_array(); + array.set(cx, 0, v0)?; + array.set(cx, 1, v1)?; + Ok(array) + } +} diff --git a/packages/core-bridge/src/lib.rs b/packages/core-bridge/src/lib.rs index e4da6a8ce..fd2e0b86f 100644 --- a/packages/core-bridge/src/lib.rs +++ b/packages/core-bridge/src/lib.rs @@ -1,47 +1,32 @@ -mod conversions; -mod errors; -mod helpers; +#![warn( + clippy::pedantic, + clippy::nursery, + clippy::cargo, + clippy::perf, + clippy::style +)] +#![allow( + clippy::missing_errors_doc, + clippy::too_long_first_doc_paragraph, + clippy::option_if_let_else, + clippy::multiple_crate_versions +)] + +pub mod helpers; + +mod client; +mod ephemeral_server; +mod logs; mod runtime; -mod testing; mod worker; -use crate::runtime::*; -use crate::worker::*; -use neon::prelude::*; -use testing::*; - #[neon::main] -fn main(mut cx: ModuleContext) -> NeonResult<()> { - cx.export_function("getTimeOfDay", get_time_of_day)?; - cx.export_function("newRuntime", runtime_new)?; - cx.export_function("newClient", client_new)?; - cx.export_function("clientUpdateHeaders", client_update_headers)?; - cx.export_function("clientUpdateApiKey", client_update_api_key)?; - cx.export_function("newWorker", worker_new)?; - cx.export_function("newReplayWorker", replay_worker_new)?; - cx.export_function("pushHistory", push_history)?; - cx.export_function("closeHistoryStream", close_history_stream)?; - cx.export_function("workerInitiateShutdown", worker_initiate_shutdown)?; - cx.export_function("workerFinalizeShutdown", worker_finalize_shutdown)?; - cx.export_function("clientClose", client_close)?; - cx.export_function("runtimeShutdown", runtime_shutdown)?; - cx.export_function("pollLogs", poll_logs)?; - 
cx.export_function( - "workerPollWorkflowActivation", - worker_poll_workflow_activation, - )?; - cx.export_function("workerPollActivityTask", worker_poll_activity_task)?; - cx.export_function( - "workerCompleteWorkflowActivation", - worker_complete_workflow_activation, - )?; - cx.export_function("workerCompleteActivityTask", worker_complete_activity_task)?; - cx.export_function( - "workerRecordActivityHeartbeat", - worker_record_activity_heartbeat, - )?; - cx.export_function("startEphemeralServer", start_ephemeral_server)?; - cx.export_function("shutdownEphemeralServer", shutdown_ephemeral_server)?; - cx.export_function("getEphemeralServerTarget", get_ephemeral_server_target)?; +fn main(mut cx: neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + runtime::init(&mut cx)?; + client::init(&mut cx)?; + worker::init(&mut cx)?; + logs::init(&mut cx)?; + ephemeral_server::init(&mut cx)?; + Ok(()) } diff --git a/packages/core-bridge/src/logs.rs b/packages/core-bridge/src/logs.rs new file mode 100644 index 000000000..26e7cf556 --- /dev/null +++ b/packages/core-bridge/src/logs.rs @@ -0,0 +1,114 @@ +use std::{ + collections::HashMap, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use neon::prelude::*; + +use serde::{Serialize, ser::SerializeMap as _}; +use temporal_sdk_core::api::telemetry::CoreLog; + +use bridge_macros::js_function; + +// use crate::helpers::primitives::JsonString; +use crate::helpers::{BridgeError, BridgeResult, IntoThrow, JsonString, TryIntoJs}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("getTimeOfDay", get_time_of_day)?; + + Ok(()) +} + +/// Helper to get the current time in nanosecond resolution. Nano seconds timestamps are +/// used to precisely sort logs emitted from the Workflow Context, main thread, and Core. 
+#[js_function] +pub fn get_time_of_day() -> BridgeResult { + Ok(SystemTime::now()) +} + +#[derive(Debug, Clone, Serialize)] +pub struct LogEntry { + pub level: String, + pub timestamp: String, // u128 as a string - JSON doesn't support u128 numbers + pub message: String, + + #[serde(serialize_with = "serialize_map_as_camel_case")] + pub fields: HashMap, + // + // CoreLog has a `target` field, but we don't want to expose it to JS + // pub target: String, +} + +impl TryFrom for JsonString { + type Error = BridgeError; + + fn try_from(core_log: CoreLog) -> BridgeResult { + let timestamp = core_log + .timestamp + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_nanos(); + + Self::try_from_value(LogEntry { + level: core_log.level.to_string(), + timestamp: timestamp.to_string(), + message: core_log.message, + fields: core_log.fields, + }) + } +} + +fn serialize_map_as_camel_case( + value: &HashMap, + serializer: S, +) -> Result +where + S: serde::Serializer, +{ + // let mut map = serializer.serialize_map(Some(value.len()))?; + let mut map = serializer.serialize_map(Some(value.len()))?; + for (k, v) in value { + map.serialize_entry(&snake_to_camel(k), v)?; + } + map.end() +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +fn snake_to_camel(input: &str) -> String { + match input.find('_') { + None => input.to_string(), + Some(first) => { + let mut result = String::with_capacity(input.len()); + if first > 0 { + result.push_str(&input[..first]); + } + let mut capitalize = true; + for c in input[first + 1..].chars() { + if c == '_' { + capitalize = true; + } else if capitalize { + result.push(c.to_ascii_uppercase()); + capitalize = false; + } else { + result.push(c.to_ascii_lowercase()); + } + } + result + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn snake_to_camel_works() { + assert_eq!(snake_to_camel("this_is_a_test"), "thisIsATest"); + 
assert_eq!(snake_to_camel("this___IS_a_TEST"), "thisIsATest"); + assert_eq!(snake_to_camel("éàç_this_is_a_test"), "éàçThisIsATest"); + } +} diff --git a/packages/core-bridge/src/runtime.rs b/packages/core-bridge/src/runtime.rs index dd13cc2cc..cf74ddc7f 100644 --- a/packages/core-bridge/src/runtime.rs +++ b/packages/core-bridge/src/runtime.rs @@ -1,557 +1,440 @@ -use crate::{conversions::*, errors::*, helpers::*, worker::*}; -use neon::{context::Context, prelude::*}; -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - ops::Deref, - process::Stdio, - sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; -use temporal_client::{ClientInitError, ConfiguredClient, TemporalServiceClientWithMetrics}; +use std::{sync::Arc, time::Duration}; + +use anyhow::Context as _; +use futures::channel::mpsc::Receiver; +use neon::prelude::*; + use temporal_sdk_core::{ - ClientOptions, CoreRuntime, RetryClient, TokioRuntimeBuilder, WorkerConfig, - api::telemetry::CoreTelemetry, - ephemeral_server::EphemeralServer as CoreEphemeralServer, - init_replay_worker, init_worker, - replay::{HistoryForReplay, ReplayWorkerInput}, -}; -use tokio::sync::{ - Mutex, - mpsc::{Sender, UnboundedReceiver, UnboundedSender, channel, unbounded_channel}, - oneshot, + CoreRuntime, TokioRuntimeBuilder, + api::telemetry::{ + CoreLog, OtelCollectorOptions as CoreOtelCollectorOptions, + PrometheusExporterOptions as CorePrometheusExporterOptions, metrics::CoreMeter, + }, + telemetry::{build_otlp_metric_exporter, start_prometheus_metric_exporter}, }; -use tokio_stream::wrappers::ReceiverStream; -pub type CoreClient = RetryClient>; +use bridge_macros::js_function; +use tokio_stream::StreamExt as _; -#[derive(Clone)] -pub struct EphemeralServer { - pub(crate) runtime: Arc, - pub(crate) core_server: Arc>, -} -pub type BoxedEphemeralServer = JsBox>>; -impl Finalize for EphemeralServer {} +use crate::{helpers::*, logs::LogEntry}; -pub struct RuntimeHandle { - pub(crate) sender: UnboundedSender, -} 
+//////////////////////////////////////////////////////////////////////////////////////////////////// -/// Box it so we can use the runtime from JS -pub type BoxedRuntime = JsBox>; -impl Finalize for RuntimeHandle {} +pub struct Runtime { + // Public because it's accessed from all other modules + #[allow(clippy::struct_field_names)] + pub(crate) core_runtime: Arc, -#[derive(Clone)] -pub struct Client { - pub(crate) runtime: Arc, - pub(crate) core_client: Arc, + log_exporter_task: Option>>, + metrics_exporter_task: Option>, + + // For some unknown reason, the otel metrics exporter will go crazy on shutdown in some + // scenarios if we don't hold on to the `CoreOtelMeter` till the `Runtime` finally gets dropped. + _otel_metrics_exporter: Option>, } -pub type BoxedClient = JsBox>>; -impl Finalize for Client {} +impl Finalize for Runtime {} -/// A request from JS to bridge to core -pub enum RuntimeRequest { - /// A request to shutdown the runtime, breaks from the thread loop. - Shutdown { - /// Used to send the result back into JS - callback: Root, - }, - /// A request to create a client in a runtime - CreateClient { - runtime: Arc, - options: ClientOptions, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to update a client's HTTP request headers - UpdateClientHeaders { - client: Arc, - headers: HashMap, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to create a new Worker using a connected client - InitWorker { - /// Worker configuration e.g. limits and task queue - config: WorkerConfig, - /// A client created with a [CreateClient] request - client: Arc, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to register a replay worker - InitReplayWorker { - runtime: Arc, - /// Worker configuration. Must have unique task queue name. 
- config: WorkerConfig, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to drain logs from core so they can be emitted in node - PollLogs { - /// Logs are sent to this function - callback: Root, - }, - StartEphemeralServer { - runtime: Arc, - config: EphemeralServerConfig, - callback: Root, - }, - ShutdownEphemeralServer { - server: Arc>, - callback: Root, - }, - PushReplayHistory { - tx: Sender, - pushme: HistoryForReplay, - callback: Root, - }, - UpdateClientApiKey { - client: Arc, - key: String, - callback: Root, - }, +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("newRuntime", runtime_new)?; + cx.export_function("runtimeShutdown", runtime_shutdown)?; + + Ok(()) } -/// Builds a tokio runtime and starts polling on [RuntimeRequest]s via an internal channel. -/// Bridges requests from JS to core and sends responses back to JS using a neon::Channel. -/// Blocks current thread until a [Shutdown] request is received in channel. 
-pub fn start_bridge_loop( - telemetry_options: TelemOptsRes, - channel: Arc, - receiver: &mut UnboundedReceiver, - result_sender: oneshot::Sender>, -) { - let mut tokio_builder = tokio::runtime::Builder::new_multi_thread(); - tokio_builder.enable_all().thread_name("core"); - let telem_opts = telemetry_options.0; - let meter_maker = telemetry_options.1; - let tokio_builder: TokioRuntimeBuilder> = TokioRuntimeBuilder { - inner: tokio_builder, - lang_on_thread_start: None, - }; - let mut core_runtime = - CoreRuntime::new(telem_opts, tokio_builder).expect("Failed to create CoreRuntime"); - - core_runtime.tokio_handle().block_on(async { - if let Some(meter_maker) = meter_maker { - match meter_maker() { - Ok(meter) => { - core_runtime.telemetry_mut().attach_late_init_metrics(meter); - } - Err(err) => { - result_sender - .send(Err(format!("Failed to create meter: {}", err))) - .unwrap_or_else(|_| { - panic!("Failed to report runtime start error: {}", err) - }); - return; - } - } +/// Initialize Core global telemetry and create the tokio runtime required to run Core. +/// This should typically be called once on process startup. 
+#[js_function] +pub fn runtime_new( + bridge_options: config::RuntimeOptions, +) -> BridgeResult> { + let (telemetry_options, metrics_options, logging_options) = bridge_options.try_into()?; + + // Create core runtime which starts tokio multi-thread runtime + let mut core_runtime = CoreRuntime::new(telemetry_options, TokioRuntimeBuilder::default()) + .context("Failed to initialize Core Runtime")?; + + let _guard = core_runtime.tokio_handle().enter(); + + // Run the metrics exporter task, if needed + // Created after Core runtime since it needs Tokio handle + let (prom_metrics_exporter_task, otel_metrics_exporter) = match metrics_options { + Some(BridgeMetricsExporter::Prometheus(prom_opts)) => { + let exporter = start_prometheus_metric_exporter(prom_opts) + .context("Failed to start prometheus metrics exporter")?; + + core_runtime + .telemetry_mut() + .attach_late_init_metrics(exporter.meter); + + (Some(exporter.abort_handle), None) } - result_sender - .send(Ok(())) - .expect("Failed to report runtime start success"); - - loop { - let request_option = receiver.recv().await; - let request = match request_option { - None => break, - Some(request) => request, - }; + Some(BridgeMetricsExporter::Otel(otel_opts)) => { + let exporter = build_otlp_metric_exporter(otel_opts) + .context("Failed to start OTel metrics exporter")?; - let channel = channel.clone(); - - match request { - RuntimeRequest::Shutdown { callback } => { - send_result(channel, callback, |cx| Ok(cx.undefined())); - break; - } - RuntimeRequest::CreateClient { - runtime, - options, - callback, - } => { - let mm = core_runtime.telemetry().get_temporal_metric_meter(); - core_runtime.tokio_handle().spawn(async move { - match options - .connect_no_namespace(mm) - .await - { - Err(err) => { - send_error(channel.clone(), callback, |cx| match err { - ClientInitError::SystemInfoCallError(e) => { - make_named_error_from_string( - cx, - TRANSPORT_ERROR, - format!("Failed to call GetSystemInfo: {}", e), - ) - } - 
ClientInitError::TonicTransportError(e) => { - make_named_error_from_error(cx, TRANSPORT_ERROR, e) - } - ClientInitError::InvalidUri(e) => { - Ok(JsError::type_error(cx, format!("{}", e))?) - } - }); - } - Ok(client) => { - send_result(channel.clone(), callback, |cx| { - Ok(cx.boxed(RefCell::new(Some(Client { - runtime, - core_client: Arc::new(client), - })))) - }); - } - } - }); - } - RuntimeRequest::UpdateClientHeaders { - client, - headers, - callback, - } => { - client.get_client().set_headers(headers); - send_result(channel.clone(), callback, |cx| Ok(cx.undefined())); - } - RuntimeRequest::UpdateClientApiKey { client, key, callback } => { - client.get_client().set_api_key(Some(key)); - send_result(channel.clone(), callback, |cx| Ok(cx.undefined())); - } - RuntimeRequest::PollLogs { callback } => { - let logs = core_runtime.telemetry().fetch_buffered_logs(); - send_result(channel.clone(), callback, |cx| { - let logarr = cx.empty_array(); - for (i, cl) in logs.into_iter().enumerate() { - // Not much to do here except for panic when there's an error here. 
- let logobj = cx.empty_object(); + let exporter: Arc = Arc::new(exporter); + core_runtime + .telemetry_mut() + .attach_late_init_metrics(exporter.clone()); - let level = cx.string(cl.level.to_string()); - logobj.set(cx, "level", level).unwrap(); + (None, Some(exporter)) + } + None => (None, None), + }; - let ts = system_time_to_js(cx, cl.timestamp).unwrap(); - logobj.set(cx, "timestamp", ts).unwrap(); + // Run the log exporter task, if needed + // Created after Core runtime since it needs Tokio handle + let log_exporter_task = if let BridgeLogExporter::Push { stream, receiver } = logging_options { + let log_exporter_task = Arc::new(core_runtime.tokio_handle().spawn(async move { + let mut stream = std::pin::pin!(stream.chunks_timeout( + config::FORWARD_LOG_BUFFER_SIZE, + Duration::from_millis(config::FORWARD_LOG_MAX_FREQ_MS) + )); + + while let Some(core_logs) = stream.next().await { + // We silently swallow errors here because logging them could + // cause a bad loop and we don't want to assume console presence + let core_logs = core_logs + .into_iter() + .filter_map(|log| JsonString::::try_from(log).ok()) + .collect::>(); + let _ = receiver.call_on_js_thread((core_logs,)); + } + })); + Some(log_exporter_task) + } else { + None + }; - let msg = cx.string(cl.message); - logobj.set(cx, "message", msg).unwrap(); + Ok(OpaqueOutboundHandle::new(Runtime { + core_runtime: Arc::new(core_runtime), + log_exporter_task, + metrics_exporter_task: prom_metrics_exporter_task.map(Arc::new), + _otel_metrics_exporter: otel_metrics_exporter, + })) +} - let fieldsobj = hashmap_to_js_value(cx, cl.fields); - logobj.set(cx, "fields", fieldsobj.unwrap()).unwrap(); +/// Stops the bridge runtime. 
In practice, this simply drops the RuntimeHandle out of the +/// BoxedRuntimeRef, and is therefore almost the same as just waiting for the lang-side GC +/// to drop the JS counterpart of BoxedRuntimeRef, but this function gives us a bit more +/// control on when that happens (b/c we don't have to wait for the JS GC), which is useful +/// when starting/stopping runtimes at a high pace, e.g. during tests execution. +#[js_function] +pub fn runtime_shutdown(runtime: OpaqueInboundHandle) -> BridgeResult<()> { + std::mem::drop(runtime.take_inner()?); + Ok(()) +} - let target = cx.string(cl.target); - logobj.set(cx, "target", target).unwrap(); +impl Drop for Runtime { + fn drop(&mut self) { + if let Some(handle) = self.log_exporter_task.take() { + handle.abort(); + } - logarr.set(cx, i as u32, logobj).unwrap(); - } - Ok(logarr) - }); - } - RuntimeRequest::InitWorker { - config, - client, - callback, - } => { - let client = (*client).clone(); - match init_worker(&core_runtime, config, client) { - Ok(worker) => { - core_runtime.tokio_handle().spawn(start_worker_loop( - worker, - channel, - callback, - None, - )); - } - Err(err) => send_error(channel.clone(), callback, move |cx| { - make_named_error_from_error(cx, UNEXPECTED_ERROR, err.deref()) - }), - } - } - RuntimeRequest::InitReplayWorker { - runtime, - config, - callback, - } => { - let (tunnel, stream) = HistoryForReplayTunnel::new(runtime); - match init_replay_worker(ReplayWorkerInput::new(config, Box::pin(stream))) { - Ok(worker) => { - core_runtime.tokio_handle().spawn(start_worker_loop( - worker, - channel.clone(), - callback, - Some(tunnel), - )); - } - Err(err) => send_error(channel.clone(), callback, move |cx| { - make_named_error_from_error(cx, UNEXPECTED_ERROR, err.deref()) - }), - }; - } - RuntimeRequest::StartEphemeralServer { - runtime, - config, - callback, - } => { - core_runtime.tokio_handle().spawn(async move { - let stdout = Stdio::from(std::io::stdout()); - let stderr = Stdio::from(std::io::stderr()); 
- let result = match config { - EphemeralServerConfig::TestServer(config) => { - config.start_server_with_output(stdout, stderr).await - } - EphemeralServerConfig::DevServer(config) => { - config.start_server_with_output(stdout, stderr).await - } - }; - match result { - Err(err) => { - let err_str = format!("Failed to start ephemeral server: {}", err); - send_error(channel.clone(), callback, |cx| { - make_named_error_from_string(cx, UNEXPECTED_ERROR, err_str) - }); - } - Ok(server) => { - send_result(channel.clone(), callback, |cx| { - Ok(cx.boxed(RefCell::new(Some(EphemeralServer { - runtime, - core_server: Arc::new(Mutex::new(server)), - })))) - }); - } - } - }); - } - RuntimeRequest::ShutdownEphemeralServer { server, callback } => { - core_runtime.tokio_handle().spawn(async move { - void_future_to_js( - channel, - callback, - async move { - let mut guard = server.lock().await; - guard.shutdown().await - }, - |cx, err| { - make_named_error_from_string( - cx, - UNEXPECTED_ERROR, - format!("Failed to start test server: {}", err), - ) - }, - ).await - }); - } - RuntimeRequest::PushReplayHistory { - tx, - pushme, - callback, - } => { - core_runtime.tokio_handle().spawn(async move { - let sendfut = async move { - tx.send(pushme).await.map_err(|e| { - format!( - "Receive side of history replay channel is gone. This is an sdk bug. 
{:?}", - e - ) - }) - }; - void_future_to_js(channel, callback, sendfut, |cx, err| { - make_named_error_from_string( - cx, - UNEXPECTED_ERROR, - format!("Error pushing replay history {}", err), - ) - }).await - }); - } - } + if let Some(handle) = self.metrics_exporter_task.take() { + handle.abort(); } - }) + } } -// Below are functions exported to JS - -/// Convert Rust SystemTime into a JS array with 2 numbers (seconds, nanos) -pub fn system_time_to_js<'a, C>(cx: &mut C, time: SystemTime) -> NeonResult> -where - C: Context<'a>, -{ - let nanos = time - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_nanos(); - let only_nanos = cx.number((nanos % 1_000_000_000) as f64); - let ts_seconds = cx.number((nanos / 1_000_000_000) as f64); - let ts = cx.empty_array(); - ts.set(cx, 0, ts_seconds).unwrap(); - ts.set(cx, 1, only_nanos).unwrap(); - Ok(ts) +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#[macro_export] +macro_rules! enter_sync { + ($runtime:expr) => { + if let Some(subscriber) = $runtime.telemetry().trace_subscriber() { + temporal_sdk_core::telemetry::set_trace_subscriber_for_current_thread(subscriber); + } + let _guard = $runtime.tokio_handle().enter(); + }; } -/// Helper to get the current time in nanosecond resolution. -pub fn get_time_of_day(mut cx: FunctionContext) -> JsResult { - system_time_to_js(&mut cx, SystemTime::now()) +pub trait RuntimeExt { + fn future_to_promise(&self, future: F) -> BridgeResult> + where + F: Future> + Send + 'static, + R: TryIntoJs + Send + 'static; } -/// Initialize Core global telemetry and create the tokio runtime required to run Core. -/// This should typically be called once on process startup. 
-/// Immediately spawns a poller thread that will block on [RuntimeRequest]s -pub fn runtime_new(mut cx: FunctionContext) -> JsResult { - let telemetry_options = cx.argument::(0)?.as_telemetry_options(&mut cx)?; - let channel = Arc::new(cx.channel()); - let (sender, mut receiver) = unbounded_channel::(); - - // FIXME: This is a temporary fix to get sync notifications of errors while initializing the runtime. - // The proper fix would be to avoid spawning a new thread here, so that start_bridge_loop - // can simply yeild back a Result. But early attempts to do just that caused panics - // on runtime shutdown, so let's use this hack until we can dig deeper. - let (result_sender, result_receiver) = oneshot::channel::>(); - - std::thread::spawn(move || { - start_bridge_loop(telemetry_options, channel, &mut receiver, result_sender) - }); - - if let Ok(Err(e)) = result_receiver.blocking_recv() { - Err(cx.throw_error::<_, String>(e).unwrap_err())?; +impl RuntimeExt for CoreRuntime { + fn future_to_promise(&self, future: F) -> BridgeResult> + where + F: Future> + Send + 'static, + R: TryIntoJs + Send + 'static, + { + enter_sync!(self); + Ok(BridgeFuture::new(Box::pin(future))) } +} - Ok(cx.boxed(Arc::new(RuntimeHandle { sender }))) +impl RuntimeExt for Arc { + fn future_to_promise(&self, future: F) -> BridgeResult> + where + F: Future> + Send + 'static, + R: TryIntoJs + Send + 'static, + { + self.as_ref().future_to_promise(future) + } } -/// Shutdown the Core instance and break out of the thread loop -pub fn runtime_shutdown(mut cx: FunctionContext) -> JsResult { - let runtime = cx.argument::(0)?; - let callback = cx.argument::(1)?; - let request = RuntimeRequest::Shutdown { - callback: callback.root(&mut cx), - }; - if let Err(err) = runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; - Ok(cx.undefined()) +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug, 
Clone)] +pub enum BridgeMetricsExporter { + Prometheus(CorePrometheusExporterOptions), + Otel(CoreOtelCollectorOptions), } -/// Request to drain forwarded logs from core -pub fn poll_logs(mut cx: FunctionContext) -> JsResult { - let runtime = cx.argument::(0)?; - let callback = cx.argument::(1)?; - let request = RuntimeRequest::PollLogs { - callback: callback.root(&mut cx), - }; - if let Err(err) = runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - } - Ok(cx.undefined()) +pub enum BridgeLogExporter { + Console, + Push { + stream: Receiver, + receiver: JsCallback<(Vec>,), ()>, + }, } -/// Create a connected gRPC client which can be used to initialize workers. -/// Client will be returned in the supplied `callback`. -pub fn client_new(mut cx: FunctionContext) -> JsResult { - let runtime = cx.argument::(0)?; - let opts = cx.argument::(1)?; - let callback = cx.argument::(2)?; +//////////////////////////////////////////////////////////////////////////////////////////////////// - let client_options = opts.as_client_options(&mut cx)?; +// IMPORTANT: Any struct or enum below this point must be kept in sync with the type of the same name +// in native.ts. 
Similarly, - let request = RuntimeRequest::CreateClient { - runtime: (**runtime).clone(), - options: client_options, - callback: callback.root(&mut cx), - }; - if let Err(err) = runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; +mod config { + use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; + + use anyhow::Context as _; + + use neon::prelude::*; + use temporal_sdk_core::{ + Url, + api::telemetry::{ + HistogramBucketOverrides, Logger as CoreTelemetryLogger, MetricTemporality, + OtelCollectorOptions as CoreOtelCollectorOptions, OtelCollectorOptionsBuilder, + OtlpProtocol, PrometheusExporterOptions as CorePrometheusExporterOptions, + PrometheusExporterOptionsBuilder, TelemetryOptions as CoreTelemetryOptions, + TelemetryOptionsBuilder, + }, + telemetry::CoreLogStreamConsumer, }; - Ok(cx.undefined()) -} + use bridge_macros::TryFromJs; -/// Drop a reference to a Client, once all references are dropped, the Client will be closed. -pub fn client_close(mut cx: FunctionContext) -> JsResult { - let client = cx.argument::(0)?; - if client.replace(None).is_none() { - make_named_error_from_string(&mut cx, ILLEGAL_STATE_ERROR, "Client already closed") - .and_then(|err| cx.throw(err))?; + use crate::{ + helpers::{BridgeError, BridgeResult, JsCallback, JsonString, TryFromJs}, + logs::LogEntry, }; - Ok(cx.undefined()) -} -/// Update a Client's HTTP request headers -pub fn client_update_headers(mut cx: FunctionContext) -> JsResult { - let client = cx.argument::(0)?; - let headers = cx - .argument::(1)? 
- .as_hash_map_of_string_to_string(&mut cx)?; - let callback = cx.argument::(2)?; - - match client.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Client")?; - } - Some(client) => { - let request = RuntimeRequest::UpdateClientHeaders { - client: client.core_client.clone(), - headers, - callback: callback.root(&mut cx), - }; - if let Err(err) = client.runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; + use super::BridgeLogExporter; + + pub(super) const FORWARD_LOG_BUFFER_SIZE: usize = 2048; + pub(super) const FORWARD_LOG_MAX_FREQ_MS: u64 = 10; + + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct RuntimeOptions { + log_exporter: LogExporter, + telemetry: TelemetryOptions, + metrics_exporter: Option, + } + + impl + TryInto<( + CoreTelemetryOptions, + Option, + super::BridgeLogExporter, + )> for RuntimeOptions + { + type Error = BridgeError; + + fn try_into( + self, + ) -> BridgeResult<( + CoreTelemetryOptions, + Option, + super::BridgeLogExporter, + )> { + let (telemetry_logger, log_exporter) = match self.log_exporter { + LogExporter::Console { filter } => ( + CoreTelemetryLogger::Console { filter }, + BridgeLogExporter::Console, + ), + LogExporter::Forward { filter, receiver } => { + let (consumer, stream) = CoreLogStreamConsumer::new(FORWARD_LOG_BUFFER_SIZE); + ( + CoreTelemetryLogger::Push { + filter, + consumer: Arc::new(consumer), + }, + BridgeLogExporter::Push { stream, receiver }, + ) + } }; + + let mut telemetry_options = TelemetryOptionsBuilder::default(); + let telemetry_options = telemetry_options + .logging(telemetry_logger) + .metric_prefix(self.telemetry.metric_prefix) + .attach_service_name(self.telemetry.attach_service_name) + .build() + .context("Failed to build telemetry options")?; + + let metrics_exporter = self + .metrics_exporter + .map(std::convert::TryInto::try_into) + .transpose()?; + + Ok((telemetry_options, metrics_exporter, log_exporter)) 
} } - Ok(cx.undefined()) -} + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct TelemetryOptions { + metric_prefix: String, + attach_service_name: bool, + } + + #[derive(Debug, Clone, TryFromJs)] + pub(super) enum LogExporter { + Console { + filter: String, + }, + Forward { + filter: String, + receiver: JsCallback<(Vec>,), ()>, + }, + } -/// Update a Client's API key -pub fn client_update_api_key(mut cx: FunctionContext) -> JsResult { - let client = cx.argument::(0)?; - let key = cx.argument::(1)?.value(&mut cx); - let callback = cx.argument::(2)?; + #[derive(Debug, Clone, TryFromJs)] + pub(super) enum MetricsExporter { + Prometheus(PrometheusConfig), + Otel(OtelConfig), + } - match client.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Client")?; + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct PrometheusConfig { + bind_address: SocketAddr, + counters_total_suffix: bool, + unit_suffix: bool, + use_seconds_for_durations: bool, + histogram_bucket_overrides: HashMap>, + global_tags: HashMap, + } + + impl TryInto for MetricsExporter { + type Error = BridgeError; + fn try_into(self) -> BridgeResult { + match self { + Self::Prometheus(prom) => { + Ok(super::BridgeMetricsExporter::Prometheus(prom.try_into()?)) + } + Self::Otel(otel) => Ok(super::BridgeMetricsExporter::Otel(otel.try_into()?)), + } } - Some(client) => { - let request = RuntimeRequest::UpdateClientApiKey { - client: client.core_client.clone(), - key, - callback: callback.root(&mut cx), - }; - if let Err(err) = client.runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; + } + + impl TryInto for PrometheusConfig { + type Error = BridgeError; + + fn try_into(self) -> BridgeResult { + let mut options = PrometheusExporterOptionsBuilder::default(); + let options = options + .socket_addr(self.bind_address) + .counters_total_suffix(self.counters_total_suffix) + .unit_suffix(self.unit_suffix) + 
.use_seconds_for_durations(self.use_seconds_for_durations) + .histogram_bucket_overrides(HistogramBucketOverrides { + overrides: self.histogram_bucket_overrides, + }) + .global_tags(self.global_tags) + .build() + .context("Failed to build prometheus exporter options")?; + + Ok(options) } } - Ok(cx.undefined()) -} + #[derive(Debug, Clone, TryFromJs)] + pub(super) struct OtelConfig { + url: Url, + protocol: StringEncoded, + headers: HashMap, + metrics_export_interval: Duration, + use_seconds_for_durations: bool, + temporality: StringEncoded, + histogram_bucket_overrides: HashMap>, + global_tags: HashMap, + } -pub(crate) struct HistoryForReplayTunnel { - pub(crate) runtime: Arc, - sender: Cell>>, -} -impl HistoryForReplayTunnel { - fn new(runtime: Arc) -> (Self, ReceiverStream) { - let (sender, rx) = channel(1); - ( - HistoryForReplayTunnel { - runtime, - sender: Cell::new(Some(sender)), - }, - ReceiverStream::new(rx), - ) + impl TryInto for OtelConfig { + type Error = BridgeError; + + fn try_into(self) -> BridgeResult { + let mut options = OtelCollectorOptionsBuilder::default(); + let options = options + .url(self.url) + .protocol(*self.protocol) + .headers(self.headers) + .metric_periodicity(self.metrics_export_interval) + .use_seconds_for_durations(self.use_seconds_for_durations) + .metric_temporality(*self.temporality) + .histogram_bucket_overrides(HistogramBucketOverrides { + overrides: self.histogram_bucket_overrides, + }) + .global_tags(self.global_tags) + .build() + .context("Failed to build otel exporter options")?; + + Ok(options) + } + } + + /// A private newtype so that we can implement `TryFromJs` on simple externally defined enums + #[derive(Debug, Clone)] + struct StringEncoded(T); + + impl TryFromJs for StringEncoded { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let value = js_value.downcast::(cx)?; + let value = value.value(cx); + + match value.as_str() { + "http" => 
Ok(Self(OtlpProtocol::Http)), + "grpc" => Ok(Self(OtlpProtocol::Grpc)), + _ => Err(BridgeError::TypeError { + field: None, + message: "Expected either 'http' or 'grpc'".to_string(), + }), + } + } } - pub fn get_chan(&self) -> Result, &'static str> { - let chan = self.sender.take(); - self.sender.set(chan.clone()); - if let Some(chan) = chan { - Ok(chan) - } else { - Err("History replay channel is already closed") + + impl TryFromJs for StringEncoded { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let value = js_value.downcast::(cx)?; + let value = value.value(cx); + + match value.as_str() { + "cumulative" => Ok(Self(MetricTemporality::Cumulative)), + "delta" => Ok(Self(MetricTemporality::Delta)), + _ => Err(BridgeError::TypeError { + field: None, + message: "Expected either 'cumulative' or 'delta'".to_string(), + }), + } } } - pub fn shutdown(&self) { - self.sender.take(); + + impl std::ops::Deref for StringEncoded { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } } } -impl Finalize for HistoryForReplayTunnel {} diff --git a/packages/core-bridge/src/testing.rs b/packages/core-bridge/src/testing.rs deleted file mode 100644 index 6374df1b1..000000000 --- a/packages/core-bridge/src/testing.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::conversions::*; -use crate::errors::*; -use crate::helpers::*; -use crate::runtime::{BoxedEphemeralServer, BoxedRuntime, RuntimeRequest}; -use neon::prelude::*; - -// Below are functions exported to JS - -/// Start an ephemeral Temporal server -pub fn start_ephemeral_server(mut cx: FunctionContext) -> JsResult { - let runtime = cx.argument::(0)?; - let config = cx.argument::(1)?; - let sdk_version = cx.argument::(2)?.value(&mut cx); - let callback = cx.argument::(3)?; - - let config = config.as_ephemeral_server_config(&mut cx, sdk_version)?; - let request = RuntimeRequest::StartEphemeralServer { - runtime: (**runtime).clone(), - config, - callback: 
callback.root(&mut cx), - }; - if let Err(err) = runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; - - Ok(cx.undefined()) -} - -/// Get the ephemeral server "target" (address:port string) -pub fn get_ephemeral_server_target(mut cx: FunctionContext) -> JsResult { - let server = cx.argument::(0)?; - let target = server - .borrow() - .as_ref() - .map(|s| cx.string(s.core_server.blocking_lock().target.as_str())); - if target.is_none() { - make_named_error_from_string( - &mut cx, - ILLEGAL_STATE_ERROR, - "Tried to use closed test server", - ) - .and_then(|err| cx.throw(err))?; - }; - Ok(target.unwrap()) -} - -/// Shutdown an ephemeral server - consumes the server -pub fn shutdown_ephemeral_server(mut cx: FunctionContext) -> JsResult { - let server = cx.argument::(0)?; - let callback = cx.argument::(1)?; - // Drop the ref - match server.replace(None) { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed test server")?; - } - Some(server) => { - if let Err(err) = server - .runtime - .sender - .send(RuntimeRequest::ShutdownEphemeralServer { - server: server.core_server.clone(), - callback: callback.root(&mut cx), - }) - { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; - } - } - Ok(cx.undefined()) -} diff --git a/packages/core-bridge/src/worker.rs b/packages/core-bridge/src/worker.rs index 14b1de273..5d0298a30 100644 --- a/packages/core-bridge/src/worker.rs +++ b/packages/core-bridge/src/worker.rs @@ -1,15 +1,18 @@ -use crate::{conversions::ObjectHandleConversionsExt, errors::*, helpers::*, runtime::*}; -use futures::stream::StreamExt; -use neon::{prelude::*, types::buffer::TypedArray}; +use std::sync::Arc; + +use anyhow::Context as AnyhowContext; +use neon::prelude::*; use prost::Message; -use std::{cell::RefCell, sync::Arc}; -use temporal_sdk_core::replay::HistoryForReplay; +use tokio::sync::mpsc::{Sender, channel}; +use tokio_stream::wrappers::ReceiverStream; + use 
temporal_sdk_core::{ - Worker as CoreWorker, + CoreRuntime, api::{ Worker as CoreWorkerTrait, errors::{CompleteActivityError, CompleteWfError, PollError}, }, + init_replay_worker, init_worker, protos::{ coresdk::{ ActivityHeartbeat, ActivityTaskCompletion, @@ -17,438 +20,839 @@ use temporal_sdk_core::{ }, temporal::api::history::v1::History, }, + replay::{HistoryForReplay, ReplayWorkerInput}, }; -use tokio::sync::mpsc::{UnboundedSender, unbounded_channel}; -use tokio_stream::wrappers::UnboundedReceiverStream; -/// Worker struct, hold a reference for the channel sender responsible for sending requests from -/// JS to a bridge thread which forwards them to core -pub struct WorkerHandle { - pub(crate) sender: UnboundedSender, +use bridge_macros::js_function; + +use crate::{ + client::Client, + enter_sync, + helpers::*, + runtime::{Runtime, RuntimeExt}, +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// Arc because most of CoreWorker's API require sending reference to the Worker into async closures. 
+type CoreWorker = Arc; + +pub struct Worker { + core_runtime: Arc, + core_worker: CoreWorker, } -/// Box it so we can use Worker from JS -pub type BoxedWorker = JsBox>>; -impl Finalize for WorkerHandle {} - -#[derive(Debug)] -pub enum WorkerRequest { - /// A request to shutdown a worker, the worker instance will remain active to - /// allow draining of pending tasks - InitiateShutdown { - /// Used to send the result back into JS - callback: Root, - }, - /// A request to poll for workflow activations - PollWorkflowActivation { - /// Used to send the result back into JS - callback: Root, - }, - /// A request to complete a single workflow activation - CompleteWorkflowActivation { - completion: WorkflowActivationCompletion, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to poll for activity tasks - PollActivityTask { - /// Used to report completion or error back into JS - callback: Root, - }, - /// A request to complete a single activity task - CompleteActivityTask { - completion: ActivityTaskCompletion, - /// Used to send the result back into JS - callback: Root, - }, - /// A request to send a heartbeat from a running activity - RecordActivityHeartbeat { heartbeat: ActivityHeartbeat }, +impl Finalize for Worker {} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn init(cx: &mut neon::prelude::ModuleContext) -> neon::prelude::NeonResult<()> { + cx.export_function("newWorker", worker_new)?; + cx.export_function("workerValidate", worker_validate)?; + cx.export_function( + "workerPollWorkflowActivation", + worker_poll_workflow_activation, + )?; + cx.export_function( + "workerCompleteWorkflowActivation", + worker_complete_workflow_activation, + )?; + cx.export_function("workerPollActivityTask", worker_poll_activity_task)?; + cx.export_function("workerCompleteActivityTask", worker_complete_activity_task)?; + cx.export_function( + "workerRecordActivityHeartbeat", + 
worker_record_activity_heartbeat, + )?; + cx.export_function("workerInitiateShutdown", worker_initiate_shutdown)?; + cx.export_function("workerFinalizeShutdown", worker_finalize_shutdown)?; + + // Replay worker functions + cx.export_function("newReplayWorker", replay_worker_new)?; + cx.export_function("pushHistory", push_history)?; + cx.export_function("closeHistoryStream", close_history_stream)?; + + Ok(()) } -/// Polls on [WorkerRequest]s via given channel. -/// Bridges requests from JS to core and sends responses back to JS using a neon::Channel. -/// Returns when the given channel is dropped. -pub async fn start_worker_loop( - worker: CoreWorker, - channel: Arc, - callback: Root, - is_replay: Option, -) { - if is_replay.is_none() { - if let Err(e) = worker.validate().await { - send_error(channel, callback, move |cx| { - make_named_error_from_error(cx, TRANSPORT_ERROR, e) - }); - return; +/// Create a new worker. +#[js_function] +pub fn worker_new( + client: OpaqueInboundHandle, + worker_options: config::BridgeWorkerOptions, +) -> BridgeResult> { + let config = worker_options + .into_core_config() + .context("Failed to convert WorkerOptions to CoreWorkerConfig")?; + + let client_ref = client.borrow_inner()?; + let client = client_ref.core_client.clone(); + let runtime = client_ref.core_runtime.clone(); + + enter_sync!(runtime); + let worker = init_worker(&runtime, config, client).context("Failed to initialize worker")?; + + Ok(OpaqueOutboundHandle::new(Worker { + core_runtime: runtime, + core_worker: Arc::new(worker), + })) +} + +/// Validate a worker. 
+#[js_function] +pub fn worker_validate(worker: OpaqueInboundHandle) -> BridgeResult> { + let (runtime, worker) = worker.map_inner(|worker| { + ( + Arc::clone(&worker.core_runtime), + Arc::clone(&worker.core_worker), + ) + })?; + + runtime.future_to_promise(async move { + worker + .validate() + .await + .map_err(|err| BridgeError::TransportError(err.to_string())) + }) +} + +/// Initiate a single workflow activation poll request. +/// There should be only one concurrent poll request for this type. +#[js_function] +pub fn worker_poll_workflow_activation( + worker: OpaqueInboundHandle, +) -> BridgeResult>> { + let (runtime, worker) = worker.map_inner(|worker| { + ( + Arc::clone(&worker.core_runtime), + Arc::clone(&worker.core_worker), + ) + })?; + + runtime.future_to_promise(async move { + let result = worker.poll_workflow_activation().await; + + match result { + Ok(task) => Ok(task.encode_to_vec()), + Err(err) => match err { + PollError::ShutDown => Err(BridgeError::WorkerShutdown)?, + PollError::TonicError(status) => { + Err(BridgeError::TransportError(status.message().to_string()))? 
+ } + }, } - } - let (tx, rx) = unbounded_channel(); - // Return the worker after validation has happened - if let Some(tunnel) = is_replay { - send_result(channel.clone(), callback, |cx| { - let worker = cx.boxed(RefCell::new(Some(WorkerHandle { sender: tx }))); - let tunnel = cx.boxed(tunnel); - let retme = cx.empty_object(); - retme.set(cx, "worker", worker)?; - retme.set(cx, "pusher", tunnel)?; - Ok(retme) - }) - } else { - send_result(channel.clone(), callback, |cx| { - Ok(cx.boxed(RefCell::new(Some(WorkerHandle { sender: tx })))) - }); - } - UnboundedReceiverStream::new(rx) - .for_each_concurrent(None, |request| { - let worker = &worker; - let channel = channel.clone(); - async move { - match request { - WorkerRequest::InitiateShutdown { callback } => { - worker.initiate_shutdown(); - send_result(channel, callback, |cx| Ok(cx.undefined())); - } - WorkerRequest::PollWorkflowActivation { callback } => { - handle_poll_workflow_activation_request(worker, channel, callback).await - } - WorkerRequest::PollActivityTask { callback } => { - handle_poll_activity_task_request(worker, channel, callback).await - } - WorkerRequest::CompleteWorkflowActivation { - completion, - callback, - } => { - void_future_to_js( - channel, - callback, - async move { worker.complete_workflow_activation(completion).await }, - |cx, err| -> JsResult { - match err { - CompleteWfError::MalformedWorkflowCompletion { - reason, .. - } => Ok(JsError::type_error(cx, reason)?.upcast()), - } - }, - ) - .await; - } - WorkerRequest::CompleteActivityTask { - completion, - callback, - } => { - void_future_to_js( - channel, - callback, - async move { worker.complete_activity_task(completion).await }, - |cx, err| -> JsResult { - match err { - CompleteActivityError::MalformedActivityCompletion { - reason, - .. 
- } => Ok(JsError::type_error(cx, reason)?.upcast()), - } - }, - ) - .await; - } - WorkerRequest::RecordActivityHeartbeat { heartbeat } => { - worker.record_activity_heartbeat(heartbeat) + }) +} + +/// Submit a workflow activation completion to core. +#[js_function] +pub fn worker_complete_workflow_activation( + worker: OpaqueInboundHandle, + completion: Vec, +) -> BridgeResult> { + let workflow_completion = WorkflowActivationCompletion::decode_length_delimited( + completion.as_slice(), + ) + .map_err(|err| BridgeError::TypeError { + field: None, + message: format!("Cannot decode Completion from buffer: {err:?}"), + })?; + + let (runtime, worker) = worker.map_inner(|worker| { + ( + Arc::clone(&worker.core_runtime), + Arc::clone(&worker.core_worker), + ) + })?; + + runtime.future_to_promise(async move { + worker + .complete_workflow_activation(workflow_completion) + .await + .map_err(|err| match err { + CompleteWfError::MalformedWorkflowCompletion { reason, run_id } => { + BridgeError::TypeError { + field: None, + message: format!( + "Malformed Workflow Completion: {reason:?} for RunID={run_id}" + ), } } - } - }) - .await; - worker.finalize_shutdown().await; + }) + }) } -/// Called within the poll loop thread, calls core and triggers JS callback with result -async fn handle_poll_workflow_activation_request( - worker: &CoreWorker, - channel: Arc, - callback: Root, -) { - match worker.poll_workflow_activation().await { - Ok(task) => { - send_result(channel, callback, move |cx| { - let len = task.encoded_len(); - let mut result = JsArrayBuffer::new(cx, len)?; - let mut slice = result.as_mut_slice(cx); - if task.encode(&mut slice).is_err() { - panic!("Failed to encode task") - }; - Ok(result) - }); - } - Err(err) => { - send_error(channel, callback, move |cx| match err { - PollError::ShutDown => make_named_error_from_error(cx, SHUTDOWN_ERROR, err), - PollError::TonicError(_) => make_named_error_from_error(cx, TRANSPORT_ERROR, err), - }); +/// Initiate a single activity 
task poll request. +/// There should be only one concurrent poll request for this type. +#[js_function] +pub fn worker_poll_activity_task( + worker: OpaqueInboundHandle, +) -> BridgeResult>> { + let (runtime, worker) = worker.map_inner(|worker| { + ( + Arc::clone(&worker.core_runtime), + Arc::clone(&worker.core_worker), + ) + })?; + + runtime.future_to_promise(async move { + let result = worker.poll_activity_task().await; + + match result { + Ok(task) => Ok(task.encode_to_vec()), + Err(err) => match err { + PollError::ShutDown => Err(BridgeError::WorkerShutdown)?, + PollError::TonicError(status) => { + Err(BridgeError::TransportError(status.message().to_string()))? + } + }, } - } + }) } -/// Called within the poll loop thread, calls core and triggers JS callback with result -pub async fn handle_poll_activity_task_request( - worker: &CoreWorker, - channel: Arc, - callback: Root, -) { - match worker.poll_activity_task().await { - Ok(task) => { - send_result(channel, callback, move |cx| { - let len = task.encoded_len(); - let mut result = JsArrayBuffer::new(cx, len)?; - let mut slice = result.as_mut_slice(cx); - if task.encode(&mut slice).is_err() { - panic!("Failed to encode task") - }; - Ok(result) - }); - } - Err(err) => { - send_error(channel, callback, move |cx| match err { - PollError::ShutDown => make_named_error_from_error(cx, SHUTDOWN_ERROR, err), - PollError::TonicError(_) => make_named_error_from_error(cx, TRANSPORT_ERROR, err), - }); - } - } +/// Submit an activity task completion to core. 
+#[js_function] +pub fn worker_complete_activity_task( + worker: OpaqueInboundHandle, + completion: Vec, +) -> BridgeResult> { + let activity_completion = + ActivityTaskCompletion::decode_length_delimited(completion.as_slice()).map_err(|err| { + BridgeError::TypeError { + field: None, + message: format!("Cannot decode Completion from buffer: {err:?}"), + } + })?; + + let (runtime, worker) = worker.map_inner(|worker| { + ( + Arc::clone(&worker.core_runtime), + Arc::clone(&worker.core_worker), + ) + })?; + + runtime.future_to_promise(async move { + worker + .complete_activity_task(activity_completion) + .await + .map_err(|err| match err { + CompleteActivityError::MalformedActivityCompletion { + reason, + completion: _, + } => BridgeError::TypeError { + field: None, + message: format!("Malformed Activity Completion: {reason:?}"), + }, + }) + }) } -// Below are functions exported to JS +/// Submit an activity heartbeat to core. +#[js_function] +pub fn worker_record_activity_heartbeat( + worker: OpaqueInboundHandle, + heartbeat: Vec, +) -> BridgeResult<()> { + let activity_heartbeat = ActivityHeartbeat::decode_length_delimited(heartbeat.as_slice()) + .map_err(|err| BridgeError::TypeError { + field: None, + message: format!("Cannot decode Heartbeat from buffer: {err:?}"), + })?; -/// Create a new worker asynchronously. -/// Worker uses the provided connection and returned to JS using supplied `callback`. 
-pub fn worker_new(mut cx: FunctionContext) -> JsResult { - let client = cx.argument::(0)?; - let worker_options = cx.argument::(1)?; - let callback = cx.argument::(2)?; + let worker_ref = worker.borrow_inner()?; + worker_ref + .core_worker + .record_activity_heartbeat(activity_heartbeat); - match client.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Client")?; - } - Some(client) => { - let config = worker_options.as_worker_config(&mut cx)?; - let request = RuntimeRequest::InitWorker { - client: client.core_client.clone(), - config, - callback: callback.root(&mut cx), - }; - if let Err(err) = client.runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; - } + Ok(()) +} + +/// Request shutdown of the worker. +/// Once complete Core will stop polling on new tasks and activations on worker's task queue. +/// Caller should drain any pending tasks and activations and call worker_finalize_shutdown before breaking from +/// the loop to ensure graceful shutdown. 
+#[js_function] +pub fn worker_initiate_shutdown(worker: OpaqueInboundHandle) -> BridgeResult<()> { + let worker_ref = worker.borrow_inner()?; + worker_ref.core_worker.initiate_shutdown(); + Ok(()) +} + +#[js_function] +pub fn worker_finalize_shutdown( + worker: OpaqueInboundHandle, +) -> BridgeResult> { + let (runtime, worker) = { + let worker_ref = worker.take_inner()?; + (worker_ref.core_runtime, worker_ref.core_worker) }; - Ok(cx.undefined()) + let worker = Arc::try_unwrap(worker).map_err(|arc| BridgeError::IllegalStateStillInUse { + what: "Worker", + details: Some(format!( + "Expected 1 reference, but got {}", + Arc::strong_count(&arc) + )), + })?; + + runtime.future_to_promise(async move { + worker.finalize_shutdown().await; + Ok(()) + }) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +pub struct HistoryForReplayTunnelHandle { + core_runtime: Arc, + sender: Sender, +} + +impl HistoryForReplayTunnelHandle { + fn new(runtime: &Arc) -> (Self, ReceiverStream) { + let (sender, rx) = channel(1); + ( + Self { + core_runtime: Arc::clone(runtime), + sender, + }, + ReceiverStream::new(rx), + ) + } + + pub(crate) fn get_chan(&self) -> Sender { + self.sender.clone() + } } +impl Finalize for HistoryForReplayTunnelHandle {} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + /// Create a new replay worker asynchronously. -/// Worker is returned to JS using supplied callback. 
-pub fn replay_worker_new(mut cx: FunctionContext) -> JsResult { - let runtime = cx.argument::(0)?; - let worker_options = cx.argument::(1)?; - let callback = cx.argument::(2)?; - - let config = worker_options.as_worker_config(&mut cx)?; - let request = RuntimeRequest::InitReplayWorker { - runtime: (*runtime).clone(), - config, - callback: callback.root(&mut cx), - }; - if let Err(err) = runtime.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; +#[js_function] +pub fn replay_worker_new( + runtime: OpaqueInboundHandle, + config: config::BridgeWorkerOptions, +) -> BridgeResult<( + OpaqueOutboundHandle, + OpaqueOutboundHandle, +)> { + let config = config + .into_core_config() + .context("Failed to convert WorkerOptions to CoreWorkerConfig")?; + + let runtime = runtime.borrow_inner()?.core_runtime.clone(); + enter_sync!(runtime); + + let (tunnel, stream) = HistoryForReplayTunnelHandle::new(&runtime); + + let worker = init_replay_worker(ReplayWorkerInput::new(config, Box::pin(stream))) + .context("Failed to initialize replay worker")?; + + let worker_handle = Worker { + core_runtime: runtime, + core_worker: Arc::new(worker), }; - Ok(cx.undefined()) + Ok(( + OpaqueOutboundHandle::new(worker_handle), + OpaqueOutboundHandle::new(tunnel), + )) } -pub fn push_history(mut cx: FunctionContext) -> JsResult { - let pusher = cx.argument::>(0)?; - let workflow_id = cx.argument::(1)?; - let history_binary = cx.argument::(2)?; - let callback = cx.argument::(3)?; - let data = history_binary.as_slice(&cx); - match History::decode_length_delimited(data) { - Ok(hist) => { - let workflow_id = workflow_id.value(&mut cx); - if let Err(e) = pusher.get_chan().map(|chan| { - pusher - .runtime - .sender - .send(RuntimeRequest::PushReplayHistory { - tx: chan, - pushme: HistoryForReplay::new(hist, workflow_id), - callback: callback.root(&mut cx), - }) - }) { - callback_with_unexpected_error(&mut cx, callback, e)?; +#[js_function] +pub fn push_history( + pusher: 
OpaqueInboundHandle, + workflow_id: String, + history_binary: Vec, +) -> BridgeResult> { + let history: History = + History::decode_length_delimited(history_binary.as_slice()).map_err(|err| { + BridgeError::TypeError { + field: None, + message: format!("Cannot decode History from buffer: {err:?}"), } - Ok(cx.undefined()) - } - Err(e) => cx.throw_error(format!("Error decoding history: {:?}", e)), - } + })?; + let history = HistoryForReplay::new(history, workflow_id); + + let pusher_ref = pusher.borrow_inner()?; + let chan = pusher_ref.get_chan(); + + pusher_ref.core_runtime.future_to_promise(async move { + chan.send(history) + .await + .context("Error pushing history to replay worker")?; + Ok(()) + }) } -pub fn close_history_stream(mut cx: FunctionContext) -> JsResult { - let pusher = cx.argument::>(0)?; - pusher.shutdown(); - Ok(cx.undefined()) +#[js_function] +pub fn close_history_stream( + pusher: OpaqueInboundHandle, +) -> BridgeResult<()> { + // Just drop the pusher's channel; there's actually no "close" method on the channel. + let _pusher_ref = pusher.take_inner()?; + Ok(()) } -/// Initiate a single workflow activation poll request. -/// There should be only one concurrent poll request for this type. 
-pub fn worker_poll_workflow_activation(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let callback = cx.argument::(1)?; - match worker.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Worker")?; +//////////////////////////////////////////////////////////////////////////////////////////////////// + +mod config { + use std::{sync::Arc, time::Duration}; + + use temporal_sdk_core::{ + ResourceBasedSlotsOptions, ResourceBasedSlotsOptionsBuilder, ResourceSlotOptions, + SlotSupplierOptions as CoreSlotSupplierOptions, TunerHolder, TunerHolderOptionsBuilder, + api::worker::{ + ActivitySlotKind, LocalActivitySlotKind, PollerBehavior, SlotKind, WorkerConfig, + WorkerConfigBuilder, WorkerConfigBuilderError, WorkflowSlotKind, + }, + }; + + use bridge_macros::TryFromJs; + + use super::custom_slot_supplier::CustomSlotSupplierOptions; + + #[derive(TryFromJs)] + pub struct BridgeWorkerOptions { + identity: String, + build_id: String, + use_versioning: bool, + task_queue: String, + namespace: String, + tuner: WorkerTuner, + non_sticky_to_sticky_poll_ratio: f32, + max_concurrent_workflow_task_polls: usize, + max_concurrent_activity_task_polls: usize, + enable_non_local_activities: bool, + sticky_queue_schedule_to_start_timeout: Duration, + max_cached_workflows: usize, + max_heartbeat_throttle_interval: Duration, + default_heartbeat_throttle_interval: Duration, + max_activities_per_second: Option, + max_task_queue_activities_per_second: Option, + shutdown_grace_time: Option, + } + + impl BridgeWorkerOptions { + pub(crate) fn into_core_config(self) -> Result { + // Set all other options + let mut builder = WorkerConfigBuilder::default(); + builder + .client_identity_override(Some(self.identity)) + .worker_build_id(self.build_id) + .use_worker_versioning(self.use_versioning) + .task_queue(self.task_queue) + .namespace(self.namespace) + .tuner(self.tuner.into_core_config()?) 
+ .nonsticky_to_sticky_poll_ratio(self.non_sticky_to_sticky_poll_ratio) + .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum( + self.max_concurrent_workflow_task_polls, + )) + .activity_task_poller_behavior(PollerBehavior::SimpleMaximum( + self.max_concurrent_activity_task_polls, + )) + .no_remote_activities(!self.enable_non_local_activities) + .sticky_queue_schedule_to_start_timeout(self.sticky_queue_schedule_to_start_timeout) + .max_cached_workflows(self.max_cached_workflows) + .max_heartbeat_throttle_interval(self.max_heartbeat_throttle_interval) + .default_heartbeat_throttle_interval(self.default_heartbeat_throttle_interval) + .max_task_queue_activities_per_second(self.max_task_queue_activities_per_second) + .max_worker_activities_per_second(self.max_activities_per_second) + .graceful_shutdown_period(self.shutdown_grace_time) + .build() } - Some(worker) => { - let request = WorkerRequest::PollWorkflowActivation { - callback: callback.root(&mut cx), - }; - if let Err(err) = worker.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; + } + + #[derive(TryFromJs)] + #[allow(clippy::struct_field_names)] + pub(super) struct WorkerTuner { + workflow_task_slot_supplier: SlotSupplier, + activity_task_slot_supplier: SlotSupplier, + local_activity_task_slot_supplier: SlotSupplier, + } + + impl WorkerTuner { + fn into_core_config(self) -> Result, String> { + let mut tuner_holder = TunerHolderOptionsBuilder::default(); + let mut rbo = None; + + tuner_holder.workflow_slot_options( + self.workflow_task_slot_supplier + .into_slot_supplier(&mut rbo), + ); + tuner_holder.activity_slot_options( + self.activity_task_slot_supplier + .into_slot_supplier(&mut rbo), + ); + tuner_holder.local_activity_slot_options( + self.local_activity_task_slot_supplier + .into_slot_supplier(&mut rbo), + ); + if let Some(rbo) = rbo { + tuner_holder.resource_based_options(rbo); } + + tuner_holder + .build_tuner_holder() + .map(Arc::new) + .map_err(|e| 
format!("Invalid tuner options: {e:?}")) } } - Ok(cx.undefined()) -} -/// Initiate a single activity task poll request. -/// There should be only one concurrent poll request for this type. -pub fn worker_poll_activity_task(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let callback = cx.argument::(1)?; - match worker.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Worker")?; - } - Some(worker) => { - let request = WorkerRequest::PollActivityTask { - callback: callback.root(&mut cx), - }; - if let Err(err) = worker.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; + #[derive(TryFromJs)] + pub(super) enum SlotSupplier { + FixedSize(FixedSizeSlotSupplierOptions), + ResourceBased(ResourceBasedSlotSupplierOptions), + Custom(CustomSlotSupplierOptions), + } + + #[derive(TryFromJs)] + pub(super) struct FixedSizeSlotSupplierOptions { + num_slots: usize, + } + + #[derive(TryFromJs)] + pub(super) struct ResourceBasedSlotSupplierOptions { + minimum_slots: usize, + maximum_slots: usize, + ramp_throttle: Duration, + tuner_options: ResourceBasedTunerOptions, + } + + #[derive(TryFromJs)] + pub(super) struct ResourceBasedTunerOptions { + target_memory_usage: f64, + target_cpu_usage: f64, + } + + impl SlotSupplier { + fn into_slot_supplier( + self, + rbo: &mut Option, + ) -> CoreSlotSupplierOptions { + match self { + Self::FixedSize(opts) => CoreSlotSupplierOptions::FixedSize { + slots: opts.num_slots, + }, + Self::ResourceBased(opts) => { + *rbo = Some( + ResourceBasedSlotsOptionsBuilder::default() + .target_cpu_usage(opts.tuner_options.target_cpu_usage) + .target_mem_usage(opts.tuner_options.target_memory_usage) + .build() + .expect("Building ResourceBasedSlotsOptions can't fail"), + ); + CoreSlotSupplierOptions::ResourceBased(ResourceSlotOptions::new( + opts.minimum_slots, + opts.maximum_slots, + opts.ramp_throttle, + )) + } + Self::Custom(opts) => 
CoreSlotSupplierOptions::Custom(Arc::new( + super::custom_slot_supplier::SlotSupplierBridge::new(opts), + )), } } } - Ok(cx.undefined()) } -/// Submit a workflow activation completion to core. -pub fn worker_complete_workflow_activation(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let completion = cx.argument::(1)?; - let callback = cx.argument::(2)?; - match worker.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Worker")?; +//////////////////////////////////////////////////////////////////////////////////////////////////// + +mod custom_slot_supplier { + use std::{marker::PhantomData, sync::Arc}; + + use neon::{context::Context, handle::Handle, prelude::*}; + + use temporal_sdk_core::{ + SlotSupplierOptions as CoreSlotSupplierOptions, + api::worker::{ + SlotInfo as CoreSlotInfo, SlotInfoTrait as _, SlotKind, + SlotKindType as CoreSlotKindType, SlotMarkUsedContext as CoreSlotMarkUsedContext, + SlotReleaseContext as CoreSlotReleaseContext, + SlotReservationContext as CoreSlotReservationContext, SlotSupplier as CoreSlotSupplier, + SlotSupplierPermit as CoreSlotSupplierPermit, + }, + }; + + use bridge_macros::{TryFromJs, TryIntoJs}; + + use crate::helpers::*; + + // Custom Slot Supplier //////////////////////////////////////////////////////////////////////////// + + pub(super) struct SlotSupplierBridge { + options: CustomSlotSupplierOptions, + } + + impl SlotSupplierBridge { + pub(crate) const fn new(options: CustomSlotSupplierOptions) -> Self { + Self { options } } - Some(worker) => { - match WorkflowActivationCompletion::decode_length_delimited(completion.as_slice(&cx)) { - Ok(completion) => { - let request = WorkerRequest::CompleteWorkflowActivation { - completion, - callback: callback.root(&mut cx), - }; - if let Err(err) = worker.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; + } + + #[async_trait::async_trait] + impl CoreSlotSupplier for 
SlotSupplierBridge { + type SlotKind = SK; + + async fn reserve_slot( + &self, + ctx: &dyn CoreSlotReservationContext, + ) -> CoreSlotSupplierPermit { + loop { + let reserve_ctx = SlotReserveContext { + slot_type: SK::kind().into(), + task_queue: ctx.task_queue().to_string(), + worker_identity: ctx.worker_identity().to_string(), + worker_build_id: ctx.worker_build_id().to_string(), + is_sticky: ctx.is_sticky(), + }; + + let (_abort_controller, abort_signal) = + AbortController::new("Request Cancelled".to_string()); + + let permit_result = self + .options + .reserve_slot + .call((reserve_ctx, abort_signal)) + .await; + + match permit_result { + Ok(permit) => { + return CoreSlotSupplierPermit::with_user_data(BridgePermitData { + permit: Arc::new(permit), + }); + } + Err(err) => { + log::warn!("Error reserving slot: {err:?}"); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + continue; + } } - Err(_) => callback_with_error(&mut cx, callback, |cx| { - JsError::type_error(cx, "Cannot decode Completion from buffer") - })?, } } - }; - Ok(cx.undefined()) -} -/// Submit an activity task completion to core. 
-pub fn worker_complete_activity_task(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let result = cx.argument::(1)?; - let callback = cx.argument::(2)?; - match worker.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Worker")?; - } - Some(worker) => { - match ActivityTaskCompletion::decode_length_delimited(result.as_slice(&cx)) { - Ok(completion) => { - let request = WorkerRequest::CompleteActivityTask { - completion, - callback: callback.root(&mut cx), - }; - if let Err(err) = worker.sender.send(request) { - callback_with_unexpected_error(&mut cx, callback, err)?; - }; + fn try_reserve_slot( + &self, + ctx: &dyn CoreSlotReservationContext, + ) -> Option { + let tokio_runtime = tokio::runtime::Handle::current(); + let _entered = tokio_runtime.enter(); + + let reserve_ctx = SlotReserveContext { + slot_type: SK::kind().into(), + task_queue: ctx.task_queue().to_string(), + worker_identity: ctx.worker_identity().to_string(), + worker_build_id: ctx.worker_build_id().to_string(), + is_sticky: ctx.is_sticky(), + }; + + // Try to reserve slot synchronously + let result = self.options.try_reserve_slot.call_and_block((reserve_ctx,)); + + match result { + Ok(res) => res.map(|permit| { + CoreSlotSupplierPermit::with_user_data(BridgePermitData { + permit: Arc::new(permit), + }) + }), + Err(err) => { + log::warn!("Error reserving {} slot: {:?}", SK::kind(), err); + None } - Err(_) => callback_with_error(&mut cx, callback, |cx| { - JsError::type_error(cx, "Cannot decode Completion from buffer") - })?, } } - }; - Ok(cx.undefined()) -} -/// Submit an activity heartbeat to core. -pub fn worker_record_activity_heartbeat(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let heartbeat = cx.argument::(1)?; - match worker.borrow().as_ref() { - None => { - make_named_error_from_string(&mut cx, UNEXPECTED_ERROR, "Tried to use closed Worker") - .and_then(|err| cx.throw(err))? 
+ fn mark_slot_used(&self, ctx: &dyn CoreSlotMarkUsedContext) { + let tokio_runtime = tokio::runtime::Handle::current(); + let _entered = tokio_runtime.enter(); + + let permit_data = ctx + .permit() + .user_data::() + .expect("Expected BridgePermitData in mark_slot_used"); + + let slot_info = SlotInfo::from(&ctx.info().downcast()); + + // Fire and forget call to mark_slot_used + let _ = self + .options + .mark_slot_used + .call_on_js_thread((SlotMarkUsedContext:: { + slot_info, + permit: permit_data.permit.clone(), + _marker: PhantomData, + },)); } - Some(worker) => match ActivityHeartbeat::decode_length_delimited(heartbeat.as_slice(&cx)) { - Ok(heartbeat) => { - let request = WorkerRequest::RecordActivityHeartbeat { heartbeat }; - if let Err(err) = worker.sender.send(request) { - make_named_error_from_error(&mut cx, UNEXPECTED_ERROR, err) - .and_then(|err| cx.throw(err))?; - } - } - Err(_) => cx.throw_type_error("Cannot decode ActivityHeartbeat from buffer")?, + + fn release_slot(&self, ctx: &dyn CoreSlotReleaseContext) { + let tokio_runtime = tokio::runtime::Handle::current(); + let _entered = tokio_runtime.enter(); + + let permit_data = ctx + .permit() + .user_data::() + .expect("Expected BridgePermitData in release_slot"); + + let slot_info = ctx.info().map(|info| SlotInfo::from(&info.downcast())); + + // Fire and forget call to release_slot + let _ = self + .options + .release_slot + .call_on_js_thread((SlotReleaseContext:: { + slot_info, + permit: permit_data.permit.clone(), + _marker: PhantomData, + },)); + } + } + + #[derive(TryFromJs)] + pub(super) struct CustomSlotSupplierOptions { + reserve_slot: JsAsyncCallback<(SlotReserveContext, AbortSignal), SlotPermitOpaqueData>, + try_reserve_slot: JsCallback<(SlotReserveContext,), Option>, + mark_slot_used: JsCallback<(SlotMarkUsedContext,), ()>, + release_slot: JsCallback<(SlotReleaseContext,), ()>, + } + + impl TryInto> + for CustomSlotSupplierOptions + { + type Error = BridgeError; + + fn try_into(self) -> 
Result, Self::Error> { + Ok(CoreSlotSupplierOptions::Custom(Arc::new( + SlotSupplierBridge { options: self }, + ))) + } + } + + #[derive(TryIntoJs)] + enum SlotInfo { + Workflow { + workflow_type: String, + is_sticky: bool, }, - }; - Ok(cx.undefined()) -} + Activity { + activity_type: String, + }, + LocalActivity { + activity_type: String, + }, + Nexus { + service: String, + operation: String, + }, + } -/// Request shutdown of the worker. -/// Once complete Core will stop polling on new tasks and activations on worker's task queue. -/// Caller should drain any pending tasks and activations and call worker_finalize_shutdown before breaking from -/// the loop to ensure graceful shutdown. -pub fn worker_initiate_shutdown(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - let callback = cx.argument::(1)?; - match worker.borrow().as_ref() { - None => { - callback_with_unexpected_error(&mut cx, callback, "Tried to use closed Worker")?; + impl<'a> From<&'a CoreSlotInfo<'a>> for SlotInfo { + fn from(info: &'a CoreSlotInfo<'a>) -> Self { + match info { + CoreSlotInfo::Workflow(info) => Self::Workflow { + workflow_type: info.workflow_type.to_string(), + is_sticky: info.is_sticky, + }, + CoreSlotInfo::Activity(info) => Self::Activity { + activity_type: info.activity_type.to_string(), + }, + CoreSlotInfo::LocalActivity(info) => Self::LocalActivity { + activity_type: info.activity_type.to_string(), + }, + CoreSlotInfo::Nexus(info) => Self::Nexus { + service: info.service.to_string(), + operation: info.operation.to_string(), + }, + } } - Some(worker) => { - if let Err(err) = worker.sender.send(WorkerRequest::InitiateShutdown { - callback: callback.root(&mut cx), - }) { - make_named_error_from_error(&mut cx, UNEXPECTED_ERROR, err) - .and_then(|err| cx.throw(err))?; + } + + #[derive(TryIntoJs)] + struct SlotReserveContext { + slot_type: SlotKindType, + task_queue: String, + worker_identity: String, + worker_build_id: String, + is_sticky: bool, + } + + 
#[derive(TryIntoJs)] + struct SlotMarkUsedContext { + slot_info: SlotInfo, + permit: Arc, + _marker: PhantomData, + } + + #[derive(TryIntoJs)] + struct SlotReleaseContext { + slot_info: Option, + permit: Arc, + _marker: PhantomData, + } + + enum SlotKindType { + Workflow, + Activity, + LocalActivity, + Nexus, + } + + // FIXME: Anyway we could get this auto-generated from CoreSlotKindType? + impl TryIntoJs for SlotKindType { + type Output = JsString; + fn try_into_js<'cx>(self, cx: &mut impl Context<'cx>) -> JsResult<'cx, JsString> { + let s = match self { + Self::Workflow => "workflow", + Self::Activity => "activity", + Self::LocalActivity => "local-activity", + Self::Nexus => "nexus", }; + Ok(cx.string(s)) } } - Ok(cx.undefined()) -} -pub fn worker_finalize_shutdown(mut cx: FunctionContext) -> JsResult { - let worker = cx.argument::(0)?; - if worker.replace(None).is_none() { - make_named_error_from_string(&mut cx, ILLEGAL_STATE_ERROR, "Worker already closed") - .and_then(|err| cx.throw(err))?; + impl From for SlotKindType { + fn from(val: CoreSlotKindType) -> Self { + match val { + CoreSlotKindType::Workflow => Self::Workflow, + CoreSlotKindType::Activity => Self::Activity, + CoreSlotKindType::LocalActivity => Self::LocalActivity, + CoreSlotKindType::Nexus => Self::Nexus, + } + } } - Ok(cx.undefined()) + /// `BridgePermitData` holds the data associated with a slot permit. + struct BridgePermitData { + permit: Arc, + } + + /// An opaque handle to a root'd JS object. + /// + /// Note that even though the public API allows `permit` to be any JS value, including + /// `undefined` or `null`, we may in fact only root JS _objects_ (including arrays and + /// functions, but not primitives). For that reason, we wrap the user's JS value in a + /// `JSObject`, and root that object instead. 
+ struct SlotPermitOpaqueData(Root); + + static PERMIT_DATA_FIELD: &str = "permit_data"; + + impl TryFromJs for SlotPermitOpaqueData { + fn try_from_js<'cx, 'b>( + cx: &mut impl Context<'cx>, + js_value: Handle<'b, JsValue>, + ) -> BridgeResult { + let obj = cx.empty_object(); + obj.set(cx, PERMIT_DATA_FIELD, js_value)?; + Ok(Self(obj.root(cx))) + } + } + + impl TryIntoJs for Arc { + type Output = JsValue; + fn try_into_js<'a>(self, cx: &mut impl Context<'a>) -> JsResult<'a, JsValue> { + let obj = self.as_ref().0.to_inner(cx); + obj.get_value(cx, PERMIT_DATA_FIELD) + } + } } diff --git a/packages/core-bridge/ts/index.ts b/packages/core-bridge/ts/index.ts index e4ab77eaf..2c44599fd 100644 --- a/packages/core-bridge/ts/index.ts +++ b/packages/core-bridge/ts/index.ts @@ -1,628 +1,18 @@ -import { LogLevel, Duration, SearchAttributeType } from '@temporalio/common'; -import type { TLSConfig, ProxyConfig, HttpConnectProxyConfig } from '@temporalio/common/lib/internal-non-workflow'; -import { WorkerTuner } from './worker-tuner'; -import { SearchAttributeKey } from '@temporalio/common/src/search-attributes'; +import * as native from './native'; +import * as errors from './errors'; export { - WorkerTuner, - SlotSupplier, - ResourceBasedSlotOptions, - ResourceBasedTunerOptions, - FixedSizeSlotSupplier, - CustomSlotSupplier, - SlotInfo, - WorkflowSlotInfo, - ActivitySlotInfo, - LocalActivitySlotInfo, - SlotMarkUsedContext, - SlotPermit, - SlotReserveContext, - SlotReleaseContext, -} from './worker-tuner'; - -export type { TLSConfig, ProxyConfig, HttpConnectProxyConfig }; - -/** @deprecated Import from @temporalio/common instead */ -export { LogLevel }; - -export interface RetryOptions { - /** Initial wait time before the first retry. */ - initialInterval: number; /** - * Randomization jitter that is used as a multiplier for the current retry interval - * and is added or subtracted from the interval length. + * @internal This module is not intended to be used directly. 
Any API provided + * by this package is internal and subject to change without notice. + * @hidden */ - randomizationFactor: number; - /** Rate at which retry time should be increased, until it reaches max_interval. */ - multiplier: number; - /** Maximum amount of time to wait between retries. */ - maxInterval: number; - /** Maximum total amount of time requests should be retried for, if None is set then no limit will be used. */ - maxElapsedTime?: number; - /** Maximum number of retry attempts. */ - maxRetries: number; -} + native, -export interface ClientOptions { /** - * The URL of the Temporal server to connect to + * @internal This module is not intended to be used directly. Any API provided + * by this package is internal and subject to change without notice. + * @hidden */ - url: string; - - /** Version string for the whole node SDK. Should never be set by user */ - sdkVersion: string; - - /** - * TLS configuration options. - * - * Pass undefined to use a non-encrypted connection or an empty object to - * connect with TLS without any customization. - */ - tls?: TLSConfig; - - /** - * Proxying configuration. - */ - proxy?: ProxyConfig; - - /** - * Optional retry options for server requests. - */ - retry?: RetryOptions; - - /** - * Optional mapping of gRPC metadata (HTTP headers) to send with each request to the server. - * - * Set statically at connection time, can be replaced later using {@link clientUpdateHeaders}. - */ - metadata?: Record; - - /** - * API key for Temporal. This becomes the "Authorization" HTTP header with "Bearer " prepended. - * This is only set if RPC metadata doesn't already have an "authorization" key. - * - * Set statically at connection time, can be replaced later using {@link clientUpdateApiKey}. - */ - apiKey?: string; - - /** - * If set to true, error code labels will not be included on request failure - * metrics emitted by this Client. 
- * - * @default false - */ - disableErrorCodeMetricTags?: boolean; -} - -/** - * Log directly to console - */ -export interface ConsoleLogger { - console: {}; // eslint-disable-line @typescript-eslint/no-empty-object-type -} - -/** - * Forward logs to {@link Runtime} logger - */ -export interface ForwardLogger { - forward: { - /** - * What level, if any, logs should be forwarded from core at - * - * @deprecated Use {@link TelemetryOptions.logging.filter} instead - */ - level?: LogLevel; - }; -} - -/** - * Logger types supported by Core - */ -export type Logger = ConsoleLogger | ForwardLogger; - -/** - * OpenTelemetry Collector options for exporting metrics or traces - */ -export interface OtelCollectorExporter { - otel: { - /** - * URL of a gRPC OpenTelemetry collector. - * - * Syntax generally looks like `http://server:4317` or `grpc://server:4317` for OTLP/gRPC exporters, - * or `http://server:4318/v1/metrics` for OTLP/HTTP exporters. Make sure to set the `http` option - * to `true` for OTLP/HTTP endpoints. - * - * @format Starts with "grpc://" or "http://" for an unsecured connection (typical), - * or "grpcs://" or "https://" for a TLS connection. - * @note The `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable, if set, will override this property. - */ - url: string; - - /** - * If set to true, the exporter will use OTLP/HTTP instead of OTLP/gRPC. - * - * @default false meaning that the exporter will use OTLP/gRPC. - */ - http?: boolean; - - /** - * Optional set of HTTP request headers to send to Collector (e.g. for authentication) - */ - headers?: Record; - - /** - * Specify how frequently in metrics should be exported. - * - * @format number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} - * @default 1 second - */ - metricsExportInterval?: Duration; - - /** - * If set to true, the exporter will use seconds for durations instead of milliseconds. 
- * - * @default false - */ - useSecondsForDurations?: boolean; - - /** - * Determines if the metrics exporter should use cumulative or delta temporality. - - * See the [OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification/blob/ce50e4634efcba8da445cc23523243cb893905cb/specification/metrics/datamodel.md#temporality) - * for more information. - * - * @default 'cumulative' - */ - temporality?: 'cumulative' | 'delta'; - - /** - * Overrides boundary values for histogram metrics. - * - * The key is the metric name and the value is the list of bucket boundaries. - * - * For example: - * - * ``` - * { - * "request_latency": [1, 5, 10, 25, 50, 100, 250, 500, 1000], - * } - * ``` - * - * The metric name will apply regardless of name prefixing. - * - * See [this doc](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/metrics/enum.Aggregation.html#variant.ExplicitBucketHistogram.field.boundaries) - * for the exact meaning of boundaries. - */ - histogramBucketOverrides?: Record; - }; -} - -interface CompiledOtelMetricsExporter { - otel: { - url: string; - http: boolean; - headers: Record | undefined; - metricsExportInterval: number; - useSecondsForDurations: boolean; - temporality: 'cumulative' | 'delta'; - histogramBucketOverrides: Record | undefined; - }; -} - -/** - * Prometheus metrics exporter options - */ -export interface PrometheusMetricsExporter { - prometheus: { - /** - * Address to bind the Prometheus HTTP metrics exporter server - * (for example, `0.0.0.0:1234`). - * - * Metrics will be available for scraping under the standard `/metrics` route. - */ - bindAddress: string; - /** - * If set to true, all counter names will include a "_total" suffix. - * - * @default false - */ - countersTotalSuffix?: boolean; - /** - * If set to true, all histograms will include the unit in their name as a suffix. 
- * EX: "_milliseconds" - * - * @default false - */ - unitSuffix?: boolean; - /** - * If set to true, the exporter will use seconds for durations instead of milliseconds. - * - * @default false - */ - useSecondsForDurations?: boolean; - - /** - * Overrides boundary values for histogram metrics. - * - * The key is the metric name and the value is the list of bucket boundaries. - * - * For example: - * - * ``` - * { - * "request_latency": [1, 5, 10, 25, 50, 100, 250, 500, 1000], - * } - * ``` - * - * The metric name will apply regardless of name prefixing. - * - * See [this doc](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/metrics/enum.Aggregation.html#variant.ExplicitBucketHistogram.field.boundaries) - * for the exact meaning of boundaries. - */ - histogramBucketOverrides?: Record; - }; -} - -interface CompiledPrometheusMetricsExporter { - prometheus: { - bindAddress: string; - countersTotalSuffix: boolean; - unitSuffix: boolean; - useSecondsForDurations: boolean; - histogramBucketOverrides: Record | undefined; - }; -} - -/** - * Metrics exporters supported by Core - */ -export type MetricsExporter = { - /** - * Determines if the metrics exporter should use cumulative or delta temporality. - * Only applies to OpenTelemetry exporter. - * - * @deprecated Use 'otel.temporality' instead - */ - temporality?: 'cumulative' | 'delta'; - - /** - * A prefix to add to all metrics. - * - * @default 'temporal_' - */ - metricPrefix?: string; - - /** - * Tags to add to all metrics emitted by the worker. - */ - globalTags?: Record; - - /** - * Whether to put the service_name on every metric. - * - * @default true - */ - attachServiceName?: boolean; -} & (PrometheusMetricsExporter | OtelCollectorExporter); - -export interface TelemetryOptions { - /** - * A string in the env filter format specified here: - * https://docs.rs/tracing-subscriber/0.2.20/tracing_subscriber/struct.EnvFilter.html - * - * Which determines what tracing data is collected in the Core SDK. 
- * - * @deprecated Use `logging.filter` instead - */ - tracingFilter?: string; - - /** - * If set true, do not prefix metrics with `temporal_`. - * - * @deprecated Use `metrics.metricPrefix` instead - */ - noTemporalPrefixForMetrics?: boolean; - - /** - * Control where to send Rust Core logs - */ - logging?: { - /** - * A string in (env filter format)[https://docs.rs/tracing-subscriber/0.2.20/tracing_subscriber/struct.EnvFilter.html] - * which determines the verboseness of logging output. - * - * You can use {@link Runtime.makeTelemetryFilterString()} to easily build a correctly formatted filter - * string based on desired log level for Core SDK and other native packages. - * - * **BACKWARD COMPATIBILITY** - * - * If `logging.filter` is missing, the following legacy values (if present) will be used instead (in the given order): - * - {@link ForwardLogger.forward.level} => `makeTelemetryFilterString({ core: level, other: level })` - * - {@link TelemetryOptions.tracingFilter} - * - Default value of `makeTelemetryFilterString({ core: 'WARN', other: 'ERROR'})` - * - * @default `makeTelemetryFilterString({ core: 'WARN', other: 'ERROR'})` (with some exceptions, as described in backward compatibility note above) - */ - filter?: string; - } & Partial; - - /** - * Control exporting {@link NativeConnection} and {@link Worker} metrics. - * - * Turned off by default - */ - metrics?: MetricsExporter; - - /** - * @deprecated Core SDK tracing is no longer supported. This option is ignored. 
- */ - tracing?: unknown; -} - -export type CompiledTelemetryOptions = { - logging: { - filter: string; - } & ( - | { console: {} /* eslint-disable-line @typescript-eslint/no-empty-object-type */ } - | { forward: {} /* eslint-disable-line @typescript-eslint/no-empty-object-type */ } - ); - metrics?: { - metricPrefix: string; - globalTags: Record | undefined; - attachServiceName: boolean; - } & (CompiledPrometheusMetricsExporter | CompiledOtelMetricsExporter); + errors, }; - -export interface WorkerOptions { - identity: string; - buildId: string; - useVersioning: boolean; - taskQueue: string; - tuner: WorkerTuner; - nonStickyToStickyPollRatio: number; - maxConcurrentWorkflowTaskPolls: number; - maxConcurrentActivityTaskPolls: number; - enableNonLocalActivities: boolean; - stickyQueueScheduleToStartTimeoutMs: number; - maxCachedWorkflows: number; - maxHeartbeatThrottleIntervalMs: number; - defaultHeartbeatThrottleIntervalMs: number; - maxTaskQueueActivitiesPerSecond?: number; - maxActivitiesPerSecond?: number; - shutdownGraceTimeMs: number; -} - -export type LogEntryMetadata = { - [key: string]: string | number | boolean | LogEntryMetadata; -}; - -export interface LogEntry { - /** Log message */ - message: string; - /** - * Time since epoch [seconds, nanos]. - * - * Should be switched to bigint once it is supported in neon. - */ - timestamp: [number, number]; - - /** Log level */ - level: LogLevel; - - /** Name of the Core subsystem that emitted that log entry */ - target: string; - - /*** Metadata fields */ - fields: LogEntryMetadata; -} - -/** - * Which version of the executable to run. - */ -export type EphemeralServerExecutable = - | { - type: 'cached-download'; - /** - * Download destination directory or the system's temp directory if none set. - */ - downloadDir?: string; - /** - * Optional version, can be set to a specific server release or "default" or "latest". 
- * - * At the time of writing the the server is released as part of the Java SDK - (https://github.com/temporalio/sdk-java/releases). - * - * @default "default" - get the best version for the current SDK version. - */ - version?: string; - /** How long to cache the download for. Undefined means forever. */ - ttlMs?: number; - } - | { - type: 'existing-path'; - /** Path to executable */ - path: string; - }; - -/** - * Configuration for the time-skipping test server. - */ -export interface TimeSkippingServerConfig { - type: 'time-skipping'; - executable?: EphemeralServerExecutable; - /** - * Optional port to listen on, defaults to find a random free port. - */ - port?: number; - /** - * Extra args to pass to the executable command. - * - * Note that the Test Server implementation may be changed to another one in the future. Therefore, there is - * no guarantee that server options, and particularly those provided through the `extraArgs` array, will continue to - * be supported in the future. - */ - extraArgs?: string[]; -} - -/** - * Configuration for the Temporal CLI Dev Server. - */ -export interface DevServerConfig { - type: 'dev-server'; - executable?: EphemeralServerExecutable; - /** - * Sqlite DB filename if persisting or non-persistent if none (default). - */ - dbFilename?: string; - /** - * Namespace to use - created at startup. - * - * @default "default" - */ - namespace?: string; - /** - * IP to bind to. - * - * @default localhost - */ - ip?: string; - /** - * Port to listen on; defaults to find a random free port. - */ - port?: number; - /** - * Whether to enable the UI. - * - * @default true if `uiPort` is set; defaults to `false` otherwise. - */ - ui?: boolean; - /** - * Port to listen on for the UI; if `ui` is true, defaults to `port + 1000`. - */ - uiPort?: number; - /** - * Log format and level - * @default { format: "pretty", level" "warn" } - */ - log?: { format: string; level: string }; - /** - * Extra args to pass to the executable command. 
- * - * Note that the Dev Server implementation may be changed to another one in the future. Therefore, there is no - * guarantee that Dev Server options, and particularly those provided through the `extraArgs` array, will continue to - * be supported in the future. - */ - extraArgs?: string[]; - /** - * Search attributes to be registered with the dev server. - */ - searchAttributes?: SearchAttributeKey[]; -} - -/** - * Configuration for spawning an ephemeral Temporal server. - * - * Both the time-skipping Test Server and Temporal CLI dev server are supported. - */ -export type EphemeralServerConfig = TimeSkippingServerConfig | DevServerConfig; - -export interface Worker { - type: 'Worker'; -} - -export interface Runtime { - type: 'Runtime'; -} - -export interface Client { - type: 'Client'; -} - -export interface EphemeralServer { - type: 'EphemeralServer'; -} - -export interface HistoryPusher { - type: 'HistoryPusher'; -} - -export interface ReplayWorker { - type: 'ReplayWorker'; - worker: Worker; - pusher: HistoryPusher; -} - -export declare type Callback = (err: Error, result: T) => void; -export declare type PollCallback = (err: Error, result: ArrayBuffer) => void; -export declare type WorkerCallback = (err: Error, result: Worker) => void; -export declare type ReplayWorkerCallback = (err: Error, worker: ReplayWorker) => void; -export declare type ClientCallback = (err: Error, result: Client) => void; -export declare type VoidCallback = (err: Error, result: void) => void; -export declare type LogsCallback = (err: Error, result: LogEntry[]) => void; - -export declare function newRuntime(telemOptions: CompiledTelemetryOptions): Runtime; - -export declare function newClient(runtime: Runtime, clientOptions: ClientOptions, callback: ClientCallback): void; - -export declare function newWorker(client: Client, workerOptions: WorkerOptions, callback: WorkerCallback): void; - -export declare function newReplayWorker( - runtime: Runtime, - workerOptions: WorkerOptions, - 
callback: ReplayWorkerCallback -): void; - -export declare function pushHistory( - pusher: HistoryPusher, - workflowId: string, - history: ArrayBuffer, - callback: VoidCallback -): void; - -export declare function closeHistoryStream(pusher: HistoryPusher): void; - -export declare function workerInitiateShutdown(worker: Worker, callback: VoidCallback): void; - -export declare function workerFinalizeShutdown(worker: Worker): void; - -export declare function clientUpdateHeaders( - client: Client, - headers: Record, - callback: VoidCallback -): void; - -export declare function clientUpdateApiKey(client: Client, apiKey: string, callback: VoidCallback): void; - -export declare function clientClose(client: Client): void; - -export declare function runtimeShutdown(runtime: Runtime, callback: VoidCallback): void; - -export declare function pollLogs(runtime: Runtime, callback: LogsCallback): void; - -export declare function workerPollWorkflowActivation(worker: Worker, callback: PollCallback): void; - -export declare function workerCompleteWorkflowActivation( - worker: Worker, - result: ArrayBuffer, - callback: VoidCallback -): void; - -export declare function workerPollActivityTask(worker: Worker, callback: PollCallback): void; - -export declare function workerCompleteActivityTask(worker: Worker, result: ArrayBuffer, callback: VoidCallback): void; - -export declare function workerRecordActivityHeartbeat(worker: Worker, heartbeat: ArrayBuffer): void; - -export declare function getTimeOfDay(): [number, number]; - -export declare function startEphemeralServer( - runtime: Runtime, - config: EphemeralServerConfig, - sdkVersion: string, - callback: Callback -): void; - -export declare function shutdownEphemeralServer(server: EphemeralServer, callback: Callback): void; - -export declare function getEphemeralServerTarget(server: EphemeralServer): string; - -export { ShutdownError, TransportError, UnexpectedError } from './errors'; diff --git a/packages/core-bridge/ts/native.ts 
b/packages/core-bridge/ts/native.ts new file mode 100644 index 000000000..08bd35109 --- /dev/null +++ b/packages/core-bridge/ts/native.ts @@ -0,0 +1,335 @@ +/** + * Indicates a property that is allowed to be unspecified when calling in or out of + * native code (the equivalent of the `Option` type in Rust). + * + * Always use either this type or the `T | null` idiom to indicate a property that may + * legitimately be left unspecified when calling into or out of native code. Never use + * `T | undefined` or `prop?: T` on TS/Rust interfaces. + * + * ### Rationale + * + * Differentiating between "a property that is set to an unspecified optional value" + * and "a non-existent property" allows eager detection of some of the most common + * bug patterns resulting from incoherencies between the JS and Rust type definitions + * (e.g. optional properties whose names differ between the two languages, or that + * are missing in the JS interface, etc.). + * + * Unfortunately, it is not possible at present in Neon to differentiate between + * a property that is set to `undefined` and a property that is missing; + * i.e. `obj.get_value(cx, "prop")` will return `undefined` in both cases. + * + * We therefore adhere to the following principles for our TypeScript/Rust interfaces: + * + * - Always use `null` to indicate an intentionally unspecified optional value + * in TypeScript interfaces. This will be converted to `None` on the Rust side. + * - Explicitly set _every_ property on objects sent to the native code, + * including optional properties (e.g. `{ prop: input.prop ?? null }`). + * - Never use the "optional property" syntax in TypeScript (i.e. `prop?: T`). + * + * Thanks to those conventions, a property that reads as `undefined` is known to always + * indicate an _unintentionally missing_ property, which will result in a runtime error. + */ +type Option = T | null; + +/** + * Marker for values that are transferred as JSON strings. 
+ */ +export type JsonString<_T> = string; + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Runtime +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export declare function newRuntime(telemOptions: RuntimeOptions): Runtime; +export declare function runtimeShutdown(runtime: Runtime): void; + +export interface Runtime { + type: 'runtime'; +} + +export type RuntimeOptions = { + logExporter: LogExporterOptions; + telemetry: { + metricPrefix: string; + attachServiceName: boolean; + }; + metricsExporter: MetricExporterOptions; +}; + +export type LogExporterOptions = + | { + type: 'console'; + filter: string; + } + | { + type: 'forward'; + filter: string; + receiver: (entries: JsonString[]) => void; + }; + +export type MetricExporterOptions = + | { + type: 'prometheus'; + bindAddress: string; + countersTotalSuffix: boolean; + unitSuffix: boolean; + useSecondsForDurations: boolean; + histogramBucketOverrides: Record; + globalTags: Record; + } + | { + type: 'otel'; + url: string; + protocol: 'http' | 'grpc'; + headers: Record; + metricsExportInterval: number; + useSecondsForDurations: boolean; + temporality: 'cumulative' | 'delta'; + histogramBucketOverrides: Record; + globalTags: Record; + } + | null; + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Client +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export declare function newClient(runtime: Runtime, clientOptions: ClientOptions): Promise; +export declare function clientUpdateHeaders(client: Client, headers: Record): void; +export declare function clientUpdateApiKey(client: Client, apiKey: string): void; +export declare function clientClose(client: Client): void; + +export interface Client { + type: 'client'; +} + +export interface ClientOptions { + url: string; + sdkVersion: string; + tls: 
Option; + proxy: Option; + metadata: Option>; + apiKey: Option; + disableErrorCodeMetricTags: boolean; +} + +export interface TLSConfig { + serverNameOverride: Option; + serverRootCaCertificate: Option; + clientCertPair: Option<{ + crt: Buffer; + key: Buffer; + }>; +} + +export interface ProxyConfig { + type: 'http-connect'; + targetHost: string; + basicAuth: Option<{ + username: string; + password: string; + }>; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Worker +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export declare function newWorker(client: Client, workerOptions: WorkerOptions): Worker; +export declare function workerValidate(worker: Worker): Promise; + +export declare function workerInitiateShutdown(worker: Worker): void; +export declare function workerFinalizeShutdown(worker: Worker): Promise; + +export declare function workerPollWorkflowActivation(worker: Worker): Promise; +export declare function workerCompleteWorkflowActivation(worker: Worker, result: Buffer): Promise; + +export declare function workerPollActivityTask(worker: Worker): Promise; +export declare function workerCompleteActivityTask(worker: Worker, result: Buffer): Promise; +export declare function workerRecordActivityHeartbeat(worker: Worker, heartbeat: Buffer): void; + +export interface Worker { + type: 'worker'; +} + +export interface WorkerOptions { + identity: string; + buildId: string; + useVersioning: boolean; + taskQueue: string; + namespace: string; + tuner: WorkerTunerOptions; + nonStickyToStickyPollRatio: number; + maxConcurrentWorkflowTaskPolls: number; + maxConcurrentActivityTaskPolls: number; + enableNonLocalActivities: boolean; + stickyQueueScheduleToStartTimeout: number; + maxCachedWorkflows: number; + maxHeartbeatThrottleInterval: number; + defaultHeartbeatThrottleInterval: number; + maxTaskQueueActivitiesPerSecond: Option; + maxActivitiesPerSecond: 
Option; + shutdownGraceTime: number; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Worker Tuner +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export interface WorkerTunerOptions { + workflowTaskSlotSupplier: SlotSupplierOptions; + activityTaskSlotSupplier: SlotSupplierOptions; + localActivityTaskSlotSupplier: SlotSupplierOptions; +} + +export type SlotSupplierOptions = + | FixedSizeSlotSupplierOptions + | ResourceBasedSlotSupplierOptions + | CustomSlotSupplierOptions; // FIXME: any? + +interface FixedSizeSlotSupplierOptions { + type: 'fixed-size'; + numSlots: number; +} + +interface ResourceBasedSlotSupplierOptions { + type: 'resource-based'; + minimumSlots: number; + maximumSlots: number; + rampThrottle: number; + tunerOptions: ResourceBasedTunerOptions; +} + +interface ResourceBasedTunerOptions { + targetMemoryUsage: number; + targetCpuUsage: number; +} + +export interface CustomSlotSupplierOptions { + type: 'custom'; + reserveSlot(ctx: SlotReserveContext, abortSignal: AbortSignal): Promise; + tryReserveSlot(ctx: SlotReserveContext): Option; + markSlotUsed(ctx: SlotMarkUsedContext): void; + releaseSlot(ctx: SlotReleaseContext): void; +} + +export type SlotInfo = + | { + type: 'workflow'; + workflowType: string; + isSticky: boolean; + } + | { + type: 'activity'; + activityType: string; + } + | { + type: 'local-activity'; + activityType: string; + } + | { + type: 'nexus'; + service: string; + operation: string; + }; + +export interface SlotReserveContext { + slotType: SlotInfo['type']; + taskQueue: string; + workerIdentity: string; + workerBuildId: string; + isSticky: boolean; +} + +export interface SlotMarkUsedContext { + slotInfo: SI; + permit: SlotPermit; +} + +export interface SlotReleaseContext { + slotInfo: Option; + permit: SlotPermit; +} + +// eslint-disable-next-line @typescript-eslint/no-empty-object-type +export interface SlotPermit 
{} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// ReplayWorker +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export declare function newReplayWorker(runtime: Runtime, workerOptions: WorkerOptions): [Worker, HistoryPusher]; +export declare function pushHistory(pusher: HistoryPusher, workflowId: string, history: Buffer): Promise; +export declare function closeHistoryStream(pusher: HistoryPusher): void; + +export interface HistoryPusher { + type: 'history-pusher'; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Log Forwarding +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// export declare function pollLogs(runtime: Runtime): LogEntry[]; +export declare function getTimeOfDay(): bigint; + +export interface LogEntry { + message: string; + timestamp: string; // u128 as a string - JSON doesn't support u128 numbers + level: LogLevel; + target: string; + fields: LogEntryMetadata; +} + +type LogLevel = 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'; + +type LogEntryMetadata = { + [key: string]: string | number | boolean | LogEntryMetadata; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Ephemeral Server +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export interface EphemeralServer { + type: 'ephemeral-server'; +} + +export declare function startEphemeralServer(runtime: Runtime, config: EphemeralServerConfig): Promise; +export declare function getEphemeralServerTarget(server: EphemeralServer): string; +export declare function shutdownEphemeralServer(server: EphemeralServer): Promise; + +export type EphemeralServerConfig = TimeSkippingServerConfig | DevServerConfig; + +export interface TimeSkippingServerConfig { + type: 
'time-skipping'; + executable: EphemeralServerExecutableConfig; + port: Option; + extraArgs: string[]; +} + +export interface DevServerConfig { + type: 'dev-server'; + executable: EphemeralServerExecutableConfig; + ip: string; + port: Option; + ui: boolean; + uiPort: Option; + namespace: string; + dbFilename: Option; + log: { format: string; level: string }; + extraArgs: string[]; +} + +export type EphemeralServerExecutableConfig = + | { + type: 'cached-download'; + downloadDir: Option; + version: string; + ttl: number; + sdkVersion: string; + } + | { + type: 'existing-path'; + path: string; + }; diff --git a/packages/core-bridge/ts/worker-tuner.ts b/packages/core-bridge/ts/worker-tuner.ts deleted file mode 100644 index e3327c5ef..000000000 --- a/packages/core-bridge/ts/worker-tuner.ts +++ /dev/null @@ -1,211 +0,0 @@ -/** - * A worker tuner allows the customization of the performance characteristics of workers by - * controlling how "slots" are handed out for different task types. In order to poll for and then - * run tasks, a slot must first be reserved by the {@link SlotSupplier} returned by the tuner. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export interface WorkerTuner { - workflowTaskSlotSupplier: SlotSupplier; - activityTaskSlotSupplier: SlotSupplier; - localActivityTaskSlotSupplier: SlotSupplier; -} - -export type SlotInfo = WorkflowSlotInfo | ActivitySlotInfo | LocalActivitySlotInfo; - -export interface WorkflowSlotInfo { - type: 'workflow'; - workflowId: string; - runId: string; -} - -export interface ActivitySlotInfo { - type: 'activity'; - activityId: string; -} - -export interface LocalActivitySlotInfo { - type: 'local-activity'; - activityId: string; -} - -/** - * Controls how slots are handed out for a specific task type. - * - * For now, only {@link ResourceBasedSlotOptions} and {@link FixedSizeSlotSupplier} are supported, - * but we may add support for custom tuners in the future. 
- * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export type SlotSupplier = ResourceBasedSlotsForType | FixedSizeSlotSupplier | CustomSlotSupplier; - -/** - * Options for a specific slot type within a {@link ResourceBasedSlotsForType} - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export interface ResourceBasedSlotOptions { - // Amount of slots that will be issued regardless of any other checks - minimumSlots: number; - // Maximum amount of slots permitted - maximumSlots: number; - // Minimum time we will wait (after passing the minimum slots number) between handing out new - // slots in milliseconds. - rampThrottleMs: number; -} - -/** - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -type ResourceBasedSlotsForType = ResourceBasedSlotOptions & { - type: 'resource-based'; - tunerOptions: ResourceBasedTunerOptions; -}; - -/** - * Options for a {@link ResourceBasedTuner} to control target resource usage - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export interface ResourceBasedTunerOptions { - // A value between 0 and 1 that represents the target (system) memory usage. It's not recommended - // to set this higher than 0.8, since how much memory a workflow may use is not predictable, and - // you don't want to encounter OOM errors. - targetMemoryUsage: number; - // A value between 0 and 1 that represents the target (system) CPU usage. This can be set to 1.0 - // if desired, but it's recommended to leave some headroom for other processes. - targetCpuUsage: number; -} - -/** - * A fixed-size slot supplier that will never issue more than a fixed number of slots. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. 
- */ -export interface FixedSizeSlotSupplier { - type: 'fixed-size'; - // The maximum number of slots that can be issued - numSlots: number; -} - -/** - * The interface can be implemented to provide custom slot supplier behavior. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export interface CustomSlotSupplier { - type: 'custom'; - - /** - * This function is called before polling for new tasks. Your implementation should block until a - * slot is available then return a permit to use that slot. - * - * The only acceptable exception to throw is AbortError, any other exceptions thrown will be - * logged and ignored. - * - * The value inside the returned promise should be an object, however other types will still count - * as having issued a permit. Including undefined or null. Returning undefined or null does *not* - * mean you have not issued a permit. Implementations are expected to block until a meaningful - * permit can be issued. - * - * @param ctx The context for slot reservation. - * @param abortSignal The SDK may decide to abort the reservation request if it's no longer - * needed. Implementations may clean up and then must reject the promise with AbortError. - * @returns A permit to use the slot which may be populated with your own data. - */ - reserveSlot(ctx: SlotReserveContext, abortSignal: AbortSignal): Promise; - - /** - * This function is called when trying to reserve slots for "eager" workflow and activity tasks. - * Eager tasks are those which are returned as a result of completing a workflow task, rather than - * from polling. Your implementation must not block, and if a slot is available, return a permit - * to use that slot. - * - * @param ctx The context for slot reservation. - * @returns Maybe a permit to use the slot which may be populated with your own data. 
- */ - tryReserveSlot(ctx: SlotReserveContext): SlotPermit | null; - - /** - * This function is called once a slot is actually being used to process some task, which may be - * some time after the slot was reserved originally. For example, if there is no work for a - * worker, a number of slots equal to the number of active pollers may already be reserved, but - * none of them are being used yet. This call should be non-blocking. - * - * @param ctx The context for marking a slot as used. - */ - markSlotUsed(slot: SlotMarkUsedContext): void; - - /** - * This function is called once a permit is no longer needed. This could be because the task has - * finished, whether successfully or not, or because the slot was no longer needed (ex: the number - * of active pollers decreased). This call should be non-blocking. - * - * @param ctx The context for releasing a slot. - */ - releaseSlot(slot: SlotReleaseContext): void; -} - -/** - * A permit to use a slot. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -// eslint-disable-next-line @typescript-eslint/no-empty-object-type -export interface SlotPermit {} - -export interface SlotReserveContext { - /** - * The type of slot trying to be reserved - */ - slotType: SlotInfo['type']; - /** - * The name of the task queue for which this reservation request is associated - */ - taskQueue: string; - /** - * The identity of the worker that is requesting the reservation - */ - workerIdentity: string; - /** - * The build id of the worker that is requesting the reservation - */ - workerBuildId: string; - /** - * True iff this is a reservation for a sticky poll for a workflow task - */ - isSticky: boolean; -} - -/** - * Context for marking a slot as used. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. 
- */ -export interface SlotMarkUsedContext { - /** - * Info about the task that will be using the slot - */ - slotInfo: SI; - /** - * The permit that was issued when the slot was reserved - */ - permit: SlotPermit; -} - -/** - * Context for releasing a slot. - * - * @experimental Worker Tuner is an experimental feature and may be subject to change. - */ -export interface SlotReleaseContext { - /** - * Info about the task that used this slot, if any. A slot may be released without being used in - * the event a poll times out. - */ - slotInfo?: SI; - /** - * The permit that was issued when the slot was reserved - */ - permit: SlotPermit; -} diff --git a/packages/test/src/helpers-integration.ts b/packages/test/src/helpers-integration.ts index e932a1f66..7abcbacd7 100644 --- a/packages/test/src/helpers-integration.ts +++ b/packages/test/src/helpers-integration.ts @@ -54,7 +54,7 @@ function setupRuntime(recordedLogs?: { [workflowId: string]: LogEntry[] }) { recordedLogs![workflowId] ??= []; recordedLogs![workflowId].push(entry); }) - : new DefaultLogger((process.env.TEST_LOG_LEVEL || 'DEBUG').toUpperCase() as LogLevel); + : new DefaultLogger((process.env.TEST_LOG_LEVEL || 'WARN').toUpperCase() as LogLevel); Runtime.install({ logger, telemetryOptions: { diff --git a/packages/test/src/load/worker.ts b/packages/test/src/load/worker.ts index 8bc50874a..379e18455 100644 --- a/packages/test/src/load/worker.ts +++ b/packages/test/src/load/worker.ts @@ -5,13 +5,14 @@ import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc'; import * as opentelemetry from '@opentelemetry/sdk-node'; import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions'; import arg from 'arg'; -import { LogLevel, TelemetryOptions } from '@temporalio/core-bridge'; import { Connection } from '@temporalio/client'; import { DefaultLogger, LogEntry, + LogLevel, NativeConnection, Runtime, + TelemetryOptions, Worker, makeTelemetryFilterString, } from '@temporalio/worker'; 
diff --git a/packages/test/src/mock-native-worker.ts b/packages/test/src/mock-native-worker.ts index a0946071d..385233e97 100644 --- a/packages/test/src/mock-native-worker.ts +++ b/packages/test/src/mock-native-worker.ts @@ -34,14 +34,14 @@ export type Task = | { activity: coresdk.activity_task.IActivityTask }; export class MockNativeWorker implements NativeWorkerLike { - public readonly type = 'Worker'; + public readonly type = 'worker'; flushCoreLogs(): void { // noop } - activityTasks: Array> = []; - workflowActivations: Array> = []; - activityCompletionCallback?: (arr: ArrayBuffer) => void; - workflowCompletionCallback?: (arr: ArrayBuffer) => void; + activityTasks: Array> = []; + workflowActivations: Array> = []; + activityCompletionCallback?: (arr: Buffer) => void; + workflowCompletionCallback?: (arr: Buffer) => void; activityHeartbeatCallback?: (taskToken: Uint8Array, details: any) => void; reject?: (err: Error) => void; namespace = 'mock'; @@ -52,14 +52,14 @@ export class MockNativeWorker implements NativeWorkerLike { } public static async createReplay(): Promise { - return { worker: new this(), historyPusher: { type: 'HistoryPusher' } }; + return { worker: new this(), historyPusher: { type: 'history-pusher' } }; } public async finalizeShutdown(): Promise { // Nothing to do here } - public async initiateShutdown(): Promise { + public initiateShutdown(): void { const shutdownErrorPromise = Promise.reject(new ShutdownError('Core is shut down')); shutdownErrorPromise.catch(() => { /* avoid unhandled rejection */ @@ -68,7 +68,7 @@ export class MockNativeWorker implements NativeWorkerLike { this.workflowActivations.unshift(shutdownErrorPromise); } - public async pollWorkflowActivation(): Promise { + public async pollWorkflowActivation(): Promise { for (;;) { const task = this.workflowActivations.pop(); if (task !== undefined) { @@ -78,7 +78,7 @@ export class MockNativeWorker implements NativeWorkerLike { } } - public async pollActivityTask(): Promise { + public 
async pollActivityTask(): Promise { for (;;) { const task = this.activityTasks.pop(); if (task !== undefined) { @@ -88,12 +88,12 @@ export class MockNativeWorker implements NativeWorkerLike { } } - public async completeWorkflowActivation(result: ArrayBuffer): Promise { + public async completeWorkflowActivation(result: Buffer): Promise { this.workflowCompletionCallback!(result); this.workflowCompletionCallback = undefined; } - public async completeActivityTask(result: ArrayBuffer): Promise { + public async completeActivityTask(result: Buffer): Promise { this.activityCompletionCallback!(result); this.activityCompletionCallback = undefined; } @@ -116,7 +116,7 @@ export class MockNativeWorker implements NativeWorkerLike { ): Promise { const arr = coresdk.workflow_activation.WorkflowActivation.encode(activation).finish(); const buffer = byteArrayToBuffer(arr); - const result = await new Promise((resolve) => { + const result = await new Promise((resolve) => { this.workflowCompletionCallback = resolve; this.workflowActivations.unshift(Promise.resolve(buffer)); }); @@ -127,14 +127,14 @@ export class MockNativeWorker implements NativeWorkerLike { addActivityStartDefaults(task); const arr = coresdk.activity_task.ActivityTask.encode(task).finish(); const buffer = byteArrayToBuffer(arr); - const result = await new Promise((resolve) => { + const result = await new Promise((resolve) => { this.activityCompletionCallback = resolve; this.activityTasks.unshift(Promise.resolve(buffer)); }); return coresdk.ActivityTaskCompletion.decodeDelimited(new Uint8Array(result)); } - public recordActivityHeartbeat(buffer: ArrayBuffer): void { + public recordActivityHeartbeat(buffer: Buffer): void { const { taskToken, details } = coresdk.ActivityHeartbeat.decodeDelimited(new Uint8Array(buffer)); const arg = fromPayloadsAtIndex(defaultPayloadConverter, 0, details); this.activityHeartbeatCallback!(taskToken, arg); @@ -159,12 +159,13 @@ export class Worker extends RealWorker { } public 
constructor(workflowCreator: WorkflowCreator, opts: CompiledWorkerOptions) { - const logger = withMetadata(Runtime.instance().logger, { + const runtime = Runtime.instance(); + const logger = withMetadata(runtime.logger, { sdkComponent: SdkComponent.worker, taskQueue: opts.taskQueue, }); const nativeWorker = new MockNativeWorker(); - super(nativeWorker, workflowCreator, opts, logger); + super(runtime, nativeWorker, workflowCreator, opts, logger); } public runWorkflows(...args: Parameters): Promise { diff --git a/packages/test/src/test-bridge.ts b/packages/test/src/test-bridge.ts new file mode 100644 index 000000000..5b2fffd2c --- /dev/null +++ b/packages/test/src/test-bridge.ts @@ -0,0 +1,244 @@ +import { setTimeout } from 'node:timers/promises'; +import ms from 'ms'; +import test from 'ava'; +import { native, errors } from '@temporalio/core-bridge'; + +// TESTING NOTES +// +// - Tests in this file requires an external Temporal server to be running, because using the ephemeral +// server support provided by Core SDK would affect the behavior that we're testing here. +// - Tests in this file can't be run in parallel, since the bridge is mostly a singleton. +// - Some of these tests explicitly use the native bridge, without going through the lang side Runtime/Worker. 
+ +test('Can instantiate and shutdown the native runtime', async (t) => { + const runtime = native.newRuntime(GenericConfigs.runtime.basic); + t.is(typeof runtime, 'object'); + native.runtimeShutdown(runtime); + + // Pass this point, any operation on the runtime should throw + + t.throws(() => native.newClient(runtime, GenericConfigs.client.basic), { + instanceOf: errors.IllegalStateError, + message: 'Runtime already closed', + }); + + // Trying to shutdown the runtime a second time should throw + t.throws(() => native.runtimeShutdown(runtime), { + instanceOf: errors.IllegalStateError, + message: 'Runtime already closed', + }); +}); + +test('Can run multiple runtime concurrently', async (t) => { + const runtime1 = native.newRuntime(GenericConfigs.runtime.basic); + const runtime2 = native.newRuntime(GenericConfigs.runtime.basic); + const runtime3 = native.newRuntime(GenericConfigs.runtime.basic); + + // Order is intentionally random - distinct runtimes are expected to be independent + const _client2 = await native.newClient(runtime3, GenericConfigs.client.basic); + const _client1 = await native.newClient(runtime1, GenericConfigs.client.basic); + const _client3 = await native.newClient(runtime2, GenericConfigs.client.basic); + + native.runtimeShutdown(runtime1); + native.runtimeShutdown(runtime3); + + await t.throwsAsync(async () => await native.newClient(runtime1, GenericConfigs.client.basic), { + instanceOf: errors.IllegalStateError, + message: 'Runtime already closed', + }); + + const _client5 = await native.newClient(runtime2, GenericConfigs.client.basic); + + native.runtimeShutdown(runtime2); + + t.pass(); +}); + +test('Missing/invalid properties in config throws appropriately', async (t) => { + // required string = undefined ==> missing property + t.throws( + () => + native.newRuntime({ + ...GenericConfigs.runtime.basic, + logExporter: { + type: 'forward', + // @ts-expect-error 2322 + filter: undefined, + }, + }), + { + instanceOf: TypeError, + message: "fn 
runtime_new.args[0].logExporter.forward.filter: Missing property 'filter'", + } + ); + + // required string = null ==> failed to downcast + t.throws( + () => + native.newRuntime({ + ...GenericConfigs.runtime.basic, + logExporter: { + type: 'forward', + // @ts-expect-error 2322 + filter: null, + }, + }), + { + instanceOf: TypeError, + // FIXME: should say "failed to downcast _null_ to string" + message: 'fn runtime_new.args[0].logExporter.forward.filter: failed to downcast any to string', + } + ); + + // required string = number ==> failed to downcast + t.throws( + () => + native.newRuntime({ + ...GenericConfigs.runtime.basic, + logExporter: { + type: 'forward', + // @ts-expect-error 2322 + filter: 1234, + }, + }), + { + instanceOf: TypeError, + // FIXME: should say "failed to downcast _number_ to string" + message: 'fn runtime_new.args[0].logExporter.forward.filter: failed to downcast any to string', + } + ); + + // optional object = undefined ==> missing property + t.throws( + () => + native.newRuntime({ + ...GenericConfigs.runtime.basic, + // @ts-expect-error 2322 + metricsExporter: undefined, + }), + { + instanceOf: TypeError, + message: "fn runtime_new.args[0].metricsExporter: Missing property 'metricsExporter'", + } + ); +}); + +test(`get_time_of_day() returns a bigint`, async (t) => { + const time_1 = await native.getTimeOfDay(); + const time_2 = await native.getTimeOfDay(); + await setTimeout(100); + const time_3 = await native.getTimeOfDay(); + + t.is(typeof time_1, 'bigint'); + t.true(time_1 < time_2); + t.true(time_2 + 100_000_000n < time_3); // At least 100ms passed +}); + +test("Creating Runtime without shutting it down doesn't hang process", (t) => { + const _runtime = native.newRuntime(GenericConfigs.runtime.basic); + t.pass(); +}); + +test("Dropping Client without closing doesn't hang process", (t) => { + const runtime = native.newRuntime(GenericConfigs.runtime.basic); + const _client = native.newClient(runtime, GenericConfigs.client.basic); + 
t.pass(); +}); + +test("Dropping Worker without shutting it down doesn't hang process", async (t) => { + const runtime = native.newRuntime(GenericConfigs.runtime.basic); + const client = await native.newClient(runtime, GenericConfigs.client.basic); + const worker = native.newWorker(client, GenericConfigs.worker.basic); + await native.workerValidate(worker); + t.pass(); +}); + +// FIXME: Not hanging, but server is left running. Should try to kill process on Finalize? +test.skip("Dropping EphemeralServer without shutting it down doesn't hang process", async (t) => { + const runtime = native.newRuntime(GenericConfigs.runtime.basic); + const _ephemeralServer = await native.startEphemeralServer(runtime, GenericConfigs.ephemeralServer.basic); + t.pass(); +}); + +// Sample configs /////////////////////////////////////////////////////////////////////////////////// + +const GenericConfigs = { + runtime: { + basic: { + logExporter: { + type: 'console', + filter: 'ERROR', + }, + telemetry: { + metricPrefix: 'test', + attachServiceName: false, + }, + metricsExporter: null, + } satisfies native.RuntimeOptions, + }, + client: { + basic: { + url: 'http://127.0.0.1:7233', + sdkVersion: '1.0.0', + tls: null, + proxy: null, + metadata: null, + apiKey: null, + disableErrorCodeMetricTags: false, + } satisfies native.ClientOptions, + }, + worker: { + basic: { + taskQueue: 'default', + identity: 'test-worker', + buildId: 'test-build-id', + useVersioning: false, + namespace: 'default', + tuner: { + workflowTaskSlotSupplier: { + type: 'fixed-size', + numSlots: 1, + }, + activityTaskSlotSupplier: { + type: 'fixed-size', + numSlots: 1, + }, + localActivityTaskSlotSupplier: { + type: 'fixed-size', + numSlots: 1, + }, + }, + nonStickyToStickyPollRatio: 0.5, + maxConcurrentWorkflowTaskPolls: 1, + maxConcurrentActivityTaskPolls: 1, + enableNonLocalActivities: false, + stickyQueueScheduleToStartTimeout: 1000, + maxCachedWorkflows: 1000, + maxHeartbeatThrottleInterval: 1000, + 
defaultHeartbeatThrottleInterval: 1000, + maxTaskQueueActivitiesPerSecond: null, + maxActivitiesPerSecond: null, + shutdownGraceTime: 1000, + } satisfies native.WorkerOptions, + }, + ephemeralServer: { + basic: { + type: 'dev-server', + executable: { + type: 'cached-download', + downloadDir: null, + version: 'default', + ttl: ms('1y'), + sdkVersion: '1.0.0', + }, + ip: '127.0.0.1', + port: null, + ui: false, + uiPort: null, + namespace: 'default', + dbFilename: null, + log: { format: 'json', level: 'info' }, + extraArgs: [], + } satisfies native.EphemeralServerConfig, + }, +} as const; diff --git a/packages/test/src/test-ephemeral-server.ts b/packages/test/src/test-ephemeral-server.ts index 2deddd019..4930b2639 100644 --- a/packages/test/src/test-ephemeral-server.ts +++ b/packages/test/src/test-ephemeral-server.ts @@ -82,7 +82,10 @@ test('TestEnvironment sets up dev server with db filename', async (t) => { }); test('TestEnvironment sets up dev server with custom port and ui', async (t) => { - const port = await getRandomPort(); + // FIXME: We'd really need to assert that the UI port is not being used by another process. 
+ let port = await getRandomPort(); + if (port > 65535 - 1000) port = 65535 - 1000; + const testEnv = await TestWorkflowEnvironment.createLocal({ server: { ip: '127.0.0.1', @@ -125,3 +128,18 @@ test('TestEnvironment sets up dev server with custom ui port', async (t) => { await testEnv.teardown(); } }); + +test("TestEnvironment doesn't hang on fail to download", async (t) => { + try { + await TestWorkflowEnvironment.createLocal({ + server: { + executable: { + type: 'cached-download', + version: '999.999.999', + }, + }, + }); + } catch (_e) { + t.pass(); + } +}); diff --git a/packages/test/src/test-native-connection.ts b/packages/test/src/test-native-connection.ts index d68aa3980..dfbdb830c 100644 --- a/packages/test/src/test-native-connection.ts +++ b/packages/test/src/test-native-connection.ts @@ -48,7 +48,7 @@ test('NativeConnection.connect() throws meaningful error when passed invalid add test('NativeConnection.connect() throws meaningful error when passed invalid clientCertPair', async (t) => { await t.throwsAsync(NativeConnection.connect({ tls: { clientCertPair: {} as any } }), { instanceOf: TypeError, - message: 'Invalid or missing serverOptions.tls.clientCertPair.crt', + message: /tls\.clientCertPair\.crt: Missing property/, }); }); diff --git a/packages/test/src/test-otel.ts b/packages/test/src/test-otel.ts index b87925de0..369e8b547 100644 --- a/packages/test/src/test-otel.ts +++ b/packages/test/src/test-otel.ts @@ -58,7 +58,16 @@ async function withFakeGrpcServer( }); fn(addr.port) .catch((e) => reject(e)) - .finally(() => srv.close((_) => resolve())); + .finally(() => { + resolve(); + + // The OTel exporter will try to flush metrics on drop, which may result in tons of ERROR + // messages on the console if the server has had time to complete shutdown before then. + // Delaying closing the server by 1 second is enough to avoid that situation. 
+ setTimeout(() => { + srv.close(); + }, 1000).unref(); + }); }); }); } @@ -81,7 +90,16 @@ async function withHttpServer( }); fn(addr.port) .catch((e) => reject(e)) - .finally(() => srv.close((_) => resolve())); + .finally(() => { + resolve(); + + // The OTel exporter will try to flush metrics on drop, which may result in ERROR messages + // on the console if the server has had time to complete shutdown before then. + // Not really a problem, but delaying closing the server by 1 second helps avoid the error. + setTimeout(() => { + srv.close(); + }, 1000).unref(); + }); }); }); } @@ -89,7 +107,7 @@ async function withHttpServer( test.serial('Runtime.install() throws meaningful error when passed invalid metrics.otel.url', async (t) => { t.throws(() => Runtime.install({ telemetryOptions: { metrics: { otel: { url: ':invalid' } } } }), { instanceOf: TypeError, - message: /Invalid telemetryOptions.metrics.otel.url/, + message: /metricsExporter.otel.url/, }); }); @@ -106,106 +124,102 @@ test.serial('Runtime.install() accepts metrics.otel.url without headers', async test.serial('Exporting OTEL metrics from Core works', async (t) => { let resolveCapturedRequest = (_req: http2.Http2ServerRequest) => undefined as void; const capturedRequest = new Promise((r) => (resolveCapturedRequest = r)); - try { - await withFakeGrpcServer(async (port: number) => { - Runtime.install({ - telemetryOptions: { - metrics: { - otel: { - url: `http://127.0.0.1:${port}`, - headers: { - 'x-test-header': 'test-value', - }, - metricsExportInterval: 10, + + await withFakeGrpcServer(async (port: number) => { + Runtime.install({ + telemetryOptions: { + metrics: { + otel: { + url: `http://127.0.0.1:${port}`, + headers: { + 'x-test-header': 'test-value', }, + metricsExportInterval: 10, }, }, - }); + }, + }); - const localEnv = await TestWorkflowEnvironment.createLocal(); - try { - const worker = await Worker.create({ - connection: localEnv.nativeConnection, - workflowsPath: require.resolve('./workflows'), 
+ const localEnv = await TestWorkflowEnvironment.createLocal(); + try { + const worker = await Worker.create({ + connection: localEnv.nativeConnection, + workflowsPath: require.resolve('./workflows'), + taskQueue: 'test-otel', + }); + const client = new WorkflowClient({ + connection: localEnv.connection, + }); + await worker.runUntil(async () => { + await client.execute(workflows.successString, { taskQueue: 'test-otel', + workflowId: uuid4(), }); - const client = new WorkflowClient({ - connection: localEnv.connection, - }); - await worker.runUntil(async () => { - await client.execute(workflows.successString, { - taskQueue: 'test-otel', - workflowId: uuid4(), - }); - const req = await Promise.race([ - capturedRequest, - await new Promise((resolve) => setTimeout(() => resolve(undefined), 2000)), - ]); - t.truthy(req); - t.is(req?.url, '/opentelemetry.proto.collector.metrics.v1.MetricsService/Export'); - t.is(req?.headers['x-test-header'], 'test-value'); - }); - } finally { - await localEnv.teardown(); - } - }, resolveCapturedRequest); - } finally { - // Cleanup the runtime so that it doesn't interfere with other tests - await Runtime._instance?.shutdown(); - } + const req = await Promise.race([ + capturedRequest, + await new Promise((resolve) => setTimeout(() => resolve(undefined), 2000)), + ]); + t.truthy(req); + t.is(req?.url, '/opentelemetry.proto.collector.metrics.v1.MetricsService/Export'); + t.is(req?.headers['x-test-header'], 'test-value'); + }); + } finally { + await localEnv.teardown(); + // Cleanup the runtime so that it doesn't interfere with other tests + await Runtime._instance?.shutdown(); + } + }, resolveCapturedRequest); }); test.serial('Exporting OTEL metrics using OTLP/HTTP from Core works', async (t) => { let resolveCapturedRequest = (_req: http.IncomingMessage) => undefined as void; const capturedRequest = new Promise((r) => (resolveCapturedRequest = r)); - try { - await withHttpServer(async (port: number) => { - Runtime.install({ - 
telemetryOptions: { - metrics: { - otel: { - url: `http://127.0.0.1:${port}/v1/metrics`, - http: true, - headers: { - 'x-test-header': 'test-value', - }, - metricsExportInterval: 10, + // try { + await withHttpServer(async (port: number) => { + Runtime.install({ + telemetryOptions: { + metrics: { + otel: { + url: `http://127.0.0.1:${port}/v1/metrics`, + http: true, + headers: { + 'x-test-header': 'test-value', }, + metricsExportInterval: 10, }, }, - }); + }, + }); - const localEnv = await TestWorkflowEnvironment.createLocal(); - try { - const worker = await Worker.create({ - connection: localEnv.nativeConnection, - workflowsPath: require.resolve('./workflows'), + const localEnv = await TestWorkflowEnvironment.createLocal(); + try { + const worker = await Worker.create({ + connection: localEnv.nativeConnection, + workflowsPath: require.resolve('./workflows'), + taskQueue: 'test-otel', + }); + const client = new WorkflowClient({ + connection: localEnv.connection, + }); + await worker.runUntil(async () => { + await client.execute(workflows.successString, { taskQueue: 'test-otel', + workflowId: uuid4(), }); - const client = new WorkflowClient({ - connection: localEnv.connection, - }); - await worker.runUntil(async () => { - await client.execute(workflows.successString, { - taskQueue: 'test-otel', - workflowId: uuid4(), - }); - const req = await Promise.race([ - capturedRequest, - await new Promise((resolve) => setTimeout(() => resolve(undefined), 2000)), - ]); - t.truthy(req); - t.is(req?.url, '/v1/metrics'); - t.is(req?.headers['x-test-header'], 'test-value'); - }); - } finally { - await localEnv.teardown(); - } - }, resolveCapturedRequest); - } finally { - // Cleanup the runtime so that it doesn't interfere with other tests - await Runtime._instance?.shutdown(); - } + const req = await Promise.race([ + capturedRequest, + await new Promise((resolve) => setTimeout(() => resolve(undefined), 2000)), + ]); + t.truthy(req); + t.is(req?.url, '/v1/metrics'); + 
t.is(req?.headers['x-test-header'], 'test-value'); + }); + } finally { + await localEnv.teardown(); + // Cleanup the runtime so that it doesn't interfere with other tests + await Runtime._instance?.shutdown(); + } + }, resolveCapturedRequest); }); if (RUN_INTEGRATION_TESTS) { @@ -365,8 +379,10 @@ } }); - // Un-skip this test and run it by hand to inspect outputted traces - test.serial('Otel spans connected', async (t) => { + // FIXME: This test takes ~9 seconds to complete on my local machine, even + more in CI, and yet, it doesn't really do any assertion by itself. + // To be revisited at a later time. + test.skip('Otel spans connected', async (t) => { const logger = new DefaultLogger('DEBUG'); Runtime.install({ logger, diff --git a/packages/test/src/test-prometheus.ts b/packages/test/src/test-prometheus.ts index 8cbd3af6a..38edc6fe3 100644 --- a/packages/test/src/test-prometheus.ts +++ b/packages/test/src/test-prometheus.ts @@ -9,7 +9,7 @@ import * as workflows from './workflows'; test.serial('Runtime.install() throws meaningful error when passed invalid metrics.prometheus.bindAddress', (t) => { t.throws(() => Runtime.install({ telemetryOptions: { metrics: { prometheus: { bindAddress: ':invalid' } } } }), { instanceOf: TypeError, - message: 'Invalid telemetryOptions.metrics.prometheus.bindAddress', + message: /metricsExporter.prometheus.bindAddress/, }); }); diff --git a/packages/test/src/test-runtime.ts b/packages/test/src/test-runtime.ts index 4ac9bdd8b..8c030795c 100644 --- a/packages/test/src/test-runtime.ts +++ b/packages/test/src/test-runtime.ts @@ -82,10 +82,6 @@ if (RUN_INTEGRATION_TESTS) { telemetryOptions: { logging: { forward: {}, filter: makeTelemetryFilterString({ core: 'DEBUG' }) } }, }); try { - { - const runtime = Runtime.instance(); - t.is(runtime.options.logger, logger); - } await new Client().workflow.start('not-existant', { taskQueue: 'q1', workflowId: uuid4() }); const worker = await Worker.create({ 
...defaultOptions, @@ -97,7 +93,7 @@ if (RUN_INTEGRATION_TESTS) { if (!logEntries.some((x) => x.message === 'Failing workflow task')) throw new Error('Waiting for failing workflow task'); }, - { maxTimeout: 5000, retries: 50 } + { maxTimeout: 200, minTimeout: 20, retries: 40 } ) ); @@ -117,11 +113,4 @@ if (RUN_INTEGRATION_TESTS) { await Runtime.instance().shutdown(); } }); - - test.serial('Runtime.install() throws meaningful error when passed invalid telemetryOptions.logging.filter', (t) => { - t.throws(() => Runtime.install({ telemetryOptions: { logging: { filter: 2 as any } } }), { - instanceOf: TypeError, - message: 'Invalid filter', - }); - }); } diff --git a/packages/test/src/test-sinks.ts b/packages/test/src/test-sinks.ts index f3991fccb..b7b18f263 100644 --- a/packages/test/src/test-sinks.ts +++ b/packages/test/src/test-sinks.ts @@ -441,8 +441,6 @@ if (RUN_INTEGRATION_TESTS) { ...defaultOptions, taskQueue, sinks, - maxCachedWorkflows: 2, - maxConcurrentWorkflowTaskExecutions: 2, // Cut down on execution time stickyQueueScheduleToStartTimeout: 1, diff --git a/packages/test/src/test-worker-lifecycle.ts b/packages/test/src/test-worker-lifecycle.ts index 2c9a28d73..2cb408ecd 100644 --- a/packages/test/src/test-worker-lifecycle.ts +++ b/packages/test/src/test-worker-lifecycle.ts @@ -7,8 +7,7 @@ import { setTimeout } from 'timers/promises'; import { randomUUID } from 'crypto'; import test from 'ava'; -import { Runtime, PromiseCompletionTimeoutError } from '@temporalio/worker'; -import { TransportError, UnexpectedError } from '@temporalio/core-bridge'; +import { Runtime, PromiseCompletionTimeoutError, TransportError, UnexpectedError } from '@temporalio/worker'; import { Client } from '@temporalio/client'; import { RUN_INTEGRATION_TESTS, Worker } from './helpers'; import { defaultOptions, isolateFreeWorker } from './mock-native-worker'; @@ -21,15 +20,17 @@ if (RUN_INTEGRATION_TESTS) { taskQueue: t.title.replace(/ /g, '_'), }); t.is(worker.getState(), 'INITIALIZED'); 
+ t.not(Runtime._instance, undefined); const p = worker.run(); t.is(worker.getState(), 'RUNNING'); process.emit('SIGINT', 'SIGINT'); // Shutdown callback is enqueued as a microtask await new Promise((resolve) => process.nextTick(resolve)); - t.is(worker.getState(), 'STOPPING'); + t.is(worker.getState(), 'DRAINING'); await p; t.is(worker.getState(), 'STOPPED'); await t.throwsAsync(worker.run(), { message: 'Poller was already started' }); + t.is(Runtime._instance, undefined); }); test.serial("Worker.runUntil doesn't hang if provided promise survives to Worker's shutdown", async (t) => { @@ -37,6 +38,7 @@ if (RUN_INTEGRATION_TESTS) { ...defaultOptions, taskQueue: t.title.replace(/ /g, '_'), }); + t.not(Runtime._instance, undefined); const p = worker.runUntil( new Promise(() => { /* a promise that will never unblock */ @@ -44,9 +46,10 @@ if (RUN_INTEGRATION_TESTS) { ); t.is(worker.getState(), 'RUNNING'); worker.shutdown(); - t.is(worker.getState(), 'STOPPING'); + t.is(worker.getState(), 'DRAINING'); await t.throwsAsync(p, { instanceOf: PromiseCompletionTimeoutError }); t.is(worker.getState(), 'STOPPED'); + t.is(Runtime._instance, undefined); }); test.serial('Worker shuts down gracefully if interrupted before running', async (t) => { @@ -148,7 +151,7 @@ test.serial('Mocked run shuts down gracefully if interrupted before running', as const worker = isolateFreeWorker({ taskQueue: t.title.replace(/ /g, '_'), }); - // worker.native.initiateShutdown = () => new Promise(() => undefined); + // worker.native.initiateShutdown = () => undefined; t.is(worker.getState(), 'INITIALIZED'); process.emit('SIGINT', 'SIGINT'); const p = worker.run(); @@ -169,7 +172,7 @@ test.serial('Mocked run throws if not shut down gracefully', async (t) => { const p = worker.run(); t.is(worker.getState(), 'RUNNING'); // Make sure shutdown never resolves - worker.native.initiateShutdown = () => new Promise(() => undefined); + worker.native.initiateShutdown = () => undefined; worker.shutdown(); await 
t.throwsAsync(p, { message: 'Timed out while waiting for worker to shutdown gracefully', @@ -183,7 +186,7 @@ test.serial('Mocked throws combined error in runUntil', async (t) => { shutdownForceTime: '5ms', taskQueue: t.title.replace(/ /g, '_'), }); - worker.native.initiateShutdown = () => new Promise(() => undefined); + worker.native.initiateShutdown = () => undefined; const err = await t.throwsAsync( worker.runUntil(async () => { throw new Error('inner'); diff --git a/packages/test/src/test-worker-tuner.ts b/packages/test/src/test-worker-tuner.ts index 1fe8ec61c..0a5709e09 100644 --- a/packages/test/src/test-worker-tuner.ts +++ b/packages/test/src/test-worker-tuner.ts @@ -1,7 +1,7 @@ import { ExecutionContext } from 'ava'; -import { ResourceBasedTunerOptions } from '@temporalio/core-bridge'; import { CustomSlotSupplier, + ResourceBasedTunerOptions, SlotInfo, SlotMarkUsedContext, SlotPermit, diff --git a/packages/testing/src/client.ts b/packages/testing/src/client.ts new file mode 100644 index 000000000..be771abdf --- /dev/null +++ b/packages/testing/src/client.ts @@ -0,0 +1,85 @@ +import 'abort-controller/polyfill'; // eslint-disable-line import/no-unassigned-import +import { + Client, + ClientOptions, + WorkflowClient, + WorkflowClientOptions, + WorkflowResultOptions, +} from '@temporalio/client'; +import { Connection, TestService } from './connection'; + +// Config /////////////////////////////////////////////////////////////////////////////////////////// + +export interface TimeSkippingWorkflowClientOptions extends WorkflowClientOptions { + connection: Connection; + enableTimeSkipping: boolean; +} + +export interface TestEnvClientOptions extends ClientOptions { + connection: Connection; + enableTimeSkipping: boolean; +} + +/** + * Subset of the "normal" client options that are used to create a client for the test environment. 
+ */ +export type ClientOptionsForTestEnv = Omit; + +// Implementation ////////////////////////////////////////////////////////////////////////////////// + +/** + * A client with the exact same API as the "normal" client with 1 exception, + * When this client waits on a Workflow's result, it will enable time skipping + * in the test server. + */ +export class TimeSkippingWorkflowClient extends WorkflowClient { + protected readonly testService: TestService; + protected readonly enableTimeSkipping: boolean; + + constructor(options: TimeSkippingWorkflowClientOptions) { + super(options); + this.enableTimeSkipping = options.enableTimeSkipping; + this.testService = options.connection.testService; + } + + /** + * Gets the result of a Workflow execution. + * + * @see {@link WorkflowClient.result} + */ + override async result( + workflowId: string, + runId?: string | undefined, + opts?: WorkflowResultOptions | undefined + ): Promise { + if (this.enableTimeSkipping) { + await this.testService.unlockTimeSkipping({}); + try { + return await super.result(workflowId, runId, opts); + } finally { + await this.testService.lockTimeSkipping({}); + } + } else { + return await super.result(workflowId, runId, opts); + } + } +} + +/** + * A client with the exact same API as the "normal" client with one exception: + * when `TestEnvClient.workflow` (an instance of {@link TimeSkippingWorkflowClient}) waits on a Workflow's result, it will enable time skipping + * in the Test Server. + */ +export class TestEnvClient extends Client { + constructor(options: TestEnvClientOptions) { + super(options); + + // Recreate the client (this isn't optimal but it's better than adding public methods just for testing). + // NOTE: we cast to "any" to work around `workflow` being a readonly attribute. 
+ (this as any).workflow = new TimeSkippingWorkflowClient({ + ...this.workflow.options, + connection: options.connection, + enableTimeSkipping: options.enableTimeSkipping, + }); + } +} diff --git a/packages/testing/src/ephemeral-server.ts b/packages/testing/src/ephemeral-server.ts new file mode 100644 index 000000000..0f7c7bee4 --- /dev/null +++ b/packages/testing/src/ephemeral-server.ts @@ -0,0 +1,188 @@ +import 'abort-controller/polyfill'; // eslint-disable-line import/no-unassigned-import +import { Duration, SearchAttributeType } from '@temporalio/common'; +import { msToNumber } from '@temporalio/common/lib/time'; +import { native } from '@temporalio/core-bridge'; +import { SearchAttributeKey } from '@temporalio/common/lib/search-attributes'; +import pkg from './pkg'; + +// Config ////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Configuration for the Temporal CLI Dev Server. + */ +export interface DevServerConfig { + type: 'dev-server'; + + executable?: EphemeralServerExecutable; + + /** + * Sqlite DB filename if persisting or non-persistent if none (default). + */ + dbFilename?: string; + + /** + * Namespace to use - created at startup. + * + * @default "default" + */ + namespace?: string; + + /** + * IP to bind to. + * + * @default localhost + */ + ip?: string; + + /** + * Port to listen on; defaults to find a random free port. + */ + port?: number; + + /** + * Whether to enable the UI. + * + * @default true if `uiPort` is set; defaults to `false` otherwise. + */ + ui?: boolean; + + /** + * Port to listen on for the UI; if `ui` is true, defaults to `port + 1000`. + */ + uiPort?: number; + + /** + * Log format and level + * @default { format: "pretty", level" "warn" } + */ + log?: { format: string; level: string }; + + /** + * Extra args to pass to the executable command. + * + * Note that the Dev Server implementation may be changed to another one in the future. 
Therefore, there is no + guarantee that Dev Server options, and particularly those provided through the `extraArgs` array, will continue to + be supported in the future. + */ + extraArgs?: string[]; + + /** + * Search attributes to be registered with the dev server. + */ + searchAttributes?: SearchAttributeKey[]; +} + +/** + * Configuration for the time-skipping test server. + */ +export interface TimeSkippingServerConfig { + type: 'time-skipping'; + + executable?: EphemeralServerExecutable; + + /** + * Optional port to listen on, defaults to find a random free port. + */ + port?: number; + + /** + * Extra args to pass to the executable command. + * + * Note that the Test Server implementation may be changed to another one in the future. Therefore, there is + * no guarantee that server options, and particularly those provided through the `extraArgs` array, will continue to + * be supported in the future. + */ + extraArgs?: string[]; +} + +/** + * Which version of the executable to run. + */ +export type EphemeralServerExecutable = + | { + type: 'cached-download'; + /** + * Download destination directory or the system's temp directory if none set. + */ + downloadDir?: string; + /** + * Optional version, can be set to a specific server release or "default" or "latest". + * + * At the time of writing the server is released as part of the + * Java SDK - (https://github.com/temporalio/sdk-java/releases). + * + * @default "default" - get the best version for the current SDK version. + */ + version?: string; + + /** How long to cache the download for. Defaults to 1 day. 
*/ + ttl?: Duration; + } + | { + type: 'existing-path'; + /** Path to executable */ + path: string; + }; + +// Compile Config ////////////////////////////////////////////////////////////////////////////////// + +/** + * @internal + */ +export function toNativeEphemeralServerConfig( + server: DevServerConfig | TimeSkippingServerConfig +): native.EphemeralServerConfig { + switch (server.type) { + case 'dev-server': + return { + type: 'dev-server', + executable: toNativeEphemeralServerExecutableConfig(server.executable), + ip: server.ip ?? '127.0.0.1', + port: server.port ?? null, + ui: server.ui ?? false, + uiPort: server.uiPort ?? null, + namespace: server.namespace ?? 'default', + dbFilename: server.dbFilename ?? null, + log: server.log ?? { format: 'pretty', level: 'warn' }, + extraArgs: server.extraArgs ?? [], + }; + + case 'time-skipping': + return { + type: 'time-skipping', + executable: toNativeEphemeralServerExecutableConfig(server.executable), + port: server.port ?? null, + extraArgs: server.extraArgs ?? [], + }; + + default: + throw new TypeError(`Unsupported server type: ${String((server as any).type)}`); + } +} + +/** + * @internal + */ +function toNativeEphemeralServerExecutableConfig( + executable: EphemeralServerExecutable = { type: 'cached-download' } +): native.EphemeralServerExecutableConfig { + switch (executable.type) { + case 'cached-download': + return { + type: 'cached-download', + downloadDir: executable.downloadDir ?? null, + version: executable.version ?? 'default', + ttl: msToNumber(executable.ttl ?? 
'1d'), + sdkVersion: pkg.version, + }; + + case 'existing-path': + return { + type: 'existing-path', + path: executable.path, + }; + + default: + throw new TypeError(`Unsupported server executable type: ${String((executable as any).type)}`); + } +} diff --git a/packages/testing/src/index.ts b/packages/testing/src/index.ts index ff56e6165..d00594ce4 100644 --- a/packages/testing/src/index.ts +++ b/packages/testing/src/index.ts @@ -8,110 +8,34 @@ * @module */ -import 'abort-controller/polyfill'; // eslint-disable-line import/no-unassigned-import import path from 'node:path'; -import events from 'node:events'; -import * as activity from '@temporalio/activity'; -import { - AsyncCompletionClient, - Client, - ClientOptions, - WorkflowClient, - WorkflowClientOptions, - WorkflowResultOptions, -} from '@temporalio/client'; -import { - ActivityFunction, - Duration, - SdkComponent, - Logger, - defaultFailureConverter, - defaultPayloadConverter, - TypedSearchAttributes, -} from '@temporalio/common'; -import { msToNumber, msToTs, tsToMs } from '@temporalio/common/lib/time'; -import { ActivityInterceptorsFactory, DefaultLogger, NativeConnection, Runtime } from '@temporalio/worker'; -import { withMetadata } from '@temporalio/worker/lib/logger'; -import { Activity } from '@temporalio/worker/lib/activity'; -import { - EphemeralServer, - EphemeralServerConfig, - getEphemeralServerTarget, - DevServerConfig, - TimeSkippingServerConfig, -} from '@temporalio/core-bridge'; -import { filterNullAndUndefined } from '@temporalio/common/lib/internal-non-workflow'; -import { Connection, TestService } from './connection'; -export { TimeSkippingServerConfig, DevServerConfig, EphemeralServerExecutable } from '@temporalio/core-bridge'; -export { EphemeralServerConfig }; - -export interface TimeSkippingWorkflowClientOptions extends WorkflowClientOptions { - connection: Connection; - enableTimeSkipping: boolean; -} - -export interface TestEnvClientOptions extends ClientOptions { - connection: 
Connection; - enableTimeSkipping: boolean; -} - -/** - * A client with the exact same API as the "normal" client with 1 exception, - * When this client waits on a Workflow's result, it will enable time skipping - * in the test server. - */ -export class TimeSkippingWorkflowClient extends WorkflowClient { - protected readonly testService: TestService; - protected readonly enableTimeSkipping: boolean; - - constructor(options: TimeSkippingWorkflowClientOptions) { - super(options); - this.enableTimeSkipping = options.enableTimeSkipping; - this.testService = options.connection.testService; - } - - /** - * Gets the result of a Workflow execution. - * - * @see {@link WorkflowClient.result} - */ - override async result( - workflowId: string, - runId?: string | undefined, - opts?: WorkflowResultOptions | undefined - ): Promise { - if (this.enableTimeSkipping) { - await this.testService.unlockTimeSkipping({}); - try { - return await super.result(workflowId, runId, opts); - } finally { - await this.testService.lockTimeSkipping({}); - } - } else { - return await super.result(workflowId, runId, opts); - } - } -} - -/** - * A client with the exact same API as the "normal" client with one exception: - * when `TestEnvClient.workflow` (an instance of {@link TimeSkippingWorkflowClient}) waits on a Workflow's result, it will enable time skipping - * in the Test Server. - */ -class TestEnvClient extends Client { - constructor(options: TestEnvClientOptions) { - super(options); - - // Recreate the client (this isn't optimal but it's better than adding public methods just for testing). - // NOTE: we cast to "any" to work around `workflow` being a readonly attribute. 
- (this as any).workflow = new TimeSkippingWorkflowClient({ - ...this.workflow.options, - connection: options.connection, - enableTimeSkipping: options.enableTimeSkipping, - }); - } -} +export { + TestWorkflowEnvironment, + type LocalTestWorkflowEnvironmentOptions, + type TimeSkippingTestWorkflowEnvironmentOptions, +} from './testing-workflow-environment'; + +export { + type DevServerConfig, + type TimeSkippingServerConfig, + type EphemeralServerExecutable, +} from './ephemeral-server'; + +export { + // FIXME: Revise the pertinence of these types + type ClientOptionsForTestEnv, + type TestEnvClientOptions, + type TimeSkippingWorkflowClientOptions, + TestEnvClient, + TimeSkippingWorkflowClient, +} from './client'; + +export { + type MockActivityEnvironmentOptions, + MockActivityEnvironment, + defaultActivityInfo, +} from './mocking-activity-environment'; /** * Convenience workflow interceptors @@ -120,354 +44,3 @@ class TestEnvClient extends Client { * retryable `ApplicationFailure`s. */ export const workflowInterceptorModules = [path.join(__dirname, 'assert-to-failure-interceptor')]; - -/** - * Subset of the "normal" client options that are used to create a client for the test environment. 
- */ -export type ClientOptionsForTestEnv = Omit; - -/** - * Options for {@link TestWorkflowEnvironment.create} - */ -export type TestWorkflowEnvironmentOptions = { - server: EphemeralServerConfig; - client?: ClientOptionsForTestEnv; -}; - -/** - * Options for {@link TestWorkflowEnvironment.createTimeSkipping} - */ -export type TimeSkippingTestWorkflowEnvironmentOptions = { - server?: Omit; - client?: ClientOptionsForTestEnv; -}; - -/** - * Options for {@link TestWorkflowEnvironment.createLocal} - */ -export type LocalTestWorkflowEnvironmentOptions = { - server?: Omit; - client?: ClientOptionsForTestEnv; -}; - -export type TestWorkflowEnvironmentOptionsWithDefaults = Required; - -function addDefaults(opts: TestWorkflowEnvironmentOptions): TestWorkflowEnvironmentOptionsWithDefaults { - return { - client: {}, - ...opts, - }; -} - -/** - * An execution environment for running Workflow integration tests. - * - * Runs an external server. - * By default, the Java test server is used which supports time skipping. - */ -export class TestWorkflowEnvironment { - /** - * Namespace used in this environment (taken from {@link TestWorkflowEnvironmentOptions}) - */ - public readonly namespace?: string; - /** - * Get an established {@link Connection} to the ephemeral server - */ - public readonly connection: Connection; - - /** - * A {@link TestEnvClient} for interacting with the ephemeral server - */ - public readonly client: Client; - - /** - * An {@link AsyncCompletionClient} for interacting with the test server - * - * @deprecated - use `client.activity` instead - */ - public readonly asyncCompletionClient: AsyncCompletionClient; - - /** - * A {@link TimeSkippingWorkflowClient} for interacting with the test server - * - * @deprecated - use `client.workflow` instead - */ - public readonly workflowClient: WorkflowClient; - - /** - * A {@link NativeConnection} for interacting with the test server. - * - * Use this connection when creating Workers for testing. 
- */ - public readonly nativeConnection: NativeConnection; - - protected constructor( - public readonly options: TestWorkflowEnvironmentOptionsWithDefaults, - public readonly supportsTimeSkipping: boolean, - protected readonly server: EphemeralServer, - connection: Connection, - nativeConnection: NativeConnection, - namespace: string | undefined - ) { - this.connection = connection; - this.nativeConnection = nativeConnection; - this.namespace = namespace; - this.client = new TestEnvClient({ - connection, - namespace: this.namespace, - enableTimeSkipping: supportsTimeSkipping, - ...options.client, - }); - // eslint-disable-next-line deprecation/deprecation - this.asyncCompletionClient = this.client.activity; - // eslint-disable-next-line deprecation/deprecation - this.workflowClient = this.client.workflow; - } - - /** - * Start a time skipping workflow environment. - * - * This environment automatically skips to the next events in time when a workflow handle's `result` is awaited on - * (which includes {@link WorkflowClient.execute}). Before the result is awaited on, time can be manually skipped - * forward using {@link sleep}. The currently known time can be obtained via {@link currentTimeMs}. - * - * This environment will be powered by the Temporal Time Skipping Test Server (part of the [Java SDK](https://github.com/temporalio/sdk-java)). - * Note that the Time Skipping Test Server does not support full capabilities of the regular Temporal Server, and may - * occasionally present different behaviors. For general Workflow testing, it is generally preferable to use {@link createLocal} - * instead. - * - * Users can reuse this environment for testing multiple independent workflows, but not concurrently. Time skipping, - * which is automatically done when awaiting a workflow result and manually done on sleep, is global to the - * environment, not to the workflow under test. 
We highly recommend running tests serially when using a single - * environment or creating a separate environment per test. - * - * By default, the latest release of the Test Serveer will be downloaded and cached to a temporary directory - * (e.g. `$TMPDIR/temporal-test-server-sdk-typescript-*` or `%TEMP%/temporal-test-server-sdk-typescript-*.exe`). Note - * that existing cached binairies will be reused without validation that they are still up-to-date, until the SDK - * itself is updated. Alternatively, a specific version number of the Test Server may be provided, or the path to an - * existing Test Server binary may be supplied; see {@link LocalTestWorkflowEnvironmentOptions.server.executable}. - * - * Note that the Test Server implementation may be changed to another one in the future. Therefore, there is no - * guarantee that Test Server options, and particularly those provided through the `extraArgs` array, will continue to - * be supported in the future. - * - * IMPORTANT: At this time, the Time Skipping Test Server is not supported on ARM platforms. Execution on Apple - * silicon Macs will work if Rosetta 2 is installed. - */ - static async createTimeSkipping(opts?: TimeSkippingTestWorkflowEnvironmentOptions): Promise { - return await this.create({ - server: { type: 'time-skipping', ...opts?.server }, - client: opts?.client, - supportsTimeSkipping: true, - }); - } - - /** - * Start a full Temporal server locally. - * - * This environment is good for testing full server capabilities, but does not support time skipping like - * {@link createTimeSkipping} does. {@link supportsTimeSkipping} will always return `false` for this environment. - * {@link sleep} will sleep the actual amount of time and {@link currentTimeMs} will return the current time. - * - * This local environment will be powered by [Temporal CLI](https://github.com/temporalio/cli), which is a - * self-contained executable for Temporal. 
By default, Temporal's database will not be persisted to disk, and no UI - * will be launched. - * - * By default, the latest release of the CLI will be downloaded and cached to a temporary directory - * (e.g. `$TMPDIR/temporal-sdk-typescript-*` or `%TEMP%/temporal-sdk-typescript-*.exe`). Note that existing cached - * binairies will be reused without validation that they are still up-to-date, until the SDK itself is updated. - * Alternatively, a specific version number of the CLI may be provided, or the path to an existing CLI binary may be - * supplied; see {@link LocalTestWorkflowEnvironmentOptions.server.executable}. - * - * Note that the Dev Server implementation may be changed to another one in the future. Therefore, there is no - * guarantee that Dev Server options, and particularly those provided through the `extraArgs` array, will continue to - * be supported in the future. - */ - static async createLocal(opts?: LocalTestWorkflowEnvironmentOptions): Promise { - return await this.create({ - server: { type: 'dev-server', ...opts?.server }, - client: opts?.client, - namespace: opts?.server?.namespace, - supportsTimeSkipping: false, - }); - } - - /** - * Create a new test environment - */ - private static async create( - opts: TestWorkflowEnvironmentOptions & { - supportsTimeSkipping: boolean; - namespace?: string; - } - ): Promise { - const { supportsTimeSkipping, namespace, ...rest } = opts; - const optsWithDefaults = addDefaults(filterNullAndUndefined(rest)); - - // Add search attributes to CLI server arguments - if ('searchAttributes' in optsWithDefaults.server && optsWithDefaults.server.searchAttributes) { - let newArgs: string[] = []; - for (const { name, type } of optsWithDefaults.server.searchAttributes) { - newArgs.push('--search-attribute'); - newArgs.push(`${name}=${TypedSearchAttributes.toMetadataType(type)}`); - } - newArgs = newArgs.concat(optsWithDefaults.server.extraArgs ?? 
[]); - optsWithDefaults.server.extraArgs = newArgs; - } - - const server = await Runtime.instance().createEphemeralServer(optsWithDefaults.server); - const address = getEphemeralServerTarget(server); - - const nativeConnection = await NativeConnection.connect({ address }); - const connection = await Connection.connect({ address }); - - return new this(optsWithDefaults, supportsTimeSkipping, server, connection, nativeConnection, namespace); - } - - /** - * Kill the test server process and close the connection to it - */ - async teardown(): Promise { - await this.connection.close(); - await this.nativeConnection.close(); - await Runtime.instance().shutdownEphemeralServer(this.server); - } - - /** - * Wait for `durationMs` in "server time". - * - * This awaits using regular setTimeout in regular environments, or manually skips time in time-skipping environments. - * - * Useful for simulating events far into the future like completion of long running activities. - * - * **Time skippping**: - * - * The time skippping server toggles between skipped time and normal time depending on what it needs to execute. - * - * This method is _likely_ to resolve in less than `durationMs` of "real time". - * - * @param durationMs number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} - * - * @example - * - * `workflow.ts` - * - * ```ts - * const activities = proxyActivities({ startToCloseTimeout: 2_000_000 }); - * - * export async function raceActivityAndTimer(): Promise { - * return await Promise.race([ - * wf.sleep(500_000).then(() => 'timer'), - * activities.longRunning().then(() => 'activity'), - * ]); - * } - * ``` - * - * `test.ts` - * - * ```ts - * const worker = await Worker.create({ - * connection: testEnv.nativeConnection, - * activities: { - * async longRunning() { - * await testEnv.sleep(1_000_000); // <-- sleep called here - * }, - * }, - * // ... 
- * }); - * ``` - */ - sleep = async (durationMs: Duration): Promise => { - if (this.supportsTimeSkipping) { - await (this.connection as Connection).testService.unlockTimeSkippingWithSleep({ duration: msToTs(durationMs) }); - } else { - await new Promise((resolve) => setTimeout(resolve, msToNumber(durationMs))); - } - }; - - /** - * Get the current time known to this environment. - * - * For non-time-skipping environments this is simply the system time. For time-skipping environments this is whatever - * time has been skipped to. - */ - async currentTimeMs(): Promise { - if (this.supportsTimeSkipping) { - const { time } = await (this.connection as Connection).testService.getCurrentTime({}); - return tsToMs(time); - } else { - return Date.now(); - } - } -} - -export interface MockActivityEnvironmentOptions { - interceptors?: ActivityInterceptorsFactory[]; - logger?: Logger; -} - -/** - * Used as the default activity info for Activities executed in the {@link MockActivityEnvironment} - */ -export const defaultActivityInfo: activity.Info = { - attempt: 1, - taskQueue: 'test', - isLocal: false, - taskToken: Buffer.from('test'), - activityId: 'test', - activityType: 'unknown', - workflowType: 'test', - base64TaskToken: Buffer.from('test').toString('base64'), - heartbeatTimeoutMs: undefined, - heartbeatDetails: undefined, - activityNamespace: 'default', - workflowNamespace: 'default', - workflowExecution: { workflowId: 'test', runId: 'dead-beef' }, - scheduledTimestampMs: 1, - startToCloseTimeoutMs: 1000, - scheduleToCloseTimeoutMs: 1000, - currentAttemptScheduledTimestampMs: 1, - priority: undefined, -}; - -/** - * An execution environment for testing Activities. - * - * Mocks Activity {@link Context | activity.Context} and exposes hooks for cancellation and heartbeats. - * - * Note that the `Context` object used by this environment will be reused for all activities that are run in this - * environment. 
Consequently, once `cancel()` is called, any further activity that gets executed in this environment - * will immediately be in a cancelled state. - */ -export class MockActivityEnvironment extends events.EventEmitter { - public cancel: (reason?: any) => void = () => undefined; - public readonly context: activity.Context; - private readonly activity: Activity; - - constructor(info?: Partial, opts?: MockActivityEnvironmentOptions) { - super(); - const heartbeatCallback = (details?: unknown) => this.emit('heartbeat', details); - const loadedDataConverter = { - payloadConverter: defaultPayloadConverter, - payloadCodecs: [], - failureConverter: defaultFailureConverter, - }; - this.activity = new Activity( - { ...defaultActivityInfo, ...info }, - undefined, - loadedDataConverter, - heartbeatCallback, - withMetadata(opts?.logger ?? new DefaultLogger(), { sdkComponent: SdkComponent.worker }), - opts?.interceptors ?? [] - ); - this.context = this.activity.context; - this.cancel = this.activity.cancel; - } - - /** - * Run a function in Activity Context - */ - public async run

>(fn: F, ...args: P): Promise { - return this.activity.runNoEncoding(fn as ActivityFunction, { args, headers: {} }) as Promise; - } -} diff --git a/packages/testing/src/mocking-activity-environment.ts b/packages/testing/src/mocking-activity-environment.ts new file mode 100644 index 000000000..3240cddba --- /dev/null +++ b/packages/testing/src/mocking-activity-environment.ts @@ -0,0 +1,90 @@ +import 'abort-controller/polyfill'; // eslint-disable-line import/no-unassigned-import +import events from 'node:events'; +import * as activity from '@temporalio/activity'; +import { + ActivityFunction, + Logger, + SdkComponent, + defaultFailureConverter, + defaultPayloadConverter, +} from '@temporalio/common'; +import { ActivityInterceptorsFactory, DefaultLogger } from '@temporalio/worker'; +import { withMetadata } from '@temporalio/worker/lib/logger'; +import { Activity } from '@temporalio/worker/lib/activity'; + +// Config ////////////////////////////////////////////////////////////////////////////////////////// + +export interface MockActivityEnvironmentOptions { + interceptors?: ActivityInterceptorsFactory[]; + logger?: Logger; +} + +// Implementation ////////////////////////////////////////////////////////////////////////////////// + +/** + * An execution environment for testing Activities. + * + * Mocks Activity {@link Context | activity.Context} and exposes hooks for cancellation and heartbeats. + * + * Note that the `Context` object used by this environment will be reused for all activities that are run in this + * environment. Consequently, once `cancel()` is called, any further activity that gets executed in this environment + * will immediately be in a cancelled state. 
+ */ +export class MockActivityEnvironment extends events.EventEmitter { + public cancel: (reason?: any) => void = () => undefined; + public readonly context: activity.Context; + private readonly activity: Activity; + + constructor(info?: Partial, opts?: MockActivityEnvironmentOptions) { + super(); + const heartbeatCallback = (details?: unknown) => this.emit('heartbeat', details); + const loadedDataConverter = { + payloadConverter: defaultPayloadConverter, + payloadCodecs: [], + failureConverter: defaultFailureConverter, + }; + this.activity = new Activity( + { ...defaultActivityInfo, ...info }, + undefined, + loadedDataConverter, + heartbeatCallback, + withMetadata(opts?.logger ?? new DefaultLogger(), { sdkComponent: SdkComponent.worker }), + opts?.interceptors ?? [] + ); + this.context = this.activity.context; + this.cancel = this.activity.cancel; + } + + /** + * Run a function in Activity Context + */ + public async run

>(fn: F, ...args: P): Promise { + return this.activity.runNoEncoding(fn as ActivityFunction, { args, headers: {} }) as Promise; + } +} + +/** + * Used as the default activity info for Activities executed in the {@link MockActivityEnvironment} + * + * @hidden + */ +export const defaultActivityInfo: activity.Info = { + attempt: 1, + taskQueue: 'test', + isLocal: false, + taskToken: Buffer.from('test'), + activityId: 'test', + activityType: 'unknown', + workflowType: 'test', + base64TaskToken: Buffer.from('test').toString('base64'), + heartbeatTimeoutMs: undefined, + heartbeatDetails: undefined, + activityNamespace: 'default', + workflowNamespace: 'default', + workflowExecution: { workflowId: 'test', runId: 'dead-beef' }, + scheduledTimestampMs: 1, + startToCloseTimeoutMs: 1000, + scheduleToCloseTimeoutMs: 1000, + currentAttemptScheduledTimestampMs: 1, + priority: undefined, +}; diff --git a/packages/testing/src/pkg.ts b/packages/testing/src/pkg.ts new file mode 100644 index 000000000..60ba3e205 --- /dev/null +++ b/packages/testing/src/pkg.ts @@ -0,0 +1,7 @@ +// ../package.json is outside of the TS project rootDir which causes TS to complain about this import. +// We do not want to change the rootDir because it messes up the output structure. 
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment +// @ts-ignore +import pkg from '../package.json'; + +export default pkg as { name: string; version: string }; diff --git a/packages/testing/src/testing-workflow-environment.ts b/packages/testing/src/testing-workflow-environment.ts new file mode 100644 index 000000000..68e11a5e7 --- /dev/null +++ b/packages/testing/src/testing-workflow-environment.ts @@ -0,0 +1,306 @@ +import 'abort-controller/polyfill'; // eslint-disable-line import/no-unassigned-import +import { AsyncCompletionClient, Client, WorkflowClient } from '@temporalio/client'; +import { Duration, TypedSearchAttributes } from '@temporalio/common'; +import { msToNumber, msToTs, tsToMs } from '@temporalio/common/lib/time'; +import { NativeConnection, Runtime } from '@temporalio/worker'; +import { native } from '@temporalio/core-bridge'; +import { filterNullAndUndefined } from '@temporalio/common/lib/internal-non-workflow'; +import { Connection } from './connection'; +import { toNativeEphemeralServerConfig, DevServerConfig, TimeSkippingServerConfig } from './ephemeral-server'; +import { ClientOptionsForTestEnv, TestEnvClient } from './client'; + +// Config ////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Options for {@link TestWorkflowEnvironment.createLocal} + */ +export type LocalTestWorkflowEnvironmentOptions = { + server?: Omit; + client?: ClientOptionsForTestEnv; +}; + +/** + * Options for {@link TestWorkflowEnvironment.createTimeSkipping} + */ +export type TimeSkippingTestWorkflowEnvironmentOptions = { + server?: Omit; + client?: ClientOptionsForTestEnv; +}; + +// Implementation ////////////////////////////////////////////////////////////////////////////////// + +/** + * An execution environment for running Workflow integration tests. + * + * Runs an external server. + * By default, the Java test server is used which supports time skipping. 
+ */ +export class TestWorkflowEnvironment { + /** + * Namespace used in this environment (taken from {@link TestWorkflowEnvironmentOptions}) + */ + public readonly namespace?: string; + /** + * Get an established {@link Connection} to the ephemeral server + */ + public readonly connection: Connection; + + /** + * A {@link TestEnvClient} for interacting with the ephemeral server + */ + public readonly client: Client; + + /** + * An {@link AsyncCompletionClient} for interacting with the test server + * + * @deprecated - use `client.activity` instead + */ + public readonly asyncCompletionClient: AsyncCompletionClient; + + /** + * A {@link TimeSkippingWorkflowClient} for interacting with the test server + * + * @deprecated - use `client.workflow` instead + */ + public readonly workflowClient: WorkflowClient; + + /** + * A {@link NativeConnection} for interacting with the test server. + * + * Use this connection when creating Workers for testing. + */ + public readonly nativeConnection: NativeConnection; + + protected constructor( + private readonly runtime: Runtime, + public readonly options: TestWorkflowEnvironmentOptionsWithDefaults, + public readonly supportsTimeSkipping: boolean, + protected readonly server: native.EphemeralServer, + connection: Connection, + nativeConnection: NativeConnection, + namespace: string | undefined + ) { + this.connection = connection; + this.nativeConnection = nativeConnection; + this.namespace = namespace; + this.client = new TestEnvClient({ + connection, + namespace: this.namespace, + enableTimeSkipping: supportsTimeSkipping, + ...options.client, + }); + // eslint-disable-next-line deprecation/deprecation + this.asyncCompletionClient = this.client.activity; + // eslint-disable-next-line deprecation/deprecation + this.workflowClient = this.client.workflow; + } + + /** + * Start a time skipping workflow environment. 
+ * + * This environment automatically skips to the next events in time when a workflow handle's `result` is awaited on + * (which includes {@link WorkflowClient.execute}). Before the result is awaited on, time can be manually skipped + * forward using {@link sleep}. The currently known time can be obtained via {@link currentTimeMs}. + * + * This environment will be powered by the Temporal Time Skipping Test Server (part of the [Java SDK](https://github.com/temporalio/sdk-java)). + * Note that the Time Skipping Test Server does not support full capabilities of the regular Temporal Server, and may + * occasionally present different behaviors. For general Workflow testing, it is generally preferable to use {@link createLocal} + * instead. + * + * Users can reuse this environment for testing multiple independent workflows, but not concurrently. Time skipping, + * which is automatically done when awaiting a workflow result and manually done on sleep, is global to the + * environment, not to the workflow under test. We highly recommend running tests serially when using a single + * environment or creating a separate environment per test. + * + * By default, the latest release of the Test Server will be downloaded and cached to a temporary directory + * (e.g. `$TMPDIR/temporal-test-server-sdk-typescript-*` or `%TEMP%/temporal-test-server-sdk-typescript-*.exe`). Note + * that existing cached binaries will be reused without validation that they are still up-to-date, until the SDK + * itself is updated. Alternatively, a specific version number of the Test Server may be provided, or the path to an + * existing Test Server binary may be supplied; see {@link LocalTestWorkflowEnvironmentOptions.server.executable}. + * + * Note that the Test Server implementation may be changed to another one in the future.
Therefore, there is no + * guarantee that Test Server options, and particularly those provided through the `extraArgs` array, will continue to + * be supported in the future. + * + * IMPORTANT: At this time, the Time Skipping Test Server is not supported on ARM platforms. Execution on Apple + * silicon Macs will work if Rosetta 2 is installed. + */ + static async createTimeSkipping(opts?: TimeSkippingTestWorkflowEnvironmentOptions): Promise { + return await this.create({ + server: { type: 'time-skipping', ...opts?.server }, + client: opts?.client, + supportsTimeSkipping: true, + }); + } + + /** + * Start a full Temporal server locally. + * + * This environment is good for testing full server capabilities, but does not support time skipping like + * {@link createTimeSkipping} does. {@link supportsTimeSkipping} will always return `false` for this environment. + * {@link sleep} will sleep the actual amount of time and {@link currentTimeMs} will return the current time. + * + * This local environment will be powered by [Temporal CLI](https://github.com/temporalio/cli), which is a + * self-contained executable for Temporal. By default, Temporal's database will not be persisted to disk, and no UI + * will be launched. + * + * By default, the latest release of the CLI will be downloaded and cached to a temporary directory + * (e.g. `$TMPDIR/temporal-sdk-typescript-*` or `%TEMP%/temporal-sdk-typescript-*.exe`). Note that existing cached + * binaries will be reused without validation that they are still up-to-date, until the SDK itself is updated. + * Alternatively, a specific version number of the CLI may be provided, or the path to an existing CLI binary may be + * supplied; see {@link LocalTestWorkflowEnvironmentOptions.server.executable}. + * + * Note that the Dev Server implementation may be changed to another one in the future.
Therefore, there is no + * guarantee that Dev Server options, and particularly those provided through the `extraArgs` array, will continue to + * be supported in the future. + */ + static async createLocal(opts?: LocalTestWorkflowEnvironmentOptions): Promise { + return await this.create({ + server: { type: 'dev-server', ...opts?.server }, + client: opts?.client, + namespace: opts?.server?.namespace, + supportsTimeSkipping: false, + }); + } + + /** + * Create a new test environment + */ + private static async create( + opts: TestWorkflowEnvironmentOptions & { + supportsTimeSkipping: boolean; + namespace?: string; + } + ): Promise { + const { supportsTimeSkipping, namespace, ...rest } = opts; + const optsWithDefaults = addDefaults(filterNullAndUndefined(rest)); + + // Add search attributes to CLI server arguments + if ('searchAttributes' in optsWithDefaults.server && optsWithDefaults.server.searchAttributes) { + let newArgs: string[] = []; + for (const { name, type } of optsWithDefaults.server.searchAttributes) { + newArgs.push('--search-attribute'); + newArgs.push(`${name}=${TypedSearchAttributes.toMetadataType(type)}`); + } + newArgs = newArgs.concat(optsWithDefaults.server.extraArgs ?? 
[]); + optsWithDefaults.server.extraArgs = newArgs; + } + + const runtime = Runtime.instance(); + const server = await runtime.createEphemeralServer(toNativeEphemeralServerConfig(optsWithDefaults.server)); + const address = native.getEphemeralServerTarget(server); + + const nativeConnection = await NativeConnection.connect({ address }); + const connection = await Connection.connect({ address }); + + return new this(runtime, optsWithDefaults, supportsTimeSkipping, server, connection, nativeConnection, namespace); + } + + /** + * Kill the test server process and close the connection to it + */ + async teardown(): Promise { + await this.connection.close().catch((e) => { + console.error(e); + /* ignore */ + }); + await this.nativeConnection.close().catch((e) => { + console.error(e); + /* ignore */ + }); + await this.runtime.shutdownEphemeralServer(this.server).catch((e) => { + console.error(e); + /* ignore */ + }); + console.log('teardown done'); + } + + /** + * Wait for `durationMs` in "server time". + * + * This awaits using regular setTimeout in regular environments, or manually skips time in time-skipping environments. + * + * Useful for simulating events far into the future like completion of long running activities. + * + * **Time skipping**: + * + * The time skipping server toggles between skipped time and normal time depending on what it needs to execute. + * + * This method is _likely_ to resolve in less than `durationMs` of "real time".
+ * + * @param durationMs number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} + * + * @example + * + * `workflow.ts` + * + * ```ts + * const activities = proxyActivities({ startToCloseTimeout: 2_000_000 }); + * + * export async function raceActivityAndTimer(): Promise { + * return await Promise.race([ + * wf.sleep(500_000).then(() => 'timer'), + * activities.longRunning().then(() => 'activity'), + * ]); + * } + * ``` + * + * `test.ts` + * + * ```ts + * const worker = await Worker.create({ + * connection: testEnv.nativeConnection, + * activities: { + * async longRunning() { + * await testEnv.sleep(1_000_000); // <-- sleep called here + * }, + * }, + * // ... + * }); + * ``` + */ + sleep = async (durationMs: Duration): Promise => { + if (this.supportsTimeSkipping) { + await (this.connection as Connection).testService.unlockTimeSkippingWithSleep({ duration: msToTs(durationMs) }); + } else { + await new Promise((resolve) => setTimeout(resolve, msToNumber(durationMs))); + } + }; + + /** + * Get the current time known to this environment. + * + * For non-time-skipping environments this is simply the system time. For time-skipping environments this is whatever + * time has been skipped to. 
+ */ + async currentTimeMs(): Promise { + if (this.supportsTimeSkipping) { + const { time } = await (this.connection as Connection).testService.getCurrentTime({}); + return tsToMs(time); + } else { + return Date.now(); + } + } +} + +// Compile Config ////////////////////////////////////////////////////////////////////////////////// + +/** + * Options for {@link TestWorkflowEnvironment.create} + */ +type TestWorkflowEnvironmentOptions = { + server: DevServerConfig | TimeSkippingServerConfig; + client?: ClientOptionsForTestEnv; +}; + +type TestWorkflowEnvironmentOptionsWithDefaults = Required; + +function addDefaults(opts: TestWorkflowEnvironmentOptions): TestWorkflowEnvironmentOptionsWithDefaults { + return { + client: {}, + ...opts, + server: { + ...opts.server, + }, + }; +} diff --git a/packages/worker/src/connection-options.ts b/packages/worker/src/connection-options.ts index 961b87146..48e252a29 100644 --- a/packages/worker/src/connection-options.ts +++ b/packages/worker/src/connection-options.ts @@ -1,14 +1,14 @@ -import * as native from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; import { - normalizeGrpcEndpointAddress, joinProtoHostPort, + normalizeGrpcEndpointAddress, + normalizeTlsConfig, parseHttpConnectProxyAddress, + ProxyConfig, + TLSConfig, } from '@temporalio/common/lib/internal-non-workflow'; import pkg from './pkg'; -type TLSConfig = native.TLSConfig; -type ProxyConfig = native.ProxyConfig; - export { TLSConfig, ProxyConfig }; /** @@ -61,40 +61,50 @@ export interface NativeConnectionOptions { disableErrorCodeMetricTags?: boolean; } -export type RequiredNativeConnectionOptions = Omit< - Required, - 'tls' | 'proxy' | 'metadata' | 'apiKey' -> & { - tls?: NativeConnectionOptions['tls']; - proxy?: NativeConnectionOptions['proxy']; - metadata?: NativeConnectionOptions['metadata']; - apiKey?: NativeConnectionOptions['apiKey']; - sdkVersion: string; -}; +// Compile to Native 
/////////////////////////////////////////////////////////////////////////////// -export function getDefaultConnectionOptions(): RequiredNativeConnectionOptions { - return { - address: 'localhost:7233', - sdkVersion: pkg.version, - disableErrorCodeMetricTags: false, - }; -} +export function toNativeClientOptions(options: NativeConnectionOptions): native.ClientOptions { + const address = normalizeGrpcEndpointAddress(options.address ?? 'localhost:7233', DEFAULT_TEMPORAL_GRPC_PORT); + + const tlsInput = normalizeTlsConfig(options.tls); + const tls: native.TLSConfig | null = tlsInput + ? { + serverRootCaCertificate: tlsInput.serverRootCACertificate ?? null, + clientCertPair: tlsInput.clientCertPair ?? null, + serverNameOverride: tlsInput.serverNameOverride ?? null, + } + : null; -export function compileConnectionOptions(options: RequiredNativeConnectionOptions): RequiredNativeConnectionOptions { - const { address, ...rest } = options; - const proxyOpts: Partial = {}; + let proxy: native.ProxyConfig | null = null; if (options.proxy?.targetHost) { - const { targetHost: target, basicAuth } = options.proxy; + const { targetHost: target } = options.proxy; const { hostname: host, port } = parseHttpConnectProxyAddress(target); - proxyOpts.proxy = { + const basicAuth = options.proxy.basicAuth + ? { + username: options.proxy.basicAuth.username, + password: options.proxy.basicAuth.password, + } + : null; + proxy = { type: 'http-connect', targetHost: joinProtoHostPort({ hostname: host, port }), basicAuth, }; } + + if (options?.apiKey && options.metadata?.['Authorization']) { + throw new TypeError( + 'Both `apiKey` option and `Authorization` header were provided. Only one makes sense to use at a time.' + ); + } + return { - ...rest, - address: normalizeGrpcEndpointAddress(address, DEFAULT_TEMPORAL_GRPC_PORT), - ...proxyOpts, + url: tls ? `https://${address}` : `http://${address}`, + sdkVersion: pkg.version, + tls, + proxy, + metadata: options.metadata ?? 
null, + apiKey: options.apiKey ?? null, + disableErrorCodeMetricTags: options.disableErrorCodeMetricTags ?? false, }; } diff --git a/packages/worker/src/connection.ts b/packages/worker/src/connection.ts index 65a151309..49a148fdc 100644 --- a/packages/worker/src/connection.ts +++ b/packages/worker/src/connection.ts @@ -1,12 +1,9 @@ -import util from 'node:util'; import { IllegalStateError } from '@temporalio/common'; -import { Client, Worker, clientUpdateHeaders, TransportError, clientUpdateApiKey } from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; +import { TransportError } from './errors'; import { NativeConnectionOptions } from './connection-options'; import { Runtime } from './runtime'; -const updateHeaders = util.promisify(clientUpdateHeaders); -const updateApiKey = util.promisify(clientUpdateApiKey); - /** * A Native Connection object that delegates calls to the Rust Core binary extension. * @@ -18,20 +15,24 @@ export class NativeConnection { /** * referenceHolders is used internally by the framework, it can be accessed with `extractReferenceHolders` (below) */ - private readonly referenceHolders = new Set(); + private readonly referenceHolders = new Set(); /** * nativeClient is intentionally left private, framework code can access it with `extractNativeClient` (below) */ - protected constructor(private nativeClient: Client) {} + protected constructor( + private readonly runtime: Runtime, + private readonly nativeClient: native.Client + ) {} /** * @deprecated use `connect` instead */ static async create(options?: NativeConnectionOptions): Promise { try { - const client = await Runtime.instance().createNativeClient(options); - return new this(client); + const runtime = Runtime.instance(); + const client = await runtime.createNativeClient(options); + return new this(runtime, client); } catch (err) { if (err instanceof TransportError) { throw new TransportError(err.message); @@ -45,8 +46,9 @@ export class NativeConnection { */ 
static async connect(options?: NativeConnectionOptions): Promise { try { - const client = await Runtime.instance().createNativeClient(options); - return new this(client); + const runtime = Runtime.instance(); + const client = await runtime.createNativeClient(options); + return new this(runtime, client); } catch (err) { if (err instanceof TransportError) { throw new TransportError(err.message); @@ -65,7 +67,7 @@ export class NativeConnection { if (this.referenceHolders.size > 0) { throw new IllegalStateError('Cannot close connection while Workers hold a reference to it'); } - await Runtime.instance().closeNativeClient(this.nativeClient); + await this.runtime.closeNativeClient(this.nativeClient); } /** @@ -74,7 +76,7 @@ export class NativeConnection { * Use {@link NativeConnectionOptions.metadata} to set the initial metadata for client creation. */ async setMetadata(metadata: Record): Promise { - await updateHeaders(this.nativeClient, metadata); + native.clientUpdateHeaders(this.nativeClient, metadata); } /** @@ -84,7 +86,7 @@ export class NativeConnection { * Use {@link NativeConnectionOptions.apiKey} to set the initial metadata for client creation. */ async setApiKey(apiKey: string): Promise { - await updateApiKey(this.nativeClient, apiKey); + native.clientUpdateApiKey(this.nativeClient, apiKey); } } @@ -93,7 +95,7 @@ export class NativeConnection { * * Only meant to be used by the framework. */ -export function extractNativeClient(conn: NativeConnection): Client { +export function extractNativeClient(conn: NativeConnection): native.Client { return (conn as any).nativeClient; } @@ -102,7 +104,7 @@ export function extractNativeClient(conn: NativeConnection): Client { * * Only meant to be used by the framework. 
*/ -export function extractReferenceHolders(conn: NativeConnection): Set { +export function extractReferenceHolders(conn: NativeConnection): Set { return (conn as any).referenceHolders; } diff --git a/packages/worker/src/errors.ts b/packages/worker/src/errors.ts index 3bf4962d7..f0c1f40dd 100644 --- a/packages/worker/src/errors.ts +++ b/packages/worker/src/errors.ts @@ -1,6 +1,9 @@ import { IllegalStateError } from '@temporalio/common'; import { SymbolBasedInstanceOfError } from '@temporalio/common/lib/type-helpers'; -import { ShutdownError, TransportError, UnexpectedError } from '@temporalio/core-bridge'; +import { errors as bridgeErrors } from '@temporalio/core-bridge'; + +const { ShutdownError, TransportError, UnexpectedError } = bridgeErrors; +export { ShutdownError, TransportError, UnexpectedError }; /** * Thrown from JS if Worker does not shutdown in configured period diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts index 9f9b65bd2..7567c2587 100644 --- a/packages/worker/src/index.ts +++ b/packages/worker/src/index.ts @@ -8,47 +8,31 @@ * @module */ -export { - ConsoleLogger, - ForwardLogger, - Logger as TelemLogger, - MetricsExporter, - OtelCollectorExporter, - PrometheusMetricsExporter, - TelemetryOptions, - CustomSlotSupplier, - SlotInfo, - WorkflowSlotInfo, - LocalActivitySlotInfo, - ActivitySlotInfo, - SlotMarkUsedContext, - SlotPermit, - SlotReleaseContext, - SlotReserveContext, -} from '@temporalio/core-bridge'; export { NativeConnection } from './connection'; -export { NativeConnectionOptions, RequiredNativeConnectionOptions, TLSConfig } from './connection-options'; +export { NativeConnectionOptions, TLSConfig } from './connection-options'; export { startDebugReplayer } from './debug-replayer'; export { IllegalStateError } from '@temporalio/common'; -export { - ShutdownError, - TransportError, - UnexpectedError, - SlotSupplier, - ResourceBasedSlotOptions, - ResourceBasedTunerOptions, - FixedSizeSlotSupplier, -} from 
'@temporalio/core-bridge'; export { CombinedWorkerRunError, CombinedWorkerRunErrorCause, GracefulShutdownPeriodExpiredError, - UnhandledRejectionError, PromiseCompletionTimeoutError, + UnhandledRejectionError, } from './errors'; export * from './interceptors'; export { DefaultLogger, LogEntry, LogLevel, LogMetadata, LogTimestamp, Logger } from './logger'; -export { History, Runtime, RuntimeOptions, makeTelemetryFilterString } from './runtime'; +export { History, Runtime } from './runtime'; +export { + RuntimeOptions, + makeTelemetryFilterString, + ConsoleLogger, + ForwardLogger, + LogExporterConfig, + MetricsExporterConfig, + OtelCollectorExporter, + PrometheusMetricsExporter, + TelemetryOptions, +} from './runtime-options'; export * from './sinks'; export { DataConverter, defaultPayloadConverter, State, Worker, WorkerStatus } from './worker'; export { @@ -61,7 +45,25 @@ export { } from './worker-options'; export { ReplayError, ReplayHistoriesIterable, ReplayResult } from './replay'; export { BundleOptions, bundleWorkflowCode, WorkflowBundleWithSourceMap } from './workflow/bundler'; -export { WorkerTuner } from './worker-tuner'; +export { + WorkerTuner, + TunerHolder, + SlotSupplier, + ResourceBasedTuner, + ResourceBasedTunerOptions, + ResourceBasedSlotOptions, + ResourceBasedSlotsForType, + FixedSizeSlotSupplier, + CustomSlotSupplier, + SlotInfo, + WorkflowSlotInfo, + ActivitySlotInfo, + LocalActivitySlotInfo, + SlotPermit, + SlotReserveContext, + SlotMarkUsedContext, + SlotReleaseContext, +} from './worker-tuner'; /* eslint-disable deprecation/deprecation */ // Anything below this line is deprecated @@ -130,18 +132,26 @@ export { workflowLogAttributes, } from '@temporalio/workflow/lib/logs'; -export { - /** - * @deprecated This function is meant for internal usage. Don't use it. 
- */ - timeOfDayToBigint, -} from './logger'; - export { /** * @deprecated Import error classes directly */ errors, + /** + * @deprecated - meant for internal use only + * @hidden + */ + ShutdownError, + /** + * @deprecated - meant for internal use only + * @hidden + */ + TransportError, + /** + * @deprecated - meant for internal use only + * @hidden + */ + UnexpectedError, } from './errors'; /** diff --git a/packages/worker/src/logger.ts b/packages/worker/src/logger.ts index c06a6c36f..3f625538a 100644 --- a/packages/worker/src/logger.ts +++ b/packages/worker/src/logger.ts @@ -1,16 +1,30 @@ import { formatWithOptions } from 'node:util'; import * as supportsColor from 'supports-color'; -import { getTimeOfDay } from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; import { LogLevel, LogMetadata, Logger } from '@temporalio/common'; /** @deprecated Import from @temporalio/common instead */ export { LogLevel, LogMetadata, Logger }; export interface LogEntry { - level: LogLevel; + /** + * Log message + */ message: string; + + /** + * Log level + */ + level: LogLevel; + + /** + * Time since epoch, in nanoseconds. + */ timestampNanos: bigint; - /** Custom attributes */ + + /** + * Custom attributes + */ meta?: LogMetadata; } @@ -46,14 +60,6 @@ function defaultLogFunction(entry: LogEntry): void { } } -/** - * Takes a `[seconds, nanos]` tuple as returned from getTimeOfDay and turns it into bigint. - */ -export function timeOfDayToBigint(timeOfDay: [number, number]): bigint { - const [seconds, nanos] = timeOfDay; - return BigInt(seconds) * 1_000_000_000n + BigInt(nanos); -} - /** * Default worker logger - uses a default log function to log messages to `console.error`. * See constructor arguments for customization. @@ -77,7 +83,7 @@ export class DefaultLogger implements Logger { level, message, meta: Object.keys(rest).length === 0 ? undefined : rest, - timestampNanos: timestampNanos ?? 
timeOfDayToBigint(getTimeOfDay()), + timestampNanos: timestampNanos ?? native.getTimeOfDay(), }); } } @@ -175,3 +181,11 @@ class LoggerWithMetadata implements Logger { return [...chain, meta]; } } + +export interface FlushableLogger extends Logger { + flush(): void; +} + +export function isFlushableLogger(logger: Logger): logger is FlushableLogger { + return 'flush' in logger && typeof logger.flush === 'function'; +} diff --git a/packages/worker/src/native-log-forward.ts b/packages/worker/src/native-log-forward.ts new file mode 100644 index 000000000..2c87218a4 --- /dev/null +++ b/packages/worker/src/native-log-forward.ts @@ -0,0 +1,71 @@ +import { Heap } from 'heap-js'; +import { SdkComponent } from '@temporalio/common'; +import { native } from '@temporalio/core-bridge'; +import { DefaultLogger, LogEntry, Logger, LogTimestamp } from './logger'; + +/** + * A logger that buffers logs from both Node.js and Rust Core and emits logs in the right order. + * + * @internal + */ +export class NativeLogCollector { + public readonly logger: Logger; + protected buffer = new Heap((a, b) => Number(a.timestampNanos - b.timestampNanos)); + + constructor(protected readonly next: Logger) { + this.logger = new DefaultLogger('TRACE', (entry) => this.buffer.add(entry)); + this.receive = this.receive.bind(this); + } + + /** + * Receive logs pushed from the native layer. + * + * Called from the native layer; this function is not allowed to throw. 
+ */ + public receive(entries: native.JsonString[]): void { + try { + for (const entry of entries) { + const log = this.convertFromNativeLogEntry(entry); + if (log) { + this.buffer.add(log); + } + } + this.flush(); + } catch (e) { + console.error('Error in NativeLogCollector.receive()', e); + } + } + + private convertFromNativeLogEntry(entry: native.JsonString): LogEntry | undefined { + try { + const log = JSON.parse(entry) as native.LogEntry; + const timestampNanos = BigInt(log.timestamp); + return { + level: log.level, + message: log.message, + meta: { + [LogTimestamp]: timestampNanos, + sdkComponent: SdkComponent.core, + ...log.fields, + }, + timestampNanos, + }; + } catch (e) { + console.error('Error in NativeLogCollector.convertFromNativeLogEntry()', e, entry); + return undefined; + } + } + + /** + * Flush all buffered logs into the logger supplied to the constructor + */ + flush(): void { + for (const entry of this.buffer) { + this.next.log(entry.level, entry.message, { + [LogTimestamp]: entry.timestampNanos, + ...entry.meta, + }); + } + this.buffer.clear(); + } +} diff --git a/packages/worker/src/runtime-options.ts b/packages/worker/src/runtime-options.ts new file mode 100644 index 000000000..ae6c620d9 --- /dev/null +++ b/packages/worker/src/runtime-options.ts @@ -0,0 +1,476 @@ +import { native } from '@temporalio/core-bridge'; +import { Logger, LogLevel } from '@temporalio/common'; +import { Duration, msToNumber } from '@temporalio/common/lib/time'; +import { DefaultLogger } from './logger'; +import { NativeLogCollector } from './native-log-forward'; + +/** + * Options used to create a Temporal Runtime. These are mostly about logging and telemetry. + */ +export interface RuntimeOptions { + /** + * A logger that will receive log messages emitted by the SDK, as well as through the + * [Workflow](https://typescript.temporal.io/api/namespaces/workflow#log) and + * [Activity](https://typescript.temporal.io/api/namespaces/activity#log) context loggers. 
+ * + * By default, the Runtime's logger outputs everything to stderr, filtering out + * messages below the `INFO` level. To customize this behavior, instantiate a + * {@link DefaultLogger} with a different log level and a custom output function. Refer to + * [this sample](https://github.com/temporalio/samples-typescript/tree/main/hello-world/src/sample.ts) + * for an example. + * + * Note that by default, log messages emitted from the native side of the SDK are printed directly + * to the console, _independently of `RuntimeOptions.logger`_ – that is, this option only applies + * to log messages emitted from the TS side of the SDK. See {@link TelemetryOptions.logging} on + * how to turn on forwarding of native logs to the TS logger. + */ + logger?: Logger; + + /** + * Options for Core-side telemetry, including logs and metrics. + */ + telemetryOptions?: TelemetryOptions; + + /** + * Automatically shut down workers on any of these signals. + * @default + * ```ts + * ['SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGUSR2'] + * ``` + */ + shutdownSignals?: NodeJS.Signals[]; +} + +// Telemetry Options /////////////////////////////////////////////////////////////////////////////// + +export interface TelemetryOptions { + /** + * A string in the env filter format specified here: + * https://docs.rs/tracing-subscriber/0.2.20/tracing_subscriber/struct.EnvFilter.html + * + * Which determines what tracing data is collected in the Core SDK. + * + * @deprecated Use `logging.filter` instead + */ + tracingFilter?: string; + + /** + * If set to `true`, do not prefix metrics with `temporal_`. + * + * @deprecated Use `metrics.metricPrefix` instead + */ + noTemporalPrefixForMetrics?: boolean; + + /** + * Control where to send log messages emitted by native code. + * + * ### Log Forwarding + * + * By default, logs emitted by the native side of the SDK are printed directly to the console, + * _independently of `RuntimeOptions.logger`_. 
To enable forwarding of those logs messages to the + * TS side logger, add the `forward` property to the `logging` object. + * + * For example: + * + * ```ts + * Runtime.install({ + * logger: new DefaultLogger('INFO'), + * telemetryOptions: { + * logging: { + * filter: { core: 'INFO', other: 'WARN' }, + * forward: {}, + * }, + * }, + * }); + * ``` + * + * Note that forwarded log messages are internally throttled/buffered for a few milliseconds to + * reduce overhead incurred by Rust-to-JS calls. In rare cases, this may result in log messages + * appearing out of order by a few milliseconds. Users are discouraged from using log forwarding + * with verboseness sets to `DEBUG` or `TRACE`. + */ + logging?: LogExporterConfig; + + /** + * Control exporting {@link NativeConnection} and {@link Worker} metrics. + * + * Turned off by default + */ + metrics?: MetricsExporterConfig; + + /** + * @deprecated Core SDK tracing is no longer supported. This option is ignored. + */ + tracing?: unknown; +} + +// Log Exporter //////////////////////////////////////////////////////////////////////////////////// + +/** + * Configuration for logs emitted by the native side of the SDK. + * + * @see {@link TelemetryOptions.logging} + */ +export type LogExporterConfig = { + /** + * Determines the verboseness of log output emitted by the native side of the SDK. + * + * This can be specified either as an (env filter format)[https://docs.rs/tracing-subscriber/0.2.20/tracing_subscriber/struct.EnvFilter.html] + * string, or as a {@link CoreLogFilterOptions} object. + * + * + * Note that if log forwarding is enabled, then the configured {@link Runtime.logger} may apply + * further filtering on top of this. 
+ * + * + * **BACKWARD COMPATIBILITY** + * + * If `logging.filter` is missing, the following legacy values (if present) will be used instead (in the given order): + * - {@link ForwardLogger.forward.level} => `makeTelemetryFilterString({ core: level, other: level })` + * - {@link TelemetryOptions.tracingFilter} + * - Default value of `makeTelemetryFilterString({ core: 'WARN', other: 'ERROR'})` + * + * @default `{ core: 'WARN', other: 'ERROR'}` (with some exceptions, as described in backward compatibility note above). + */ + filter?: string | CoreLogFilterOptions; +} & Partial; + +/** + * Log directly to console + */ +export interface ConsoleLogger { + console: {}; // eslint-disable-line @typescript-eslint/no-empty-object-type +} + +/** + * Forward logs to {@link Runtime} logger + */ +export interface ForwardLogger { + forward: { + /** + * What level, if any, logs should be forwarded from core at + * + * @deprecated Use {@link TelemetryOptions.logging.filter} instead + */ + level?: LogLevel; + }; +} + +/** + * Options for configuring the verboseness of log output emitted by the native side of the SDK. + */ +export interface CoreLogFilterOptions { + /** + * Determines which level of verbosity to keep for _SDK Core_'s related events. + * Any event with a verbosity level less than that value will be discarded. + * Possible values are, in order: 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'. + */ + core: LogLevel; + + /** + * Determines which level of verbosity to keep for events related to third + * party native packages imported by SDK Core. Any event with a verbosity level + * less than that value will be discarded. Possible values are, in order: + * 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'. + * + * @defaults `'ERROR'`. + */ + other?: LogLevel; +} + +// Metrics Exporter //////////////////////////////////////////////////////////////////////////////// + +/** + * Configuration for exporting metrics emitted by Core. 
+ */ +export type MetricsExporterConfig = { + /** + * Determines if the metrics exporter should use cumulative or delta temporality. + * Only applies to OpenTelemetry exporter. + * + * @deprecated Use 'otel.temporality' instead + */ + temporality?: 'cumulative' | 'delta'; + + /** + * A prefix to add to all metrics. + * + * @default 'temporal_' + */ + metricPrefix?: string; + + /** + * Tags to add to all metrics emitted by the worker. + */ + globalTags?: Record; + + /** + * Whether to put the service_name on every metric. + * + * @default true + */ + attachServiceName?: boolean; +} & (PrometheusMetricsExporter | OtelCollectorExporter); + +/** + * OpenTelemetry Collector options for exporting metrics or traces + */ +export interface OtelCollectorExporter { + otel: { + /** + * URL of a gRPC OpenTelemetry collector. + * + * Syntax generally looks like `http://server:4317` or `grpc://server:4317` for OTLP/gRPC exporters, + * or `http://server:4318/v1/metrics` for OTLP/HTTP exporters. Make sure to set the `http` option + * to `true` for OTLP/HTTP endpoints. + * + * @format Starts with "grpc://" or "http://" for an unsecured connection (typical), + * or "grpcs://" or "https://" for a TLS connection. + * @note The `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable, if set, will override this property. + */ + url: string; + + /** + * If set to true, the exporter will use OTLP/HTTP instead of OTLP/gRPC. + * + * @default false meaning that the exporter will use OTLP/gRPC. + */ + http?: boolean; + + /** + * Optional set of HTTP request headers to send to Collector (e.g. for authentication) + */ + headers?: Record; + + /** + * Specify how frequently in metrics should be exported. + * + * @format number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} + * @default 1 second + */ + metricsExportInterval?: Duration; + + /** + * If set to true, the exporter will use seconds for durations instead of milliseconds. 
+ * + * @default false + */ + useSecondsForDurations?: boolean; + + /** + * Determines if the metrics exporter should use cumulative or delta temporality. + + * See the [OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification/blob/ce50e4634efcba8da445cc23523243cb893905cb/specification/metrics/datamodel.md#temporality) + * for more information. + * + * @default 'cumulative' + */ + temporality?: 'cumulative' | 'delta'; + + /** + * Overrides boundary values for histogram metrics. + * + * The key is the metric name and the value is the list of bucket boundaries. + * + * For example: + * + * ``` + * { + * "request_latency": [1, 5, 10, 25, 50, 100, 250, 500, 1000], + * } + * ``` + * + * The metric name will apply regardless of name prefixing. + * + * See [this doc](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/metrics/enum.Aggregation.html#variant.ExplicitBucketHistogram.field.boundaries) + * for the exact meaning of boundaries. + */ + histogramBucketOverrides?: Record; + }; +} + +/** + * Prometheus metrics exporter options + */ +export interface PrometheusMetricsExporter { + prometheus: { + /** + * Address to bind the Prometheus HTTP metrics exporter server + * (for example, `0.0.0.0:1234`). + * + * Metrics will be available for scraping under the standard `/metrics` route. + */ + bindAddress: string; + /** + * If set to true, all counter names will include a "_total" suffix. + * + * @default false + */ + countersTotalSuffix?: boolean; + /** + * If set to true, all histograms will include the unit in their name as a suffix. + * EX: "_milliseconds" + * + * @default false + */ + unitSuffix?: boolean; + /** + * If set to true, the exporter will use seconds for durations instead of milliseconds. + * + * @default false + */ + useSecondsForDurations?: boolean; + + /** + * Overrides boundary values for histogram metrics. + * + * The key is the metric name and the value is the list of bucket boundaries. 
+ * + * For example: + * + * ``` + * { + * "request_latency": [1, 5, 10, 25, 50, 100, 250, 500, 1000], + * } + * ``` + * + * The metric name will apply regardless of name prefixing. + * + * See [this doc](https://docs.rs/opentelemetry_sdk/latest/opentelemetry_sdk/metrics/enum.Aggregation.html#variant.ExplicitBucketHistogram.field.boundaries) + * for the exact meaning of boundaries. + */ + histogramBucketOverrides?: Record; + }; +} + +// Compile Options //////////////////////////////////////////////////////////////////////////////// + +export interface CompiledRuntimeOptions { + shutdownSignals: NodeJS.Signals[]; + telemetryOptions: native.RuntimeOptions; + logger: Logger; +} + +export function compileOptions(options: RuntimeOptions): CompiledRuntimeOptions { + // eslint-disable-next-line deprecation/deprecation + const { metrics, noTemporalPrefixForMetrics } = options.telemetryOptions ?? {}; + + const [logger, logExporter] = compileLoggerOptions(options); + + return { + logger, + shutdownSignals: options.shutdownSignals ?? ['SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGUSR2'], + telemetryOptions: { + logExporter, + telemetry: { + metricPrefix: metrics?.metricPrefix ?? (noTemporalPrefixForMetrics ? '' : 'temporal_'), + attachServiceName: metrics?.attachServiceName ?? true, + }, + metricsExporter: + metrics && isOtelCollectorExporter(metrics) + ? ({ + type: 'otel', + url: metrics.otel.url, + protocol: metrics.otel.http ? 'http' : 'grpc', + headers: metrics.otel.headers ?? {}, + metricsExportInterval: msToNumber(metrics.otel.metricsExportInterval ?? '1s'), + // eslint-disable-next-line deprecation/deprecation + temporality: metrics.otel.temporality ?? metrics.temporality ?? 'cumulative', + useSecondsForDurations: metrics.otel.useSecondsForDurations ?? false, + histogramBucketOverrides: metrics.otel.histogramBucketOverrides ?? {}, + globalTags: metrics.globalTags ?? {}, + } satisfies native.MetricExporterOptions) + : metrics && isPrometheusMetricsExporter(metrics) + ? 
({ + type: 'prometheus', + bindAddress: metrics.prometheus.bindAddress, + unitSuffix: metrics.prometheus.unitSuffix ?? false, + countersTotalSuffix: metrics.prometheus.countersTotalSuffix ?? false, + useSecondsForDurations: metrics.prometheus.useSecondsForDurations ?? false, + histogramBucketOverrides: metrics.prometheus.histogramBucketOverrides ?? {}, + globalTags: metrics.globalTags ?? {}, + } satisfies native.MetricExporterOptions) + : null, + }, + }; +} + +function compileLoggerOptions(options: RuntimeOptions): [Logger, native.LogExporterOptions] { + // eslint-disable-next-line deprecation/deprecation + const { logging, tracingFilter } = options.telemetryOptions ?? {}; + + const logger = options.logger ?? new DefaultLogger('INFO'); + + // Unfortunately, "filter" has changed place and semantics a few times in the past, and we want to + // do our best not to break existing users, so this gets a bit more complex than it should be. + const defaultFilter = tracingFilter ?? makeTelemetryFilterString({ core: 'WARN', other: 'ERROR' }); + let loggingFilter: string | undefined = undefined; + if (logging?.filter) { + if (typeof logging.filter === 'string') { + loggingFilter = logging.filter; + } else if (typeof logging.filter === 'object') { + loggingFilter = makeTelemetryFilterString(logging.filter); + } else { + throw new TypeError('Invalid logging filter'); + } + } + // eslint-disable-next-line deprecation/deprecation + const forwardLevel = (logging as ForwardLogger | undefined)?.forward?.level; + const forwardLevelFilter = + forwardLevel && + makeTelemetryFilterString({ + core: forwardLevel, + other: forwardLevel, + }); + + if (logging && isForwardingLogger(logging)) { + const collector = new NativeLogCollector(logger); + return [ + collector.logger, + { + type: 'forward', + filter: loggingFilter ?? forwardLevelFilter ?? defaultFilter, + receiver: collector.receive, + }, + ]; + } else { + return [ + logger, + { + type: 'console', + filter: loggingFilter ?? 
defaultFilter, + }, + ]; + } +} + +// Utilities ////////////////////////////////////////////////////////////////////////////////////// + +/** + * @deprecated Use {@link CoreLogFilterOptions} instead. + */ +export type MakeTelemetryFilterStringOptions = CoreLogFilterOptions; + +/** + * A helper to build a filter string for use in `RuntimeOptions.telemetryOptions.tracingFilter`. + * + * Note that one may instead simply pass a {@link CoreLogFilterOptions} object directly to + * `RuntimeOptions.telemetryOptions.logging.filter`. This function may however still be useful + * in some particular use cases and will therefore be kept around. + */ +export function makeTelemetryFilterString(options: CoreLogFilterOptions): string { + const { core, other } = options; + return `${other ?? 'ERROR'},temporal_sdk_core=${core},temporal_client=${core},temporal_sdk=${core}`; +} + +function isOtelCollectorExporter(metrics: MetricsExporterConfig): metrics is OtelCollectorExporter { + return 'otel' in metrics && typeof metrics.otel === 'object'; +} + +function isPrometheusMetricsExporter(metrics: MetricsExporterConfig): metrics is PrometheusMetricsExporter { + return 'prometheus' in metrics && typeof metrics.prometheus === 'object'; +} + +function isForwardingLogger(options: LogExporterConfig): boolean { + return 'forward' in options && typeof options.forward === 'object'; +} diff --git a/packages/worker/src/runtime.ts b/packages/worker/src/runtime.ts index 7d44d534d..0e8911ce1 100644 --- a/packages/worker/src/runtime.ts +++ b/packages/worker/src/runtime.ts @@ -1,125 +1,18 @@ -import { promisify } from 'node:util'; import * as v8 from 'node:v8'; import * as fs from 'node:fs'; import * as os from 'node:os'; -import { Heap } from 'heap-js'; -import * as native from '@temporalio/core-bridge'; -import { - pollLogs, - runtimeShutdown, - newClient, - newRuntime, - TelemetryOptions, - CompiledTelemetryOptions, - ForwardLogger, - MetricsExporter, - OtelCollectorExporter, -} from 
'@temporalio/core-bridge'; -import { filterNullAndUndefined, normalizeTlsConfig } from '@temporalio/common/lib/internal-non-workflow'; -import { IllegalStateError, LogMetadata, SdkComponent } from '@temporalio/common'; +import { native } from '@temporalio/core-bridge'; +import { filterNullAndUndefined } from '@temporalio/common/lib/internal-non-workflow'; +import { IllegalStateError, Logger, SdkComponent } from '@temporalio/common'; import { temporal } from '@temporalio/proto'; import { History } from '@temporalio/common/lib/proto-utils'; -import { msToNumber } from '@temporalio/common/lib/time'; -import { DefaultLogger, LogEntry, Logger, LogTimestamp, timeOfDayToBigint } from './logger'; -import { compileConnectionOptions, getDefaultConnectionOptions, NativeConnectionOptions } from './connection-options'; +import { isFlushableLogger } from './logger'; +import { toNativeClientOptions, NativeConnectionOptions } from './connection-options'; import { byteArrayToBuffer, toMB } from './utils'; -import pkg from './pkg'; +import { CompiledRuntimeOptions, compileOptions, RuntimeOptions } from './runtime-options'; export { History }; -function isForwardingLogger(opts: TelemetryOptions['logging']): opts is ForwardLogger { - return Object.hasOwnProperty.call(opts, 'forward'); -} - -function isOtelCollectorExporter(opts: MetricsExporter): opts is OtelCollectorExporter { - return Object.hasOwnProperty.call(opts, 'otel'); -} - -/** - * Options used to create a Core runtime - */ -export interface RuntimeOptions { - /** - * Automatically shut down workers on any of these signals. - * @default - * ```ts - * ['SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGUSR2'] - * ``` - */ - shutdownSignals?: NodeJS.Signals[]; - - /** Telemetry options for traces/metrics/logging */ - telemetryOptions?: TelemetryOptions; - /** - * Custom logger for logging events from the SDK, by default we log everything to stderr - * at the INFO level. See https://docs.temporal.io/typescript/logging/ for more info. 
- */ - logger?: Logger; -} - -export interface CompiledRuntimeOptions { - shutdownSignals: NodeJS.Signals[]; - telemetryOptions: CompiledTelemetryOptions; - logger: Logger; -} - -export interface MakeTelemetryFilterStringOptions { - /** - * Determines which level of verbosity to keep for _SDK Core_'s related events. - * Any event with a verbosity level less than that value will be discarded. - * Possible values are, in order: 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'. - */ - core: native.LogLevel; - - /** - * Determines which level of verbosity to keep for events related to third - * party native packages imported by SDK Core. Any event with a verbosity level - * less than that value will be discarded. Possible values are, in order: - * 'TRACE' | 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'. - * - * @defaults `'INFO'` - */ - other?: native.LogLevel; -} - -/** - * A helper to build a filter string for use in `RuntimeOptions.telemetryOptions.tracingFilter`. - * - * Example: - * ``` - * telemetryOptions: { - * logging: { - * filter: makeTelemetryFilterString({ core: 'TRACE', other: 'DEBUG' }); - * // ... 
- * }, - * } - * ``` - */ -export function makeTelemetryFilterString(options: MakeTelemetryFilterStringOptions): string { - const { core, other } = { other: 'INFO', ...options }; - return `${other},temporal_sdk_core=${core},temporal_client=${core},temporal_sdk=${core}`; -} - -/** A logger that buffers logs from both Node.js and Rust Core and emits logs in the right order */ -class BufferedLogger extends DefaultLogger { - protected buffer = new Heap((a, b) => Number(a.timestampNanos - b.timestampNanos)); - - constructor(protected readonly next: Logger) { - super('TRACE', (entry) => this.buffer.add(entry)); - } - - /** Flush all buffered logs into the logger supplied to the constructor */ - flush(): void { - for (const entry of this.buffer) { - this.next.log(entry.level, entry.message, { - ...entry.meta, - [LogTimestamp]: entry.timestampNanos, - }); - } - this.buffer.clear(); - } -} - type TrackedNativeObject = native.Client | native.Worker | native.EphemeralServer; /** @@ -128,14 +21,13 @@ type TrackedNativeObject = native.Client | native.Worker | native.EphemeralServe * Use {@link install} in order to customize the server connection options or other global process options. 
*/ export class Runtime { + public readonly logger: Logger; + /** Track the number of pending creation calls into the tokio runtime to prevent shut down */ protected pendingCreations = 0; /** Track the registered native objects to automatically shutdown when all have been deregistered */ protected readonly backRefs = new Set(); - protected stopPollingForLogs = false; - protected stopPollingForLogsCallback?: () => void; - protected readonly logPollPromise: Promise; - public readonly logger: Logger; + protected readonly shutdownSignalCallbacks = new Set<() => void>(); protected state: 'RUNNING' | 'SHUTTING_DOWN' = 'RUNNING'; @@ -152,13 +44,7 @@ export class Runtime { public readonly native: native.Runtime, public readonly options: CompiledRuntimeOptions ) { - if (this.isForwardingLogs()) { - const logger = (this.logger = new BufferedLogger(this.options.logger)); - this.logPollPromise = this.initLogPolling(logger); - } else { - this.logger = this.options.logger; - this.logPollPromise = Promise.resolve(); - } + this.logger = options.logger; this.checkHeapSizeLimit(); this.setupShutdownHook(); } @@ -201,8 +87,8 @@ export class Runtime { * Factory function for creating a new Core instance, not exposed because Core is meant to be used as a singleton */ protected static create(options: RuntimeOptions, instantiator: 'install' | 'instance'): Runtime { - const compiledOptions = this.compileOptions(options); - const runtime = newRuntime(compiledOptions.telemetryOptions); + const compiledOptions = compileOptions(options); + const runtime = native.newRuntime(compiledOptions.telemetryOptions); // Remember the provided options in case Core is reinstantiated after being shut down this.defaultOptions = options; @@ -211,129 +97,12 @@ export class Runtime { return this._instance; } - // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types - protected static compileOptions(options: RuntimeOptions): CompiledRuntimeOptions { - // eslint-disable-next-line 
deprecation/deprecation - const { logging, metrics, tracingFilter, noTemporalPrefixForMetrics } = options.telemetryOptions ?? {}; - - const defaultFilter = - tracingFilter ?? - makeTelemetryFilterString({ - core: 'WARN', - other: 'ERROR', - }); - const loggingFilter = logging?.filter; - - // eslint-disable-next-line deprecation/deprecation - const forwardLevel = (logging as ForwardLogger | undefined)?.forward?.level; - const forwardLevelFilter = - forwardLevel && - makeTelemetryFilterString({ - core: forwardLevel, - other: forwardLevel, - }); - - return { - shutdownSignals: options.shutdownSignals ?? ['SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGUSR2'], - telemetryOptions: { - logging: - !!logging && isForwardingLogger(logging) - ? { - filter: loggingFilter ?? forwardLevelFilter ?? defaultFilter, - forward: {}, - } - : { - filter: loggingFilter ?? defaultFilter, - console: {}, - }, - metrics: metrics && { - metricPrefix: metrics.metricPrefix ?? (noTemporalPrefixForMetrics ? '' : 'temporal_'), - globalTags: metrics.globalTags, - attachServiceName: metrics.attachServiceName ?? true, - ...(isOtelCollectorExporter(metrics) - ? { - otel: { - url: metrics.otel.url, - http: metrics.otel.http ?? false, - headers: metrics.otel.headers ?? {}, - metricsExportInterval: msToNumber(metrics.otel.metricsExportInterval ?? '1s'), - // eslint-disable-next-line deprecation/deprecation - temporality: metrics.otel.temporality ?? metrics.temporality ?? 'cumulative', - useSecondsForDurations: metrics.otel.useSecondsForDurations ?? false, - histogramBucketOverrides: metrics.otel.histogramBucketOverrides, - }, - } - : { - prometheus: { - bindAddress: metrics.prometheus.bindAddress, - unitSuffix: metrics.prometheus.unitSuffix ?? false, - countersTotalSuffix: metrics.prometheus.countersTotalSuffix ?? false, - useSecondsForDurations: metrics.prometheus.useSecondsForDurations ?? 
false, - histogramBucketOverrides: metrics.prometheus.histogramBucketOverrides, - }, - }), - }, - }, - logger: options.logger ?? new DefaultLogger('INFO'), - }; - } - - protected async initLogPolling(logger: BufferedLogger): Promise { - if (!this.isForwardingLogs()) { - return; - } - - const poll = promisify(pollLogs); - const doPoll = async () => { - const logs = await poll(this.native); - for (const log of logs) { - const meta: LogMetadata = { - [LogTimestamp]: timeOfDayToBigint(log.timestamp), - sdkComponent: SdkComponent.core, - ...log.fields, - }; - logger.log(log.level, log.message, meta); - } - }; - - try { - for (;;) { - await doPoll(); - logger.flush(); - if (this.stopPollingForLogs) { - break; - } - await new Promise((resolve) => { - setTimeout(resolve, 3); - this.stopPollingForLogsCallback = resolve; - }); - } - } catch (error) { - // Log using the original logger instead of buffering - this.options.logger.warn('Error gathering forwarded logs from core', { - error, - sdkComponent: SdkComponent.worker, - }); - } finally { - logger.flush(); - } - } - - protected isForwardingLogs(): boolean { - const logger = this.options.telemetryOptions.logging; - return logger != null && isForwardingLogger(logger); - } - /** * Flush any buffered logs. - * - * This is a noop in case the instance is configured with - * `logForwardingLevel=OFF`. */ flushLogs(): void { - if (this.isForwardingLogs()) { - const logger = this.logger as BufferedLogger; - logger.flush(); + if (isFlushableLogger(this.logger)) { + this.logger.flush(); } } @@ -344,21 +113,11 @@ export class Runtime { * @hidden */ public async createNativeClient(options?: NativeConnectionOptions): Promise { - const compiledServerOptions = compileConnectionOptions({ - ...getDefaultConnectionOptions(), - ...filterNullAndUndefined(options ?? {}), - }); - if (options?.apiKey && compiledServerOptions.metadata?.['Authorization']) { - throw new TypeError( - 'Both `apiKey` option and `Authorization` header were provided. 
Only one makes sense to use at a time.' - ); - } - const clientOptions = { - ...compiledServerOptions, - tls: normalizeTlsConfig(compiledServerOptions.tls), - url: options?.tls ? `https://${compiledServerOptions.address}` : `http://${compiledServerOptions.address}`, - }; - return await this.createNative(promisify(newClient), this.native, clientOptions); + return await this.createNative( + native.newClient, + this.native, + toNativeClientOptions(filterNullAndUndefined(options ?? {})) + ); } /** @@ -380,16 +139,20 @@ export class Runtime { * @hidden */ public async registerWorker(client: native.Client, options: native.WorkerOptions): Promise { - return await this.createNative(promisify(native.newWorker), client, options); + return await this.createNativeNoBackRef(async () => { + const worker = native.newWorker(client, options); + await native.workerValidate(worker); + this.backRefs.add(worker); + return worker; + }); } /** @hidden */ - public async createReplayWorker(options: native.WorkerOptions): Promise { + public async createReplayWorker(options: native.WorkerOptions): Promise<[native.Worker, native.HistoryPusher]> { return await this.createNativeNoBackRef(async () => { - const fn = promisify(native.newReplayWorker); - const replayWorker = await fn(this.native, options); - this.backRefs.add(replayWorker.worker); - return replayWorker; + const [worker, pusher] = native.newReplayWorker(this.native, options); + this.backRefs.add(worker); + return [worker, pusher]; }); } @@ -402,7 +165,7 @@ export class Runtime { */ public async pushHistory(pusher: native.HistoryPusher, workflowId: string, history: History): Promise { const encoded = byteArrayToBuffer(temporal.api.history.v1.History.encodeDelimited(history).finish()); - return await promisify(native.pushHistory)(pusher, workflowId, encoded); + return await native.pushHistory(pusher, workflowId, encoded); } /** @@ -423,9 +186,12 @@ export class Runtime { * @hidden */ public async deregisterWorker(worker: 
native.Worker): Promise { - native.workerFinalizeShutdown(worker); - this.backRefs.delete(worker); - await this.shutdownIfIdle(); + try { + await native.workerFinalizeShutdown(worker); + } finally { + this.backRefs.delete(worker); + await this.shutdownIfIdle(); + } } /** @@ -435,7 +201,7 @@ * @hidden */ public async createEphemeralServer(options: native.EphemeralServerConfig): Promise { - return await this.createNative(promisify(native.startEphemeralServer), this.native, options, pkg.version); + return await this.createNative(native.startEphemeralServer, this.native, options); } /** @@ -445,7 +211,7 @@ * @hidden */ public async shutdownEphemeralServer(server: native.EphemeralServer): Promise { - await promisify(native.shutdownEphemeralServer)(server); + await native.shutdownEphemeralServer(server); this.backRefs.delete(server); await this.shutdownIfIdle(); } @@ -498,13 +264,16 @@ * @hidden */ public async shutdown(): Promise { - delete Runtime._instance; - this.teardownShutdownHook(); - this.stopPollingForLogs = true; - this.stopPollingForLogsCallback?.(); - // This will effectively drain all logs - await this.logPollPromise; - await promisify(runtimeShutdown)(this.native); + if (this.native === undefined) return; + try { + delete Runtime._instance; + this.teardownShutdownHook(); + native.runtimeShutdown(this.native); + (this as any).native = undefined; + this.flushLogs(); + } finally { + delete (this as any).native; + } } /** diff --git a/packages/worker/src/utils.ts b/packages/worker/src/utils.ts index 62b8198e0..22f77fb44 100644 --- a/packages/worker/src/utils.ts +++ b/packages/worker/src/utils.ts @@ -7,8 +7,8 @@ export function toMB(bytes: number, fractionDigits = 2): string { return (bytes / 1024 / 1024).toFixed(fractionDigits); } -export function byteArrayToBuffer(array: Uint8Array): ArrayBuffer { - return array.buffer.slice(array.byteOffset, array.byteLength + array.byteOffset); +export 
function byteArrayToBuffer(array: Uint8Array): Buffer { + return Buffer.from(array.buffer, array.byteOffset, array.byteLength); } export function convertToParentWorkflowType( diff --git a/packages/worker/src/worker-options.ts b/packages/worker/src/worker-options.ts index 993de4592..ce9946c93 100644 --- a/packages/worker/src/worker-options.ts +++ b/packages/worker/src/worker-options.ts @@ -6,8 +6,7 @@ import { Duration, msOptionalToNumber, msToNumber } from '@temporalio/common/lib import { loadDataConverter } from '@temporalio/common/lib/internal-non-workflow'; import { LoggerSinks } from '@temporalio/workflow'; import { Context } from '@temporalio/activity'; -import { checkExtends } from '@temporalio/common/lib/type-helpers'; -import { WorkerOptions as NativeWorkerOptions, WorkerTuner as NativeWorkerTuner } from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; import { ActivityInboundLogInterceptor } from './activity-log-interceptor'; import { NativeConnection } from './connection'; import { CompiledWorkerInterceptors, WorkerInterceptors } from './interceptors'; @@ -19,43 +18,6 @@ import { MiB } from './utils'; import { WorkflowBundleWithSourceMap } from './workflow/bundler'; import { asNativeTuner, WorkerTuner } from './worker-tuner'; -export type { WebpackConfiguration }; - -export interface WorkflowBundlePath { - codePath: string; -} - -/** - * Note this no longer contains a source map. - * The name was preserved to avoid breaking backwards compatibility. 
- * - * @deprecated - */ -export interface WorkflowBundlePathWithSourceMap { - codePath: string; - sourceMapPath: string; -} - -export interface WorkflowBundle { - code: string; -} - -export type WorkflowBundleOption = - | WorkflowBundle - | WorkflowBundleWithSourceMap - | WorkflowBundlePath - | WorkflowBundlePathWithSourceMap; // eslint-disable-line deprecation/deprecation - -export function isCodeBundleOption(bundleOpt: WorkflowBundleOption): bundleOpt is WorkflowBundle { - const opt = bundleOpt as any; // Cast to access properties without TS complaining - return typeof opt.code === 'string'; -} - -export function isPathBundleOption(bundleOpt: WorkflowBundleOption): bundleOpt is WorkflowBundlePath { - const opt = bundleOpt as any; // Cast to access properties without TS complaining - return typeof opt.codePath === 'string'; -} - /** * Options to configure the {@link Worker} * @@ -537,68 +499,7 @@ export interface WorkerOptions { }; } -/** - * WorkerOptions with all of the Worker required attributes - */ -export type WorkerOptionsWithDefaults = WorkerOptions & - Required< - Pick< - WorkerOptions, - | 'namespace' - | 'identity' - | 'useVersioning' - | 'shutdownGraceTime' - | 'maxConcurrentWorkflowTaskPolls' - | 'maxConcurrentActivityTaskPolls' - | 'nonStickyToStickyPollRatio' - | 'enableNonLocalActivities' - | 'stickyQueueScheduleToStartTimeout' - | 'maxCachedWorkflows' - | 'workflowThreadPoolSize' - | 'maxHeartbeatThrottleInterval' - | 'defaultHeartbeatThrottleInterval' - | 'showStackTraceSources' - | 'debugMode' - | 'reuseV8Context' - | 'tuner' - > - > & { - interceptors: Required; - - /** - * Time to wait for result when calling a Workflow isolate function. - * @format number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} - * - * This value is not exposed at the moment. 
- * - * @default 5s - */ - isolateExecutionTimeout: Duration; - }; - -/** - * {@link WorkerOptions} where the attributes the Worker requires are required and time units are converted from ms - * formatted strings to numbers. - */ -export interface CompiledWorkerOptions - extends Omit { - interceptors: CompiledWorkerInterceptors; - shutdownGraceTimeMs: number; - shutdownForceTimeMs?: number; - isolateExecutionTimeoutMs: number; - stickyQueueScheduleToStartTimeoutMs: number; - maxHeartbeatThrottleIntervalMs: number; - defaultHeartbeatThrottleIntervalMs: number; - loadedDataConverter: LoadedDataConverter; - activities: Map; - tuner: NativeWorkerTuner; -} - -export type CompiledWorkerOptionsWithBuildId = CompiledWorkerOptions & { - buildId: string; -}; - -checkExtends(); +// Replay Worker /////////////////////////////////////////////////////////////////////////////////// /** * {@link WorkerOptions} with inapplicable-to-replay fields removed. @@ -636,6 +537,47 @@ export interface ReplayWorkerOptions replayName?: string; } +// Workflow Bundle ///////////////////////////////////////////////////////////////////////////////// + +export type { WebpackConfiguration }; + +export interface WorkflowBundlePath { + codePath: string; +} + +/** + * Note this no longer contains a source map. + * The name was preserved to avoid breaking backwards compatibility. 
+ * + * @deprecated + */ +export interface WorkflowBundlePathWithSourceMap { + codePath: string; + sourceMapPath: string; +} + +export interface WorkflowBundle { + code: string; +} + +export type WorkflowBundleOption = + | WorkflowBundle + | WorkflowBundleWithSourceMap + | WorkflowBundlePath + | WorkflowBundlePathWithSourceMap; // eslint-disable-line deprecation/deprecation + +export function isCodeBundleOption(bundleOpt: WorkflowBundleOption): bundleOpt is WorkflowBundle { + const opt = bundleOpt as any; // Cast to access properties without TS complaining + return typeof opt.code === 'string'; +} + +export function isPathBundleOption(bundleOpt: WorkflowBundleOption): bundleOpt is WorkflowBundlePath { + const opt = bundleOpt as any; // Cast to access properties without TS complaining + return typeof opt.codePath === 'string'; +} + +// Sinks and Interceptors ////////////////////////////////////////////////////////////////////////// + /** * Build the sink used internally by the SDK to forwards log messages from the Workflow sandbox to an actual logger. * @@ -697,6 +639,69 @@ function compileWorkerInterceptors({ }; } +// Compile Options ///////////////////////////////////////////////////////////////////////////////// + +/** + * WorkerOptions with all of the Worker required attributes + */ +export type WorkerOptionsWithDefaults = WorkerOptions & + Required< + Pick< + WorkerOptions, + | 'namespace' + | 'identity' + | 'useVersioning' + | 'shutdownGraceTime' + | 'maxConcurrentWorkflowTaskPolls' + | 'maxConcurrentActivityTaskPolls' + | 'nonStickyToStickyPollRatio' + | 'enableNonLocalActivities' + | 'stickyQueueScheduleToStartTimeout' + | 'maxCachedWorkflows' + | 'workflowThreadPoolSize' + | 'maxHeartbeatThrottleInterval' + | 'defaultHeartbeatThrottleInterval' + | 'showStackTraceSources' + | 'debugMode' + | 'reuseV8Context' + | 'tuner' + > + > & { + interceptors: Required; + + /** + * Time to wait for result when calling a Workflow isolate function. 
+ * @format number of milliseconds or {@link https://www.npmjs.com/package/ms | ms-formatted string} + * + * This value is not exposed at the moment. + * + * @default 5s + */ + isolateExecutionTimeout: Duration; + }; + +/** + * {@link WorkerOptions} where the attributes the Worker requires are required and time units are converted from ms + * formatted strings to numbers. + */ +export interface CompiledWorkerOptions + extends Omit { + interceptors: CompiledWorkerInterceptors; + shutdownGraceTimeMs: number; + shutdownForceTimeMs?: number; + isolateExecutionTimeoutMs: number; + stickyQueueScheduleToStartTimeoutMs: number; + maxHeartbeatThrottleIntervalMs: number; + defaultHeartbeatThrottleIntervalMs: number; + loadedDataConverter: LoadedDataConverter; + activities: Map; + tuner: native.WorkerTunerOptions; +} + +export type CompiledWorkerOptionsWithBuildId = CompiledWorkerOptions & { + buildId: string; +}; + function addDefaultWorkerOptions(options: WorkerOptions, logger: Logger): WorkerOptionsWithDefaults { const { buildId, @@ -840,6 +845,30 @@ export function compileWorkerOptions(rawOpts: WorkerOptions, logger: Logger): Co }; } +export function toNativeWorkerOptions(opts: CompiledWorkerOptionsWithBuildId): native.WorkerOptions { + return { + identity: opts.identity, + buildId: opts.buildId, + useVersioning: opts.useVersioning, + taskQueue: opts.taskQueue, + namespace: opts.namespace, + tuner: opts.tuner, + nonStickyToStickyPollRatio: opts.nonStickyToStickyPollRatio, + maxConcurrentWorkflowTaskPolls: opts.maxConcurrentWorkflowTaskPolls, + maxConcurrentActivityTaskPolls: opts.maxConcurrentActivityTaskPolls, + enableNonLocalActivities: opts.enableNonLocalActivities, + stickyQueueScheduleToStartTimeout: msToNumber(opts.stickyQueueScheduleToStartTimeout), + maxCachedWorkflows: opts.maxCachedWorkflows, + maxHeartbeatThrottleInterval: msToNumber(opts.maxHeartbeatThrottleInterval), + defaultHeartbeatThrottleInterval: msToNumber(opts.defaultHeartbeatThrottleInterval), + 
maxTaskQueueActivitiesPerSecond: opts.maxTaskQueueActivitiesPerSecond ?? null, + maxActivitiesPerSecond: opts.maxActivitiesPerSecond ?? null, + shutdownGraceTime: msToNumber(opts.shutdownGraceTime), + }; +} + +// Utils /////////////////////////////////////////////////////////////////////////////////////////// + function isSet(env: string | undefined): boolean { if (env === undefined) return false; env = env.toLocaleLowerCase(); diff --git a/packages/worker/src/worker-tuner.ts b/packages/worker/src/worker-tuner.ts index a5876c75b..22d82d5d7 100644 --- a/packages/worker/src/worker-tuner.ts +++ b/packages/worker/src/worker-tuner.ts @@ -1,25 +1,11 @@ -import { - ActivitySlotInfo, - CustomSlotSupplier, - FixedSizeSlotSupplier, - LocalActivitySlotInfo, - ResourceBasedTunerOptions, - SlotInfo, - SlotMarkUsedContext, - SlotPermit, - SlotReleaseContext, - SlotReserveContext, - SlotSupplier as NativeSlotSupplier, - WorkerTuner as NativeWorkerTuner, - WorkflowSlotInfo, -} from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; import { Duration, msToNumber } from '@temporalio/common/lib/time'; import { Logger } from '@temporalio/common'; -export { FixedSizeSlotSupplier, ResourceBasedTunerOptions }; - /** - * Controls how slots for different task types will be handed out. + * A worker tuner allows the customization of the performance characteristics of workers by + * controlling how "slots" are handed out for different task types. In order to poll for and then + * run tasks, a slot must first be reserved by the {@link SlotSupplier} returned by the tuner. * * @experimental Worker Tuner is an experimental feature and may be subject to change. */ @@ -39,9 +25,6 @@ export interface TunerHolder { /** * Controls how slots are handed out for a specific task type. * - * For now, only {@link ResourceBasedSlotOptions} and {@link FixedSizeSlotSupplier} are supported, - * but we may add support for custom tuners in the future. 
- * * @experimental Worker Tuner is an experimental feature and may be subject to change. */ export type SlotSupplier = @@ -49,15 +32,50 @@ export type SlotSupplier = | FixedSizeSlotSupplier | CustomSlotSupplier; +// Resource Based ////////////////////////////////////////////////////////////////////////////////// + /** - * Resource based slot supplier options for a specific kind of slot. + * This tuner attempts to maintain certain levels of resource usage when under load. You do not + * need more than one instance of this when using it for multiple slot types. * * @experimental Worker Tuner is an experimental feature and may be subject to change. */ -type ResourceBasedSlotsForType = ResourceBasedSlotOptions & { - type: 'resource-based'; +export interface ResourceBasedTuner { + /** + * Options for the tuner + */ tunerOptions: ResourceBasedTunerOptions; -}; + /** + * Options for workflow task slots. Defaults to a minimum of 2 slots and a maximum of 1000 slots + * with no ramp throttle + */ + workflowTaskSlotOptions?: ResourceBasedSlotOptions; + /** + * Options for activity task slots. Defaults to a minimum of 1 slots and a maximum of 2000 slots + * with 50ms ramp throttle + */ + activityTaskSlotOptions?: ResourceBasedSlotOptions; + /** + * Options for local activity task slots. Defaults to a minimum of 1 slots and a maximum of 2000 + * slots with 50ms ramp throttle + */ + localActivityTaskSlotOptions?: ResourceBasedSlotOptions; +} + +/** + * Options for a {@link ResourceBasedTuner} to control target resource usage + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. + */ +export interface ResourceBasedTunerOptions { + // A value between 0 and 1 that represents the target (system) memory usage. It's not recommended + // to set this higher than 0.8, since how much memory a workflow may use is not predictable, and + // you don't want to encounter OOM errors. 
+ targetMemoryUsage: number; + // A value between 0 and 1 that represents the target (system) CPU usage. This can be set to 1.0 + // if desired, but it's recommended to leave some headroom for other processes. + targetCpuUsage: number; +} /** * Options for a specific slot type within a {@link ResourceBasedSlotsForType} @@ -83,34 +101,174 @@ export interface ResourceBasedSlotOptions { } /** - * This tuner attempts to maintain certain levels of resource usage when under load. You do not - * need more than one instance of this when using it for multiple slot types. + * Resource based slot supplier options for a specific kind of slot. * * @experimental Worker Tuner is an experimental feature and may be subject to change. */ -export interface ResourceBasedTuner { +export type ResourceBasedSlotsForType = ResourceBasedSlotOptions & { + type: 'resource-based'; + tunerOptions: ResourceBasedTunerOptions; +}; + +// Fixed Size ////////////////////////////////////////////////////////////////////////////////////// + +/** + * A fixed-size slot supplier that will never issue more than a fixed number of slots. + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. + */ +export interface FixedSizeSlotSupplier { + type: 'fixed-size'; + // The maximum number of slots that can be issued + numSlots: number; +} + +/** + * The interface can be implemented to provide custom slot supplier behavior. + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. + */ +export interface CustomSlotSupplier { + type: 'custom'; + /** - * Options for the tuner + * This function is called before polling for new tasks. Your implementation should return a permit + * when a slot is available. + * + * Note: This function is called asynchronously from the Rust side. It should return a Promise that + * resolves when a slot is available. You can use async/await or return a Promise directly. 
+ * + * The only acceptable exception to throw is AbortError, any other exceptions thrown will be + * logged and ignored. + * + * The value inside the returned promise should be an object, however other types will still count + * as having issued a permit. Including undefined or null. Returning undefined or null does *not* + * mean you have not issued a permit. Implementations are expected to block until a meaningful + * permit can be issued. + * + * @param ctx The context for slot reservation. + * @param abortSignal The SDK may decide to abort the reservation request if it's no longer + * needed. Implementations may clean up and then must reject the promise with AbortError. + * @returns A promise that resolves to a permit to use the slot which may be populated with your own data. */ - tunerOptions: ResourceBasedTunerOptions; + reserveSlot(ctx: SlotReserveContext, abortSignal: AbortSignal): Promise; + /** - * Options for workflow task slots. Defaults to a minimum of 2 slots and a maximum of 1000 slots - * with no ramp throttle + * This function is called when trying to reserve slots for "eager" workflow and activity tasks. + * Eager tasks are those which are returned as a result of completing a workflow task, rather than + * from polling. Your implementation must not block, and if a slot is available, return a permit + * to use that slot. + * + * @param ctx The context for slot reservation. + * @returns Maybe a permit to use the slot which may be populated with your own data. */ - workflowTaskSlotOptions?: ResourceBasedSlotOptions; + tryReserveSlot(ctx: SlotReserveContext): SlotPermit | null; + /** - * Options for activity task slots. Defaults to a minimum of 1 slots and a maximum of 2000 slots - * with 50ms ramp throttle + * This function is called once a slot is actually being used to process some task, which may be + * some time after the slot was reserved originally. 
For example, if there is no work for a + * worker, a number of slots equal to the number of active pollers may already be reserved, but + * none of them are being used yet. This call should be non-blocking. + * + * @param ctx The context for marking a slot as used. */ - activityTaskSlotOptions?: ResourceBasedSlotOptions; + markSlotUsed(slot: SlotMarkUsedContext): void; + /** - * Options for local activity task slots. Defaults to a minimum of 1 slots and a maximum of 2000 - * slots with 50ms ramp throttle + * This function is called once a permit is no longer needed. This could be because the task has + * finished, whether successfully or not, or because the slot was no longer needed (ex: the number + * of active pollers decreased). This call should be non-blocking. + * + * @param ctx The context for releasing a slot. */ - localActivityTaskSlotOptions?: ResourceBasedSlotOptions; + releaseSlot(slot: SlotReleaseContext): void; +} + +export type SlotInfo = WorkflowSlotInfo | ActivitySlotInfo | LocalActivitySlotInfo; + +export interface WorkflowSlotInfo { + type: 'workflow'; + workflowType: string; + isSticky: boolean; +} + +export interface ActivitySlotInfo { + type: 'activity'; + activityType: string; +} + +export interface LocalActivitySlotInfo { + type: 'local-activity'; + activityType: string; +} + +/** + * A permit to use a slot. + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. 
+ */ +// eslint-disable-next-line @typescript-eslint/no-empty-object-type +export interface SlotPermit {} + +export interface SlotReserveContext { + /** + * The type of slot trying to be reserved + */ + slotType: SlotInfo['type']; + /** + * The name of the task queue for which this reservation request is associated + */ + taskQueue: string; + /** + * The identity of the worker that is requesting the reservation + */ + workerIdentity: string; + /** + * The build id of the worker that is requesting the reservation + */ + workerBuildId: string; + /** + * True iff this is a reservation for a sticky poll for a workflow task + */ + isSticky: boolean; +} + +/** + * Context for marking a slot as used. + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. + */ +export interface SlotMarkUsedContext { + /** + * Info about the task that will be using the slot + */ + slotInfo: SI; + /** + * The permit that was issued when the slot was reserved + */ + permit: SlotPermit; +} + +/** + * Context for releasing a slot. + * + * @experimental Worker Tuner is an experimental feature and may be subject to change. + */ +export interface SlotReleaseContext { + /** + * Info about the task that used this slot, if any. A slot may be released without being used in + * the event a poll times out. 
+ */ + slotInfo?: SI; + /** + * The permit that was issued when the slot was reserved + */ + permit: SlotPermit; } -export function asNativeTuner(tuner: WorkerTuner, logger: Logger): NativeWorkerTuner { +//////////////////////////////////////////////////////////////////////////////////////////////////// + +export function asNativeTuner(tuner: WorkerTuner, logger: Logger): native.WorkerTunerOptions { if (isTunerHolder(tuner)) { let tunerOptions = undefined; const retme = { @@ -125,7 +283,10 @@ export function asNativeTuner(tuner: WorkerTuner, logger: Logger): NativeWorkerT ]) { if (isResourceBased(supplier)) { if (tunerOptions !== undefined) { - if (tunerOptions !== supplier.tunerOptions) { + if ( + tunerOptions.targetCpuUsage !== supplier.tunerOptions.targetCpuUsage || + tunerOptions.targetMemoryUsage !== supplier.tunerOptions.targetMemoryUsage + ) { throw new TypeError('Cannot construct worker tuner with multiple different tuner options'); } } else { @@ -143,19 +304,19 @@ export function asNativeTuner(tuner: WorkerTuner, logger: Logger): NativeWorkerT type: 'resource-based', tunerOptions: tuner.tunerOptions, ...wftSO, - rampThrottleMs: msToNumber(wftSO.rampThrottle), + rampThrottle: msToNumber(wftSO.rampThrottle), }, activityTaskSlotSupplier: { type: 'resource-based', tunerOptions: tuner.tunerOptions, ...atSO, - rampThrottleMs: msToNumber(atSO.rampThrottle), + rampThrottle: msToNumber(atSO.rampThrottle), }, localActivityTaskSlotSupplier: { type: 'resource-based', tunerOptions: tuner.tunerOptions, ...latSO, - rampThrottleMs: msToNumber(latSO.rampThrottle), + rampThrottle: msToNumber(latSO.rampThrottle), }, }; } else { @@ -167,78 +328,43 @@ const isResourceBasedTuner = (tuner: WorkerTuner): tuner is ResourceBasedTuner = Object.hasOwnProperty.call(tuner, 'tunerOptions'); const isTunerHolder = (tuner: WorkerTuner): tuner is TunerHolder => Object.hasOwnProperty.call(tuner, 'workflowTaskSlotSupplier'); -const isResourceBased = (sup: SlotSupplier | NativeSlotSupplier): 
sup is ResourceBasedSlotsForType => +const isResourceBased = (sup: SlotSupplier | native.SlotSupplierOptions): sup is ResourceBasedSlotsForType => sup.type === 'resource-based'; -const isCustom = (sup: SlotSupplier | NativeSlotSupplier): sup is CustomSlotSupplier => sup.type === 'custom'; +const isCustom = (sup: SlotSupplier | native.SlotSupplierOptions): sup is CustomSlotSupplier => + sup.type === 'custom'; type ActOrWorkflow = 'activity' | 'workflow'; -const abortString = '__ABORTED_BY_CORE__'; - -class ErrorLoggingSlotSupplier implements CustomSlotSupplier { - readonly type = 'custom'; - constructor( - private readonly supplier: CustomSlotSupplier, - private readonly logger: Logger - ) {} - - async reserveSlot(ctx: SlotReserveContext, registerAbort: any): Promise { - const abortController = new AbortController(); - registerAbort(() => abortController.abort(abortString)); - return await this.supplier.reserveSlot(ctx, abortController.signal).catch((err) => { - if (err !== abortString) { - this.logger.error('Error in custom slot supplier `reserveSlot`', err); - } - throw err; - }); - } - - tryReserveSlot(ctx: SlotReserveContext): SlotPermit | null { - try { - return this.supplier.tryReserveSlot(ctx); - } catch (err: any) { - this.logger.error('Error in custom slot supplier `tryReserveSlot`', err); - } - return null; - } - - markSlotUsed(ctx: SlotMarkUsedContext): void { - try { - this.supplier.markSlotUsed(ctx); - } catch (err: any) { - this.logger.error('Error in custom slot supplier `markSlotUsed`', err); - } - } - - releaseSlot(ctx: SlotReleaseContext): void { - try { - this.supplier.releaseSlot(ctx); - } catch (err: any) { - this.logger.error('Error in custom slot supplier `releaseSlot`', err); - } - } -} +//////////////////////////////////////////////////////////////////////////////////////////////////// function nativeifySupplier( supplier: SlotSupplier, kind: ActOrWorkflow, logger: Logger -): NativeSlotSupplier { +): native.SlotSupplierOptions { if 
(isResourceBased(supplier)) { const tunerOptions = supplier.tunerOptions; const defaulted = addResourceBasedSlotDefaults(supplier, kind); return { - ...defaulted, type: 'resource-based', - tunerOptions, - rampThrottleMs: msToNumber(defaulted.rampThrottle), + minimumSlots: defaulted.minimumSlots, + maximumSlots: defaulted.maximumSlots, + rampThrottle: msToNumber(defaulted.rampThrottle), + tunerOptions: { + targetMemoryUsage: tunerOptions.targetMemoryUsage, + targetCpuUsage: tunerOptions.targetCpuUsage, + }, }; } + if (isCustom(supplier)) { - return new ErrorLoggingSlotSupplier(supplier, logger); + return new NativeifiedCustomSlotSupplier(supplier, logger); } - return supplier; + return { + type: 'fixed-size', + numSlots: supplier.numSlots, + }; } function addResourceBasedSlotDefaults( @@ -259,3 +385,76 @@ }; } } + +class NativeifiedCustomSlotSupplier implements CustomSlotSupplier { + readonly type = 'custom'; + + constructor( + private readonly supplier: CustomSlotSupplier, + private readonly logger: Logger + ) { + this.reserveSlot = this.reserveSlot.bind(this); + this.tryReserveSlot = this.tryReserveSlot.bind(this); + this.markSlotUsed = this.markSlotUsed.bind(this); + this.releaseSlot = this.releaseSlot.bind(this); + } + + async reserveSlot(ctx: SlotReserveContext, abortSignal: AbortSignal): Promise { + try { + const result = await this.supplier.reserveSlot( + { + slotType: ctx.slotType, + taskQueue: ctx.taskQueue, + workerIdentity: ctx.workerIdentity, + workerBuildId: ctx.workerBuildId, + isSticky: ctx.isSticky, + }, + abortSignal + ); + return result; + } catch (error) { + if (!abortSignal.aborted || error !== abortSignal.reason) { + this.logger.error('Error in custom slot supplier `reserveSlot`', { error }); + } + throw error; + } + } + + tryReserveSlot(ctx: SlotReserveContext): SlotPermit | null { + try { + const result = this.supplier.tryReserveSlot({ + slotType: ctx.slotType, + taskQueue: ctx.taskQueue, + workerIdentity: 
ctx.workerIdentity, + workerBuildId: ctx.workerBuildId, + isSticky: ctx.isSticky, + }); + return result ?? null; + } catch (error) { + this.logger.error(`Error in custom slot supplier tryReserveSlot`, { error }); + return null; + } + } + + markSlotUsed(ctx: SlotMarkUsedContext): void { + try { + this.supplier.markSlotUsed({ + slotInfo: ctx.slotInfo, + permit: ctx.permit, + }); + } catch (error) { + this.logger.error(`Error in custom slot supplier markSlotUsed`, { error }); + } + } + + releaseSlot(ctx: SlotReleaseContext): void { + try { + this.supplier.releaseSlot({ + slotInfo: ctx.slotInfo ?? undefined, + permit: ctx.permit, + }); + } catch (error) { + this.logger.error(`Error in custom slot supplier releaseSlot`, { error }); + } + } +} diff --git a/packages/worker/src/worker.ts b/packages/worker/src/worker.ts index a47a22a36..29ea95a42 100644 --- a/packages/worker/src/worker.ts +++ b/packages/worker/src/worker.ts @@ -2,7 +2,6 @@ import crypto from 'node:crypto'; import fs from 'node:fs/promises'; import * as path from 'node:path'; import * as vm from 'node:vm'; -import { promisify } from 'node:util'; import { EventEmitter, on } from 'node:events'; import { setTimeout as setTimeoutCallback } from 'node:timers'; import { @@ -53,8 +52,7 @@ import { } from '@temporalio/common/lib/time'; import { errorMessage } from '@temporalio/common/lib/type-helpers'; import { workflowLogAttributes } from '@temporalio/workflow/lib/logs'; -import * as native from '@temporalio/core-bridge'; -import { ShutdownError, UnexpectedError } from '@temporalio/core-bridge'; +import { native } from '@temporalio/core-bridge'; import { coresdk, temporal } from '@temporalio/proto'; import { type SinkCall, type WorkflowInfo } from '@temporalio/workflow'; import { Activity, CancelReason, activityLogAttributes } from './activity'; @@ -79,6 +77,7 @@ import { isCodeBundleOption, isPathBundleOption, ReplayWorkerOptions, + toNativeWorkerOptions, WorkerOptions, WorkflowBundle, } from './worker-options'; 
@@ -89,7 +88,13 @@ import { ReusableVMWorkflowCreator } from './workflow/reusable-vm'; import { ThreadedVMWorkflowCreator } from './workflow/threaded-vm'; import { VMWorkflowCreator } from './workflow/vm'; import { WorkflowBundleWithSourceMapAndFilename } from './workflow/workflow-worker-thread/input'; -import { CombinedWorkerRunError, GracefulShutdownPeriodExpiredError, PromiseCompletionTimeoutError } from './errors'; +import { + CombinedWorkerRunError, + GracefulShutdownPeriodExpiredError, + PromiseCompletionTimeoutError, + ShutdownError, + UnexpectedError, +} from './errors'; export { DataConverter, defaultPayloadConverter }; @@ -105,19 +110,10 @@ export { DataConverter, defaultPayloadConverter }; */ export type State = 'INITIALIZED' | 'RUNNING' | 'STOPPED' | 'STOPPING' | 'DRAINING' | 'DRAINED' | 'FAILED'; -type ExtractToPromise = T extends (err: any, result: infer R) => void ? Promise : never; -// For some reason the lint rule doesn't realize that _I should be ignored -// eslint-disable-next-line @typescript-eslint/no-unused-vars -type Last = T extends [...infer _I, infer L] ? L : never; -type LastParameter any> = Last>; type OmitFirst = T extends [any, ...infer REST] ? REST : never; -type OmitLast = T extends [...infer REST, any] ? REST : never; type OmitFirstParam = T extends (...args: any[]) => any ? (...args: OmitFirst>) => ReturnType : never; -type Promisify = T extends (...args: any[]) => void - ? (...args: OmitLast>) => ExtractToPromise> - : never; type NonNullableObject = { [P in keyof T]-?: NonNullable }; @@ -129,7 +125,7 @@ export type ActivityTaskWithBase64Token = { // The unaltered protobuf-encoded ActivityTask; kept so that it can be printed // out for analysis if decoding fails at a later step. 
- protobufEncodedTask: ArrayBuffer; + protobufEncodedTask: Buffer; }; interface EvictionWithRunID { @@ -138,14 +134,14 @@ interface EvictionWithRunID { } export interface NativeWorkerLike { - type: 'Worker'; - initiateShutdown: Promisify>; + type: 'worker'; + initiateShutdown: OmitFirstParam; finalizeShutdown(): Promise; flushCoreLogs(): void; - pollWorkflowActivation: Promisify>; - pollActivityTask: Promisify>; - completeWorkflowActivation: Promisify>; - completeActivityTask: Promisify>; + pollWorkflowActivation: OmitFirstParam; + pollActivityTask: OmitFirstParam; + completeWorkflowActivation: OmitFirstParam; + completeActivityTask: OmitFirstParam; recordActivityHeartbeat: OmitFirstParam; } @@ -173,29 +169,29 @@ function addBuildIdIfMissing(options: CompiledWorkerOptions, bundleCode?: string } export class NativeWorker implements NativeWorkerLike { - public readonly type = 'Worker'; - public readonly pollWorkflowActivation: Promisify>; - public readonly pollActivityTask: Promisify>; - public readonly completeWorkflowActivation: Promisify>; - public readonly completeActivityTask: Promisify>; + public readonly type = 'worker'; + public readonly pollWorkflowActivation: OmitFirstParam; + public readonly pollActivityTask: OmitFirstParam; + public readonly completeWorkflowActivation: OmitFirstParam; + public readonly completeActivityTask: OmitFirstParam; public readonly recordActivityHeartbeat: OmitFirstParam; - public readonly initiateShutdown: Promisify>; + public readonly initiateShutdown: OmitFirstParam; public static async create( connection: NativeConnection, options: CompiledWorkerOptionsWithBuildId ): Promise { const runtime = Runtime.instance(); - const nativeWorker = await runtime.registerWorker(extractNativeClient(connection), options); + const nativeWorker = await runtime.registerWorker(extractNativeClient(connection), toNativeWorkerOptions(options)); return new NativeWorker(runtime, nativeWorker); } public static async createReplay(options: 
CompiledWorkerOptionsWithBuildId): Promise { const runtime = Runtime.instance(); - const replayer = await runtime.createReplayWorker(options); + const [worker, historyPusher] = await runtime.createReplayWorker(toNativeWorkerOptions(options)); return { - worker: new NativeWorker(runtime, replayer.worker), - historyPusher: replayer.pusher, + worker: new NativeWorker(runtime, worker), + historyPusher, }; } @@ -203,12 +199,12 @@ export class NativeWorker implements NativeWorkerLike { protected readonly runtime: Runtime, protected readonly nativeWorker: native.Worker ) { - this.pollWorkflowActivation = promisify(native.workerPollWorkflowActivation).bind(undefined, nativeWorker); - this.pollActivityTask = promisify(native.workerPollActivityTask).bind(undefined, nativeWorker); - this.completeWorkflowActivation = promisify(native.workerCompleteWorkflowActivation).bind(undefined, nativeWorker); - this.completeActivityTask = promisify(native.workerCompleteActivityTask).bind(undefined, nativeWorker); + this.pollWorkflowActivation = native.workerPollWorkflowActivation.bind(undefined, nativeWorker); + this.pollActivityTask = native.workerPollActivityTask.bind(undefined, nativeWorker); + this.completeWorkflowActivation = native.workerCompleteWorkflowActivation.bind(undefined, nativeWorker); + this.completeActivityTask = native.workerCompleteActivityTask.bind(undefined, nativeWorker); this.recordActivityHeartbeat = native.workerRecordActivityHeartbeat.bind(undefined, nativeWorker); - this.initiateShutdown = promisify(native.workerInitiateShutdown).bind(undefined, nativeWorker); + this.initiateShutdown = native.workerInitiateShutdown.bind(undefined, nativeWorker); } flushCoreLogs(): void { @@ -464,13 +460,14 @@ export class Worker { * This method initiates a connection to the server and will throw (asynchronously) on connection failure. 
*/ public static async create(options: WorkerOptions): Promise { - const logger = withMetadata(Runtime.instance().logger, { + const runtime = Runtime.instance(); + const logger = withMetadata(runtime.logger, { sdkComponent: SdkComponent.worker, taskQueue: options.taskQueue ?? 'default', }); const nativeWorkerCtor: NativeWorkerConstructor = this.nativeWorkerCtor; const compiledOptions = compileWorkerOptions(options, logger); - logger.info('Creating worker', { + logger.debug('Creating worker', { options: { ...compiledOptions, ...(compiledOptions.workflowBundle && isCodeBundleOption(compiledOptions.workflowBundle) @@ -503,7 +500,7 @@ export class Worker { throw err; } extractReferenceHolders(connection).add(nativeWorker); - return new this(nativeWorker, workflowCreator, compiledOptionsWithBuildId, logger, connection); + return new this(runtime, nativeWorker, workflowCreator, compiledOptionsWithBuildId, logger, connection); } protected static async createWorkflowCreator( @@ -651,7 +648,8 @@ export class Worker { ...options, }; this.replayWorkerCount++; - const logger = withMetadata(Runtime.instance().logger, { + const runtime = Runtime.instance(); + const logger = withMetadata(runtime.logger, { sdkComponent: 'worker', taskQueue: fixedUpOptions.taskQueue, }); @@ -663,7 +661,7 @@ export class Worker { const workflowCreator = await this.createWorkflowCreator(bundle, compiledOptions, logger); const replayHandle = await nativeWorkerCtor.createReplay(addBuildIdIfMissing(compiledOptions, bundle.code)); return [ - new this(replayHandle.worker, workflowCreator, compiledOptions, logger, undefined, true), + new this(runtime, replayHandle.worker, workflowCreator, compiledOptions, logger, undefined, true), replayHandle.historyPusher, ]; } @@ -721,6 +719,7 @@ export class Worker { * Create a new Worker from nativeWorker. 
*/ protected constructor( + protected readonly runtime: Runtime, protected readonly nativeWorker: NativeWorkerLike, /** * Optional WorkflowCreator - if not provided, Worker will not poll on Workflows @@ -794,38 +793,34 @@ export class Worker { /** * Start shutting down the Worker. The Worker stops polling for new tasks and sends - * {@link https://typescript.temporal.io/api/namespaces/activity#cancellation | cancellation} (via a - * {@link CancelledFailure} with `message` set to `'WORKER_SHUTDOWN'`) to running Activities. Note: if the Activity - * accepts cancellation (i.e. re-throws or allows the `CancelledFailure` to be thrown out of the Activity function), - * the Activity Task will be marked as failed, not cancelled. It's helpful for the Activity Task to be marked failed - * during shutdown because the Server will retry the Activity sooner (than if the Server had to wait for the Activity - * Task to time out). + * {@link https://typescript.temporal.io/api/namespaces/activity#cancellation | cancellation} + * (via a {@link CancelledFailure} with `message` set to `'WORKER_SHUTDOWN'`) to running Activities. + * Note: if the Activity accepts cancellation (i.e. re-throws or allows the `CancelledFailure` + * to be thrown out of the Activity function), the Activity Task will be marked as failed, not + * cancelled. It's helpful for the Activity Task to be marked failed during shutdown because the + * Server will retry the Activity sooner (than if the Server had to wait for the Activity Task + * to time out). * - * When called, immediately transitions {@link state} to `'STOPPING'` and asks Core to shut down. Once Core has - * confirmed that it's shutting down, the Worker enters `'DRAINING'` state unless the Worker has already been - * `'DRAINED'`. Once all currently running Activities and Workflow Tasks have completed, the Worker transitions to - * `'STOPPED'`. + * When called, immediately transitions {@link state} to `'STOPPING'` and asks Core to shut down. 
+ * Once Core has confirmed that it's shutting down, the Worker enters `'DRAINING'` state. It will + * stay in that state until both task pollers receive a `ShutdownError`, at which point we'll + * transition to `DRAINED` state. Once all currently running Activities and Workflow Tasks have + * completed, the Worker transitions to `'STOPPED'`. */ shutdown(): void { if (this.state !== 'RUNNING') { throw new IllegalStateError(`Not running. Current state: ${this.state}`); } this.state = 'STOPPING'; - this.nativeWorker - .initiateShutdown() - .then(() => { - // Core may have already returned a ShutdownError to our pollers in which - // case the state would transition to DRAINED - if (this.state === 'STOPPING') { - this.state = 'DRAINING'; - } - }) - .catch((error) => { - // This is totally unexpected. If we reach this point, something horribly wrong in the Worker - // state, and attempt to shutdown gracefully will very likely hang. Just terminate immediately. - this.logger.error('Failed to initiate shutdown', { error }); - this.instantTerminateErrorSubject.error(error); - }); + try { + this.nativeWorker.initiateShutdown(); + this.state = 'DRAINING'; + } catch (error) { + // This is totally unexpected, and indicates there's something horribly wrong with the Worker + // state. Attempt to shutdown gracefully will very likely hang, so just terminate immediately. 
+ this.logger.error('Failed to initiate shutdown', { error }); + this.instantTerminateErrorSubject.error(error); + } } /** @@ -1522,7 +1517,7 @@ export class Worker { protected workflowPoll$(): Observable { return this.pollLoop$(async () => { this.hasOutstandingWorkflowPoll = true; - let buffer: ArrayBuffer; + let buffer: Buffer; try { buffer = await this.nativeWorker.pollWorkflowActivation(); } finally { @@ -1561,7 +1556,7 @@ export class Worker { mergeMap(this.handleWorkflowActivations.bind(this)), mergeMap(async (completion) => { try { - await this.nativeWorker.completeWorkflowActivation(completion.buffer.slice(completion.byteOffset)); + await this.nativeWorker.completeWorkflowActivation(Buffer.from(completion, completion.byteOffset)); } catch (error) { this.logger.error('Core reported failure in completeWorkflowActivation(). Initiating Worker shutdown.', { error, @@ -1583,7 +1578,7 @@ export class Worker { protected activityPoll$(): Observable { return this.pollLoop$(async () => { this.hasOutstandingActivityPoll = true; - let buffer: ArrayBuffer; + let buffer: Buffer; try { buffer = await this.nativeWorker.pollActivityTask(); } finally { @@ -1623,7 +1618,7 @@ export class Worker { return this.activityPoll$().pipe( this.activityOperator(), mergeMap(async (completion) => { - await this.nativeWorker.completeActivityTask(completion.buffer.slice(completion.byteOffset)); + await this.nativeWorker.completeActivityTask(Buffer.from(completion, completion.byteOffset)); }), tap({ complete: () => this.logger.debug('Activity Worker terminated') }) ); @@ -1734,7 +1729,7 @@ export class Worker { const shutdownCallback = () => { if (this.state === 'RUNNING') this.shutdown(); }; - Runtime.instance().registerShutdownSignalCallback(shutdownCallback); + this.runtime.registerShutdownSignalCallback(shutdownCallback); let fatalError: Error | undefined; const unexpectedErrorSubscription = this.unexpectedErrorSubject.subscribe({ @@ -1784,7 +1779,7 @@ export class Worker { { 
defaultValue: undefined } ); } finally { - Runtime.instance().deregisterShutdownSignalCallback(shutdownCallback); + this.runtime.deregisterShutdownSignalCallback(shutdownCallback); await this.nativeWorker.finalizeShutdown(); } diff --git a/packages/worker/src/workflow/reusable-vm.ts b/packages/worker/src/workflow/reusable-vm.ts index 55dd2a07c..5824e0ade 100644 --- a/packages/worker/src/workflow/reusable-vm.ts +++ b/packages/worker/src/workflow/reusable-vm.ts @@ -1,8 +1,7 @@ import vm from 'node:vm'; import * as internals from '@temporalio/workflow/lib/worker-interface'; import { IllegalStateError } from '@temporalio/common'; -import { getTimeOfDay } from '@temporalio/core-bridge'; -import { timeOfDayToBigint } from '../logger'; +import { native } from '@temporalio/core-bridge'; import { Workflow, WorkflowCreateOptions, WorkflowCreator } from './interface'; import { WorkflowBundleWithSourceMapAndFilename } from './workflow-worker-thread/input'; import { BaseVMWorkflow, globalHandlers, injectGlobals, setUnhandledRejectionHandler } from './vm-shared'; @@ -181,7 +180,7 @@ export class ReusableVMWorkflowCreator implements WorkflowCreator { workflowModule.initRuntime({ ...options, sourceMap: this.workflowBundle.sourceMap, - getTimeOfDay: () => timeOfDayToBigint(getTimeOfDay()), + getTimeOfDay: native.getTimeOfDay, registeredActivityNames: this.registeredActivityNames, }); const activator = context['__TEMPORAL_ACTIVATOR__']; diff --git a/packages/worker/src/workflow/threaded-vm.ts b/packages/worker/src/workflow/threaded-vm.ts index 509cb54e4..ac6e73c7c 100644 --- a/packages/worker/src/workflow/threaded-vm.ts +++ b/packages/worker/src/workflow/threaded-vm.ts @@ -12,8 +12,8 @@ import { Worker as NodeWorker } from 'node:worker_threads'; import { coresdk } from '@temporalio/proto'; import { IllegalStateError, type SinkCall } from '@temporalio/workflow'; -import { UnexpectedError } from '@temporalio/core-bridge'; import { Logger } from '@temporalio/common'; +import { 
UnexpectedError } from '../errors'; import { WorkflowBundleWithSourceMapAndFilename, WorkerThreadInput, diff --git a/packages/worker/src/workflow/vm.ts b/packages/worker/src/workflow/vm.ts index 8d8f8bdb2..0478a8d0c 100644 --- a/packages/worker/src/workflow/vm.ts +++ b/packages/worker/src/workflow/vm.ts @@ -1,7 +1,6 @@ import vm from 'node:vm'; import { IllegalStateError } from '@temporalio/common'; -import { getTimeOfDay } from '@temporalio/core-bridge'; -import { timeOfDayToBigint } from '../logger'; +import { native } from '@temporalio/core-bridge'; import { Workflow, WorkflowCreateOptions, WorkflowCreator } from './interface'; import { WorkflowBundleWithSourceMapAndFilename } from './workflow-worker-thread/input'; import { @@ -59,7 +58,7 @@ export class VMWorkflowCreator implements WorkflowCreator { workflowModule.initRuntime({ ...options, sourceMap: this.workflowBundle.sourceMap, - getTimeOfDay: () => timeOfDayToBigint(getTimeOfDay()), + getTimeOfDay: native.getTimeOfDay, registeredActivityNames: this.registeredActivityNames, }); const activator = context.__TEMPORAL_ACTIVATOR__ as any; diff --git a/packages/workflow/src/internals.ts b/packages/workflow/src/internals.ts index acc49985b..4bd9e3c63 100644 --- a/packages/workflow/src/internals.ts +++ b/packages/workflow/src/internals.ts @@ -410,7 +410,9 @@ export class Activator implements ActivationHandler { sinkCalls = Array(); /** - * A nanosecond resolution time function, externally injected + * A nanosecond resolution time function, externally injected. This is used to + * precisely sort logs entries emitted from the Workflow Context vs those emitted + * from other sources (e.g. main thread, Core, etc). */ public readonly getTimeOfDay: () => bigint;