diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 8ec19633dc..a3491f0fe7 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -3,4 +3,5 @@ ignore = [ "RUSTSEC-2024-0365", # Bound by diesel 1.4 (4GB limit n/a to tokenserver) "RUSTSEC-2024-0421", # Bound by diesel 1.4, `idna` < 0.1.5, Upgrade to >=1.0.0 "RUSTSEC-2024-0437", # Bound by grpcio 0.13, + "RUSTSEC-2022-0090", # Bound by diesel 1.4.8, diesel_migrations 1.4.0, diesel_logger 0.1.1 ] diff --git a/.circleci/config.yml b/.circleci/config.yml index 07c0cb882a..795eeb7d75 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -64,6 +64,11 @@ commands: - run: name: Rust Clippy MySQL command: make clippy_mysql + rust-clippy-sqlite: + steps: + - run: + name: Rust Clippy SQLite + command: make clippy_sqlite rust-clippy-spanner: steps: - run: @@ -156,6 +161,20 @@ commands: environment: SYNCSTORAGE_RS_IMAGE: app:build + run-sqlite-tests: + steps: + - run: + name: e2e tests (syncstorage sqlite) + command: > + /usr/local/bin/docker-compose + -f docker-compose.sqlite.yaml + -f docker-compose.e2e.sqlite.yaml + up + --exit-code-from sqlite-e2e-tests + --abort-on-container-exit + environment: + SYNCSTORAGE_RS_IMAGE: app:build + run-spanner-tests: steps: - run: @@ -347,6 +366,42 @@ jobs: key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} paths: - /home/circleci/cache + + build-sqlite-image: + docker: + - image: cimg/rust:1.86 # RUST_VER + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout + - display-versions + - write-version + - run: + name: Build SQLite Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=sqlite . + no_output_timeout: 30m + # save the built docker container into CircleCI's cache. This is + # required since Workflows do not have the same remote docker instance. + - run: + name: docker save app:build + command: | + mkdir -p /home/circleci/cache + docker save -o /home/circleci/cache/docker.tar "app:build" + - run: + name: Save docker-compose config + command: cp docker-compose*sqlite.yaml /home/circleci/cache + - run: + name: Save Makefile to cache + command: cp Makefile /home/circleci/cache + - save_cache: + key: sqlite-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + paths: + - /home/circleci/cache + build-spanner-image: docker: - image: cimg/rust:1.86 # RUST_VER @@ -499,6 +554,36 @@ jobs: destination: gs://ecosystem-test-eng-metrics/syncstorage-rs/junit extension: xml + sqlite-e2e-tests: + docker: + - image: cimg/base:2025.04 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + steps: + - setup_remote_docker + - display-versions + - restore_cache: + key: sqlite-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + - run: + name: Restore Docker image cache + command: docker load -i /home/circleci/cache/docker.tar + - run: + name: Restore Makefile from save_cache + command: cp /home/circleci/cache/Makefile . + - run: + name: Restore docker-compose config + command: cp /home/circleci/cache/docker-compose*.yaml . 
+ - make-test-dir + - run-e2e-tests: + db: sqlite + - store-test-results + - gcs-configure-and-upload: + source: workflow/test-results + destination: gs://ecosystem-test-eng-metrics/syncstorage-rs/junit + extension: xml + - run-e2e-sqlite-tests + deploy: docker: - image: docker:18.02.0-ce @@ -600,6 +685,12 @@ workflows: filters: tags: only: /.*/ + - build-sqlite-image: + requires: + - build-and-test + filters: + tags: + only: /.*/ - mysql-e2e-tests: requires: - build-mysql-image @@ -612,10 +703,17 @@ workflows: filters: tags: only: /.*/ + - sqlite-e2e-tests: + requires: + - build-sqlite-image + filters: + tags: + only: /.*/ - deploy: requires: - mysql-e2e-tests - spanner-e2e-tests + - sqlite-e2e-tests filters: tags: only: /.*/ @@ -629,6 +727,7 @@ workflows: requires: - mysql-e2e-tests - spanner-e2e-tests + - sqlite-e2e-tests filters: tags: only: /.*/ @@ -638,6 +737,7 @@ workflows: requires: - mysql-e2e-tests - spanner-e2e-tests + - sqlite-e2e-tests filters: tags: only: /.*/ diff --git a/.gitignore b/.gitignore index 30ad189070..61030820f2 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,8 @@ html_coverage .hgignore .idea *.iml +Pipfile +Pipfile.lock site-packages/* lib-python/* bin/* @@ -31,11 +33,12 @@ target service-account.json .sentryclirc .envrc +.env config/local.toml tools/tokenserver/loadtests/*.pem tools/tokenserver/loadtests/*.pub -venv +venv/ .vscode/settings.json # circleci diff --git a/Cargo.lock b/Cargo.lock index 169e3188fe..c5ed8132db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-core", "futures-sink", @@ -21,9 +21,9 @@ dependencies = [ [[package]] name = "actix-cors" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e772b3bcafe335042b5db010ab7c09013dad6eac4915c91d8d50902769f331" +checksum = "daa239b93927be1ff123eebada5a3ff23e89f0124ccb8609234e5103d5a5ae6d" dependencies = [ "actix-utils", "actix-web", @@ -36,25 +36,25 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48f96fc3003717aeb9856ca3d02a8c7de502667ad76eeacd830b48d2e91fac4" +checksum = "44dfe5c9e0004c623edc65391dfd51daa201e7e30ebd9c9bedf873048ec32bc2" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", - "ahash", "base64", - "bitflags 2.9.0", + "bitflags 2.9.1", "brotli", "bytes", "bytestring", "derive_more", "encoding_rs", "flate2", + "foldhash", "futures-core", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "httparse", "httpdate", @@ -64,7 +64,7 @@ dependencies = [ "mime", "percent-encoding 2.3.1", "pin-project-lite", - "rand 0.8.5", + "rand 0.9.2", "sha1", "smallvec", "tokio", @@ -80,7 +80,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -93,7 +93,7 @@ dependencies = [ "cfg-if", "http 0.2.12", "regex-lite", - "serde 1.0.218", + "serde 1.0.219", "tracing", ] @@ -110,9 +110,9 @@ dependencies = [ [[package]] name = "actix-server" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7ca2549781d8dd6d75c40cf6b6051260a2cc2f3c62343d761a969a0640646894" +checksum = "a65064ea4a457eaf07f2fba30b4c695bf43b721790e9530d26cb6f9019ff7502" dependencies = [ "actix-rt", "actix-service", @@ -120,19 +120,18 @@ dependencies = [ "futures-core", "futures-util", "mio", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] [[package]] name = "actix-service" -version = "2.0.2" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" +checksum = "9e46f36bf0e5af44bdc4bdb36fbbd421aa98c79a9bce724e1edeb3894e10dc7f" dependencies = [ "futures-core", - "paste", "pin-project-lite", ] @@ -148,9 +147,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "4.9.0" +version = "4.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9180d76e5cc7ccbc4d60a506f2c727730b154010262df5b910eb17dbe4b8cb38" +checksum = "a597b77b5c6d6a1e1097fddde329a83665e25c5437c696a3a9a4aa514a614dea" dependencies = [ "actix-codec", "actix-http", @@ -161,12 +160,12 @@ dependencies = [ "actix-service", "actix-utils", "actix-web-codegen", - "ahash", "bytes", "bytestring", "cfg-if", "derive_more", "encoding_rs", + "foldhash", "futures-core", "futures-util", "impl-more", @@ -177,12 +176,13 @@ dependencies = [ "once_cell", "pin-project-lite", "regex-lite", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "serde_urlencoded", "smallvec", - "socket2", + "socket2 0.5.10", "time", + "tracing", "url 2.5.4", ] @@ -195,7 +195,7 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -209,22 +209,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - -[[package]] -name = "ahash" -version = "0.8.11" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "getrandom 0.2.15", - "once_cell", - "version_check", - "zerocopy 0.7.35", -] +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" @@ -276,9 +263,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -291,44 +278,44 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", - "once_cell", + "once_cell_polyfill", "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arc-swap" @@ -348,19 +335,19 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ - "serde 1.0.218", + "serde 1.0.219", "serde_json", ] [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -382,15 +369,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -398,7 +385,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -438,9 +425,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "block-buffer" @@ -462,9 +449,9 @@ dependencies = [ [[package]] name = "brotli" -version = "6.0.0" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" +checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -473,9 +460,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -483,9 +470,9 @@ 
dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byteorder" @@ -510,18 +497,18 @@ dependencies = [ [[package]] name = "cadence" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62fd689c825a93386a2ac05a46f88342c6df9ec3e79416f665650614e92e7475" +checksum = "3075f133bee430b7644c54fb629b9b4420346ffa275a45c81a6babe8b09b4f51" dependencies = [ "crossbeam-channel", ] [[package]] name = "cc" -version = "1.2.16" +version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ "jobserver", "libc", @@ -539,9 +526,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -551,9 +538,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", @@ -600,9 +587,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -622,19 +609,13 @@ dependencies = [ "lazy_static", "nom 5.1.3", "rust-ini", - "serde 1.0.218", + "serde 1.0.219", "serde-hjson", "serde_json", "toml", "yaml-rust", ] -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -652,9 +633,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -686,24 +667,24 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.47" +version = "0.4.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9fb4d13a1be2b58f14d60adba57c9834b78c62fd86c3e76a148f732686e9265" +checksum = "9e2d5c8f48d9c0c23250e52b55e82a6ab4fdba6650c931f5a0a57a43abda812b" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2", - "windows-sys 0.52.0", + "socket2 0.5.10", + "windows-sys 0.59.0", ] [[package]] name = "curl-sys" -version = "0.4.80+curl-8.12.1" +version = 
"0.4.82+curl-8.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f7df2eac63200c3ab25bde3b2268ef2ee56af3d238e76d61f01c3c49bff734" +checksum = "c4d63638b5ec65f1a4ae945287b3fd035be4554bbaf211901159c9a2a74fb5be" dependencies = [ "cc", "libc", @@ -711,14 +692,14 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ "darling_core", "darling_macro", @@ -726,27 +707,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -775,30 +756,38 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ - "serde 1.0.218", + "serde 1.0.219", "uuid", ] [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] [[package]] name = "derive_more" -version = "0.99.19" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", - "syn 2.0.99", + "syn 2.0.104", + "unicode-xid", ] [[package]] @@ -809,6 +798,7 @@ checksum = "b28135ecf6b7d446b43e27e225622a038cc4e2930a1022f51cdb97ada19b8e4d" dependencies = [ "byteorder", "diesel_derives", + "libsqlite3-sys", "mysqlclient-sys", "r2d2", "url 1.7.2", @@ -885,7 +875,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -896,15 +886,15 @@ checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f" dependencies = [ "lazy_static", "regex", - "serde 1.0.218", + "serde 1.0.219", "strsim 0.10.0", ] [[package]] name = "dyn-clone" -version = "1.0.19" +version = "1.0.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "either" @@ -946,14 +936,14 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" dependencies = [ "anstream", "anstyle", "env_filter", - "humantime", + "jiff", "log", ] @@ -969,17 +959,17 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "serde 1.0.218", + "serde 1.0.219", ] [[package]] name = "errno" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -996,9 +986,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -1010,6 +1000,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1081,7 +1077,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -1127,27 +1123,29 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -1161,7 +1159,7 @@ name = "glean" version = "0.18.3" dependencies = [ "chrono", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "uuid", ] @@ -1216,9 +1214,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -1235,16 +1233,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.2.0", + "http 1.3.1", "indexmap", "slab", "tokio", @@ -1254,9 +1252,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" [[package]] name = "hawk" @@ -1290,15 +1288,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -1335,13 +1327,13 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ "cfg-if", "libc", - "windows", + "windows-link", ] [[package]] @@ -1357,9 +1349,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1373,18 +1365,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http 1.3.1", ] [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", - "http 1.2.0", + "futures-core", + "http 1.3.1", "http-body", "pin-project-lite", ] @@ -1403,9 +1395,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" @@ -1416,8 +1408,8 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.8", - "http 1.2.0", + "h2 0.4.11", + "http 1.3.1", 
"http-body", "httparse", "httpdate", @@ -1430,12 +1422,11 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", - "http 1.2.0", + "http 1.3.1", "hyper", "hyper-util", "rustls", @@ -1448,18 +1439,23 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" dependencies = [ + "base64", "bytes", "futures-channel", + "futures-core", "futures-util", - "http 1.2.0", + "http 1.3.1", "http-body", "hyper", + "ipnet", + "libc", + "percent-encoding 2.3.1", "pin-project-lite", - "socket2", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -1467,14 +1463,15 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", "windows-core", ] @@ -1490,21 +1487,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -1513,31 +1511,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -1545,67 +1523,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -1636,9 +1601,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -1652,9 +1617,9 @@ checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2" [[package]] name = "indexmap" -version = "2.7.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown", @@ -1666,19 +1631,40 @@ version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde 1.0.219", +] + [[package]] name = "is-terminal" 
-version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "libc", "windows-sys 0.59.0", ] @@ -1695,12 +1681,37 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jiff" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde 1.0.219", +] + +[[package]] +name = "jiff-static" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.3", "libc", ] @@ -1723,7 +1734,7 @@ dependencies = [ "base64", "js-sys", "ring", - "serde 1.0.218", + "serde 1.0.219", "serde_json", ] @@ -1760,35 +1771,45 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.170" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets", + "windows-targets 0.53.3", ] [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "360e552c93fa0e8152ab463bc4c4837fce76a225df11dfaeea66c313de5e61f7" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +dependencies = [ + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "libc", @@ -1810,9 +1831,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = 
"241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "local-channel" @@ -1833,9 +1854,9 @@ checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -1843,9 +1864,15 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "matches" @@ -1855,9 +1882,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memoffset" @@ -1903,23 +1930,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -1932,13 +1959,13 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.2.0", + "http 1.3.1", "http-body", "http-body-util", "hyper", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -2003,11 +2030,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] @@ -2022,9 +2049,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = 
"a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "openssl-probe" @@ -2034,9 +2067,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.106" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -2046,20 +2079,21 @@ dependencies = [ [[package]] name = "os_info" -version = "3.10.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" +checksum = "d0e1ac5fde8d43c34139135df8ea9ee9465394b2d8d20f032d38998f64afffc3" dependencies = [ "log", - "serde 1.0.218", + "plist", + "serde 1.0.219", "windows-sys 0.52.0", ] [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -2067,23 +2101,17 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", ] -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - [[package]] name = "peeking_take_while" version = "0.1.2" @@ -2120,11 +2148,42 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plist" +version = "1.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3af6b589e163c5a788fab00ce0c0366f6efbb9959c2f9874b224936af7fce7e1" +dependencies = [ + "base64", + "indexmap", + "quick-xml", + "serde 1.0.219", + "time", +] + [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "potential_utf" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -2134,11 +2193,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -2160,14 +2219,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -2180,9 +2239,9 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "pyo3" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17da310086b068fbdcefbba30aeb3721d5bb9af8db4987d6735b2183ca567229" +checksum = "e5203598f366b11a02b13aa20cab591229ff0a89fd121a308a5df751d5fc9219" dependencies = [ "cfg-if", "indoc", @@ -2198,9 +2257,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27165889bd793000a098bb966adc4300c312497ea25cf7a690a9f0ac5aa5fc1" +checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" dependencies = [ "once_cell", "target-lexicon", @@ -2208,9 +2267,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05280526e1dbf6b420062f3ef228b78c0c54ba94e157f5cb724a609d0f2faabc" +checksum = "78f9cf92ba9c409279bc3305b5409d90db2d2c22392d443a87df3a1adad59e33" dependencies = [ "libc", "pyo3-build-config", @@ -2218,56 +2277,68 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3ce5686aa4d3f63359a5100c62a127c9f15e8398e5fdeb5deef1fed5cd5f44" +checksum = "0b999cb1a6ce21f9a6b147dcf1be9ffedf02e0043aec74dc390f3007047cecd9" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] name = "pyo3-macros-backend" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4cf6faa0cbfb0ed08e89beb8103ae9724eb4750e3a78084ba4017cbe94f3855" +checksum = "822ece1c7e1012745607d5cf0bcb2874769f0f7cb34c4cde03b9358eb9ef911a" dependencies = [ "heck", "proc-macro2", "pyo3-build-config", "quote", - "syn 2.0.99", + "syn 2.0.104", +] + +[[package]] +name = "quick-xml" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8927b0664f5c5a98265138b7e3f90aa19a6b21353182469ace36d4ac527b7b1b" +dependencies = [ + "memchr", ] [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", "rustls", - "socket2", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", "rustls", @@ -2281,27 +2352,33 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.5.10", "tracing", "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "r2d2" version = "0.8.10" @@ -2326,13 +2403,12 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.23", ] [[package]] @@ -2361,7 +2437,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -2370,16 +2446,16 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.3", ] [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -2388,7 +2464,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 1.0.69", ] @@ -2430,56 +2506,51 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.12" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64", "bytes", "futures-core", - "futures-util", - "http 1.2.0", + "http 1.3.1", "http-body", "http-body-util", "hyper", "hyper-rustls", "hyper-util", - "ipnet", "js-sys", "log", 
- "mime", - "once_cell", "percent-encoding 2.3.1", "pin-project-lite", "quinn", "rustls", - "rustls-pemfile", "rustls-pki-types", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", "tokio-rustls", "tower", + "tower-http", "tower-service", "url 2.5.4", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "webpki-roots", - "windows-registry", ] [[package]] name = "ring" -version = "0.17.13" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -2493,9 +2564,9 @@ checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -2524,18 +2595,18 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "2491382039b29b9b11ff08b76ff6c97cf287671dbb74f0be44bda389fffe9bd1" dependencies = [ "once_cell", "ring", @@ -2545,29 +2616,21 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", "rustls-pki-types", @@ -2576,9 +2639,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" @@ -2675,7 +2738,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "sentry-types", - "serde 1.0.218", + "serde 1.0.219", "serde_json", ] @@ -2711,7 +2774,7 @@ dependencies = [ "debugid", "hex", "rand 0.8.5", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "thiserror 1.0.69", "time", @@ -2727,9 +2790,9 @@ 
checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] @@ -2748,25 +2811,25 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", "memchr", "ryu", - "serde 1.0.218", + "serde 1.0.219", ] [[package]] @@ -2778,7 +2841,7 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.218", + "serde 1.0.219", ] [[package]] @@ -2794,9 +2857,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -2811,9 +2874,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -2826,12 +2889,9 @@ checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "slog" @@ -2876,7 +2936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f400f1c5db96f1f52065e8931ca0c524cceb029f7537c9e6d5424488ca137ca0" dependencies = [ "chrono", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "slog", ] @@ -2918,20 +2978,30 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version 
= "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -2981,9 +3051,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.99" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -3018,13 +3088,13 @@ dependencies = [ "hex", "hmac", "hostname", - "http 1.2.0", + "http 1.3.1", "lazy_static", "mime", "rand 0.8.5", "regex", "sentry", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "sha2", "slog", @@ -3080,7 +3150,7 @@ dependencies = [ "diesel", "diesel_migrations", "futures 0.3.31", - "http 1.2.0", + "http 1.3.1", "syncserver-common", "thiserror 1.0.69", ] @@ -3091,7 +3161,7 @@ version = "0.18.3" dependencies = [ "config", "num_cpus", - "serde 1.0.218", + "serde 1.0.219", "slog-scope", "syncserver-common", "syncstorage-settings", @@ -3104,7 +3174,7 @@ name = "syncstorage-db" version = "0.18.3" dependencies = [ "async-trait", - "env_logger 0.11.6", + "env_logger 0.11.8", "futures 0.3.31", "lazy_static", "log", @@ -3117,6 +3187,7 @@ dependencies = [ "syncstorage-mysql", "syncstorage-settings", "syncstorage-spanner", + "syncstorage-sqlite", "tokio", ] @@ -3129,9 +3200,9 @@ dependencies = [ "chrono", "diesel", "futures 0.3.31", - "http 1.2.0", + "http 1.3.1", "lazy_static", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "syncserver-common", "syncserver-db-common", @@ -3148,15 +3219,16 @@ dependencies = [ "diesel", "diesel_logger", "diesel_migrations", - "env_logger 0.11.6", + "env_logger 0.11.8", "futures 0.3.31", - "http 1.2.0", + "http 1.3.1", "slog-scope", "syncserver-common", "syncserver-db-common", "syncserver-settings", "syncstorage-db-common", "syncstorage-settings", + "syncstorage-sql-db-common", "thiserror 1.0.69", "url 2.5.4", ] @@ -3166,7 +3238,7 @@ name = "syncstorage-settings" version = "0.18.3" dependencies = [ "rand 0.8.5", - "serde 1.0.218", + "serde 1.0.219", "syncserver-common", "time", ] @@ -3183,7 +3255,7 @@ dependencies = [ "futures 0.3.31", "google-cloud-rust-raw", "grpcio", - "http 1.2.0", + "http 1.3.1", "protobuf", "slog-scope", "syncserver-common", @@ -3195,15 +3267,62 @@ dependencies = [ "uuid", ] +[[package]] +name = "syncstorage-sql-db-common" +version = "0.18.3" +dependencies = [ + "async-trait", + "backtrace", + "base64", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.8", + "futures 0.3.31", + "http 1.3.1", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror 1.0.69", + "url 2.5.4", +] + +[[package]] +name = "syncstorage-sqlite" +version = "0.18.3" +dependencies = [ + "async-trait", + "backtrace", + "base64", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.8", + "futures 0.3.31", + "http 1.3.1", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "syncstorage-sql-db-common", + "thiserror 1.0.69", + "url 2.5.4", +] + [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -3273,7 +3392,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -3284,45 +3403,44 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] name = "time" -version = "0.3.39" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde 1.0.218", + "serde 1.0.219", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -3330,9 +3448,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -3368,7 +3486,7 @@ dependencies = [ "pyo3", "reqwest", "ring", - "serde 1.0.218", + "serde 1.0.219", "serde_json", "sha2", "slog-scope", @@ -3385,9 +3503,9 @@ version = "0.18.3" dependencies = [ "actix-web", "backtrace", - "http 1.2.0", + "http 1.3.1", "pyo3", - "serde 1.0.218", + "serde 1.0.219", "syncserver-common", ] @@ -3400,16 +3518,96 @@ dependencies = [ "diesel", "diesel_logger", "diesel_migrations", - "env_logger 0.11.6", + "env_logger 0.11.8", "futures 0.3.31", - "http 1.2.0", - "serde 1.0.218", + "http 1.3.1", + "serde 1.0.219", "slog-scope", "syncserver-common", "syncserver-db-common", "syncserver-settings", "thiserror 1.0.69", "tokenserver-common", + "tokenserver-db-common", + "tokenserver-db-mysql", + "tokenserver-db-sqlite", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-common" +version = "0.18.3" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.8", + "futures 0.3.31", + "http 1.3.1", + "serde 1.0.219", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + 
"tokenserver-common", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-mysql" +version = "0.18.3" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.8", + "futures 0.3.31", + "http 1.3.1", + "serde 1.0.219", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + "tokenserver-common", + "tokenserver-db-common", + "tokenserver-settings", + "tokio", +] + +[[package]] +name = "tokenserver-db-sqlite" +version = "0.18.3" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger 0.11.8", + "futures 0.3.31", + "http 1.3.1", + "serde 1.0.219", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror 1.0.69", + "tokenserver-common", + "tokenserver-db-common", "tokenserver-settings", "tokio", ] @@ -3419,26 +3617,28 @@ name = "tokenserver-settings" version = "0.18.3" dependencies = [ "jsonwebtoken", - "serde 1.0.218", + "serde 1.0.219", "tokenserver-common", ] [[package]] name = "tokio" -version = "1.43.0" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3449,7 +3649,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -3464,9 +3664,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", @@ -3481,7 +3681,7 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "serde 1.0.218", + "serde 1.0.219", ] [[package]] @@ -3499,6 +3699,24 @@ dependencies = [ "tower-service", ] +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http 1.3.1", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -3519,14 +3737,26 @@ checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "tracing-core" -version = 
"0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -3589,6 +3819,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "unindent" version = "0.2.4" @@ -3621,7 +3857,7 @@ dependencies = [ "form_urlencoded", "idna 1.0.3", "percent-encoding 2.3.1", - "serde 1.0.218", + "serde 1.0.219", ] [[package]] @@ -3630,12 +3866,6 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -3650,12 +3880,14 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.15.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom 0.3.1", - "serde 1.0.218", + "getrandom 0.3.3", + "js-sys", + "serde 1.0.219", + "wasm-bindgen", ] [[package]] @@ -3667,7 +3899,7 @@ dependencies = [ "idna 1.0.3", "once_cell", "regex", - "serde 1.0.218", + "serde 1.0.219", "serde_derive", "serde_json", "url 2.5.4", @@ -3684,7 +3916,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -3732,15 +3964,15 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] @@ -3767,7 +3999,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -3802,7 +4034,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3838,9 +4070,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.8" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = 
[ "rustls-pki-types", ] @@ -3889,58 +4121,62 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.52.0" +name = "windows-core" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-core", - "windows-targets", + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] -name = "windows-core" -version = "0.52.0" +name = "windows-implement" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ - "windows-targets", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] -name = "windows-link" -version = "0.1.0" +name = "windows-interface" +version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] [[package]] -name = "windows-registry" -version = "0.2.0" +name = "windows-link" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" -dependencies = [ - "windows-result", - "windows-strings", - "windows-targets", -] +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-targets", + "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-result", - "windows-targets", + "windows-link", ] [[package]] @@ -3949,7 +4185,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3958,7 +4194,16 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", ] [[package]] @@ -3967,14 +4212,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -3983,55 +4245,103 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = 
"windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -4044,17 +4354,11 @@ dependencies = [ "regex", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "yaml-rust" @@ -4067,11 +4371,11 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ - "serde 1.0.218", + "serde 1.0.219", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -4079,55 +4383,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.23" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ - "zerocopy-derive 0.8.23", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.99", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -4147,7 +4430,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", "synstructure", ] @@ -4157,11 +4440,22 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -4170,13 +4464,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.104", ] [[package]] @@ -4190,18 +4484,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index cdc8045474..4ca5239300 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,15 @@ members = [ "syncstorage-mysql", "syncstorage-settings", "syncstorage-spanner", + "syncstorage-sqlite", "tokenserver-auth", "tokenserver-common", "tokenserver-db", + "tokenserver-db-common", + "tokenserver-db-mysql", + "tokenserver-db-sqlite", "tokenserver-settings", - "syncserver", + "syncserver", "syncstorage-sql-db-common", ] default-members = ["syncserver"] @@ -23,6 +27,7 @@ authors = [ "Ben Bangert ", "Phil Jenvey ", "Mozilla Services Engineering ", + "Eragon ", ] edition = "2021" rust-version = "1.86" diff --git a/Dockerfile b/Dockerfile index 3003b379c6..f673265050 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,7 +26,7 @@ RUN \ apt-get -q install -y --no-install-recommends $MYSQLCLIENT_PKG cmake COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --features=py_verifier --recipe-path recipe.json +RUN cargo chef cook --release --no-default-features 
--features=$DATABASE_BACKEND,py_verifier --recipe-path recipe.json FROM chef AS builder ARG DATABASE_BACKEND @@ -56,7 +56,8 @@ ENV PATH=$PATH:/root/.cargo/bin RUN \ cargo --version && \ rustc --version && \ - cargo install --path ./syncserver --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --features=py_verifier --locked --root /app + cargo install --path ./syncserver --no-default-features --features=$DATABASE_BACKEND,py_verifier --locked --root /app && \ + if [ "$DATABASE_BACKEND" = "spanner" ] ; then cargo install --path ./syncstorage-spanner --locked --root /app --bin purge_ttl ; fi FROM docker.io/library/debian:bullseye-slim ARG MYSQLCLIENT_PKG diff --git a/Makefile b/Makefile index 0f652e57ce..81439165de 100644 --- a/Makefile +++ b/Makefile @@ -33,13 +33,17 @@ SYNC_TOKENSERVER__DATABASE_URL ?= mysql://sample_user:sample_password@localhost/ SRC_ROOT = $(shell pwd) PYTHON_SITE_PACKGES = $(shell $(SRC_ROOT)/venv/bin/python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") +clippy_sqlite: + # Matches what's run in circleci + cargo clippy --workspace --all-targets --no-default-features --features=sqlite,py_verifier -- -D warnings + clippy_mysql: # Matches what's run in circleci - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- -D clippy::dbg_macro -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql,tokenserver-db/mysql --features=py_verifier -- -D clippy::dbg_macro -D warnings clippy_spanner: # Matches what's run in circleci - cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- -D clippy::dbg_macro -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner,tokenserver-db/mysql --features=py_verifier -- -D clippy::dbg_macro -D warnings clean: cargo clean @@ -98,9 +102,18 @@ run_mysql: python # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the # below env var PYTHONPATH=$(PYTHON_SITE_PACKGES) \ - RUST_LOG=debug \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=mysql,py_verifier -- --config config/local.toml + +run_sqlite: python + PATH="./venv/bin:$(PATH)" \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + # below env var + PYTHONPATH=$(PYTHON_SITE_PACKGES) \ + RUST_LOG=debug \ RUST_BACKTRACE=full \ - cargo run --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- --config config/local.toml + cargo run --no-default-features --features=sqlite,py_verifier -- --config config/local.toml run_spanner: python GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) \ @@ -108,10 +121,10 @@ run_spanner: python # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the # below env var PYTHONPATH=$(PYTHON_SITE_PACKGES) \ - PATH="./venv/bin:$(PATH)" \ + PATH="./venv/bin:$(PATH)" \ RUST_LOG=debug \ RUST_BACKTRACE=full \ - cargo run --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- --config config/local.toml + cargo run --no-default-features --features=spanner,py_verifier -- --config config/local.toml .ONESHELL: test: diff --git a/README.md b/README.md index 42a22cecff..38e3f4d5f0 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -[![License: MPL 2.0][mpl-svg]][mpl] [![Build 
Status][circleci-badge]][circleci] [![Connect to Matrix via the Riot webapp][matrix-badge]][matrix] +[![License: MPL 2.0][mpl-svg]][mpl] [![Build Status][circleci-badge]][circleci] +[![Connect to Matrix via the Riot webapp][matrix-badge]][matrix] # Syncstorage-rs @@ -12,6 +13,7 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org). - [Local Setup](#local-setup) - [MySQL](#mysql) - [Spanner](#spanner) + - [Sqlite](#sqlite) - [Running via Docker](#running-via-docker) - [Connecting to Firefox](#connecting-to-firefox) - [Logging](#logging) @@ -37,8 +39,13 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org). - pkg-config - [Rust stable](https://rustup.rs) - python 3.9+ -- MySQL 8.0 (or compatible) - * libmysqlclient (`brew install mysql` on macOS, `apt install libmysqlclient-dev` on Ubuntu, `apt install libmariadb-dev-compat` on Debian) +- At least one database backend (depending on which one you'll be running) + * MySQL 8.0 (or compatible) + * libmysqlclient ( + `brew install mysql` on macOS, + `apt install libmysqlclient-dev` on Ubuntu, + `apt install libmariadb-dev-compat` on Debian) + * SQLite v3.24 or greater Depending on your OS, you may also need to install `libgrpcdev`, and `protobuf-compiler-grpc`. *Note*: if the code complies cleanly, @@ -47,18 +54,23 @@ are missing `libcurl4-openssl-dev`. ## Local Setup -1. Follow the instructions below to use either MySQL or Spanner as your DB. -2. Now `cp config/local.example.toml config/local.toml`. Open `config/local.toml` and make sure you have the desired settings configured. For a complete list of available configuration options, check out [docs/config.md](docs/config.md). +1. Follow the instructions below to use either MySQL, Spanner, or SQLite as your DB. +2. Now `cp config/local.example.toml config/local.toml`. +Open `config/local.toml` and make sure you have the desired settings configured. +For a complete list of available configuration options, check out [docs/config.md](docs/config.md). 3. To start a local server in debug mode, run either: - `make run_mysql` if using MySQL or, - - `make run_spanner` if using spanner. + - `make run_spanner` if using spanner or, + - `make run_sqlite` if using SQLite. - The above starts the server in debug mode, using your new `local.toml` file for config options. Or, simply `cargo run` with your own config options provided as env vars. + The above starts the server in debug mode, using your new `local.toml` file for config options. + Or, simply `cargo run` with your own config options provided as env vars. 4. Visit `http://localhost:8000/__heartbeat__` to make sure the server is running. ### MySQL -Durable sync needs only a valid mysql DSN in order to set up connections to a MySQL database. The database can be local and is usually specified with a DSN like: +Durable sync needs only a valid mysql DSN in order to set up connections to a MySQL database. +The database can be local and is usually specified with a DSN like: `mysql://_user_:_password_@_host_/_database_` @@ -80,34 +92,59 @@ GRANT ALL PRIVILEGES on tokenserver_rs.* to sample_user@localhost; ### Spanner #### Authenticating via OAuth -The correct way to authenticate with Spanner is by generating an OAuth token and pointing your local application server to the token. In order for this to work, your Google Cloud account must have the correct permissions; contact the Ops team to ensure the correct permissions are added to your account. 
- -First, install the Google Cloud command-line interface by following the instructions for your operating system [here](https://cloud.google.com/sdk/docs/install). Next, run the following to log in with your Google account (this should be the Google account associated with your Mozilla LDAP credentials): +The correct way to authenticate with Spanner is by generating an OAuth token +and pointing your local application server to the token. +In order for this to work, your Google Cloud account must have the correct permissions; +contact the Ops team to ensure the correct permissions are added to your account. + +First, install the Google Cloud command-line interface by following the instructions +for your operating system [here](https://cloud.google.com/sdk/docs/install). +Next, run the following to log in with your Google account +(this should be the Google account associated with your Mozilla LDAP credentials): ```sh gcloud auth application-default login ``` -The above command will prompt you to visit a webpage in your browser to complete the login process. Once completed, ensure that a file called `application_default_credentials.json` has been created in the appropriate directory (on Linux, this directory is `$HOME/.config/gcloud/`). The Google Cloud SDK knows to check this location for your credentials, so no further configuration is needed. +The above command will prompt you to visit a webpage in your browser to complete the login process. +Once completed, ensure that a file called `application_default_credentials.json` has been created in the appropriate directory +(on Linux, this directory is `$HOME/.config/gcloud/`). +The Google Cloud SDK knows to check this location for your credentials, +so no further configuration is needed. ##### Key Revocation -Accidents happen, and you may need to revoke the access of a set of credentials if they have been publicly leaked. To do this, run: +Accidents happen, and you may need to revoke the access of a set of credentials if they have been publicly leaked. +To do this, run: ```sh gcloud auth application-default revoke ``` -This will revoke the access of the credentials currently stored in the `application_default_credentials.json` file. **If the file in that location does not contain the leaked credentials, you will need to copy the file containing the leaked credentials to that location and re-run the above command.** You can ensure that the leaked credentials are no longer active by attempting to connect to Spanner using the credentials. If access has been revoked, your application server should print an error saying that the token has expired or has been revoked. +This will revoke the access of the credentials currently stored in the `application_default_credentials.json` file. +**If the file in that location does not contain the leaked credentials, +you will need to copy the file containing the leaked credentials to that location and re-run the above command.** +You can ensure that the leaked credentials are no longer active by attempting to connect to Spanner using the credentials. +If access has been revoked, your application server should print an error saying that the token has expired or has been revoked. #### Authenticating via Service Account -An alternative to authentication via application default credentials is authentication via a service account. **Note that this method of authentication is not recommended. Service accounts are intended to be used by other applications or virtual machines and not people. 
See [this article](https://cloud.google.com/iam/docs/service-accounts#what_are_service_accounts) for more information.** +An alternative to authentication via application default credentials is authentication via a service account. +**Note that this method of authentication is not recommended. +Service accounts are intended to be used by other applications or virtual machines and not people. +See [this article](https://cloud.google.com/iam/docs/service-accounts#what_are_service_accounts) for more information.** -Your system administrator will be able to tell you which service account keys have access to the Spanner instance to which you are trying to connect. Once you are given the email identifier of an active key, log into the [Google Cloud Console Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) page. Be sure to select the correct project. +Your system administrator will be able to tell you which service account keys +have access to the Spanner instance to which you are trying to connect. +Once you are given the email identifier of an active key, +log into the [Google Cloud Console Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) page. +Be sure to select the correct project. - Locate the email identifier of the access key and pick the vertical dot menu at the far right of the row. - Select "_Create Key_" from the pop-up menu. - Select "JSON" from the Dialog Box. -A proper key file will be downloaded to your local directory. It's important to safeguard that key file. For this example, we're going to name the file +A proper key file will be downloaded to your local directory. +It's important to safeguard that key file. +For this example, we're going to name the file `service-account.json`. -The proper key file is in JSON format. An example file is provided below, with private information replaced by "`...`" +The proper key file is in JSON format. +An example file is provided below, with private information replaced by "`...`" ```json { @@ -135,11 +172,17 @@ To point to a GCP-hosted Spanner instance from your local machine, follow these 4. `make run_spanner`. 5. Visit `http://localhost:8000/__heartbeat__` to make sure the server is running. -Note, that unlike MySQL, there is no automatic migrations facility. Currently, the Spanner schema must be hand edited and modified. +Note, that unlike MySQL, there is no automatic migrations facility. +Currently, the Spanner schema must be hand edited and modified. #### Emulator -Google supports an in-memory Spanner emulator, which can run on your local machine for development purposes. You can install the emulator via the gcloud CLI or Docker by following the instructions [here](https://cloud.google.com/spanner/docs/emulator#installing_and_running_the_emulator). Once the emulator is running, you'll need to create a new instance and a new database. To create an instance using the REST API (exposed via port 9020 on the emulator), we can use `curl`: +Google supports an in-memory Spanner emulator, which can run on your local machine for development purposes. +You can install the emulator via the gcloud CLI or Docker by following the instructions +[here](https://cloud.google.com/spanner/docs/emulator#installing_and_running_the_emulator). +Once the emulator is running, you'll need to create a new instance and a new database. 
+To create an instance using the REST API (exposed via port 9020 on the emulator), +we can use `curl`: ```sh curl --request POST \ @@ -149,7 +192,11 @@ curl --request POST \ --data "{\"instance\":{\"config\":\"emulator-test-config\",\"nodeCount\":1,\"displayName\":\"Test Instance\"},\"instanceId\":\"$INSTANCE_ID\"}" ``` -Note that you may set `PROJECT_ID` and `INSTANCE_ID` to your liking. To create a new database on this instance, we'll use a similar HTTP request, but we'll need to include information about the database schema. Since we don't have migrations for Spanner, we keep an up-to-date schema in `src/db/spanner/schema.ddl`. The `jq` utility allows us to parse this file for use in the JSON body of an HTTP POST request: +Note that you may set `PROJECT_ID` and `INSTANCE_ID` to your liking. +To create a new database on this instance, we'll use a similar HTTP request, +but we'll need to include information about the database schema. +Since we don't have migrations for Spanner, we keep an up-to-date schema in `src/db/spanner/schema.ddl`. +The `jq` utility allows us to parse this file for use in the JSON body of an HTTP POST request: ```sh DDL_STATEMENTS=$( @@ -171,7 +218,10 @@ curl -sS --request POST \ --data "{\"createStatement\":\"CREATE DATABASE \`$DATABASE_ID\`\",\"extraStatements\":$DDL_STATEMENTS}" ``` -Note that, again, you may set `DATABASE_ID` to your liking. Make sure that the `database_url` config variable reflects your choice of project name, instance name, and database name (i.e. it should be of the format `spanner://projects//instances//databases/`). +Note that, again, you may set `DATABASE_ID` to your liking. +Make sure that the `database_url` config variable reflects your choice of project name, +instance name, and database name +(i.e. it should be of the format `spanner://projects//instances//databases/`). To run an application server that points to the local Spanner emulator: @@ -179,13 +229,41 @@ To run an application server that points to the local Spanner emulator: SYNC_SYNCSTORAGE__SPANNER_EMULATOR_HOST=localhost:9010 make run_spanner ``` +### SQLite + +Setting up the server with SQLite only requires a path to the database file, +which will be created automatically: + +One for the syncserver data +`sqlite:///syncdb.sqlite` +And one for the tokenserver data +`sqlite:///tokendb.sqlite` + +Note that after database initialisation you will still need to run two SQL +insert on the tokenserver database to announce the presence of your syncserver +to the clients. +```sql +-- Create a new service record +INSERT INTO `services` (`id`, `service`, `pattern`) +VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}'); + +-- Create a new service node record. Set the node field to the path of your +-- syncserver. +INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`, `capacity`, `downed`, `backoff`) +VALUES ('1', '1', 'http://localhost:8000', '1', '0', '1', '0', '0'); +``` + ### Running via Docker -This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/google-cloud-rust-raw/) crate. Please note that due to interdependencies, you will need to ensure that `grpcio` and `protobuf` match the version used by `google-cloud-rust-raw`. +This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/google-cloud-rust-raw/) crate. +Please note that due to interdependencies, +you will need to ensure that `grpcio` and `protobuf` match the version used by `google-cloud-rust-raw`. 1. 
Make sure you have [Docker installed](https://docs.docker.com/install/) locally. 2. Copy the contents of mozilla-rust-sdk into top level root dir here. -3. Comment out the `image` value under `syncserver` in either docker-compose.mysql.yml or docker-compose.spanner.yml (depending on which database backend you want to run), and add this instead: +3. Comment out the `image` value under `syncserver` in either docker-compose.mysql.yml +or docker-compose.spanner.yml +(depending on which database backend you want to run), and add this instead: ```yml build: @@ -193,15 +271,19 @@ This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/googl ``` 4. If you are using MySQL, adjust the MySQL db credentials in docker-compose.mysql.yml to match your local setup. -5. `make docker_start_mysql` or `make docker_start_spanner` - You can verify it's working by visiting [localhost:8000/\_\_heartbeat\_\_](http://localhost:8000/__heartbeat__) +5. `make docker_start_mysql` or `make docker_start_spanner` - +You can verify it's working by visiting [localhost:8000/\_\_heartbeat\_\_](http://localhost:8000/__heartbeat__) ### Connecting to Firefox This will walk you through the steps to connect this project to your local copy of Firefox. -1. Follow the steps outlined above for running this project using [MySQL](https://github.com/mozilla-services/syncstorage-rs#mysql). +1. Follow the steps outlined above for running this project using [MySQL](#mysql) or [SQLite](#sqlite). -2. Setup a local copy of [syncserver](https://github.com/mozilla-services/syncserver), with a few special changes to [syncserver.ini](https://github.com/mozilla-services/syncserver/blob/master/syncserver.ini); make sure that you're using the following values (in addition to all of the other defaults): +2. Setup a local copy of [syncserver](https://github.com/mozilla-services/syncserver), +with a few special changes to +[syncserver.ini](https://github.com/mozilla-services/syncserver/blob/master/syncserver.ini); +make sure that you're using the following values (in addition to all of the other defaults): ```ini [server:main] @@ -221,13 +303,18 @@ This will walk you through the steps to connect this project to your local copy sync-1.5 = "http://localhost:8000/1.5/1" ``` -3. In Firefox, go to `about:config`. Change `identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`. +3. In Firefox, go to `about:config`. +Change `identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`. 4. Restart Firefox. Now, try syncing. You should see new BSOs in your local MySQL instance. ## Logging ### Sentry: -1. If you want to connect to the existing [Sentry project](https://sentry.prod.mozaws.net/operations/syncstorage-local/) for local development, login to Sentry, and go to the page with [api keys](https://sentry.prod.mozaws.net/settings/operations/syncstorage-local/keys/). Copy the `DSN` value. +1. If you want to connect to the existing +[Sentry project](https://sentry.prod.mozaws.net/operations/syncstorage-local/) for local development, +login to Sentry, and go to the page with +[api keys](https://sentry.prod.mozaws.net/settings/operations/syncstorage-local/keys/). +Copy the `DSN` value. 2. Comment out the `human_logs` line in your `config/local.toml` file. 3. You can force an error to appear in Sentry by adding a `panic!` into main.rs, just before the final `Ok(())`. 4. Now, `SENTRY_DSN={INSERT_DSN_FROM_STEP_1_HERE} make run`. 
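
For the SQLite backend documented above, the tokenserver `services`/`nodes` rows still have to be inserted by hand. A minimal sketch of doing that with the `sqlite3` command-line shell follows; the `tokendb.sqlite` path mirrors the example DSN in the SQLite section, and the use of the `sqlite3` CLI is an assumption rather than something this change adds.

```sh
# Assumed path: matches the sqlite:///tokendb.sqlite DSN from the SQLite section.
DB=tokendb.sqlite

# Announce the sync-1.5 service and register this syncserver as its only node.
sqlite3 "$DB" <<'SQL'
INSERT INTO services (id, service, pattern)
VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}');

INSERT INTO nodes (id, service, node, available, current_load, capacity, downed, backoff)
VALUES ('1', '1', 'http://localhost:8000', '1', '0', '1', '0', '0');
SQL
```
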
@@ -273,11 +360,17 @@ in the mysql client: ### End-to-End tests -Functional tests live in [server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) and can be run against a local server, e.g.: +Functional tests live in +[server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) +and can be run against a local server, e.g.: -1. If you haven't already followed the instructions [here](https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html) to get all the dependencies for the [server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) repo, you should start there. +1. If you haven't already followed the instructions +[here](https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html) +to get all the dependencies for the +[server-syncstorage](https://github.com/mozilla-services/server-syncstorage/) repo, +you should start there. -2. Install (Python) server-syncstorage: +2. Install (Python) server-syncstorage: $ git clone https://github.com/mozilla-services/server-syncstorage/ $ cd server-syncstorage @@ -287,48 +380,68 @@ Functional tests live in [server-syncstorage](https://github.com/mozilla-service 4. To run all tests: - $ ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# + $ ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# 5. Individual tests can be specified via the `SYNC_TEST_PREFIX` env var: - $ SYNC_TEST_PREFIX=test_get_collection \ - ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# + $ SYNC_TEST_PREFIX=test_get_collection \ + ./local/bin/python syncstorage/tests/functional/test_storage.py http://localhost:8000# ## Creating Releases 1. Switch to master branch of syncstorage-rs -1. `git pull` to ensure that the local copy is up-to-date. -1. `git pull origin master` to make sure that you've incorporated any changes to the master branch. -1. `git diff origin/master` to ensure that there are no local staged or uncommited changes. -1. Bump the version number in [Cargo.toml](https://github.com/mozilla-services/syncstorage-rs/blob/master/Cargo.toml) (this new version number will be designated as `` in this checklist) -1. create a git branch for the new version `git checkout -b release/` -1. `cargo build --release` - Build with the release profile [release mode](https://doc.rust-lang.org/book/ch14-01-release-profiles.html). -1. `clog -C CHANGELOG.md` - Generate release notes. We're using [clog](https://github.com/clog-tool/clog-cli) for release notes. Add a `-p`, `-m` or `-M` flag to denote major/minor/patch version, ie `clog -C CHANGELOG.md -p`. -1. Review the `CHANGELOG.md` file and ensure all relevant changes since the last tag are included. -1. Create a new [release in Sentry](https://docs.sentry.io/product/releases/#create-release): `VERSION={release-version-here} bash scripts/sentry-release.sh`. If you're doing this for the first time, checkout the [tips below](https://github.com/mozilla-services/syncstorage-rs#troubleshooting) for troubleshooting sentry cli access. -1. `git commit -am "chore: tag "` to commit the new version and changes -1. `git tag -s -m "chore: tag " ` to create a signed tag of the current HEAD commit for release. -1. `git push origin release/` to push the commits to a new origin release branch -1. `git push --tags origin release/` to push the tags to the release branch. -1. Submit a Pull Request (PR) on github to merge the release branch to master. -1. 
Go to the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases), you should see the new tag with no release information. -1. Click the `Draft a new release` button. -1. Enter the \ number for `Tag version`. -1. Copy and paste the most recent change set from `CHANGELOG.md` into the release description, omitting the top 2 lines (the name and version) -1. Once your PR merges, click [Publish Release] on the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases) page. - -Sync server is automatically deployed to STAGE, however QA may need to be notified if testing is required. Once QA signs off, then a bug should be filed to promote the server to PRODUCTION. +2. `git pull` to ensure that the local copy is up-to-date. +3. `git pull origin master` to make sure that you've incorporated any changes to the master branch. +4. `git diff origin/master` to ensure that there are no local staged or uncommited changes. +5. Bump the version number in [Cargo.toml](https://github.com/mozilla-services/syncstorage-rs/blob/master/Cargo.toml) +(this new version number will be designated as `` in this checklist) +6. create a git branch for the new version `git checkout -b release/` +7. `cargo build --release` - Build with the release profile +[release mode](https://doc.rust-lang.org/book/ch14-01-release-profiles.html). +8. `clog -C CHANGELOG.md` - Generate release notes. +We're using [clog](https://github.com/clog-tool/clog-cli) for release notes. +Add a `-p`, `-m` or `-M` flag to denote major/minor/patch version, ie `clog -C CHANGELOG.md -p`. +9. Review the `CHANGELOG.md` file and ensure all relevant changes since the last tag are included. +10. Create a new [release in Sentry](https://docs.sentry.io/product/releases/#create-release): +`VERSION={release-version-here} bash scripts/sentry-release.sh`. +If you're doing this for the first time, checkout the +[tips below](https://github.com/mozilla-services/syncstorage-rs#troubleshooting) for troubleshooting sentry cli access. +11. `git commit -am "chore: tag "` to commit the new version and changes +12. `git tag -s -m "chore: tag " ` to create a signed tag of the current HEAD commit for release. +13. `git push origin release/` to push the commits to a new origin release branch +14. `git push --tags origin release/` to push the tags to the release branch. +15. Submit a Pull Request (PR) on github to merge the release branch to master. +16. Go to the [GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases), +you should see the new tag with no release information. +17. Click the `Draft a new release` button. +18. Enter the \ number for `Tag version`. +19. Copy and paste the most recent change set from `CHANGELOG.md` into the release description, +omitting the top 2 lines (the name and version) +20. Once your PR merges, click [Publish Release] on the +[GitHub release](https://github.com/mozilla-services/syncstorage-rs/releases) page. + +Sync server is automatically deployed to STAGE, +however QA may need to be notified if testing is required. +Once QA signs off, then a bug should be filed to promote the server to PRODUCTION. ## Troubleshooting - `rm Cargo.lock; cargo clean;` - Try this if you're having problems compiling. -- Some versions of OpenSSL 1.1.1 can conflict with grpcio's built in BoringSSL. These errors can cause syncstorage to fail to run or compile. -If you see a problem related to `libssl` you may need to specify the `cargo` option `--features grpcio/openssl` to force grpcio to use OpenSSL. 
+- Some versions of OpenSSL 1.1.1 can conflict with grpcio's built-in BoringSSL.
+These errors can cause syncstorage to fail to run or compile.
+If you see a problem related to `libssl`, you may need to specify the `cargo` option
+`--features grpcio/openssl` to force grpcio to use OpenSSL.

### Sentry

-- If you're having trouble working with Sentry to create releases, try authenticating using their self hosted server option that's outlined [here](https://docs.sentry.io/product/cli/configuration/) Ie, `sentry-cli --url https://selfhosted.url.com/ login`. It's also recommended to create a `.sentryclirc` config file. See [this example](https://github.com/mozilla-services/syncstorage-rs/blob/master/.sentryclirc.example) for the config values you'll need.
+- If you're having trouble working with Sentry to create releases,
+try authenticating using their self-hosted server option that's outlined
+[here](https://docs.sentry.io/product/cli/configuration/), i.e.,
+`sentry-cli --url https://selfhosted.url.com/ login`.
+It's also recommended to create a `.sentryclirc` config file.
+See [this example](https://github.com/mozilla-services/syncstorage-rs/blob/master/.sentryclirc.example)
+for the config values you'll need.

## Related Documentation

diff --git a/config/local.example.toml b/config/local.example.toml
index f845b5c99e..1f3fdfd2f4 100644
--- a/config/local.example.toml
+++ b/config/local.example.toml
@@ -8,6 +8,9 @@ human_logs = 1
 syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs"
 # Example Spanner DSN:
 # database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB"
+# Example SQLite DSN:
+# database_url="sqlite://PATH_TO_FILE/FILE.sqlite"
+
 # enable quota limits
 syncstorage.enable_quota = 0
 # set the quota limit to 2GB.
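
The SQLite DSN comment added to `config/local.example.toml` above can also be exercised without a config file by passing the equivalent settings as environment variables. The sketch below borrows placeholder values from the docker-compose files added later in this diff and uses the same feature flags as the Makefile's new `run_sqlite` target; the exact paths and secrets are assumptions, not part of the change.

```sh
# Placeholder values; the SYNC_* variable names mirror docker-compose.sqlite.yaml.
export SYNC_HOST=0.0.0.0
export SYNC_MASTER_SECRET=secret0
export SYNC_SYNCSTORAGE__DATABASE_URL="sqlite:///syncdb.sqlite"
export SYNC_TOKENSERVER__DATABASE_URL="sqlite:///tokendb.sqlite"

# Same feature flags as the Makefile's run_sqlite target, which additionally
# activates the Python venv required by py_verifier.
cargo run --no-default-features --features=sqlite,py_verifier
```

Visiting `http://localhost:8000/__heartbeat__` afterwards is the same smoke test the README already suggests for the other backends.
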
@@ -26,5 +29,5 @@ tokenserver.fxa_browserid_issuer = "https://api-accounts.stage.mozaws.net" tokenserver.fxa_browserid_server_url = "https://verifier.stage.mozaws.net/v2" # cors settings -# cors_allowed_origin = "localhost" +cors_allowed_origin = "localhost" # cors_max_age = 86400 diff --git a/docker-compose.e2e.sqlite.yaml b/docker-compose.e2e.sqlite.yaml new file mode 100644 index 0000000000..d83c185d71 --- /dev/null +++ b/docker-compose.e2e.sqlite.yaml @@ -0,0 +1,39 @@ +version: "3" +services: + syncserver: + entrypoint: + /bin/sh -c " + sleep 15; + /app/bin/syncserver; + " + sqlite-e2e-tests: + depends_on: + - mock-fxa-server + - syncserver + image: app:build + privileged: true + user: root + environment: + MOCK_FXA_SERVER_URL: http://mock-fxa-server:6000 + SYNC_HOST: 0.0.0.0 + SYNC_MASTER_SECRET: secret0 + SYNC_SYNCSTORAGE__DATABASE_URL: sqlite:////data/syncstoragedb.sqlite + SYNC_TOKENSERVER__DATABASE_URL: sqlite:////data/tokenserverdb.sqlite + SYNC_TOKENSERVER__ENABLED: "true" + SYNC_TOKENSERVER__FXA_BROWSERID_AUDIENCE: "https://token.stage.mozaws.net/" + SYNC_TOKENSERVER__FXA_BROWSERID_ISSUER: "api-accounts.stage.mozaws.net" + SYNC_TOKENSERVER__FXA_EMAIL_DOMAIN: api-accounts.stage.mozaws.net + SYNC_TOKENSERVER__FXA_METRICS_HASH_SECRET: secret0 + SYNC_TOKENSERVER__RUN_MIGRATIONS: "true" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__KTY: "RSA" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__ALG: "RS256" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__KID: "20190730-15e473fd" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__FXA_CREATED_AT: "1564502400" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__USE: "sig" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__N: "15OpVGC7ws_SlU0gRbRh1Iwo8_gR8ElX2CDnbN5blKyXLg-ll0ogktoDXc-tDvTabRTxi7AXU0wWQ247odhHT47y5uz0GASYXdfPponynQ_xR9CpNn1eEL1gvDhQN9rfPIzfncl8FUi9V4WMd5f600QC81yDw9dX-Z8gdkru0aDaoEKF9-wU2TqrCNcQdiJCX9BISotjz_9cmGwKXFEekQNJWBeRQxH2bUmgwUK0HaqwW9WbYOs-zstNXXWFsgK9fbDQqQeGehXLZM4Cy5Mgl_iuSvnT3rLzPo2BmlxMLUvRqBx3_v8BTtwmNGA0v9O0FJS_mnDq0Iue0Dz8BssQCQ" + SYNC_TOKENSERVER__FXA_OAUTH_PRIMARY_JWK__E: "AQAB" + TOKENSERVER_HOST: http://localhost:8000 + entrypoint: + /bin/sh -c " + sleep 28; python3 /app/tools/integration_tests/run.py 'http://localhost:8000#secret0' + " diff --git a/docker-compose.sqlite.yaml b/docker-compose.sqlite.yaml new file mode 100644 index 0000000000..9f6329a646 --- /dev/null +++ b/docker-compose.sqlite.yaml @@ -0,0 +1,42 @@ +# NOTE: This docker-compose file was constructed to create a base for +# use by the End-to-end tests. It has not been fully tested for use in +# constructing a true, stand-alone sync server. +# If you're interested in doing that, please join our community in the +# github issues and comments. +# +# Application runs off of port 8000. +# you can test if it's available with +# curl "http://localhost:8000/__heartbeat__" + +version: "3" +services: + mock-fxa-server: + image: app:build + restart: "no" + entrypoint: "sh scripts/start_mock_fxa_server.sh" + environment: + MOCK_FXA_SERVER_HOST: 0.0.0.0 + MOCK_FXA_SERVER_PORT: 6000 + + syncserver: + # NOTE: The naming in the rest of this repository has been updated to reflect the fact + # that Syncstorage and Tokenserver are now part of one repository/server called + # "Syncserver" (updated from "syncstorage-rs"). We keep the legacy naming below for + # backwards compatibility with previous Docker images. 
+ image: ${SYNCSTORAGE_RS_IMAGE:-syncstorage-rs:latest} + restart: always + ports: + - "8000:8000" + volumes: + - sqlite_data:/data/ + environment: + SYNC_HOST: 0.0.0.0 + SYNC_MASTER_SECRET: secret0 + SYNC_SYNCSTORAGE__DATABASE_URL: sqlite:////data/syncserverdb.sqlite + SYNC_SYNCSTORAGE__DATABASE_POOL_MIN_IDLE: 0 + SYNC_TOKENSERVER__DATABASE_URL: sqlite:////data/tokenserverdb.sqlite + SYNC_TOKENSERVER__DATABASE_POOL_MIN_IDLE: 0 + SYNC_TOKENSERVER__RUN_MIGRATIONS: "true" + +volumes: + sqlite_data: diff --git a/err.txt b/err.txt new file mode 100644 index 0000000000..c20f3b0e06 --- /dev/null +++ b/err.txt @@ -0,0 +1,20 @@ +MYSQL + +ERROR: test_client_specified_duration (tokenserver.test_authorization.TestAuthorization) +---------------------------------------------------------------------- +Traceback (most recent call last): + File "/app/tools/integration_tests/tokenserver/test_authorization.py", line 10, in setUp + super(TestAuthorization, self).setUp() + File "/app/tools/integration_tests/tokenserver/test_support.py", line 62, in setUp + self._add_node(capacity=100, node=self.NODE_URL, id=self.NODE_ID) + File "/app/tools/integration_tests/tokenserver/test_support.py", line 144, in _add_node + cursor = self._execute_sql(query, data) + File "/app/tools/integration_tests/tokenserver/test_support.py", line 323, in _execute_sql + cursor.execute(query, args) + File "/usr/local/lib/python3.9/dist-packages/MySQLdb/cursors.py", line 206, in execute + res = self._query(query) + File "/usr/local/lib/python3.9/dist-packages/MySQLdb/cursors.py", line 319, in _query + db.query(q) + File "/usr/local/lib/python3.9/dist-packages/MySQLdb/connections.py", line 254, in query + _mysql.connection.query(self, query) +MySQLdb.OperationalError: (1048, "Column 'service' cannot be null") diff --git a/sqlite-e2e-tests_1 b/sqlite-e2e-tests_1 new file mode 100644 index 0000000000..ec35c72640 --- /dev/null +++ b/sqlite-e2e-tests_1 @@ -0,0 +1,329 @@ + Creating project_mock-fxa-server_1 ... done  Creating project_syncserver_1 ... done Creating project_sqlite-e2e-tests_1 ... + Creating project_sqlite-e2e-tests_1 ... 
done Attaching to project_mock-fxa-server_1, project_syncserver_1, project_sqlite-e2e-tests_1 +sqlite-e2e-tests_1 | thread 'main' panicked at syncserver/src/main.rs:61:55: +sqlite-e2e-tests_1 | called `Result::unwrap()` on an `Err` value: ApiError { kind: Db(DbError { kind: Sql(SqlError { kind: DieselConnection(BadConnection("Unable to open the database file")), status: 500, backtrace: 0: >::from +sqlite-e2e-tests_1 | at /app/syncserver-db-common/src/error.rs:37:24 +sqlite-e2e-tests_1 | >::into +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/convert/mod.rs:759:9 +sqlite-e2e-tests_1 | >::from +sqlite-e2e-tests_1 | at /app/syncserver-common/src/lib.rs:50:33 +sqlite-e2e-tests_1 | 1: >::from::{{closure}} +sqlite-e2e-tests_1 | at /app/syncstorage-sql-db-common/src/error.rs:148:77 +sqlite-e2e-tests_1 | >::from +sqlite-e2e-tests_1 | at /app/syncserver-common/src/lib.rs:50:17 +sqlite-e2e-tests_1 | 2: as core::ops::try_trait::FromResidual>>::from_residual +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/result.rs:1964:27 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::run_embedded_migrations +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:37:16 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::SqliteDbPool::new +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:69:9 +sqlite-e2e-tests_1 | 3: syncserver::server::Server::with_settings::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/server/mod.rs:271:23 +sqlite-e2e-tests_1 | syncserver::main::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:61:49 +sqlite-e2e-tests_1 | 4: as core::future::future::Future>::poll::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1041:42 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:793:13 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:791:17 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1031:9 +sqlite-e2e-tests_1 | 5: tokio::task::local::LocalSet::run_until::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:689:19 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/future/future.rs:123:9 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:57 +sqlite-e2e-tests_1 | tokio::runtime::coop::with_budget +sqlite-e2e-tests_1 | at 
/usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:107:5 +sqlite-e2e-tests_1 | tokio::runtime::coop::budget +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:73:5 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:25 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::Context::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:428:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:728:36 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::enter::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:68 +sqlite-e2e-tests_1 | tokio::runtime::context::scoped::Scoped::set +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/scoped.rs:40:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:26 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:17 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:27 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:716:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:196:28 +sqlite-e2e-tests_1 | tokio::runtime::context::runtime::enter_runtime +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/runtime.rs:65:16 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:184:9 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on_inner +sqlite-e2e-tests_1 | at 
/usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:368:47 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:342:13 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:646:9 +sqlite-e2e-tests_1 | actix_rt::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/runtime.rs:138:20 +sqlite-e2e-tests_1 | actix_rt::system::SystemRunner::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/system.rs:244:17 +sqlite-e2e-tests_1 | syncserver::main +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:27:1 +sqlite-e2e-tests_1 | 6: core::ops::function::FnOnce::call_once +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:250:5 +sqlite-e2e-tests_1 | std::sys_common::backtrace::__rust_begin_short_backtrace +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/sys_common/backtrace.rs:155:18 +sqlite-e2e-tests_1 | 7: std::rt::lang_start::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:166:18 +sqlite-e2e-tests_1 | 8: core::ops::function::impls:: for &F>::call_once +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:284:13 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | std::rt::lang_start_internal::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:48 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | std::rt::lang_start_internal +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:20 +sqlite-e2e-tests_1 | 9: main +sqlite-e2e-tests_1 | 10: __libc_start_main +sqlite-e2e-tests_1 | at ./csu/../csu/libc-start.c:308:16 +sqlite-e2e-tests_1 | 11: _start +sqlite-e2e-tests_1 | }), status: 500, backtrace: 0: >::from +sqlite-e2e-tests_1 | at /app/syncstorage-sql-db-common/src/error.rs:66:37 +sqlite-e2e-tests_1 | 1: >::from::{{closure}} +sqlite-e2e-tests_1 | at /app/syncstorage-sql-db-common/src/error.rs:148:46 +sqlite-e2e-tests_1 | >::from +sqlite-e2e-tests_1 | at /app/syncserver-common/src/lib.rs:50:17 +sqlite-e2e-tests_1 | 2: as core::ops::try_trait::FromResidual>>::from_residual +sqlite-e2e-tests_1 | at 
/rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/result.rs:1964:27 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::run_embedded_migrations +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:37:16 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::SqliteDbPool::new +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:69:9 +sqlite-e2e-tests_1 | 3: syncserver::server::Server::with_settings::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/server/mod.rs:271:23 +sqlite-e2e-tests_1 | syncserver::main::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:61:49 +sqlite-e2e-tests_1 | 4: as core::future::future::Future>::poll::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1041:42 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:793:13 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:791:17 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1031:9 +sqlite-e2e-tests_1 | 5: tokio::task::local::LocalSet::run_until::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:689:19 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/future/future.rs:123:9 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:57 +sqlite-e2e-tests_1 | tokio::runtime::coop::with_budget +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:107:5 +sqlite-e2e-tests_1 | tokio::runtime::coop::budget +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:73:5 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:25 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::Context::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:428:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:728:36 +sqlite-e2e-tests_1 | 
tokio::runtime::scheduler::current_thread::CoreGuard::enter::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:68 +sqlite-e2e-tests_1 | tokio::runtime::context::scoped::Scoped::set +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/scoped.rs:40:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:26 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:17 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:27 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:716:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:196:28 +sqlite-e2e-tests_1 | tokio::runtime::context::runtime::enter_runtime +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/runtime.rs:65:16 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:184:9 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on_inner +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:368:47 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:342:13 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:646:9 +sqlite-e2e-tests_1 | actix_rt::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/runtime.rs:138:20 +sqlite-e2e-tests_1 | actix_rt::system::SystemRunner::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/system.rs:244:17 +sqlite-e2e-tests_1 | syncserver::main +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:27:1 +sqlite-e2e-tests_1 | 6: core::ops::function::FnOnce::call_once +sqlite-e2e-tests_1 | at 
/rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:250:5 +sqlite-e2e-tests_1 | std::sys_common::backtrace::__rust_begin_short_backtrace +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/sys_common/backtrace.rs:155:18 +sqlite-e2e-tests_1 | 7: std::rt::lang_start::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:166:18 +sqlite-e2e-tests_1 | 8: core::ops::function::impls:: for &F>::call_once +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:284:13 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | std::rt::lang_start_internal::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:48 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | std::rt::lang_start_internal +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:20 +sqlite-e2e-tests_1 | 9: main +sqlite-e2e-tests_1 | 10: __libc_start_main +sqlite-e2e-tests_1 | at ./csu/../csu/libc-start.c:308:16 +sqlite-e2e-tests_1 | 11: _start +sqlite-e2e-tests_1 | }), backtrace: 0: >::from +sqlite-e2e-tests_1 | at /app/syncstorage-sql-db-common/src/error.rs:66:37 +sqlite-e2e-tests_1 | 1: >::from::{{closure}} +sqlite-e2e-tests_1 | at /app/syncstorage-sql-db-common/src/error.rs:148:46 +sqlite-e2e-tests_1 | >::from +sqlite-e2e-tests_1 | at /app/syncserver-common/src/lib.rs:50:17 +sqlite-e2e-tests_1 | 2: as core::ops::try_trait::FromResidual>>::from_residual +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/result.rs:1964:27 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::run_embedded_migrations +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:37:16 +sqlite-e2e-tests_1 | syncstorage_sqlite::pool::SqliteDbPool::new +sqlite-e2e-tests_1 | at /app/syncstorage-sqlite/src/pool.rs:69:9 +sqlite-e2e-tests_1 | 3: syncserver::server::Server::with_settings::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/server/mod.rs:271:23 +sqlite-e2e-tests_1 | syncserver::main::{{closure}} +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:61:49 +sqlite-e2e-tests_1 | 4: as core::future::future::Future>::poll::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1041:42 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:793:13 +sqlite-e2e-tests_1 | 
std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::with +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:791:17 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:1031:9 +sqlite-e2e-tests_1 | 5: tokio::task::local::LocalSet::run_until::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:689:19 +sqlite-e2e-tests_1 | as core::future::future::Future>::poll +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/future/future.rs:123:9 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:57 +sqlite-e2e-tests_1 | tokio::runtime::coop::with_budget +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:107:5 +sqlite-e2e-tests_1 | tokio::runtime::coop::budget +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/coop.rs:73:5 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}}::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:729:25 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::Context::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:428:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:728:36 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::enter::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:68 +sqlite-e2e-tests_1 | tokio::runtime::context::scoped::Scoped::set +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/scoped.rs:40:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:26 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::try_with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:284:16 +sqlite-e2e-tests_1 | std::thread::local::LocalKey::with +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/thread/local.rs:260:9 +sqlite-e2e-tests_1 | tokio::runtime::context::set_scheduler +sqlite-e2e-tests_1 | at 
/usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context.rs:180:17 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::enter +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:807:27 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CoreGuard::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:716:19 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on::{{closure}} +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:196:28 +sqlite-e2e-tests_1 | tokio::runtime::context::runtime::enter_runtime +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/context/runtime.rs:65:16 +sqlite-e2e-tests_1 | tokio::runtime::scheduler::current_thread::CurrentThread::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/scheduler/current_thread/mod.rs:184:9 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on_inner +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:368:47 +sqlite-e2e-tests_1 | tokio::runtime::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/runtime/runtime.rs:342:13 +sqlite-e2e-tests_1 | tokio::task::local::LocalSet::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.43.0/src/task/local.rs:646:9 +sqlite-e2e-tests_1 | actix_rt::runtime::Runtime::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/runtime.rs:138:20 +sqlite-e2e-tests_1 | actix_rt::system::SystemRunner::block_on +sqlite-e2e-tests_1 | at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/actix-rt-2.10.0/src/system.rs:244:17 +sqlite-e2e-tests_1 | syncserver::main +sqlite-e2e-tests_1 | at /app/syncserver/src/main.rs:27:1 +sqlite-e2e-tests_1 | 6: core::ops::function::FnOnce::call_once +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:250:5 +sqlite-e2e-tests_1 | std::sys_common::backtrace::__rust_begin_short_backtrace +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/sys_common/backtrace.rs:155:18 +sqlite-e2e-tests_1 | 7: std::rt::lang_start::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:166:18 +sqlite-e2e-tests_1 | 8: core::ops::function::impls:: for &F>::call_once +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:284:13 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | 
std::rt::lang_start_internal::{{closure}} +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:48 +sqlite-e2e-tests_1 | std::panicking::try::do_call +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:552:40 +sqlite-e2e-tests_1 | std::panicking::try +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:516:19 +sqlite-e2e-tests_1 | std::panic::catch_unwind +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panic.rs:146:14 +sqlite-e2e-tests_1 | std::rt::lang_start_internal +sqlite-e2e-tests_1 | at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/rt.rs:148:20 +sqlite-e2e-tests_1 | 9: main +sqlite-e2e-tests_1 | 10: __libc_start_main +sqlite-e2e-tests_1 | at ./csu/../csu/libc-start.c:308:16 +sqlite-e2e-tests_1 | 11: _start +sqlite-e2e-tests_1 | , status: 500 } +sqlite-e2e-tests_1 | note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace +Stopping project_sqlite-e2e-tests_1 ... + Stopping project_sqlite-e2e-tests_1 ... done  Stopping project_syncserver_1 ... done  Stopping project_mock-fxa-server_1 ... done  diff --git a/syncserver-db-common/Cargo.toml b/syncserver-db-common/Cargo.toml index 499aa274d5..813522e732 100644 --- a/syncserver-db-common/Cargo.toml +++ b/syncserver-db-common/Cargo.toml @@ -12,6 +12,11 @@ futures.workspace = true http.workspace = true thiserror.workspace = true -diesel = { workspace = true, features = ["mysql", "r2d2"] } -diesel_migrations = { workspace = true, features = ["mysql"] } +diesel = { workspace = true, features = ["mysql", "sqlite","r2d2"] } +diesel_migrations = { workspace = true, features = ["mysql", "sqlite"] } syncserver-common = { path = "../syncserver-common" } + +[features] +sql = [] +mysql = ["sql"] +sqlite = ["sql"] diff --git a/syncserver-db-common/src/lib.rs b/syncserver-db-common/src/lib.rs index 5e2273760a..d3259bfea5 100644 --- a/syncserver-db-common/src/lib.rs +++ b/syncserver-db-common/src/lib.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "sql")] pub mod error; pub mod test; diff --git a/syncserver-db-common/src/test.rs b/syncserver-db-common/src/test.rs index 351888f3fb..6db3dd0376 100644 --- a/syncserver-db-common/src/test.rs +++ b/syncserver-db-common/src/test.rs @@ -1,14 +1,26 @@ use diesel::{ - mysql::MysqlConnection, r2d2::{CustomizeConnection, Error as PoolError}, Connection, }; +#[cfg(feature = "mysql")] +use diesel::mysql::MysqlConnection; +#[cfg(feature = "sqlite")] +use diesel::sqlite::SqliteConnection; + #[derive(Debug)] pub struct TestTransactionCustomizer; +#[cfg(feature = "mysql")] impl CustomizeConnection for TestTransactionCustomizer { fn on_acquire(&self, conn: &mut MysqlConnection) -> Result<(), PoolError> { conn.begin_test_transaction().map_err(PoolError::QueryError) } } + +#[cfg(feature = "sqlite")] +impl CustomizeConnection for TestTransactionCustomizer { + fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), PoolError> { + conn.begin_test_transaction().map_err(PoolError::QueryError) + } +} diff --git a/syncserver/Cargo.toml b/syncserver/Cargo.toml index 7c26ac3f98..43d28d0373 100644 --- a/syncserver/Cargo.toml +++ b/syncserver/Cargo.toml @@ -58,9 +58,11 @@ validator_derive = "0.19" woothee = "0.13" [features] -default = ["mysql", "py_verifier"] +default = ["py_verifier"] no_auth = [] py_verifier = ["tokenserver-auth/py"] -mysql = ["syncstorage-db/mysql"] -spanner = ["syncstorage-db/spanner"] +mysql = 
["syncstorage-db/mysql", "tokenserver-db/mysql"] +# Spanner backend rely on MySQL for token server database +spanner = ["syncstorage-db/spanner", "tokenserver-db/mysql"] +sqlite = ["syncstorage-db/sqlite", "tokenserver-db/sqlite"] actix-compress = ["actix-web/compress-brotli", "actix-web/compress-gzip", "actix-web/compress-zstd"] diff --git a/syncserver/src/db/mod.rs b/syncserver/src/db/mod.rs index a7d89e69bf..b0f18f6631 100644 --- a/syncserver/src/db/mod.rs +++ b/syncserver/src/db/mod.rs @@ -1,8 +1,12 @@ //! Generic db abstration. pub mod mock; +#[cfg(feature = "mysql")] pub mod mysql; +#[cfg(feature = "spanner")] pub mod spanner; +#[cfg(feature = "sqlite")] +pub mod sqlite; #[cfg(test)] mod tests; pub mod transaction; @@ -27,14 +31,22 @@ pub async fn pool_from_settings( let url = Url::parse(&settings.database_url).map_err(|e| DbErrorKind::InvalidUrl(e.to_string()))?; Ok(match url.scheme() { + #[cfg(feature = "mysql")] "mysql" => Box::new(mysql::pool::MysqlDbPool::new( settings, metrics, blocking_threadpool, )?), + #[cfg(feature = "spanner")] "spanner" => Box::new( spanner::pool::SpannerDbPool::new(settings, metrics, blocking_threadpool).await?, ), + #[cfg(feature = "sqlite")] + "sqlite" => Box::new(sqlite::pool::SqliteDbPool::new( + settings, + metrics, + blocking_threadpool, + )?), _ => Err(DbErrorKind::InvalidUrl(settings.database_url.to_owned()))?, }) } diff --git a/syncserver/src/server/test.rs b/syncserver/src/server/test.rs index 67038841d5..1c96ed4d63 100644 --- a/syncserver/src/server/test.rs +++ b/syncserver/src/server/test.rs @@ -62,15 +62,33 @@ fn get_test_settings() -> Settings { .as_str(), ) .expect("Could not get pool_size in get_test_settings"); - if cfg!(feature = "mysql") && settings.syncstorage.uses_spanner() { + if cfg!(feature = "mysql") + && !&settings + .syncstorage + .database_url + .as_str() + .starts_with("mysql://") + { panic!( - "Spanner database_url specified for MySQL feature, please correct.\n\t{}", + "Spanner or SQLite database_url specified for MySQL feature, please correct.\n\t{}", &settings.syncstorage.database_url ) } if cfg!(feature = "spanner") && !&settings.syncstorage.uses_spanner() { panic!( - "MySQL database_url specified for Spanner feature, please correct.\n\t{}", + "MySQL or SQLite database_url specified for Spanner feature, please correct.\n\t{}", + &settings.syncstorage.database_url + ) + } + if cfg!(feature = "sqlite") + && !&settings + .syncstorage + .database_url + .as_str() + .starts_with("sqlite://") + { + panic!( + "Spanner or MySQL database_url specified for SQLite feature, please correct.\n\t{}", &settings.syncstorage.database_url ) } diff --git a/syncstorage-db-common/Cargo.toml b/syncstorage-db-common/Cargo.toml index 25c40ac4dd..dc59e1eade 100644 --- a/syncstorage-db-common/Cargo.toml +++ b/syncstorage-db-common/Cargo.toml @@ -16,7 +16,6 @@ serde_json.workspace = true thiserror.workspace = true async-trait = "0.1.40" -# diesel = 1.4 diesel = { workspace = true, features = ["mysql", "r2d2"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } diff --git a/syncstorage-db/Cargo.toml b/syncstorage-db/Cargo.toml index eb183355e9..a9ce51a3a7 100644 --- a/syncstorage-db/Cargo.toml +++ b/syncstorage-db/Cargo.toml @@ -18,14 +18,16 @@ log = { version = "0.4", features = [ "release_max_level_info", ] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = 
"../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } syncserver-settings = { path = "../syncserver-settings" } syncstorage-db-common = { path = "../syncstorage-db-common" } syncstorage-mysql = { path = "../syncstorage-mysql", optional = true } -syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sqlite = { path = "../syncstorage-sqlite/", optional = true} syncstorage-spanner = { path = "../syncstorage-spanner", optional = true } +syncstorage-settings = { path = "../syncstorage-settings" } tokio = { workspace = true, features = ["macros", "sync"] } [features] mysql = ['syncstorage-mysql'] spanner = ['syncstorage-spanner'] +sqlite = ['syncstorage-sqlite'] diff --git a/syncstorage-db/src/lib.rs b/syncstorage-db/src/lib.rs index cfd0ee8fea..763557833d 100644 --- a/syncstorage-db/src/lib.rs +++ b/syncstorage-db/src/lib.rs @@ -15,6 +15,13 @@ pub use syncstorage_mysql::DbError; #[cfg(feature = "mysql")] pub type DbImpl = syncstorage_mysql::MysqlDb; +#[cfg(feature = "sqlite")] +pub type DbPoolImpl = syncstorage_sqlite::SqliteDbPool; +#[cfg(feature = "sqlite")] +pub use syncstorage_sqlite::DbError; +#[cfg(feature = "sqlite")] +pub type DbImpl = syncstorage_sqlite::SqliteDb; + #[cfg(feature = "spanner")] pub type DbPoolImpl = syncstorage_spanner::SpannerDbPool; #[cfg(feature = "spanner")] @@ -31,8 +38,14 @@ pub use syncstorage_db_common::{ Db, DbPool, Sorting, UserIdentifier, }; -#[cfg(all(feature = "mysql", feature = "spanner"))] -compile_error!("only one of the \"mysql\" and \"spanner\" features can be enabled at a time"); - -#[cfg(not(any(feature = "mysql", feature = "spanner")))] -compile_error!("exactly one of the \"mysql\" and \"spanner\" features must be enabled"); +#[cfg(any( + all(feature = "mysql", feature = "spanner"), + all(feature = "mysql", feature = "sqlite"), + all(feature = "spanner", feature = "sqlite") +))] +compile_error!( + "only one of the \"mysql\", \"spanner\" and \"sqlite\" features can be enabled at a time" +); + +#[cfg(not(any(feature = "mysql", feature = "spanner", feature = "sqlite")))] +compile_error!("exactly one of the \"mysql\", \"spanner\" and \"sqlite\" features must be enabled"); diff --git a/syncstorage-mysql/Cargo.toml b/syncstorage-mysql/Cargo.toml index 7d2a7c3421..659f612d9b 100644 --- a/syncstorage-mysql/Cargo.toml +++ b/syncstorage-mysql/Cargo.toml @@ -14,14 +14,14 @@ slog-scope.workspace = true thiserror.workspace = true async-trait = "0.1.40" -# There appears to be a compilation error with diesel diesel = { workspace = true, features = ["mysql", "r2d2"] } diesel_logger = { workspace = true } diesel_migrations = { workspace = true, features = ["mysql"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["mysql"] } syncstorage-db-common = { path = "../syncstorage-db-common" } syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sql-db-common = { path = "../syncstorage-sql-db-common" } url = "2.1" [dev-dependencies] diff --git a/syncstorage-mysql/src/batch.rs b/syncstorage-mysql/src/batch.rs index 1e487440b5..d3fa31af36 100644 --- a/syncstorage-mysql/src/batch.rs +++ b/syncstorage-mysql/src/batch.rs @@ -11,9 +11,9 @@ use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, }; use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; +use syncstorage_sql_db_common::error::DbError; use 
super::{ - error::DbError, models::MysqlDb, schema::{batch_upload_items, batch_uploads}, DbResult, diff --git a/syncstorage-mysql/src/lib.rs b/syncstorage-mysql/src/lib.rs index ea1541b5f2..d0d33f2032 100644 --- a/syncstorage-mysql/src/lib.rs +++ b/syncstorage-mysql/src/lib.rs @@ -9,15 +9,14 @@ extern crate slog_scope; #[macro_use] mod batch; mod diesel_ext; -mod error; mod models; mod pool; mod schema; #[cfg(test)] mod test; -pub use error::DbError; pub use models::MysqlDb; pub use pool::MysqlDbPool; +pub use syncstorage_sql_db_common::error::DbError; -pub(crate) type DbResult = Result; +pub(crate) type DbResult = Result; diff --git a/syncstorage-mysql/src/models.rs b/syncstorage-mysql/src/models.rs index 287692bfb3..64c251bc8c 100644 --- a/syncstorage-mysql/src/models.rs +++ b/syncstorage-mysql/src/models.rs @@ -22,10 +22,12 @@ use syncstorage_db_common::{ DEFAULT_BSO_TTL, }; use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS}; +use syncstorage_sql_db_common::error::DbError; use super::{ batch, error::DbError, + diesel_ext::LockInShareModeDsl, pool::CollectionCache, schema::{bso, collections, user_collections}, DbResult, diff --git a/syncstorage-mysql/src/pool.rs b/syncstorage-mysql/src/pool.rs index ea6030b9b5..088f2c90c4 100644 --- a/syncstorage-mysql/src/pool.rs +++ b/syncstorage-mysql/src/pool.rs @@ -20,8 +20,9 @@ use syncserver_db_common::test::TestTransactionCustomizer; use syncserver_db_common::{GetPoolState, PoolState}; use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; +use syncstorage_sql_db_common::error::DbError; -use super::{error::DbError, models::MysqlDb, DbResult}; +use super::{models::MysqlDb, DbResult}; embed_migrations!(); diff --git a/syncstorage-sql-db-common/Cargo.toml b/syncstorage-sql-db-common/Cargo.toml new file mode 100644 index 0000000000..c15b81c722 --- /dev/null +++ b/syncstorage-sql-db-common/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "syncstorage-sql-db-common" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace=true +base64.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["sqlite", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +url = "2.1" + +[dev-dependencies] +env_logger.workspace=true +syncserver-settings = { path = "../syncserver-settings" } diff --git a/syncstorage-mysql/src/error.rs b/syncstorage-sql-db-common/src/error.rs similarity index 89% rename from syncstorage-mysql/src/error.rs rename to syncstorage-sql-db-common/src/error.rs index 7ab200a439..96027efadc 100644 --- a/syncstorage-mysql/src/error.rs +++ b/syncstorage-sql-db-common/src/error.rs @@ -49,7 +49,7 @@ enum DbErrorKind { Common(SyncstorageDbError), #[error("{}", _0)] - Mysql(SqlError), + Sql(SqlError), } impl From for DbError { @@ -95,35 +95,35 @@ impl ReportableError for DbError { fn reportable_source(&self) -> Option<&(dyn ReportableError + 'static)> { Some(match &self.kind { DbErrorKind::Common(e) => e, - DbErrorKind::Mysql(e) => e, + DbErrorKind::Sql(e) => e, }) } fn is_sentry_event(&self) -> bool { match &self.kind { 
DbErrorKind::Common(e) => e.is_sentry_event(), - DbErrorKind::Mysql(e) => e.is_sentry_event(), + DbErrorKind::Sql(e) => e.is_sentry_event(), } } fn metric_label(&self) -> Option<&str> { match &self.kind { DbErrorKind::Common(e) => e.metric_label(), - DbErrorKind::Mysql(e) => e.metric_label(), + DbErrorKind::Sql(e) => e.metric_label(), } } fn backtrace(&self) -> Option<&Backtrace> { match &self.kind { DbErrorKind::Common(e) => e.backtrace(), - DbErrorKind::Mysql(e) => e.backtrace(), + DbErrorKind::Sql(e) => e.backtrace(), } } fn tags(&self) -> Vec<(&str, String)> { match &self.kind { DbErrorKind::Common(e) => e.tags(), - DbErrorKind::Mysql(e) => e.tags(), + DbErrorKind::Sql(e) => e.tags(), } } } @@ -140,24 +140,22 @@ from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); from_error!( diesel::result::Error, DbError, - |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(SqlError::from(error))) + |error: diesel::result::Error| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel::result::ConnectionError, DbError, - |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(SqlError::from( - error - ))) + |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel::r2d2::PoolError, DbError, - |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(SqlError::from(error))) + |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Sql(SqlError::from(error))) ); from_error!( diesel_migrations::RunMigrationsError, DbError, - |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql( - SqlError::from(error) - )) + |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Sql(SqlError::from( + error + ))) ); diff --git a/syncstorage-sql-db-common/src/lib.rs b/syncstorage-sql-db-common/src/lib.rs new file mode 100644 index 0000000000..a91e735174 --- /dev/null +++ b/syncstorage-sql-db-common/src/lib.rs @@ -0,0 +1 @@ +pub mod error; diff --git a/syncstorage-sqlite/Cargo.toml b/syncstorage-sqlite/Cargo.toml new file mode 100644 index 0000000000..22a9eccfb6 --- /dev/null +++ b/syncstorage-sqlite/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "syncstorage-sqlite" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +base64.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true +thiserror.workspace = true +diesel_logger.workspace = true + +async-trait = "0.1.40" +diesel = { workspace = true, features = ["sqlite", "r2d2"] } +diesel_migrations = { workspace = true, features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sqlite"] } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-sql-db-common = { path = "../syncstorage-sql-db-common" } +url = "2.1" + +[dev-dependencies] +env_logger.workspace=true +syncserver-settings = { path = "../syncserver-settings" } diff --git a/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql new file mode 100644 index 0000000000..63a99dd2eb --- /dev/null +++ b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/down.sql @@ -0,0 +1,8 @@ +-- DROP INDEX IF EXISTS `bso_expiry_idx`; +-- DROP INDEX IF EXISTS `bso_usr_col_mod_idx`; + +-- DROP TABLE IF 
EXISTS `bso`; +-- DROP TABLE IF EXISTS `collections`; +-- DROP TABLE IF EXISTS `user_collections`; +-- DROP TABLE IF EXISTS `batch_uploads`; +-- DROP TABLE IF EXISTS `batch_upload_items`; diff --git a/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql new file mode 100644 index 0000000000..2fd4aeb2ca --- /dev/null +++ b/syncstorage-sqlite/migrations/2024-01-19-131212_Init/up.sql @@ -0,0 +1,79 @@ +-- XXX: bsov1, etc +-- We use Bigint for some fields instead of Integer, even though Sqlite does not have the concept of Bigint, +-- to allow diesel to assume that integer can be mapped to i64. See https://github.com/diesel-rs/diesel/issues/852 + + +CREATE TABLE IF NOT EXISTS `bso` +( + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + `id` TEXT NOT NULL, + + `sortindex` INTEGER, + + `payload` TEXT NOT NULL, + `payload_size` BIGINT DEFAULT 0, + + -- last modified time in milliseconds since epoch + `modified` BIGINT NOT NULL, + -- expiration in milliseconds since epoch + `ttl` BIGINT DEFAULT '3153600000000' NOT NULL, + + PRIMARY KEY (`userid`, `collection`, `id`) +); +CREATE INDEX IF NOT EXISTS `bso_expiry_idx` ON `bso` (`ttl`); +CREATE INDEX IF NOT EXISTS `bso_usr_col_mod_idx` ON `bso` (`userid`, `collection`, `modified`); + +CREATE TABLE IF NOT EXISTS `collections` +( + `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + `name` TEXT UNIQUE NOT NULL +); +INSERT INTO collections (id, name) +VALUES (1, 'clients'), + (2, 'crypto'), + (3, 'forms'), + (4, 'history'), + (5, 'keys'), + (6, 'meta'), + (7, 'bookmarks'), + (8, 'prefs'), + (9, 'tabs'), + (10, 'passwords'), + (11, 'addons'), + (12, 'addresses'), + (13, 'creditcards'), + -- Reserve space for additions to the standard collections + (100, ''); + + +CREATE TABLE IF NOT EXISTS `user_collections` +( + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + -- last modified time in milliseconds since epoch + `last_modified` BIGINT NOT NULL, + `total_bytes` BIGINT, + `count` INTEGER, + PRIMARY KEY (`userid`, `collection`) +); + +CREATE TABLE IF NOT EXISTS `batch_uploads` +( + `batch` BIGINT NOT NULL, + `userid` BIGINT NOT NULL, + `collection` INTEGER NOT NULL, + PRIMARY KEY (`batch`, `userid`) +); + +CREATE TABLE IF NOT EXISTS `batch_upload_items` +( + `batch` BIGINT NOT NULL, + `userid` BIGINT NOT NULL, + `id` TEXT NOT NULL, + `sortindex` INTEGER DEFAULT NULL, + `payload` TEXT, + `payload_size` BIGINT DEFAULT NULL, + `ttl_offset` INTEGER DEFAULT NULL, + PRIMARY KEY (`batch`, `userid`, `id`) +); diff --git a/syncstorage-sqlite/src/batch.rs b/syncstorage-sqlite/src/batch.rs new file mode 100644 index 0000000000..d452b78222 --- /dev/null +++ b/syncstorage-sqlite/src/batch.rs @@ -0,0 +1,278 @@ +use base64::Engine; +use std::collections::HashSet; + +use diesel::{ + self, + dsl::sql, + insert_into, + result::{DatabaseErrorKind::UniqueViolation, Error as DieselError}, + sql_query, + sql_types::{BigInt, Integer}, + ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, +}; +use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; +use syncstorage_sql_db_common::error::DbError; + +use super::{ + models::SqliteDb, + schema::{batch_upload_items, batch_uploads}, + DbResult, +}; + +const MAXTTL: i32 = 2_100_000_000; + +pub fn create(db: &SqliteDb, params: params::CreateBatch) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + // Careful, there's some weirdness here! 
+ // + // Sync timestamps are in seconds and quantized to two decimal places, so + // when we convert one to a bigint in milliseconds, the final digit is + // always zero. But we want to use the lower digits of the batchid for + // sharding writes via (batchid % num_tables), and leaving it as zero would + // skew the sharding distribution. + // + // So we mix in the lowest digit of the uid to improve the distribution + // while still letting us treat these ids as millisecond timestamps. It's + // yuck, but it works and it keeps the weirdness contained to this single + // line of code. + let batch_id = db.timestamp().as_i64() + (user_id % 10); + insert_into(batch_uploads::table) + .values(( + batch_uploads::batch_id.eq(&batch_id), + batch_uploads::user_id.eq(&user_id), + batch_uploads::collection_id.eq(&collection_id), + )) + .execute(&db.conn) + .map_err(|e| -> DbError { + match e { + // The user tried to create two batches with the same timestamp + DieselError::DatabaseError(UniqueViolation, _) => DbError::conflict(), + _ => e.into(), + } + })?; + + do_append(db, batch_id, params.user_id, collection_id, params.bsos)?; + Ok(results::CreateBatch { + id: encode_id(batch_id), + size: None, + }) +} + +pub fn validate(db: &SqliteDb, params: params::ValidateBatch) -> DbResult { + let batch_id = decode_id(¶ms.id)?; + // Avoid hitting the db for batches that are obviously too old. Recall + // that the batchid is a millisecond timestamp. + if (batch_id + BATCH_LIFETIME) < db.timestamp().as_i64() { + return Ok(false); + } + + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + let exists = batch_uploads::table + .select(sql::("1")) + .filter(batch_uploads::batch_id.eq(&batch_id)) + .filter(batch_uploads::user_id.eq(&user_id)) + .filter(batch_uploads::collection_id.eq(&collection_id)) + .get_result::(&db.conn) + .optional()?; + Ok(exists.is_some()) +} + +pub fn append(db: &SqliteDb, params: params::AppendToBatch) -> DbResult<()> { + let exists = validate( + db, + params::ValidateBatch { + user_id: params.user_id.clone(), + collection: params.collection.clone(), + id: params.batch.id.clone(), + }, + )?; + + if !exists { + return Err(DbError::batch_not_found()); + } + + let batch_id = decode_id(¶ms.batch.id)?; + let collection_id = db.get_collection_id(¶ms.collection)?; + do_append(db, batch_id, params.user_id, collection_id, params.bsos)?; + Ok(()) +} + +pub fn get(db: &SqliteDb, params: params::GetBatch) -> DbResult> { + let is_valid = validate( + db, + params::ValidateBatch { + user_id: params.user_id, + collection: params.collection, + id: params.id.clone(), + }, + )?; + let batch = if is_valid { + Some(results::GetBatch { id: params.id }) + } else { + None + }; + Ok(batch) +} + +pub fn delete(db: &SqliteDb, params: params::DeleteBatch) -> DbResult<()> { + let batch_id = decode_id(¶ms.id)?; + let user_id = params.user_id.legacy_id as i64; + let collection_id = db.get_collection_id(¶ms.collection)?; + diesel::delete(batch_uploads::table) + .filter(batch_uploads::batch_id.eq(&batch_id)) + .filter(batch_uploads::user_id.eq(&user_id)) + .filter(batch_uploads::collection_id.eq(&collection_id)) + .execute(&db.conn)?; + diesel::delete(batch_upload_items::table) + .filter(batch_upload_items::batch_id.eq(&batch_id)) + .filter(batch_upload_items::user_id.eq(&user_id)) + .execute(&db.conn)?; + Ok(()) +} + +/// Commits a batch to the bsos table, deleting the batch when succesful +pub fn commit(db: &SqliteDb, params: params::CommitBatch) -> DbResult { + 
+    let batch_id = decode_id(&params.batch.id)?;
+    let user_id = params.user_id.legacy_id as i64;
+    let collection_id = db.get_collection_id(&params.collection)?;
+    let timestamp = db.timestamp();
+    sql_query(include_str!("batch_commit.sql"))
+        .bind::<BigInt, _>(user_id)
+        .bind::<Integer, _>(&collection_id)
+        .bind::<BigInt, _>(&db.timestamp().as_i64())
+        .bind::<BigInt, _>(&db.timestamp().as_i64())
+        .bind::<BigInt, _>((MAXTTL as i64) * 1000) // XXX:
+        .bind::<BigInt, _>(&batch_id)
+        .bind::<BigInt, _>(user_id)
+        .bind::<BigInt, _>(&db.timestamp().as_i64())
+        .execute(&db.conn)?;
+
+    db.update_collection(user_id as u32, collection_id)?;
+
+    delete(
+        db,
+        params::DeleteBatch {
+            user_id: params.user_id,
+            collection: params.collection,
+            id: params.batch.id,
+        },
+    )?;
+    Ok(timestamp)
+}
+
+pub fn do_append(
+    db: &SqliteDb,
+    batch_id: i64,
+    user_id: UserIdentifier,
+    _collection_id: i32,
+    bsos: Vec<params::PostCollectionBso>,
+) -> DbResult<()> {
+    fn exist_idx(user_id: u64, batch_id: i64, bso_id: &str) -> String {
+        // Construct something that matches the key for batch_upload_items
+        format!(
+            "{batch_id}-{user_id}-{bso_id}",
+            batch_id = batch_id,
+            user_id = user_id,
+            bso_id = bso_id,
+        )
+    }
+
+    // It's possible for the list of items to contain a duplicate key entry.
+    // This means that we can't really call `ON CONFLICT` here, because that's
+    // more about inserting one item at a time. (e.g. it works great if the
+    // values contain a key that's already in the database, less so if the
+    // duplicate is in the value set we're inserting.)
+    #[derive(Debug, QueryableByName)]
+    #[table_name = "batch_upload_items"]
+    struct ExistsResult {
+        batch_id: i64,
+        id: String,
+    }
+
+    #[derive(AsChangeset)]
+    #[table_name = "batch_upload_items"]
+    struct UpdateBatches {
+        payload: Option<String>,
+        payload_size: Option<i64>,
+        ttl_offset: Option<i32>,
+    }
+
+    let mut existing = HashSet::new();
+
+    // pre-load the "existing" hashset with any batched uploads that are already in the table.
+    for item in sql_query(
+        "SELECT userid as user_id, batch as batch_id, id FROM batch_upload_items WHERE userid=? AND batch=?;",
+    )
+    .bind::<BigInt, _>(user_id.legacy_id as i64)
+    .bind::<BigInt, _>(batch_id)
+    .get_results::<ExistsResult>(&db.conn)?
+    {
+        existing.insert(exist_idx(
+            user_id.legacy_id,
+            item.batch_id,
+            &item.id.to_string(),
+        ));
+    }
+
+    for bso in bsos {
+        let payload_size = bso.payload.as_ref().map(|p| p.len() as i64);
+        let exist_idx = exist_idx(user_id.legacy_id, batch_id, &bso.id);
+
+        if existing.contains(&exist_idx) {
+            diesel::update(
+                batch_upload_items::table
+                    .filter(batch_upload_items::user_id.eq(user_id.legacy_id as i64))
+                    .filter(batch_upload_items::batch_id.eq(batch_id)),
+            )
+            .set(&UpdateBatches {
+                payload: bso.payload,
+                payload_size,
+                ttl_offset: bso.ttl.map(|ttl| ttl as i32),
+            })
+            .execute(&db.conn)?;
+        } else {
+            diesel::insert_into(batch_upload_items::table)
+                .values((
+                    batch_upload_items::batch_id.eq(&batch_id),
+                    batch_upload_items::user_id.eq(user_id.legacy_id as i64),
+                    batch_upload_items::id.eq(bso.id.clone()),
+                    batch_upload_items::sortindex.eq(bso.sortindex),
+                    batch_upload_items::payload.eq(bso.payload),
+                    batch_upload_items::payload_size.eq(payload_size),
+                ))
+                .execute(&db.conn)?;
+            // make sure to include the key into our table check.
+            existing.insert(exist_idx);
+        }
+    }
+
+    Ok(())
+}
+
+pub fn validate_batch_id(id: &str) -> DbResult<()> {
+    decode_id(id).map(|_| ())
+}
+
+fn encode_id(id: i64) -> String {
+    base64::engine::general_purpose::STANDARD.encode(id.to_string())
+}
+
+fn decode_id(id: &str) -> DbResult<i64> {
+    let bytes = base64::engine::general_purpose::STANDARD
+        .decode(id)
+        .unwrap_or_else(|_| id.as_bytes().to_vec());
+    let decoded = std::str::from_utf8(&bytes).unwrap_or(id);
+    decoded
+        .parse::<i64>()
+        .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e)))
+}
+
+macro_rules! batch_db_method {
+    ($name:ident, $batch_name:ident, $type:ident) => {
+        pub fn $name(&self, params: params::$type) -> DbResult<results::$type> {
+            batch::$batch_name(self, params)
+        }
+    };
+}
diff --git a/syncstorage-sqlite/src/batch_commit.sql b/syncstorage-sqlite/src/batch_commit.sql
new file mode 100644
index 0000000000..63728e5f8a
--- /dev/null
+++ b/syncstorage-sqlite/src/batch_commit.sql
@@ -0,0 +1,19 @@
+INSERT INTO bso (userid, collection, id, modified, sortindex, ttl, payload, payload_size)
+SELECT
+    ?,
+    ?,
+    id,
+    ?,
+    sortindex,
+    COALESCE((ttl_offset * 1000) + ?, ?) as ttl,
+    COALESCE(payload, '') as payload,
+    COALESCE(payload_size, 0) as payload_size
+    FROM batch_upload_items
+    WHERE batch = ?
+    AND userid = ?
+ON CONFLICT(userid, collection, id) DO UPDATE SET
+    modified = ?,
+    sortindex = COALESCE(excluded.sortindex, bso.sortindex),
+    ttl = COALESCE(excluded.ttl, bso.ttl),
+    payload = COALESCE(NULLIF(excluded.payload, ''), bso.payload),
+    payload_size = COALESCE(excluded.payload_size, bso.payload_size)
diff --git a/syncstorage-sqlite/src/diesel_ext.rs b/syncstorage-sqlite/src/diesel_ext.rs
new file mode 100644
index 0000000000..153a4cfd5f
--- /dev/null
+++ b/syncstorage-sqlite/src/diesel_ext.rs
@@ -0,0 +1,50 @@
+use core::fmt;
+
+use diesel::{
+    backend::Backend,
+    insertable::CanInsertInSingleQuery,
+    query_builder::{AstPass, InsertStatement, QueryFragment, QueryId},
+    result::QueryResult,
+    sqlite::Sqlite,
+    Expression, RunQueryDsl, Table,
+};
+
+#[derive(Debug, Clone, Copy, QueryId)]
+pub struct LockInShareMode;
+
+impl QueryFragment<Sqlite> for LockInShareMode {
+    fn walk_ast(&self, mut out: AstPass<'_, Sqlite>) -> QueryResult<()> {
+        out.push_sql(" LOCK IN SHARE MODE");
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct OnDuplicateKeyUpdate<T, U, Op, Ret, X>(Box<InsertStatement<T, U, Op, Ret>>, X);
+
+impl<T, U, Op, Ret, DB, X> QueryFragment<DB> for OnDuplicateKeyUpdate<T, U, Op, Ret, X>
+where
+    DB: Backend,
+    T: Table,
+    T::FromClause: QueryFragment<DB>,
+    U: QueryFragment<DB> + CanInsertInSingleQuery<DB>,
+    Op: QueryFragment<DB>,
+    Ret: QueryFragment<DB>,
+    X: Expression + fmt::Debug,
+{
+    fn walk_ast(&self, mut out: AstPass<'_, DB>) -> QueryResult<()> {
+        self.0.walk_ast(out.reborrow())?;
+        out.push_sql(" ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET ");
+        //self.1.walk_ast(out.reborrow())?;
+        debug!("{:?}", self.1);
+        Ok(())
+    }
+}
+
+impl<T, U, Op, Ret, Conn, X> RunQueryDsl<Conn> for OnDuplicateKeyUpdate<T, U, Op, Ret, X> {}
+
+impl<T, U, Op, Ret, X> QueryId for OnDuplicateKeyUpdate<T, U, Op, Ret, X> {
+    type QueryId = ();
+
+    const HAS_STATIC_QUERY_ID: bool = false;
+}
diff --git a/syncstorage-sqlite/src/lib.rs b/syncstorage-sqlite/src/lib.rs
new file mode 100644
index 0000000000..43be80eabb
--- /dev/null
+++ b/syncstorage-sqlite/src/lib.rs
@@ -0,0 +1,22 @@
+#[macro_use]
+extern crate diesel;
+#[macro_use]
+extern crate diesel_migrations;
+#[macro_use]
+extern crate slog_scope;
+
+#[macro_use]
+mod batch;
+mod diesel_ext;
+mod models;
+mod pool;
+mod schema;
+#[cfg(test)]
+mod test;
+mod wal;
+
+pub use models::SqliteDb;
+pub use pool::SqliteDbPool;
+pub use syncstorage_sql_db_common::error::DbError;
+
+pub(crate) type DbResult<T> = Result<T, DbError>;
diff --git a/syncstorage-sqlite/src/models.rs b/syncstorage-sqlite/src/models.rs
new file mode 100644
index 0000000000..97609ad8de
--- /dev/null
+++ b/syncstorage-sqlite/src/models.rs
@@ -0,0 +1,1147 @@
+use futures::future::TryFutureExt;
+
+use std::{self, cell::RefCell, collections::HashMap, fmt, ops::Deref, sync::Arc};
+
+use diesel::{
+    connection::TransactionManager,
+    delete,
+    dsl::max,
+    expression::sql_literal::sql,
+    r2d2::{ConnectionManager, PooledConnection},
+    sql_query,
+    sql_types::{BigInt, Integer, Nullable, Text},
+    sqlite::SqliteConnection,
+    Connection, ExpressionMethods, GroupByDsl, OptionalExtension, QueryDsl, RunQueryDsl,
+};
+#[cfg(debug_assertions)]
+use diesel_logger::LoggingConnection;
+use syncserver_common::{BlockingThreadpool, Metrics};
+use syncserver_db_common::{sync_db_method, DbFuture};
+use syncstorage_db_common::{
+    error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier,
+    DEFAULT_BSO_TTL,
+};
+use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS};
+use syncstorage_sql_db_common::error::DbError;
+
+use super::{
+    batch,
+    pool::CollectionCache,
+    schema::{bso, collections, user_collections},
+    DbResult,
+};
+
+type Conn = PooledConnection<ConnectionManager<SqliteConnection>>;
+
+// this is the max number of records we will return.
+static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS;
+
+const TOMBSTONE: i32 = 0;
+/// SQL Variable remapping
+/// These names are the legacy values mapped to the new names.
+const COLLECTION_ID: &str = "collection";
+const USER_ID: &str = "userid";
+const MODIFIED: &str = "modified";
+const EXPIRY: &str = "ttl";
+const LAST_MODIFIED: &str = "last_modified";
+const COUNT: &str = "count";
+const TOTAL_BYTES: &str = "total_bytes";
+
+#[derive(Debug)]
+enum CollectionLock {
+    Read,
+    Write,
+}
+
+/// Per session Db metadata
+#[derive(Debug, Default)]
+struct SqliteDbSession {
+    /// The "current time" on the server used for this session's operations
+    timestamp: SyncTimestamp,
+    /// Cache of collection modified timestamps per (user_id, collection_id)
+    coll_modified_cache: HashMap<(u32, i32), SyncTimestamp>,
+    /// Currently locked collections
+    coll_locks: HashMap<(u32, i32), CollectionLock>,
+    /// Whether a transaction was started (begin() called)
+    in_transaction: bool,
+    in_write_transaction: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct SqliteDb {
+    /// Synchronous Diesel calls are executed in actix_web::web::block to satisfy
+    /// the Db trait's asynchronous interface.
+    ///
+    /// Arc<SqliteDbInner> provides a Clone impl utilized for safely moving to
+    /// the thread pool but does not provide Send as the underlying db
+    /// conn. structs are !Sync (Arc requires both for Send). See the Send impl
+    /// below.
+    pub(super) inner: Arc<SqliteDbInner>,
+
+    /// Pool level cache of collection_ids and their names
+    coll_cache: Arc<CollectionCache>,
+
+    pub metrics: Metrics,
+    pub quota: Quota,
+    blocking_threadpool: Arc<BlockingThreadpool>,
+}
+
+/// Despite the db conn structs being !Sync (see Arc above) we
+/// don't spawn multiple SqliteDb calls at a time in the thread pool. Calls are
+/// queued to the thread pool via Futures, naturally serialized.
+unsafe impl Send for SqliteDb {} + +pub struct SqliteDbInner { + #[cfg(not(debug_assertions))] + pub(super) conn: Conn, + #[cfg(debug_assertions)] + pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" + + session: RefCell, +} + +impl fmt::Debug for SqliteDbInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SqliteDbInner {{ session: {:?} }}", self.session) + } +} + +impl Deref for SqliteDb { + type Target = SqliteDbInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl SqliteDb { + pub(super) fn new( + conn: Conn, + coll_cache: Arc, + metrics: &Metrics, + quota: &Quota, + blocking_threadpool: Arc, + ) -> Self { + let inner = SqliteDbInner { + #[cfg(not(debug_assertions))] + conn, + #[cfg(debug_assertions)] + conn: LoggingConnection::new(conn), + session: RefCell::new(Default::default()), + }; + // https://github.com/mozilla-services/syncstorage-rs/issues/1480 + #[allow(clippy::arc_with_non_send_sync)] + SqliteDb { + inner: Arc::new(inner), + coll_cache, + metrics: metrics.clone(), + quota: *quota, + blocking_threadpool, + } + } + + /// APIs for collection-level locking + /// + /// Explicitly lock the matching row in the user_collections table. Read + /// locks do SELECT ... LOCK IN SHARE MODE and write locks do SELECT + /// ... FOR UPDATE. + /// + /// In theory it would be possible to use serializable transactions rather + /// than explicit locking, but our ops team have expressed concerns about + /// the efficiency of that approach at scale. + pub fn lock_for_read_sync(&self, params: params::LockCollection) -> DbResult<()> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection).or_else(|e| { + if e.is_collection_not_found() { + // If the collection doesn't exist, we still want to start a + // transaction so it will continue to not exist. + Ok(0) + } else { + Err(e) + } + })?; + // If we already have a read or write lock then it's safe to + // use it as-is. + if self + .session + .borrow() + .coll_locks + .contains_key(&(user_id as u32, collection_id)) + { + return Ok(()); + } + + // Lock the db + self.begin(false)?; + let modified = user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(collection_id)) + //.lock_in_share_mode() + .first(&self.conn) + .optional()?; + if let Some(modified) = modified { + let modified = SyncTimestamp::from_i64(modified)?; + self.session + .borrow_mut() + .coll_modified_cache + .insert((user_id as u32, collection_id), modified); // why does it still expect a u32 int? 
+ } + // XXX: who's responsible for unlocking (removing the entry) + self.session + .borrow_mut() + .coll_locks + .insert((user_id as u32, collection_id), CollectionLock::Read); + Ok(()) + } + + pub fn lock_for_write_sync(&self, params: params::LockCollection) -> DbResult<()> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_or_create_collection_id(¶ms.collection)?; + if let Some(CollectionLock::Read) = self + .session + .borrow() + .coll_locks + .get(&(user_id as u32, collection_id)) + { + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); + } + + // Lock the db + self.begin(true)?; + let modified = user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(collection_id)) + // .for_update() + .first(&self.conn) + .optional()?; + if let Some(modified) = modified { + let modified = SyncTimestamp::from_i64(modified)?; + // Forbid the write if it would not properly incr the timestamp + if modified >= self.timestamp() { + return Err(DbError::conflict()); + } + self.session + .borrow_mut() + .coll_modified_cache + .insert((user_id as u32, collection_id), modified); + } + self.session + .borrow_mut() + .coll_locks + .insert((user_id as u32, collection_id), CollectionLock::Write); + Ok(()) + } + + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { + self.conn + .transaction_manager() + .begin_transaction(&self.conn)?; + self.session.borrow_mut().in_transaction = true; + if for_write { + self.session.borrow_mut().in_write_transaction = true; + } + Ok(()) + } + + pub async fn begin_async(&self, for_write: bool) -> DbResult<()> { + self.begin(for_write) + } + + pub fn commit_sync(&self) -> DbResult<()> { + if self.session.borrow().in_transaction { + self.conn + .transaction_manager() + .commit_transaction(&self.conn)?; + } + Ok(()) + } + + pub fn rollback_sync(&self) -> DbResult<()> { + if self.session.borrow().in_transaction { + self.conn + .transaction_manager() + .rollback_transaction(&self.conn)?; + } + Ok(()) + } + + fn erect_tombstone(&self, user_id: i32) -> DbResult<()> { + sql_query(format!( + r#"INSERT INTO user_collections ({user_id}, {collection_id}, {modified}) + VALUES (?, ?, ?) + ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET + {modified} = excluded.{modified}"#, + user_id = USER_ID, + collection_id = COLLECTION_ID, + modified = LAST_MODIFIED + )) + .bind::(user_id as i64) + .bind::(TOMBSTONE) + .bind::(self.timestamp().as_i64()) + .execute(&self.conn)?; + Ok(()) + } + + pub fn delete_storage_sync(&self, user_id: UserIdentifier) -> DbResult<()> { + let user_id = user_id.legacy_id as i64; + // Delete user data. + delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .execute(&self.conn)?; + // Delete user collections. 
+ delete(user_collections::table) + .filter(user_collections::user_id.eq(user_id)) + .execute(&self.conn)?; + Ok(()) + } + + // Deleting the collection should result in: + // - collection does not appear in /info/collections + // - X-Last-Modified timestamp at the storage level changing + pub fn delete_collection_sync( + &self, + params: params::DeleteCollection, + ) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let mut count = delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .execute(&self.conn)?; + count += delete(user_collections::table) + .filter(user_collections::user_id.eq(user_id)) + .filter(user_collections::collection_id.eq(&collection_id)) + .execute(&self.conn)?; + if count == 0 { + return Err(DbError::collection_not_found()); + } else { + self.erect_tombstone(user_id as i32)?; + } + self.get_storage_timestamp_sync(params.user_id) + } + + pub(super) fn get_or_create_collection_id(&self, name: &str) -> DbResult { + if let Some(id) = self.coll_cache.get_id(name)? { + return Ok(id); + } + + let id = self.conn.transaction(|| { + diesel::insert_or_ignore_into(collections::table) + .values(collections::name.eq(name)) + .execute(&self.conn)?; + + collections::table + .select(collections::id) + .filter(collections::name.eq(name)) + .first(&self.conn) + })?; + + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name.to_owned())?; + } + + Ok(id) + } + + pub(super) fn get_collection_id(&self, name: &str) -> DbResult { + if let Some(id) = self.coll_cache.get_id(name)? { + return Ok(id); + } + + let id = sql_query( + "SELECT id + FROM collections + WHERE name = ?", + ) + .bind::(name) + .get_result::(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found)? + .id; + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name.to_owned())?; + } + Ok(id) + } + + fn _get_collection_name(&self, id: i32) -> DbResult { + let name = if let Some(name) = self.coll_cache.get_name(id)? { + name + } else { + sql_query( + "SELECT name + FROM collections + WHERE id = ?", + ) + .bind::(&id) + .get_result::(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found)? 
+ .name + }; + Ok(name) + } + + pub fn put_bso_sync(&self, bso: params::PutBso) -> DbResult { + /* + if bso.payload.is_none() && bso.sortindex.is_none() && bso.ttl.is_none() { + // XXX: go returns an error here (ErrNothingToDo), and is treated + // as other errors + return Ok(()); + } + */ + + let collection_id = self.get_or_create_collection_id(&bso.collection)?; + let user_id: u64 = bso.user_id.legacy_id; + let timestamp = self.timestamp().as_i64(); + if self.quota.enabled { + let usage = self.get_quota_usage_sync(params::GetQuotaUsage { + user_id: bso.user_id.clone(), + collection: bso.collection.clone(), + collection_id, + })?; + if usage.total_bytes >= self.quota.size { + let mut tags = HashMap::default(); + tags.insert("collection".to_owned(), bso.collection.clone()); + self.metrics.incr_with_tags("storage.quota.at_limit", tags); + if self.quota.enforced { + return Err(DbError::quota()); + } else { + warn!("Quota at limit for user's collection ({} bytes)", usage.total_bytes; "collection"=>bso.collection.clone()); + } + } + } + + self.conn.transaction(|| { + let payload = bso.payload.as_deref().unwrap_or_default(); + let sortindex = bso.sortindex; + let ttl = bso.ttl.map_or(DEFAULT_BSO_TTL, |ttl| ttl); + let q = format!(r#" + INSERT INTO bso ({user_id}, {collection_id}, id, sortindex, payload, {modified}, {expiry}) + VALUES (?, ?, ?, ?, ?, ?, ?) + ON CONFLICT({user_id}, {collection_id}, id) DO UPDATE SET + {user_id} = excluded.{user_id}, + {collection_id} = excluded.{collection_id}, + id = excluded.id + "#, user_id=USER_ID, modified=MODIFIED, collection_id=COLLECTION_ID, expiry=EXPIRY); + let q = format!( + "{}{}", + q, + if bso.sortindex.is_some() { + ", sortindex = excluded.sortindex" + } else { + "" + }, + ); + let q = format!( + "{}{}", + q, + if bso.payload.is_some() { + ", payload = excluded.payload" + } else { + "" + }, + ); + let q = format!( + "{}{}", + q, + if bso.ttl.is_some() { + format!(", {expiry} = excluded.{expiry}", expiry=EXPIRY) + } else { + "".to_owned() + }, + ); + let q = format!( + "{}{}", + q, + if bso.payload.is_some() || bso.sortindex.is_some() { + format!(", {modified} = excluded.{modified}", modified=MODIFIED) + } else { + "".to_owned() + }, + ); + sql_query(q) + .bind::(user_id as i64) // XXX: + .bind::(&collection_id) + .bind::(&bso.id) + .bind::, _>(sortindex) + .bind::(payload) + .bind::(timestamp) + .bind::(timestamp + (i64::from(ttl) * 1000)) // remember: this is in millis + .execute(&self.conn)?; + self.update_collection(user_id as u32, collection_id) + }) + } + + pub fn get_bsos_sync(&self, params: params::GetBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let now = self.timestamp().as_i64(); + let mut query = bso::table + .select(( + bso::id, + bso::modified, + bso::payload, + bso::sortindex, + bso::expiry, + )) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(collection_id)) // XXX: + .filter(bso::expiry.gt(now)) + .into_boxed(); + + if let Some(older) = params.older { + query = query.filter(bso::modified.lt(older.as_i64())); + } + if let Some(newer) = params.newer { + query = query.filter(bso::modified.gt(newer.as_i64())); + } + + if !params.ids.is_empty() { + query = query.filter(bso::id.eq_any(params.ids)); + } + + // it's possible for two BSOs to be inserted with the same `modified` date, + // since there's no guarantee of order when doing a get, pagination can return + // an error. 
We "fudge" a bit here by taking the id order as a secondary, since + // that is guaranteed to be unique by the client. + query = match params.sort { + // issue559: Revert to previous sorting + /* + Sorting::Index => query.order(bso::id.desc()).order(bso::sortindex.desc()), + Sorting::Newest | Sorting::None => { + query.order(bso::id.desc()).order(bso::modified.desc()) + } + Sorting::Oldest => query.order(bso::id.asc()).order(bso::modified.asc()), + */ + Sorting::Index => query.order(bso::sortindex.desc()), + Sorting::Newest => query.order((bso::modified.desc(), bso::id.desc())), + Sorting::Oldest => query.order((bso::modified.asc(), bso::id.asc())), + _ => query, + }; + + let limit = params + .limit + .map(i64::from) + .unwrap_or(DEFAULT_LIMIT as i64) + .max(0); + // fetch an extra row to detect if there are more rows that + // match the query conditions + query = query.limit(if limit > 0 { limit + 1 } else { limit }); + + let numeric_offset = params.offset.map_or(0, |offset| offset.offset as i64); + + if numeric_offset > 0 { + // XXX: copy over this optimization: + // https://github.com/mozilla-services/server-syncstorage/blob/a0f8117/syncstorage/storage/sql/__init__.py#L404 + query = query.offset(numeric_offset); + } + let mut bsos = query.load::(&self.conn)?; + + // XXX: an additional get_collection_timestamp is done here in + // python to trigger potential CollectionNotFoundErrors + //if bsos.len() == 0 { + //} + + let next_offset = if limit >= 0 && bsos.len() > limit as usize { + bsos.pop(); + Some((limit + numeric_offset).to_string()) + } else { + // if an explicit "limit=0" is sent, return the offset of "0" + // Otherwise, this would break at least the db::tests::db::get_bsos_limit_offset + // unit test. + if limit == 0 { + Some(0.to_string()) + } else { + None + } + }; + + Ok(results::GetBsos { + items: bsos, + offset: next_offset, + }) + } + + pub fn get_bso_ids_sync(&self, params: params::GetBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let mut query = bso::table + .select(bso::id) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(collection_id)) // XXX: + .filter(bso::expiry.gt(self.timestamp().as_i64())) + .into_boxed(); + + if let Some(older) = params.older { + query = query.filter(bso::modified.lt(older.as_i64())); + } + if let Some(newer) = params.newer { + query = query.filter(bso::modified.gt(newer.as_i64())); + } + + if !params.ids.is_empty() { + query = query.filter(bso::id.eq_any(params.ids)); + } + + query = match params.sort { + Sorting::Index => query.order(bso::sortindex.desc()), + Sorting::Newest => query.order(bso::modified.desc()), + Sorting::Oldest => query.order(bso::modified.asc()), + _ => query, + }; + + // negative limits are no longer allowed by mysql. + let limit = params + .limit + .map(i64::from) + .unwrap_or(DEFAULT_LIMIT as i64) + .max(0); + // fetch an extra row to detect if there are more rows that + // match the query conditions. Negative limits will cause an error. 
+ query = query.limit(if limit == 0 { limit } else { limit + 1 }); + let numeric_offset = params.offset.map_or(0, |offset| offset.offset as i64); + if numeric_offset != 0 { + // XXX: copy over this optimization: + // https://github.com/mozilla-services/server-syncstorage/blob/a0f8117/syncstorage/storage/sql/__init__.py#L404 + query = query.offset(numeric_offset); + } + let mut ids = query.load::(&self.conn)?; + + // XXX: an additional get_collection_timestamp is done here in + // python to trigger potential CollectionNotFoundErrors + //if bsos.len() == 0 { + //} + + let next_offset = if limit >= 0 && ids.len() > limit as usize { + ids.pop(); + Some((limit + numeric_offset).to_string()) + } else { + None + }; + + Ok(results::GetBsoIds { + items: ids, + offset: next_offset, + }) + } + + pub fn get_bso_sync(&self, params: params::GetBso) -> DbResult> { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + Ok(bso::table + .select(( + bso::id, + bso::modified, + bso::payload, + bso::sortindex, + bso::expiry, + )) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(¶ms.id)) + .filter(bso::expiry.ge(self.timestamp().as_i64())) + .get_result::(&self.conn) + .optional()?) + } + + pub fn delete_bso_sync(&self, params: params::DeleteBso) -> DbResult { + let user_id = params.user_id.legacy_id; + let collection_id = self.get_collection_id(¶ms.collection)?; + let affected_rows = delete(bso::table) + .filter(bso::user_id.eq(user_id as i64)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(params.id)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .execute(&self.conn)?; + if affected_rows == 0 { + return Err(DbError::bso_not_found()); + } + self.update_collection(user_id as u32, collection_id) + } + + pub fn delete_bsos_sync(&self, params: params::DeleteBsos) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + delete(bso::table) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq_any(params.ids)) + .execute(&self.conn)?; + self.update_collection(user_id as u32, collection_id) + } + + pub fn post_bsos_sync(&self, input: params::PostBsos) -> DbResult { + let collection_id = self.get_or_create_collection_id(&input.collection)?; + let mut result = results::PostBsos { + modified: self.timestamp(), + success: Default::default(), + failed: input.failed, + }; + + for pbso in input.bsos { + let id = pbso.id; + let put_result = self.put_bso_sync(params::PutBso { + user_id: input.user_id.clone(), + collection: input.collection.clone(), + id: id.clone(), + payload: pbso.payload, + sortindex: pbso.sortindex, + ttl: pbso.ttl, + }); + // XXX: python version doesn't report failures from db + // layer.. (wouldn't db failures abort the entire transaction + // anyway?) + // XXX: sanitize to.to_string()? + match put_result { + Ok(_) => result.success.push(id), + Err(e) => { + result.failed.insert(id, e.to_string()); + } + } + } + self.update_collection(input.user_id.legacy_id as u32, collection_id)?; + Ok(result) + } + + pub fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> DbResult { + let user_id = user_id.legacy_id as i64; + let modified = user_collections::table + .select(max(user_collections::modified)) + .filter(user_collections::user_id.eq(user_id)) + .first::>(&self.conn)? 
+ .unwrap_or_default(); + SyncTimestamp::from_i64(modified).map_err(Into::into) + } + + pub fn get_collection_timestamp_sync( + &self, + params: params::GetCollectionTimestamp, + ) -> DbResult { + let user_id = params.user_id.legacy_id as u32; + let collection_id = self.get_collection_id(¶ms.collection)?; + if let Some(modified) = self + .session + .borrow() + .coll_modified_cache + .get(&(user_id, collection_id)) + { + return Ok(*modified); + } + user_collections::table + .select(user_collections::modified) + .filter(user_collections::user_id.eq(user_id as i64)) + .filter(user_collections::collection_id.eq(collection_id)) + .first(&self.conn) + .optional()? + .ok_or_else(DbError::collection_not_found) + } + + pub fn get_bso_timestamp_sync( + &self, + params: params::GetBsoTimestamp, + ) -> DbResult { + let user_id = params.user_id.legacy_id as i64; + let collection_id = self.get_collection_id(¶ms.collection)?; + let modified = bso::table + .select(bso::modified) + .filter(bso::user_id.eq(user_id)) + .filter(bso::collection_id.eq(&collection_id)) + .filter(bso::id.eq(¶ms.id)) + .first::(&self.conn) + .optional()? + .unwrap_or_default(); + SyncTimestamp::from_i64(modified).map_err(Into::into) + } + + pub fn get_collection_timestamps_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let modifieds = sql_query(format!( + "SELECT {collection_id}, {modified} + FROM user_collections + WHERE {user_id} = ? + AND {collection_id} != ?", + collection_id = COLLECTION_ID, + user_id = USER_ID, + modified = LAST_MODIFIED + )) + .bind::(user_id.legacy_id as i64) + .bind::(TOMBSTONE) + .load::(&self.conn)? + .into_iter() + .map(|cr| { + SyncTimestamp::from_i64(cr.last_modified) + .map(|ts| (cr.collection, ts)) + .map_err(Into::into) + }) + .collect::>>()?; + self.map_collection_names(modifieds) + } + + fn check_sync(&self) -> DbResult { + // Check if querying works + sql_query("SELECT 1").execute(&self.conn)?; + Ok(true) + } + + fn map_collection_names(&self, by_id: HashMap) -> DbResult> { + let mut names = self.load_collection_names(by_id.keys())?; + by_id + .into_iter() + .map(|(id, value)| { + names.remove(&id).map(|name| (name, value)).ok_or_else(|| { + DbError::internal("load_collection_names unknown collection id".to_owned()) + }) + }) + .collect() + } + + fn load_collection_names<'a>( + &self, + collection_ids: impl Iterator, + ) -> DbResult> { + let mut names = HashMap::new(); + let mut uncached = Vec::new(); + for &id in collection_ids { + if let Some(name) = self.coll_cache.get_name(id)? { + names.insert(id, name); + } else { + uncached.push(id); + } + } + + if !uncached.is_empty() { + let result = collections::table + .select((collections::id, collections::name)) + .filter(collections::id.eq_any(uncached)) + .load::<(i32, String)>(&self.conn)?; + + for (id, name) in result { + names.insert(id, name.clone()); + if !self.session.borrow().in_write_transaction { + self.coll_cache.put(id, name)?; + } + } + } + + Ok(names) + } + + pub(super) fn update_collection( + &self, + user_id: u32, + collection_id: i32, + ) -> DbResult { + let quota = if self.quota.enabled { + self.calc_quota_usage_sync(user_id, collection_id)? + } else { + results::GetQuotaUsage { + count: 0, + total_bytes: 0, + } + }; + let upsert = format!( + r#" + INSERT INTO user_collections ({user_id}, {collection_id}, {modified}, {total_bytes}, {count}) + VALUES (?, ?, ?, ?, ?) + ON CONFLICT({user_id}, {collection_id}) DO UPDATE SET + {modified} = ?, + {total_bytes} = ?, + {count} = ? 
+ "#, + user_id = USER_ID, + collection_id = COLLECTION_ID, + modified = LAST_MODIFIED, + count = COUNT, + total_bytes = TOTAL_BYTES, + ); + let total_bytes = quota.total_bytes as i64; + sql_query(upsert) + .bind::(user_id as i64) + .bind::(&collection_id) + .bind::(&self.timestamp().as_i64()) + .bind::(&total_bytes) + .bind::("a.count) + .bind::(&self.timestamp().as_i64()) + .bind::(&total_bytes) + .bind::("a.count) + .execute(&self.conn)?; + Ok(self.timestamp()) + } + + // Perform a lighter weight "read only" storage size check + pub fn get_storage_usage_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let uid = user_id.legacy_id as i64; + let total_bytes = bso::table + .select(sql::>("SUM(LENGTH(payload))")) + .filter(bso::user_id.eq(uid)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .get_result::>(&self.conn)?; + Ok(total_bytes.unwrap_or_default() as u64) + } + + // Perform a lighter weight "read only" quota storage check + pub fn get_quota_usage_sync( + &self, + params: params::GetQuotaUsage, + ) -> DbResult { + let uid = params.user_id.legacy_id as i64; + let (total_bytes, count): (i64, i32) = user_collections::table + .select(( + sql::("COALESCE(SUM(COALESCE(total_bytes, 0)), 0)"), + sql::("COALESCE(SUM(COALESCE(count, 0)), 0)"), + )) + .filter(user_collections::user_id.eq(uid)) + .filter(user_collections::collection_id.eq(params.collection_id)) + .get_result(&self.conn) + .optional()? + .unwrap_or_default(); + Ok(results::GetQuotaUsage { + total_bytes: total_bytes as usize, + count, + }) + } + + // perform a heavier weight quota calculation + pub fn calc_quota_usage_sync( + &self, + user_id: u32, + collection_id: i32, + ) -> DbResult { + let (total_bytes, count): (i64, i32) = bso::table + .select(( + sql::(r#"COALESCE(SUM(LENGTH(COALESCE(payload, ""))),0)"#), + sql::("COALESCE(COUNT(*),0)"), + )) + .filter(bso::user_id.eq(user_id as i64)) + .filter(bso::expiry.gt(self.timestamp().as_i64())) + .filter(bso::collection_id.eq(collection_id)) + .get_result(&self.conn) + .optional()? + .unwrap_or_default(); + Ok(results::GetQuotaUsage { + total_bytes: total_bytes as usize, + count, + }) + } + + pub fn get_collection_usage_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let counts = bso::table + .select((bso::collection_id, sql::("SUM(LENGTH(payload))"))) + .filter(bso::user_id.eq(user_id.legacy_id as i64)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .group_by(bso::collection_id) + .load(&self.conn)? + .into_iter() + .collect(); + self.map_collection_names(counts) + } + + pub fn get_collection_counts_sync( + &self, + user_id: UserIdentifier, + ) -> DbResult { + let counts = bso::table + .select(( + bso::collection_id, + sql::(&format!( + "COUNT({collection_id})", + collection_id = COLLECTION_ID + )), + )) + .filter(bso::user_id.eq(user_id.legacy_id as i64)) + .filter(bso::expiry.gt(&self.timestamp().as_i64())) + .group_by(bso::collection_id) + .load(&self.conn)? 
+ .into_iter() + .collect(); + self.map_collection_names(counts) + } + + batch_db_method!(create_batch_sync, create, CreateBatch); + batch_db_method!(validate_batch_sync, validate, ValidateBatch); + batch_db_method!(append_to_batch_sync, append, AppendToBatch); + batch_db_method!(commit_batch_sync, commit, CommitBatch); + batch_db_method!(delete_batch_sync, delete, DeleteBatch); + + pub fn get_batch_sync(&self, params: params::GetBatch) -> DbResult> { + batch::get(self, params) + } + + pub fn timestamp(&self) -> SyncTimestamp { + self.session.borrow().timestamp + } +} + +impl Db for SqliteDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.commit_sync())) + } + + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.rollback_sync())) + } + + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) + } + + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || db.check_sync())) + } + + sync_db_method!(lock_for_read, lock_for_read_sync, LockCollection); + sync_db_method!(lock_for_write, lock_for_write_sync, LockCollection); + sync_db_method!( + get_collection_timestamps, + get_collection_timestamps_sync, + GetCollectionTimestamps + ); + sync_db_method!( + get_collection_timestamp, + get_collection_timestamp_sync, + GetCollectionTimestamp + ); + sync_db_method!( + get_collection_counts, + get_collection_counts_sync, + GetCollectionCounts + ); + sync_db_method!( + get_collection_usage, + get_collection_usage_sync, + GetCollectionUsage + ); + sync_db_method!( + get_storage_timestamp, + get_storage_timestamp_sync, + GetStorageTimestamp + ); + sync_db_method!(get_storage_usage, get_storage_usage_sync, GetStorageUsage); + sync_db_method!(get_quota_usage, get_quota_usage_sync, GetQuotaUsage); + sync_db_method!(delete_storage, delete_storage_sync, DeleteStorage); + sync_db_method!(delete_collection, delete_collection_sync, DeleteCollection); + sync_db_method!(delete_bsos, delete_bsos_sync, DeleteBsos); + sync_db_method!(get_bsos, get_bsos_sync, GetBsos); + sync_db_method!(get_bso_ids, get_bso_ids_sync, GetBsoIds); + sync_db_method!(post_bsos, post_bsos_sync, PostBsos); + sync_db_method!(delete_bso, delete_bso_sync, DeleteBso); + sync_db_method!(get_bso, get_bso_sync, GetBso, Option); + sync_db_method!( + get_bso_timestamp, + get_bso_timestamp_sync, + GetBsoTimestamp, + results::GetBsoTimestamp + ); + sync_db_method!(put_bso, put_bso_sync, PutBso); + sync_db_method!(create_batch, create_batch_sync, CreateBatch); + sync_db_method!(validate_batch, validate_batch_sync, ValidateBatch); + sync_db_method!(append_to_batch, append_to_batch_sync, AppendToBatch); + sync_db_method!( + get_batch, + get_batch_sync, + GetBatch, + Option + ); + sync_db_method!(commit_batch, commit_batch_sync, CommitBatch); + + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { + let db = self.clone(); + Box::pin( + self.blocking_threadpool + .spawn(move || db.get_collection_id(&name)), + ) + } + + fn get_connection_info(&self) -> results::ConnectionInfo { + results::ConnectionInfo::default() + } + + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { + let db = self.clone(); + Box::pin( + 
self.blocking_threadpool + .spawn(move || db.get_or_create_collection_id(&name)), + ) + } + + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || { + db.update_collection(param.user_id.legacy_id as u32, param.collection_id) + })) + } + + fn timestamp(&self) -> SyncTimestamp { + self.timestamp() + } + + fn set_timestamp(&self, timestamp: SyncTimestamp) { + self.session.borrow_mut().timestamp = timestamp; + } + + sync_db_method!(delete_batch, delete_batch_sync, DeleteBatch); + + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { + let db = self.clone(); + Box::pin(self.blocking_threadpool.spawn(move || { + db.coll_cache.clear(); + Ok(()) + })) + } + + fn set_quota(&mut self, enabled: bool, limit: usize, enforced: bool) { + self.quota = Quota { + size: limit, + enabled, + enforced, + } + } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +#[derive(Debug, QueryableByName)] +struct IdResult { + #[sql_type = "Integer"] + id: i32, +} + +#[allow(dead_code)] // Not really dead, Rust can't see the use above +#[derive(Debug, QueryableByName)] +struct NameResult { + #[sql_type = "Text"] + name: String, +} + +#[derive(Debug, QueryableByName)] +struct UserCollectionsResult { + // Can't substitute column names here. + #[sql_type = "Integer"] + collection: i32, // COLLECTION_ID + #[sql_type = "BigInt"] + last_modified: i64, // LAST_MODIFIED +} diff --git a/syncstorage-sqlite/src/pool.rs b/syncstorage-sqlite/src/pool.rs new file mode 100644 index 0000000000..f7bc71a852 --- /dev/null +++ b/syncstorage-sqlite/src/pool.rs @@ -0,0 +1,239 @@ +use async_trait::async_trait; + +use std::{ + collections::HashMap, + fmt, + sync::{Arc, RwLock}, + time::Duration, +}; + +use diesel::{ + r2d2::{ConnectionManager, Pool}, + sqlite::SqliteConnection, + Connection, +}; +#[cfg(debug_assertions)] +use diesel_logger::LoggingConnection; +use syncserver_common::{BlockingThreadpool, Metrics}; +#[cfg(debug_assertions)] +use syncserver_db_common::test::TestTransactionCustomizer; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; +use syncstorage_settings::{Quota, Settings}; +use syncstorage_sql_db_common::error::DbError; + +use super::{models::SqliteDb, wal::WALTransactionCustomizer, DbResult}; + +embed_migrations!(); + +/// Run the diesel embedded migrations +/// +/// Sqlite DDL statements implicitly commit which could disrupt SqlitePool's +/// begin_test_transaction during tests. So this runs on its own separate conn. +fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let path = database_url + .strip_prefix("sqlite:///") + .unwrap_or(database_url); + let conn = SqliteConnection::establish(path)?; + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + Ok(()) +} + +#[derive(Clone)] +pub struct SqliteDbPool { + /// Pool of db connections + pool: Pool>, + /// Thread Pool for running synchronous db calls + /// In-memory cache of collection_ids and their names + coll_cache: Arc, + + metrics: Metrics, + quota: Quota, + blocking_threadpool: Arc, +} + +impl SqliteDbPool { + /// Creates a new pool of Sqlite db connections. + /// + /// Also initializes the Sqlite db, ensuring all migrations are ran. 
+ pub fn new( + settings: &Settings, + metrics: &Metrics, + blocking_threadpool: Arc, + ) -> DbResult { + run_embedded_migrations(&settings.database_url)?; + Self::new_without_migrations(settings, metrics, blocking_threadpool) + } + + pub fn new_without_migrations( + settings: &Settings, + metrics: &Metrics, + blocking_threadpool: Arc, + ) -> DbResult { + let path = settings + .database_url + .strip_prefix("sqlite:///") + .unwrap_or(&settings.database_url); + info!("Using SQLite database at: {}", path); + let manager = ConnectionManager::::new(path); + let builder = Pool::builder() + .max_size(settings.database_pool_max_size) + .connection_timeout(Duration::from_secs( + settings.database_pool_connection_timeout.unwrap_or(30) as u64, + )) + // NOTE: Even with WAL we don't want more than one connection + // This ignores settings.database_pool_min_idle + .min_idle(Some(0)) + .connection_customizer(Box::new(WALTransactionCustomizer)); + + #[cfg(debug_assertions)] + let builder = if settings.database_use_test_transactions { + // NOTE: E2E tests might not be able to run with test transactions + // This is untested + builder.connection_customizer(Box::new(TestTransactionCustomizer)) + } else { + builder + }; + + Ok(Self { + pool: builder.build(manager)?, + coll_cache: Default::default(), + metrics: metrics.clone(), + quota: Quota { + size: settings.limits.max_quota_limit as usize, + enabled: settings.enable_quota, + enforced: settings.enforce_quota, + }, + blocking_threadpool, + }) + } + + /// Spawn a task to periodically evict idle connections. Calls wrapper sweeper fn + /// to use pool.retain, retaining objects only if they are shorter in duration than + /// defined max_idle. Noop for mysql impl. + pub fn spawn_sweeper(&self, _interval: Duration) { + sweeper() + } + + pub fn get_sync(&self) -> DbResult { + Ok(SqliteDb::new( + self.pool.get()?, + Arc::clone(&self.coll_cache), + &self.metrics, + &self.quota, + self.blocking_threadpool.clone(), + )) + } +} + +/// Sweeper to retain only the objects specified within the closure. +/// In this context, if a Spanner connection is unutilized, we want it +/// to release the given connections. +/// See: https://docs.rs/deadpool/latest/deadpool/managed/struct.Pool.html#method.retain +/// Noop for sql impl +fn sweeper() {} + +#[async_trait] +impl DbPool for SqliteDbPool { + type Error = DbError; + + async fn get<'a>(&'a self) -> DbResult>> { + let pool = self.clone(); + self.blocking_threadpool + .spawn(move || pool.get_sync()) + .await + .map(|db| Box::new(db) as Box>) + } + + fn validate_batch_id(&self, id: String) -> DbResult<()> { + super::batch::validate_batch_id(&id) + } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +impl fmt::Debug for SqliteDbPool { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("SqliteDbPool") + .field("coll_cache", &self.coll_cache) + .finish() + } +} + +impl GetPoolState for SqliteDbPool { + fn state(&self) -> PoolState { + self.pool.state().into() + } +} + +#[derive(Debug)] +pub(super) struct CollectionCache { + pub by_name: RwLock>, + pub by_id: RwLock>, +} + +impl CollectionCache { + pub fn put(&self, id: i32, name: String) -> DbResult<()> { + // XXX: should this emit a metric? + // XXX: should probably either lock both simultaneously during + // writes or use an RwLock alternative + self.by_name + .write() + .map_err(|_| DbError::internal("by_name write".to_owned()))? 
+ .insert(name.clone(), id); + self.by_id + .write() + .map_err(|_| DbError::internal("by_id write".to_owned()))? + .insert(id, name); + Ok(()) + } + + pub fn get_id(&self, name: &str) -> DbResult> { + Ok(self + .by_name + .read() + .map_err(|_| DbError::internal("by_name read".to_owned()))? + .get(name) + .cloned()) + } + + pub fn get_name(&self, id: i32) -> DbResult> { + Ok(self + .by_id + .read() + .map_err(|_| DbError::internal("by_id read".to_owned()))? + .get(&id) + .cloned()) + } + + pub fn clear(&self) { + self.by_name.write().expect("by_name write").clear(); + self.by_id.write().expect("by_id write").clear(); + } +} + +impl Default for CollectionCache { + fn default() -> Self { + Self { + by_name: RwLock::new( + STD_COLLS + .iter() + .map(|(k, v)| ((*v).to_owned(), *k)) + .collect(), + ), + by_id: RwLock::new( + STD_COLLS + .iter() + .map(|(k, v)| (*k, (*v).to_owned())) + .collect(), + ), + } + } +} diff --git a/syncstorage-sqlite/src/schema.rs b/syncstorage-sqlite/src/schema.rs new file mode 100644 index 0000000000..9fb0be6a5c --- /dev/null +++ b/syncstorage-sqlite/src/schema.rs @@ -0,0 +1,71 @@ +table! { + batch_uploads (batch_id, user_id) { + #[sql_name="batch"] + batch_id -> Bigint, + #[sql_name="userid"] + user_id -> Bigint, + #[sql_name="collection"] + collection_id -> Integer, + } +} + +table! { + batch_upload_items (batch_id, user_id, id) { + #[sql_name="batch"] + batch_id -> Bigint, + #[sql_name="userid"] + user_id -> Bigint, + id -> Varchar, + sortindex -> Nullable, + payload -> Nullable, + payload_size -> Nullable, + ttl_offset -> Nullable, + } +} + +table! { + bso (user_id, collection_id, id) { + #[sql_name="userid"] + user_id -> BigInt, + #[sql_name="collection"] + collection_id -> Integer, + id -> Varchar, + sortindex -> Nullable, + payload -> Mediumtext, + // not used, but legacy + payload_size -> Bigint, + modified -> Bigint, + #[sql_name="ttl"] + expiry -> Bigint, + } +} + +table! { + collections (id) { + id -> Integer, + name -> Varchar, + } +} + +table! 
{ + user_collections (user_id, collection_id) { + #[sql_name="userid"] + user_id -> BigInt, + #[sql_name="collection"] + collection_id -> Integer, + #[sql_name="last_modified"] + modified -> Bigint, + #[sql_name="count"] + count -> Integer, + #[sql_name="total_bytes"] + total_bytes -> BigInt, + } +} + +allow_tables_to_appear_in_same_query!( + batch_uploads, + batch_upload_items, + bso, + collections, + user_collections, +); diff --git a/syncstorage-sqlite/src/test.rs b/syncstorage-sqlite/src/test.rs new file mode 100644 index 0000000000..cbb85fbff7 --- /dev/null +++ b/syncstorage-sqlite/src/test.rs @@ -0,0 +1,80 @@ +use std::{collections::HashMap, sync::Arc}; + +use diesel::{ + // expression_methods::TextExpressionMethods, // See note below about `not_like` becoming swedish + ExpressionMethods, + QueryDsl, + RunQueryDsl, +}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_settings::Settings as SyncserverSettings; +use syncstorage_settings::Settings as SyncstorageSettings; +use url::Url; + +use crate::{models::SqliteDb, pool::SqliteDbPool, schema::collections, DbResult}; + +pub fn db(settings: &SyncstorageSettings) -> DbResult { + let _ = env_logger::try_init(); + // inherit SYNC_SYNCSTORAGE__DATABASE_URL from the env + + let pool = SqliteDbPool::new( + settings, + &Metrics::noop(), + Arc::new(BlockingThreadpool::new(1)), + )?; + pool.get_sync() +} + +#[test] +fn static_collection_id() -> DbResult<()> { + let settings = SyncserverSettings::test_settings().syncstorage; + if Url::parse(&settings.database_url).unwrap().scheme() != "sqlite" { + // Skip this test if we're not using mysql + return Ok(()); + } + let db = db(&settings)?; + + // ensure DB actually has predefined common collections + let cols: Vec<(i32, _)> = vec![ + (1, "clients"), + (2, "crypto"), + (3, "forms"), + (4, "history"), + (5, "keys"), + (6, "meta"), + (7, "bookmarks"), + (8, "prefs"), + (9, "tabs"), + (10, "passwords"), + (11, "addons"), + (12, "addresses"), + (13, "creditcards"), + ]; + // The integration tests can create collections that start + // with `xxx%`. We should not include those in our counts for local + // unit tests. + // Note: not sure why but as of 11/02/20, `.not_like("xxx%")` is apparently + // swedish-ci. Commenting that out for now. + let results: HashMap = collections::table + .select((collections::id, collections::name)) + .filter(collections::name.ne("")) + //.filter(collections::name.not_like("xxx%")) // from most integration tests + .filter(collections::name.ne("xxx_col2")) // from server::test + .filter(collections::name.ne("col2")) // from older intergration tests + .load(&db.inner.conn)? 
+ .into_iter() + .collect(); + assert_eq!(results.len(), cols.len(), "mismatched columns"); + for (id, name) in &cols { + assert_eq!(results.get(id).unwrap(), name); + } + + for (id, name) in &cols { + let result = db.get_collection_id(name)?; + assert_eq!(result, *id); + } + + let cid = db.get_or_create_collection_id("col1")?; + assert!(cid >= 100); + Ok(()) +} diff --git a/syncstorage-sqlite/src/wal.rs b/syncstorage-sqlite/src/wal.rs new file mode 100644 index 0000000000..3fcbc66b0d --- /dev/null +++ b/syncstorage-sqlite/src/wal.rs @@ -0,0 +1,22 @@ +use diesel::{ + connection::SimpleConnection, + r2d2::{CustomizeConnection, Error as PoolError}, + sqlite::SqliteConnection, +}; + +// For e2e tests only +#[derive(Debug)] +pub struct WALTransactionCustomizer; + +impl CustomizeConnection for WALTransactionCustomizer { + fn on_acquire(&self, conn: &mut SqliteConnection) -> Result<(), PoolError> { + (|| { + conn.batch_execute("PRAGMA journal_mode = WAL;")?; + conn.batch_execute("PRAGMA synchronous = NORMAL;")?; + conn.batch_execute("PRAGMA foreign_keys = ON;")?; + conn.batch_execute("PRAGMA busy_timeout = 10000;")?; + Ok(()) + })() + .map_err(PoolError::QueryError) + } +} diff --git a/tokenserver-common/src/lib.rs b/tokenserver-common/src/lib.rs index f21d9e0cae..d3d62b82ba 100644 --- a/tokenserver-common/src/lib.rs +++ b/tokenserver-common/src/lib.rs @@ -10,6 +10,8 @@ pub enum NodeType { MySql, #[serde(rename = "spanner")] Spanner, + #[serde(rename = "sqlite")] + Sqlite, } impl NodeType { diff --git a/tokenserver-db-common/Cargo.toml b/tokenserver-db-common/Cargo.toml new file mode 100644 index 0000000000..94eb3d1c42 --- /dev/null +++ b/tokenserver-db-common/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "tokenserver-db-common" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db/src/error.rs b/tokenserver-db-common/src/error.rs similarity index 94% rename from tokenserver-db/src/error.rs rename to tokenserver-db-common/src/error.rs index 6b7e2ddcca..666ab82155 100644 --- a/tokenserver-db/src/error.rs +++ b/tokenserver-db-common/src/error.rs @@ -7,8 +7,8 @@ use syncserver_db_common::error::SqlError; use thiserror::Error; use tokenserver_common::TokenserverError; -pub(crate) type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; -pub(crate) type DbResult = Result; +pub type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; +pub type DbResult = Result; /// An error type that represents any database-related errors that may occur while processing a /// tokenserver request. 
@@ -20,7 +20,7 @@ pub struct DbError { } impl DbError { - pub(crate) fn internal(msg: String) -> Self { + pub fn internal(msg: String) -> Self { DbErrorKind::Internal(msg).into() } } diff --git a/tokenserver-db-common/src/lib.rs b/tokenserver-db-common/src/lib.rs new file mode 100644 index 0000000000..a91e735174 --- /dev/null +++ b/tokenserver-db-common/src/lib.rs @@ -0,0 +1 @@ +pub mod error; diff --git a/tokenserver-db-mysql/Cargo.toml b/tokenserver-db-mysql/Cargo.toml new file mode 100644 index 0000000000..e7db08636f --- /dev/null +++ b/tokenserver-db-mysql/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "tokenserver-db-mysql" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db-common = { path = "../tokenserver-db-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db/migrations/2021-07-16-001122_init/down.sql b/tokenserver-db-mysql/migrations/2021-07-16-001122_init/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-07-16-001122_init/down.sql rename to tokenserver-db-mysql/migrations/2021-07-16-001122_init/down.sql diff --git a/tokenserver-db/migrations/2021-07-16-001122_init/up.sql b/tokenserver-db-mysql/migrations/2021-07-16-001122_init/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-07-16-001122_init/up.sql rename to tokenserver-db-mysql/migrations/2021-07-16-001122_init/up.sql diff --git a/tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql b/tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql rename to tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/down.sql diff --git a/tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql b/tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql rename to tokenserver-db-mysql/migrations/2021-08-03-234845_populate_services/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql similarity index 100% 
rename from tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142654_remove_node_defaults/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-142746_add_indexes/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql diff --git a/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql b/tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql diff --git a/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql b/tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql rename to tokenserver-db-mysql/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql diff --git a/tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql b/tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/down.sql similarity index 100% rename from tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql rename to tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/down.sql diff --git a/tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql 
b/tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/up.sql similarity index 100% rename from tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql rename to tokenserver-db-mysql/migrations/2021-12-22-160451_remove_services/up.sql diff --git a/tokenserver-db-mysql/src/lib.rs b/tokenserver-db-mysql/src/lib.rs new file mode 100644 index 0000000000..2e19539531 --- /dev/null +++ b/tokenserver-db-mysql/src/lib.rs @@ -0,0 +1,6 @@ +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; + +pub mod models; +pub mod pool; diff --git a/tokenserver-db-mysql/src/models.rs b/tokenserver-db-mysql/src/models.rs new file mode 100644 index 0000000000..0c42cee6cf --- /dev/null +++ b/tokenserver-db-mysql/src/models.rs @@ -0,0 +1,129 @@ +pub const LAST_INSERT_ID_QUERY: &str = "SELECT LAST_INSERT_ID() AS id"; + +pub const GET_NODE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM nodes +WHERE service = ? +AND node = ?"#; + +pub const REPLACE_USERS_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND email = ? +AND replaced_at IS NULL +AND created_at < ?"#; + +pub const REPLACE_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND uid = ?"#; + +// The `where` clause on this statement is designed as an extra layer of +// protection, to ensure that concurrent updates don't accidentally move +// timestamp fields backwards in time. The handling of `keys_changed_at` +// is additionally weird because we want to treat the default `NULL` value +// as zero. +pub const PUT_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET generation = ?, +keys_changed_at = ? +WHERE service = ? +AND email = ? +AND generation <= ? +AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) +AND replaced_at IS NULL"#; + +pub const POST_USER_SYNC_QUERY: &str = r#" +INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) +VALUES (?, ?, ?, ?, ?, ?, ?, NULL);"#; + +pub const CHECK_SYNC_QUERY: &str = "SHOW STATUS LIKE \"Uptime\""; + +pub const GET_BEST_NODE_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE service = ? +AND available > 0 +AND capacity > current_load +AND downed = 0 +AND backoff = 0 +ORDER BY LOG(current_load) / LOG(capacity) ASC,available DESC +LIMIT 1"#; + +pub const GET_BEST_NODE_RELEASE_CAPACITY_QUERY: &str = r#" +UPDATE nodes +SET available = LEAST(capacity * ?, capacity - current_load) +WHERE service = ? +AND available <= 0 +AND capacity > current_load +AND downed = 0"#; + +pub const GET_BEST_NODE_SPANNER_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE id = ? +LIMIT 1"#; + +pub const ADD_USER_TO_NODE_SYNC_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1, +available = GREATEST(available - 1, 0) +WHERE service = ? +AND node = ?"#; + +pub const ADD_USER_TO_NODE_SYNC_SPANNER_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1 +WHERE service = ? +AND node = ?"#; + +pub const GET_USERS_SYNC_QUERY: &str = r#" +SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, replaced_at +FROM users +LEFT OUTER JOIN nodes ON users.nodeid = nodes.id +WHERE email = ? +AND users.service = ? +ORDER BY created_at DESC, uid DESC +LIMIT 20"#; + +pub const GET_SERVICE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM services +WHERE service = ?"#; + +pub const SET_USER_CREATED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET created_at = ? 
+WHERE uid = ?"#; + +pub const SET_USER_REPLACED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE uid = ?"#; + +pub const GET_USER_SYNC_QUERY: &str = r#" +SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at +FROM users +WHERE uid = ?"#; + +pub const POST_NODE_SYNC_QUERY: &str = r#" +INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) +VALUES (?, ?, ?, ?, ?, ?, ?)"#; + +pub const GET_NODE_SYNC_QUERY: &str = r#" +SELECT * +FROM nodes +WHERE id = ?"#; + +pub const UNASSIGNED_NODE_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE nodeid = ?"#; + +pub const REMOVE_NODE_SYNC_QUERY: &str = "DELETE FROM nodes WHERE id = ?"; + +pub const POST_SERVICE_INSERT_SERVICE_QUERY: &str = r#" +INSERT INTO services (service, pattern) +VALUES (?, ?)"#; diff --git a/tokenserver-db-mysql/src/pool.rs b/tokenserver-db-mysql/src/pool.rs new file mode 100644 index 0000000000..685de5d423 --- /dev/null +++ b/tokenserver-db-mysql/src/pool.rs @@ -0,0 +1,20 @@ +use diesel::{mysql::MysqlConnection, Connection}; +use diesel_logger::LoggingConnection; +use tokenserver_db_common::error::DbResult; + +embed_migrations!(); + +/// Run the diesel embedded migrations +/// +/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's +/// begin_test_transaction during tests. So this runs on its own separate conn. +pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let conn = MysqlConnection::establish(database_url)?; + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + Ok(()) +} diff --git a/tokenserver-db-sqlite/Cargo.toml b/tokenserver-db-sqlite/Cargo.toml new file mode 100644 index 0000000000..c157eebed2 --- /dev/null +++ b/tokenserver-db-sqlite/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "tokenserver-db-sqlite" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +backtrace.workspace = true +futures.workspace = true +http.workspace = true +serde.workspace = true +serde_derive.workspace = true +serde_json.workspace = true +slog-scope.workspace = true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["sqlite", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["sqlite"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["sql"] } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db-common = { path = "../tokenserver-db-common"} +tokenserver-settings = { path = "../tokenserver-settings" } +tokio = { workspace = true, features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace = true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql new file mode 100644 index 0000000000..da49bf74a9 --- /dev/null +++ b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS `users`; +DROP TABLE IF EXISTS `nodes`; +DROP TABLE IF EXISTS `services`; diff --git a/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql 
b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql new file mode 100644 index 0000000000..78ee3929ce --- /dev/null +++ b/tokenserver-db-sqlite/migrations/2024-01-28-211312_init/up.sql @@ -0,0 +1,34 @@ +CREATE TABLE IF NOT EXISTS `services` ( + `id` INTEGER PRIMARY KEY, + `service` varchar(30) DEFAULT NULL UNIQUE, + `pattern` varchar(128) DEFAULT NULL +); + +CREATE TABLE IF NOT EXISTS `nodes` ( + `id` INTEGER PRIMARY KEY, + `service` int NOT NULL, + `node` varchar(64) NOT NULL, + `available` int NOT NULL, + `current_load` int NOT NULL, + `capacity` int NOT NULL, + `downed` int NOT NULL, + `backoff` int NOT NULL +); + +CREATE UNIQUE INDEX `unique_idx` ON `nodes` (`service`, `node`); + +CREATE TABLE IF NOT EXISTS `users` ( + `uid` INTEGER PRIMARY KEY, + `service` int NOT NULL, + `email` varchar(255) NOT NULL, + `generation` bigint NOT NULL, + `client_state` varchar(32) NOT NULL, + `created_at` bigint NOT NULL, + `replaced_at` bigint DEFAULT NULL, + `nodeid` bigint NOT NULL, + `keys_changed_at` bigint DEFAULT NULL +); + +CREATE INDEX `lookup_idx` ON `users` (`email`, `service`, `created_at`); +CREATE INDEX `replaced_at_idx` ON `users` (`service`, `replaced_at`); +CREATE INDEX `node_idx` ON `users` (`nodeid`); diff --git a/tokenserver-db-sqlite/src/lib.rs b/tokenserver-db-sqlite/src/lib.rs new file mode 100644 index 0000000000..2e19539531 --- /dev/null +++ b/tokenserver-db-sqlite/src/lib.rs @@ -0,0 +1,6 @@ +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; + +pub mod models; +pub mod pool; diff --git a/tokenserver-db-sqlite/src/models.rs b/tokenserver-db-sqlite/src/models.rs new file mode 100644 index 0000000000..6c96173721 --- /dev/null +++ b/tokenserver-db-sqlite/src/models.rs @@ -0,0 +1,130 @@ +pub const LAST_INSERT_ID_QUERY: &str = "SELECT LAST_INSERT_ROWID() AS id"; + +pub const GET_NODE_ID_SYNC_QUERY: &str = r#" +SELECT rowid as id +FROM nodes +WHERE service = ? +AND node = ?"#; + +pub const REPLACE_USERS_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND email = ? +AND replaced_at IS NULL +AND created_at < ?"#; + +pub const REPLACE_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE service = ? +AND uid = ?"#; + +// The `where` clause on this statement is designed as an extra layer of +// protection, to ensure that concurrent updates don't accidentally move +// timestamp fields backwards in time. The handling of `keys_changed_at` +// is additionally weird because we want to treat the default `NULL` value +// as zero. +pub const PUT_USER_SYNC_QUERY: &str = r#" +UPDATE users +SET generation = ?, +keys_changed_at = ? +WHERE service = ? +AND email = ? +AND generation <= ? +AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) +AND replaced_at IS NULL"#; + +pub const POST_USER_SYNC_QUERY: &str = r#" +INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) +VALUES (?, ?, ?, ?, ?, ?, ?, NULL);"#; + +pub const CHECK_SYNC_QUERY: &str = "SHOW STATUS LIKE \"Uptime\""; + +pub const GET_BEST_NODE_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE service = ? +AND available > 0 +AND capacity > current_load +AND downed = 0 +AND backoff = 0 +ORDER BY LOG(current_load) / LOG(capacity) +LIMIT 1"#; + +pub const GET_BEST_NODE_RELEASE_CAPACITY_QUERY: &str = r#" +UPDATE nodes +SET available = MIN(capacity * ?, capacity - current_load) +WHERE service = ? 
+AND available <= 0 +AND capacity > current_load +AND downed = 0"#; + +// FIXME: MySQL specific +pub const GET_BEST_NODE_SPANNER_QUERY: &str = r#" +SELECT id, node +FROM nodes +WHERE id = ? +LIMIT 1"#; + +pub const ADD_USER_TO_NODE_SYNC_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1, +available = MAX(available - 1, 0) +WHERE service = ? +AND node = ?"#; + +pub const ADD_USER_TO_NODE_SYNC_SPANNER_QUERY: &str = r#" +UPDATE nodes +SET current_load = current_load + 1 +WHERE service = ? +AND node = ?"#; + +pub const GET_USERS_SYNC_QUERY: &str = r#" +SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, replaced_at +FROM users +LEFT OUTER JOIN nodes ON users.nodeid = nodes.id +WHERE email = ? +AND users.service = ? +ORDER BY created_at DESC, uid DESC +LIMIT 20"#; + +pub const GET_SERVICE_ID_SYNC_QUERY: &str = r#" +SELECT id +FROM services +WHERE service = ?"#; + +pub const SET_USER_CREATED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET created_at = ? +WHERE uid = ?"#; + +pub const SET_USER_REPLACED_AT_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE uid = ?"#; + +pub const GET_USER_SYNC_QUERY: &str = r#" +SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at +FROM users +WHERE uid = ?"#; + +pub const POST_NODE_SYNC_QUERY: &str = r#" +INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) +VALUES (?, ?, ?, ?, ?, ?, ?)"#; + +pub const GET_NODE_SYNC_QUERY: &str = r#" +SELECT * +FROM nodes +WHERE id = ?"#; + +pub const UNASSIGNED_NODE_SYNC_QUERY: &str = r#" +UPDATE users +SET replaced_at = ? +WHERE nodeid = ?"#; + +pub const REMOVE_NODE_SYNC_QUERY: &str = "DELETE FROM nodes WHERE id = ?"; + +pub const POST_SERVICE_INSERT_SERVICE_QUERY: &str = r#" +INSERT INTO services (service, pattern) +VALUES (?, ?)"#; diff --git a/tokenserver-db-sqlite/src/pool.rs b/tokenserver-db-sqlite/src/pool.rs new file mode 100644 index 0000000000..aadf1e0599 --- /dev/null +++ b/tokenserver-db-sqlite/src/pool.rs @@ -0,0 +1,23 @@ +use diesel::{sqlite::SqliteConnection, Connection}; +use diesel_logger::LoggingConnection; +use tokenserver_db_common::error::DbResult; + +embed_migrations!(); + +/// Run the diesel embedded migrations +pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> { + let path = database_url + .strip_prefix("sqlite:///") + .unwrap_or(database_url); + + let conn = SqliteConnection::establish(path)?; + + #[cfg(debug_assertions)] + // XXX: this doesn't show the DDL statements + // https://github.com/shssoichiro/diesel-logger/issues/1 + embedded_migrations::run(&LoggingConnection::new(conn))?; + #[cfg(not(debug_assertions))] + embedded_migrations::run(&conn)?; + + Ok(()) +} diff --git a/tokenserver-db/Cargo.toml b/tokenserver-db/Cargo.toml index d999cf785a..b0ae7871c6 100644 --- a/tokenserver-db/Cargo.toml +++ b/tokenserver-db/Cargo.toml @@ -19,12 +19,20 @@ diesel = { workspace = true, features = ["mysql", "r2d2"] } diesel_logger = { workspace = true } diesel_migrations = { workspace = true, features = ["mysql"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-db-common = { path = "../syncserver-db-common", features = ["mysql", "sqlite"] } tokenserver-common = { path = "../tokenserver-common" } tokenserver-settings = { path = "../tokenserver-settings" } +tokenserver-db-common = { path = "../tokenserver-db-common" } +tokenserver-db-mysql = { path = "../tokenserver-db-mysql", optional = true} 
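A note on the tokenserver-db-sqlite query constants above: SQLite has no LEAST/GREATEST, so the ported statements use SQLite's scalar MIN()/MAX(), which accept multiple arguments, and the COALESCE(keys_changed_at, 0) guard is what makes a NULL keys_changed_at compare as zero. A throwaway sketch of those expressions, using the rusqlite crate purely for illustration (it is not a dependency introduced by this change):

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;

        // Scalar MAX() clamps "available - 1" at zero, the role GREATEST() plays
        // in the MySQL version of ADD_USER_TO_NODE_SYNC_QUERY.
        let clamped: i64 = conn.query_row("SELECT MAX(0 - 1, 0)", [], |row| row.get(0))?;
        assert_eq!(clamped, 0);

        // Scalar MIN() picks the smaller capacity expression, the role LEAST() plays
        // in the MySQL release-capacity statement.
        let released: f64 =
            conn.query_row("SELECT MIN(100 * 0.1, 100.0 - 95)", [], |row| row.get(0))?;
        assert_eq!(released, 5.0);

        // COALESCE lets a NULL keys_changed_at pass the "<=" guard in PUT_USER_SYNC_QUERY
        // by treating it as zero.
        let admitted: bool = conn.query_row(
            "SELECT COALESCE(NULL, 0) <= COALESCE(7, NULL, 0)",
            [],
            |row| row.get(0),
        )?;
        assert!(admitted);

        Ok(())
    }

Diesel binds both dialects through the same "?" placeholders, which is why only the SQL text differs between the two backend crates.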
+tokenserver-db-sqlite = { path = "../tokenserver-db-sqlite", optional = true} tokio = { workspace = true, features = ["macros", "sync"] } [dev-dependencies] env_logger.workspace = true syncserver-settings = { path = "../syncserver-settings" } + +[features] +default = [] +mysql = ["tokenserver-db-mysql"] +sqlite = ["tokenserver-db-sqlite"] diff --git a/tokenserver-db/src/lib.rs b/tokenserver-db/src/lib.rs index 86fa5c57fa..d2707a2f67 100644 --- a/tokenserver-db/src/lib.rs +++ b/tokenserver-db/src/lib.rs @@ -1,11 +1,15 @@ #![allow(non_local_definitions)] extern crate diesel; -#[macro_use] extern crate diesel_migrations; #[macro_use] extern crate slog_scope; -mod error; +use diesel::r2d2::{ConnectionManager, PooledConnection}; +#[cfg(feature = "mysql")] +use diesel::MysqlConnection; +#[cfg(feature = "sqlite")] +use diesel::SqliteConnection; + pub mod mock; mod models; pub mod params; @@ -14,3 +18,15 @@ pub mod results; pub use models::{Db, TokenserverDb}; pub use pool::{DbPool, TokenserverPool}; + +#[cfg(feature = "mysql")] +type Conn = MysqlConnection; +#[cfg(feature = "sqlite")] +type Conn = SqliteConnection; +type PooledConn = PooledConnection<ConnectionManager<Conn>>; + +#[cfg(all(feature = "mysql", feature = "sqlite"))] +compile_error!("only one of the \"mysql\" and \"sqlite\" features can be enabled at a time"); + +#[cfg(not(any(feature = "mysql", feature = "sqlite")))] +compile_error!("exactly one of the \"mysql\" and \"sqlite\" features must be enabled"); diff --git a/tokenserver-db/src/mock.rs b/tokenserver-db/src/mock.rs index 29041091d7..1e2b783f3e 100644 --- a/tokenserver-db/src/mock.rs +++ b/tokenserver-db/src/mock.rs @@ -3,8 +3,8 @@ use async_trait::async_trait; use futures::future; use syncserver_db_common::{GetPoolState, PoolState}; +use tokenserver_db_common::error::{DbError, DbFuture}; -use super::error::{DbError, DbFuture}; use super::models::Db; use super::params; use super::pool::DbPool; diff --git a/tokenserver-db/src/models.rs b/tokenserver-db/src/models.rs index 2e6ba32c19..177e0ebf1f 100644 --- a/tokenserver-db/src/models.rs +++ b/tokenserver-db/src/models.rs @@ -1,6 +1,9 @@ +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + use diesel::{ - mysql::MysqlConnection, - r2d2::{ConnectionManager, PooledConnection}, sql_types::{Bigint, Float, Integer, Nullable, Text}, OptionalExtension, RunQueryDsl, }; @@ -9,23 +12,18 @@ use diesel_logger::LoggingConnection; use http::StatusCode; use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_db_common::{sync_db_method, DbFuture}; +use tokenserver_db_common::error::{DbError, DbResult}; +#[cfg(feature = "mysql")] +use tokenserver_db_mysql::models::*; +#[cfg(feature = "sqlite")] +use tokenserver_db_sqlite::models::*; -use std::{ - sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; - -use super::{ - error::{DbError, DbResult}, - params, results, -}; +use super::{params, results, PooledConn}; /// The maximum possible generation number. Used as a tombstone to mark users that have been /// "retired" from the db.
const MAX_GENERATION: i64 = i64::MAX; -type Conn = PooledConnection>; - #[derive(Clone)] pub struct TokenserverDb { /// Synchronous Diesel calls are executed on a blocking threadpool to satisfy @@ -50,21 +48,20 @@ unsafe impl Send for TokenserverDb {} struct DbInner { #[cfg(not(test))] - pub(super) conn: Conn, + pub(super) conn: PooledConn, #[cfg(test)] - pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" + pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" } impl TokenserverDb { // Note that this only works because an instance of `TokenserverDb` has *exclusive access* to - // a connection from the r2d2 pool for its lifetime. `LAST_INSERT_ID()` returns the ID of the - // most recently-inserted record *for a given connection*. If connections were shared across - // requests, using this function would introduce a race condition, as we could potentially - // get IDs from records created during other requests. - const LAST_INSERT_ID_QUERY: &'static str = "SELECT LAST_INSERT_ID() AS id"; + // a connection from the r2d2 pool for its lifetime. `LAST_INSERT_ID_QUERY` + // returns the ID of the most recently-inserted record *for a given connection*. + // If connections were shared across requests, using this function would introduce a race condition, + // as we could potentially get IDs from records created during other requests. pub fn new( - conn: Conn, + conn: PooledConn, metrics: &Metrics, service_id: Option, spanner_node_id: Option, @@ -91,20 +88,13 @@ impl TokenserverDb { } fn get_node_id_sync(&self, params: params::GetNodeId) -> DbResult { - const QUERY: &str = r#" - SELECT id - FROM nodes - WHERE service = ? - AND node = ? - "#; - if let Some(id) = self.spanner_node_id { Ok(results::GetNodeId { id: id as i64 }) } else { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_node_id", None); - diesel::sql_query(QUERY) + diesel::sql_query(GET_NODE_ID_SYNC_QUERY) .bind::(params.service_id) .bind::(¶ms.node) .get_result(&self.inner.conn) @@ -114,19 +104,10 @@ impl TokenserverDb { /// Mark users matching the given email and service ID as replaced. fn replace_users_sync(&self, params: params::ReplaceUsers) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE service = ? - AND email = ? - AND replaced_at IS NULL - AND created_at < ? - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.replace_users", None); - diesel::sql_query(QUERY) + diesel::sql_query(REPLACE_USERS_SYNC_QUERY) .bind::(params.replaced_at) .bind::(¶ms.service_id) .bind::(¶ms.email) @@ -138,14 +119,7 @@ impl TokenserverDb { /// Mark the user with the given uid and service ID as being replaced. fn replace_user_sync(&self, params: params::ReplaceUser) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE service = ? - AND uid = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(REPLACE_USER_SYNC_QUERY) .bind::(params.replaced_at) .bind::(params.service_id) .bind::(params.uid) @@ -157,26 +131,10 @@ impl TokenserverDb { /// Update the user with the given email and service ID with the given `generation` and /// `keys_changed_at`. fn put_user_sync(&self, params: params::PutUser) -> DbResult { - // The `where` clause on this statement is designed as an extra layer of - // protection, to ensure that concurrent updates don't accidentally move - // timestamp fields backwards in time. 
The handling of `keys_changed_at` - // is additionally weird because we want to treat the default `NULL` value - // as zero. - const QUERY: &str = r#" - UPDATE users - SET generation = ?, - keys_changed_at = ? - WHERE service = ? - AND email = ? - AND generation <= ? - AND COALESCE(keys_changed_at, 0) <= COALESCE(?, keys_changed_at, 0) - AND replaced_at IS NULL - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.put_user", None); - diesel::sql_query(QUERY) + diesel::sql_query(PUT_USER_SYNC_QUERY) .bind::(params.generation) .bind::, _>(params.keys_changed_at) .bind::(¶ms.service_id) @@ -190,15 +148,10 @@ impl TokenserverDb { /// Create a new user. fn post_user_sync(&self, user: params::PostUser) -> DbResult { - const QUERY: &str = r#" - INSERT INTO users (service, email, generation, client_state, created_at, nodeid, keys_changed_at, replaced_at) - VALUES (?, ?, ?, ?, ?, ?, ?, NULL); - "#; - let mut metrics = self.metrics.clone(); metrics.start_timer("storage.post_user", None); - diesel::sql_query(QUERY) + diesel::sql_query(POST_USER_SYNC_QUERY) .bind::(user.service_id) .bind::(&user.email) .bind::(user.generation) @@ -208,52 +161,26 @@ impl TokenserverDb { .bind::, _>(user.keys_changed_at) .execute(&self.inner.conn)?; - diesel::sql_query(Self::LAST_INSERT_ID_QUERY) - .bind::(&user.email) + diesel::sql_query(LAST_INSERT_ID_QUERY) .get_result::(&self.inner.conn) .map_err(Into::into) } fn check_sync(&self) -> DbResult { // has the database been up for more than 0 seconds? - let result = diesel::sql_query("SHOW STATUS LIKE \"Uptime\"").execute(&self.inner.conn)?; + let result = diesel::sql_query(CHECK_SYNC_QUERY).execute(&self.inner.conn)?; Ok(result as u64 > 0) } /// Gets the least-loaded node that has available slots. fn get_best_node_sync(&self, params: params::GetBestNode) -> DbResult { const DEFAULT_CAPACITY_RELEASE_RATE: f32 = 0.1; - const GET_BEST_NODE_QUERY: &str = r#" - SELECT id, node - FROM nodes - WHERE service = ? - AND available > 0 - AND capacity > current_load - AND downed = 0 - AND backoff = 0 - ORDER BY LOG(current_load) / LOG(capacity) - LIMIT 1 - "#; - const RELEASE_CAPACITY_QUERY: &str = r#" - UPDATE nodes - SET available = LEAST(capacity * ?, capacity - current_load) - WHERE service = ? - AND available <= 0 - AND capacity > current_load - AND downed = 0 - "#; - const SPANNER_QUERY: &str = r#" - SELECT id, node - FROM nodes - WHERE id = ? - LIMIT 1 - "#; let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_best_node", None); if let Some(spanner_node_id) = self.spanner_node_id { - diesel::sql_query(SPANNER_QUERY) + diesel::sql_query(GET_BEST_NODE_SPANNER_QUERY) .bind::(spanner_node_id) .get_result::(&self.inner.conn) .map_err(|e| { @@ -277,7 +204,7 @@ impl TokenserverDb { // There were no available nodes. Try to release additional capacity from any nodes // that are not fully occupied. - let affected_rows = diesel::sql_query(RELEASE_CAPACITY_QUERY) + let affected_rows = diesel::sql_query(GET_BEST_NODE_RELEASE_CAPACITY_QUERY) .bind::( params .capacity_release_rate @@ -305,24 +232,10 @@ impl TokenserverDb { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.add_user_to_node", None); - const QUERY: &str = r#" - UPDATE nodes - SET current_load = current_load + 1, - available = GREATEST(available - 1, 0) - WHERE service = ? - AND node = ? - "#; - const SPANNER_QUERY: &str = r#" - UPDATE nodes - SET current_load = current_load + 1 - WHERE service = ? - AND node = ? 
- "#; - let query = if self.spanner_node_id.is_some() { - SPANNER_QUERY + ADD_USER_TO_NODE_SYNC_SPANNER_QUERY } else { - QUERY + ADD_USER_TO_NODE_SYNC_QUERY }; diesel::sql_query(query) @@ -337,18 +250,7 @@ impl TokenserverDb { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.get_users", None); - const QUERY: &str = r#" - SELECT uid, nodes.node, generation, keys_changed_at, client_state, created_at, - replaced_at - FROM users - LEFT OUTER JOIN nodes ON users.nodeid = nodes.id - WHERE email = ? - AND users.service = ? - ORDER BY created_at DESC, uid DESC - LIMIT 20 - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_USERS_SYNC_QUERY) .bind::(¶ms.email) .bind::(params.service_id) .load::(&self.inner.conn) @@ -519,16 +421,10 @@ impl TokenserverDb { &self, params: params::GetServiceId, ) -> DbResult { - const QUERY: &str = r#" - SELECT id - FROM services - WHERE service = ? - "#; - if let Some(id) = self.service_id { Ok(results::GetServiceId { id }) } else { - diesel::sql_query(QUERY) + diesel::sql_query(GET_SERVICE_ID_SYNC_QUERY) .bind::(params.service) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -540,12 +436,7 @@ impl TokenserverDb { &self, params: params::SetUserCreatedAt, ) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET created_at = ? - WHERE uid = ? - "#; - diesel::sql_query(QUERY) + diesel::sql_query(SET_USER_CREATED_AT_SYNC_QUERY) .bind::(params.created_at) .bind::(¶ms.uid) .execute(&self.inner.conn) @@ -558,12 +449,7 @@ impl TokenserverDb { &self, params: params::SetUserReplacedAt, ) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE uid = ? - "#; - diesel::sql_query(QUERY) + diesel::sql_query(SET_USER_REPLACED_AT_SYNC_QUERY) .bind::(params.replaced_at) .bind::(¶ms.uid) .execute(&self.inner.conn) @@ -573,13 +459,7 @@ impl TokenserverDb { #[cfg(test)] fn get_user_sync(&self, params: params::GetUser) -> DbResult { - const QUERY: &str = r#" - SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at - FROM users - WHERE uid = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_USER_SYNC_QUERY) .bind::(params.id) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -587,11 +467,7 @@ impl TokenserverDb { #[cfg(test)] fn post_node_sync(&self, params: params::PostNode) -> DbResult { - const QUERY: &str = r#" - INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff) - VALUES (?, ?, ?, ?, ?, ?, ?) - "#; - diesel::sql_query(QUERY) + diesel::sql_query(POST_NODE_SYNC_QUERY) .bind::(params.service_id) .bind::(¶ms.node) .bind::(params.available) @@ -601,20 +477,14 @@ impl TokenserverDb { .bind::(params.backoff) .execute(&self.inner.conn)?; - diesel::sql_query(Self::LAST_INSERT_ID_QUERY) + diesel::sql_query(LAST_INSERT_ID_QUERY) .get_result::(&self.inner.conn) .map_err(Into::into) } #[cfg(test)] fn get_node_sync(&self, params: params::GetNode) -> DbResult { - const QUERY: &str = r#" - SELECT * - FROM nodes - WHERE id = ? - "#; - - diesel::sql_query(QUERY) + diesel::sql_query(GET_NODE_SYNC_QUERY) .bind::(params.id) .get_result::(&self.inner.conn) .map_err(Into::into) @@ -622,18 +492,12 @@ impl TokenserverDb { #[cfg(test)] fn unassign_node_sync(&self, params: params::UnassignNode) -> DbResult { - const QUERY: &str = r#" - UPDATE users - SET replaced_at = ? - WHERE nodeid = ? 
- "#; - let current_time = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_millis() as i64; - diesel::sql_query(QUERY) + diesel::sql_query(UNASSIGNED_NODE_SYNC_QUERY) .bind::(current_time) .bind::(params.node_id) .execute(&self.inner.conn) @@ -643,9 +507,7 @@ impl TokenserverDb { #[cfg(test)] fn remove_node_sync(&self, params: params::RemoveNode) -> DbResult { - const QUERY: &str = "DELETE FROM nodes WHERE id = ?"; - - diesel::sql_query(QUERY) + diesel::sql_query(REMOVE_NODE_SYNC_QUERY) .bind::(params.node_id) .execute(&self.inner.conn) .map(|_| ()) @@ -654,17 +516,12 @@ impl TokenserverDb { #[cfg(test)] fn post_service_sync(&self, params: params::PostService) -> DbResult { - const INSERT_SERVICE_QUERY: &str = r#" - INSERT INTO services (service, pattern) - VALUES (?, ?) - "#; - - diesel::sql_query(INSERT_SERVICE_QUERY) + diesel::sql_query(POST_SERVICE_INSERT_SERVICE_QUERY) .bind::(¶ms.service) .bind::(¶ms.pattern) .execute(&self.inner.conn)?; - diesel::sql_query(Self::LAST_INSERT_ID_QUERY) + diesel::sql_query(LAST_INSERT_ID_QUERY) .get_result::(&self.inner.conn) .map(|result| results::PostService { id: result.id as i32, diff --git a/tokenserver-db/src/pool.rs b/tokenserver-db/src/pool.rs index 5c8d9b5e96..42c60c2eb9 100644 --- a/tokenserver-db/src/pool.rs +++ b/tokenserver-db/src/pool.rs @@ -1,41 +1,27 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use diesel::{ - mysql::MysqlConnection, - r2d2::{ConnectionManager, Pool}, - Connection, -}; -use diesel_logger::LoggingConnection; +use diesel::r2d2::{ConnectionManager, Pool}; use syncserver_common::{BlockingThreadpool, Metrics}; #[cfg(debug_assertions)] use syncserver_db_common::test::TestTransactionCustomizer; use syncserver_db_common::{GetPoolState, PoolState}; +use tokenserver_db_common::error::{DbError, DbResult}; +#[cfg(feature = "mysql")] +use tokenserver_db_mysql::pool::run_embedded_migrations; +#[cfg(feature = "sqlite")] +use tokenserver_db_sqlite::pool::run_embedded_migrations; use tokenserver_settings::Settings; use super::{ - error::{DbError, DbResult}, models::{Db, TokenserverDb}, + Conn, }; -embed_migrations!(); - -/// Run the diesel embedded migrations -/// -/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's -/// begin_test_transaction during tests. So this runs on its own separate conn. 
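For reference, the run_embedded_migrations helper being deleted here now lives in the backend crates shown earlier (tokenserver-db-mysql/src/pool.rs and tokenserver-db-sqlite/src/pool.rs), each with its own embed_migrations!() expansion. A minimal standalone sketch of that diesel_migrations 1.4 pattern, assuming a crate that depends on diesel 1.4 with the "sqlite" feature, diesel_migrations 1.4, and a ./migrations directory:

    // Sketch only: mirrors the per-backend pool modules above.
    #[macro_use]
    extern crate diesel_migrations;

    use diesel::{sqlite::SqliteConnection, Connection};

    // With no argument the macro embeds ./migrations/*/up.sql into the binary at compile time.
    embed_migrations!();

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // An in-memory database for the sketch; the real pools establish the configured URL.
        let conn = SqliteConnection::establish(":memory:")?;
        // Applies any migrations not yet recorded in the __diesel_schema_migrations table.
        embedded_migrations::run(&conn)?;
        Ok(())
    }

Keeping one expansion per backend crate is what lets the MySQL and SQLite migration directories diverge, while tokenserver-db simply imports whichever run_embedded_migrations the enabled feature selects.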
-fn run_embedded_migrations(database_url: &str) -> DbResult<()> { - let conn = MysqlConnection::establish(database_url)?; - - embedded_migrations::run(&LoggingConnection::new(conn))?; - - Ok(()) -} - #[derive(Clone)] pub struct TokenserverPool { /// Pool of db connections - inner: Pool>, + inner: Pool>, metrics: Metrics, // This field is public so the service ID can be set after the pool is created pub service_id: Option, @@ -55,12 +41,21 @@ impl TokenserverPool { run_embedded_migrations(&settings.database_url)?; } - let manager = ConnectionManager::::new(settings.database_url.clone()); + // SQLite can't handle its uri prefix + let database_url = settings + .database_url + .strip_prefix("sqlite:///") + .unwrap_or(&settings.database_url); + #[cfg(feature = "sqlite")] + info!("Using SQLite database at: {}", database_url); + + let manager = ConnectionManager::::new(database_url); let builder = Pool::builder() .max_size(settings.database_pool_max_size) .connection_timeout(Duration::from_secs( settings.database_pool_connection_timeout.unwrap_or(30) as u64, )) + .idle_timeout(Some(Duration::from_secs(1))) // FIXME: This one should only be enabled in testing sqlite .min_idle(settings.database_pool_min_idle); #[cfg(debug_assertions)] diff --git a/tools/hawk/make_hawk_token.py b/tools/hawk/make_hawk_token.py index e5fbfd48c2..8e2ad25fa2 100644 --- a/tools/hawk/make_hawk_token.py +++ b/tools/hawk/make_hawk_token.py @@ -36,60 +36,73 @@ # 10 years DURATION = timedelta(days=10 * 365).total_seconds() -SALT = hexlify(os.urandom(3)).decode('ascii') +SALT = hexlify(os.urandom(3)).decode("ascii") def get_args(): parser = argparse.ArgumentParser( description="Create a hawk header for use in testing" - ) + ) parser.add_argument( - '--uid', type=int, default=LEGACY_UID, - help="Legacy UID ({})".format(LEGACY_UID)) + "--uid", + type=int, + default=LEGACY_UID, + help="Legacy UID ({})".format(LEGACY_UID), + ) + parser.add_argument("--uri", default=URI, help="URI path ({})".format(URI)) parser.add_argument( - '--uri', default=URI, - help="URI path ({})".format(URI)) + "--method", default=METHOD, help="The HTTP Method ({})".format(METHOD) + ) parser.add_argument( - '--method', default=METHOD, - help="The HTTP Method ({})".format(METHOD)) + "--fxa_uid", default=FXA_UID, help="FxA User ID ({})".format(FXA_UID) + ) parser.add_argument( - '--fxa_uid', default=FXA_UID, - help="FxA User ID ({})".format(FXA_UID)) + "--fxa_kid", default=FXA_KID, help="FxA K ID ({})".format(FXA_KID) + ) parser.add_argument( - '--fxa_kid', default=FXA_KID, - help="FxA K ID ({})".format(FXA_KID)) + "--device_id", + default=DEVICE_ID, + help="FxA Device ID ({})".format(DEVICE_ID), + ) parser.add_argument( - '--device_id', default=DEVICE_ID, - help="FxA Device ID ({})".format(DEVICE_ID)) + "--node", default=NODE, help="HTTP Host URI for node ({})".format(NODE) + ) parser.add_argument( - '--node', default=NODE, - help="HTTP Host URI for node ({})".format(NODE)) + "--duration", + type=int, + default=DURATION, + help="Hawk TTL ({})".format(DURATION), + ) parser.add_argument( - '--duration', type=int, default=DURATION, - help="Hawk TTL ({})".format(DURATION)) + "--secret", + default=SECRET, + help="Shared HAWK secret ({})".format(SECRET), + ) parser.add_argument( - '--secret', default=SECRET, - help="Shared HAWK secret ({})".format(SECRET)) + "--hmac_key", + default=HMAC_KEY, + help="HAWK HMAC key ({})".format(HMAC_KEY), + ) parser.add_argument( - '--hmac_key', default=HMAC_KEY, - help="HAWK HMAC key ({})".format(HMAC_KEY)) - parser.add_argument( - 
'--as_header', action="store_true", default=False, - help="return only header (False)") + "--as_header", + action="store_true", + default=False, + help="return only header (False)", + ) return parser.parse_args() def create_token(args): expires = int(time.time()) + args.duration token_data = { - 'uid': args.uid, - 'node': args.node, - 'expires': expires, - 'fxa_uid': args.fxa_uid, - 'fxa_kid': args.fxa_kid, - 'hashed_fxa_uid': metrics_hash(args, args.fxa_uid), - 'hashed_device_id': metrics_hash(args, args.device_id), - 'salt': SALT, + "uid": args.uid, + "node": args.node, + "expires": expires, + "fxa_uid": args.fxa_uid, + "fxa_kid": args.fxa_kid, + "hashed_fxa_uid": metrics_hash(args, args.fxa_uid), + "hashed_device_id": metrics_hash(args, args.device_id), + "salt": SALT, } token = tokenlib.make_token(token_data, secret=args.secret) key = tokenlib.get_derived_secret(token, secret=args.secret) @@ -99,18 +112,16 @@ def create_token(args): def metrics_hash(args, value): if isinstance(args.hmac_key, str): args.hmac_key = args.hmac_key.encode() - hasher = hmac.new(args.hmac_key, b'', sha256) + hasher = hmac.new(args.hmac_key, b"", sha256) # value may be an email address, in which case we only want the first part - hasher.update(value.encode('utf-8').split(b"@", 1)[0]) + hasher.update(value.encode("utf-8").split(b"@", 1)[0]) return hasher.hexdigest() def main(): args = get_args() token, key, expires, salt = create_token(args) - path = "{node}{uri}".format( - node=args.node, - uri=args.uri) + path = "{node}{uri}".format(node=args.node, uri=args.uri) req = Request.blank(path) req.method = args.method header = hawkauthlib.sign_request(req, token, key) @@ -123,5 +134,5 @@ def main(): print("Authorization:", header) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/integration_tests/test_storage.py b/tools/integration_tests/test_storage.py index c74b93bbf3..161d76dfae 100644 --- a/tools/integration_tests/test_storage.py +++ b/tools/integration_tests/test_storage.py @@ -126,9 +126,9 @@ def test_get_info_collections(self): resp = self.app.get(self.root + "/info/collections") res = resp.json keys = sorted(list(res.keys())) - self.assertEqual(keys, ["xxx_col1", "xxx_col2"]) - self.assertEqual(res["xxx_col1"], ts1) - self.assertEqual(res["xxx_col2"], ts2) + self.assertEquals(keys, ["xxx_col1", "xxx_col2"]) + self.assertEquals(res["xxx_col1"], ts1) + self.assertEquals(res["xxx_col2"], ts2) # Updating items in xxx_col2, check timestamps. bsos = [{"id": str(i).zfill(2), "payload": "yyy"} for i in range(2)] resp = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) @@ -137,9 +137,9 @@ def test_get_info_collections(self): resp = self.app.get(self.root + "/info/collections") res = resp.json keys = sorted(list(res.keys())) - self.assertEqual(keys, ["xxx_col1", "xxx_col2"]) - self.assertEqual(res["xxx_col1"], ts1) - self.assertEqual(res["xxx_col2"], ts2) + self.assertEquals(keys, ["xxx_col1", "xxx_col2"]) + self.assertEquals(res["xxx_col1"], ts1) + self.assertEquals(res["xxx_col2"], ts2) def test_get_collection_count(self): # xxx_col1 gets 3 items, xxx_col2 gets 5 items. @@ -150,9 +150,9 @@ def test_get_collection_count(self): # those counts should be reflected back in query. 
resp = self.app.get(self.root + "/info/collection_counts") res = resp.json - self.assertEqual(len(res), 2) - self.assertEqual(res["xxx_col1"], 3) - self.assertEqual(res["xxx_col2"], 5) + self.assertEquals(len(res), 2) + self.assertEquals(res["xxx_col1"], 3) + self.assertEquals(res["xxx_col2"], 5) def test_bad_cache(self): # fixes #637332 @@ -170,7 +170,7 @@ def test_bad_cache(self): # 3. get collection info again, should find the new ones resp = self.app.get(self.root + "/info/collections") - self.assertEqual(len(resp.json), numcols + 1) + self.assertEquals(len(resp.json), numcols + 1) def test_get_collection_only(self): bsos = [{"id": str(i).zfill(2), "payload": "xxx"} for i in range(5)] @@ -179,14 +179,14 @@ def test_get_collection_only(self): # non-existent collections appear as empty resp = self.app.get(self.root + "/storage/nonexistent") res = resp.json - self.assertEqual(res, []) + self.assertEquals(res, []) # try just getting all items at once. resp = self.app.get(self.root + "/storage/xxx_col2") res = resp.json res.sort() - self.assertEqual(res, ["00", "01", "02", "03", "04"]) - self.assertEqual(int(resp.headers["X-Weave-Records"]), 5) + self.assertEquals(res, ["00", "01", "02", "03", "04"]) + self.assertEquals(int(resp.headers["X-Weave-Records"]), 5) # trying various filters @@ -196,7 +196,7 @@ def test_get_collection_only(self): res = self.app.get(self.root + "/storage/xxx_col2?ids=01,03,17") res = res.json res.sort() - self.assertEqual(res, ["01", "03"]) + self.assertEquals(res, ["01", "03"]) # "newer" # Returns only ids for objects in the collection that have been last @@ -215,15 +215,15 @@ def test_get_collection_only(self): self.assertTrue(ts1 < ts2) res = self.app.get(self.root + "/storage/xxx_col2?newer=%s" % ts1) - self.assertEqual(res.json, ["129"]) + self.assertEquals(res.json, ["129"]) res = self.app.get(self.root + "/storage/xxx_col2?newer=%s" % ts2) - self.assertEqual(res.json, []) + self.assertEquals(res.json, []) res = self.app.get( self.root + "/storage/xxx_col2?newer=%s" % (ts1 - 1) ) - self.assertEqual(sorted(res.json), ["128", "129"]) + self.assertEquals(sorted(res.json), ["128", "129"]) # "older" # Returns only ids for objects in the collection that have been last @@ -242,19 +242,19 @@ def test_get_collection_only(self): self.assertTrue(ts1 < ts2) res = self.app.get(self.root + "/storage/xxx_col2?older=%s" % ts1) - self.assertEqual(res.json, []) + self.assertEquals(res.json, []) res = self.app.get(self.root + "/storage/xxx_col2?older=%s" % ts2) - self.assertEqual(res.json, ["128"]) + self.assertEquals(res.json, ["128"]) res = self.app.get( self.root + "/storage/xxx_col2?older=%s" % (ts2 + 1) ) - self.assertEqual(sorted(res.json), ["128", "129"]) + self.assertEquals(sorted(res.json), ["128", "129"]) qs = "?older=%s&newer=%s" % (ts2 + 1, ts1) res = self.app.get(self.root + "/storage/xxx_col2" + qs) - self.assertEqual(sorted(res.json), ["129"]) + self.assertEquals(sorted(res.json), ["129"]) # "full" # If defined, returns the full BSO, rather than just the id. 
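The alternative-formats and newline-payload hunks that follow exercise the application/newlines body encoding: one JSON-serialized BSO per line, with newlines inside payloads staying escaped by the JSON encoding itself. A small sketch of building such a body, using serde_json purely for illustration:

    use serde_json::json;

    fn main() {
        // One JSON object per line; the "\n" characters inside payloads stay escaped,
        // so only the separator between records is a literal newline.
        let bsos = vec![
            json!({"id": "01", "payload": "mar\nco"}),
            json!({"id": "02", "payload": "\nmarco\npolo\n"}),
        ];
        let body = bsos
            .iter()
            .map(|bso| bso.to_string())
            .collect::<Vec<_>>()
            .join("\n");
        // Mirrors the test assertion: two records means exactly two lines.
        assert_eq!(body.split('\n').count(), 2);
        println!("{body}");
    }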
@@ -262,7 +262,7 @@ def test_get_collection_only(self): keys = list(res.json[0].keys()) keys.sort() wanted = ["id", "modified", "payload"] - self.assertEqual(keys, wanted) + self.assertEquals(keys, wanted) res = self.app.get(self.root + "/storage/xxx_col2") self.assertTrue(isinstance(res.json, list)) @@ -280,84 +280,84 @@ def test_get_collection_only(self): query_url = self.root + "/storage/xxx_col2?sort=index" res = self.app.get(query_url) all_items = res.json - self.assertEqual(len(all_items), 10) + self.assertEquals(len(all_items), 10) res = self.app.get(query_url + "&limit=2") - self.assertEqual(res.json, all_items[:2]) + self.assertEquals(res.json, all_items[:2]) # "offset" # Skips over items that have already been returned. next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&limit=3&offset=" + next_offset) - self.assertEqual(res.json, all_items[2:5]) + self.assertEquals(res.json, all_items[2:5]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) self.assertTrue("X-Weave-Next-Offset" not in res.headers) res = self.app.get(query_url + "&limit=10000&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) self.assertTrue("X-Weave-Next-Offset" not in res.headers) # "offset" again, this time ordering by descending timestamp. query_url = self.root + "/storage/xxx_col2?sort=newest" res = self.app.get(query_url) all_items = res.json - self.assertEqual(len(all_items), 10) + self.assertEquals(len(all_items), 10) res = self.app.get(query_url + "&limit=2") - self.assertEqual(res.json, all_items[:2]) + self.assertEquals(res.json, all_items[:2]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&limit=3&offset=" + next_offset) - self.assertEqual(res.json, all_items[2:5]) + self.assertEquals(res.json, all_items[2:5]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) self.assertTrue("X-Weave-Next-Offset" not in res.headers) res = self.app.get(query_url + "&limit=10000&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) # "offset" again, this time ordering by ascending timestamp. query_url = self.root + "/storage/xxx_col2?sort=oldest" res = self.app.get(query_url) all_items = res.json - self.assertEqual(len(all_items), 10) + self.assertEquals(len(all_items), 10) res = self.app.get(query_url + "&limit=2") - self.assertEqual(res.json, all_items[:2]) + self.assertEquals(res.json, all_items[:2]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&limit=3&offset=" + next_offset) - self.assertEqual(res.json, all_items[2:5]) + self.assertEquals(res.json, all_items[2:5]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) self.assertTrue("X-Weave-Next-Offset" not in res.headers) res = self.app.get(query_url + "&limit=10000&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) # "offset" once more, this time with no explicit ordering query_url = self.root + "/storage/xxx_col2?" 
res = self.app.get(query_url) all_items = res.json - self.assertEqual(len(all_items), 10) + self.assertEquals(len(all_items), 10) res = self.app.get(query_url + "&limit=2") - self.assertEqual(res.json, all_items[:2]) + self.assertEquals(res.json, all_items[:2]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&limit=3&offset=" + next_offset) - self.assertEqual(res.json, all_items[2:5]) + self.assertEquals(res.json, all_items[2:5]) next_offset = res.headers["X-Weave-Next-Offset"] res = self.app.get(query_url + "&offset=" + next_offset) - self.assertEqual(res.json, all_items[5:]) + self.assertEquals(res.json, all_items[5:]) self.assertTrue("X-Weave-Next-Offset" not in res.headers) res = self.app.get(query_url + "&limit=10000&offset=" + next_offset) @@ -374,15 +374,15 @@ def test_get_collection_only(self): res = self.app.get(self.root + "/storage/xxx_col2?sort=newest") res = res.json - self.assertEqual(res, ["02", "01", "00"]) + self.assertEquals(res, ["02", "01", "00"]) res = self.app.get(self.root + "/storage/xxx_col2?sort=oldest") res = res.json - self.assertEqual(res, ["00", "01", "02"]) + self.assertEquals(res, ["00", "01", "02"]) res = self.app.get(self.root + "/storage/xxx_col2?sort=index") res = res.json - self.assertEqual(res, ["01", "02", "00"]) + self.assertEquals(res, ["01", "02", "00"]) def test_alternative_formats(self): bsos = [{"id": str(i).zfill(2), "payload": "xxx"} for i in range(5)] @@ -393,18 +393,18 @@ def test_alternative_formats(self): self.root + "/storage/xxx_col2", headers=[("Accept", "application/json")], ) - self.assertEqual(res.content_type.split(";")[0], "application/json") + self.assertEquals(res.content_type.split(";")[0], "application/json") res = res.json res.sort() - self.assertEqual(res, ["00", "01", "02", "03", "04"]) + self.assertEquals(res, ["00", "01", "02", "03", "04"]) # application/newlines res = self.app.get( self.root + "/storage/xxx_col2", headers=[("Accept", "application/newlines")], ) - self.assertEqual(res.content_type, "application/newlines") + self.assertEquals(res.content_type, "application/newlines") self.assertTrue(res.body.endswith(b"\n")) res = [ @@ -412,11 +412,11 @@ def test_alternative_formats(self): for line in res.body.decode("utf-8").strip().split("\n") ] res.sort() - self.assertEqual(res, ["00", "01", "02", "03", "04"]) + self.assertEquals(res, ["00", "01", "02", "03", "04"]) # unspecified format defaults to json res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(res.content_type.split(";")[0], "application/json") + self.assertEquals(res.content_type.split(";")[0], "application/json") # unkown format gets a 406 self.app.get( @@ -432,7 +432,7 @@ def test_set_collection_with_if_modified_since(self): self.retry_post_json(self.root + "/storage/xxx_col2", bsos) # Get them all, along with their timestamps. res = self.app.get(self.root + "/storage/xxx_col2?full=true").json - self.assertEqual(len(res), 5) + self.assertEquals(len(res), 5) timestamps = sorted([r["modified"] for r in res]) # The timestamp of the collection should be the max of all those. 
self.app.get( @@ -455,8 +455,8 @@ def test_get_item(self): res = res.json keys = list(res.keys()) keys.sort() - self.assertEqual(keys, ["id", "modified", "payload"]) - self.assertEqual(res["id"], "01") + self.assertEquals(keys, ["id", "modified", "payload"]) + self.assertEquals(res["id"], "01") # unexisting object self.app.get(self.root + "/storage/xxx_col2/99", status=404) @@ -476,7 +476,7 @@ def test_get_item(self): self.root + "/storage/xxx_col2/01", headers={"X-If-Modified-Since": str(res["modified"] - 1)}, ) - self.assertEqual(res.json["id"], "01") + self.assertEquals(res.json["id"], "01") def test_set_item(self): # let's create an object @@ -484,14 +484,14 @@ def test_set_item(self): self.retry_put_json(self.root + "/storage/xxx_col2/12345", bso) res = self.app.get(self.root + "/storage/xxx_col2/12345") res = res.json - self.assertEqual(res["payload"], _PLD) + self.assertEquals(res["payload"], _PLD) # now let's update it bso = {"payload": "YYY"} self.retry_put_json(self.root + "/storage/xxx_col2/12345", bso) res = self.app.get(self.root + "/storage/xxx_col2/12345") res = res.json - self.assertEqual(res["payload"], "YYY") + self.assertEquals(res["payload"], "YYY") def test_set_collection(self): # sending two bsos @@ -503,10 +503,10 @@ def test_set_collection(self): # checking what we did res = self.app.get(self.root + "/storage/xxx_col2/12") res = res.json - self.assertEqual(res["payload"], _PLD) + self.assertEquals(res["payload"], _PLD) res = self.app.get(self.root + "/storage/xxx_col2/13") res = res.json - self.assertEqual(res["payload"], _PLD) + self.assertEquals(res["payload"], _PLD) # one more time, with changes bso1 = {"id": "13", "payload": "XyX"} @@ -517,10 +517,10 @@ def test_set_collection(self): # checking what we did res = self.app.get(self.root + "/storage/xxx_col2/14") res = res.json - self.assertEqual(res["payload"], _PLD) + self.assertEquals(res["payload"], _PLD) res = self.app.get(self.root + "/storage/xxx_col2/13") res = res.json - self.assertEqual(res["payload"], "XyX") + self.assertEquals(res["payload"], "XyX") # sending two bsos with one bad sortindex bso1 = {"id": "one", "payload": _PLD} @@ -541,7 +541,7 @@ def test_set_collection_input_formats(self): headers={"Content-Type": "application/newlines"}, ) items = self.app.get(self.root + "/storage/xxx_col2").json - self.assertEqual(len(items), 2) + self.assertEquals(len(items), 2) # If we send an unknown content type, we get an error. self.retry_delete(self.root + "/storage/xxx_col2") body = json_dumps(bsos) @@ -552,7 +552,7 @@ def test_set_collection_input_formats(self): status=415, ) items = self.app.get(self.root + "/storage/xxx_col2").json - self.assertEqual(len(items), 0) + self.assertEquals(len(items), 0) def test_set_item_input_formats(self): # If we send with application/json it should work. 
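The paging hunks above (sort, limit, offset, and the X-Weave-Next-Offset response header) spell out the contract a client follows to walk a collection without full=1, where each response body is a JSON array of ids. A rough client-side sketch of that loop, assuming a locally reachable server and reqwest with its blocking and json features, used purely for illustration; the Hawk Authorization header the tests attach is omitted, so a real request would also need it:

    use reqwest::blocking::Client;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::new();
        // Hypothetical local endpoint and collection name.
        let base = "http://localhost:8000/1.5/1/storage/xxx_col2?sort=index&limit=2";
        let mut offset: Option<String> = None;
        let mut ids: Vec<String> = Vec::new();

        loop {
            let url = match &offset {
                Some(o) => format!("{base}&offset={o}"),
                None => base.to_string(),
            };
            let resp = client.get(url).send()?;
            // The header is only present while more pages remain.
            offset = resp
                .headers()
                .get("X-Weave-Next-Offset")
                .and_then(|v| v.to_str().ok())
                .map(str::to_owned);
            ids.extend(resp.json::<Vec<String>>()?);
            if offset.is_none() {
                break;
            }
        }
        println!("fetched {} ids", ids.len());
        Ok(())
    }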
@@ -563,7 +563,7 @@ def test_set_item_input_formats(self): headers={"Content-Type": "application/json"}, ) item = self.app.get(self.root + "/storage/xxx_col2/TEST").json - self.assertEqual(item["payload"], _PLD) + self.assertEquals(item["payload"], _PLD) # If we send json with some other content type, it should fail self.retry_delete(self.root + "/storage/xxx_col2") self.app.put( @@ -580,7 +580,7 @@ def test_set_item_input_formats(self): headers={"Content-Type": "text/plain"}, ) item = self.app.get(self.root + "/storage/xxx_col2/TEST").json - self.assertEqual(item["payload"], _PLD) + self.assertEquals(item["payload"], _PLD) def test_app_newlines_when_payloads_contain_newlines(self): # Send some application/newlines with embedded newline chars. @@ -589,7 +589,7 @@ def test_app_newlines_when_payloads_contain_newlines(self): {"id": "02", "payload": "\nmarco\npolo\n"}, ] body = "\n".join(json_dumps(bso) for bso in bsos) - self.assertEqual(len(body.split("\n")), 2) + self.assertEquals(len(body.split("\n")), 2) self.app.post( self.root + "/storage/xxx_col2", body, @@ -597,10 +597,10 @@ def test_app_newlines_when_payloads_contain_newlines(self): ) # Read them back as JSON list, check payloads. items = self.app.get(self.root + "/storage/xxx_col2?full=1").json - self.assertEqual(len(items), 2) + self.assertEquals(len(items), 2) items.sort(key=lambda bso: bso["id"]) - self.assertEqual(items[0]["payload"], bsos[0]["payload"]) - self.assertEqual(items[1]["payload"], bsos[1]["payload"]) + self.assertEquals(items[0]["payload"], bsos[0]["payload"]) + self.assertEquals(items[1]["payload"], bsos[1]["payload"]) # Read them back as application/newlines, check payloads. res = self.app.get( self.root + "/storage/xxx_col2?full=1", @@ -612,10 +612,10 @@ def test_app_newlines_when_payloads_contain_newlines(self): json_loads(line) for line in res.body.decode("utf-8").strip().split("\n") ] - self.assertEqual(len(items), 2) + self.assertEquals(len(items), 2) items.sort(key=lambda bso: bso["id"]) - self.assertEqual(items[0]["payload"], bsos[0]["payload"]) - self.assertEqual(items[1]["payload"], bsos[1]["payload"]) + self.assertEquals(items[0]["payload"], bsos[0]["payload"]) + self.assertEquals(items[1]["payload"], bsos[1]["payload"]) def test_collection_usage(self): self.retry_delete(self.root + "/storage") @@ -629,7 +629,7 @@ def test_collection_usage(self): usage = res.json xxx_col2_size = usage["xxx_col2"] wanted = (len(bso1["payload"]) + len(bso2["payload"])) / 1024.0 - self.assertEqual(round(xxx_col2_size, 2), round(wanted, 2)) + self.assertEquals(round(xxx_col2_size, 2), round(wanted, 2)) def test_delete_collection_items(self): # creating a collection of three @@ -639,24 +639,24 @@ def test_delete_collection_items(self): bsos = [bso1, bso2, bso3] self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) # deleting all items self.retry_delete(self.root + "/storage/xxx_col2") items = self.app.get(self.root + "/storage/xxx_col2").json - self.assertEqual(len(items), 0) + self.assertEquals(len(items), 0) # Deletes the ids for objects in the collection that are in the # provided comma-separated list. 
self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) self.retry_delete(self.root + "/storage/xxx_col2?ids=12,14") res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 1) + self.assertEquals(len(res.json), 1) self.retry_delete(self.root + "/storage/xxx_col2?ids=13") res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 0) + self.assertEquals(len(res.json), 0) def test_delete_item(self): # creating a collection of three @@ -666,13 +666,13 @@ def test_delete_item(self): bsos = [bso1, bso2, bso3] self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) ts = float(res.headers["X-Last-Modified"]) # deleting item 13 self.retry_delete(self.root + "/storage/xxx_col2/13") res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 2) + self.assertEquals(len(res.json), 2) # unexisting item should return a 404 self.retry_delete(self.root + "/storage/xxx_col2/12982", status=404) @@ -689,14 +689,14 @@ def test_delete_storage(self): bsos = [bso1, bso2, bso3] self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) # deleting all self.retry_delete(self.root + "/storage") items = self.app.get(self.root + "/storage/xxx_col2").json - self.assertEqual(len(items), 0) + self.assertEquals(len(items), 0) self.retry_delete(self.root + "/storage/xxx_col2", status=200) - self.assertEqual(len(items), 0) + self.assertEquals(len(items), 0) def test_x_timestamp_header(self): if self.distant: @@ -786,8 +786,8 @@ def test_ifunmodifiedsince(self): ts = res2.headers["X-Last-Modified"] # All of those should have left the BSO unchanged res2 = self.app.get(self.root + "/storage/xxx_col2/12345") - self.assertEqual(res2.json["payload"], _PLD) - self.assertEqual( + self.assertEquals(res2.json["payload"], _PLD) + self.assertEquals( res2.headers["X-Last-Modified"], res.headers["X-Last-Modified"] ) # Using an X-If-Unmodified-Since equal to @@ -834,30 +834,30 @@ def test_quota(self): self.retry_put_json(self.root + "/storage/xxx_col2/12345", bso) res = self.app.get(self.root + "/info/quota") used = res.json[0] - self.assertEqual(used - old_used, len(_PLD) / 1024.0) + self.assertEquals(used - old_used, len(_PLD) / 1024.0) def test_get_collection_ttl(self): bso = {"payload": _PLD, "ttl": 0} res = self.retry_put_json(self.root + "/storage/xxx_col2/12345", bso) time.sleep(1.1) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(res.json, []) + self.assertEquals(res.json, []) bso = {"payload": _PLD, "ttl": 2} res = self.retry_put_json(self.root + "/storage/xxx_col2/123456", bso) # it should exists now res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 1) + self.assertEquals(len(res.json), 1) # trying a second put again self.retry_put_json(self.root + "/storage/xxx_col2/123456", bso) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 1) + self.assertEquals(len(res.json), 1) time.sleep(2.1) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 0) + self.assertEquals(len(res.json), 0) def test_multi_item_post_limits(self): res = 
self.app.get(self.root + "/info/configuration") @@ -881,8 +881,8 @@ def test_multi_item_post_limits(self): ] res = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = res.json - self.assertEqual(len(res["success"]), max_count - 5) - self.assertEqual(len(res["failed"]), 0) + self.assertEquals(len(res["success"]), max_count - 5) + self.assertEquals(len(res["failed"]), 0) # Uploading max_count+5 items should produce five failures. bsos = [ @@ -891,8 +891,8 @@ def test_multi_item_post_limits(self): ] res = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = res.json - self.assertEqual(len(res["success"]), max_count) - self.assertEqual(len(res["failed"]), 5) + self.assertEquals(len(res["success"]), max_count) + self.assertEquals(len(res["failed"]), 5) # Uploading items such that the last item puts us over the # cumulative limit on payload size, should produce 1 failure. @@ -911,8 +911,8 @@ def test_multi_item_post_limits(self): res = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = res.json - self.assertEqual(len(res["success"]), max_items) - self.assertEqual(len(res["failed"]), 1) + self.assertEquals(len(res["success"]), max_items) + self.assertEquals(len(res["failed"]), 1) def test_weird_args(self): # pushing some data in xxx_col2 @@ -934,7 +934,7 @@ def test_weird_args(self): # what about a crazy ids= string ? ids = ",".join([randtext(10) for i in range(100)]) res = self.app.get(self.root + "/storage/xxx_col2?ids=%s" % ids) - self.assertEqual(res.json, []) + self.assertEquals(res.json, []) # trying unexpected args - they should not break self.app.get(self.root + "/storage/xxx_col2?blabla=1", status=200) @@ -950,7 +950,7 @@ def test_guid_deletion(self): ] res = self.retry_post_json(self.root + "/storage/passwords", bsos) res = res.json - self.assertEqual(len(res["success"]), 5) + self.assertEquals(len(res["success"]), 5) # now deleting some of them ids = ",".join( @@ -960,25 +960,25 @@ def test_guid_deletion(self): self.retry_delete(self.root + "/storage/passwords?ids=%s" % ids) res = self.app.get(self.root + "/storage/passwords?ids=%s" % ids) - self.assertEqual(len(res.json), 0) + self.assertEquals(len(res.json), 0) res = self.app.get(self.root + "/storage/passwords") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) def test_specifying_ids_with_percent_encoded_query_string(self): # create some items bsos = [{"id": "test-%d" % i, "payload": _PLD} for i in range(5)] res = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = res.json - self.assertEqual(len(res["success"]), 5) + self.assertEquals(len(res["success"]), 5) # now delete some of them ids = ",".join(["test-%d" % i for i in range(2)]) ids = urllib.request.quote(ids) self.retry_delete(self.root + "/storage/xxx_col2?ids=%s" % ids) # check that the correct items were deleted res = self.app.get(self.root + "/storage/xxx_col2?ids=%s" % ids) - self.assertEqual(len(res.json), 0) + self.assertEquals(len(res.json), 0) res = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(len(res.json), 3) + self.assertEquals(len(res.json), 3) def test_timestamp_numbers_are_decimals(self): # Create five items with different timestamps. 
@@ -1008,7 +1008,7 @@ def test_timestamp_numbers_are_decimals(self): res = self.app.get(self.root + "/storage/xxx_col2?newer=%s" % ts) res = res.json try: - self.assertEqual(sorted(res), ["03", "04"]) + self.assertEquals(sorted(res), ["03", "04"]) except AssertionError: # need to display the whole collection to understand the issue msg = "Timestamp used: %s" % ts @@ -1037,7 +1037,7 @@ def test_strict_newer(self): # of bso 1 and 2, should not return them res = self.app.get(self.root + "/storage/xxx_meh?newer=%s" % ts) res = res.json - self.assertEqual(sorted(res), ["03", "04"]) + self.assertEquals(sorted(res), ["03", "04"]) def test_strict_older(self): # send two bsos in the 'xxx_meh' collection @@ -1057,7 +1057,7 @@ def test_strict_older(self): # of bso 3 and 4, should not return them res = self.app.get(self.root + "/storage/xxx_meh?older=%s" % ts) res = res.json - self.assertEqual(sorted(res), ["01", "02"]) + self.assertEquals(sorted(res), ["01", "02"]) def test_handling_of_invalid_json_in_bso_uploads(self): # Single upload with JSON that's not a BSO. @@ -1065,32 +1065,32 @@ def test_handling_of_invalid_json_in_bso_uploads(self): res = self.retry_put_json( self.root + "/storage/xxx_col2/invalid", bso, status=400 ) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) bso = 42 res = self.retry_put_json( self.root + "/storage/xxx_col2/invalid", bso, status=400 ) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) bso = {"id": ["01", "02"], "payload": {"3": "4"}} res = self.retry_put_json( self.root + "/storage/xxx_col2/invalid", bso, status=400 ) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Batch upload with JSON that's not a list of BSOs bsos = "notalist" res = self.retry_post_json( self.root + "/storage/xxx_col2", bsos, status=400 ) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) bsos = 42 res = self.retry_post_json( self.root + "/storage/xxx_col2", bsos, status=400 ) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Batch upload a list with something that's not a valid data dict. # It should fail out entirely, as the input is seriously broken. 
@@ -1104,8 +1104,8 @@ def test_handling_of_invalid_json_in_bso_uploads(self): bsos = [{"id": "01", "payload": "GOOD"}, {"id": "02", "invalid": "ya"}] res = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) res = res.json - self.assertEqual(len(res["success"]), 1) - self.assertEqual(len(res["failed"]), 1) + self.assertEquals(len(res["success"]), 1) + self.assertEquals(len(res["failed"]), 1) def test_handling_of_invalid_bso_fields(self): coll_url = self.root + "/storage/xxx_col2" @@ -1142,43 +1142,43 @@ def test_handling_of_invalid_bso_fields(self): res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid sortindex - not an integer bso = {"id": "TEST", "payload": "testing", "sortindex": "2.6"} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid sortindex - larger than max value bso = {"id": "TEST", "payload": "testing", "sortindex": "1" + "0" * 9} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid payload - not a string bso = {"id": "TEST", "payload": 42} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid ttl - not an integer bso = {"id": "TEST", "payload": "testing", "ttl": "eh?"} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid ttl - not an integer bso = {"id": "TEST", "payload": "testing", "ttl": "4.2"} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) # Invalid BSO - unknown field bso = {"id": "TEST", "unexpected": "spanish-inquisition"} res = self.retry_post_json(coll_url, [bso]) self.assertTrue(res.json["failed"] and not res.json["success"]) res = self.retry_put_json(coll_url + "/" + bso["id"], bso, status=400) - self.assertEqual(res.json, WEAVE_INVALID_WBO) + self.assertEquals(res.json, WEAVE_INVALID_WBO) def test_that_batch_gets_are_limited_to_max_number_of_ids(self): bso = {"id": "01", "payload": "testing"} @@ -1187,12 +1187,12 @@ def test_that_batch_gets_are_limited_to_max_number_of_ids(self): # Getting with less than the limit works OK. ids = ",".join(str(i).zfill(2) for i in range(BATCH_MAX_IDS - 1)) res = self.app.get(self.root + "/storage/xxx_col2?ids=" + ids) - self.assertEqual(res.json, ["01"]) + self.assertEquals(res.json, ["01"]) # Getting with equal to the limit works OK. 
ids = ",".join(str(i).zfill(2) for i in range(BATCH_MAX_IDS)) res = self.app.get(self.root + "/storage/xxx_col2?ids=" + ids) - self.assertEqual(res.json, ["01"]) + self.assertEquals(res.json, ["01"]) # Getting with more than the limit fails. ids = ",".join(str(i).zfill(2) for i in range(BATCH_MAX_IDS + 1)) @@ -1306,11 +1306,11 @@ def test_update_of_ttl_without_sending_data(self): time.sleep(0.8) items = self.app.get(self.root + "/storage/xxx_col2?full=1").json items = dict((item["id"], item) for item in items) - self.assertEqual(sorted(list(items.keys())), ["TEST2", "TEST3"]) + self.assertEquals(sorted(list(items.keys())), ["TEST2", "TEST3"]) # The existing item should have retained its payload. # The new item should have got a default payload of empty string. - self.assertEqual(items["TEST2"]["payload"], "x") - self.assertEqual(items["TEST3"]["payload"], "") + self.assertEquals(items["TEST2"]["payload"], "x") + self.assertEquals(items["TEST3"]["payload"], "") ts2 = items["TEST2"]["modified"] ts3 = items["TEST3"]["modified"] self.assertTrue(ts2 < ts3) @@ -1330,26 +1330,26 @@ def test_bulk_update_of_ttls_without_sending_data(self): bsos = [{"id": str(i).zfill(2), "ttl": 10} for i in range(3, 7)] bsos[0]["payload"] = "xx" r = self.retry_post_json(self.root + "/storage/xxx_col2", bsos) - self.assertEqual(len(r.json["success"]), 4) + self.assertEquals(len(r.json["success"]), 4) ts2 = float(r.headers["X-Last-Modified"]) # If we wait then items 0, 1, 2 should have expired. # Items 3, 4, 5, 6 should still exist. time.sleep(0.8) items = self.app.get(self.root + "/storage/xxx_col2?full=1").json items = dict((item["id"], item) for item in items) - self.assertEqual(sorted(list(items.keys())), ["03", "04", "05", "06"]) + self.assertEquals(sorted(list(items.keys())), ["03", "04", "05", "06"]) # Items 3 and 4 should have the specified payloads. # Items 5 and 6 should have payload defaulted to empty string. - self.assertEqual(items["03"]["payload"], "xx") - self.assertEqual(items["04"]["payload"], "x") - self.assertEqual(items["05"]["payload"], "") - self.assertEqual(items["06"]["payload"], "") + self.assertEquals(items["03"]["payload"], "xx") + self.assertEquals(items["04"]["payload"], "x") + self.assertEquals(items["05"]["payload"], "") + self.assertEquals(items["06"]["payload"], "") # All items created or modified by the request should get their # timestamps update. Just bumping the ttl should not bump timestamp. - self.assertEqual(items["03"]["modified"], ts2) - self.assertEqual(items["04"]["modified"], ts1) - self.assertEqual(items["05"]["modified"], ts2) - self.assertEqual(items["06"]["modified"], ts2) + self.assertEquals(items["03"]["modified"], ts2) + self.assertEquals(items["04"]["modified"], ts1) + self.assertEquals(items["05"]["modified"], ts2) + self.assertEquals(items["06"]["modified"], ts2) def test_that_negative_integer_fields_are_not_accepted(self): # ttls cannot be negative @@ -1401,18 +1401,18 @@ def test_meta_global_sanity(self): # in the base tests because there's nothing memcached-specific here. 
self.app.get(self.root + "/storage/meta/global", status=404) res = self.app.get(self.root + "/storage/meta") - self.assertEqual(res.json, []) + self.assertEquals(res.json, []) self.retry_put_json( self.root + "/storage/meta/global", {"payload": "blob"} ) res = self.app.get(self.root + "/storage/meta") - self.assertEqual(res.json, ["global"]) + self.assertEquals(res.json, ["global"]) res = self.app.get(self.root + "/storage/meta/global") - self.assertEqual(res.json["payload"], "blob") + self.assertEquals(res.json["payload"], "blob") # It should not have extra keys. keys = list(res.json.keys()) keys.sort() - self.assertEqual(keys, ["id", "modified", "payload"]) + self.assertEquals(keys, ["id", "modified", "payload"]) # It should have a properly-formatted "modified" field. modified_re = r"['\"]modified['\"]:\s*[0-9]+\.[0-9][0-9]\s*[,}]" self.assertTrue(re.search(modified_re, res.body.decode("utf-8"))) @@ -1423,12 +1423,12 @@ def test_meta_global_sanity(self): ) ts = float(res.headers["X-Weave-Timestamp"]) res = self.app.get(self.root + "/storage/meta/global") - self.assertEqual(res.json["modified"], ts) + self.assertEquals(res.json["modified"], ts) def test_that_404_responses_have_a_json_body(self): res = self.app.get(self.root + "/nonexistent/url", status=404) - self.assertEqual(res.content_type, "application/json") - self.assertEqual(res.json, 0) + self.assertEquals(res.content_type, "application/json") + self.assertEquals(res.json, 0) def test_that_internal_server_fields_are_not_echoed(self): self.retry_post_json( @@ -1438,7 +1438,7 @@ def test_that_internal_server_fields_are_not_echoed(self): self.root + "/storage/xxx_col1/two", {"payload": "blub"} ) res = self.app.get(self.root + "/storage/xxx_col1?full=1") - self.assertEqual(len(res.json), 2) + self.assertEquals(len(res.json), 2) for item in res.json: self.assertTrue("id" in item) self.assertTrue("payload" in item) @@ -1464,8 +1464,8 @@ def test_accessing_info_collections_with_an_expired_token(self): # Check that we can read the info correctly. resp = self.app.get(self.root + "/info/collections") - self.assertEqual(list(resp.json.keys()), ["xxx_col1"]) - self.assertEqual(resp.json["xxx_col1"], ts) + self.assertEquals(list(resp.json.keys()), ["xxx_col1"]) + self.assertEquals(resp.json["xxx_col1"], ts) # Forge an expired token to use for the test. auth_policy = self.config.registry.getUtility(IAuthenticationPolicy) @@ -1490,8 +1490,8 @@ def test_accessing_info_collections_with_an_expired_token(self): # But it still allows access to /info/collections. resp = self.app.get(self.root + "/info/collections") - self.assertEqual(list(resp.json.keys()), ["xxx_col1"]) - self.assertEqual(resp.json["xxx_col1"], ts) + self.assertEquals(list(resp.json.keys()), ["xxx_col1"]) + self.assertEquals(resp.json["xxx_col1"], ts) def test_pagination_with_newer_and_sort_by_oldest(self): # Twelve bsos with three different modification times. @@ -1512,7 +1512,7 @@ def test_pagination_with_newer_and_sort_by_oldest(self): # Try with several different pagination sizes, # to hit various boundary conditions. for limit in (2, 3, 4, 5, 6): - for (start, ts) in timestamps: + for start, ts in timestamps: query_url = ( self.root + "/storage/xxx_col2?full=true&sort=oldest" ) @@ -1535,7 +1535,7 @@ def test_pagination_with_newer_and_sort_by_oldest(self): # They should all be in order, starting from the item # *after* the one that was used for the newer= timestamp. 
- self.assertEqual( + self.assertEquals( sorted(int(item["id"]) for item in items), list(range(start + 1, NUM_ITEMS)), ) @@ -1559,7 +1559,7 @@ def test_pagination_with_older_and_sort_by_newest(self): # Try with several different pagination sizes, # to hit various boundary conditions. for limit in (2, 3, 4, 5, 6): - for (start, ts) in timestamps: + for start, ts in timestamps: query_url = ( self.root + "/storage/xxx_col2?full=true&sort=newest" ) @@ -1582,7 +1582,7 @@ def test_pagination_with_older_and_sort_by_newest(self): # They should all be in order, up to the item *before* # the one that was used for the older= timestamp. - self.assertEqual( + self.assertEquals( sorted(int(item["id"]) for item in items), list(range(0, start)), ) @@ -1613,15 +1613,15 @@ def test_batches(self): batch = resp.json["batch"] # The collection should not be reported as modified. - self.assertEqual(orig_modified, resp.headers["X-Last-Modified"]) + self.assertEquals(orig_modified, resp.headers["X-Last-Modified"]) # And reading from it shouldn't show the new records yet. resp = self.app.get(endpoint) res = resp.json res.sort() - self.assertEqual(res, ["12", "13"]) - self.assertEqual(int(resp.headers["X-Weave-Records"]), 2) - self.assertEqual(orig_modified, resp.headers["X-Last-Modified"]) + self.assertEquals(res, ["12", "13"]) + self.assertEquals(int(resp.headers["X-Weave-Records"]), 2) + self.assertEquals(orig_modified, resp.headers["X-Last-Modified"]) bso5 = {"id": "c", "payload": "tinsel"} bso6 = {"id": "13", "payload": "portnoy"} @@ -1629,31 +1629,31 @@ def test_batches(self): commit = "?batch={0}&commit=true".format(batch) resp = self.retry_post_json(endpoint + commit, [bso5, bso6, bso0]) committed = resp.json["modified"] - self.assertEqual( + self.assertEquals( resp.json["modified"], float(resp.headers["X-Last-Modified"]) ) # make sure /info/collections got updated resp = self.app.get(self.root + "/info/collections") - self.assertEqual(float(resp.headers["X-Last-Modified"]), committed) - self.assertEqual(resp.json["xxx_col2"], committed) + self.assertEquals(float(resp.headers["X-Last-Modified"]), committed) + self.assertEquals(resp.json["xxx_col2"], committed) # make sure the changes applied resp = self.app.get(endpoint) res = resp.json res.sort() - self.assertEqual(res, ["12", "13", "14", "a", "b", "c"]) - self.assertEqual(int(resp.headers["X-Weave-Records"]), 6) + self.assertEquals(res, ["12", "13", "14", "a", "b", "c"]) + self.assertEquals(int(resp.headers["X-Weave-Records"]), 6) resp = self.app.get(endpoint + "/13") - self.assertEqual(resp.json["payload"], "portnoy") - self.assertEqual(committed, float(resp.headers["X-Last-Modified"])) - self.assertEqual(committed, resp.json["modified"]) + self.assertEquals(resp.json["payload"], "portnoy") + self.assertEquals(committed, float(resp.headers["X-Last-Modified"])) + self.assertEquals(committed, resp.json["modified"]) resp = self.app.get(endpoint + "/c") - self.assertEqual(resp.json["payload"], "tinsel") - self.assertEqual(committed, resp.json["modified"]) + self.assertEquals(resp.json["payload"], "tinsel") + self.assertEquals(committed, resp.json["modified"]) resp = self.app.get(endpoint + "/14") - self.assertEqual(resp.json["payload"], "itsybitsy") - self.assertEqual(committed, resp.json["modified"]) + self.assertEquals(resp.json["payload"], "itsybitsy") + self.assertEquals(committed, resp.json["modified"]) # empty commit POST bso7 = {"id": "a", "payload": "burrito"} @@ -1665,15 +1665,15 @@ def test_batches(self): resp1 = self.retry_post_json(endpoint + 
commit, []) committed = resp1.json["modified"] - self.assertEqual(committed, float(resp1.headers["X-Last-Modified"])) + self.assertEquals(committed, float(resp1.headers["X-Last-Modified"])) resp2 = self.app.get(endpoint + "/a") - self.assertEqual(committed, float(resp2.headers["X-Last-Modified"])) - self.assertEqual(committed, resp2.json["modified"]) - self.assertEqual(resp2.json["payload"], "burrito") + self.assertEquals(committed, float(resp2.headers["X-Last-Modified"])) + self.assertEquals(committed, resp2.json["modified"]) + self.assertEquals(resp2.json["payload"], "burrito") resp3 = self.app.get(endpoint + "/e") - self.assertEqual(committed, resp3.json["modified"]) + self.assertEquals(committed, resp3.json["modified"]) def test_aaa_batch_commit_collision(self): # It's possible that a batch contain a BSO inside a batch as well @@ -1720,64 +1720,64 @@ def test_batch_size_limits(self): self.assertTrue("max_request_bytes" in limits) endpoint = self.root + "/storage/xxx_col2?batch=true" -# There are certain obvious constraints on these limits, -# violations of which would be very confusing for clients. -# -# self.assertTrue( -# limits['max_request_bytes'] > limits['max_post_bytes'] -# ) -# self.assertTrue( -# limits['max_post_bytes'] >= limits['max_record_payload_bytes'] -# ) -# self.assertTrue( -# limits['max_total_records'] >= limits['max_post_records'] -# ) -# self.assertTrue( -# limits['max_total_bytes'] >= limits['max_post_bytes'] -# ) -# -# # `max_post_records` is an (inclusive) limit on -# # the number of items in a single post. -# -# res = self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Records': str(limits['max_post_records']) -# }) -# self.assertFalse(res.json['failed']) -# res = self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Records': str(limits['max_post_records'] + 1) -# }, status=400) -# self.assertEqual(res.json, WEAVE_SIZE_LIMIT_EXCEEDED) -# -# bsos = [{'id': str(x), 'payload': ''} -# for x in range(limits['max_post_records'])] -# res = self.retry_post_json(endpoint, bsos) -# self.assertFalse(res.json['failed']) -# bsos.append({'id': 'toomany', 'payload': ''}) -# res = self.retry_post_json(endpoint, bsos) -# self.assertEqual(res.json['failed']['toomany'], 'retry bso') -# -# # `max_total_records` is an (inclusive) limit on the -# # total number of items in a batch. We can only enforce -# # it if the client tells us this via header. -# -# self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Total-Records': str(limits['max_total_records']) -# }) -# res = self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Total-Records': str(limits['max_total_records'] + 1) -# }, status=400) -# self.assertEqual(res.json, WEAVE_SIZE_LIMIT_EXCEEDED) -# -# # `max_post_bytes` is an (inclusive) limit on the -# # total size of payloads in a single post. -# -# self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Bytes': str(limits['max_post_bytes']) -# }) -# res = self.retry_post_json(endpoint, [], headers={ -# 'X-Weave-Bytes': str(limits['max_post_bytes'] + 1) -# }, status=400) -# self.assertEqual(res.json, WEAVE_SIZE_LIMIT_EXCEEDED) + # There are certain obvious constraints on these limits, + # violations of which would be very confusing for clients. 
+        #
+        # self.assertTrue(
+        #     limits['max_request_bytes'] > limits['max_post_bytes']
+        # )
+        # self.assertTrue(
+        #     limits['max_post_bytes'] >= limits['max_record_payload_bytes']
+        # )
+        # self.assertTrue(
+        #     limits['max_total_records'] >= limits['max_post_records']
+        # )
+        # self.assertTrue(
+        #     limits['max_total_bytes'] >= limits['max_post_bytes']
+        # )
+        #
+        # # `max_post_records` is an (inclusive) limit on
+        # # the number of items in a single post.
+        #
+        # res = self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Records': str(limits['max_post_records'])
+        # })
+        # self.assertFalse(res.json['failed'])
+        # res = self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Records': str(limits['max_post_records'] + 1)
+        # }, status=400)
+        # self.assertEquals(res.json, WEAVE_SIZE_LIMIT_EXCEEDED)
+        #
+        # bsos = [{'id': str(x), 'payload': ''}
+        #         for x in range(limits['max_post_records'])]
+        # res = self.retry_post_json(endpoint, bsos)
+        # self.assertFalse(res.json['failed'])
+        # bsos.append({'id': 'toomany', 'payload': ''})
+        # res = self.retry_post_json(endpoint, bsos)
+        # self.assertEquals(res.json['failed']['toomany'], 'retry bso')
+        #
+        # # `max_total_records` is an (inclusive) limit on the
+        # # total number of items in a batch. We can only enforce
+        # # it if the client tells us this via header.
+        #
+        # self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Total-Records': str(limits['max_total_records'])
+        # })
+        # res = self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Total-Records': str(limits['max_total_records'] + 1)
+        # }, status=400)
+        # self.assertEquals(res.json, WEAVE_SIZE_LIMIT_EXCEEDED)
+        #
+        # # `max_post_bytes` is an (inclusive) limit on the
+        # # total size of payloads in a single post.
+        #
+        # self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Bytes': str(limits['max_post_bytes'])
+        # })
+        # res = self.retry_post_json(endpoint, [], headers={
+        #     'X-Weave-Bytes': str(limits['max_post_bytes'] + 1)
+        # }, status=400)
+        # self.assertEquals(res.json, WEAVE_SIZE_LIMIT_EXCEEDED)
         bsos = [
             {"id": "little", "payload": "XXX"},
             {"id": "big", "payload": "X" * (limits["max_post_bytes"] - 3)},
@@ -1786,8 +1786,8 @@ def test_batch_size_limits(self):
         self.assertFalse(res.json["failed"])
         bsos[1]["payload"] += "X"
         res = self.retry_post_json(endpoint, bsos)
-        self.assertEqual(res.json["success"], ["little"])
-        self.assertEqual(res.json["failed"]["big"], "retry bytes")
+        self.assertEquals(res.json["success"], ["little"])
+        self.assertEquals(res.json["failed"]["big"], "retry bytes")

         # `max_total_bytes` is an (inclusive) limit on the
         # total size of all payloads in a batch. We can only enforce
@@ -1806,7 +1806,7 @@ def test_batch_size_limits(self):
             },
             status=400,
         )
-        self.assertEqual(res.json, WEAVE_SIZE_LIMIT_EXCEEDED)
+        self.assertEquals(res.json, WEAVE_SIZE_LIMIT_EXCEEDED)

     def test_batch_partial_update(self):
         collection = self.root + "/storage/xxx_col2"
@@ -1824,18 +1824,18 @@ def test_batch_partial_update(self):
         ]
         resp = self.retry_post_json(collection + "?batch=true", bsos)
         batch = resp.json["batch"]
-        self.assertEqual(orig_ts, float(resp.headers["X-Last-Modified"]))
+        self.assertEquals(orig_ts, float(resp.headers["X-Last-Modified"]))

         # The updated item hasn't been written yet.
resp = self.app.get(collection + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 2) - self.assertEqual(res[0]["payload"], "aai") - self.assertEqual(res[1]["payload"], "bee") - self.assertEqual(res[0]["modified"], orig_ts) - self.assertEqual(res[1]["modified"], orig_ts) - self.assertEqual(res[1]["sortindex"], 17) + self.assertEquals(len(res), 2) + self.assertEquals(res[0]["payload"], "aai") + self.assertEquals(res[1]["payload"], "bee") + self.assertEquals(res[0]["modified"], orig_ts) + self.assertEquals(res[1]["modified"], orig_ts) + self.assertEquals(res[1]["sortindex"], 17) endpoint = collection + "?batch={0}&commit=true".format(batch) resp = self.retry_post_json(endpoint, []) @@ -1845,16 +1845,16 @@ def test_batch_partial_update(self): resp = self.app.get(collection + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 3) - self.assertEqual(res[0]["payload"], "aai") - self.assertEqual(res[1]["payload"], "bii") - self.assertEqual(res[2]["payload"], "sea") - self.assertEqual(res[0]["modified"], orig_ts) - self.assertEqual(res[1]["modified"], commit_ts) - self.assertEqual(res[2]["modified"], commit_ts) + self.assertEquals(len(res), 3) + self.assertEquals(res[0]["payload"], "aai") + self.assertEquals(res[1]["payload"], "bii") + self.assertEquals(res[2]["payload"], "sea") + self.assertEquals(res[0]["modified"], orig_ts) + self.assertEquals(res[1]["modified"], commit_ts) + self.assertEquals(res[2]["modified"], commit_ts) # Fields not touched by the batch, should have been preserved. - self.assertEqual(res[1]["sortindex"], 17) + self.assertEquals(res[1]["sortindex"], 17) def test_batch_ttl_update(self): collection = self.root + "/storage/xxx_col2" @@ -1874,28 +1874,28 @@ def test_batch_ttl_update(self): resp = self.retry_post_json( endpoint, [{"id": "a", "ttl": 2}], status=202 ) - self.assertEqual(orig_ts, float(resp.headers["X-Last-Modified"])) + self.assertEquals(orig_ts, float(resp.headers["X-Last-Modified"])) resp = self.retry_post_json( endpoint, [{"id": "b", "ttl": 2}], status=202 ) - self.assertEqual(orig_ts, float(resp.headers["X-Last-Modified"])) + self.assertEquals(orig_ts, float(resp.headers["X-Last-Modified"])) resp = self.retry_post_json(endpoint + "&commit=true", [], status=200) # The payloads should be unchanged resp = self.app.get(collection + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 3) - self.assertEqual(res[0]["payload"], "ayy") - self.assertEqual(res[1]["payload"], "bea") - self.assertEqual(res[2]["payload"], "see") + self.assertEquals(len(res), 3) + self.assertEquals(res[0]["payload"], "ayy") + self.assertEquals(res[1]["payload"], "bea") + self.assertEquals(res[2]["payload"], "see") # If we wait, the ttls should kick in time.sleep(2.1) resp = self.app.get(collection + "?full=1") res = resp.json - self.assertEqual(len(res), 1) - self.assertEqual(res[0]["payload"], "see") + self.assertEquals(len(res), 1) + self.assertEquals(res[0]["payload"], "see") def test_batch_ttl_is_based_on_commit_timestamp(self): collection = self.root + "/storage/xxx_col2" @@ -1917,14 +1917,14 @@ def test_batch_ttl_is_based_on_commit_timestamp(self): time.sleep(1.6) resp = self.app.get(collection) res = resp.json - self.assertEqual(len(res), 1) - self.assertEqual(res[0], "a") + self.assertEquals(len(res), 1) + self.assertEquals(res[0], "a") # Wait some more, and the ttl should kick in. 
time.sleep(1.6) resp = self.app.get(collection) res = resp.json - self.assertEqual(len(res), 0) + self.assertEquals(len(res), 0) def test_batch_with_immediate_commit(self): collection = self.root + "/storage/xxx_col2" @@ -1942,17 +1942,17 @@ def test_batch_with_immediate_commit(self): committed = resp.json["modified"] resp = self.app.get(self.root + "/info/collections") - self.assertEqual(float(resp.headers["X-Last-Modified"]), committed) - self.assertEqual(resp.json["xxx_col2"], committed) + self.assertEquals(float(resp.headers["X-Last-Modified"]), committed) + self.assertEquals(resp.json["xxx_col2"], committed) resp = self.app.get(collection + "?full=1") - self.assertEqual(float(resp.headers["X-Last-Modified"]), committed) + self.assertEquals(float(resp.headers["X-Last-Modified"]), committed) res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 3) - self.assertEqual(res[0]["payload"], "aih") - self.assertEqual(res[1]["payload"], "bie") - self.assertEqual(res[2]["payload"], "cee") + self.assertEquals(len(res), 3) + self.assertEquals(res[0]["payload"], "aih") + self.assertEquals(res[1]["payload"], "bie") + self.assertEquals(res[2]["payload"], "cee") def test_batch_uploads_properly_update_info_collections(self): collection1 = self.root + "/storage/xxx_col1" @@ -1970,9 +1970,9 @@ def test_batch_uploads_properly_update_info_collections(self): ts2 = resp.json["modified"] resp = self.app.get(self.root + "/info/collections") - self.assertEqual(float(resp.headers["X-Last-Modified"]), ts2) - self.assertEqual(resp.json["xxx_col1"], ts1) - self.assertEqual(resp.json["xxx_col2"], ts2) + self.assertEquals(float(resp.headers["X-Last-Modified"]), ts2) + self.assertEquals(resp.json["xxx_col1"], ts1) + self.assertEquals(resp.json["xxx_col2"], ts2) # Overwrite in place, timestamp should change. 
resp = self.retry_post_json( @@ -1982,9 +1982,9 @@ def test_batch_uploads_properly_update_info_collections(self): ts2 = resp.json["modified"] resp = self.app.get(self.root + "/info/collections") - self.assertEqual(float(resp.headers["X-Last-Modified"]), ts2) - self.assertEqual(resp.json["xxx_col1"], ts1) - self.assertEqual(resp.json["xxx_col2"], ts2) + self.assertEquals(float(resp.headers["X-Last-Modified"]), ts2) + self.assertEquals(resp.json["xxx_col1"], ts1) + self.assertEquals(resp.json["xxx_col2"], ts2) # Add new items, timestamp should change resp = self.retry_post_json( @@ -1996,9 +1996,9 @@ def test_batch_uploads_properly_update_info_collections(self): ts1 = resp.json["modified"] resp = self.app.get(self.root + "/info/collections") - self.assertEqual(float(resp.headers["X-Last-Modified"]), ts1) - self.assertEqual(resp.json["xxx_col1"], ts1) - self.assertEqual(resp.json["xxx_col2"], ts2) + self.assertEquals(float(resp.headers["X-Last-Modified"]), ts1) + self.assertEquals(resp.json["xxx_col1"], ts1) + self.assertEquals(resp.json["xxx_col2"], ts2) def test_batch_with_failing_bsos(self): collection = self.root + "/storage/xxx_col2" @@ -2007,8 +2007,8 @@ def test_batch_with_failing_bsos(self): {"id": "b\n", "payload": "i am invalid", "sortindex": 17}, ] resp = self.retry_post_json(collection + "?batch=true", bsos) - self.assertEqual(len(resp.json["failed"]), 1) - self.assertEqual(len(resp.json["success"]), 1) + self.assertEquals(len(resp.json["failed"]), 1) + self.assertEquals(len(resp.json["success"]), 1) batch = resp.json["batch"] bsos = [ @@ -2017,8 +2017,8 @@ def test_batch_with_failing_bsos(self): ] endpoint = collection + "?batch={0}&commit=true".format(batch) resp = self.retry_post_json(endpoint, bsos) - self.assertEqual(len(resp.json["failed"]), 1) - self.assertEqual(len(resp.json["success"]), 1) + self.assertEquals(len(resp.json["failed"]), 1) + self.assertEquals(len(resp.json["success"]), 1) # To correctly match semantics of batchless POST, the batch # should be committed including only the successful items. @@ -2027,9 +2027,9 @@ def test_batch_with_failing_bsos(self): resp = self.app.get(collection + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 2) - self.assertEqual(res[0]["payload"], "aai") - self.assertEqual(res[1]["payload"], "sea") + self.assertEquals(len(res), 2) + self.assertEquals(res[0]["payload"], "aai") + self.assertEquals(res[1]["payload"], "sea") def test_batch_id_is_correctly_scoped_to_a_collection(self): collection1 = self.root + "/storage/xxx_col1" @@ -2058,11 +2058,11 @@ def test_batch_id_is_correctly_scoped_to_a_collection(self): resp = self.app.get(collection1 + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 4) - self.assertEqual(res[0]["payload"], "aih") - self.assertEqual(res[1]["payload"], "bie") - self.assertEqual(res[2]["payload"], "cee") - self.assertEqual(res[3]["payload"], "dii") + self.assertEquals(len(res), 4) + self.assertEquals(res[0]["payload"], "aih") + self.assertEquals(res[1]["payload"], "bie") + self.assertEquals(res[2]["payload"], "cee") + self.assertEquals(res[3]["payload"], "dii") def test_users_with_the_same_batch_id_get_separate_data(self): # Try to generate two users with the same batch-id. @@ -2082,14 +2082,14 @@ def test_users_with_the_same_batch_id_get_separate_data(self): self.retry_post_json(self.root + req, []) # It should only have a single item. 
resp = self.app.get(self.root + "/storage/xxx_col1") - self.assertEqual(resp.json, ["b"]) + self.assertEquals(resp.json, ["b"]) # The first user's collection should still be empty. # Now have the first user commit their batch. req = "/storage/xxx_col1?batch={0}&commit=true".format(batch1) self.retry_post_json(self.root + req, []) # It should only have a single item. resp = self.app.get(self.root + "/storage/xxx_col1") - self.assertEqual(resp.json, ["a"]) + self.assertEquals(resp.json, ["a"]) # If we didn't make a conflict, try again. if batch1 == batch2: break @@ -2123,7 +2123,7 @@ def test_that_we_dont_resurrect_committed_batches(self): # Despite having the same batchid, the second batch should # be completely independent of the first. resp = self.app.get(self.root + "/storage/xxx_col2") - self.assertEqual(resp.json, ["j"]) + self.assertEquals(resp.json, ["j"]) def test_batch_id_is_correctly_scoped_to_a_user(self): collection = self.root + "/storage/xxx_col1" @@ -2155,11 +2155,11 @@ def test_batch_id_is_correctly_scoped_to_a_user(self): resp = self.app.get(collection + "?full=1") res = resp.json res.sort(key=lambda bso: bso["id"]) - self.assertEqual(len(res), 4) - self.assertEqual(res[0]["payload"], "aih") - self.assertEqual(res[1]["payload"], "bie") - self.assertEqual(res[2]["payload"], "cee") - self.assertEqual(res[3]["payload"], "di") + self.assertEquals(len(res), 4) + self.assertEquals(res[0]["payload"], "aih") + self.assertEquals(res[1]["payload"], "bie") + self.assertEquals(res[2]["payload"], "cee") + self.assertEquals(res[3]["payload"], "di") # bug 1332552 make sure ttl:null use the default ttl def test_create_bso_with_null_ttl(self): @@ -2167,7 +2167,7 @@ def test_create_bso_with_null_ttl(self): self.retry_put_json(self.root + "/storage/xxx_col2/TEST1", bso) time.sleep(0.1) res = self.app.get(self.root + "/storage/xxx_col2/TEST1?full=1") - self.assertEqual(res.json["payload"], "x") + self.assertEquals(res.json["payload"], "x") def test_rejection_of_known_bad_payloads(self): bso = { @@ -2202,8 +2202,8 @@ def testEmptyCommit(contentType, body, status=200): res = self.retry_post_json( self.root + "/storage/xxx_col?batch=true", bsos ) - self.assertEqual(len(res.json["success"]), 5) - self.assertEqual(len(res.json["failed"]), 0) + self.assertEquals(len(res.json["success"]), 5) + self.assertEquals(len(res.json["failed"]), 0) batch = res.json["batch"] self.app.post( self.root + "/storage/xxx_col?commit=true&batch=" + batch, @@ -2232,10 +2232,8 @@ def test_cors_settings_are_set(self): }, ) - self.assertEqual( - int(res.headers["access-control-max-age"]), 555 - ) - self.assertEqual( + self.assertEquals(int(res.headers["access-control-max-age"]), 555) + self.assertEquals( res.headers["access-control-allow-origin"], "localhost" ) @@ -2245,9 +2243,9 @@ def test_cors_allows_any_origin(self): headers={ "Access-Control-Request-Method": "GET", "Origin": "http://test-website.com", - "Access-Control-Request-Headers": "Content-Type" + "Access-Control-Request-Headers": "Content-Type", }, - status=200 + status=200, ) # PATCH is not a default allowed method, so request should return 405 diff --git a/tools/integration_tests/test_support.py b/tools/integration_tests/test_support.py index 0fb851ac95..b3042bdbee 100644 --- a/tools/integration_tests/test_support.py +++ b/tools/integration_tests/test_support.py @@ -1,8 +1,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. -""" Base test class, with an instanciated app. -""" +"""Base test class, with an instanciated app.""" import contextlib import functools @@ -24,6 +23,7 @@ import time import tokenlib import urllib.parse as urlparse + # unittest imported by pytest requirement import unittest import uuid diff --git a/tools/integration_tests/tokenserver/mock_fxa_server.py b/tools/integration_tests/tokenserver/mock_fxa_server.py index ddcd562886..f526a396cd 100644 --- a/tools/integration_tests/tokenserver/mock_fxa_server.py +++ b/tools/integration_tests/tokenserver/mock_fxa_server.py @@ -6,39 +6,44 @@ import os -@view_config(route_name='mock_oauth_verify', renderer='json') +@view_config(route_name="mock_oauth_verify", renderer="json") def _mock_oauth_verify(request): - body = json.loads(request.json_body['token']) + body = json.loads(request.json_body["token"]) - return Response(json=body['body'], content_type='application/json', - status=body['status']) + return Response( + json=body["body"], + content_type="application/json", + status=body["status"], + ) # The PyFxA OAuth client makes a request to the FxA OAuth server for its # current public RSA key. While the client allows us to pass in a JWK to # prevent this request from happening, mocking the endpoint is simpler. -@view_config(route_name='mock_oauth_jwk', renderer='json') +@view_config(route_name="mock_oauth_jwk", renderer="json") def _mock_oauth_jwk(request): - return {'keys': [{'fake': 'RSA key'}]} + return {"keys": [{"fake": "RSA key"}]} def make_server(host, port): with Configurator() as config: - config.add_route('mock_oauth_verify', '/v1/verify') - config.add_view(_mock_oauth_verify, route_name='mock_oauth_verify', - renderer='json') - - config.add_route('mock_oauth_jwk', '/v1/jwks') - config.add_view(_mock_oauth_jwk, route_name='mock_oauth_jwk', - renderer='json') + config.add_route("mock_oauth_verify", "/v1/verify") + config.add_view( + _mock_oauth_verify, route_name="mock_oauth_verify", renderer="json" + ) + + config.add_route("mock_oauth_jwk", "/v1/jwks") + config.add_view( + _mock_oauth_jwk, route_name="mock_oauth_jwk", renderer="json" + ) app = config.make_wsgi_app() return _make_server(host, port, app) -if __name__ == '__main__': - host = os.environ.get('MOCK_FXA_SERVER_HOST', 'localhost') - port = os.environ.get('MOCK_FXA_SERVER_PORT', 6000) +if __name__ == "__main__": + host = os.environ.get("MOCK_FXA_SERVER_HOST", "localhost") + port = os.environ.get("MOCK_FXA_SERVER_PORT", 6000) with make_server(host, int(port)) as httpd: print("Running mock FxA server on %s:%s" % (host, port)) diff --git a/tools/integration_tests/tokenserver/tables.py b/tools/integration_tests/tokenserver/tables.py new file mode 100644 index 0000000000..d471e18f5b --- /dev/null +++ b/tools/integration_tests/tokenserver/tables.py @@ -0,0 +1,96 @@ +from sqlalchemy import Integer, String, Null, BigInteger, Index +from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column + + +class Base(DeclarativeBase): + pass + + +class Services(Base): + __tablename__ = "services" + + id: Mapped[int] = mapped_column(Integer, primary_key=True) + service: Mapped[str] = mapped_column(String(30), default=Null) + pattern: Mapped[str] = mapped_column(String(128), default=Null) + + def __repr__(self) -> str: + return f"""Services(id={self.id!r}, service={self.service!r}, +pattern={self.pattern!r})""" + + def _asdict(self): + return { + "id": self.id, + "service": self.service, 
+ "pattern": self.pattern, + } + + +class Nodes(Base): + __tablename__ = "nodes" + + id: Mapped[int] = mapped_column(Integer, primary_key=True) + service: Mapped[int] = mapped_column(Integer, nullable=False) + node: Mapped[str] = mapped_column(String(64), nullable=False) + available: Mapped[int] = mapped_column(Integer, nullable=False) + current_load: Mapped[int] = mapped_column(Integer, nullable=False) + capacity: Mapped[int] = mapped_column(Integer, nullable=False) + downed: Mapped[int] = mapped_column(Integer, nullable=False) + backoff: Mapped[int] = mapped_column(Integer, nullable=False) + + unique_idx = Index(service, node) + + def __repr__(self) -> str: + return f"""Nodes(id={self.id!r}, service={self.service!r}, +node={self.node!r}, available={self.available!r}, +current_load={self.current_load!r}, capacity={self.capacity!r}, +downed={self.downed!r}, backoff={self.backoff!r})""" + + def _asdict(self): + return { + "id": self.id, + "service": self.service, + "node": self.node, + "available": self.available, + "current_load": self.current_load, + "capacity": self.capacity, + "downed": self.downed, + "backoff": self.backoff, + } + + +class Users(Base): + __tablename__ = "users" + + uid: Mapped[int] = mapped_column(Integer, primary_key=True) + service: Mapped[int] = mapped_column(Integer, nullable=False) + email: Mapped[str] = mapped_column(String(255), nullable=False) + generation: Mapped[int] = mapped_column(BigInteger, nullable=False) + client_state: Mapped[str] = mapped_column(String(32), nullable=False) + created_at: Mapped[int] = mapped_column(BigInteger, nullable=False) + replaced_at: Mapped[int] = mapped_column(BigInteger, default=Null) + nodeid: Mapped[int] = mapped_column(BigInteger, nullable=False) + keys_changed_at: Mapped[int] = mapped_column(BigInteger, default=Null) + + lookup_idx = Index(email, service, created_at) + replaced_at_idx = Index(service, replaced_at) + node_idx = Index(nodeid) + + def __repr__(self) -> str: + return f"""Users(uid={self.uid!r}, service={self.service!r}, +email={self.email!r}, generation={self.generation!r}, +client_state={self.client_state!r}, created_at={self.created_at!r}, +replaced_at={self.replaced_at!r}, nodeid={self.nodeid!r}, +keys_changed_at={self.keys_changed_at!r})""" + + def _asdict(self): + return { + "uid": self.uid, + "service": self.service, + "email": self.email, + "generation": self.generation, + "client_state": self.client_state, + "created_at": self.created_at, + "replaced_at": self.replaced_at, + "nodeid": self.nodeid, + "keys_changed_at": self.keys_changed_at, + } diff --git a/tools/integration_tests/tokenserver/test_authorization.py b/tools/integration_tests/tokenserver/test_authorization.py index ecd8fc5c69..7628bd172d 100644 --- a/tools/integration_tests/tokenserver/test_authorization.py +++ b/tools/integration_tests/tokenserver/test_authorization.py @@ -16,76 +16,63 @@ def tearDown(self): def test_unauthorized_error_status(self): # Totally busted auth -> generic error. 
- headers = {'Authorization': 'Unsupported-Auth-Scheme IHACKYOU'} - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = {"Authorization": "Unsupported-Auth-Scheme IHACKYOU"} + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'errors': [ - { - 'description': 'Unsupported', - 'location': 'body', - 'name': '' - } + "errors": [ + {"description": "Unsupported", "location": "body", "name": ""} ], - 'status': 'error' + "status": "error", } self.assertEqual(res.json, expected_error_response) def test_no_auth(self): - res = self.app.get('/1.0/sync/1.5', status=401) + res = self.app.get("/1.0/sync/1.5", status=401) expected_error_response = { - 'status': 'error', - 'errors': [ - { - 'location': 'body', - 'name': '', - 'description': 'Unauthorized' - } - ] + "status": "error", + "errors": [ + {"location": "body", "name": "", "description": "Unauthorized"} + ], } self.assertEqual(res.json, expected_error_response) def test_invalid_client_state_in_key_id(self): - additional_headers = { - 'X-KeyID': "1234-state!" - } + additional_headers = {"X-KeyID": "1234-state!"} headers = self._build_auth_headers( - keys_changed_at=1234, - client_state='aaaa', - **additional_headers) - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + keys_changed_at=1234, client_state="aaaa", **additional_headers + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-credentials', - 'errors': [ - { - 'location': 'body', - 'name': '', - 'description': 'Unauthorized' - } - ] + "status": "invalid-credentials", + "errors": [ + {"location": "body", "name": "", "description": "Unauthorized"} + ], } self.assertEqual(res.json, expected_error_response) def test_invalid_client_state_in_x_client_state(self): - additional_headers = {'X-Client-State': 'state!'} - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa', - **additional_headers) + additional_headers = {"X-Client-State": "state!"} + headers = self._build_auth_headers( + generation=1234, + keys_changed_at=1234, + client_state="aaaa", + **additional_headers + ) - res = self.app.get('/1.0/sync/1.5', headers=headers, status=400) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=400) expected_error_response = { - 'status': 'error', - 'errors': [ + "status": "error", + "errors": [ { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Invalid client state value' + "location": "header", + "name": "X-Client-State", + "description": "Invalid client state value", } - ] + ], } self.assertEqual(res.json, expected_error_response) @@ -93,485 +80,490 @@ def test_keys_changed_at_less_than_equal_to_generation(self): self._add_user(generation=1232, keys_changed_at=1234) # If keys_changed_at changes, that change must be less than or equal # to the new generation - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1236, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1236, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-keysChangedAt', - 'errors': [ - { - 'location': 'body', - 'name': '', - 'description': 'Unauthorized' - } - ] + "status": "invalid-keysChangedAt", + "errors": [ + {"location": "body", "name": "", "description": "Unauthorized"} + ], } 
self.assertEqual(res.json, expected_error_response) # If the keys_changed_at on the request matches that currently stored # on the user record, it does not need to be less than or equal to the # generation on the request - headers = self._build_auth_headers(generation=1233, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1233, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # A request with no generation is acceptable - headers = self._build_auth_headers(generation=None, - keys_changed_at=1235, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1235, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # A request with a keys_changed_at less than the new generation # is acceptable - headers = self._build_auth_headers(generation=1236, - keys_changed_at=1235, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1236, keys_changed_at=1235, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) def test_disallow_reusing_old_client_state(self): # Add a user record that has already been replaced - self._add_user(client_state='aaaa', replaced_at=1200) + self._add_user(client_state="aaaa", replaced_at=1200) # Add the most up-to-date user record - self._add_user(client_state='bbbb') + self._add_user(client_state="bbbb") # A request cannot use a client state associated with a replaced user - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ + "status": "invalid-client-state", + "errors": [ { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value stale ' - 'value' + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value stale " + "value", } - ] + ], } self.assertEqual(res.json, expected_error_response) # Using the last-seen client state is okay - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='bbbb') - res1 = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="bbbb" + ) + res1 = self.app.get("/1.0/sync/1.5", headers=headers) # Using a new client state (with an updated generation and # keys_changed_at) is okay - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1235, - client_state='cccc') - res2 = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1235, client_state="cccc" + ) + res2 = self.app.get("/1.0/sync/1.5", headers=headers) # This results in the creation of a new user record - self.assertNotEqual(res1.json['uid'], res2.json['uid']) + self.assertNotEqual(res1.json["uid"], res2.json["uid"]) def test_generation_change_must_accompany_client_state_change(self): - self._add_user(generation=1234, client_state='aaaa') + self._add_user(generation=1234, client_state="aaaa") # A request with a new 
client state must also contain a new generation - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ + "status": "invalid-client-state", + "errors": [ { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value new ' - 'value with no generation change' + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value new " + "value with no generation change", } - ] + ], } self.assertEqual(res.json, expected_error_response) # A request with no generation is acceptable - headers = self._build_auth_headers(generation=None, - keys_changed_at=1235, - client_state='bbbb') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1235, client_state="bbbb" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # We can't use a generation of 1235 when setting a new client state # because the generation was set to be equal to the keys_changed_at # in the previous request, which was 1235 - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1235, - client_state='cccc') + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1235, client_state="cccc" + ) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ - { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value new ' - 'value with no generation change' - } - ] + "status": "invalid-client-state", + "errors": [ + { + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value new " + "value with no generation change", + } + ], } - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) self.assertEqual(res.json, expected_error_response) # A change in client state is acceptable only with a change in # generation (if it is present) - headers = self._build_auth_headers(generation=1236, - keys_changed_at=1236, - client_state='cccc') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1236, keys_changed_at=1236, client_state="cccc" + ) + self.app.get("/1.0/sync/1.5", headers=headers) def test_keys_changed_at_change_must_accompany_client_state_change(self): - self._add_user(generation=1234, keys_changed_at=1234, - client_state='aaaa') + self._add_user( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # A request with a new client state must also contain a new # keys_changed_at - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1234, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1234, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ + "status": "invalid-client-state", + "errors": [ { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value new ' - 'value with no keys_changed_at 
change' + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value new " + "value with no keys_changed_at change", } - ] + ], } self.assertEqual(res.json, expected_error_response) # A request with a new keys_changed_at is acceptable - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1235, - client_state='bbbb') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1235, client_state="bbbb" + ) + self.app.get("/1.0/sync/1.5", headers=headers) def test_generation_must_not_be_less_than_last_seen_value(self): uid = self._add_user(generation=1234) # The generation in the request cannot be less than the generation # currently stored on the user record - headers = self._build_auth_headers(generation=1233, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1233, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-generation', - 'errors': [ + "status": "invalid-generation", + "errors": [ { - 'location': 'body', - 'name': '', - 'description': 'Unauthorized', + "location": "body", + "name": "", + "description": "Unauthorized", } - ] + ], } self.assertEqual(res.json, expected_error_response) # A request with no generation is acceptable - headers = self._build_auth_headers(generation=None, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # A request with a generation equal to the last-seen generation is # acceptable - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # A request with a generation greater than the last-seen generation is # acceptable - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # This should not result in the creation of a new user - self.assertEqual(res.json['uid'], uid) + self.assertEqual(res.json["uid"], uid) def test_set_generation_unchanged_without_keys_changed_at_update(self): # Add a user who has never sent us a generation - uid = self._add_user(generation=0, keys_changed_at=1234, - client_state='aaaa') + uid = self._add_user( + generation=0, keys_changed_at=1234, client_state="aaaa" + ) # Send a request without a generation that doesn't update # keys_changed_at - headers = self._build_auth_headers(generation=None, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) user = self._get_user(uid) # This should not have set the user's generation - self.assertEqual(user['generation'], 0) + self.assertEqual(user["generation"], 0) # Send a 
request without a generation that updates keys_changed_at - headers = self._build_auth_headers(generation=None, - keys_changed_at=1235, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1235, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) user = self._get_user(uid) # This should have set the user's generation - self.assertEqual(user['generation'], 1235) + self.assertEqual(user["generation"], 1235) def test_set_generation_with_keys_changed_at_initialization(self): # Add a user who has never sent us a generation or a keys_changed_at - uid = self._add_user(generation=0, keys_changed_at=None, - client_state='aaaa') + uid = self._add_user( + generation=0, keys_changed_at=None, client_state="aaaa" + ) # Send a request without a generation that updates keys_changed_at - headers = self._build_auth_headers(generation=None, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) user = self._get_user(uid) # This should have set the user's generation - self.assertEqual(user['generation'], 1234) + self.assertEqual(user["generation"], 1234) def test_fxa_kid_change(self): - self._add_user(generation=1234, keys_changed_at=None, - client_state='aaaa') + self._add_user( + generation=1234, keys_changed_at=None, client_state="aaaa" + ) # An OAuth client shows up, setting keys_changed_at. # (The value matches generation number above, beause in this scenario # FxA hasn't been updated to track and report keysChangedAt yet). - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) - token0 = self.unsafelyParseToken(res.json['id']) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + token0 = self.unsafelyParseToken(res.json["id"]) # Reject keys_changed_at lower than the value previously seen - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1233, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1233, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-keysChangedAt', - 'errors': [ + "status": "invalid-keysChangedAt", + "errors": [ { - 'location': 'body', - 'name': '', - 'description': 'Unauthorized', + "location": "body", + "name": "", + "description": "Unauthorized", } - ] + ], } self.assertEqual(res.json, expected_error_response) # Reject greater keys_changed_at with no corresponding update to # generation - headers = self._build_auth_headers(generation=1234, - keys_changed_at=2345, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=2345, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) self.assertEqual(res.json, expected_error_response) # Accept equal keys_changed_at - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = 
self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + self.app.get("/1.0/sync/1.5", headers=headers) # Accept greater keys_changed_at with new generation - headers = self._build_auth_headers(generation=2345, - keys_changed_at=2345, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers) - token = self.unsafelyParseToken(res.json['id']) - self.assertEqual(token['fxa_kid'], '0000000002345-u7s') - self.assertNotEqual(token['uid'], token0['uid']) - self.assertEqual(token['node'], token0['node']) + headers = self._build_auth_headers( + generation=2345, keys_changed_at=2345, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + token = self.unsafelyParseToken(res.json["id"]) + self.assertEqual(token["fxa_kid"], "0000000002345-u7s") + self.assertNotEqual(token["uid"], token0["uid"]) + self.assertEqual(token["node"], token0["node"]) def test_client_specified_duration(self): - self._add_user(generation=1234, keys_changed_at=1234, - client_state='aaaa') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + self._add_user( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # It's ok to request a shorter-duration token. - res = self.app.get('/1.0/sync/1.5?duration=12', headers=headers) - self.assertEqual(res.json['duration'], 12) + res = self.app.get("/1.0/sync/1.5?duration=12", headers=headers) + self.assertEqual(res.json["duration"], 12) # But you can't exceed the server's default value. - res = self.app.get('/1.0/sync/1.5?duration=4000', headers=headers) - self.assertEqual(res.json['duration'], 3600) + res = self.app.get("/1.0/sync/1.5?duration=4000", headers=headers) + self.assertEqual(res.json["duration"], 3600) # And nonsense values are ignored. - res = self.app.get('/1.0/sync/1.5?duration=lolwut', headers=headers) - self.assertEqual(res.json['duration'], 3600) - res = self.app.get('/1.0/sync/1.5?duration=-1', headers=headers) - self.assertEqual(res.json['duration'], 3600) + res = self.app.get("/1.0/sync/1.5?duration=lolwut", headers=headers) + self.assertEqual(res.json["duration"], 3600) + res = self.app.get("/1.0/sync/1.5?duration=-1", headers=headers) + self.assertEqual(res.json["duration"], 3600) # Although all servers are now writing keys_changed_at, we still need this # case to be handled. See this PR for more information: # https://github.com/mozilla-services/tokenserver/pull/176 def test_kid_change_during_gradual_tokenserver_rollout(self): # Let's start with a user already in the db, with no keys_changed_at. - uid = self._add_user(generation=1234, client_state='aaaa', - keys_changed_at=None) + uid = self._add_user( + generation=1234, client_state="aaaa", keys_changed_at=None + ) user1 = self._get_user(uid) # User hits updated tokenserver node, writing keys_changed_at to db. - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1200, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1200, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # That should not have triggered a node re-assignment. 
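Aside: the `fxa_kid` value asserted above (`0000000002345-u7s`) is simply `keys_changed_at`, zero-padded, joined to the urlsafe-base64 form of the hex `client_state` carried in `X-KeyID`. A minimal sketch of that derivation follows; the 13-digit padding is inferred from the values asserted in these tests, not taken from the server code.

# Sketch: how the fxa_kid asserted above is derived from keys_changed_at
# plus the hex client_state packed into the X-KeyID header. The 13-digit
# zero padding is an inference from the asserted value, not from the
# server implementation.
import binascii
from base64 import urlsafe_b64encode


def fxa_kid(keys_changed_at, client_state_hex):
    raw = binascii.unhexlify(client_state_hex)
    b64 = urlsafe_b64encode(raw).rstrip(b"=").decode("utf-8")
    return "%013d-%s" % (keys_changed_at, b64)


assert fxa_kid(2345, "bbbb") == "0000000002345-u7s"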
- user2 = self._get_user(res.json['uid']) - self.assertEqual(user1['uid'], user2['uid']) - self.assertEqual(user1['nodeid'], user2['nodeid']) + user2 = self._get_user(res.json["uid"]) + self.assertEqual(user1["uid"], user2["uid"]) + self.assertEqual(user1["nodeid"], user2["nodeid"]) # That should have written keys_changed_at into the db. - self.assertEqual(user2['generation'], 1234) - self.assertEqual(user2['keys_changed_at'], 1200) + self.assertEqual(user2["generation"], 1234) + self.assertEqual(user2["keys_changed_at"], 1200) # User does a password reset on their Firefox Account. - headers = self._build_auth_headers(generation=2345, - keys_changed_at=2345, - client_state='bbbb') + headers = self._build_auth_headers( + generation=2345, keys_changed_at=2345, client_state="bbbb" + ) # They sync again, but hit a tokenserver node that isn't updated yet. # This would trigger the allocation of a new user, so we simulate this # by adding a new user. We set keys_changed_at to be the last-used # value, since we are simulating a server that doesn't pay attention # to keys_changed_at. - uid = self._add_user(generation=2345, keys_changed_at=1200, - client_state='bbbb') + uid = self._add_user( + generation=2345, keys_changed_at=1200, client_state="bbbb" + ) user2 = self._get_user(uid) - self.assertNotEqual(user1['uid'], user2['uid']) - self.assertEqual(user1['nodeid'], user2['nodeid']) + self.assertNotEqual(user1["uid"], user2["uid"]) + self.assertEqual(user1["nodeid"], user2["nodeid"]) # They sync again, hitting an updated tokenserver node. # This should succeed, despite keys_changed_at appearing to have # changed without any corresponding change in generation number. - res = self.app.get('/1.0/sync/1.5', headers=headers) + res = self.app.get("/1.0/sync/1.5", headers=headers) # That should not have triggered a second user allocation. 
user1 = user2 - user2 = self._get_user(res.json['uid']) - self.assertEqual(user2['uid'], user1['uid']) - self.assertEqual(user2['nodeid'], user1['nodeid']) + user2 = self._get_user(res.json["uid"]) + self.assertEqual(user2["uid"], user1["uid"]) + self.assertEqual(user2["nodeid"], user1["nodeid"]) def test_update_client_state(self): - uid = self._add_user(generation=0, keys_changed_at=None, - client_state='') + uid = self._add_user( + generation=0, keys_changed_at=None, client_state="" + ) user1 = self._get_user(uid) # The user starts out with no client_state - self.assertEqual(user1['generation'], 0) - self.assertEqual(user1['client_state'], '') + self.assertEqual(user1["generation"], 0) + self.assertEqual(user1["client_state"], "") seen_uids = set((uid,)) - orig_node = user1['nodeid'] + orig_node = user1["nodeid"] # Changing client_state allocates a new user, resulting in a new uid - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers) - user2 = self._get_user(res.json['uid']) - self.assertTrue(user2['uid'] not in seen_uids) - self.assertEqual(user2['nodeid'], orig_node) - self.assertEqual(user2['generation'], 1234) - self.assertEqual(user2['keys_changed_at'], 1234) - self.assertEqual(user2['client_state'], 'bbbb') - seen_uids.add(user2['uid']) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + user2 = self._get_user(res.json["uid"]) + self.assertTrue(user2["uid"] not in seen_uids) + self.assertEqual(user2["nodeid"], orig_node) + self.assertEqual(user2["generation"], 1234) + self.assertEqual(user2["keys_changed_at"], 1234) + self.assertEqual(user2["client_state"], "bbbb") + seen_uids.add(user2["uid"]) # We can change the client state even if no generation is present on # the request - headers = self._build_auth_headers(generation=None, - keys_changed_at=1235, - client_state='cccc') - res = self.app.get('/1.0/sync/1.5', headers=headers) - user3 = self._get_user(res.json['uid']) - self.assertTrue(user3['uid'] not in seen_uids) - self.assertEqual(user3['nodeid'], orig_node) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1235, client_state="cccc" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + user3 = self._get_user(res.json["uid"]) + self.assertTrue(user3["uid"] not in seen_uids) + self.assertEqual(user3["nodeid"], orig_node) # When keys_changed_at changes and generation is not present on the # request, generation is set to be the same as keys_changed_at - self.assertEqual(user3['generation'], 1235) - self.assertEqual(user3['keys_changed_at'], 1235) - self.assertEqual(user3['client_state'], 'cccc') - seen_uids.add(user3['uid']) + self.assertEqual(user3["generation"], 1235) + self.assertEqual(user3["keys_changed_at"], 1235) + self.assertEqual(user3["client_state"], "cccc") + seen_uids.add(user3["uid"]) # We cannot change client_state without a change in keys_changed_at - headers = self._build_auth_headers(generation=None, - keys_changed_at=1235, - client_state='dddd') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=None, keys_changed_at=1235, client_state="dddd" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ + "status": "invalid-client-state", + "errors": [ { - 'location': 
'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value new ' - 'value with no keys_changed_at change' + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value new " + "value with no keys_changed_at change", } - ] + ], } self.assertEqual(expected_error_response, res.json) # We cannot use a previously-used client_state - headers = self._build_auth_headers(generation=1236, - keys_changed_at=1236, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = self._build_auth_headers( + generation=1236, keys_changed_at=1236, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'status': 'invalid-client-state', - 'errors': [ + "status": "invalid-client-state", + "errors": [ { - 'location': 'header', - 'name': 'X-Client-State', - 'description': 'Unacceptable client-state value stale ' - 'value' + "location": "header", + "name": "X-Client-State", + "description": "Unacceptable client-state value stale " + "value", } - ] + ], } self.assertEqual(expected_error_response, res.json) def test_set_generation_from_no_generation(self): # Add a user that has no generation set - uid = self._add_user(generation=0, keys_changed_at=None, - client_state='aaaa') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + uid = self._add_user( + generation=0, keys_changed_at=None, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # Send a request to set the generation - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) user = self._get_user(uid) # Ensure that the user had the correct generation set - self.assertEqual(user['generation'], 1234) + self.assertEqual(user["generation"], 1234) def test_set_keys_changed_at_from_no_keys_changed_at(self): # Add a user that has no keys_changed_at set - uid = self._add_user(generation=1234, keys_changed_at=None, - client_state='aaaa') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + uid = self._add_user( + generation=1234, keys_changed_at=None, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # Send a request to set the keys_changed_at - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) user = self._get_user(uid) # Ensure that the user had the correct generation set - self.assertEqual(user['keys_changed_at'], 1234) + self.assertEqual(user["keys_changed_at"], 1234) def test_x_client_state_must_have_same_client_state_as_key_id(self): - self._add_user(client_state='aaaa') - additional_headers = {'X-Client-State': 'bbbb'} + self._add_user(client_state="aaaa") + additional_headers = {"X-Client-State": "bbbb"} headers = self._build_auth_headers( generation=1234, keys_changed_at=1234, - client_state='aaaa', - **additional_headers) + client_state="aaaa", + **additional_headers + ) # If present, the X-Client-State header must have the same client # state as the X-KeyID header - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'errors': [ - { - 'description': 'Unauthorized', - 'location': 'body', - 'name': '' - } + "errors": [ + {"description": 
"Unauthorized", "location": "body", "name": ""} ], - 'status': 'invalid-client-state' + "status": "invalid-client-state", } self.assertEqual(res.json, expected_error_response) - headers['X-Client-State'] = 'aaaa' - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers["X-Client-State"] = "aaaa" + res = self.app.get("/1.0/sync/1.5", headers=headers) def test_zero_generation_treated_as_null(self): # Add a user that has a generation set - uid = self._add_user(generation=1234, keys_changed_at=1234, - client_state='aaaa') - headers = self._build_auth_headers(generation=0, - keys_changed_at=1234, - client_state='aaaa') + uid = self._add_user( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=0, keys_changed_at=1234, client_state="aaaa" + ) # Send a request with a generation of 0 - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) # Ensure that the request succeeded and that the user's generation # was not updated user = self._get_user(uid) - self.assertEqual(user['generation'], 1234) + self.assertEqual(user["generation"], 1234) def test_zero_keys_changed_at_treated_as_null(self): # Add a user that has no keys_changed_at set - uid = self._add_user(generation=1234, keys_changed_at=None, - client_state='aaaa') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=0, - client_state='aaaa') + uid = self._add_user( + generation=1234, keys_changed_at=None, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=0, client_state="aaaa" + ) # Send a request with a keys_changed_at of 0 - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) # Ensure that the request succeeded and that the user's # keys_changed_at was not updated user = self._get_user(uid) - self.assertEqual(user['keys_changed_at'], None) + self.assertEqual(user["keys_changed_at"], None) diff --git a/tools/integration_tests/tokenserver/test_e2e.py b/tools/integration_tests/tokenserver/test_e2e.py index 85d68b4d1a..c4ee1295e8 100644 --- a/tools/integration_tests/tokenserver/test_e2e.py +++ b/tools/integration_tests/tokenserver/test_e2e.py @@ -25,13 +25,13 @@ # This is the client ID used for Firefox Desktop. The FxA team confirmed that # this is the proper client ID to be using for these integration tests. 
-CLIENT_ID = '5882386c6d801776' +CLIENT_ID = "5882386c6d801776" DEFAULT_TOKEN_DURATION = 3600 -FXA_ACCOUNT_STAGE_HOST = 'https://api-accounts.stage.mozaws.net' -FXA_OAUTH_STAGE_HOST = 'https://oauth.stage.mozaws.net' +FXA_ACCOUNT_STAGE_HOST = "https://api-accounts.stage.mozaws.net" +FXA_OAUTH_STAGE_HOST = "https://oauth.stage.mozaws.net" PASSWORD_CHARACTERS = string.ascii_letters + string.punctuation + string.digits PASSWORD_LENGTH = 32 -SCOPE = 'https://identity.mozilla.com/apps/oldsync' +SCOPE = "https://identity.mozilla.com/apps/oldsync" @pytest.mark.usefixtures('setup_server_end_to_end_testing') @@ -48,12 +48,14 @@ def setUpClass(cls): # Create an ephemeral email account to use to create an FxA account cls.acct = TestEmailAccount() cls.client = Client(FXA_ACCOUNT_STAGE_HOST) - cls.oauth_client = OAuthClient(CLIENT_ID, None, - server_url=FXA_OAUTH_STAGE_HOST) + cls.oauth_client = OAuthClient( + CLIENT_ID, None, server_url=FXA_OAUTH_STAGE_HOST + ) cls.fxa_password = cls._generate_password() # Create an FxA account for these end-to-end tests - cls.session = cls.client.create_account(cls.acct.email, - password=cls.fxa_password) + cls.session = cls.client.create_account( + cls.acct.email, password=cls.fxa_password + ) # Loop until we receive the verification email from FxA while not cls.acct.messages: time.sleep(0.5) @@ -61,8 +63,8 @@ def setUpClass(cls): # Find the message containing the verification code and verify the # code for m in cls.acct.messages: - if 'x-verify-code' in m['headers']: - cls.session.verify_email_code(m['headers']['x-verify-code']) + if "x-verify-code" in m["headers"]: + cls.session.verify_email_code(m["headers"]["x-verify-code"]) # Create an OAuth token to be used for the end-to-end tests cls.oauth_token = cls.oauth_client.authorize_token(cls.session, SCOPE) @@ -86,35 +88,37 @@ def tearDownClass(cls): def _generate_password(): r = range(PASSWORD_LENGTH) - return ''.join(random.choice(PASSWORD_CHARACTERS) for i in r) + return "".join(random.choice(PASSWORD_CHARACTERS) for i in r) def _get_oauth_token_with_bad_scope(self): - bad_scope = 'bad_scope' + bad_scope = "bad_scope" return self.oauth_client.authorize_token(self.session, bad_scope) def _get_bad_token(self): - key = rsa.generate_private_key(backend=default_backend(), - public_exponent=65537, - key_size=2048) + key = rsa.generate_private_key( + backend=default_backend(), public_exponent=65537, key_size=2048 + ) format = serialization.PrivateFormat.TraditionalOpenSSL algorithm = serialization.NoEncryption() - pem = key.private_bytes(encoding=serialization.Encoding.PEM, - format=format, - encryption_algorithm=algorithm) - private_key = pem.decode('utf-8') + pem = key.private_bytes( + encoding=serialization.Encoding.PEM, + format=format, + encryption_algorithm=algorithm, + ) + private_key = pem.decode("utf-8") claims = { - 'sub': 'fake sub', - 'iat': 12345, - 'exp': 12345, + "sub": "fake sub", + "iat": 12345, + "exp": 12345, } - return jwt.encode(claims, private_key, algorithm='RS256') + return jwt.encode(claims, private_key, algorithm="RS256") def _extract_keys_changed_at_from_assertion(self, assertion): - token = assertion.split('~')[-2] + token = assertion.split("~")[-2] claims = jwt.decode(token, options={"verify_signature": False}) - return claims['fxa-keysChangedAt'] + return claims["fxa-keysChangedAt"] @classmethod def _change_password(cls): @@ -125,105 +129,96 @@ def _change_password(cls): # Adapted from the original Tokenserver: # 
https://github.com/mozilla-services/tokenserver/blob/master/tokenserver/util.py#L24 def _fxa_metrics_hash(self, value): - hasher = hmac.new(self.FXA_METRICS_HASH_SECRET.encode('utf-8'), b'', - sha256) - hasher.update(value.encode('utf-8')) + hasher = hmac.new( + self.FXA_METRICS_HASH_SECRET.encode("utf-8"), b"", sha256 + ) + hasher.update(value.encode("utf-8")) return hasher.hexdigest() def test_unauthorized_oauth_error_status(self): # Totally busted auth -> generic error. headers = { - 'Authorization': 'Unsupported-Auth-Scheme IHACKYOU', - 'X-KeyID': '1234-qqo' + "Authorization": "Unsupported-Auth-Scheme IHACKYOU", + "X-KeyID": "1234-qqo", } - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'errors': [ - { - 'description': 'Unsupported', - 'location': 'body', - 'name': '' - } + "errors": [ + {"description": "Unsupported", "location": "body", "name": ""} ], - 'status': 'error' + "status": "error", } self.assertEqual(res.json, expected_error_response) token = self._get_bad_token() - headers = { - 'Authorization': 'Bearer %s' % token, - 'X-KeyID': '1234-qqo' - } + headers = {"Authorization": "Bearer %s" % token, "X-KeyID": "1234-qqo"} # Bad token -> 'invalid-credentials' - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { - 'errors': [ - { - 'description': 'Unauthorized', - 'location': 'body', - 'name': '' - } + "errors": [ + {"description": "Unauthorized", "location": "body", "name": ""} ], - 'status': 'invalid-credentials' + "status": "invalid-credentials", } self.assertEqual(res.json, expected_error_response) # Untrusted scopes -> 'invalid-credentials' token = self._get_oauth_token_with_bad_scope() - headers = { - 'Authorization': 'Bearer %s' % token, - 'X-KeyID': '1234-qqo' - } - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + headers = {"Authorization": "Bearer %s" % token, "X-KeyID": "1234-qqo"} + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) self.assertEqual(res.json, expected_error_response) def test_valid_oauth_request(self): oauth_token = self.oauth_token headers = { - 'Authorization': 'Bearer %s' % oauth_token, - 'X-KeyID': '1234-qqo' + "Authorization": "Bearer %s" % oauth_token, + "X-KeyID": "1234-qqo", } # Send a valid request, allocating a new user - res = self.app.get('/1.0/sync/1.5', headers=headers) + res = self.app.get("/1.0/sync/1.5", headers=headers) fxa_uid = self.session.uid # Retrieve the user from the database - user = self._get_user(res.json['uid']) + user = self._get_user(res.json["uid"]) # First, let's verify that the token we received is valid. To do this, # we can unpack the hawk header ID into the payload and its signature # and then construct a tokenlib token to compute the signature # ourselves. To obtain a matching signature, we use the same secret as # is used by Tokenserver. 
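Aside: the verification described in the comment above boils down to the sketch below, which mirrors the assertions that follow; it assumes, as the test does, that the trailing 32 bytes of the decoded hawk id are the HMAC signature, and it reuses tokenlib's private `_get_signature` helper.

# Sketch of the check described above: split the decoded hawk id into
# payload + trailing 32-byte signature, then recompute the signature with
# the same tokenlib secret the server uses.
import hmac
import json
from base64 import urlsafe_b64decode

import tokenlib


def check_token_id(token_id, signing_secret):
    raw = urlsafe_b64decode(token_id)
    payload, signature = raw[:-32], raw[-32:]
    tm = tokenlib.TokenManager(secret=signing_secret)
    expected = tm._get_signature(payload)
    if not hmac.compare_digest(expected, signature):
        raise ValueError("bad token signature")
    return json.loads(payload.decode("utf-8"))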
- raw = urlsafe_b64decode(res.json['id']) + raw = urlsafe_b64decode(res.json["id"]) payload = raw[:-32] signature = raw[-32:] - payload_str = payload.decode('utf-8') + payload_str = payload.decode("utf-8") payload_dict = json.loads(payload_str) # The `id` payload should include a field indicating the origin of the # token - self.assertEqual(payload_dict['tokenserver_origin'], 'rust') + self.assertEqual(payload_dict["tokenserver_origin"], "rust") signing_secret = self.TOKEN_SIGNING_SECRET tm = tokenlib.TokenManager(secret=signing_secret) - expected_signature = tm._get_signature(payload_str.encode('utf8')) + expected_signature = tm._get_signature(payload_str.encode("utf8")) # Using the #compare_digest method here is not strictly necessary, as # this is not a security-sensitive situation, but it's good practice self.assertTrue(hmac.compare_digest(expected_signature, signature)) # Check that the given key is a secret derived from the hawk ID - expected_secret = tokenlib.get_derived_secret(res.json['id'], - secret=signing_secret) - self.assertEqual(res.json['key'], expected_secret) + expected_secret = tokenlib.get_derived_secret( + res.json["id"], secret=signing_secret + ) + self.assertEqual(res.json["key"], expected_secret) # Check to make sure the remainder of the fields are valid - self.assertEqual(res.json['uid'], user['uid']) - self.assertEqual(res.json['api_endpoint'], - '%s/1.5/%s' % (self.NODE_URL, user['uid'])) - self.assertEqual(res.json['duration'], DEFAULT_TOKEN_DURATION) - self.assertEqual(res.json['hashalg'], 'sha256') - self.assertEqual(res.json['hashed_fxa_uid'], - self._fxa_metrics_hash(fxa_uid)[:32]) - self.assertEqual(res.json['node_type'], 'spanner') + self.assertEqual(res.json["uid"], user["uid"]) + self.assertEqual( + res.json["api_endpoint"], + "%s/1.5/%s" % (self.NODE_URL, user["uid"]), + ) + self.assertEqual(res.json["duration"], DEFAULT_TOKEN_DURATION) + self.assertEqual(res.json["hashalg"], "sha256") + self.assertEqual( + res.json["hashed_fxa_uid"], self._fxa_metrics_hash(fxa_uid)[:32] + ) + self.assertEqual(res.json["node_type"], "spanner") # The response should have an X-Timestamp header that contains the # number of seconds since the UNIX epoch - self.assertIn('X-Timestamp', res.headers) - self.assertIsNotNone(int(res.headers['X-Timestamp'])) - token = self.unsafelyParseToken(res.json['id']) - self.assertIn('hashed_device_id', token) + self.assertIn("X-Timestamp", res.headers) + self.assertIsNotNone(int(res.headers["X-Timestamp"])) + token = self.unsafelyParseToken(res.json["id"]) + self.assertIn("hashed_device_id", token) self.assertEqual(token["uid"], res.json["uid"]) self.assertEqual(token["fxa_uid"], fxa_uid) self.assertEqual(token["fxa_kid"], "0000000001234-qqo") diff --git a/tools/integration_tests/tokenserver/test_misc.py b/tools/integration_tests/tokenserver/test_misc.py index 96eb641ec5..7798ae0bf9 100644 --- a/tools/integration_tests/tokenserver/test_misc.py +++ b/tools/integration_tests/tokenserver/test_misc.py @@ -18,48 +18,48 @@ def tearDown(self): super(TestMisc, self).tearDown() def test_unknown_app(self): - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/xXx/token', headers=headers, status=404) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/xXx/token", headers=headers, status=404) expected_error_response = { - 'errors': [ + "errors": [ { - 'description': 'Unsupported application', - 
'location': 'url', - 'name': 'application' + "description": "Unsupported application", + "location": "url", + "name": "application", } ], - 'status': 'error' + "status": "error", } self.assertEqual(res.json, expected_error_response) def test_unknown_version(self): - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.2', headers=headers, status=404) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.2", headers=headers, status=404) expected_error_response = { - 'errors': [ + "errors": [ { - 'description': 'Unsupported application version', - 'location': 'url', - 'name': '1.2' + "description": "Unsupported application version", + "location": "url", + "name": "1.2", } ], - 'status': 'error' + "status": "error", } self.assertEqual(res.json, expected_error_response) def test_valid_app(self): self._add_user() - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) - self.assertIn('https://example.com/1.5', res.json['api_endpoint']) - self.assertIn('duration', res.json) - self.assertEqual(res.json['duration'], 3600) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + self.assertIn("https://example.com/1.5", res.json["api_endpoint"]) + self.assertIn("duration", res.json) + self.assertEqual(res.json["duration"], 3600) def test_current_user_is_the_most_up_to_date(self): # Add some users @@ -69,26 +69,27 @@ def test_current_user_is_the_most_up_to_date(self): uid = self._add_user(generation=1236, created_at=1233) # Users are sorted by (generation, created_at), so the fourth user # record is considered to be the current user - headers = self._build_auth_headers(generation=1236, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) - self.assertEqual(res.json['uid'], uid) + headers = self._build_auth_headers( + generation=1236, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + self.assertEqual(res.json["uid"], uid) def test_user_creation_when_most_current_user_is_replaced(self): # Add some users uid1 = self._add_user(generation=1234, created_at=1234) uid2 = self._add_user(generation=1235, created_at=1235) - uid3 = self._add_user(generation=1236, created_at=1236, - replaced_at=1237) + uid3 = self._add_user( + generation=1236, created_at=1236, replaced_at=1237 + ) seen_uids = [uid1, uid2, uid3] # Because the current user (the one with uid3) has been replaced, a new # user record is created - headers = self._build_auth_headers(generation=1237, - keys_changed_at=1237, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) - self.assertNotIn(res.json['uid'], seen_uids) + headers = self._build_auth_headers( + generation=1237, keys_changed_at=1237, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) + self.assertNotIn(res.json["uid"], seen_uids) def test_old_users_marked_as_replaced_in_race_recovery(self): # Add some users @@ -96,141 +97,142 @@ def test_old_users_marked_as_replaced_in_race_recovery(self): uid2 = self._add_user(generation=1235, created_at=1235) uid3 = self._add_user(generation=1236, created_at=1240) # Make a request - headers = self._build_auth_headers(generation=1236, - 
keys_changed_at=1236, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1236, keys_changed_at=1236, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # uid3 is associated with the current user - self.assertEqual(res.json['uid'], uid3) + self.assertEqual(res.json["uid"], uid3) # The users associated with uid1 and uid2 have replaced_at set to be # equal to created_at on the current user record user1 = self._get_user(uid1) user2 = self._get_user(uid2) - self.assertEqual(user1['replaced_at'], 1240) - self.assertEqual(user2['replaced_at'], 1240) + self.assertEqual(user1["replaced_at"], 1240) + self.assertEqual(user2["replaced_at"], 1240) def test_user_updates_with_new_client_state(self): # Start with a single user in the database - uid = self._add_user(generation=1234, keys_changed_at=1234, - client_state='aaaa') + uid = self._add_user( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # Send a request, updating the generation, keys_changed_at, and # client_state - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1235, - client_state='bbbb') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1235, client_state="bbbb" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # A new user should have been created self.assertEqual(self._count_users(), 2) - self.assertNotEqual(uid, res.json['uid']) + self.assertNotEqual(uid, res.json["uid"]) # The new user record should have the updated generation, # keys_changed_at, and client_state - user = self._get_user(res.json['uid']) - self.assertEqual(user['generation'], 1235) - self.assertEqual(user['keys_changed_at'], 1235) - self.assertEqual(user['client_state'], 'bbbb') + user = self._get_user(res.json["uid"]) + self.assertEqual(user["generation"], 1235) + self.assertEqual(user["keys_changed_at"], 1235) + self.assertEqual(user["client_state"], "bbbb") # The old user record should not have the updated values user = self._get_user(uid) - self.assertEqual(user['generation'], 1234) - self.assertEqual(user['keys_changed_at'], 1234) - self.assertEqual(user['client_state'], 'aaaa') + self.assertEqual(user["generation"], 1234) + self.assertEqual(user["keys_changed_at"], 1234) + self.assertEqual(user["client_state"], "aaaa") # Get all the replaced users - email = 'test@%s' % self.FXA_EMAIL_DOMAIN - replaced_users = self._get_replaced_users(self.service_id, - email) + email = "test@%s" % self.FXA_EMAIL_DOMAIN + replaced_users = self._get_replaced_users(self.service_id, email) # Only one user should be replaced self.assertEqual(len(replaced_users), 1) # The replaced user record should have the old generation, # keys_changed_at, and client_state replaced_user = replaced_users[0] - self.assertEqual(replaced_user['generation'], 1234) - self.assertEqual(replaced_user['keys_changed_at'], 1234) - self.assertEqual(replaced_user['client_state'], 'aaaa') + self.assertEqual(replaced_user["generation"], 1234) + self.assertEqual(replaced_user["keys_changed_at"], 1234) + self.assertEqual(replaced_user["client_state"], "aaaa") def test_user_updates_with_same_client_state(self): # Start with a single user in the database uid = self._add_user(generation=1234, keys_changed_at=1234) # Send a request, updating the generation and keys_changed_at but not # the client state - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1235, - 
client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1235, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # A new user should not have been created self.assertEqual(self._count_users(), 1) - self.assertEqual(uid, res.json['uid']) + self.assertEqual(uid, res.json["uid"]) # The user record should have been updated user = self._get_user(uid) - self.assertEqual(user['generation'], 1235) - self.assertEqual(user['keys_changed_at'], 1235) + self.assertEqual(user["generation"], 1235) + self.assertEqual(user["keys_changed_at"], 1235) def test_retired_users_can_make_requests(self): # Add a retired user to the database self._add_user(generation=MAX_GENERATION) - headers = self._build_auth_headers(generation=1235, - keys_changed_at=1234, - client_state='aaaa') + headers = self._build_auth_headers( + generation=1235, keys_changed_at=1234, client_state="aaaa" + ) # Retired users cannot make requests with a generation smaller than # the max generation - res = self.app.get('/1.0/sync/1.5', headers=headers, status=401) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=401) expected_error_response = { "status": "invalid-generation", "errors": [ - { - "location": "body", - "name": "", - "description": "Unauthorized" - } - ] + {"location": "body", "name": "", "description": "Unauthorized"} + ], } self.assertEqual(res.json, expected_error_response) # Retired users can make requests with a generation number equal to # the max generation - headers = self._build_auth_headers(generation=MAX_GENERATION, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=MAX_GENERATION, + keys_changed_at=1234, + client_state="aaaa", + ) + self.app.get("/1.0/sync/1.5", headers=headers) def test_replaced_users_can_make_requests(self): # Add a replaced user to the database self._add_user(generation=1234, created_at=1234, replaced_at=1234) - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # Replaced users can make requests - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) def test_retired_users_with_no_node_cannot_make_requests(self): # Add a retired user to the database invalid_node_id = self.NODE_ID + 1 self._add_user(generation=MAX_GENERATION, nodeid=invalid_node_id) # Retired users without a node cannot make requests - headers = self._build_auth_headers(generation=MAX_GENERATION, - keys_changed_at=1234, - client_state='aaaa') - self.app.get('/1.0/sync/1.5', headers=headers, status=500) + headers = self._build_auth_headers( + generation=MAX_GENERATION, + keys_changed_at=1234, + client_state="aaaa", + ) + self.app.get("/1.0/sync/1.5", headers=headers, status=500) def test_replaced_users_with_no_node_can_make_requests(self): # Add a replaced user to the database invalid_node_id = self.NODE_ID + 1 - self._add_user(created_at=1234, replaced_at=1234, - nodeid=invalid_node_id) - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + self._add_user( + created_at=1234, replaced_at=1234, nodeid=invalid_node_id + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # Replaced users without a node can make requests - 
res = self.app.get('/1.0/sync/1.5', headers=headers) - user = self._get_user(res.json['uid']) + res = self.app.get("/1.0/sync/1.5", headers=headers) + user = self._get_user(res.json["uid"]) # The user is assigned to a new node - self.assertEqual(user['nodeid'], self.NODE_ID) + self.assertEqual(user["nodeid"], self.NODE_ID) def test_x_content_type_options(self): - self._add_user(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + self._add_user( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # Tokenserver responses should include the # `X-Content-Type-Options: nosniff` header - self.assertEqual(res.headers['X-Content-Type-Options'], 'nosniff') + self.assertEqual(res.headers["X-Content-Type-Options"], "nosniff") diff --git a/tools/integration_tests/tokenserver/test_node_assignment.py b/tools/integration_tests/tokenserver/test_node_assignment.py index 9ab621aa98..985956d364 100644 --- a/tools/integration_tests/tokenserver/test_node_assignment.py +++ b/tools/integration_tests/tokenserver/test_node_assignment.py @@ -17,132 +17,158 @@ def tearDown(self): def test_user_creation(self): # Add a few more nodes - self._add_node(available=0, node='https://node1') - self._add_node(available=1, node='https://node2') - self._add_node(available=5, node='https://node3') + self._add_node(available=0, node="https://node1") + self._add_node(available=1, node="https://node2") + self._add_node(available=5, node="https://node3") # Send a request from an unseen user - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # Ensure a single user was created self.assertEqual(self._count_users(), 1) # Ensure the user has the correct attributes - user1 = self._get_user(res.json['uid']) - self.assertEqual(user1['generation'], 1234) - self.assertEqual(user1['keys_changed_at'], 1234) - self.assertEqual(user1['client_state'], 'aaaa') - self.assertEqual(user1['nodeid'], self.NODE_ID) - self.assertEqual(user1['service'], self.service_id) + user1 = self._get_user(res.json["uid"]) + self.assertEqual(user1["generation"], 1234) + self.assertEqual(user1["keys_changed_at"], 1234) + self.assertEqual(user1["client_state"], "aaaa") + self.assertEqual(user1["nodeid"], self.NODE_ID) + self.assertEqual(user1["service"], self.service_id) # Ensure the 'available' and 'current_load' counts on the node # assigned to the user have been decremented appropriately node = self._get_node(self.NODE_ID) - self.assertEqual(node['available'], 99) - self.assertEqual(node['current_load'], 1) + self.assertEqual(node["available"], 99) + self.assertEqual(node["current_load"], 1) # Send a request from the same user - self.app.get('/1.0/sync/1.5', headers=headers) + self.app.get("/1.0/sync/1.5", headers=headers) # Ensure another user record was not created self.assertEqual(self._count_users(), 1) def test_new_user_allocation(self): # Start with a clean database - cursor = self._execute_sql('DELETE FROM nodes', ()) - cursor.close() + self._clear_nodes() - 
self._add_node(available=100, current_load=0, capacity=100, backoff=1, - node='https://node1') - self._add_node(available=100, current_load=0, capacity=100, downed=1, - node='https://node2') - node_id = self._add_node(available=99, current_load=1, capacity=100, - node='https://node3') - self._add_node(available=98, current_load=2, capacity=100, - node='https://node4') - self._add_node(available=97, current_load=3, capacity=100, - node='https://node5') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + self._add_node( + available=100, + current_load=0, + capacity=100, + backoff=1, + node="https://node1", + ) + self._add_node( + available=100, + current_load=0, + capacity=100, + downed=1, + node="https://node2", + ) + node_id = self._add_node( + available=99, current_load=1, capacity=100, node="https://node3" + ) + self._add_node( + available=98, current_load=2, capacity=100, node="https://node4" + ) + self._add_node( + available=97, current_load=3, capacity=100, node="https://node5" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # The user should have been allocated to the least-loaded node # (computed as current_load / capacity) that has backoff and downed # set to 0 - user = self._get_user(res.json['uid']) - self.assertEqual(user['nodeid'], node_id) + user = self._get_user(res.json["uid"]) + self.assertEqual(user["nodeid"], node_id) # The selected node should have current_load incremented and available # decremented node = self._get_node(node_id) - self.assertEqual(node['current_load'], 2) - self.assertEqual(node['available'], 98) + self.assertEqual(node["current_load"], 2) + self.assertEqual(node["available"], 98) def test_successfully_releasing_node_capacity(self): # Start with a clean database - cursor = self._execute_sql('DELETE FROM nodes', ()) - cursor.close() + self._clear_nodes() - node_id1 = self._add_node(available=0, current_load=99, capacity=100, - node='https://node1') - node_id2 = self._add_node(available=0, current_load=90, capacity=100, - node='https://node2') - node_id3 = self._add_node(available=0, current_load=80, capacity=81, - node='https://node3') - node_id4 = self._add_node(available=0, current_load=70, capacity=71, - node='https://node4', backoff=1) - node_id5 = self._add_node(available=0, current_load=60, capacity=61, - node='https://node5', downed=1) - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') - res = self.app.get('/1.0/sync/1.5', headers=headers) + node_id1 = self._add_node( + available=0, current_load=99, capacity=100, node="https://node1" + ) + node_id2 = self._add_node( + available=0, current_load=90, capacity=100, node="https://node2" + ) + node_id3 = self._add_node( + available=0, current_load=80, capacity=81, node="https://node3" + ) + node_id4 = self._add_node( + available=0, + current_load=70, + capacity=71, + node="https://node4", + backoff=1, + ) + node_id5 = self._add_node( + available=0, + current_load=60, + capacity=61, + node="https://node5", + downed=1, + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) + res = self.app.get("/1.0/sync/1.5", headers=headers) # Since every node has no available spots, capacity is added to each # node according to the equation # min(capacity*capacity_release_rate, capacity - 
current_load). Since # capacity - current_load is 0 for every node, the node with the # greatest capacity is chosen - user = self._get_user(res.json['uid']) - self.assertEqual(user['nodeid'], node_id2) + user = self._get_user(res.json["uid"]) + self.assertEqual(user["nodeid"], node_id2) # min(100 * 0.1, 100 - 99) = 1 node1 = self._get_node(node_id1) - self.assertEqual(node1['available'], 1) + self.assertEqual(node1["available"], 1) # min(100 * 0.1, 100 - 90) = 10, and this is the node to which the # user was assigned, so the final available count is 9 node2 = self._get_node(node_id2) - self.assertEqual(node2['available'], 9) + self.assertEqual(node2["available"], 9) # min(81 * 0.1, 81 - 80) = 1 node3 = self._get_node(node_id3) - self.assertEqual(node3['available'], 1) + self.assertEqual(node3["available"], 1) # min(100 * 0.1, 71 - 70) = 1 node4 = self._get_node(node_id4) - self.assertEqual(node4['available'], 1) + self.assertEqual(node4["available"], 1) # Nodes with downed set to 1 do not have their availability updated node5 = self._get_node(node_id5) - self.assertEqual(node5['available'], 0) + self.assertEqual(node5["available"], 0) def test_unsuccessfully_releasing_node_capacity(self): # Start with a clean database - cursor = self._execute_sql('DELETE FROM nodes', ()) - cursor.close() + self._clear_nodes() - self._add_node(available=0, current_load=100, capacity=100, - node='https://node1') - self._add_node(available=0, current_load=90, capacity=90, - node='https://node2') - self._add_node(available=0, current_load=80, capacity=80, - node='https://node3') - headers = self._build_auth_headers(generation=1234, - keys_changed_at=1234, - client_state='aaaa') + self._add_node( + available=0, current_load=100, capacity=100, node="https://node1" + ) + self._add_node( + available=0, current_load=90, capacity=90, node="https://node2" + ) + self._add_node( + available=0, current_load=80, capacity=80, node="https://node3" + ) + headers = self._build_auth_headers( + generation=1234, keys_changed_at=1234, client_state="aaaa" + ) # All of these nodes are completely full, and no capacity can be # released - res = self.app.get('/1.0/sync/1.5', headers=headers, status=503) + res = self.app.get("/1.0/sync/1.5", headers=headers, status=503) # The response has the expected body expected_error_response = { - 'errors': [ + "errors": [ { - 'description': 'Unexpected error: unable to get a node', - 'location': 'internal', - 'name': '' + "description": "Unexpected error: unable to get a node", + "location": "internal", + "name": "", } ], - 'status': 'internal-error' + "status": "internal-error", } self.assertEqual(res.json, expected_error_response) diff --git a/tools/integration_tests/tokenserver/test_support.py b/tools/integration_tests/tokenserver/test_support.py index 982066dab1..1f49d61214 100644 --- a/tools/integration_tests/tokenserver/test_support.py +++ b/tools/integration_tests/tokenserver/test_support.py @@ -9,229 +9,249 @@ import time import urllib.parse as urlparse -from sqlalchemy import create_engine +from sqlalchemy import ( + create_engine, + event, + select, + delete, + insert, + and_, + func, + distinct, +) +from sqlalchemy.pool import NullPool +from sqlalchemy.engine import Engine +from sqlalchemy.orm import close_all_sessions, Session from tokenlib.utils import decode_token_bytes from webtest import TestApp -DEFAULT_OAUTH_SCOPE = 'https://identity.mozilla.com/apps/oldsync' +from tokenserver.tables import Users, Nodes, Services + +DEFAULT_OAUTH_SCOPE = 
"https://identity.mozilla.com/apps/oldsync" class TestCase: - FXA_EMAIL_DOMAIN = 'api-accounts.stage.mozaws.net' - FXA_METRICS_HASH_SECRET = os.environ.get("SYNC_MASTER_SECRET", 'secret0') + FXA_EMAIL_DOMAIN = "api-accounts.stage.mozaws.net" + FXA_METRICS_HASH_SECRET = os.environ.get("SYNC_MASTER_SECRET", "secret0") NODE_ID = 800 - NODE_URL = 'https://example.com' - TOKEN_SIGNING_SECRET = os.environ.get("SYNC_MASTER_SECRET", 'secret0') - TOKENSERVER_HOST = os.environ['TOKENSERVER_HOST'] + NODE_URL = "https://example.com" + TOKEN_SIGNING_SECRET = os.environ.get("SYNC_MASTER_SECRET", "secret0") + TOKENSERVER_HOST = os.environ["TOKENSERVER_HOST"] @classmethod def setUpClass(cls): cls._build_auth_headers = cls._build_oauth_headers def setUp(self): - engine = create_engine(os.environ['SYNC_TOKENSERVER__DATABASE_URL']) - self.database = engine. \ - execution_options(isolation_level='AUTOCOMMIT'). \ - connect() + self._db_connect() host_url = urlparse.urlparse(self.TOKENSERVER_HOST) - self.app = TestApp(self.TOKENSERVER_HOST, extra_environ={ - 'HTTP_HOST': host_url.netloc, - 'wsgi.url_scheme': host_url.scheme or 'http', - 'SERVER_NAME': host_url.hostname, - 'REMOTE_ADDR': '127.0.0.1', - 'SCRIPT_NAME': host_url.path, - }) + self.app = TestApp( + self.TOKENSERVER_HOST, + extra_environ={ + "HTTP_HOST": host_url.netloc, + "wsgi.url_scheme": host_url.scheme or "http", + "SERVER_NAME": host_url.hostname, + "REMOTE_ADDR": "127.0.0.1", + "SCRIPT_NAME": host_url.path, + }, + ) # Start each test with a blank slate. - cursor = self._execute_sql(('DELETE FROM users'), ()) - cursor.close() - - cursor = self._execute_sql(('DELETE FROM nodes'), ()) - cursor.close() + with Session(self.engine) as session, session.begin(): + session.execute(delete(Users)) + session.execute(delete(Nodes)) + session.execute(delete(Services)) - self.service_id = self._add_service('sync-1.5', r'{node}/1.5/{uid}') + self.service_id = self._add_service("sync-1.5", r"{node}/1.5/{uid}") # Ensure we have a node with enough capacity to run the tests. self._add_node(capacity=100, node=self.NODE_URL, id=self.NODE_ID) def tearDown(self): # And clean up at the end, for good measure. 
- cursor = self._execute_sql(('DELETE FROM users'), ()) - cursor.close() - - cursor = self._execute_sql(('DELETE FROM nodes'), ()) - cursor.close() - - cursor = self._execute_sql(('DELETE FROM services'), ()) - cursor.close() - - self.database.close() - - def _build_oauth_headers(self, generation=None, user='test', - keys_changed_at=None, client_state=None, - status=200, **additional_headers): + with Session(self.engine) as session, session.begin(): + session.execute(delete(Users)) + session.execute(delete(Nodes)) + session.execute(delete(Services)) + + # Ensure that everything is saved in db + close_all_sessions() + self.engine.dispose() + + def _build_oauth_headers( + self, + generation=None, + user="test", + keys_changed_at=None, + client_state=None, + status=200, + **additional_headers + ): claims = { - 'user': user, - 'generation': generation, - 'client_id': 'fake client id', - 'scope': [DEFAULT_OAUTH_SCOPE], + "user": user, + "generation": generation, + "client_id": "fake client id", + "scope": [DEFAULT_OAUTH_SCOPE], } if generation is not None: - claims['generation'] = generation + claims["generation"] = generation - body = { - 'body': claims, - 'status': status - } + body = {"body": claims, "status": status} headers = {} - headers['Authorization'] = 'Bearer %s' % json.dumps(body) + headers["Authorization"] = "Bearer %s" % json.dumps(body) client_state = binascii.unhexlify(client_state) - client_state = b64encode(client_state).strip(b'=').decode('utf-8') - headers['X-KeyID'] = '%s-%s' % (keys_changed_at, client_state) + client_state = b64encode(client_state).strip(b"=").decode("utf-8") + headers["X-KeyID"] = "%s-%s" % (keys_changed_at, client_state) headers.update(additional_headers) return headers - def _add_node(self, capacity=100, available=100, node=NODE_URL, id=None, - current_load=0, backoff=0, downed=0): - query = 'INSERT INTO nodes (service, node, available, capacity, \ - current_load, backoff, downed' - data = (self.service_id, node, available, capacity, current_load, - backoff, downed) - - if id: - query += ', id) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)' - data += (id,) - else: - query += ') VALUES(%s, %s, %s, %s, %s, %s, %s)' - - cursor = self._execute_sql(query, data) - cursor.close() - - return self._last_insert_id() + def _add_node( + self, + capacity=100, + available=100, + node=NODE_URL, + id=None, + current_load=0, + backoff=0, + downed=0, + ): + query = insert(Nodes).values( + service=self.service_id, + node=node, + available=available, + capacity=capacity, + current_load=current_load, + backoff=backoff, + downed=downed, + ) + + if id is not None: + query = insert(Nodes).values( + service=self.service_id, + node=node, + available=available, + capacity=capacity, + current_load=current_load, + backoff=backoff, + downed=downed, + id=id, + ) + + with Session(self.engine) as session, session.begin(): + result = session.execute(query) + lastrowid = result.lastrowid + + return lastrowid def _get_node(self, id): - query = 'SELECT * FROM nodes WHERE id=%s' - cursor = self._execute_sql(query, (id,)) - (id, service, node, available, current_load, capacity, downed, - backoff) = cursor.fetchone() - cursor.close() - - return { - 'id': id, - 'service': service, - 'node': node, - 'available': available, - 'current_load': current_load, - 'capacity': capacity, - 'downed': downed, - 'backoff': backoff - } + query = select(Nodes).where(Nodes.id == id) - def _last_insert_id(self): - cursor = self._execute_sql('SELECT LAST_INSERT_ID()', ()) - (id,) = cursor.fetchone() - cursor.close() + 
with Session(self.engine) as session: + result = session.execute(query) + (node,) = result.fetchone() - return id + return node._asdict() def _add_service(self, service_name, pattern): - query = 'INSERT INTO services (service, pattern) \ - VALUES(%s, %s)' - cursor = self._execute_sql(query, (service_name, pattern)) - cursor.close() - - return self._last_insert_id() - - def _add_user(self, email=None, generation=1234, client_state='aaaa', - created_at=None, nodeid=NODE_ID, keys_changed_at=1234, - replaced_at=None): - query = ''' - INSERT INTO users (service, email, generation, client_state, \ - created_at, nodeid, keys_changed_at, replaced_at) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s); - ''' + query = insert(Services).values(service=service_name, pattern=pattern) + with Session(self.engine) as session, session.begin(): + result = session.execute(query) + lastrowid = result.lastrowid + + return lastrowid + + def _add_user( + self, + email=None, + generation=1234, + client_state="aaaa", + created_at=None, + nodeid=NODE_ID, + keys_changed_at=1234, + replaced_at=None, + ): created_at = created_at or math.trunc(time.time() * 1000) - cursor = self._execute_sql(query, - (self.service_id, - email or 'test@%s' % self.FXA_EMAIL_DOMAIN, - generation, client_state, - created_at, nodeid, keys_changed_at, - replaced_at)) - cursor.close() - - return self._last_insert_id() + query = insert(Users).values( + service=self.service_id, + email=email or "test@%s" % self.FXA_EMAIL_DOMAIN, + generation=generation, + client_state=client_state, + created_at=created_at, + nodeid=nodeid, + keys_changed_at=keys_changed_at, + replaced_at=replaced_at, + ) + with Session(self.engine) as session, session.begin(): + result = session.execute(query) + lastrowid = result.lastrowid + + return lastrowid def _get_user(self, uid): - query = 'SELECT * FROM users WHERE uid = %s' - cursor = self._execute_sql(query, (uid,)) - - (uid, service, email, generation, client_state, created_at, - replaced_at, nodeid, keys_changed_at) = cursor.fetchone() - cursor.close() - - return { - 'uid': uid, - 'service': service, - 'email': email, - 'generation': generation, - 'client_state': client_state, - 'created_at': created_at, - 'replaced_at': replaced_at, - 'nodeid': nodeid, - 'keys_changed_at': keys_changed_at - } + query = select(Users).where(Users.uid == uid) + with Session(self.engine) as session: + result = session.execute(query) + (user,) = result.fetchone() + + return user._asdict() def _get_replaced_users(self, service_id, email): - query = 'SELECT * FROM users WHERE service = %s AND email = %s AND \ - replaced_at IS NOT NULL' - cursor = self._execute_sql(query, (service_id, email)) - - users = [] - for user in cursor.fetchall(): - (uid, service, email, generation, client_state, created_at, - replaced_at, nodeid, keys_changed_at) = user - - user_dict = { - 'uid': uid, - 'service': service, - 'email': email, - 'generation': generation, - 'client_state': client_state, - 'created_at': created_at, - 'replaced_at': replaced_at, - 'nodeid': nodeid, - 'keys_changed_at': keys_changed_at - } - users.append(user_dict) - - cursor.close() - return users + query = select(Users).where( + and_( + Users.service == service_id, + and_(Users.email == email, Users.replaced_at is not None), + ) + ) + with Session(self.engine) as session: + result = session.execute(query) + users = result.fetchall() + + users_dicts = [] + for user in users: + users_dicts.append(user._asdict()) + + return users_dicts def _get_service_id(self, service): - query = 'SELECT id FROM 
services WHERE service = %s' - cursor = self._execute_sql(query, (service,)) - (service_id,) = cursor.fetchone() - cursor.close() + query = select(Services.id).where(Services.service == service) + with Session(self.engine) as session: + result = session.execute(query) + (service_id,) = result.fetchone() return service_id def _count_users(self): - query = 'SELECT COUNT(DISTINCT(uid)) FROM users' - cursor = self._execute_sql(query, ()) - (count,) = cursor.fetchone() - cursor.close() + query = select(func.count(distinct(Users.uid))) + with Session(self.engine) as session: + result = session.execute(query) + (count,) = result.fetchone() return count - def _execute_sql(self, query, args): - cursor = self.database.execute(query, args) - - return cursor + def _clear_nodes(self): + with Session(self.engine) as session, session.begin(): + session.execute(delete(Nodes)) + + def _db_connect(self): + self.engine = create_engine( + os.environ["SYNC_TOKENSERVER__DATABASE_URL"], poolclass=NullPool + ) + if self.engine.name == "sqlite": + + @event.listens_for(Engine, "connect") + def set_sqlite_pragma(dbapi_connection, connection_record): + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA journal_mode = WAL;") + cursor.execute("PRAGMA synchronous = NORMAL;") + cursor.execute("PRAGMA foreign_keys = ON;") + cursor.execute("PRAGMA busy_timeout = 10000") + cursor.close() + dbapi_connection.commit() def unsafelyParseToken(self, token): # For testing purposes, don't check HMAC or anything... - return json.loads(decode_token_bytes(token)[:-32].decode('utf8')) + return json.loads(decode_token_bytes(token)[:-32].decode("utf8")) diff --git a/tools/spanner/count_expired_rows.py b/tools/spanner/count_expired_rows.py index 824a983c1b..202693e0ed 100644 --- a/tools/spanner/count_expired_rows.py +++ b/tools/spanner/count_expired_rows.py @@ -19,7 +19,8 @@ logging.basicConfig( format='{"datetime": "%(asctime)s", "message": "%(message)s"}', stream=sys.stdout, - level=logging.INFO) + level=logging.INFO, +) # Change these to match your install. client = spanner.Client() @@ -51,10 +52,12 @@ def spanner_read_data(query: str, table: str) -> None: if __name__ == "__main__": - logging.info('Starting count_expired_rows.py') + logging.info("Starting count_expired_rows.py") - for table in ['batches', 'bsos']: - query = f'SELECT COUNT(*) FROM {table} WHERE expiry < CURRENT_TIMESTAMP()' + for table in ["batches", "bsos"]: + query = ( + f"SELECT COUNT(*) FROM {table} WHERE expiry < CURRENT_TIMESTAMP()" + ) spanner_read_data(query, table) - logging.info('Completed count_expired_rows.py') + logging.info("Completed count_expired_rows.py") diff --git a/tools/spanner/count_users.py b/tools/spanner/count_users.py index db8771b759..ef9a6a728f 100644 --- a/tools/spanner/count_users.py +++ b/tools/spanner/count_users.py @@ -20,7 +20,8 @@ logging.basicConfig( format='{"datetime": "%(asctime)s", "message": "%(message)s"}', stream=sys.stdout, - level=logging.INFO) + level=logging.INFO, +) # Change these to match your install. 
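Aside: `@event.listens_for(Engine, "connect")` in `_db_connect` above registers the PRAGMA hook on the SQLAlchemy `Engine` class, so it fires for every engine created in the process. If that ever proved too broad, the listener could be scoped to the one test engine, roughly as sketched here; this is a variant, not what the patch does.

# Variant sketch: register the SQLite PRAGMA listener on this engine
# instance only, instead of on the Engine class as the patch above does.
from sqlalchemy import create_engine, event


def connect_sqlite(url):
    engine = create_engine(url)
    if engine.name == "sqlite":

        @event.listens_for(engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA journal_mode = WAL;")
            cursor.execute("PRAGMA synchronous = NORMAL;")
            cursor.execute("PRAGMA foreign_keys = ON;")
            cursor.execute("PRAGMA busy_timeout = 10000")
            cursor.close()

    return engine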
client = spanner.Client() @@ -49,7 +50,7 @@ def spanner_read_data() -> None: # Count users with statsd.timer("syncstorage.count_users.duration"): with database.snapshot() as snapshot: - query = 'SELECT COUNT (DISTINCT fxa_uid) FROM user_collections' + query = "SELECT COUNT (DISTINCT fxa_uid) FROM user_collections" result = snapshot.execute_sql(query) user_count = result.one()[0] statsd.gauge("syncstorage.distinct_fxa_uid", user_count) @@ -57,8 +58,8 @@ def spanner_read_data() -> None: if __name__ == "__main__": - logging.info('Starting count_users.py') + logging.info("Starting count_users.py") spanner_read_data() - logging.info('Completed count_users.py') + logging.info("Completed count_users.py") diff --git a/tools/spanner/purge_ttl.py b/tools/spanner/purge_ttl.py index 19d34f76e5..c935fea5d2 100644 --- a/tools/spanner/purge_ttl.py +++ b/tools/spanner/purge_ttl.py @@ -23,7 +23,8 @@ logging.basicConfig( format='{"datetime": "%(asctime)s", "message": "%(message)s"}', stream=sys.stdout, - level=logging.INFO) + level=logging.INFO, +) # Change these to match your install. client = spanner.Client() @@ -40,10 +41,13 @@ def deleter(database: Database, start = datetime.now() result = 0 if not dryrun: - result = database.execute_partitioned_dml(query, params=params, param_types=param_types) + result = database.execute_partitioned_dml( + query, params=params, param_types=param_types + ) end = datetime.now() logging.info( - f"{name}: removed {result} rows, {name}_duration: {end - start}, prefix: {prefix}") + f"{name}: removed {result} rows, {name}_duration: {end - start}, prefix: {prefix}" + ) def add_conditions(args, query: str, prefix: Optional[str]): """ @@ -61,19 +65,18 @@ def add_conditions(args, query: str, prefix: Optional[str]): query += " AND collection_id" if len(ids) == 1: query += " = @collection_id".format(ids[0]) - params['collection_id'] = ids[0] - types['collection_id'] = param_types.INT64 + params["collection_id"] = ids[0] + types["collection_id"] = param_types.INT64 else: - for count,id in enumerate(ids): - name = f'collection_id_{count}' + for count, id in enumerate(ids): + name = "collection_id_{}".format(count) params[name] = id types[name] = param_types.INT64 - query += " in (@{})".format( - ', @'.join(params.keys())) + query += " in (@{})".format(", @".join(params.keys())) if prefix: - query += ' AND STARTS_WITH(fxa_uid, @prefix)'.format(prefix) - params['prefix'] = prefix - types['prefix'] = param_types.STRING + query += " AND STARTS_WITH(fxa_uid, @prefix)".format(prefix) + params["prefix"] = prefix + types["prefix"] = param_types.STRING return (query, params, types) @@ -84,7 +87,7 @@ def get_expiry_condition(args): :return: A SQL snippet to use in the WHERE clause """ if args.expiry_mode == "now": - return 'expiry < CURRENT_TIMESTAMP()' + return "expiry < CURRENT_TIMESTAMP()" elif args.expiry_mode == "midnight": return 'expiry < TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), DAY, "UTC")' else: @@ -112,19 +115,24 @@ def spanner_purge(args) -> None: expiry_condition = get_expiry_condition(args) if args.auto_split: args.uid_prefixes = [ - hex(i).lstrip("0x").zfill(args.auto_split) for i in range( - 0, 16 ** args.auto_split)] + hex(i).lstrip("0x").zfill(args.auto_split) + for i in range(0, 16**args.auto_split) + ] prefixes = args.uid_prefixes if args.uid_prefixes else [None] for prefix in prefixes: - logging.info(f"For {args.instance_id}:{args.database_id}, prefix = {prefix}") + logging.info( + "For {}:{}, prefix = {}".format( + args.instance_id, args.database_id, prefix + ) + ) if 
args.mode in ["batches", "both"]: # Delete Batches. Also deletes child batch_bsos rows (INTERLEAVE # IN PARENT batches ON DELETE CASCADE) (batch_query, params, types) = add_conditions( args, - f'DELETE FROM batches WHERE {expiry_condition}', + "DELETE FROM batches WHERE {}".format(expiry_condition), prefix, ) deleter( @@ -180,13 +188,13 @@ def get_args(): "-i", "--instance_id", default=os.environ.get("INSTANCE_ID", "spanner-test"), - help="Spanner instance ID" + help="Spanner instance ID", ) parser.add_argument( "-d", "--database_id", default=os.environ.get("DATABASE_ID", "sync_schema3"), - help="Spanner Database ID" + help="Spanner Database ID", ) parser.add_argument( "-p", @@ -198,14 +206,14 @@ def get_args(): "-u", "--sync_database_url", default=os.environ.get("SYNC_SYNCSTORAGE__DATABASE_URL"), - help="Spanner Database DSN" + help="Spanner Database DSN", ) parser.add_argument( "--collection_ids", "--ids", type=parse_args_list, default=os.environ.get("COLLECTION_IDS", "[]"), - help="Array of collection IDs to purge" + help="Array of collection IDs to purge", ) parser.add_argument( "--uid_prefixes", @@ -213,34 +221,34 @@ def get_args(): type=parse_args_list, default=os.environ.get("PURGE_UID_PREFIXES", "[]"), help="Array of strings used to limit purges based on UID. " - "Each entry is a separate purge run." + "Each entry is a separate purge run.", ) parser.add_argument( "--auto_split", type=int, default=os.environ.get("PURGE_AUTO_SPLIT"), help="""Automatically generate `uid_prefixes` for this many digits, """ - """(e.g. `3` would produce """ - """`uid_prefixes=["000","001","002",...,"fff"])""" + """(e.g. `3` would produce """ + """`uid_prefixes=["000","001","002",...,"fff"])""", ) parser.add_argument( "--mode", type=str, choices=["batches", "bsos", "both"], default=os.environ.get("PURGE_MODE", "both"), - help="Purge TTLs in batches, bsos, or both" + help="Purge TTLs in batches, bsos, or both", ) parser.add_argument( "--expiry_mode", type=str, choices=["now", "midnight"], default=os.environ.get("PURGE_EXPIRY_MODE", "midnight"), - help="Choose the timestamp used to check if an entry is expired" + help="Choose the timestamp used to check if an entry is expired", ) parser.add_argument( - '--dryrun', + "--dryrun", action="store_true", - help="Do not purge user records from spanner" + help="Do not purge user records from spanner", ) args = parser.parse_args() diff --git a/tools/spanner/write_batch.py b/tools/spanner/write_batch.py index 923e3c7ed1..2291e70d0c 100644 --- a/tools/spanner/write_batch.py +++ b/tools/spanner/write_batch.py @@ -71,11 +71,12 @@ PAYLOAD_SIZE = 25000 # fake a base64 like payload. Not strictly neccessary, but may help ML # routines. -PAYLOAD = ''.join( +PAYLOAD = "".join( random.choice( string.digits + string.ascii_uppercase + string.ascii_lowercase + "-_=" ) - for _ in range(PAYLOAD_SIZE)) + for _ in range(PAYLOAD_SIZE) +) def load(instance, db, coll_id, name): @@ -100,14 +101,14 @@ def create_user(txn): fxa_uid=fxa_uid, fxa_kid=fxa_kid, collection_id=coll_id, - modified=start + modified=start, ), param_types=dict( fxa_uid=param_types.STRING, fxa_kid=param_types.STRING, collection_id=param_types.INT64, - modified=param_types.TIMESTAMP - ) + modified=param_types.TIMESTAMP, + ), ) try: @@ -132,7 +133,7 @@ def create_user(txn): None, PAYLOAD, start, - start + timedelta(days=365 * 5) + start + timedelta(days=365 * 5), ) # determine it's size. 
rlen = len(record[1]) * 4 @@ -145,35 +146,40 @@ def create_user(txn): records.append(record) with db.batch() as batch: batch.insert( - table='bsos', + table="bsos", columns=( - 'fxa_uid', - 'fxa_kid', - 'collection_id', - 'bso_id', - 'sortindex', - 'payload', - 'modified', - 'expiry' + "fxa_uid", + "fxa_kid", + "collection_id", + "bso_id", + "sortindex", + "payload", + "modified", + "expiry", ), - values=records + values=records, ) print( - ('{name} Wrote batch {b} of {bb}:' - ' {c} records {r} bytes, {t}').format( + ( + "{name} Wrote batch {b} of {bb}:" " {c} records {r} bytes, {t}" + ).format( name=name, b=j + 1, bb=BATCHES, c=BATCH_SIZE, r=rlen, - t=datetime.now() - start)) - print('{name} Total: {t} (count: {c}, size: {s} in {sec})'.format( - name=name, - t=BATCHES, - c=BATCHES * BATCH_SIZE, - s=BATCHES * BATCH_SIZE * rlen, - sec=datetime.now() - start - )) + t=datetime.now() - start, + ) + ) + print( + "{name} Total: {t} (count: {c}, size: {s} in {sec})".format( + name=name, + t=BATCHES, + c=BATCHES * BATCH_SIZE, + s=BATCHES * BATCH_SIZE * rlen, + sec=datetime.now() - start, + ) + ) def loader(): @@ -195,5 +201,5 @@ def main(): t.start() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/tokenserver/add_node.py b/tools/tokenserver/add_node.py index e86b01db5e..53e5aaa5da 100644 --- a/tools/tokenserver/add_node.py +++ b/tools/tokenserver/add_node.py @@ -41,16 +41,37 @@ def main(args=None): usage = "usage: %prog [options] node_name capacity" descr = "Add a new node to the tokenserver database" parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("", "--available", type="int", - help="How many user slots the node has available") - parser.add_option("", "--current-load", type="int", - help="How many user slots the node has occupied") - parser.add_option("", "--downed", action="store_true", - help="Mark the node as down in the db") - parser.add_option("", "--backoff", action="store_true", - help="Mark the node as backed-off in the db") - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "", + "--available", + type="int", + help="How many user slots the node has available", + ) + parser.add_option( + "", + "--current-load", + type="int", + help="How many user slots the node has occupied", + ) + parser.add_option( + "", + "--downed", + action="store_true", + help="Mark the node as down in the db", + ) + parser.add_option( + "", + "--backoff", + action="store_true", + help="Mark the node as backed-off in the db", + ) + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if len(args) != 2: diff --git a/tools/tokenserver/allocate_user.py b/tools/tokenserver/allocate_user.py index 197ac7ecdd..2a78b4b93e 100644 --- a/tools/tokenserver/allocate_user.py +++ b/tools/tokenserver/allocate_user.py @@ -46,11 +46,18 @@ def main(args=None): to the allocate_user() function. """ usage = "usage: %prog [options] email [node_name]" - descr = "Allocate a user to a node. You may specify a particular node, "\ - "or omit to use the best available node." + descr = ( + "Allocate a user to a node. You may specify a particular node, " + "or omit to use the best available node." 
+ ) parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if not 1 <= len(args) <= 2: diff --git a/tools/tokenserver/count_users.py b/tools/tokenserver/count_users.py index f2963ebd98..68c2c04e61 100644 --- a/tools/tokenserver/count_users.py +++ b/tools/tokenserver/count_users.py @@ -56,7 +56,7 @@ def count_users(outfile, timestamp=None): "op": "sync_count_users", "total_users": count, "time": datetime.fromtimestamp(ts_sec, utc).isoformat(), - "v": 0 + "v": 0, } json.dump(output, outfile) outfile.write("\n") @@ -71,12 +71,20 @@ def main(args=None): usage = "usage: %prog [options]" descr = "Count total users in the tokenserver database" parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("-t", "--timestamp", type="int", - help="Max creation timestamp; default previous midnight") - parser.add_option("-o", "--output", - help="Output file; default stderr") - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "-t", + "--timestamp", + type="int", + help="Max creation timestamp; default previous midnight", + ) + parser.add_option("-o", "--output", help="Output file; default stderr") + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if len(args) != 0: diff --git a/tools/tokenserver/database.py b/tools/tokenserver/database.py index 1d57af320d..f180eab79e 100644 --- a/tools/tokenserver/database.py +++ b/tools/tokenserver/database.py @@ -14,7 +14,8 @@ MAX_GENERATION = 9223372036854775807 NODE_FIELDS = ("capacity", "available", "current_load", "downed", "backoff") -_GET_USER_RECORDS = sqltext("""\ +_GET_USER_RECORDS = sqltext( + """\ select uid, nodes.node, generation, keys_changed_at, client_state, created_at, replaced_at @@ -26,9 +27,11 @@ created_at desc, uid desc limit 20 -""") +""" +) -_CREATE_USER_RECORD = sqltext("""\ +_CREATE_USER_RECORD = sqltext( + """\ insert into users (service, email, nodeid, generation, keys_changed_at, client_state, @@ -36,14 +39,16 @@ values (:service, :email, :nodeid, :generation, :keys_changed_at, :client_state, :timestamp, NULL) -""") +""" +) # The `where` clause on this statement is designed as an extra layer of # protection, to ensure that concurrent updates don't accidentally move # timestamp fields backwards in time. The handling of `keys_changed_at` # is additionally weird because we want to treat the default `NULL` value # as zero. -_UPDATE_USER_RECORD_IN_PLACE = sqltext("""\ +_UPDATE_USER_RECORD_IN_PLACE = sqltext( + """\ update users set @@ -55,10 +60,12 @@ COALESCE(keys_changed_at, 0) <= COALESCE(:keys_changed_at, keys_changed_at, 0) and replaced_at is null -""") +""" +) -_REPLACE_USER_RECORDS = sqltext("""\ +_REPLACE_USER_RECORDS = sqltext( + """\ update users set @@ -66,12 +73,14 @@ where service = :service and email = :email and replaced_at is null and created_at < :timestamp -""") +""" +) # Mark all records for the user as replaced, # and set a large generation number to block future logins. 
-_RETIRE_USER_RECORDS = sqltext("""\ +_RETIRE_USER_RECORDS = sqltext( + """\ update users set @@ -80,10 +89,12 @@ where email = :email and replaced_at is null -""") +""" +) -_GET_OLD_USER_RECORDS_FOR_SERVICE = sqltext("""\ +_GET_OLD_USER_RECORDS_FOR_SERVICE = sqltext( + """\ select uid, email, generation, keys_changed_at, client_state, nodes.node, nodes.downed, created_at, replaced_at @@ -99,7 +110,8 @@ :limit offset :offset -""") +""" +) _GET_OLD_USER_RECORDS_FOR_SERVICE_RANGE = """\ select @@ -122,7 +134,8 @@ """ -_GET_ALL_USER_RECORDS_FOR_SERVICE = sqltext("""\ +_GET_ALL_USER_RECORDS_FOR_SERVICE = sqltext( + """\ select uid, nodes.node, created_at, replaced_at from @@ -131,10 +144,12 @@ email = :email and users.service = :service order by created_at asc, uid desc -""") +""" +) -_REPLACE_USER_RECORD = sqltext("""\ +_REPLACE_USER_RECORD = sqltext( + """\ update users set @@ -143,30 +158,36 @@ service = :service and uid = :uid -""") +""" +) -_DELETE_USER_RECORD = sqltext("""\ +_DELETE_USER_RECORD = sqltext( + """\ delete from users where service = :service and uid = :uid -""") +""" +) -_FREE_SLOT_ON_NODE = sqltext("""\ +_FREE_SLOT_ON_NODE = sqltext( + """\ update nodes set available = available + 1, current_load = current_load - 1 where id = (SELECT nodeid FROM users WHERE service=:service AND uid=:uid) -""") +""" +) -_COUNT_USER_RECORDS = sqltext("""\ +_COUNT_USER_RECORDS = sqltext( + """\ select count(email) from @@ -174,10 +195,12 @@ where replaced_at is null and created_at <= :timestamp -""") +""" +) -_GET_BEST_NODE = sqltext("""\ +_GET_BEST_NODE = sqltext( + """\ select id, node from @@ -191,10 +214,12 @@ order by log(current_load) / log(capacity) limit 1 -""") +""" +) -_RELEASE_NODE_CAPACITY = sqltext("""\ +_RELEASE_NODE_CAPACITY = sqltext( + """\ update nodes set @@ -205,10 +230,12 @@ and available <= 0 and capacity > current_load and downed = 0 -""") +""" +) -_ADD_USER_TO_NODE = sqltext("""\ +_ADD_USER_TO_NODE = sqltext( + """\ update nodes set @@ -217,20 +244,24 @@ where service = :service and node = :node -""") +""" +) -_GET_SERVICE_ID = sqltext("""\ +_GET_SERVICE_ID = sqltext( + """\ select id from services where service = :service -""") +""" +) -_GET_NODE = sqltext("""\ +_GET_NODE = sqltext( + """\ select * from @@ -238,10 +269,12 @@ where service = :service and node = :node - """) + """ +) -_GET_SPANNER_NODE = sqltext("""\ +_GET_SPANNER_NODE = sqltext( + """\ select id, node from @@ -250,21 +283,24 @@ id = :id limit 1 -""") +""" +) -SERVICE_NAME = 'sync-1.5' +SERVICE_NAME = "sync-1.5" class Database: def __init__(self): - engine = create_engine(os.environ['SYNC_TOKENSERVER__DATABASE_URL']) - self.database = engine. \ - execution_options(isolation_level="AUTOCOMMIT"). \ - connect() - self.capacity_release_rate = os.environ. 
\ - get("NODE_CAPACITY_RELEASE_RATE", 0.1) + engine = create_engine(os.environ["SYNC_TOKENSERVER__DATABASE_URL"]) + self.database = engine.execution_options( + isolation_level="AUTOCOMMIT" + ).connect() + self.capacity_release_rate = os.environ.get( + "NODE_CAPACITY_RELEASE_RATE", 0.1 + ) self.spanner_node_id = os.environ.get( - "SYNC_TOKENSERVER__SPANNER_NODE_ID") + "SYNC_TOKENSERVER__SPANNER_NODE_ID" + ) self.spanner_node = None if self.spanner_node_id: self.spanner_node = self.get_spanner_node(self.spanner_node_id) @@ -276,8 +312,10 @@ def close(self): self.database.close() def get_user(self, email): - params = {'service': self._get_service_id(SERVICE_NAME), - 'email': email} + params = { + "service": self._get_service_id(SERVICE_NAME), + "email": email, + } res = self._execute_sql(_GET_USER_RECORDS, **params) try: # The query fetches rows ordered by created_at, but we want @@ -294,40 +332,49 @@ def get_user(self, email): cur_row = rows[0] old_rows = rows[1:] user = { - 'email': email, - 'uid': cur_row.uid, - 'node': cur_row.node, - 'generation': cur_row.generation, - 'keys_changed_at': cur_row.keys_changed_at or 0, - 'client_state': cur_row.client_state, - 'old_client_states': {}, - 'first_seen_at': cur_row.created_at, + "email": email, + "uid": cur_row.uid, + "node": cur_row.node, + "generation": cur_row.generation, + "keys_changed_at": cur_row.keys_changed_at or 0, + "client_state": cur_row.client_state, + "old_client_states": {}, + "first_seen_at": cur_row.created_at, } # If the current row is marked as replaced or is missing a node, # and they haven't been retired, then assign them a new node. if cur_row.replaced_at is not None or cur_row.node is None: if cur_row.generation < MAX_GENERATION: - user = self.allocate_user(email, - cur_row.generation, - cur_row.client_state, - cur_row.keys_changed_at) + user = self.allocate_user( + email, + cur_row.generation, + cur_row.client_state, + cur_row.keys_changed_at, + ) for old_row in old_rows: # Collect any previously-seen client-state values. - if old_row.client_state != user['client_state']: - user['old_client_states'][old_row.client_state] = True + if old_row.client_state != user["client_state"]: + user["old_client_states"][old_row.client_state] = True # Make sure each old row is marked as replaced. # They might not be, due to races in row creation. if old_row.replaced_at is None: timestamp = cur_row.created_at self.replace_user_record(old_row.uid, timestamp) # Track backwards to the oldest timestamp at which we saw them. 
- user['first_seen_at'] = old_row.created_at + user["first_seen_at"] = old_row.created_at return user finally: res.close() - def allocate_user(self, email, generation=0, client_state='', - keys_changed_at=0, node=None, timestamp=None): + def allocate_user( + self, + email, + generation=0, + client_state="", + keys_changed_at=0, + node=None, + timestamp=None, + ): if timestamp is None: timestamp = get_timestamp() if node is None: @@ -335,99 +382,109 @@ def allocate_user(self, email, generation=0, client_state='', else: nodeid = self.get_node_id(node) params = { - 'service': self._get_service_id(SERVICE_NAME), - 'email': email, - 'nodeid': nodeid, - 'generation': generation, - 'keys_changed_at': keys_changed_at, - 'client_state': client_state, - 'timestamp': timestamp + "service": self._get_service_id(SERVICE_NAME), + "email": email, + "nodeid": nodeid, + "generation": generation, + "keys_changed_at": keys_changed_at, + "client_state": client_state, + "timestamp": timestamp, } res = self._execute_sql(_CREATE_USER_RECORD, **params) return { - 'email': email, - 'uid': res.lastrowid, - 'node': node, - 'generation': generation, - 'keys_changed_at': keys_changed_at, - 'client_state': client_state, - 'old_client_states': {}, - 'first_seen_at': timestamp, + "email": email, + "uid": res.lastrowid, + "node": node, + "generation": generation, + "keys_changed_at": keys_changed_at, + "client_state": client_state, + "old_client_states": {}, + "first_seen_at": timestamp, } - def update_user(self, user, generation=None, client_state=None, - keys_changed_at=None, node=None): + def update_user( + self, + user, + generation=None, + client_state=None, + keys_changed_at=None, + node=None, + ): if client_state is None and node is None: # No need for a node-reassignment, just update the row in place. # Note that if we're changing keys_changed_at without changing # client_state, it's because we're seeing an existing value of # keys_changed_at for the first time. params = { - 'service': self._get_service_id(SERVICE_NAME), - 'email': user['email'], - 'generation': generation, - 'keys_changed_at': keys_changed_at + "service": self._get_service_id(SERVICE_NAME), + "email": user["email"], + "generation": generation, + "keys_changed_at": keys_changed_at, } res = self._execute_sql(_UPDATE_USER_RECORD_IN_PLACE, **params) res.close() if generation is not None: - user['generation'] = max(user['generation'], generation) - user['keys_changed_at'] = max_keys_changed_at( - user, - keys_changed_at + user["generation"] = max(user["generation"], generation) + user["keys_changed_at"] = max_keys_changed_at( + user, keys_changed_at ) else: # Reject previously-seen client-state strings. if client_state is None: - client_state = user['client_state'] + client_state = user["client_state"] else: - if client_state == user['client_state']: - raise Exception('previously seen client-state string') - if client_state in user['old_client_states']: - raise Exception('previously seen client-state string') + if client_state == user["client_state"]: + raise Exception("previously seen client-state string") + if client_state in user["old_client_states"]: + raise Exception("previously seen client-state string") # Need to create a new record for new user state. # If the node is not explicitly changing, try to keep them on the # same node, but if e.g. it no longer exists them allocate them to # a new one. 
if node is not None: nodeid = self.get_node_id(node) - user['node'] = node + user["node"] = node else: try: - nodeid = self.get_node_id(user['node']) + nodeid = self.get_node_id(user["node"]) except ValueError: nodeid, node = self.get_best_node() - user['node'] = node + user["node"] = node if generation is not None: - generation = max(user['generation'], generation) + generation = max(user["generation"], generation) else: - generation = user['generation'] + generation = user["generation"] keys_changed_at = max_keys_changed_at(user, keys_changed_at) now = get_timestamp() params = { - 'service': self._get_service_id(SERVICE_NAME), - 'email': user['email'], 'nodeid': nodeid, - 'generation': generation, 'keys_changed_at': keys_changed_at, - 'client_state': client_state, 'timestamp': now, + "service": self._get_service_id(SERVICE_NAME), + "email": user["email"], + "nodeid": nodeid, + "generation": generation, + "keys_changed_at": keys_changed_at, + "client_state": client_state, + "timestamp": now, } res = self._execute_sql(_CREATE_USER_RECORD, **params) res.close() - user['uid'] = res.lastrowid - user['generation'] = generation - user['keys_changed_at'] = keys_changed_at - user['old_client_states'][user['client_state']] = True - user['client_state'] = client_state + user["uid"] = res.lastrowid + user["generation"] = generation + user["keys_changed_at"] = keys_changed_at + user["old_client_states"][user["client_state"]] = True + user["client_state"] = client_state # mark old records as having been replaced. # if we crash here, they are unmarked and we may fail to # garbage collect them for a while, but the active state # will be undamaged. - self.replace_user_records(user['email'], now) + self.replace_user_records(user["email"], now) def retire_user(self, email): now = get_timestamp() params = { - 'email': email, 'timestamp': now, 'generation': MAX_GENERATION + "email": email, + "timestamp": now, + "generation": MAX_GENERATION, } # Pass through explicit engine to help with sharded implementation, # since we can't shard by service name here. 
@@ -448,8 +505,10 @@ def count_users(self, timestamp=None): def get_user_records(self, email): """Get all the user's records, including the old ones.""" - params = {'service': self._get_service_id(SERVICE_NAME), - 'email': email} + params = { + "service": self._get_service_id(SERVICE_NAME), + "email": email, + } res = self._execute_sql(_GET_ALL_USER_RECORDS_FOR_SERVICE, **params) try: for row in res: @@ -473,13 +532,16 @@ def _build_old_user_query(self, uid_range, params, **kwargs): rrep = " and ".join(rstr) sql = sqltext( _GET_OLD_USER_RECORDS_FOR_SERVICE_RANGE.replace( - "::RANGE::", rrep)) + "::RANGE::", rrep + ) + ) else: sql = _GET_OLD_USER_RECORDS_FOR_SERVICE return sql - def get_old_user_records(self, grace_period=-1, limit=100, - offset=0, uid_range=None): + def get_old_user_records( + self, grace_period=-1, limit=100, offset=0, uid_range=None + ): """Get user records that were replaced outside the grace period.""" if grace_period < 0: grace_period = 60 * 60 * 24 * 7 # one week, in seconds @@ -488,7 +550,7 @@ def get_old_user_records(self, grace_period=-1, limit=100, "service": self._get_service_id(SERVICE_NAME), "timestamp": get_timestamp() - grace_period, "limit": limit, - "offset": offset + "offset": offset, } sql = self._build_old_user_query(uid_range, params) @@ -505,8 +567,9 @@ def replace_user_records(self, email, timestamp=None): if timestamp is None: timestamp = get_timestamp() params = { - 'service': self._get_service_id(SERVICE_NAME), 'email': email, - 'timestamp': timestamp + "service": self._get_service_id(SERVICE_NAME), + "email": email, + "timestamp": timestamp, } res = self._execute_sql(_REPLACE_USER_RECORDS, **params) res.close() @@ -516,15 +579,16 @@ def replace_user_record(self, uid, timestamp=None): if timestamp is None: timestamp = get_timestamp() params = { - 'service': self._get_service_id(SERVICE_NAME), 'uid': uid, - 'timestamp': timestamp + "service": self._get_service_id(SERVICE_NAME), + "uid": uid, + "timestamp": timestamp, } res = self._execute_sql(_REPLACE_USER_RECORD, **params) res.close() def delete_user_record(self, uid): """Delete the user record with the given uid.""" - params = {'service': self._get_service_id(SERVICE_NAME), 'uid': uid} + params = {"service": self._get_service_id(SERVICE_NAME), "uid": uid} if not self.spanner_node_id: res = self._execute_sql(_FREE_SLOT_ON_NODE, **params) res.close() @@ -536,35 +600,49 @@ def delete_user_record(self, uid): # def _get_service_id(self, service): - if hasattr(self, 'service_id'): + if hasattr(self, "service_id"): return self.service_id else: res = self._execute_sql(_GET_SERVICE_ID, service=service) row = res.fetchone() res.close() if row is None: - raise Exception('unknown service: ' + service) + raise Exception("unknown service: " + service) self.service_id = row.id return row.id def add_service(self, service_name, pattern, **kwds): """Add definition for a new service.""" - res = self._execute_sql(sqltext(""" + res = self._execute_sql( + sqltext( + """ insert into services (service, pattern) values (:servicename, :pattern) - """), servicename=service_name, pattern=pattern, **kwds) + """ + ), + servicename=service_name, + pattern=pattern, + **kwds, + ) res.close() return res.lastrowid def add_node(self, node, capacity, **kwds): """Add definition for a new node.""" - available = kwds.get('available') + available = kwds.get("available") # We release only a fraction of the node's capacity to start. 
if available is None: available = math.ceil(capacity * self.capacity_release_rate) - cols = ["service", "node", "available", "capacity", - "current_load", "downed", "backoff"] + cols = [ + "service", + "node", + "available", + "capacity", + "current_load", + "downed", + "backoff", + ] args = [":" + v for v in cols] # Handle test cases that require nodeid to be 800 if "nodeid" in kwds: @@ -573,17 +651,19 @@ def add_node(self, node, capacity, **kwds): query = """ insert into nodes ({cols}) values ({args}) - """.format(cols=", ".join(cols), args=", ".join(args)) + """.format( + cols=", ".join(cols), args=", ".join(args) + ) res = self._execute_sql( sqltext(query), - nodeid=kwds.get('nodeid'), + nodeid=kwds.get("nodeid"), service=self._get_service_id(SERVICE_NAME), node=node, capacity=capacity, available=available, - current_load=kwds.get('current_load', 0), - downed=kwds.get('downed', 0), - backoff=kwds.get('backoff', 0), + current_load=kwds.get("current_load", 0), + downed=kwds.get("downed", 0), + backoff=kwds.get("backoff", 0), ) res.close() @@ -605,8 +685,8 @@ def update_node(self, node, **kwds): query += """ where service = :service and node = :node """ - values['service'] = self._get_service_id(SERVICE_NAME) - values['node'] = node + values["service"] = self._get_service_id(SERVICE_NAME) + values["node"] = node if kwds: raise ValueError("unknown fields: " + str(kwds.keys())) con = self._execute_sql(sqltext(query), **values) @@ -615,11 +695,14 @@ def update_node(self, node, **kwds): def get_node_id(self, node): """Get numeric id for a node.""" res = self._execute_sql( - sqltext(""" + sqltext( + """ select id from nodes where service=:service and node=:node - """), - service=self._get_service_id(SERVICE_NAME), node=node + """ + ), + service=self._get_service_id(SERVICE_NAME), + node=node, ) row = res.fetchone() res.close() @@ -630,11 +713,13 @@ def get_node_id(self, node): def remove_node(self, node, timestamp=None): """Remove definition for a node.""" nodeid = self.get_node_id(node) - res = self._execute_sql(sqltext( - """ + res = self._execute_sql( + sqltext( + """ delete from nodes where id=:nodeid - """), - nodeid=nodeid + """ + ), + nodeid=nodeid, ) res.close() self.unassign_node(node, timestamp, nodeid=nodeid) @@ -646,12 +731,15 @@ def unassign_node(self, node, timestamp=None, nodeid=None): if nodeid is None: nodeid = self.get_node_id(node) res = self._execute_sql( - sqltext(""" + sqltext( + """ update users set replaced_at=:timestamp where nodeid=:nodeid - """), - nodeid=nodeid, timestamp=timestamp + """ + ), + nodeid=nodeid, + timestamp=timestamp, ) res.close() @@ -672,8 +760,8 @@ def get_best_node(self): # bailing out. for _ in range(5): res = self._execute_sql( - _GET_BEST_NODE, - service=self._get_service_id(SERVICE_NAME)) + _GET_BEST_NODE, service=self._get_service_id(SERVICE_NAME) + ) row = res.fetchone() res.close() if row is None: @@ -682,7 +770,7 @@ def get_best_node(self): res = self._execute_sql( _RELEASE_NODE_CAPACITY, capacity_release_rate=self.capacity_release_rate, - service=self._get_service_id(SERVICE_NAME) + service=self._get_service_id(SERVICE_NAME), ) res.close() if res.rowcount == 0: @@ -692,7 +780,7 @@ def get_best_node(self): # Did we succeed in finding a node? if row is None: - raise Exception('unable to get a node') + raise Exception("unable to get a node") nodeid = row.id node = str(row.node) @@ -700,9 +788,11 @@ def get_best_node(self): # Update the node to reflect the new assignment. # This is a little racy with concurrent assignments, but no big # deal. 
- con = self._execute_sql(_ADD_USER_TO_NODE, - service=self._get_service_id(SERVICE_NAME), - node=node) + con = self._execute_sql( + _ADD_USER_TO_NODE, + service=self._get_service_id(SERVICE_NAME), + node=node, + ) con.close() return nodeid, node @@ -710,23 +800,22 @@ def get_best_node(self): def get_node(self, node): if node is None: raise Exception("NONE node") - res = self._execute_sql(_GET_NODE, - service=self._get_service_id(SERVICE_NAME), - node=node) + res = self._execute_sql( + _GET_NODE, service=self._get_service_id(SERVICE_NAME), node=node + ) row = res.fetchone() res.close() if row is None: - raise Exception('unknown node: ' + node) + raise Exception("unknown node: " + node) return row # somewhat simplified version that just gets the one Spanner node. def get_spanner_node(self, node): - res = self._execute_sql(_GET_SPANNER_NODE, - id=node) + res = self._execute_sql(_GET_SPANNER_NODE, id=node) row = res.fetchone() res.close() if row is None: - raise Exception(f'unknown node: {node}') + raise Exception(f"unknown node: {node}") return str(row.node) @@ -738,8 +827,6 @@ def max_keys_changed_at(user, keys_changed_at): """ it = ( - x - for x in (keys_changed_at, user['keys_changed_at']) - if x is not None + x for x in (keys_changed_at, user["keys_changed_at"]) if x is not None ) return max(it, default=None) diff --git a/tools/tokenserver/loadtests/get_jwk.py b/tools/tokenserver/loadtests/get_jwk.py index 93cd012090..d7f6f73c8a 100644 --- a/tools/tokenserver/loadtests/get_jwk.py +++ b/tools/tokenserver/loadtests/get_jwk.py @@ -2,5 +2,5 @@ from authlib.jose import JsonWebKey raw_public_key = open(sys.argv[1], "rb").read() -public_key = JsonWebKey.import_key(raw_public_key, {"kty": "RSA"}) +public_key = JsonWebKey.import_key(raw_public_key, {"kty": "RSA"}) print(public_key.as_json()) diff --git a/tools/tokenserver/loadtests/locustfile.py b/tools/tokenserver/loadtests/locustfile.py index a3b41fcd5d..f128e500d7 100644 --- a/tools/tokenserver/loadtests/locustfile.py +++ b/tools/tokenserver/loadtests/locustfile.py @@ -7,7 +7,7 @@ from cryptography.hazmat.primitives.asymmetric import rsa from locust import HttpUser, task, between -DEFAULT_OAUTH_SCOPE = 'https://identity.mozilla.com/apps/oldsync' +DEFAULT_OAUTH_SCOPE = "https://identity.mozilla.com/apps/oldsync" # To create an invalid token, we sign the JWT with a private key that doesn't # correspond with the public key set on Tokenserver. To accomplish this, we @@ -21,13 +21,14 @@ # It's hosted in a static S3 bucket so we don't swamp the live mockmyid server. MOCKMYID_DOMAIN = "mockmyid.s3-us-west-2.amazonaws.com" ONE_YEAR = 60 * 60 * 24 * 365 -TOKENSERVER_PATH = '/1.0/sync/1.5' +TOKENSERVER_PATH = "/1.0/sync/1.5" # This is a private key used to "forge" valid tokens. The associated public # key must be set using the SYNC_TOKENSERVER__FXA_PRIMARY_JWK_* environment # variables on Tokenserver. VALID_OAUTH_PRIVATE_KEY = private_key = serialization.load_pem_private_key( - open(os.environ['OAUTH_PEM_FILE'], "rb").read(), password=None, + open(os.environ["OAUTH_PEM_FILE"], "rb").read(), + password=None, ) @@ -44,7 +45,8 @@ def __init__(self, *args, **kwargs): # Keep track of this user's generation number. self.generation_counter = 0 self.client_state = binascii.hexlify( - self.generation_counter.to_bytes(16, 'big')).decode('utf8') + self.generation_counter.to_bytes(16, "big") + ).decode("utf8") # Locust spawns a new instance of this class for each user. Using the # object ID as the FxA UID guarantees uniqueness. 
self.fxa_uid = id(self) @@ -59,8 +61,7 @@ def test_oauth_success(self): @task(100) def test_invalid_oauth(self): token = self._make_oauth_token( - self.email, - key=INVALID_OAUTH_PRIVATE_KEY + self.email, key=INVALID_OAUTH_PRIVATE_KEY ) self._do_token_exchange_via_oauth(token, status=401) @@ -80,7 +81,8 @@ def test_encryption_key_change(self): # keys_changed_at for the user both increase. self.generation_counter += 1 self.client_state = binascii.hexlify( - self.generation_counter.to_bytes(16, 'big')).decode('utf8') + self.generation_counter.to_bytes(16, "big") + ).decode("utf8") token = self._make_oauth_token(self.email) self._do_token_exchange_via_oauth(token) @@ -105,14 +107,11 @@ def _make_oauth_token(self, email, key=VALID_OAUTH_PRIVATE_KEY, **fields): sub, issuer = email.split("@", 1) body["sub"] = sub body["issuer"] = issuer - body['fxa-generation'] = self.generation_counter + body["fxa-generation"] = self.generation_counter body.update(fields) return jwt.encode( - body, - key, - algorithm="RS256", - headers={'typ': 'application/at+jwt'} + body, key, algorithm="RS256", headers={"typ": "application/at+jwt"} ) def _make_x_key_id_header(self): @@ -121,18 +120,18 @@ def _make_x_key_id_header(self): # the accuracy of the load test is unaffected. keys_changed_at = self.generation_counter raw_client_state = binascii.unhexlify(self.client_state) - client_state = b64encode(raw_client_state).strip(b'=').decode('utf-8') + client_state = b64encode(raw_client_state).strip(b"=").decode("utf-8") - return '%s-%s' % (keys_changed_at, client_state) + return "%s-%s" % (keys_changed_at, client_state) def _do_token_exchange_via_oauth(self, token, status=200): headers = { - 'Authorization': 'Bearer %s' % token, - 'X-KeyID': self._make_x_key_id_header(), + "Authorization": "Bearer %s" % token, + "X-KeyID": self._make_x_key_id_header(), } - with self.client.get(TOKENSERVER_PATH, - catch_response=True, - headers=headers) as res: + with self.client.get( + TOKENSERVER_PATH, catch_response=True, headers=headers + ) as res: if res.status_code == status: res.success() diff --git a/tools/tokenserver/loadtests/populate_db.py b/tools/tokenserver/loadtests/populate_db.py index 6f31af5482..a2803cb327 100644 --- a/tools/tokenserver/loadtests/populate_db.py +++ b/tools/tokenserver/loadtests/populate_db.py @@ -5,34 +5,40 @@ from sqlalchemy import create_engine from sqlalchemy.sql import text as sqltext -_CREATE_USER_RECORD = sqltext("""\ +_CREATE_USER_RECORD = sqltext( + """\ insert into users (service, email, nodeid, generation, client_state, created_at, replaced_at) values (:service, :email, :nodeid, 0, "", :timestamp, NULL) -""") +""" +) -_GET_SERVICE_ID = sqltext("""\ +_GET_SERVICE_ID = sqltext( + """\ select id from services where service = :service -""") +""" +) -_GET_NODE_ID = sqltext("""\ +_GET_NODE_ID = sqltext( + """\ select id from nodes where service=:service and node=:node -""") +""" +) -_SERVICE_NAME = 'sync-1.5' +_SERVICE_NAME = "sync-1.5" # This class creates a bunch of users associated with the sync-1.5 service. @@ -51,9 +57,9 @@ class PopulateDatabase: def __init__(self, sqluri, nodes, user_range, host="loadtest.local"): engine = create_engine(sqluri) - self.database = engine. \ - execution_options(isolation_level="AUTOCOMMIT"). 
\ - connect() + self.database = engine.execution_options( + isolation_level="AUTOCOMMIT" + ).connect() self.service_id = self._get_service_id() self.node_ids = [self._get_node_id(node) for node in nodes] @@ -62,9 +68,9 @@ def __init__(self, sqluri, nodes, user_range, host="loadtest.local"): def _get_node_id(self, node_name): """Get numeric id for a node.""" - res = self.database.execute(_GET_NODE_ID, - service=self.service_id, - node=node_name) + res = self.database.execute( + _GET_NODE_ID, service=self.service_id, node=node_name + ) row = res.fetchone() res.close() if row is None: @@ -79,18 +85,17 @@ def _get_service_id(self): def run(self): params = { - 'service': self.service_id, - 'timestamp': int(time.time() * 1000), + "service": self.service_id, + "timestamp": int(time.time() * 1000), } # for each user in the range, assign them to a node for idx in range(0, self.user_range): email = "%s@%s" % (idx, self.host) nodeid = random.choice(self.node_ids) - self.database.execute(_CREATE_USER_RECORD, - email=email, - nodeid=nodeid, - **params) + self.database.execute( + _CREATE_USER_RECORD, email=email, nodeid=nodeid, **params + ) def main(): @@ -102,16 +107,19 @@ def main(): # python3 populate-db.py sqlite:////tmp/tokenserver\ # node1,node2,node3,node4,node5,node6 100 import sys + if len(sys.argv) < 4: - raise ValueError('You need to specify (in this order) sqluri, ' - 'nodes (comma separated), and user_range') + raise ValueError( + "You need to specify (in this order) sqluri, " + "nodes (comma separated), and user_range" + ) # transform the values from the cli to python objects - sys.argv[2] = sys.argv[2].split(',') # comma separated => list + sys.argv[2] = sys.argv[2].split(",") # comma separated => list sys.argv[3] = int(sys.argv[3]) PopulateDatabase(*sys.argv[1:]).run() print("created {nb_users} users".format(nb_users=sys.argv[3])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/tokenserver/process_account_events.py b/tools/tokenserver/process_account_events.py index b2b60843ad..2b04f3d6d4 100644 --- a/tools/tokenserver/process_account_events.py +++ b/tools/tokenserver/process_account_events.py @@ -47,10 +47,8 @@ def process_account_events( - queue_name, - aws_region=None, - queue_wait_time=20, - metrics=None): + queue_name, aws_region=None, queue_wait_time=20, metrics=None +): """Process account events from an SQS queue. 
This function polls the specified SQS queue for account-realted events, @@ -95,7 +93,7 @@ def process_account_event(database, body, metrics=None): generation = None try: body = json.loads(body) - event = json.loads(body['Message']) + event = json.loads(body["Message"]) event_type = event["event"] uid = event["uid"] # Older versions of the fxa-auth-server would send an email-like @@ -108,7 +106,10 @@ def process_account_event(database, body, metrics=None): if "@" not in uid: raise ValueError("uid field does not contain issuer info") email = uid - if event_type in ("reset", "passwordChange",): + if event_type in ( + "reset", + "passwordChange", + ): generation = event["generation"] except (ValueError, KeyError) as e: logger.exception("Invalid account message: %s", e) @@ -123,11 +124,13 @@ def process_account_event(database, body, metrics=None): elif event_type == "reset": logger.info("Processing account reset for %r", email) update_generation_number( - database, email, generation, metrics=metrics) + database, email, generation, metrics=metrics + ) elif event_type == "passwordChange": logger.info("Processing password change for %r", email) update_generation_number( - database, email, generation, metrics=metrics) + database, email, generation, metrics=metrics + ) else: record_metric = False logger.warning("Dropping unknown event type %r", event_type) @@ -171,14 +174,26 @@ def main(args=None): """ usage = "usage: %prog [options] queue_name" parser = optparse.OptionParser(usage=usage) - parser.add_option("", "--aws-region", - help="aws region in which the queue can be found") - parser.add_option("", "--queue-wait-time", type="int", default=20, - help="Number of seconds to wait for jobs on the queue") - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") - parser.add_option("", "--human_logs", action="store_true", - help="Human readable logs") + parser.add_option( + "", "--aws-region", help="aws region in which the queue can be found" + ) + parser.add_option( + "", + "--queue-wait-time", + type="int", + default=20, + help="Number of seconds to wait for jobs on the queue", + ) + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) + parser.add_option( + "", "--human_logs", action="store_true", help="Human readable logs" + ) util.add_metric_options(parser) opts, args = parser.parse_args(args) @@ -197,10 +212,8 @@ def main(args=None): queue_name = args[0] process_account_events( - queue_name, - opts.aws_region, - opts.queue_wait_time, - metrics=metrics) + queue_name, opts.aws_region, opts.queue_wait_time, metrics=metrics + ) return 0 diff --git a/tools/tokenserver/purge_old_records.py b/tools/tokenserver/purge_old_records.py index ca8e6f10a3..d99963791d 100644 --- a/tools/tokenserver/purge_old_records.py +++ b/tools/tokenserver/purge_old_records.py @@ -91,7 +91,8 @@ def purge_old_records( f" to {uid_range[1] or 'End'}" ) logger.info( - f"Fetched {len(rows)} rows at offset {offset}{range_msg}") + f"Fetched {len(rows)} rows at offset {offset}{range_msg}" + ) counter = 0 for row in rows: # Don't attempt to purge data from downed nodes. 
@@ -101,21 +102,18 @@ def purge_old_records( logger.info( "Deleting user record for uid %s on %s", row.uid, - row.node + row.node, ) if not dryrun: if metrics: metrics.incr( - "delete_user", - tags={"type": "nodeless"}) + "delete_user", tags={"type": "nodeless"} + ) retryable(database.delete_user_record, row.uid) # NOTE: only delete_user+service_data calls count # against the counter elif not row.downed: - logger.info( - "Purging uid %s on %s", - row.uid, - row.node) + logger.info("Purging uid %s on %s", row.uid, row.node) if not dryrun: retryable( delete_service_data, @@ -127,18 +125,16 @@ def purge_old_records( ) if metrics: metrics.incr("delete_data") - retryable( - database.delete_user_record, - row.uid) + retryable(database.delete_user_record, row.uid) if metrics: metrics.incr( - "delete_user", - tags={"type": "not_down"} + "delete_user", tags={"type": "not_down"} ) counter += 1 elif force: delete_sd = not points_to_active( - database, row, override_node, metrics=metrics) + database, row, override_node, metrics=metrics + ) logger.info( "Forcing tokenserver record delete: " f"{row.uid} on {row.node} " @@ -154,30 +150,24 @@ def purge_old_records( # the existing data set. # (The call mimics a user DELETE request.) retryable( - delete_service_data, - row, - secret, - timeout=request_timeout, - dryrun=dryrun, - # if an override was specifed, - # use that node ID - override_node=override_node, - metrics=metrics, - ) + delete_service_data, + row, + secret, + timeout=request_timeout, + dryrun=dryrun, + # if an override was specifed, + # use that node ID + override_node=override_node, + metrics=metrics, + ) if metrics: metrics.incr( - "delete_data", - tags={"type": "force"} - ) + "delete_data", tags={"type": "force"} + ) - retryable( - database.delete_user_record, - row.uid) + retryable(database.delete_user_record, row.uid) if metrics: - metrics.incr( - "delete_data", - tags={"type": "force"} - ) + metrics.incr("delete_data", tags={"type": "force"}) counter += 1 if max_records and counter >= max_records: logger.info("Reached max_records, exiting") @@ -195,8 +185,8 @@ def purge_old_records( def delete_service_data( - user, secret, timeout=60, dryrun=False, override_node=None, - metrics=None): + user, secret, timeout=60, dryrun=False, override_node=None, metrics=None +): """Send a data-deletion request to the user's service node. 
This is a little bit of hackery to cause the user's service node to @@ -235,11 +225,7 @@ def retry_giveup(e): return 500 <= e.response.status_code < 505 -@backoff.on_exception( - backoff.expo, - requests.HTTPError, - giveup=retry_giveup - ) +@backoff.on_exception(backoff.expo, requests.HTTPError, giveup=retry_giveup) def retryable(fn, *args, **kwargs): fn(*args, **kwargs) @@ -343,8 +329,10 @@ def main(args=None): help="Timeout in seconds for service deletion requests", ) parser.add_option( - "", "--oneshot", action="store_true", - help="Do a single purge run and then exit" + "", + "--oneshot", + action="store_true", + help="Do a single purge run and then exit", ) parser.add_option( "-v", @@ -354,8 +342,7 @@ def main(args=None): help="Control verbosity of log messages", ) parser.add_option( - "", "--dryrun", action="store_true", - help="Don't do destructive things" + "", "--dryrun", action="store_true", help="Don't do destructive things" ) parser.add_option( "", @@ -365,26 +352,18 @@ def main(args=None): "if the user's node is marked as down", ) parser.add_option( - "", "--override_node", - help="Use this node when deleting (if data was copied)" + "", + "--override_node", + help="Use this node when deleting (if data was copied)", ) parser.add_option( - "", - "--range_start", - default=None, - help="Start of UID range to check" + "", "--range_start", default=None, help="Start of UID range to check" ) parser.add_option( - "", - "--range_end", - default=None, - help="End of UID range to check" + "", "--range_end", default=None, help="End of UID range to check" ) parser.add_option( - "", - "--human_logs", - action="store_true", - help="Human readable logs" + "", "--human_logs", action="store_true", help="Human readable logs" ) util.add_metric_options(parser) diff --git a/tools/tokenserver/remove_node.py b/tools/tokenserver/remove_node.py index 6789063f27..9a0f319980 100644 --- a/tools/tokenserver/remove_node.py +++ b/tools/tokenserver/remove_node.py @@ -53,8 +53,13 @@ def main(args=None): usage = "usage: %prog [options] node_name" descr = "Remove a node from the tokenserver database" parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if len(args) != 1: diff --git a/tools/tokenserver/run_tests.py b/tools/tokenserver/run_tests.py new file mode 100644 index 0000000000..343d21bb52 --- /dev/null +++ b/tools/tokenserver/run_tests.py @@ -0,0 +1,29 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +import sys +import unittest + +from test_database import TestDatabase +from test_process_account_events import TestProcessAccountEvents +from test_purge_old_records import TestPurgeOldRecords +from test_scripts import TestScripts + +if __name__ == "__main__": + loader = unittest.TestLoader() + test_cases = [ + TestDatabase, + TestPurgeOldRecords, + TestProcessAccountEvents, + TestScripts, + ] + + res = 0 + for test_case in test_cases: + suite = loader.loadTestsFromTestCase(test_case) + runner = unittest.TextTestRunner() + if not runner.run(suite).wasSuccessful(): + res = 1 + + sys.exit(res) diff --git a/tools/tokenserver/test_database.py b/tools/tokenserver/test_database.py index ef1e7560b0..86c4e1354f 100644 --- a/tools/tokenserver/test_database.py +++ b/tools/tokenserver/test_database.py @@ -15,185 +15,181 @@ def setUp(self): super(TestDatabase, self).setUp() self.database = Database() # Start each test with a blank slate. - cursor = self.database._execute_sql(('DELETE FROM users'), ()) + cursor = self.database._execute_sql(("DELETE FROM users"), ()) cursor.close() - cursor = self.database._execute_sql(('DELETE FROM nodes'), ()) + cursor = self.database._execute_sql(("DELETE FROM nodes"), ()) cursor.close() - cursor = self.database._execute_sql(('DELETE FROM services'), ()) + cursor = self.database._execute_sql(("DELETE FROM services"), ()) cursor.close() - self.database.add_service('sync-1.5', r'{node}/1.5/{uid}') - self.database.add_node('https://phx12', 100) + self.database.add_service("sync-1.5", r"{node}/1.5/{uid}") + self.database.add_node("https://phx12", 100) def tearDown(self): super(TestDatabase, self).tearDown() # And clean up at the end, for good measure. - cursor = self.database._execute_sql(('DELETE FROM users'), ()) + cursor = self.database._execute_sql(("DELETE FROM users"), ()) cursor.close() - cursor = self.database._execute_sql(('DELETE FROM nodes'), ()) + cursor = self.database._execute_sql(("DELETE FROM nodes"), ()) cursor.close() - cursor = self.database._execute_sql(('DELETE FROM services'), ()) + cursor = self.database._execute_sql(("DELETE FROM services"), ()) cursor.close() self.database.close() def test_node_allocation(self): - user = self.database.get_user('test1@example.com') + user = self.database.get_user("test1@example.com") self.assertEqual(user, None) - user = self.database.allocate_user('test1@example.com') - wanted = 'https://phx12' - self.assertEqual(user['node'], wanted) + user = self.database.allocate_user("test1@example.com") + wanted = "https://phx12" + self.assertEqual(user["node"], wanted) - user = self.database.get_user('test1@example.com') - self.assertEqual(user['node'], wanted) + user = self.database.get_user("test1@example.com") + self.assertEqual(user["node"], wanted) def test_allocation_to_least_loaded_node(self): - self.database.add_node('https://phx13', 100) - user1 = self.database.allocate_user('test1@mozilla.com') - user2 = self.database.allocate_user('test2@mozilla.com') - self.assertNotEqual(user1['node'], user2['node']) + self.database.add_node("https://phx13", 100) + user1 = self.database.allocate_user("test1@mozilla.com") + user2 = self.database.allocate_user("test2@mozilla.com") + self.assertNotEqual(user1["node"], user2["node"]) def test_allocation_is_not_allowed_to_downed_nodes(self): - self.database.update_node('https://phx12', - downed=True) + self.database.update_node("https://phx12", downed=True) with self.assertRaises(Exception): - self.database.allocate_user('test1@mozilla.com') + 
self.database.allocate_user("test1@mozilla.com") def test_allocation_is_not_allowed_to_backoff_nodes(self): - self.database.update_node('https://phx12', - backoff=True) + self.database.update_node("https://phx12", backoff=True) with self.assertRaises(Exception): - self.database.allocate_user('test1@mozilla.com') + self.database.allocate_user("test1@mozilla.com") def test_update_generation_number(self): - user = self.database.allocate_user('test1@example.com') - self.assertEqual(user['generation'], 0) - self.assertEqual(user['client_state'], '') - orig_uid = user['uid'] - orig_node = user['node'] + user = self.database.allocate_user("test1@example.com") + self.assertEqual(user["generation"], 0) + self.assertEqual(user["client_state"], "") + orig_uid = user["uid"] + orig_node = user["node"] # Changing generation should leave other properties unchanged. self.database.update_user(user, generation=42) - self.assertEqual(user['uid'], orig_uid) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], '') + self.assertEqual(user["uid"], orig_uid) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "") - user = self.database.get_user('test1@example.com') - self.assertEqual(user['uid'], orig_uid) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], '') + user = self.database.get_user("test1@example.com") + self.assertEqual(user["uid"], orig_uid) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "") # It's not possible to move generation number backwards. self.database.update_user(user, generation=17) - self.assertEqual(user['uid'], orig_uid) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], '') + self.assertEqual(user["uid"], orig_uid) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "") - user = self.database.get_user('test1@example.com') - self.assertEqual(user['uid'], orig_uid) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], '') + user = self.database.get_user("test1@example.com") + self.assertEqual(user["uid"], orig_uid) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "") def test_update_client_state(self): - user = self.database.allocate_user('test1@example.com') - self.assertEqual(user['generation'], 0) - self.assertEqual(user['client_state'], '') - self.assertEqual(set(user['old_client_states']), set(())) - seen_uids = set((user['uid'],)) - orig_node = user['node'] + user = self.database.allocate_user("test1@example.com") + self.assertEqual(user["generation"], 0) + self.assertEqual(user["client_state"], "") + self.assertEqual(set(user["old_client_states"]), set(())) + seen_uids = set((user["uid"],)) + orig_node = user["node"] # Changing client-state allocates a new userid. 
- self.database.update_user(user, client_state='aaaa') - self.assertTrue(user['uid'] not in seen_uids) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 0) - self.assertEqual(user['client_state'], 'aaaa') - self.assertEqual(set(user['old_client_states']), set(('',))) - - user = self.database.get_user('test1@example.com') - self.assertTrue(user['uid'] not in seen_uids) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 0) - self.assertEqual(user['client_state'], 'aaaa') - self.assertEqual(set(user['old_client_states']), set(('',))) - - seen_uids.add(user['uid']) + self.database.update_user(user, client_state="aaaa") + self.assertTrue(user["uid"] not in seen_uids) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 0) + self.assertEqual(user["client_state"], "aaaa") + self.assertEqual(set(user["old_client_states"]), set(("",))) + + user = self.database.get_user("test1@example.com") + self.assertTrue(user["uid"] not in seen_uids) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 0) + self.assertEqual(user["client_state"], "aaaa") + self.assertEqual(set(user["old_client_states"]), set(("",))) + + seen_uids.add(user["uid"]) # It's possible to change client-state and generation at once. - self.database.update_user(user, - client_state='bbbb', generation=12) - self.assertTrue(user['uid'] not in seen_uids) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 12) - self.assertEqual(user['client_state'], 'bbbb') - self.assertEqual(set(user['old_client_states']), set(('', 'aaaa'))) - - user = self.database.get_user('test1@example.com') - self.assertTrue(user['uid'] not in seen_uids) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 12) - self.assertEqual(user['client_state'], 'bbbb') - self.assertEqual(set(user['old_client_states']), set(('', 'aaaa'))) + self.database.update_user(user, client_state="bbbb", generation=12) + self.assertTrue(user["uid"] not in seen_uids) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 12) + self.assertEqual(user["client_state"], "bbbb") + self.assertEqual(set(user["old_client_states"]), set(("", "aaaa"))) + + user = self.database.get_user("test1@example.com") + self.assertTrue(user["uid"] not in seen_uids) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 12) + self.assertEqual(user["client_state"], "bbbb") + self.assertEqual(set(user["old_client_states"]), set(("", "aaaa"))) # You can't got back to an old client_state. 
- orig_uid = user['uid'] + orig_uid = user["uid"] with self.assertRaises(Exception): - self.database.update_user(user, - client_state='aaaa') + self.database.update_user(user, client_state="aaaa") - user = self.database.get_user('test1@example.com') - self.assertEqual(user['uid'], orig_uid) - self.assertEqual(user['node'], orig_node) - self.assertEqual(user['generation'], 12) - self.assertEqual(user['client_state'], 'bbbb') - self.assertEqual(set(user['old_client_states']), set(('', 'aaaa'))) + user = self.database.get_user("test1@example.com") + self.assertEqual(user["uid"], orig_uid) + self.assertEqual(user["node"], orig_node) + self.assertEqual(user["generation"], 12) + self.assertEqual(user["client_state"], "bbbb") + self.assertEqual(set(user["old_client_states"]), set(("", "aaaa"))) def test_user_retirement(self): - self.database.allocate_user('test@mozilla.com') - user1 = self.database.get_user('test@mozilla.com') - self.database.retire_user('test@mozilla.com') - user2 = self.database.get_user('test@mozilla.com') - self.assertTrue(user2['generation'] > user1['generation']) + self.database.allocate_user("test@mozilla.com") + user1 = self.database.get_user("test@mozilla.com") + self.database.retire_user("test@mozilla.com") + user2 = self.database.get_user("test@mozilla.com") + self.assertTrue(user2["generation"] > user1["generation"]) def test_cleanup_of_old_records(self): # Create 6 user records for the first user. # Do a sleep halfway through so we can test use of grace period. - email1 = 'test1@mozilla.com' + email1 = "test1@mozilla.com" user1 = self.database.allocate_user(email1) # We have to sleep between every user create/update operation: if two # users are created with the same timestamp, it can lead to a # situation where two active user records exist for a single email. time.sleep(0.1) - self.database.update_user(user1, client_state='aaaa') + self.database.update_user(user1, client_state="aaaa") time.sleep(0.1) - self.database.update_user(user1, client_state='bbbb') + self.database.update_user(user1, client_state="bbbb") time.sleep(0.1) - self.database.update_user(user1, client_state='cccc') + self.database.update_user(user1, client_state="cccc") time.sleep(0.1) break_time = time.time() time.sleep(0.1) - self.database.update_user(user1, client_state='dddd') + self.database.update_user(user1, client_state="dddd") time.sleep(0.1) - self.database.update_user(user1, client_state='eeee') + self.database.update_user(user1, client_state="eeee") time.sleep(0.1) records = list(self.database.get_user_records(email1)) self.assertEqual(len(records), 6) # Create 3 user records for the second user. - email2 = 'test2@mozilla.com' + email2 = "test2@mozilla.com" user2 = self.database.allocate_user(email2) time.sleep(0.1) - self.database.update_user(user2, client_state='aaaa') + self.database.update_user(user2, client_state="aaaa") time.sleep(0.1) - self.database.update_user(user2, client_state='bbbb') + self.database.update_user(user2, client_state="bbbb") time.sleep(0.1) records = list(self.database.get_user_records(email2)) self.assertEqual(len(records), 3) @@ -201,8 +197,7 @@ def test_cleanup_of_old_records(self): old_records = list(self.database.get_old_user_records(0)) self.assertEqual(len(old_records), 7) # And with max_offset of 3, the first record should be id 4 - old_records = list(self.database.get_old_user_records(0, - 100, 3)) + old_records = list(self.database.get_old_user_records(0, 100, 3)) # The 'limit' parameter should be respected. 
old_records = list(self.database.get_old_user_records(0, 2)) self.assertEqual(len(old_records), 2) @@ -220,84 +215,83 @@ def test_cleanup_of_old_records(self): self.assertEqual(len(old_records), 4) def test_node_reassignment_when_records_are_replaced(self): - self.database.allocate_user('test@mozilla.com', - generation=42, - keys_changed_at=12, - client_state='aaaa') - user1 = self.database.get_user('test@mozilla.com') - self.database.replace_user_records('test@mozilla.com') - user2 = self.database.get_user('test@mozilla.com') + self.database.allocate_user( + "test@mozilla.com", + generation=42, + keys_changed_at=12, + client_state="aaaa", + ) + user1 = self.database.get_user("test@mozilla.com") + self.database.replace_user_records("test@mozilla.com") + user2 = self.database.get_user("test@mozilla.com") # They should have got a new uid. - self.assertNotEqual(user2['uid'], user1['uid']) + self.assertNotEqual(user2["uid"], user1["uid"]) # But their account metadata should have been preserved. - self.assertEqual(user2['generation'], user1['generation']) - self.assertEqual(user2['keys_changed_at'], user1['keys_changed_at']) - self.assertEqual(user2['client_state'], user1['client_state']) + self.assertEqual(user2["generation"], user1["generation"]) + self.assertEqual(user2["keys_changed_at"], user1["keys_changed_at"]) + self.assertEqual(user2["client_state"], user1["client_state"]) def test_node_reassignment_not_done_for_retired_users(self): - self.database.allocate_user('test@mozilla.com', - generation=42, client_state='aaaa') - user1 = self.database.get_user('test@mozilla.com') - self.database.retire_user('test@mozilla.com') - user2 = self.database.get_user('test@mozilla.com') - self.assertEqual(user2['uid'], user1['uid']) - self.assertEqual(user2['generation'], MAX_GENERATION) - self.assertEqual(user2['client_state'], user2['client_state']) + self.database.allocate_user( + "test@mozilla.com", generation=42, client_state="aaaa" + ) + user1 = self.database.get_user("test@mozilla.com") + self.database.retire_user("test@mozilla.com") + user2 = self.database.get_user("test@mozilla.com") + self.assertEqual(user2["uid"], user1["uid"]) + self.assertEqual(user2["generation"], MAX_GENERATION) + self.assertEqual(user2["client_state"], user1["client_state"]) def test_recovery_from_racy_record_creation(self): timestamp = get_timestamp() # Simulate race for forcing creation of two rows with same timestamp. - user1 = self.database.allocate_user('test@mozilla.com', - timestamp=timestamp) - user2 = self.database.allocate_user('test@mozilla.com', - timestamp=timestamp) - self.assertNotEqual(user1['uid'], user2['uid']) - # Neither is marked replaced initially. - old_records = list( - self.database.get_old_user_records(0) + user1 = self.database.allocate_user( + "test@mozilla.com", timestamp=timestamp ) + user2 = self.database.allocate_user( + "test@mozilla.com", timestamp=timestamp + ) + self.assertNotEqual(user1["uid"], user2["uid"]) + # Neither is marked replaced initially. + old_records = list(self.database.get_old_user_records(0)) self.assertEqual(len(old_records), 0) # Reading current details will detect the problem and fix it.
- self.database.get_user('test@mozilla.com') - old_records = list( - self.database.get_old_user_records(0) - ) + self.database.get_user("test@mozilla.com") + old_records = list(self.database.get_old_user_records(0)) self.assertEqual(len(old_records), 1) def test_that_race_recovery_respects_generation_number_monotonicity(self): timestamp = get_timestamp() # Simulate race between clients with different generation numbers, # in which the out-of-date client gets a higher timestamp. - user1 = self.database.allocate_user('test@mozilla.com', - generation=1, - timestamp=timestamp) - user2 = self.database.allocate_user('test@mozilla.com', - generation=2, - timestamp=timestamp - 1) - self.assertNotEqual(user1['uid'], user2['uid']) + user1 = self.database.allocate_user( + "test@mozilla.com", generation=1, timestamp=timestamp + ) + user2 = self.database.allocate_user( + "test@mozilla.com", generation=2, timestamp=timestamp - 1 + ) + self.assertNotEqual(user1["uid"], user2["uid"]) # Reading current details should promote the higher-generation one. - user = self.database.get_user('test@mozilla.com') - self.assertEqual(user['generation'], 2) - self.assertEqual(user['uid'], user2['uid']) + user = self.database.get_user("test@mozilla.com") + self.assertEqual(user["generation"], 2) + self.assertEqual(user["uid"], user2["uid"]) # And the other record should get marked as replaced. - old_records = list( - self.database.get_old_user_records(0) - ) + old_records = list(self.database.get_old_user_records(0)) self.assertEqual(len(old_records), 1) def test_node_reassignment_and_removal(self): - NODE1 = 'https://phx12' - NODE2 = 'https://phx13' + NODE1 = "https://phx12" + NODE2 = "https://phx13" # note that NODE1 is created by default for all tests. self.database.add_node(NODE2, 100) # Assign four users, we should get two on each node. - user1 = self.database.allocate_user('test1@mozilla.com') - user2 = self.database.allocate_user('test2@mozilla.com') - user3 = self.database.allocate_user('test3@mozilla.com') - user4 = self.database.allocate_user('test4@mozilla.com') + user1 = self.database.allocate_user("test1@mozilla.com") + user2 = self.database.allocate_user("test2@mozilla.com") + user3 = self.database.allocate_user("test3@mozilla.com") + user4 = self.database.allocate_user("test4@mozilla.com") node_counts = defaultdict(lambda: 0) for user in (user1, user2, user3, user4): - node_counts[user['node']] += 1 + node_counts[user["node"]] += 1 self.assertEqual(node_counts[NODE1], 2) self.assertEqual(node_counts[NODE2], 2) # Clear the assignments for NODE1, and re-assign. @@ -306,17 +300,17 @@ def test_node_reassignment_and_removal(self): self.database.unassign_node(NODE1) node_counts = defaultdict(lambda: 0) for user in (user1, user2, user3, user4): - new_user = self.database.get_user(user['email']) - if user['node'] == NODE2: - self.assertEqual(new_user['node'], NODE2) - node_counts[new_user['node']] += 1 + new_user = self.database.get_user(user["email"]) + if user["node"] == NODE2: + self.assertEqual(new_user["node"], NODE2) + node_counts[new_user["node"]] += 1 self.assertEqual(node_counts[NODE1], 1) self.assertEqual(node_counts[NODE2], 3) # Remove NODE2. Everyone should wind up on NODE1. 
self.database.remove_node(NODE2) for user in (user1, user2, user3, user4): - new_user = self.database.get_user(user['email']) - self.assertEqual(new_user['node'], NODE1) + new_user = self.database.get_user(user["email"]) + self.assertEqual(new_user["node"], NODE1) # The old users records pointing to NODE2 should have a NULL 'node' # property since it has been removed from the db. null_node_count = 0 @@ -331,138 +325,140 @@ def test_that_race_recovery_respects_generation_after_reassignment(self): timestamp = get_timestamp() # Simulate race between clients with different generation numbers, # in which the out-of-date client gets a higher timestamp. - user1 = self.database.allocate_user('test@mozilla.com', - generation=1, - timestamp=timestamp) - user2 = self.database.allocate_user('test@mozilla.com', - generation=2, - timestamp=timestamp - 1) - self.assertNotEqual(user1['uid'], user2['uid']) + user1 = self.database.allocate_user( + "test@mozilla.com", generation=1, timestamp=timestamp + ) + user2 = self.database.allocate_user( + "test@mozilla.com", generation=2, timestamp=timestamp - 1 + ) + self.assertNotEqual(user1["uid"], user2["uid"]) # Force node re-assignment by marking all records as replaced. - self.database.replace_user_records('test@mozilla.com', - timestamp=timestamp + 1) + self.database.replace_user_records( + "test@mozilla.com", timestamp=timestamp + 1 + ) # The next client to show up should get a new assignment, marked # with the correct generation number. - user = self.database.get_user('test@mozilla.com') - self.assertEqual(user['generation'], 2) - self.assertNotEqual(user['uid'], user1['uid']) - self.assertNotEqual(user['uid'], user2['uid']) + user = self.database.get_user("test@mozilla.com") + self.assertEqual(user["generation"], 2) + self.assertNotEqual(user["uid"], user1["uid"]) + self.assertNotEqual(user["uid"], user2["uid"]) def test_that_we_can_allocate_users_to_a_specific_node(self): - node = 'https://phx13' + node = "https://phx13" self.database.add_node(node, 50) # The new node is not selected by default, because of lower capacity. - user = self.database.allocate_user('test1@mozilla.com') - self.assertNotEqual(user['node'], node) + user = self.database.allocate_user("test1@mozilla.com") + self.assertNotEqual(user["node"], node) # But we can force it using keyword argument. - user = self.database.allocate_user('test2@mozilla.com', - node=node) - self.assertEqual(user['node'], node) + user = self.database.allocate_user("test2@mozilla.com", node=node) + self.assertEqual(user["node"], node) def test_that_we_can_move_users_to_a_specific_node(self): - node = 'https://phx13' + node = "https://phx13" self.database.add_node(node, 50) # The new node is not selected by default, because of lower capacity. - user = self.database.allocate_user('test@mozilla.com') - self.assertNotEqual(user['node'], node) + user = self.database.allocate_user("test@mozilla.com") + self.assertNotEqual(user["node"], node) # But we can move them there explicitly using keyword argument. self.database.update_user(user, node=node) - self.assertEqual(user['node'], node) + self.assertEqual(user["node"], node) # Sanity-check by re-reading it from the db. - user = self.database.get_user('test@mozilla.com') - self.assertEqual(user['node'], node) + user = self.database.get_user("test@mozilla.com") + self.assertEqual(user["node"], node) # Check that it properly respects client-state and generation. 
self.database.update_user(user, generation=12) - self.database.update_user(user, client_state='XXX') - self.database.update_user(user, generation=42, - client_state='YYY', node='https://phx12') - self.assertEqual(user['node'], 'https://phx12') - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], 'YYY') - self.assertEqual(sorted(user['old_client_states']), ['', 'XXX']) + self.database.update_user(user, client_state="XXX") + self.database.update_user( + user, generation=42, client_state="YYY", node="https://phx12" + ) + self.assertEqual(user["node"], "https://phx12") + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "YYY") + self.assertEqual(sorted(user["old_client_states"]), ["", "XXX"]) # Sanity-check by re-reading it from the db. - user = self.database.get_user('test@mozilla.com') - self.assertEqual(user['node'], 'https://phx12') - self.assertEqual(user['generation'], 42) - self.assertEqual(user['client_state'], 'YYY') - self.assertEqual(sorted(user['old_client_states']), ['', 'XXX']) + user = self.database.get_user("test@mozilla.com") + self.assertEqual(user["node"], "https://phx12") + self.assertEqual(user["generation"], 42) + self.assertEqual(user["client_state"], "YYY") + self.assertEqual(sorted(user["old_client_states"]), ["", "XXX"]) def test_that_record_cleanup_frees_slots_on_the_node(self): - node = 'https://phx12' - self.database.update_node(node, capacity=10, available=1, - current_load=9) + node = "https://phx12" + self.database.update_node( + node, capacity=10, available=1, current_load=9 + ) # We should only be able to allocate one more user to that node. - user = self.database.allocate_user('test1@mozilla.com') - self.assertEqual(user['node'], node) + user = self.database.allocate_user("test1@mozilla.com") + self.assertEqual(user["node"], node) with self.assertRaises(Exception): - self.database.allocate_user('test2@mozilla.com') + self.database.allocate_user("test2@mozilla.com") # But when we clean up the user's record, it frees up the slot. - self.database.retire_user('test1@mozilla.com') - self.database.delete_user_record(user['uid']) - user = self.database.allocate_user('test2@mozilla.com') - self.assertEqual(user['node'], node) + self.database.retire_user("test1@mozilla.com") + self.database.delete_user_record(user["uid"]) + user = self.database.allocate_user("test2@mozilla.com") + self.assertEqual(user["node"], node) def test_gradual_release_of_node_capacity(self): - node1 = 'https://phx12' - self.database.update_node(node1, capacity=8, available=1, - current_load=4) - node2 = 'https://phx13' - self.database.add_node(node2, capacity=6, - available=1, current_load=4) + node1 = "https://phx12" + self.database.update_node( + node1, capacity=8, available=1, current_load=4 + ) + node2 = "https://phx13" + self.database.add_node(node2, capacity=6, available=1, current_load=4) # Two allocations should succeed without update, one on each node. - user = self.database.allocate_user('test1@mozilla.com') - self.assertEqual(user['node'], node1) - user = self.database.allocate_user('test2@mozilla.com') - self.assertEqual(user['node'], node2) + user = self.database.allocate_user("test1@mozilla.com") + self.assertEqual(user["node"], node1) + user = self.database.allocate_user("test2@mozilla.com") + self.assertEqual(user["node"], node2) # The next allocation attempt will release 10% more capacity, # which is one more slot for each node. 
- user = self.database.allocate_user('test3@mozilla.com') - self.assertEqual(user['node'], node1) - user = self.database.allocate_user('test4@mozilla.com') - self.assertEqual(user['node'], node2) + user = self.database.allocate_user("test3@mozilla.com") + self.assertEqual(user["node"], node1) + user = self.database.allocate_user("test4@mozilla.com") + self.assertEqual(user["node"], node2) # Now node2 is full, so further allocations all go to node1. - user = self.database.allocate_user('test5@mozilla.com') - self.assertEqual(user['node'], node1) - user = self.database.allocate_user('test6@mozilla.com') - self.assertEqual(user['node'], node1) + user = self.database.allocate_user("test5@mozilla.com") + self.assertEqual(user["node"], node1) + user = self.database.allocate_user("test6@mozilla.com") + self.assertEqual(user["node"], node1) # Until it finally reaches capacity. with self.assertRaises(Exception): - self.database.allocate_user('test7@mozilla.com') + self.database.allocate_user("test7@mozilla.com") def test_count_users(self): - user = self.database.allocate_user('test1@example.com') + user = self.database.allocate_user("test1@example.com") self.assertEqual(self.database.count_users(), 1) old_timestamp = get_timestamp() time.sleep(0.01) # Adding users increases the count. - user = self.database.allocate_user('rfkelly@mozilla.com') + user = self.database.allocate_user("rfkelly@mozilla.com") self.assertEqual(self.database.count_users(), 2) # Updating a user doesn't change the count. - self.database.update_user(user, client_state='aaaa') + self.database.update_user(user, client_state="aaaa") self.assertEqual(self.database.count_users(), 2) # Looking back in time doesn't count newer users. self.assertEqual(self.database.count_users(old_timestamp), 1) # Retiring a user decreases the count. - self.database.retire_user('test1@example.com') + self.database.retire_user("test1@example.com") self.assertEqual(self.database.count_users(), 1) def test_first_seen_at(self): - EMAIL = 'test1@example.com' + EMAIL = "test1@example.com" user0 = self.database.allocate_user(EMAIL) user1 = self.database.get_user(EMAIL) - self.assertEqual(user1['uid'], user0['uid']) - self.assertEqual(user1['first_seen_at'], user0['first_seen_at']) + self.assertEqual(user1["uid"], user0["uid"]) + self.assertEqual(user1["first_seen_at"], user0["first_seen_at"]) # It should stay consistent if we re-allocate the user's node. time.sleep(0.1) - self.database.update_user(user1, client_state='aaaa') + self.database.update_user(user1, client_state="aaaa") user2 = self.database.get_user(EMAIL) - self.assertNotEqual(user2['uid'], user0['uid']) - self.assertEqual(user2['first_seen_at'], user0['first_seen_at']) + self.assertNotEqual(user2["uid"], user0["uid"]) + self.assertEqual(user2["first_seen_at"], user0["first_seen_at"]) # Until we purge their old node-assignment records. 
- self.database.delete_user_record(user0['uid']) + self.database.delete_user_record(user0["uid"]) user3 = self.database.get_user(EMAIL) - self.assertEqual(user3['uid'], user2['uid']) - self.assertNotEqual(user3['first_seen_at'], user2['first_seen_at']) + self.assertEqual(user3["uid"], user2["uid"]) + self.assertNotEqual(user3["first_seen_at"], user2["first_seen_at"]) def test_build_old_range(self): params = dict() diff --git a/tools/tokenserver/test_process_account_events.py b/tools/tokenserver/test_process_account_events.py index ca02ef1774..59e0c794dc 100644 --- a/tools/tokenserver/test_process_account_events.py +++ b/tools/tokenserver/test_process_account_events.py @@ -20,20 +20,17 @@ def message_body(**kwds): - return json.dumps({ - "Message": json.dumps(kwds) - }) + return json.dumps({"Message": json.dumps(kwds)}) class ProcessAccountEventsTestCase(unittest.TestCase): def get_ini(self): - return os.path.join(os.path.dirname(__file__), - 'test_sql.ini') + return os.path.join(os.path.dirname(__file__), "test_sql.ini") def setUp(self): self.database = Database() - self.database.add_service('sync-1.5', r'{node}/1.5/{uid}') + self.database.add_service("sync-1.5", r"{node}/1.5/{uid}") self.database.add_node("https://phx12", 100) self.logs = LogCapture() @@ -41,13 +38,13 @@ def tearDown(self): self.logs.uninstall() testing.tearDown() - cursor = self.database._execute_sql('DELETE FROM users') + cursor = self.database._execute_sql("DELETE FROM users") cursor.close - cursor = self.database._execute_sql('DELETE FROM nodes') + cursor = self.database._execute_sql("DELETE FROM nodes") cursor.close() - cursor = self.database._execute_sql('DELETE FROM services') + cursor = self.database._execute_sql("DELETE FROM services") cursor.close() def assertMessageWasLogged(self, msg): @@ -75,11 +72,13 @@ def test_delete_user(self): self.assertEqual(len(records), 2) self.assertTrue(records[0]["replaced_at"] is not None) - self.process_account_event(message_body( - event="delete", - uid=UID, - iss=ISS, - )) + self.process_account_event( + message_body( + event="delete", + uid=UID, + iss=ISS, + ) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 2) @@ -94,10 +93,12 @@ def test_delete_user_by_legacy_uid_format(self): self.assertEqual(len(records), 2) self.assertTrue(records[0]["replaced_at"] is not None) - self.process_account_event(message_body( - event="delete", - uid=EMAIL, - )) + self.process_account_event( + message_body( + event="delete", + uid=EMAIL, + ) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 2) @@ -108,11 +109,9 @@ def test_delete_user_who_is_not_in_the_db(self): records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) - self.process_account_event(message_body( - event="delete", - uid=UID, - iss=ISS - )) + self.process_account_event( + message_body(event="delete", uid=UID, iss=ISS) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) @@ -120,12 +119,14 @@ def test_delete_user_who_is_not_in_the_db(self): def test_reset_user(self): self.database.allocate_user(EMAIL, generation=12) - self.process_account_event(message_body( - event="reset", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="reset", + uid=UID, + iss=ISS, + generation=43, + ) + ) user = self.database.get_user(EMAIL) self.assertEqual(user["generation"], 42) @@ -133,11 +134,13 @@ def test_reset_user(self): def 
test_reset_user_by_legacy_uid_format(self): self.database.allocate_user(EMAIL, generation=12) - self.process_account_event(message_body( - event="reset", - uid=EMAIL, - generation=43, - )) + self.process_account_event( + message_body( + event="reset", + uid=EMAIL, + generation=43, + ) + ) user = self.database.get_user(EMAIL) self.assertEqual(user["generation"], 42) @@ -146,12 +149,14 @@ def test_reset_user_who_is_not_in_the_db(self): records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) - self.process_account_event(message_body( - event="reset", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="reset", + uid=UID, + iss=ISS, + generation=43, + ) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) @@ -159,12 +164,14 @@ def test_reset_user_who_is_not_in_the_db(self): def test_password_change(self): self.database.allocate_user(EMAIL, generation=12) - self.process_account_event(message_body( - event="passwordChange", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="passwordChange", + uid=UID, + iss=ISS, + generation=43, + ) + ) user = self.database.get_user(EMAIL) self.assertEqual(user["generation"], 42) @@ -173,12 +180,14 @@ def test_password_change_user_not_in_db(self): records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) - self.process_account_event(message_body( - event="passwordChange", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="passwordChange", + uid=UID, + iss=ISS, + generation=43, + ) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 0) @@ -186,55 +195,67 @@ def test_password_change_user_not_in_db(self): def test_malformed_events(self): # Unknown event type. - self.process_account_event(message_body( - event="party", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="party", + uid=UID, + iss=ISS, + generation=43, + ) + ) self.assertMessageWasLogged("Dropping unknown event type") self.clearLogs() # Missing event type. - self.process_account_event(message_body( - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + uid=UID, + iss=ISS, + generation=43, + ) + ) self.assertMessageWasLogged("Invalid account message") self.clearLogs() # Missing uid. - self.process_account_event(message_body( - event="delete", - iss=ISS, - )) + self.process_account_event( + message_body( + event="delete", + iss=ISS, + ) + ) self.assertMessageWasLogged("Invalid account message") self.clearLogs() # Missing generation for reset events. - self.process_account_event(message_body( - event="reset", - uid=UID, - iss=ISS, - )) + self.process_account_event( + message_body( + event="reset", + uid=UID, + iss=ISS, + ) + ) self.assertMessageWasLogged("Invalid account message") self.clearLogs() # Missing generation for passwordChange events. 
- self.process_account_event(message_body( - event="passwordChange", - uid=UID, - iss=ISS, - )) + self.process_account_event( + message_body( + event="passwordChange", + uid=UID, + iss=ISS, + ) + ) self.assertMessageWasLogged("Invalid account message") self.clearLogs() # Missing issuer with nonemail uid - self.process_account_event(message_body( - event="delete", - uid=UID, - )) + self.process_account_event( + message_body( + event="delete", + uid=UID, + ) + ) self.assertMessageWasLogged("Invalid account message") self.clearLogs() @@ -255,46 +276,43 @@ def test_malformed_events(self): def test_update_with_no_keys_changed_at(self): user = self.database.allocate_user( - EMAIL, - generation=12, - keys_changed_at=None + EMAIL, generation=12, keys_changed_at=None ) # These update_user calls previously failed (SYNC-3633) self.database.update_user(user, generation=13) self.database.update_user( - user, - generation=14, - client_state="abcdef", - keys_changed_at=13 + user, generation=14, client_state="abcdef", keys_changed_at=13 ) - self.process_account_event(message_body( - event="reset", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="reset", + uid=UID, + iss=ISS, + generation=43, + ) + ) user = self.database.get_user(EMAIL) self.assertEqual(user["generation"], 42) def test_update_with_no_keys_changed_at2(self): user = self.database.allocate_user( - EMAIL, - generation=12, - keys_changed_at=None + EMAIL, generation=12, keys_changed_at=None ) # Mark the current record as replaced. This can probably only occur # during a race condition in row creation self.database.replace_user_record(user["uid"]) - self.process_account_event(message_body( - event="reset", - uid=UID, - iss=ISS, - generation=43, - )) + self.process_account_event( + message_body( + event="reset", + uid=UID, + iss=ISS, + generation=43, + ) + ) user = self.database.get_user(EMAIL) self.assertEqual(user["generation"], 42) @@ -305,7 +323,8 @@ class TestProcessAccountEventsForceSpanner(ProcessAccountEventsTestCase): def setUp(self): super().setUp() self.database.spanner_node_id = self.database.get_node_id( - "https://phx12") + "https://phx12" + ) def test_delete_user_force_spanner(self): self.database.allocate_user(EMAIL) @@ -315,11 +334,13 @@ def test_delete_user_force_spanner(self): self.assertEqual(len(records), 2) self.assertTrue(records[0]["replaced_at"] is not None) - self.process_account_event(message_body( - event="delete", - uid=UID, - iss=ISS, - )) + self.process_account_event( + message_body( + event="delete", + uid=UID, + iss=ISS, + ) + ) records = list(self.database.get_user_records(EMAIL)) self.assertEqual(len(records), 2) diff --git a/tools/tokenserver/test_purge_old_records.py b/tools/tokenserver/test_purge_old_records.py index bbd39b413b..ebbd8fb2b8 100644 --- a/tools/tokenserver/test_purge_old_records.py +++ b/tools/tokenserver/test_purge_old_records.py @@ -36,17 +36,17 @@ def setUp(self): # Configure the node-assignment backend to talk to our test service. 
self.database = Database() - self.database.add_service('sync-1.5', r'{node}/1.5/{uid}') + self.database.add_service("sync-1.5", r"{node}/1.5/{uid}") self.database.add_node(self.service_node, 100) def tearDown(self): - cursor = self.database._execute_sql('DELETE FROM users') + cursor = self.database._execute_sql("DELETE FROM users") cursor.close() - cursor = self.database._execute_sql('DELETE FROM nodes') + cursor = self.database._execute_sql("DELETE FROM nodes") cursor.close() - cursor = self.database._execute_sql('DELETE FROM services') + cursor = self.database._execute_sql("DELETE FROM services") cursor.close() del self.service_requests[:] @@ -74,12 +74,13 @@ class TestPurgeOldRecords(PurgeOldRecordsTestCase): def test_purging_of_old_user_records(self): # Make some old user records. email = "test@mozilla.com" - user = self.database.allocate_user(email, client_state="aa", - generation=123) - self.database.update_user(user, client_state="bb", - generation=456, keys_changed_at=450) - self.database.update_user(user, client_state="cc", - generation=789) + user = self.database.allocate_user( + email, client_state="aa", generation=123 + ) + self.database.update_user( + user, client_state="bb", generation=456, keys_changed_at=450 + ) + self.database.update_user(user, client_state="cc", generation=789) user_records = list(self.database.get_user_records(email)) self.assertEqual(len(user_records), 3) user = self.database.get_user(email) @@ -156,11 +157,8 @@ def test_force(self): self.database.update_node(self.service_node, downed=1) self.assertTrue( - purge_old_records( - node_secret, - grace_period=0, - force=True) - ) + purge_old_records(node_secret, grace_period=0, force=True) + ) user_records = list(self.database.get_user_records(email)) self.assertEqual(len(user_records), 1) @@ -179,11 +177,8 @@ def test_dry_run(self): # Don't actually perform anything destructive. 
self.assertTrue( - purge_old_records( - node_secret, - grace_period=0, - dryrun=True) - ) + purge_old_records(node_secret, grace_period=0, dryrun=True) + ) user_records = list(self.database.get_user_records(email)) self.assertEqual(len(user_records), 2) @@ -199,12 +194,12 @@ class TestMigrationRecords(PurgeOldRecordsTestCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.spanner_service = make_server( - "localhost", 0, cls._service_app) + cls.spanner_service = make_server("localhost", 0, cls._service_app) host, port = cls.spanner_service.server_address cls.spanner_node = f"http://{host}:{port}" cls.spanner_thread = threading.Thread( - target=cls.spanner_service.serve_forever) + target=cls.spanner_service.serve_forever + ) cls.spanner_thread.start() cls.downed_node = f"http://{host}:9999" @@ -236,7 +231,8 @@ def test_purging_no_override(self): user = self.database.allocate_user(email, client_state="aa") self.database.replace_user_record(user["uid"]) user = self.database.allocate_user( - email, node=self.spanner_node, client_state="aa") + email, node=self.spanner_node, client_state="aa" + ) self.assertTrue(purge_old_records(node_secret, grace_period=0)) user_records = list(self.database.get_user_records(email)) @@ -268,7 +264,7 @@ def test_purging_override_with_migrated(self): node_secret, grace_period=0, force=True, - override_node=self.spanner_node + override_node=self.spanner_node, ) ) user_records = list(self.database.get_user_records(email)) @@ -302,7 +298,7 @@ def test_purging_override_with_migrated_password_change(self): node_secret, grace_period=0, force=True, - override_node=self.spanner_node + override_node=self.spanner_node, ) ) user_records = list(self.database.get_user_records(email)) @@ -336,7 +332,7 @@ def test_purging_override_null_keys_changed_at(self): node_secret, grace_period=0, force=True, - override_node=self.spanner_node + override_node=self.spanner_node, ) ) user_records = list(self.database.get_user_records(email)) diff --git a/tools/tokenserver/test_scripts.py b/tools/tokenserver/test_scripts.py index 023182d31d..5ac2e8ae88 100644 --- a/tools/tokenserver/test_scripts.py +++ b/tools/tokenserver/test_scripts.py @@ -19,45 +19,43 @@ class TestScripts(unittest.TestCase): NODE_ID = 800 - NODE_URL = 'https://node1' + NODE_URL = "https://node1" def setUp(self): self.database = Database() # Start each test with a blank slate. - cursor = self.database._execute_sql('DELETE FROM users') + cursor = self.database._execute_sql("DELETE FROM users") cursor.close() - cursor = self.database._execute_sql('DELETE FROM nodes') + cursor = self.database._execute_sql("DELETE FROM nodes") cursor.close() - cursor = self.database._execute_sql('DELETE FROM services') + cursor = self.database._execute_sql("DELETE FROM services") cursor.close() # Add a service - self.database.add_service('sync-1.5', r'{node}/1.5/{uid}') + self.database.add_service("sync-1.5", r"{node}/1.5/{uid}") # Ensure we have a node with enough capacity to run the tests. self.database.add_node(self.NODE_URL, 100, id=self.NODE_ID) def tearDown(self): # And clean up at the end, for good measure. 
- cursor = self.database._execute_sql('DELETE FROM users') + cursor = self.database._execute_sql("DELETE FROM users") cursor.close() - cursor = self.database._execute_sql('DELETE FROM nodes') + cursor = self.database._execute_sql("DELETE FROM nodes") cursor.close() - cursor = self.database._execute_sql('DELETE FROM services') + cursor = self.database._execute_sql("DELETE FROM services") cursor.close() self.database.close() def test_add_node(self): - add_node_script( - args=['--current-load', '9', 'test_node', '100'] - ) - res = self.database.get_node('test_node') + add_node_script(args=["--current-load", "9", "test_node", "100"]) + res = self.database.get_node("test_node") # The node should have the expected attributes self.assertEqual(res.capacity, 100) self.assertEqual(res.available, 10) @@ -67,9 +65,9 @@ def test_add_node(self): self.assertEqual(res.service, self.database.service_id) def test_add_node_with_explicit_available(self): - args = ['--current-load', '9', '--available', '5', 'test_node', '100'] + args = ["--current-load", "9", "--available", "5", "test_node", "100"] add_node_script(args=args) - res = self.database.get_node('test_node') + res = self.database.get_node("test_node") # The node should have the expected attributes self.assertEqual(res.capacity, 100) self.assertEqual(res.available, 5) @@ -79,10 +77,8 @@ def test_add_node_with_explicit_available(self): self.assertEqual(res.service, self.database.service_id) def test_add_downed_node(self): - add_node_script( - args=['--downed', 'test_node', '100'] - ) - res = self.database.get_node('test_node') + add_node_script(args=["--downed", "test_node", "100"]) + res = self.database.get_node("test_node") # The node should have the expected attributes self.assertEqual(res.capacity, 100) self.assertEqual(res.available, 10) @@ -92,10 +88,8 @@ def test_add_downed_node(self): self.assertEqual(res.service, self.database.service_id) def test_add_backoff_node(self): - add_node_script( - args=['--backoff', 'test_node', '100'] - ) - res = self.database.get_node('test_node') + add_node_script(args=["--backoff", "test_node", "100"]) + res = self.database.get_node("test_node") # The node should have the expected attributes self.assertEqual(res.capacity, 100) self.assertEqual(res.available, 10) @@ -105,128 +99,128 @@ def test_add_backoff_node(self): self.assertEqual(res.service, self.database.service_id) def test_allocate_user_user_already_exists(self): - email = 'test@test.com' + email = "test@test.com" self.database.allocate_user(email) - node = 'https://node2' + node = "https://node2" self.database.add_node(node, 100) allocate_user_script(args=[email, node]) user = self.database.get_user(email) # The user should be assigned to the given node - self.assertEqual(user['node'], node) + self.assertEqual(user["node"], node) # Another user should not have been created count = self.database.count_users() self.assertEqual(count, 1) def test_allocate_user_given_node(self): - email = 'test@test.com' - node = 'https://node2' + email = "test@test.com" + node = "https://node2" self.database.add_node(node, 100) allocate_user_script(args=[email, node]) user = self.database.get_user(email) # A new user should be created and assigned to the given node - self.assertEqual(user['node'], node) + self.assertEqual(user["node"], node) def test_allocate_user_not_given_node(self): - email = 'test@test.com' - self.database.add_node('https://node2', 100, - current_load=10) - self.database.add_node('https://node3', 100, - current_load=20) - 
self.database.add_node('https://node4', 100, - current_load=30) + email = "test@test.com" + self.database.add_node("https://node2", 100, current_load=10) + self.database.add_node("https://node3", 100, current_load=20) + self.database.add_node("https://node4", 100, current_load=30) allocate_user_script(args=[email]) user = self.database.get_user(email) # The user should be assigned to the least-loaded node - self.assertEqual(user['node'], 'https://node1') + self.assertEqual(user["node"], "https://node1") def test_count_users(self): - self.database.allocate_user('test1@test.com') - self.database.allocate_user('test2@test.com') - self.database.allocate_user('test3@test.com') + self.database.allocate_user("test1@test.com") + self.database.allocate_user("test2@test.com") + self.database.allocate_user("test3@test.com") timestamp = get_timestamp() - filename = '/tmp/' + str(uuid.uuid4()) + filename = "/tmp/" + str(uuid.uuid4()) try: count_users_script( - args=['--output', filename, '--timestamp', str(timestamp)] + args=["--output", filename, "--timestamp", str(timestamp)] ) with open(filename) as f: info = json.loads(f.readline()) - self.assertEqual(info['total_users'], 3) - self.assertEqual(info['op'], 'sync_count_users') + self.assertEqual(info["total_users"], 3) + self.assertEqual(info["op"], "sync_count_users") finally: os.remove(filename) - filename = '/tmp/' + str(uuid.uuid4()) + filename = "/tmp/" + str(uuid.uuid4()) try: - args = ['--output', filename, '--timestamp', - str(timestamp - 10000)] + args = [ + "--output", + filename, + "--timestamp", + str(timestamp - 10000), + ] count_users_script(args=args) with open(filename) as f: info = json.loads(f.readline()) - self.assertEqual(info['total_users'], 0) - self.assertEqual(info['op'], 'sync_count_users') + self.assertEqual(info["total_users"], 0) + self.assertEqual(info["op"], "sync_count_users") finally: os.remove(filename) def test_remove_node(self): - self.database.add_node('https://node2', 100) - self.database.allocate_user('test1@test.com', - node='https://node2') - self.database.allocate_user('test2@test.com', - node=self.NODE_URL) - self.database.allocate_user('test3@test.com', - node=self.NODE_URL) + self.database.add_node("https://node2", 100) + self.database.allocate_user("test1@test.com", node="https://node2") + self.database.allocate_user("test2@test.com", node=self.NODE_URL) + self.database.allocate_user("test3@test.com", node=self.NODE_URL) - remove_node_script(args=['https://node2']) + remove_node_script(args=["https://node2"]) # The node should have been removed from the database - args = ['https://node2'] + args = ["https://node2"] self.assertRaises(ValueError, self.database.get_node_id, *args) # The first user should have been assigned to a new node - user = self.database.get_user('test1@test.com') - self.assertEqual(user['node'], self.NODE_URL) + user = self.database.get_user("test1@test.com") + self.assertEqual(user["node"], self.NODE_URL) # The second and third users should still be on the first node - user = self.database.get_user('test2@test.com') - self.assertEqual(user['node'], self.NODE_URL) - user = self.database.get_user('test3@test.com') - self.assertEqual(user['node'], self.NODE_URL) + user = self.database.get_user("test2@test.com") + self.assertEqual(user["node"], self.NODE_URL) + user = self.database.get_user("test3@test.com") + self.assertEqual(user["node"], self.NODE_URL) def test_unassign_node(self): - self.database.add_node('https://node2', 100) - self.database.allocate_user('test1@test.com', - 
node='https://node2') - self.database.allocate_user('test2@test.com', - node='https://node2') - self.database.allocate_user('test3@test.com', - node=self.NODE_URL) - - unassign_node_script(args=['https://node2']) - self.database.remove_node('https://node2') + self.database.add_node("https://node2", 100) + self.database.allocate_user("test1@test.com", node="https://node2") + self.database.allocate_user("test2@test.com", node="https://node2") + self.database.allocate_user("test3@test.com", node=self.NODE_URL) + + unassign_node_script(args=["https://node2"]) + self.database.remove_node("https://node2") # All of the users should now be assigned to the first node - user = self.database.get_user('test1@test.com') - self.assertEqual(user['node'], self.NODE_URL) - user = self.database.get_user('test2@test.com') - self.assertEqual(user['node'], self.NODE_URL) - user = self.database.get_user('test3@test.com') - self.assertEqual(user['node'], self.NODE_URL) + user = self.database.get_user("test1@test.com") + self.assertEqual(user["node"], self.NODE_URL) + user = self.database.get_user("test2@test.com") + self.assertEqual(user["node"], self.NODE_URL) + user = self.database.get_user("test3@test.com") + self.assertEqual(user["node"], self.NODE_URL) def test_update_node(self): - self.database.add_node('https://node2', 100) - update_node_script(args=[ - '--capacity', '150', - '--available', '125', - '--current-load', '25', - '--downed', - '--backoff', - 'https://node2' - ]) - node = self.database.get_node('https://node2') + self.database.add_node("https://node2", 100) + update_node_script( + args=[ + "--capacity", + "150", + "--available", + "125", + "--current-load", + "25", + "--downed", + "--backoff", + "https://node2", + ] + ) + node = self.database.get_node("https://node2") # Ensure the node has the expected attributes - self.assertEqual(node['capacity'], 150) - self.assertEqual(node['available'], 125) - self.assertEqual(node['current_load'], 25) - self.assertEqual(node['downed'], 1) - self.assertEqual(node['backoff'], 1) + self.assertEqual(node["capacity"], 150) + self.assertEqual(node["available"], 125) + self.assertEqual(node["current_load"], 25) + self.assertEqual(node["downed"], 1) + self.assertEqual(node["backoff"], 1) diff --git a/tools/tokenserver/unassign_node.py b/tools/tokenserver/unassign_node.py index 0f5b1a820c..577198855c 100644 --- a/tools/tokenserver/unassign_node.py +++ b/tools/tokenserver/unassign_node.py @@ -52,8 +52,13 @@ def main(args=None): usage = "usage: %prog [options] node_name" descr = "Clear all assignments to node in the tokenserver database" parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if len(args) != 1: diff --git a/tools/tokenserver/update_node.py b/tools/tokenserver/update_node.py index 45a1847989..c0c0898fbb 100644 --- a/tools/tokenserver/update_node.py +++ b/tools/tokenserver/update_node.py @@ -41,18 +41,43 @@ def main(args=None): usage = "usage: %prog [options] node_name" descr = "Update node details in the tokenserver database" parser = optparse.OptionParser(usage=usage, description=descr) - parser.add_option("", "--capacity", type="int", - help="How many user slots the node has overall") - parser.add_option("", "--available", type="int", - help="How many user slots 
the node has available") - parser.add_option("", "--current-load", type="int", - help="How many user slots the node has occupied") - parser.add_option("", "--downed", action="store_true", - help="Mark the node as down in the db") - parser.add_option("", "--backoff", action="store_true", - help="Mark the node as backed-off in the db") - parser.add_option("-v", "--verbose", action="count", dest="verbosity", - help="Control verbosity of log messages") + parser.add_option( + "", + "--capacity", + type="int", + help="How many user slots the node has overall", + ) + parser.add_option( + "", + "--available", + type="int", + help="How many user slots the node has available", + ) + parser.add_option( + "", + "--current-load", + type="int", + help="How many user slots the node has occupied", + ) + parser.add_option( + "", + "--downed", + action="store_true", + help="Mark the node as down in the db", + ) + parser.add_option( + "", + "--backoff", + action="store_true", + help="Mark the node as backed-off in the db", + ) + parser.add_option( + "-v", + "--verbose", + action="count", + dest="verbosity", + help="Control verbosity of log messages", + ) opts, args = parser.parse_args(args) if len(args) != 1: diff --git a/tools/tokenserver/util.py b/tools/tokenserver/util.py index 56f57514c2..32de05e957 100644 --- a/tools/tokenserver/util.py +++ b/tools/tokenserver/util.py @@ -20,7 +20,7 @@ def encode_bytes_b64(value): - return base64.urlsafe_b64encode(value).rstrip(b'=').decode('ascii') + return base64.urlsafe_b64encode(value).rstrip(b"=").decode("ascii") def run_script(main): @@ -41,12 +41,14 @@ def configure_script_logging(opts=None, logger_name=""): """ verbosity = ( - opts and getattr( - opts, "verbosity", logging.NOTSET)) or logging.NOTSET + opts and getattr(opts, "verbosity", logging.NOTSET) + ) or logging.NOTSET logger = logging.getLogger(logger_name) - level = os.environ.get("PYTHON_LOG", "").upper() or \ - max(logging.DEBUG, logging.WARNING - (verbosity * 10)) or \ - logger.getEffectiveLevel() + level = ( + os.environ.get("PYTHON_LOG", "").upper() + or max(logging.DEBUG, logging.WARNING - (verbosity * 10)) + or logger.getEffectiveLevel() + ) # if we've previously setup a handler, adjust it instead if logger.hasHandlers(): @@ -59,8 +61,8 @@ def configure_script_logging(opts=None, logger_name=""): if opts: if getattr(opts, "human_logs", None): formatter = logging.Formatter( - "{levelname:<8s}: {message}", - style="{") + "{levelname:<8s}: {message}", style="{" + ) handler.setFormatter(formatter) handler.setLevel(level) @@ -76,14 +78,15 @@ def configure_script_logging(opts=None, logger_name=""): class GCP_JSON_Formatter(logging.Formatter): def format(self, record): - return json.dumps({ - "severity": record.levelname, - "message": record.getMessage(), - "timestamp": datetime.fromtimestamp( - record.created).strftime( + return json.dumps( + { + "severity": record.levelname, + "message": record.getMessage(), + "timestamp": datetime.fromtimestamp(record.created).strftime( "%Y-%m-%dT%H:%M:%SZ" # RFC3339 ), - }) + } + ) def format_key_id(keys_changed_at, key_hash): @@ -99,7 +102,7 @@ def get_timestamp(): return int(time.time() * 1000) -class Metrics(): +class Metrics: def __init__(self, opts, namespace=""): options = dict( @@ -121,11 +124,11 @@ def add_metric_options(parser: optparse.OptionParser): "", "--metric_host", default=os.environ.get("SYNC_STATSD_HOST"), - help="Metric host name" + help="Metric host name", ) parser.add_option( "", "--metric_port", default=os.environ.get("SYNC_STATSD_PORT"), - 
help="Metric host port" + help="Metric host port", )