diff --git a/Pipfile b/Pipfile index 6cb93e6a9a4..4c352ab54ee 100644 --- a/Pipfile +++ b/Pipfile @@ -85,6 +85,7 @@ pyld = "==2.0.3" types-requests = "==2.31.0.10" types-pytz = "==2023.3.1.1" gevent = "~=23.9" - +types-aiofiles = "==23.2.0.0" +types-cachetools = "==5.3.0.7" [requires] python_version = "3.10" diff --git a/Pipfile.lock b/Pipfile.lock index 6693a503981..ffa44e13f62 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "225c236c64dfd5eff1f0a00347ba634147c2749de414df20447eea5ddaa1eaf0" + "sha256": "9f2fb3c8611b8b3b0a3597e56d0a732a477471a86be9802076f3beae419aafad" }, "pipfile-spec": 6, "requires": { @@ -724,11 +724,11 @@ }, "google-auth": { "hashes": [ - "sha256:2ec7b2a506989d7dbfdbe81cb8d0ead8876caaed14f86d29d34483cbe99c57af", - "sha256:9b82d5c8d3479a5391ea0a46d81cca698d328459da31d4a459d4e901a5d927e0" + "sha256:d5d66b8f4f6e3273740d7bb73ddefa6c2d1ff691704bd407d51c6b5800e7c97b", + "sha256:dfd7b44935d498e106c08883b2dac0ad36d8aa10402a6412e9a1c9d74b4773f1" ], "markers": "python_version >= '3.7'", - "version": "==2.24.0" + "version": "==2.25.1" }, "google-auth-httplib2": { "hashes": [ @@ -2235,99 +2235,99 @@ }, "yarl": { "hashes": [ - "sha256:09c19e5f4404574fcfb736efecf75844ffe8610606f3fccc35a1515b8b6712c4", - "sha256:0ab5baaea8450f4a3e241ef17e3d129b2143e38a685036b075976b9c415ea3eb", - "sha256:0d155a092bf0ebf4a9f6f3b7a650dc5d9a5bbb585ef83a52ed36ba46f55cc39d", - "sha256:126638ab961633f0940a06e1c9d59919003ef212a15869708dcb7305f91a6732", - "sha256:1a0a4f3aaa18580038cfa52a7183c8ffbbe7d727fe581300817efc1e96d1b0e9", - "sha256:1d93461e2cf76c4796355494f15ffcb50a3c198cc2d601ad8d6a96219a10c363", - "sha256:26a1a8443091c7fbc17b84a0d9f38de34b8423b459fb853e6c8cdfab0eacf613", - "sha256:271d63396460b6607b588555ea27a1a02b717ca2e3f2cf53bdde4013d7790929", - "sha256:28a108cb92ce6cf867690a962372996ca332d8cda0210c5ad487fe996e76b8bb", - "sha256:29beac86f33d6c7ab1d79bd0213aa7aed2d2f555386856bb3056d5fdd9dab279", - "sha256:2c757f64afe53a422e45e3e399e1e3cf82b7a2f244796ce80d8ca53e16a49b9f", - "sha256:2dad8166d41ebd1f76ce107cf6a31e39801aee3844a54a90af23278b072f1ccf", - "sha256:2dc72e891672343b99db6d497024bf8b985537ad6c393359dc5227ef653b2f17", - "sha256:2f3c8822bc8fb4a347a192dd6a28a25d7f0ea3262e826d7d4ef9cc99cd06d07e", - "sha256:32435d134414e01d937cd9d6cc56e8413a8d4741dea36af5840c7750f04d16ab", - "sha256:3cfa4dbe17b2e6fca1414e9c3bcc216f6930cb18ea7646e7d0d52792ac196808", - "sha256:3d5434b34100b504aabae75f0622ebb85defffe7b64ad8f52b8b30ec6ef6e4b9", - "sha256:4003f380dac50328c85e85416aca6985536812c082387255c35292cb4b41707e", - "sha256:44e91a669c43f03964f672c5a234ae0d7a4d49c9b85d1baa93dec28afa28ffbd", - "sha256:4a14907b597ec55740f63e52d7fee0e9ee09d5b9d57a4f399a7423268e457b57", - "sha256:4ce77d289f8d40905c054b63f29851ecbfd026ef4ba5c371a158cfe6f623663e", - "sha256:4d6d74a97e898c1c2df80339aa423234ad9ea2052f66366cef1e80448798c13d", - "sha256:51382c72dd5377861b573bd55dcf680df54cea84147c8648b15ac507fbef984d", - "sha256:525cd69eff44833b01f8ef39aa33a9cc53a99ff7f9d76a6ef6a9fb758f54d0ff", - "sha256:53ec65f7eee8655bebb1f6f1607760d123c3c115a324b443df4f916383482a67", - "sha256:5f74b015c99a5eac5ae589de27a1201418a5d9d460e89ccb3366015c6153e60a", - "sha256:6280353940f7e5e2efaaabd686193e61351e966cc02f401761c4d87f48c89ea4", - "sha256:632c7aeb99df718765adf58eacb9acb9cbc555e075da849c1378ef4d18bf536a", - "sha256:6465d36381af057d0fab4e0f24ef0e80ba61f03fe43e6eeccbe0056e74aadc70", - "sha256:66a6dbf6ca7d2db03cc61cafe1ee6be838ce0fbc97781881a22a58a7c5efef42", - 
"sha256:6d350388ba1129bc867c6af1cd17da2b197dff0d2801036d2d7d83c2d771a682", - "sha256:7217234b10c64b52cc39a8d82550342ae2e45be34f5bff02b890b8c452eb48d7", - "sha256:721ee3fc292f0d069a04016ef2c3a25595d48c5b8ddc6029be46f6158d129c92", - "sha256:72a57b41a0920b9a220125081c1e191b88a4cdec13bf9d0649e382a822705c65", - "sha256:73cc83f918b69110813a7d95024266072d987b903a623ecae673d1e71579d566", - "sha256:778df71c8d0c8c9f1b378624b26431ca80041660d7be7c3f724b2c7a6e65d0d6", - "sha256:79e1df60f7c2b148722fb6cafebffe1acd95fd8b5fd77795f56247edaf326752", - "sha256:7c86d0d0919952d05df880a1889a4f0aeb6868e98961c090e335671dea5c0361", - "sha256:7eaf13af79950142ab2bbb8362f8d8d935be9aaf8df1df89c86c3231e4ff238a", - "sha256:828235a2a169160ee73a2fcfb8a000709edf09d7511fccf203465c3d5acc59e4", - "sha256:8535e111a064f3bdd94c0ed443105934d6f005adad68dd13ce50a488a0ad1bf3", - "sha256:88d2c3cc4b2f46d1ba73d81c51ec0e486f59cc51165ea4f789677f91a303a9a7", - "sha256:8a2538806be846ea25e90c28786136932ec385c7ff3bc1148e45125984783dc6", - "sha256:8dab30b21bd6fb17c3f4684868c7e6a9e8468078db00f599fb1c14e324b10fca", - "sha256:8f18a7832ff85dfcd77871fe677b169b1bc60c021978c90c3bb14f727596e0ae", - "sha256:946db4511b2d815979d733ac6a961f47e20a29c297be0d55b6d4b77ee4b298f6", - "sha256:96758e56dceb8a70f8a5cff1e452daaeff07d1cc9f11e9b0c951330f0a2396a7", - "sha256:9a172c3d5447b7da1680a1a2d6ecdf6f87a319d21d52729f45ec938a7006d5d8", - "sha256:9a5211de242754b5e612557bca701f39f8b1a9408dff73c6db623f22d20f470e", - "sha256:9df9a0d4c5624790a0dea2e02e3b1b3c69aed14bcb8650e19606d9df3719e87d", - "sha256:aa4643635f26052401750bd54db911b6342eb1a9ac3e74f0f8b58a25d61dfe41", - "sha256:aed37db837ecb5962469fad448aaae0f0ee94ffce2062cf2eb9aed13328b5196", - "sha256:af52725c7c39b0ee655befbbab5b9a1b209e01bb39128dce0db226a10014aacc", - "sha256:b0b8c06afcf2bac5a50b37f64efbde978b7f9dc88842ce9729c020dc71fae4ce", - "sha256:b61e64b06c3640feab73fa4ff9cb64bd8182de52e5dc13038e01cfe674ebc321", - "sha256:b7831566595fe88ba17ea80e4b61c0eb599f84c85acaa14bf04dd90319a45b90", - "sha256:b8bc5b87a65a4e64bc83385c05145ea901b613d0d3a434d434b55511b6ab0067", - "sha256:b8d51817cf4b8d545963ec65ff06c1b92e5765aa98831678d0e2240b6e9fd281", - "sha256:b9f9cafaf031c34d95c1528c16b2fa07b710e6056b3c4e2e34e9317072da5d1a", - "sha256:bb72d2a94481e7dc7a0c522673db288f31849800d6ce2435317376a345728225", - "sha256:c25ec06e4241e162f5d1f57c370f4078797ade95c9208bd0c60f484834f09c96", - "sha256:c405d482c320a88ab53dcbd98d6d6f32ada074f2d965d6e9bf2d823158fa97de", - "sha256:c4472fe53ebf541113e533971bd8c32728debc4c6d8cc177f2bff31d011ec17e", - "sha256:c4b1efb11a8acd13246ffb0bee888dd0e8eb057f8bf30112e3e21e421eb82d4a", - "sha256:c5f3faeb8100a43adf3e7925d556801d14b5816a0ac9e75e22948e787feec642", - "sha256:c6f034386e5550b5dc8ded90b5e2ff7db21f0f5c7de37b6efc5dac046eb19c10", - "sha256:c99ddaddb2fbe04953b84d1651149a0d85214780e4d0ee824e610ab549d98d92", - "sha256:ca6b66f69e30f6e180d52f14d91ac854b8119553b524e0e28d5291a724f0f423", - "sha256:cccdc02e46d2bd7cb5f38f8cc3d9db0d24951abd082b2f242c9e9f59c0ab2af3", - "sha256:cd49a908cb6d387fc26acee8b7d9fcc9bbf8e1aca890c0b2fdfd706057546080", - "sha256:cf7a4e8de7f1092829caef66fd90eaf3710bc5efd322a816d5677b7664893c93", - "sha256:cfd77e8e5cafba3fb584e0f4b935a59216f352b73d4987be3af51f43a862c403", - "sha256:d34c4f80956227f2686ddea5b3585e109c2733e2d4ef12eb1b8b4e84f09a2ab6", - "sha256:d61a0ca95503867d4d627517bcfdc28a8468c3f1b0b06c626f30dd759d3999fd", - "sha256:d81657b23e0edb84b37167e98aefb04ae16cbc5352770057893bd222cdc6e45f", - "sha256:d92d897cb4b4bf915fbeb5e604c7911021a8456f0964f3b8ebbe7f9188b9eabb", - 
"sha256:dd318e6b75ca80bff0b22b302f83a8ee41c62b8ac662ddb49f67ec97e799885d", - "sha256:dd952b9c64f3b21aedd09b8fe958e4931864dba69926d8a90c90d36ac4e28c9a", - "sha256:e0e7e83f31e23c5d00ff618045ddc5e916f9e613d33c5a5823bc0b0a0feb522f", - "sha256:e0f17d1df951336a02afc8270c03c0c6e60d1f9996fcbd43a4ce6be81de0bd9d", - "sha256:e2a16ef5fa2382af83bef4a18c1b3bcb4284c4732906aa69422cf09df9c59f1f", - "sha256:e36021db54b8a0475805acc1d6c4bca5d9f52c3825ad29ae2d398a9d530ddb88", - "sha256:e73db54c967eb75037c178a54445c5a4e7461b5203b27c45ef656a81787c0c1b", - "sha256:e741bd48e6a417bdfbae02e088f60018286d6c141639359fb8df017a3b69415a", - "sha256:f7271d6bd8838c49ba8ae647fc06469137e1c161a7ef97d778b72904d9b68696", - "sha256:fc391e3941045fd0987c77484b2799adffd08e4b6735c4ee5f054366a2e1551d", - "sha256:fc94441bcf9cb8c59f51f23193316afefbf3ff858460cb47b5758bf66a14d130", - "sha256:fe34befb8c765b8ce562f0200afda3578f8abb159c76de3ab354c80b72244c41", - "sha256:fe8080b4f25dfc44a86bedd14bc4f9d469dfc6456e6f3c5d9077e81a5fedfba7", - "sha256:ff34cb09a332832d1cf38acd0f604c068665192c6107a439a92abfd8acf90fe2" + "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51", + "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce", + "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559", + "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0", + "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81", + "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc", + "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4", + "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c", + "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130", + "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136", + "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e", + "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec", + "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7", + "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1", + "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455", + "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099", + "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129", + "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10", + "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142", + "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98", + "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa", + "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7", + "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525", + "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c", + "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9", + "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c", + "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8", + "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b", + "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf", + "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23", + "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd", + "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27", + 
"sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f", + "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece", + "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434", + "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec", + "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff", + "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78", + "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d", + "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863", + "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53", + "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31", + "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15", + "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5", + "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b", + "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57", + "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3", + "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1", + "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f", + "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad", + "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c", + "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7", + "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2", + "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b", + "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2", + "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b", + "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9", + "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be", + "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e", + "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984", + "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4", + "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074", + "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2", + "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392", + "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91", + "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541", + "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf", + "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572", + "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66", + "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575", + "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14", + "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5", + "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1", + "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e", + "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551", + "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17", + "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead", + "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0", + 
"sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe", + "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234", + "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0", + "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7", + "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34", + "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42", + "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385", + "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78", + "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be", + "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958", + "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749", + "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec" ], "markers": "python_version >= '3.7'", - "version": "==1.9.3" + "version": "==1.9.4" }, "zipp": { "hashes": [ @@ -2878,11 +2878,11 @@ }, "identify": { "hashes": [ - "sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545", - "sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407" + "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d", + "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34" ], "markers": "python_version >= '3.8'", - "version": "==2.5.32" + "version": "==2.5.33" }, "idna": { "hashes": [ @@ -3914,6 +3914,22 @@ "markers": "python_version >= '3.8'", "version": "==5.14.0" }, + "types-aiofiles": { + "hashes": [ + "sha256:5d6719e8148cb2a9c4ea46dad86d50d3b675c46a940adca698533a8d2216d53d", + "sha256:b6a7127bd232e0802532837b84140b1cd5df19ee60bea3a5699720d2b583361b" + ], + "index": "pypi", + "version": "==23.2.0.0" + }, + "types-cachetools": { + "hashes": [ + "sha256:27c982cdb9cf3fead8b0089ee6b895715ecc99dac90ec29e2cab56eb1aaf4199", + "sha256:98c069dc7fc087b1b061703369c80751b0a0fc561f6fb072b554e5eee23773a0" + ], + "index": "pypi", + "version": "==5.3.0.7" + }, "types-passlib": { "hashes": [ "sha256:4fad7d8de53745e113be58cca00e59062fddeb95c7774a21ba56802bc28851b5", @@ -4004,44 +4020,44 @@ }, "urwid": { "hashes": [ - "sha256:0607961f0ce4a60fd4701138bcd198579caafdc020c11fff2db8c7d295786817", - "sha256:0719114584eeda539e4dd649e8e434a64d0a38ba9769fe4f80d1286e19850713", - "sha256:1bbb1a2b8beecdaea4b2f6657efb6ca62c53e4156cfae156cb96bab90c738147", - "sha256:238ec0be47cbca99320f12864143638e4ebd1722a509f8037ff92ef66b0e9394", - "sha256:36a856fd0cb38c05fda2ee785f395684ea4f682b1f830ccdab7d0784543f9d13", - "sha256:42224c6aca61459b992c30b6740cbe9f4f0f420e8770a9345b7f3f09f19b2409", - "sha256:4e241ea0cc3b9f62e74749a143118877cc04a49c00688acd0ecd97cfc907027b", - "sha256:5706994d3197124ffc57762e8d088b492b2e89f4d1b21cf51a9199d67355c4f3", - "sha256:58950a17f3917b4a4a44e167d2ed1d59931f71a1be927c4d0204a035c51ebfd2", - "sha256:59dac9f19020f53a9400f8a0ba2b18b4389fd1deeab88c8319736fd108244d72", - "sha256:5a4ee4072e64b414075f32373fc5f63967a4686708ada3e8f8df52d4a2ada8a7", - "sha256:604facd127f7d9b535885291154144611fa0aad52f144e88e92e676e4fa09792", - "sha256:64a3d49f5a8d083198a6bd80ea4c93f45ab1e0cc0735433217925b5df7c32c86", - "sha256:7264f82f765339218297b09441d35f5bf90e3bb800a413b5116949729f91ba04", - "sha256:746dfc58c745360705d004f75c8799fa6782ebb635666ad36f6e0e3e5fada8fe", - "sha256:7bbc35b54546975df692ab198a272eb459f883dc9b20a19d56eb19ff50838e9a", - "sha256:80260cf2bd4be3eca611e978ee2926fc0338e9550702c77932efc80ca6b0b09d", - 
"sha256:8f73bb89aa79fd4604d24d59c3093adc6d472a22f604fad4df140b124ee9edcd", - "sha256:b0f9f62a82df0cfb15e8a23ae1f6e6fa856c3415677d98724bc2b8a5702477d1", - "sha256:b17dd1dd21926ca1dfbb7b0cfd43f18afe087fba2f488d29795d05a1fe763449", - "sha256:b3aab4fedbb2fa1b7c0d8fb1c537285c16a46e6c72b54ca4423b779306662f7f", - "sha256:b50c7a06a620320b5c124fb3af7d339f7720fec29bf49d5486ec4115ec3df8ff", - "sha256:bd036997b600c84b33c7bc49b98f029990f49a70d4b21d3399ae7aeba73f0c4e", - "sha256:bf7819bd19ceaa9c1baa5bd50d304b5d4d759591e6a2a7edb1727d1a9f16782d", - "sha256:c094b9d00aeb95496372bc27d5c3a2127546f2efc75cafcad019a99c4ab98d9b", - "sha256:c1c9c088dc9aff393489c61edd35e3a16930f9b62da3c78e419fc0880aa1291f", - "sha256:c4352f2a971c7b9dc236d5fce7c4492858441344ead85ce4b1cb1dd513098bbe", - "sha256:cc1b894918813f170dff2a3f0127e972cfb7c6def3f1fb819a09d903d1383d15", - "sha256:cd8c5f617807f2a8bb5750f851edd3305fb0417230c04b84f4782c51c58c19ea", - "sha256:deb48828224cdc8a70269973ebe2830de5ab05d9837651af529418ec0de06355", - "sha256:e4516d55dcee6bd012b3e72a10c75f2866c63a740f0ec4e1ada05c1e1cc02e34", - "sha256:efc234c8d74bf839896eeae9333be91e0fa832dee552f2c764e300080d06d2e2", - "sha256:efcff1822c71d4f9980c75bbfd91f9b45739c629b0b881646f9fa9dcf8bac27c", - "sha256:f55eb74095dcbeb0f53dc32b7d809d5c34792c7b9b372c3d24fe1ffaaef1863b", - "sha256:f7a9fb6d13595fa2f51a2c9927559c9d3706e1a788cbaf2d661631c65b5162bd" + "sha256:0080ad86d37792faeda2ddaf7eaab711861c34f19996876dde649e28139a741d", + "sha256:085b4b1ff4c0df96e2e82331a2353a56abbd5b7456e838baa995be4f12644347", + "sha256:0a263d8a90450166e0a195cf751b46d5081a48a3c00afa45a4ae582a34a6785e", + "sha256:0be1a86fa279850bd167a031dd71aa401b26f7e9fdcc99360785f1a292938c10", + "sha256:0c7e3f21b4427ecfffd6588fe5119eab7e12abd03576e8ba111fdfe2a78e3fdc", + "sha256:0d518d3cb428c9e0c03076dc6b83996bbfe0595d4612c3c2f572d8edbc260e9c", + "sha256:18b9f84cc80a4fcda55ea29f0ea260d31f8c1c721ff6a0396a396020e6667738", + "sha256:205c7aa020c92797f65465e1a226fbf2122159b1565b936a5fd6ed6bd34b4440", + "sha256:232b64678248c489e0dddadccfc2483a54567b74fcf74d140f3f4b4e9d15ddba", + "sha256:278ffe0c8366c03da533a983eb85ac80e325eec09f78988fd37fc830dc563eb2", + "sha256:2c4a213be475ae81b250e401e670d74d5d4ffb3b034ff6fc52e721759788eb05", + "sha256:32d91a4f2abaa022d6bcd8f4d7b179a1bb03afcb83be4707e5599131f322dbce", + "sha256:3c141f5e6a32e03e78ef28083691588d37f60fb5ffb2d96aecba3cda7fa38bb1", + "sha256:3ca3e5fbdddc3b4394cc93835a79358c56a54b05538edc7e8b66d2ff13c4689f", + "sha256:448d248bd3cbe34f0422108db0cecb7d24336703677bce06b7ed67dc4892d925", + "sha256:45e61a5be847ab36a5188aacd96554e0f354367dcae2f6cbc284de7fbbb3f075", + "sha256:4dbeb404751341f354f7d7bf5857cacb7ba415335427c78ee00991cdcc1b5bb0", + "sha256:4e4a3500b7166f27ca830df8bcfb9969d97a98f340cd4b0f0299557dee8c39b3", + "sha256:5a6fc3651d5aff40d53b52cae1951d17c62126ffcc11c7f0d53583d28198b0a4", + "sha256:5fbb765783120d5dc835424f6870190b6d73c020b44ab650b682fd9ffbc41a85", + "sha256:67bdc9c3f8a834b848e4400d91b5ab82620e1f963e4736600a304930433ea8fe", + "sha256:6cc8db27989f6166602a45f6382357827cba966e3bd607a9e409cf867f7b0ec6", + "sha256:7d38afb5bbcbca365fbd86746990c22167fbeb7f85d756bad33892f364028975", + "sha256:81a17afedc1f0ec7ee6af9ebae0c21f5caddf050831ffc5dfbd7e61a4966388a", + "sha256:8b0802494b1baaf691313cd24aaf5e4ec00bf8a6f4ba664bdf088119517db0d4", + "sha256:90440ef37b50ad5e947a095bfec7a048dfae938daa0d2355274accd886474932", + "sha256:963e89099b0438416b550161750a1e2fb52a328008732197d29e3675baacb150", + "sha256:9dae4dbccae166ee9ff03bda58c36369a566d03274c1fbf559cff11117539bc7", + 
"sha256:bae83944dc78bd178b0ad8cab9ff7d72b03ac01dcd0e1e01725142e241196f04", + "sha256:bbbb8c21920be76630d1ae03eb67dced3d362a56eabf05d619a0ce3f0488cb0d", + "sha256:c692bb9a314216c0cfe1fdd8787858e2e916e25f97c95e8411ba5933c0fe3c39", + "sha256:c79f55558dc50f8c19d1e1468e63942fc7577cbdb2ff948768931ef28548a5af", + "sha256:ce538f0e5c8ee2341f3e38239a1c65a5a042ee993577093067a4419c10615030", + "sha256:d1a0e6bbd4e70805519e843fa982ba322d819939eb7049e6ece5ed6aa132c96b", + "sha256:de28459d08020a07fe4d918a9ca5ff069102fa5455356cc5a1695b55ebb5bafe" ], "markers": "python_full_version >= '3.7.0'", - "version": "==2.2.3" + "version": "==2.3.4" }, "urwid-readline": { "hashes": [ diff --git a/src/apps/activities/crud/activity_history.py b/src/apps/activities/crud/activity_history.py index 6c5dec1b102..a4664b57d13 100644 --- a/src/apps/activities/crud/activity_history.py +++ b/src/apps/activities/crud/activity_history.py @@ -1,11 +1,12 @@ import uuid -from sqlalchemy import any_, distinct, exists, select +from sqlalchemy import distinct, exists, select, update from sqlalchemy.orm import Query from apps.activities.db.schemas import ( ActivityHistorySchema, ActivityItemHistorySchema, + ActivitySchema, ) from apps.activities.domain.response_type_config import ( PerformanceTaskType, @@ -112,17 +113,14 @@ async def get_applet_assessment( return db_result.scalars().first() async def get_reviewable_activities( - self, applet_id_versions: list[str] + self, activity_version_ids: list[str] ) -> list[ActivityHistorySchema]: - if not applet_id_versions: + if not activity_version_ids: return [] query: Query = ( select(ActivityHistorySchema) - .where( - ActivityHistorySchema.applet_id == any_(applet_id_versions), - ActivityHistorySchema.is_reviewable.is_(True), - ) + .where(ActivityHistorySchema.id_version.in_(activity_version_ids)) .order_by( ActivityHistorySchema.applet_id, ActivityHistorySchema.order ) @@ -179,14 +177,13 @@ async def get_by_applet_id_for_summary( query = query.distinct(ActivityHistorySchema.id) db_result = await self._execute(query) schemas = [] - for activity_history_schema, is_performance in db_result.all(): - activity_history_schema.is_performance_task = is_performance + for activity_history_schema, _ in db_result.all(): schemas.append(activity_history_schema) return schemas async def get_by_applet_id_version( - self, applet_id_version: str + self, applet_id_version: str, non_performance=False ) -> ActivityHistorySchema: query: Query = select(ActivityHistorySchema) query = query.where( @@ -195,6 +192,24 @@ async def get_by_applet_id_version( query = query.where( ActivityHistorySchema.is_reviewable == False # noqa ) + if non_performance: + activity_types_query: Query = select(ActivityItemHistorySchema.id) + activity_types_query = activity_types_query.where( + ActivityItemHistorySchema.response_type.in_( + [ + PerformanceTaskType.FLANKER, + PerformanceTaskType.GYROSCOPE, + PerformanceTaskType.TOUCH, + PerformanceTaskType.ABTRAILS, + ResponseType.STABILITYTRACKER, + ] + ) + ) + activity_types_query = activity_types_query.where( + ActivityItemHistorySchema.activity_id + == ActivityHistorySchema.id_version + ) + query.where(~exists(activity_types_query)) db_result = await self._execute(query) return db_result.scalars().all() @@ -253,3 +268,36 @@ async def get_activity_id_versions_for_report( ) db_result = await self._execute(query) return db_result.scalars().all() + + async def update_by_id(self, id_, **values): + subquery: Query = select(ActivityHistorySchema.id_version) + subquery = 
subquery.where(ActivityHistorySchema.id == id_) + subquery = subquery.limit(1) + subquery = subquery.order_by(ActivityHistorySchema.created_at.desc()) + subquery = subquery.subquery() + + query = update(ActivityHistorySchema) + query = query.where( + ActivityHistorySchema.id_version.in_(select([subquery])) + ) + query = query.values(**values) + query = query.returning(ActivityHistorySchema) + await self._execute(query) + + async def get_assessment_version_id(self, applet: uuid.UUID) -> str: + query: Query = ( + select(ActivityHistorySchema.id_version) + .select_from(ActivitySchema) + .join( + ActivityHistorySchema, + ActivityHistorySchema.id == ActivitySchema.id, + ) + .where( + ActivitySchema.applet_id == applet, + ActivitySchema.is_reviewable.is_(True), + ) + .order_by(ActivityHistorySchema.created_at.desc()) + .limit(1) + ) + db_result = await self._execute(query) + return db_result.scalars().first() diff --git a/src/apps/activities/crud/activity_item_history.py b/src/apps/activities/crud/activity_item_history.py index cd09cdd5d95..4a4ad375a33 100644 --- a/src/apps/activities/crud/activity_item_history.py +++ b/src/apps/activities/crud/activity_item_history.py @@ -6,6 +6,7 @@ from apps.activities.db.schemas import ( ActivityHistorySchema, ActivityItemHistorySchema, + ActivitySchema, ) from apps.applets.db.schemas import AppletHistorySchema from infrastructure.database import BaseCRUD @@ -86,24 +87,64 @@ async def get_by_activity_id_versions( return db_result.scalars().all() async def get_applets_assessments( - self, applet_id_version: str + self, + applet_id: uuid.UUID, ) -> list[ActivityItemHistorySchema]: + subquery: Query = ( + select(ActivityHistorySchema.id_version) + .join( + ActivitySchema, ActivitySchema.id == ActivityHistorySchema.id + ) + .where( + ActivitySchema.is_reviewable.is_(True), + ActivitySchema.applet_id == applet_id, + ) + .order_by(ActivityHistorySchema.created_at.desc()) + .limit(1) + .subquery() + ) + query: Query = select(ActivityItemHistorySchema) query = query.join( ActivityHistorySchema, ActivityHistorySchema.id_version == ActivityItemHistorySchema.activity_id, ) - query = query.where( - ActivityHistorySchema.applet_id == applet_id_version + query = query.join( + ActivitySchema, ActivitySchema.id == ActivityHistorySchema.id ) + query = query.where(ActivitySchema.applet_id == applet_id) query = query.where( - ActivityHistorySchema.is_reviewable == True # noqa: E712 + ActivityHistorySchema.is_reviewable == True, # noqa: E712 + ActivityHistorySchema.id_version.in_(subquery), ) query = query.order_by(ActivityItemHistorySchema.order.asc()) db_result = await self._execute(query) - return db_result.scalars().all() + res = db_result.scalars().all() + return res + + async def get_assessment_activity_items( + self, id_version: str | None + ) -> list[ActivityItemHistorySchema | None]: + if not id_version: + return [] + query: Query = select(ActivityItemHistorySchema) + query = query.join( + ActivityHistorySchema, + ActivityHistorySchema.id_version + == ActivityItemHistorySchema.activity_id, + ) + query = query.join( + ActivitySchema, ActivitySchema.id == ActivityHistorySchema.id + ) + query = query.where( + ActivityHistorySchema.is_reviewable == True, # noqa: E712 + ActivityHistorySchema.id_version == id_version, + ) + db_result = await self._execute(query) + res = db_result.scalars().all() + return res async def get_activity_items( self, activity_id: uuid.UUID, versions: list[str] | None diff --git a/src/apps/activities/db/schemas/activity.py 
b/src/apps/activities/db/schemas/activity.py index ba7735698bc..9d55a9aef1f 100644 --- a/src/apps/activities/db/schemas/activity.py +++ b/src/apps/activities/db/schemas/activity.py @@ -1,6 +1,17 @@ -from sqlalchemy import REAL, Boolean, Column, ForeignKey, String, Text, text +from sqlalchemy import ( + REAL, + Boolean, + Column, + ForeignKey, + String, + Text, + func, + text, +) from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.ext.hybrid import hybrid_property +from apps.activities.domain.response_type_config import PerformanceTaskType from infrastructure.database.base import Base __all__ = ["ActivitySchema", "ActivityHistorySchema"] @@ -23,6 +34,17 @@ class _BaseActivitySchema: extra_fields = Column( JSONB(), default=dict, server_default=text("'{}'::jsonb") ) + performance_task_type = Column(String(255), nullable=True) + + @hybrid_property + def is_performance_task(self) -> bool: + return self.performance_task_type in PerformanceTaskType.get_values() + + @is_performance_task.expression # type: ignore[no-redef] + def is_performance_task(cls) -> bool: + return func.coalesce(cls.performance_task_type, "").in_( + PerformanceTaskType.get_values() + ) class ActivitySchema(Base, _BaseActivitySchema): diff --git a/src/apps/activities/domain/activity_base.py b/src/apps/activities/domain/activity_base.py index 3264fcca9c4..3cc1d1eca8f 100644 --- a/src/apps/activities/domain/activity_base.py +++ b/src/apps/activities/domain/activity_base.py @@ -1,5 +1,6 @@ from pydantic import BaseModel, Field +from apps.activities.domain.response_type_config import PerformanceTaskType from apps.activities.domain.scores_reports import ( ScoresAndReports, SubscaleSetting, @@ -20,3 +21,5 @@ class ActivityBase(BaseModel): scores_and_reports: ScoresAndReports | None = None subscale_setting: SubscaleSetting | None = None report_included_item_name: str | None = None + performance_task_type: PerformanceTaskType | None = None + is_performance_task: bool = False diff --git a/src/apps/activities/domain/activity_create.py b/src/apps/activities/domain/activity_create.py index cfc3a9a69b5..c1ef6d9fd70 100644 --- a/src/apps/activities/domain/activity_create.py +++ b/src/apps/activities/domain/activity_create.py @@ -6,6 +6,7 @@ from apps.activities.domain.activity_item_base import BaseActivityItem from apps.activities.domain.custom_validation import ( validate_item_flow, + validate_performance_task_type, validate_score_and_sections, validate_subscales, ) @@ -29,10 +30,10 @@ class ActivityCreate(ActivityBase, InternalModel): @root_validator() def validate_existing_ids_for_duplicate(cls, values): - items = values.get("items", []) + items: list[ActivityItemCreate] = values.get("items", []) item_names = set() - for item in items: # type:ActivityItemCreate + for item in items: if item.name in item_names: raise DuplicateActivityItemNameNameError() item_names.add(item.name) @@ -49,3 +50,7 @@ def validate_scores_and_reports_conditional_logic(cls, values): @root_validator() def validate_subscales(cls, values): return validate_subscales(values) + + @root_validator() + def validate_performance_task_type(cls, values): + return validate_performance_task_type(values) diff --git a/src/apps/activities/domain/activity_full.py b/src/apps/activities/domain/activity_full.py index 0ece48ab74e..076c3b74da7 100644 --- a/src/apps/activities/domain/activity_full.py +++ b/src/apps/activities/domain/activity_full.py @@ -1,15 +1,10 @@ import uuid from datetime import datetime -from pydantic import Field, validator +from pydantic import 
Field from apps.activities.domain.activity_base import ActivityBase from apps.activities.domain.activity_item_base import BaseActivityItem -from apps.activities.domain.custom_validation import ( - validate_is_performance_task, - validate_performance_task_type, -) -from apps.activities.domain.response_type_config import PerformanceTaskType from apps.shared.domain import InternalModel, PublicModel @@ -46,13 +41,3 @@ class PublicActivityFull(ActivityBase, PublicModel): id: uuid.UUID items: list[PublicActivityItemFull] = Field(default_factory=list) created_at: datetime - is_performance_task: bool = False - performance_task_type: PerformanceTaskType | None = None - - @validator("is_performance_task", always=True) - def validate_is_performance_task_full(cls, value, values): - return validate_is_performance_task(value, values) - - @validator("performance_task_type", always=True) - def validate_performance_task_type_full(cls, value, values): - return validate_performance_task_type(value, values) diff --git a/src/apps/activities/domain/activity_history.py b/src/apps/activities/domain/activity_history.py index 499825f5881..eef48c19668 100644 --- a/src/apps/activities/domain/activity_history.py +++ b/src/apps/activities/domain/activity_history.py @@ -52,6 +52,7 @@ class ActivityHistory(InternalModel): is_hidden: bool | None = False scores_and_reports: ScoresAndReports | None = None subscale_setting: SubscaleSetting | None = None + performance_task_type: PerformanceTaskType | None = None class ActivityHistoryChange(InternalModel): @@ -99,8 +100,6 @@ class ActivityHistoryTranslatedExport(ActivityBase, PublicModel): version: str | None = None description: str # type: ignore[assignment] created_at: datetime.datetime - is_performance_task: bool = False - performance_task_type: PerformanceTaskType | None = None items: list[ActivityItemSingleLanguageDetailPublic] = Field( default_factory=list ) diff --git a/src/apps/activities/domain/activity_update.py b/src/apps/activities/domain/activity_update.py index 1004396ada6..31b7ea0e9cb 100644 --- a/src/apps/activities/domain/activity_update.py +++ b/src/apps/activities/domain/activity_update.py @@ -6,6 +6,7 @@ from apps.activities.domain.activity_item_base import BaseActivityItem from apps.activities.domain.custom_validation import ( validate_item_flow, + validate_performance_task_type, validate_score_and_sections, validate_subscales, ) @@ -29,10 +30,10 @@ class ActivityUpdate(ActivityBase, InternalModel): @root_validator() def validate_existing_ids_for_duplicate(cls, values): - items = values.get("items", []) + items: list[ActivityItemUpdate] = values.get("items", []) item_names = set() - for item in items: # type:ActivityItemUpdate + for item in items: if item.name in item_names: raise DuplicateActivityItemNameNameError() item_names.add(item.name) @@ -50,6 +51,10 @@ def validate_score_and_sections_conditional_logic(cls, values): def validate_subscales(cls, values): return validate_subscales(values) + @root_validator() + def validate_performance_task_type(cls, values): + return validate_performance_task_type(values) + class ActivityReportConfiguration(PublicModel): report_included_item_name: str | None diff --git a/src/apps/activities/domain/custom_validation.py b/src/apps/activities/domain/custom_validation.py index 6b6570ace51..41830a0a266 100644 --- a/src/apps/activities/domain/custom_validation.py +++ b/src/apps/activities/domain/custom_validation.py @@ -219,36 +219,19 @@ def validate_subscales(values: dict): return values -def 
validate_is_performance_task(value: bool, values: dict): - # if items type is performance task type or contains part of the name - # of some performance task, then is_performance_task must be set - items = values.get("items", []) - for item in items: - for performance_task_type in list(PerformanceTaskType): - if performance_task_type in item.response_type: - return True - if item.response_type == ResponseType.STABILITYTRACKER: - return True - return value - - -def validate_performance_task_type(value: str | None, values: dict): +def validate_performance_task_type(values: dict): # if items type is performance task type or contains part of the name # of some performance task, then performance task type must be set items = values.get("items", []) for item in items: if item.response_type == ResponseType.STABILITYTRACKER: - for performance_task_type in list(PerformanceTaskType): - value = item.dict()["config"]["user_input_type"] - if value == performance_task_type: - return value - value = next( - ( - performance_task_type - for item in items - for performance_task_type in list(PerformanceTaskType) - if performance_task_type in item.response_type - ), - None, - ) - return value + value = item.dict()["config"]["user_input_type"] + for v in PerformanceTaskType.get_values(): + if value == v: + values["performance_task_type"] = value + elif item.response_type in ( + ResponseType.FLANKER, + ResponseType.ABTRAILS, + ): + values["performance_task_type"] = item.response_type + return values diff --git a/src/apps/activities/domain/response_type_config.py b/src/apps/activities/domain/response_type_config.py index 9405ef47168..e23aaf6dfa3 100644 --- a/src/apps/activities/domain/response_type_config.py +++ b/src/apps/activities/domain/response_type_config.py @@ -364,6 +364,10 @@ class PerformanceTaskType(str, Enum): TOUCH = "touch" ABTRAILS = "ABTrails" + @classmethod + def get_values(cls) -> list[str]: + return [i.value for i in cls] + ResponseTypeConfigOptions = [ TextConfig, diff --git a/src/apps/activities/fixtures/activities.json b/src/apps/activities/fixtures/activities.json index f435bc9ede9..f528a6e80d6 100644 --- a/src/apps/activities/fixtures/activities.json +++ b/src/apps/activities/fixtures/activities.json @@ -130,5 +130,26 @@ "response_is_editable": false, "order": 2 } + }, + { + "table": "activities", + "fields": { + "id": "09e3dbf0-aefb-4d0e-9177-bdb321bf3621", + "created_at": "2023-01-05T15:49:51.752113", + "updated_at": "2023-01-05T15:49:51.752113", + "applet_id": "92917a56-d586-4613-b7aa-991f2c4b15b1", + "name": "PHQ3", + "description": { + "en": "PHQ2", + "fr": "PHQ2" + }, + "splash_screen": "", + "image": "", + "show_all_at_once": false, + "is_skippable": false, + "is_reviewable": true, + "response_is_editable": false, + "order": 1.0 + } } ] \ No newline at end of file diff --git a/src/apps/activities/fixtures/activity_histories.json b/src/apps/activities/fixtures/activity_histories.json index 08f670cb2d8..9d8fa527ff1 100644 --- a/src/apps/activities/fixtures/activity_histories.json +++ b/src/apps/activities/fixtures/activity_histories.json @@ -25,7 +25,7 @@ "reports": [ { "type": "score", - "name":"Score 1", + "name": "Score 1", "id": "score_1", "calculation_type": "sum" } @@ -173,10 +173,10 @@ "updated_at": "2023-01-05T15:49:51.752113", "is_deleted": false, "applet_id": "92917a56-d586-4613-b7aa-991f2c4b15b1_1.9.9", - "name": "PHQ2 new", + "name": "Flanker", "description": { - "en": "PHQ2", - "fr": "PHQ2" + "en": "It is flanker because it has items with flanker response type", + "fr": 
"It is flanker because it has items with flanker response type" }, "splash_screen": "", "image": "", @@ -196,7 +196,8 @@ "calculation_type": "sum" } ] - } + }, + "performance_task_type": "flanker" } } -] \ No newline at end of file +] diff --git a/src/apps/activities/services/activity.py b/src/apps/activities/services/activity.py index f2cd2021807..99c09d61af1 100644 --- a/src/apps/activities/services/activity.py +++ b/src/apps/activities/services/activity.py @@ -1,6 +1,6 @@ import uuid -from apps.activities.crud import ActivitiesCRUD +from apps.activities.crud import ActivitiesCRUD, ActivityHistoriesCRUD from apps.activities.db.schemas import ActivitySchema from apps.activities.domain.activity import ( ActivityDuplicate, @@ -70,6 +70,7 @@ async def create( order=index + 1, report_included_item_name=activity_data.report_included_item_name, # noqa: E501 extra_fields=activity_data.extra_fields, + performance_task_type=activity_data.performance_task_type, ) ) @@ -116,7 +117,11 @@ async def create( # add default schedule for activities await ScheduleService(self.session).create_default_schedules( applet_id=applet_id, - activity_ids=[activity.id for activity in activities], + activity_ids=[ + activity.id + for activity in activities + if not activity.is_reviewable + ], is_activity=True, ) @@ -175,6 +180,7 @@ async def update_create( report_included_item_name=( activity_data.report_included_item_name ), + performance_task_type=activity_data.performance_task_type, ) ) @@ -298,6 +304,7 @@ async def get_single_language_by_applet_id( subscale_setting=schema.subscale_setting, created_at=schema.created_at, report_included_item_name=schema.report_included_item_name, + performance_task_type=schema.performance_task_type, ) ) return activities @@ -417,6 +424,7 @@ async def get_by_applet_id_for_duplicate( is_hidden=schema.is_hidden, scores_and_reports=schema.scores_and_reports, subscale_setting=schema.subscale_setting, + performance_task_type=schema.performance_task_type, ) activity_map[activity.id] = activity activities.append(activity) @@ -503,6 +511,11 @@ def _get_by_language(values: dict, language: str): async def update_report( self, activity_id: uuid.UUID, schema: ActivityReportConfiguration ): - await ActivitiesCRUD(self.session).update_by_id( - activity_id, **schema.dict(by_alias=False, exclude_unset=True) - ) + crud_list: list[type[ActivitiesCRUD] | type[ActivityHistoriesCRUD]] = [ + ActivitiesCRUD, + ActivityHistoriesCRUD, + ] + for crud in crud_list: + await crud(self.session).update_by_id( + activity_id, **schema.dict(by_alias=False, exclude_unset=True) + ) diff --git a/src/apps/activities/services/activity_history.py b/src/apps/activities/services/activity_history.py index 606d6dad807..5e216f4086e 100644 --- a/src/apps/activities/services/activity_history.py +++ b/src/apps/activities/services/activity_history.py @@ -54,6 +54,7 @@ async def add(self, activities: list[ActivityFull]): else None, report_included_item_name=activity.report_included_item_name, # noqa: E501 extra_fields=activity.extra_fields, + performance_task_type=activity.performance_task_type, ) ) @@ -179,10 +180,12 @@ async def get_by_id(self, activity_id: uuid.UUID) -> ActivityHistory: ) return ActivityHistory.from_orm(schema) - async def get_full(self) -> list[ActivityHistoryFull]: + async def get_full( + self, non_performance=False + ) -> list[ActivityHistoryFull]: schemas = await ActivityHistoriesCRUD( self.session - ).get_by_applet_id_version(self._applet_id_version) + ).get_by_applet_id_version(self._applet_id_version, 
non_performance) activities = [] activity_ids = [] activity_map = dict() diff --git a/src/apps/answers/commands/__init__.py b/src/apps/answers/commands/__init__.py new file mode 100644 index 00000000000..91333301bed --- /dev/null +++ b/src/apps/answers/commands/__init__.py @@ -0,0 +1,5 @@ +from apps.answers.commands.convert_assessments import ( # noqa: F401 + app as convert_assessments, +) + +__all__ = ["convert_assessments"] diff --git a/src/apps/answers/commands/convert_assessments.py b/src/apps/answers/commands/convert_assessments.py new file mode 100644 index 00000000000..b9b76fd809b --- /dev/null +++ b/src/apps/answers/commands/convert_assessments.py @@ -0,0 +1,59 @@ +import asyncio +from functools import wraps +from typing import Optional + +import typer +from rich import print + +from apps.answers.crud.assessment_crud import AssessmentCRUD +from infrastructure.database import atomic, session_manager + +app = typer.Typer() + + +def coro(f): + @wraps(f) + def wrapper(*args, **kwargs): + return asyncio.run(f(*args, **kwargs)) + + return wrapper + + +@app.command(short_help="Convert current assessments to version agnostic") +@coro +async def convert( + database_uri: Optional[str] = typer.Option( + None, + "--db-uri", + "-d", + help="Local or arbitrary server database uri", + ), +): + try: + if database_uri: + local_or_arb = session_manager.get_session(database_uri) + else: + local_or_arb = session_manager.get_session() + # Going to arbitrary or local db to get assessments + async with local_or_arb() as session: + crud = AssessmentCRUD(session) + assessments = await crud.get_all_assessments_data() + + # Going to local db to find activity id + local = session_manager.get_session() + async with local() as session: + async with atomic(session): + crud = AssessmentCRUD(session) + answers = await crud.get_updated_assessment(assessments) + await local.remove() + + # Return to arbitrary or local to update + async with local_or_arb() as session: + async with atomic(session): + crud = AssessmentCRUD(session) + for answer in answers: + await crud.update(answer) + await local_or_arb.remove() + + except Exception as ex: + print(f"[bold red] {ex}") diff --git a/src/apps/answers/crud/answer_items.py b/src/apps/answers/crud/answer_items.py index 9d1601bde09..08d8320adfe 100644 --- a/src/apps/answers/crud/answer_items.py +++ b/src/apps/answers/crud/answer_items.py @@ -104,9 +104,7 @@ async def get_assessment( query = query.where(AnswerItemSchema.answer_id == answer_id) query = query.where(AnswerItemSchema.respondent_id == user_id) query = query.where(AnswerItemSchema.is_assessment == True) # noqa - db_result = await self._execute(query) - return db_result.scalars().first() async def get_reviews_by_answer_id( @@ -203,3 +201,17 @@ async def get_applet_answers_by_activity_history_ids( ) db_result = await self._execute(query) return db_result.all() + + async def get_assessment_activity_id( + self, answer_id: uuid.UUID + ) -> list[tuple[uuid.UUID, str]] | None: + query: Query = select( + AnswerItemSchema.respondent_id, + AnswerItemSchema.assessment_activity_id, + ) + query = query.where( + AnswerItemSchema.answer_id == answer_id, + AnswerItemSchema.is_assessment.is_(True), + ) + db_result = await self._execute(query) + return db_result.all() # noqa diff --git a/src/apps/answers/crud/answers.py b/src/apps/answers/crud/answers.py index 69884a95836..fea5d1cdfa5 100644 --- a/src/apps/answers/crud/answers.py +++ b/src/apps/answers/crud/answers.py @@ -58,10 +58,10 @@ def filter_respondent_ids(self, field, value): 
AnswerSchema.activity_history_id, Comparisons.IN ) from_date = FilterField( - func.date(AnswerItemSchema.created_at), Comparisons.GREAT_OR_EQUAL + AnswerItemSchema.created_at, Comparisons.GREAT_OR_EQUAL ) to_date = FilterField( - func.date(AnswerItemSchema.created_at), Comparisons.LESS_OR_EQUAL + AnswerItemSchema.created_at, Comparisons.LESS_OR_EQUAL ) @@ -151,7 +151,6 @@ async def get_applet_answers( limit=None, **filters, ) -> tuple[list[RespondentAnswerData], int]: - reviewed_answer_id = case( (AnswerItemSchema.is_assessment.is_(True), AnswerSchema.id), else_=null(), @@ -163,7 +162,10 @@ async def get_applet_answers( ) activity_history_id = case( - (AnswerItemSchema.is_assessment.is_(True), null()), + ( + AnswerItemSchema.is_assessment.is_(True), + AnswerItemSchema.assessment_activity_id, + ), else_=AnswerSchema.activity_history_id, ) diff --git a/src/apps/answers/crud/assessment_crud.py b/src/apps/answers/crud/assessment_crud.py new file mode 100644 index 00000000000..5103cea7eee --- /dev/null +++ b/src/apps/answers/crud/assessment_crud.py @@ -0,0 +1,62 @@ +import uuid + +from sqlalchemy import select +from sqlalchemy.orm import Query + +from apps.activities.db.schemas import ActivityHistorySchema, ActivitySchema +from apps.answers.crud.answer_items import AnswerItemsCRUD +from apps.answers.db.schemas import AnswerItemSchema, AnswerSchema + + +class AssessmentCRUD(AnswerItemsCRUD): + async def get_all_assessments_data( + self, + ) -> list[tuple[AnswerItemSchema, uuid.UUID, str]]: + query: Query = select( + AnswerItemSchema, AnswerSchema.applet_id, AnswerSchema.version + ) + query = query.join( + AnswerSchema, AnswerSchema.id == AnswerItemSchema.answer_id + ) + query = query.where(AnswerItemSchema.is_assessment.is_(True)) + result = await self._execute(query) + return result.all() # noqa + + async def _get_assessment_by_applet( + self, applet_id: uuid.UUID + ) -> uuid.UUID | None: + query: Query = select(ActivitySchema.id) + query = query.where( + ActivitySchema.applet_id == applet_id, + ActivitySchema.is_reviewable.is_(True), + ) + result = await self._execute(query) + return result.scalars().first() + + async def _check_activity_version(self, id_version: str) -> bool: + query: Query = select(ActivityHistorySchema) + query = query.where(ActivityHistorySchema.id_version == id_version) + result = await self._execute(query) + schema: ActivityHistorySchema = result.scalars().first() + if not schema: + return False + return schema.is_reviewable + + async def get_updated_assessment( + self, answer_data: list[tuple[AnswerItemSchema, uuid.UUID, str]] + ) -> list[AnswerSchema]: + answers = [] + for data in answer_data: + answer, applet_id, version = data + activity_id = await self._get_assessment_by_applet(applet_id) + activity_id_version = f"{activity_id}_{version}" + is_valid = await self._check_activity_version(activity_id_version) + if not is_valid: + print( + f"Assessment version {activity_id_version} does not exist" + ) + continue + answer.assessment_activity_id = activity_id_version + print(f"{answer.id=} {applet_id=} {activity_id_version}") + answers.append(answer) + return answers diff --git a/src/apps/answers/db/schemas.py b/src/apps/answers/db/schemas.py index 0d24c885e4a..144f2409c15 100644 --- a/src/apps/answers/db/schemas.py +++ b/src/apps/answers/db/schemas.py @@ -59,3 +59,4 @@ class AnswerItemSchema(Base): local_end_time = Column(Time, nullable=True) is_assessment = Column(Boolean()) migrated_data = Column(JSONB()) + assessment_activity_id = Column(Text(), nullable=True, 
index=True) diff --git a/src/apps/answers/domain/answers.py b/src/apps/answers/domain/answers.py index 76ebe5b307c..49a96a9f401 100644 --- a/src/apps/answers/domain/answers.py +++ b/src/apps/answers/domain/answers.py @@ -55,7 +55,7 @@ class Slider(InternalModel): class ItemAnswerCreate(InternalModel): answer: str | None events: str | None - item_ids: list[uuid.UUID] | None + item_ids: list[uuid.UUID] identifier: str | None scheduled_time: datetime.datetime | None start_time: datetime.datetime @@ -112,6 +112,7 @@ class AssessmentAnswerCreate(InternalModel): answer: str item_ids: list[uuid.UUID] reviewer_public_key: str + assessment_version_id: str class AnswerDate(InternalModel): @@ -180,7 +181,11 @@ class AssessmentAnswer(InternalModel): answer: str | None item_ids: list[str] = Field(default_factory=list) items: list[PublicActivityItemFull] = Field(default_factory=list) + items_last: list[PublicActivityItemFull] | None = Field( + default_factory=list + ) is_edited: bool = False + versions: list[str] = [] class Reviewer(InternalModel): @@ -235,6 +240,10 @@ class AssessmentAnswerPublic(PublicModel): answer: str | None item_ids: list[str] = Field(default_factory=list) items: list[PublicActivityItemFull] = Field(default_factory=list) + items_last: list[PublicActivityItemFull] | None = Field( + default_factory=list + ) + versions: list[str] class AnswerNote(InternalModel): diff --git a/src/apps/answers/filters.py b/src/apps/answers/filters.py index 65ec85a2496..a9a224fd143 100644 --- a/src/apps/answers/filters.py +++ b/src/apps/answers/filters.py @@ -33,8 +33,8 @@ class AppletSubmitDateFilter(BaseQueryParams): class AnswerExportFilters(BaseQueryParams): respondent_ids: list[uuid.UUID] | None = Field(Query(None)) - from_date: datetime.date | None = None - to_date: datetime.date | None = None + from_date: datetime.datetime | None = None + to_date: datetime.datetime | None = None limit: int = 10000 diff --git a/src/apps/answers/fixtures/arbitrary_server_answers.json b/src/apps/answers/fixtures/arbitrary_server_answers.json index 65336af1132..e3ff5891191 100644 --- a/src/apps/answers/fixtures/arbitrary_server_answers.json +++ b/src/apps/answers/fixtures/arbitrary_server_answers.json @@ -264,6 +264,27 @@ "subscale_setting": null, "extra_fields": {} } + }, + { + "table": "activities", + "fields": { + "id": "09e3dbf0-aefb-4d0e-9177-bdb321bf3621", + "created_at": "2023-01-05T15:49:51.752113", + "updated_at": "2023-01-05T15:49:51.752113", + "applet_id": "92917a56-d586-4613-b7aa-991f2c4b15b8", + "name": "PHQ3", + "description": { + "en": "PHQ2", + "fr": "PHQ2" + }, + "splash_screen": "", + "image": "", + "show_all_at_once": false, + "is_skippable": false, + "is_reviewable": true, + "response_is_editable": false, + "order": 1.0 + } }, { "table": "activity_items", @@ -652,6 +673,110 @@ "order": 3, "id_version": "a18d3409-2c96-4a5e-a1f3-1c1c14be0018_1.1.0" } + }, + { + "table": "activity_histories", + "fields": { + "id": "09e3dbf0-aefb-4d0e-9177-bdb321bf3621", + "created_at": "2023-01-05T15:49:51.752113", + "updated_at": "2023-01-05T15:49:51.752113", + "is_deleted": false, + "applet_id": "92917a56-d586-4613-b7aa-991f2c4b15b8_1.1.0", + "name": "PHQ3", + "description": { + "en": "PHQ2", + "fr": "PHQ2" + }, + "splash_screen": "", + "image": "", + "show_all_at_once": false, + "is_skippable": false, + "is_reviewable": true, + "response_is_editable": false, + "order": 1.0, + "id_version": "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0", + "scores_and_reports": { + "generate_report": true, + "scores": [ + { + "name": 
"Score 1", + "id": "score_1", + "calculation_type": "sum" + } + ] + } + } + }, + { + "table": "activity_item_histories", + "fields": { + "id": "a18d3409-2c96-4a5e-a1f3-1c1c14be0021", + "name": "a18d3409_2c96_4a5e_a1f3_1c1c14be0021", + "created_at": "2023-01-05T15:49:51.752113", + "updated_at": "2023-01-05T15:49:51.752113", + "is_deleted": false, + "activity_id": "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0", + "question": { + "en": "Little interest or pleasure in doing things?", + "fr": "Peu dintérêt ou de plaisir à faire les choses ?" + }, + "response_type": "singleSelect", + "response_values": { + "options": [ + { + "id": "2ba4bb83-ed1c-4140-a225-c2c9b4db66d2", + "text": "Not at all", + "image": "domain.com/image.jpg", + "score": null, + "tooltip": null, + "is_hidden": false, + "color": null + }, + { + "id": "2ba4bb83-ed1c-4140-a225-c2c9b4db66d3", + "text": "Several days", + "image": "domain.com/image.jpg", + "score": null, + "tooltip": null, + "is_hidden": false, + "color": null + }, + { + "id": "2ba4bb83-ed1c-4140-a225-c2c9b4db66d4", + "text": "More than half the days", + "image": "domain.com/image.jpg", + "score": null, + "tooltip": null, + "is_hidden": false, + "color": null + }, + { + "id": "2ba4bb83-ed1c-4140-a225-c2c9b4db66d5", + "text": "Nearly every day", + "image": "domain.com/image.jpg", + "score": null, + "tooltip": null, + "is_hidden": false, + "color": null + } + ] + }, + "config": { + "randomize_options": false, + "timer": 0, + "add_scores": false, + "set_alerts": false, + "add_tooltip": false, + "set_palette": false, + "remove_back_button": false, + "skippable_item": false, + "additional_response_option": { + "text_input_option": false, + "text_input_required": false + } + }, + "order": 1, + "id_version": "a18d3409-2c96-4a5e-a1f3-1c1c14be0021_1.1.0" + } } - ] diff --git a/src/apps/answers/service.py b/src/apps/answers/service.py index e9b85d5a855..e9d2ec609c0 100644 --- a/src/apps/answers/service.py +++ b/src/apps/answers/service.py @@ -61,6 +61,7 @@ ActivityIsNotAssessment, AnswerAccessDeniedError, AnswerNoteAccessDeniedError, + AnswerNotFoundError, NonPublicAppletError, ReportServerError, ReportServerIsNotConfigured, @@ -346,6 +347,8 @@ async def get_by_id( answer_items = await AnswerItemsCRUD( self.answer_session ).get_by_answer_and_activity(answer_id, [pk(activity_id)]) + if not answer_items: + raise AnswerNotFoundError() answer_item = answer_items[0] activity_items = await ActivityItemHistoryService( @@ -455,31 +458,64 @@ async def get_assessment_by_answer_id( assert self.user_id await self._validate_answer_access(applet_id, answer_id) - schema = await AnswersCRUD(self.answer_session).get_by_id(answer_id) - pk = self._generate_history_id(schema.version) - - activity_items = await ActivityItemHistoriesCRUD( - self.session - ).get_applets_assessments(pk(applet_id)) - if len(activity_items) == 0: - return AssessmentAnswer(items=activity_items) - assessment_answer = await AnswerItemsCRUD( self.answer_session ).get_assessment(answer_id, self.user_id) - answer = AssessmentAnswer( - reviewer_public_key=assessment_answer.user_public_key - if assessment_answer - else None, - answer=assessment_answer.answer if assessment_answer else None, - item_ids=assessment_answer.item_ids if assessment_answer else [], - items=activity_items, - is_edited=assessment_answer.created_at - != assessment_answer.updated_at # noqa - if assessment_answer - else False, - ) + items_crud = ActivityItemHistoriesCRUD(self.session) + last = items_crud.get_applets_assessments(applet_id) + if 
assessment_answer: + current = items_crud.get_assessment_activity_items( + assessment_answer.assessment_activity_id + ) + items_last, items_current = await asyncio.gather(last, current) + else: + items_last = await last + items_current = None + + if len(items_last) == 0: + return AssessmentAnswer(items=items_last) + + if items_last == items_current and assessment_answer: + answer = AssessmentAnswer( + reviewer_public_key=assessment_answer.user_public_key + if assessment_answer + else None, + answer=assessment_answer.answer if assessment_answer else None, + item_ids=assessment_answer.item_ids + if assessment_answer + else [], + items=items_last, + is_edited=assessment_answer.created_at + != assessment_answer.updated_at # noqa + if assessment_answer + else False, + versions=[assessment_answer.assessment_activity_id], + ) + else: + if assessment_answer: + versions = [ + assessment_answer.assessment_activity_id, + items_last[0].activity_id, + ] + else: + versions = [items_last[0].activity_id] + answer = AssessmentAnswer( + reviewer_public_key=assessment_answer.user_public_key + if assessment_answer + else None, + answer=assessment_answer.answer if assessment_answer else None, + item_ids=assessment_answer.item_ids + if assessment_answer + else [], + items=items_current if assessment_answer else items_last, + items_last=items_last if assessment_answer else None, + is_edited=assessment_answer.created_at + != assessment_answer.updated_at # noqa + if assessment_answer + else False, + versions=versions, + ) return answer async def get_reviews_by_answer_id( @@ -488,12 +524,16 @@ async def get_reviews_by_answer_id( assert self.user_id await self._validate_answer_access(applet_id, answer_id) - schema = await AnswersCRUD(self.answer_session).get_by_id(answer_id) - pk = self._generate_history_id(schema.version) + reviewer_activity_version = await AnswerItemsCRUD( + self.answer_session + ).get_assessment_activity_id(answer_id) + if not reviewer_activity_version: + return [] + activity_versions = [t[1] for t in reviewer_activity_version] activity_items = await ActivityItemHistoriesCRUD( self.session - ).get_applets_assessments(pk(applet_id)) + ).get_by_activity_id_versions(activity_versions) reviews = await AnswerItemsCRUD( self.answer_session @@ -506,6 +546,12 @@ async def get_reviews_by_answer_id( user = next( filter(lambda u: u.id == schema.respondent_id, users), None ) + current_activity_items = list( + filter( + lambda i: i.activity_id == schema.assessment_activity_id, + activity_items, + ) + ) if not user: continue results.append( @@ -513,7 +559,7 @@ async def get_reviews_by_answer_id( reviewer_public_key=schema.user_public_key, answer=schema.answer, item_ids=schema.item_ids, - items=activity_items, + items=current_activity_items, reviewer=dict( first_name=user.first_name, last_name=user.last_name ), @@ -547,6 +593,7 @@ async def create_assessment_answer( is_assessment=True, start_datetime=datetime.datetime.utcnow(), end_datetime=datetime.datetime.utcnow(), + assessment_activity_id=schema.assessment_version_id, ) ) else: @@ -563,6 +610,7 @@ async def create_assessment_answer( end_datetime=now, created_at=now, updated_at=now, + assessment_activity_id=schema.assessment_version_id, ) ) @@ -638,9 +686,6 @@ async def get_export_data( if answer.activity_history_id: activity_hist_ids.add(answer.activity_history_id) - reviewer_activities_coro = ActivityHistoriesCRUD( - self.session - ).get_reviewable_activities(list(applet_assessment_ids)) flows_coro = FlowsHistoryCRUD(self.session).get_by_id_versions( 
list(flow_hist_ids) ) @@ -649,7 +694,6 @@ async def get_export_data( ).get_respondent_export_data(applet_id, list(respondent_ids)) coros_result = await asyncio.gather( - reviewer_activities_coro, flows_coro, user_map_coro, return_exceptions=True, @@ -658,13 +702,7 @@ async def get_export_data( if isinstance(res, BaseException): raise res - reviewer_activities, flows, user_map = coros_result - - reviewer_activity_map = {} - for activity in reviewer_activities: # type: ignore - reviewer_activity_map[activity.applet_id] = activity - activity_hist_ids.add(activity.id_version) - + flows, user_map = coros_result flow_map = {flow.id_version: flow for flow in flows} # type: ignore for answer in answers: @@ -678,12 +716,6 @@ async def get_export_data( if flow_id := answer.flow_history_id: if flow := flow_map.get(flow_id): answer.flow_name = flow.name - # assessment data - if answer.reviewed_answer_id: - if activity := reviewer_activity_map.get( - answer.applet_history_id - ): - answer.activity_history_id = activity.id_version repo_local = AnswersCRUD(self.session) activities_result = [] @@ -1217,7 +1249,10 @@ async def create_report( initial_answer.respondent_id, initial_answer.applet_id ) applet_full = await self._prepare_applet_data( - initial_answer.applet_id, initial_answer.version, applet.encryption + initial_answer.applet_id, + initial_answer.version, + applet.encryption, + non_performance=True, ) encryption = ReportServerEncryption(applet.report_public_key) @@ -1280,11 +1315,15 @@ def _is_activity_last_in_flow( return activity_id == flow["items"][-1]["activityId"].split("_")[0] async def _prepare_applet_data( - self, applet_id: uuid.UUID, version: str, encryption: dict + self, + applet_id: uuid.UUID, + version: str, + encryption: dict, + non_performance: bool = False, ): applet_full = await AppletHistoryService( self.session, applet_id, version - ).get_full() + ).get_full(non_performance) applet_full.encryption = Encryption(**encryption) return applet_full.dict(by_alias=True) diff --git a/src/apps/answers/tests/test_answers.py b/src/apps/answers/tests/test_answers.py index fcac78f4746..f2b6cd18de2 100644 --- a/src/apps/answers/tests/test_answers.py +++ b/src/apps/answers/tests/test_answers.py @@ -243,6 +243,10 @@ async def test_answer_skippable_activity_items_create_for_respondent(self): answer=dict( start_time=1690188679657, end_time=1690188731636, + itemIds=[ + "a18d3409-2c96-4a5e-a1f3-1c1c14be0011", + "a18d3409-2c96-4a5e-a1f3-1c1c14be0012", + ], ), client=dict( appId="mindlogger-mobile", @@ -378,6 +382,10 @@ async def test_answer_with_skipping_all(self): answer=dict( start_time=1690188679657, end_time=1690188731636, + itemIds=[ + "a18d3409-2c96-4a5e-a1f3-1c1c14be0011", + "a18d3409-2c96-4a5e-a1f3-1c1c14be0012", + ], ), client=dict( appId="mindlogger-mobile", @@ -713,6 +721,9 @@ async def test_applet_assessment_create(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.0.0" + ), ), ) @@ -743,6 +754,9 @@ async def test_applet_assessment_create(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.0.0" + ), ), ) @@ -1165,6 +1179,9 @@ async def test_answers_export(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + 
"09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.0.0" + ), ), ) @@ -1310,9 +1327,9 @@ async def test_get_summary_activities(self): assert response.status_code == 200 assert response.json()["count"] == 1 - assert response.json()["result"][0]["name"] == "PHQ2 new" - assert response.json()["result"][0]["isPerformanceTask"] is True - assert response.json()["result"][0]["hasAnswer"] is False + assert response.json()["result"][0]["name"] == "Flanker" + assert response.json()["result"][0]["isPerformanceTask"] + assert not response.json()["result"][0]["hasAnswer"] @rollback async def test_get_summary_activities_after_submitted_answer(self): @@ -1361,7 +1378,7 @@ async def test_get_summary_activities_after_submitted_answer(self): assert response.status_code == 200 assert response.json()["count"] == 1 - assert response.json()["result"][0]["name"] == "PHQ2 new" + assert response.json()["result"][0]["name"] == "Flanker" assert response.json()["result"][0]["isPerformanceTask"] assert response.json()["result"][0]["hasAnswer"] diff --git a/src/apps/answers/tests/test_answers_arbitrary.py b/src/apps/answers/tests/test_answers_arbitrary.py index 024aae028bb..befec25de6b 100644 --- a/src/apps/answers/tests/test_answers_arbitrary.py +++ b/src/apps/answers/tests/test_answers_arbitrary.py @@ -264,6 +264,13 @@ async def test_answer_skippable_activity_items_create_for_respondent(self): answer=dict( start_time=1690188679657, end_time=1690188731636, + itemIds=[ + "f0ccc10a-2388-48da-a5a1-35e9b19cde5d", + "c6fd4e75-c5c1-4a99-89db-4044526b6ad5", + "f698d5c6-3861-46a1-a6e7-3bdae7228bce", + "8e5ef149-ce10-4590-bc03-594e5200ecb9", + "2bcf1de2-aff8-494e-af28-d1ce2602585f", + ], ), client=dict( appId="mindlogger-mobile", @@ -408,6 +415,13 @@ async def test_answer_with_skipping_all(self): answer=dict( start_time=1690188679657, end_time=1690188731636, + itemIds=[ + "f0ccc10a-2388-48da-a5a1-35e9b19cde5d", + "c6fd4e75-c5c1-4a99-89db-4044526b6ad5", + "f698d5c6-3861-46a1-a6e7-3bdae7228bce", + "8e5ef149-ce10-4590-bc03-594e5200ecb9", + "2bcf1de2-aff8-494e-af28-d1ce2602585f", + ], ), client=dict( appId="mindlogger-mobile", @@ -753,6 +767,9 @@ async def test_applet_assessment_create(self): answer="some answer", item_ids=["f0ccc10a-2388-48da-a5a1-35e9b19cde5d"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0" + ), ), ) @@ -783,6 +800,9 @@ async def test_applet_assessment_create(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0" + ), ), ) @@ -943,6 +963,9 @@ async def test_answers_export(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0" + ), ), ) @@ -1306,6 +1329,9 @@ async def test_answers_arbitrary_export(self): answer="some answer", item_ids=["a18d3409-2c96-4a5e-a1f3-1c1c14be0021"], reviewer_public_key="some public key", + assessment_version_id=( + "09e3dbf0-aefb-4d0e-9177-bdb321bf3621_1.1.0" + ), ), ) diff --git a/src/apps/applets/crud/applets.py b/src/apps/applets/crud/applets.py index 658200df74f..74e77fb59b0 100644 --- a/src/apps/applets/crud/applets.py +++ b/src/apps/applets/crud/applets.py @@ -273,6 +273,7 @@ async def get_name_duplicates( UserAppletAccessSchema.applet_id == AppletSchema.id, ) query = query.where(UserAppletAccessSchema.user_id == user_id) + query = 
query.where(AppletSchema.is_deleted == False) # noqa: E712 if exclude_applet_id: query = query.where(AppletSchema.id != exclude_applet_id) query = query.where( diff --git a/src/apps/applets/domain/applet_create_update.py b/src/apps/applets/domain/applet_create_update.py index c7cdc628bb5..5b420b95431 100644 --- a/src/apps/applets/domain/applet_create_update.py +++ b/src/apps/applets/domain/applet_create_update.py @@ -2,6 +2,9 @@ from apps.activities.domain.activity_create import ActivityCreate from apps.activities.domain.activity_update import ActivityUpdate +from apps.activities.domain.custom_validation import ( + validate_performance_task_type, +) from apps.activities.errors import ( AssessmentLimitExceed, DuplicateActivityFlowNameError, @@ -26,13 +29,13 @@ class AppletCreate(AppletReportConfigurationBase, AppletBase, InternalModel): @root_validator() def validate_existing_ids_for_duplicate(cls, values): - activities = values.get("activities", []) - flows = values.get("activity_flows", []) + activities: list[ActivityCreate] = values.get("activities", []) + flows: list[FlowCreate] = values.get("activity_flows", []) activity_names = set() flow_names = set() assessments_count = 0 - for activity in activities: # type:ActivityCreate + for activity in activities: if activity.name in activity_names: raise DuplicateActivityNameError() activity_names.add(activity.name) @@ -41,12 +44,16 @@ def validate_existing_ids_for_duplicate(cls, values): # if assessments_count > 1: # raise AssessmentLimitExceed() - for flow in flows: # type:FlowCreate + for flow in flows: if flow.name in flow_names: raise DuplicateActivityFlowNameError() flow_names.add(flow.name) return values + @root_validator + def validate_performance_task_type(cls, values): + return validate_performance_task_type(values) + class AppletUpdate(AppletBase, InternalModel): activities: list[ActivityUpdate] diff --git a/src/apps/applets/domain/applets/public_detail.py b/src/apps/applets/domain/applets/public_detail.py index 80ba917431b..f995f6c2eb7 100644 --- a/src/apps/applets/domain/applets/public_detail.py +++ b/src/apps/applets/domain/applets/public_detail.py @@ -1,12 +1,8 @@ import uuid -from pydantic import Field, validator +from pydantic import Field from apps.activities.domain.conditional_logic import ConditionalLogic -from apps.activities.domain.custom_validation import ( - validate_is_performance_task, - validate_performance_task_type, -) from apps.activities.domain.response_type_config import ( PerformanceTaskType, ResponseTypeConfig, @@ -47,17 +43,9 @@ class Activity(PublicModel): items: list[ActivityItem] = Field(default_factory=list) scores_and_reports: ScoresAndReports | None = None subscale_setting: SubscaleSetting | None = None - is_performance_task: bool = False performance_task_type: PerformanceTaskType | None = None report_included_item_name: str | None = None - - @validator("is_performance_task", always=True) - def validate_is_performance_task(cls, value, values): - return validate_is_performance_task(value, values) - - @validator("performance_task_type", always=True) - def validate_performance_task_type(cls, value, values): - return validate_performance_task_type(value, values) + is_performance_task: bool = False class ActivityFlowItem(PublicModel): diff --git a/src/apps/applets/service/applet.py b/src/apps/applets/service/applet.py index ed5a6437c15..3ad56b2093b 100644 --- a/src/apps/applets/service/applet.py +++ b/src/apps/applets/service/applet.py @@ -44,6 +44,7 @@ ) from apps.applets.service.applet_history_service 
import AppletHistoryService from apps.folders.crud import FolderAppletCRUD, FolderCRUD +from apps.schedule.service import ScheduleService from apps.shared.version import ( INITIAL_VERSION, VERSION_DIFFERENCE_ACTIVITY, @@ -200,13 +201,17 @@ async def update( self.session, self.user_id ).remove_applet_activities(applet_id) applet = await self._update(applet_id, update_data, next_version) - applet.activities = await ActivityService( self.session, self.user_id ).update_create(applet_id, update_data.activities) activity_key_id_map = dict() + activity_ids = [] + assessment_id = None for activity in applet.activities: activity_key_id_map[activity.key] = activity.id + activity_ids.append(activity.id) + if activity.is_reviewable: + assessment_id = activity.id applet.activity_flows = await FlowService(self.session).update_create( applet_id, update_data.activity_flows, activity_key_id_map ) @@ -215,6 +220,19 @@ async def update( self.session, applet.id, applet.version ).add_history(self.user_id, applet) + event_serv = ScheduleService(self.session) + to_await = [] + if assessment_id: + to_await.append( + event_serv.delete_by_activity_ids(applet_id, [assessment_id]) + ) + to_await.append( + event_serv.create_default_schedules_if_not_exist( + applet_id=applet.id, + activity_ids=activity_ids, + ) + ) + await asyncio.gather(*to_await) return applet async def update_encryption( diff --git a/src/apps/applets/service/applet_history_service.py b/src/apps/applets/service/applet_history_service.py index 2f957dfb7e9..f6abe2b3836 100644 --- a/src/apps/applets/service/applet_history_service.py +++ b/src/apps/applets/service/applet_history_service.py @@ -106,14 +106,14 @@ async def get_prev_version(self): return prev_version - async def get_full(self) -> AppletHistoryFull: + async def get_full(self, non_performance=False) -> AppletHistoryFull: schema = await AppletHistoriesCRUD(self.session).get_by_id_version( self._id_version ) applet = AppletHistoryFull.from_orm(schema) applet.activities = await ActivityHistoryService( self.session, self._applet_id, self._version - ).get_full() + ).get_full(non_performance) applet.activity_flows = await FlowHistoryService( self.session, self._applet_id, self._version ).get_full() diff --git a/src/apps/applets/tests/test_applet_activity_items.py b/src/apps/applets/tests/test_applet_activity_items.py index a365a70b0bd..120690a1ef0 100644 --- a/src/apps/applets/tests/test_applet_activity_items.py +++ b/src/apps/applets/tests/test_applet_activity_items.py @@ -1,9 +1,185 @@ import uuid +import pytest + from apps.shared.test import BaseTest from infrastructure.database import rollback +@pytest.fixture +def activity_flanker_data(): + return dict( + name="Activity_flanker", + key="577dbbda-3afc-4962-842b-8d8d11588bfe", + description=dict( + en="Description Activity flanker.", + fr="Description Activity flanker.", + ), + items=[ + dict( + name="Flanker_VSR_instructionsn", + # Nobody knows for what we need so big description + question=dict( + en="## General Instructions\n\n\n You will " + "see arrows presented at the center of the " + "screen that point either to the left ‘<’ " + "or right ‘>’.\n Press the left button " + "if the arrow is pointing to the left ‘<’ " + "or press the right button if the arrow is " + "pointing to the right ‘>’.\n These arrows " + "will appear in the center of a line of " + "other items. Sometimes, these other items " + "will be arrows pointing in the same " + "direction, e.g.. ‘> > > > >’, or in the " + "opposite direction, e.g. 
‘< < > < <’.\n " + "Your job is to respond to the central " + "arrow, no matter what direction the other " + "arrows are pointing.\n For example, you " + "would press the left button for both " + "‘< < < < <’, and ‘> > < > >’ because the " + "middle arrow points to the left.\n " + "Finally, in some trials dashes ‘ - ’ " + "will appear beside the central arrow.\n " + "Again, respond only to the direction " + "of the central arrow. Please respond " + "as quickly and accurately as possible.", + fr="Flanker General instruction text.", + ), + response_type="message", + response_values=None, + config=dict( + remove_back_button=False, + timer=None, + ), + ), + dict( + name="Flanker_Practice_instructions_1", + question=dict( + en="## Instructions\n\nNow you will have a " + "chance to practice the task before moving " + "on to the test phase.\nRemember to " + "respond only to the central arrow\n", + fr="Flanker Сalibration/Practice " "instruction 1 text.", + ), + response_type="message", + response_values=None, + config=dict( + remove_back_button=False, + timer=None, + ), + ), + dict( + name="Flanker_Practise_1", + question=dict( + en="Flanker_Practise_1", + fr="Flanker_Practise_1", + ), + response_type="flanker", + response_values=None, + config=dict( + stimulusTrials=[ + { + "id": "1", + "image": "https://600.jpg", + "text": "left-con", + "value": 0, + "weight": 10, + }, + { + "id": "2", + "image": "https://600.jpg", + "text": "right-inc", + "value": 1, + "weight": 10, + }, + { + "id": "3", + "image": "https://600.jpg", + "text": "left-inc", + "value": 0, + "weight": 10, + }, + { + "id": "4", + "image": "https://600.jpg", + "text": "right-con", + "value": 1, + "weight": 10, + }, + { + "id": "5", + "image": "https://600.jpg", + "text": "left-neut", + "value": 0, + "weight": 10, + }, + { + "id": "6", + "image": "https://600.jpg", + "text": "right-neut", + "value": 1, + "weight": 10, + }, + ], + blocks=[ + { + "name": "Block 1", + "order": [ + "left-con", + "right-con", + "left-inc", + "right-inc", + "left-neut", + "right-neut", + ], + }, + { + "name": "Block 2", + "order": [ + "left-con", + "right-con", + "left-inc", + "right-inc", + "left-neut", + "right-neut", + ], + }, + ], + buttons=[ + { + "text": "Button_1_name_<", + "image": "https://1.jpg", + "value": 0, + }, + { + "text": "Button_2_name_>", + "image": "https://2.jpg", + "value": 1, + }, + ], + nextButton="OK", + fixationDuration=500, + fixationScreen={ + "value": "FixationScreen_value", + "image": "https://fixation-screen.jpg", + }, + minimumAccuracy=75, + sampleSize=1, + samplingMethod="randomize-order", + showFeedback=True, + showFixation=True, + showResults=False, + trialDuration=3000, + isLastPractice=False, + isFirstPractice=True, + isLastTest=False, + blockType="practice", + ), + ), + ], + ) + + class TestActivityItems(BaseTest): fixtures = [ "users/fixtures/users.json", @@ -1145,228 +1321,6 @@ async def test_creating_applet_with_touch_activity_items(self): ) assert response.status_code == 200 - @rollback - async def test_creating_applet_with_flanker_activity_items(self): - await self.client.login( - self.login_url, "tom@mindlogger.com", "Test1234!" 
- ) - create_data = dict( - display_name="flanker_activity_applet", - encryption=dict( - public_key=uuid.uuid4().hex, - prime=uuid.uuid4().hex, - base=uuid.uuid4().hex, - account_id=str(uuid.uuid4()), - ), - description=dict( - en="Performance Tasks flanker Applet", - fr="Performance Tasks flanker Applet", - ), - about=dict( - en="Applet flanker Task Builder Activity", - fr="Applet flanker Task Builder Activity", - ), - activities=[ - dict( - name="Activity_flanker", - key="577dbbda-3afc-4962-842b-8d8d11588bfe", - description=dict( - en="Description Activity flanker.", - fr="Description Activity flanker.", - ), - items=[ - dict( - name="Flanker_VSR_instructionsn", - question=dict( - en="## General Instructions\n\n\n You will " - "see arrows presented at the center of the " - "screen that point either to the left ‘<’ " - "or right ‘>’.\n Press the left button " - "if the arrow is pointing to the left ‘<’ " - "or press the right button if the arrow is " - "pointing to the right ‘>’.\n These arrows " - "will appear in the center of a line of " - "other items. Sometimes, these other items " - "will be arrows pointing in the same " - "direction, e.g.. ‘> > > > >’, or in the " - "opposite direction, e.g. ‘< < > < <’.\n " - "Your job is to respond to the central " - "arrow, no matter what direction the other " - "arrows are pointing.\n For example, you " - "would press the left button for both " - "‘< < < < <’, and ‘> > < > >’ because the " - "middle arrow points to the left.\n " - "Finally, in some trials dashes ‘ - ’ " - "will appear beside the central arrow.\n " - "Again, respond only to the direction " - "of the central arrow. Please respond " - "as quickly and accurately as possible.", - fr="Flanker General instruction text.", - ), - response_type="message", - response_values=None, - config=dict( - remove_back_button=False, - timer=None, - ), - ), - dict( - name="Flanker_Practice_instructions_1", - question=dict( - en="## Instructions\n\nNow you will have a " - "chance to practice the task before moving " - "on to the test phase.\nRemember to " - "respond only to the central arrow\n", - fr="Flanker Сalibration/Practice " - "instruction 1 text.", - ), - response_type="message", - response_values=None, - config=dict( - remove_back_button=False, - timer=None, - ), - ), - dict( - name="Flanker_Practise_1", - question=dict( - en="Flanker_Practise_1", - fr="Flanker_Practise_1", - ), - response_type="flanker", - response_values=None, - config=dict( - stimulusTrials=[ - { - "id": "1", - "image": "https://600.jpg", - "text": "left-con", - "value": 0, - "weight": 10, - }, - { - "id": "2", - "image": "https://600.jpg", - "text": "right-inc", - "value": 1, - "weight": 10, - }, - { - "id": "3", - "image": "https://600.jpg", - "text": "left-inc", - "value": 0, - "weight": 10, - }, - { - "id": "4", - "image": "https://600.jpg", - "text": "right-con", - "value": 1, - "weight": 10, - }, - { - "id": "5", - "image": "https://600.jpg", - "text": "left-neut", - "value": 0, - "weight": 10, - }, - { - "id": "6", - "image": "https://600.jpg", - "text": "right-neut", - "value": 1, - "weight": 10, - }, - ], - blocks=[ - { - "name": "Block 1", - "order": [ - "left-con", - "right-con", - "left-inc", - "right-inc", - "left-neut", - "right-neut", - ], - }, - { - "name": "Block 2", - "order": [ - "left-con", - "right-con", - "left-inc", - "right-inc", - "left-neut", - "right-neut", - ], - }, - ], - buttons=[ - { - "text": "Button_1_name_<", - "image": "https://1.jpg", - "value": 0, - }, - { - "text": "Button_2_name_>", - 
"image": "https://2.jpg", - "value": 1, - }, - ], - nextButton="OK", - fixationDuration=500, - fixationScreen={ - "value": "FixationScreen_value", - "image": "https://fixation-screen.jpg", - }, - minimumAccuracy=75, - sampleSize=1, - samplingMethod="randomize-order", - showFeedback=True, - showFixation=True, - showResults=False, - trialDuration=3000, - isLastPractice=False, - isFirstPractice=True, - isLastTest=False, - blockType="practice", - ), - ), - ], - ), - ], - activity_flows=[ - dict( - name="name_activityFlow", - description=dict( - en="description activityFlow", - fr="description activityFlow", - ), - items=[ - dict( - activity_key="577dbbda-3afc-" - "4962-842b-8d8d11588bfe" - ) - ], - ) - ], - ) - response = await self.client.post( - self.applet_create_url.format( - owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1" - ), - data=create_data, - ) - assert response.status_code == 201, response.json() - - response = await self.client.get( - self.applet_detail_url.format(pk=response.json()["result"]["id"]) - ) - assert response.status_code == 200 - @rollback async def test_creating_applet_with_activity_items_condition(self): await self.client.login( @@ -2135,41 +2089,86 @@ async def test_creating_activity_items_without_option_value(self): assert response.status_code == 200 @rollback - async def test_create_applet_with_preformance_activity_item(self): + async def test_create_applet_with_flanker_preformance_task( + self, activity_flanker_data + ): await self.client.login( self.login_url, "tom@mindlogger.com", "Test1234!" ) create_data = dict( - display_name="User daily behave", + display_name="Flanker", encryption=dict( public_key=uuid.uuid4().hex, prime=uuid.uuid4().hex, base=uuid.uuid4().hex, account_id=str(uuid.uuid4()), ), - description=dict( - en="Understand users behave", - fr="Comprendre le comportement des utilisateurs", + description=dict(en="Flanker", fr="Flanker"), + about=dict(en="Flanker", fr="Flanker"), + activities=[activity_flanker_data], + # Empty, but required + activity_flows=[], + ) + + response = await self.client.post( + self.applet_create_url.format( + owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1" ), - about=dict( - en="Understand users behave", - fr="Comprendre le comportement des utilisateurs", + data=create_data, + ) + assert response.status_code == 201, response.json() + + assert response.json()["result"]["activities"][0]["isPerformanceTask"] + assert ( + response.json()["result"]["activities"][0]["performanceTaskType"] + == "flanker" + ) + + # Check that the 'get' after creating new applet returns correct data + response = await self.client.get( + self.applet_workspace_detail_url.format( + owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1", + pk=response.json()["result"]["id"], + ) + ) + assert response.status_code == 200 + assert response.json()["result"]["activities"][0]["isPerformanceTask"] + assert ( + response.json()["result"]["activities"][0]["performanceTaskType"] + == "flanker" + ) + + @rollback + async def test_applet_add_performance_task_to_the_applet( + self, activity_flanker_data + ): + await self.client.login( + self.login_url, "tom@mindlogger.com", "Test1234!" 
+ ) + + create_data = dict( + display_name="Add flanker to existing applet", + encryption=dict( + public_key=uuid.uuid4().hex, + prime=uuid.uuid4().hex, + base=uuid.uuid4().hex, + account_id=str(uuid.uuid4()), ), + description=dict(en="Add flanker to existing applet"), + about=dict(en="Add flanker to existing applet"), activities=[ dict( name="Morning activity", key="577dbbda-3afc-4962-842b-8d8d11588bfe", description=dict( en="Understand morning feelings.", - fr="Understand morning feelings.", ), items=[ dict( name="activity_item_text", question=dict( en="How had you slept?", - fr="How had you slept?", ), response_type="text", response_values=None, @@ -2187,226 +2186,47 @@ async def test_create_applet_with_preformance_activity_item(self): ], ), ], - activity_flows=[ - dict( - name="Morning questionnaire", - description=dict( - en="Understand how was the morning", - fr="Understand how was the morning", - ), - items=[ - dict( - activity_key="577dbbda-3afc-" - "4962-842b-8d8d11588bfe" - ) - ], - ) - ], + # Empty, but required + activity_flows=[], ) + response = await self.client.post( self.applet_create_url.format( owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1" ), data=create_data, ) - assert response.status_code == 201, response.json() - assert ( - response.json()["result"]["activities"][0]["isPerformanceTask"] - is False - ) - assert ( - response.json()["result"]["activities"][0]["performanceTaskType"] - is None + assert response.status_code == 201 + activity = response.json()["result"]["activities"][0] + assert not activity["isPerformanceTask"] + assert not activity["performanceTaskType"] + # Test that get after creating new applet returns correct data + # Generaly we don't need to test, tested data, but for now let leave + # it here + response = await self.client.get( + self.applet_workspace_detail_url.format( + owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1", + pk=response.json()["result"]["id"], + ) ) + assert response.status_code == 200 + activity = response.json()["result"]["activities"][0] + assert not activity["isPerformanceTask"] + assert not activity["performanceTaskType"] - create_data["activities"] = [ - dict( - name="Activity_flanker", - key="577dbbda-3afc-4962-842b-8d8d11588bfe", - description=dict( - en="Description Activity flanker.", - fr="Description Activity flanker.", - ), - items=[ - dict( - name="Flanker_VSR_instructionsn", - question=dict( - en="## General Instructions\n\n\n You will " - "see arrows presented at the center of the " - "screen that point either to the left ‘<’ " - "or right ‘>’.\n Press the left button " - "if the arrow is pointing to the left ‘<’ " - "or press the right button if the arrow is " - "pointing to the right ‘>’.\n These arrows " - "will appear in the center of a line of " - "other items. Sometimes, these other items " - "will be arrows pointing in the same " - "direction, e.g.. ‘> > > > >’, or in the " - "opposite direction, e.g. ‘< < > < <’.\n " - "Your job is to respond to the central " - "arrow, no matter what direction the other " - "arrows are pointing.\n For example, you " - "would press the left button for both " - "‘< < < < <’, and ‘> > < > >’ because the " - "middle arrow points to the left.\n " - "Finally, in some trials dashes ‘ - ’ " - "will appear beside the central arrow.\n " - "Again, respond only to the direction " - "of the central arrow. 
Please respond " - "as quickly and accurately as possible.", - fr="Flanker General instruction text.", - ), - response_type="message", - response_values=None, - config=dict( - remove_back_button=False, - timer=None, - ), - ), - dict( - name="Flanker_Practice_instructions_1", - question=dict( - en="## Instructions\n\nNow you will have a " - "chance to practice the task before moving " - "on to the test phase.\nRemember to " - "respond only to the central arrow\n", - fr="Flanker Сalibration/Practice " - "instruction 1 text.", - ), - response_type="message", - response_values=None, - config=dict( - remove_back_button=False, - timer=None, - ), - ), - dict( - name="Flanker_Practise_1", - question=dict( - en="Flanker_Practise_1", - fr="Flanker_Practise_1", - ), - response_type="flanker", - response_values=None, - config=dict( - stimulusTrials=[ - { - "id": "1", - "image": "https://600.jpg", - "text": "left-con", - "value": 0, - "weight": 10, - }, - { - "id": "2", - "image": "https://600.jpg", - "text": "right-inc", - "value": 1, - "weight": 10, - }, - { - "id": "3", - "image": "https://600.jpg", - "text": "left-inc", - "value": 0, - "weight": 10, - }, - { - "id": "4", - "image": "https://600.jpg", - "text": "right-con", - "value": 1, - "weight": 10, - }, - { - "id": "5", - "image": "https://600.jpg", - "text": "left-neut", - "value": 0, - "weight": 10, - }, - { - "id": "6", - "image": "https://600.jpg", - "text": "right-neut", - "value": 1, - "weight": 10, - }, - ], - blocks=[ - { - "name": "Block 1", - "order": [ - "left-con", - "right-con", - "left-inc", - "right-inc", - "left-neut", - "right-neut", - ], - }, - { - "name": "Block 2", - "order": [ - "left-con", - "right-con", - "left-inc", - "right-inc", - "left-neut", - "right-neut", - ], - }, - ], - buttons=[ - { - "text": "Button_1_name_<", - "image": "https://1.jpg", - "value": 0, - }, - { - "text": "Button_2_name_>", - "image": "https://2.jpg", - "value": 1, - }, - ], - nextButton="OK", - fixationDuration=500, - fixationScreen={ - "value": "FixationScreen_value", - "image": "https://fixation-screen.jpg", - }, - minimumAccuracy=75, - sampleSize=1, - samplingMethod="randomize-order", - showFeedback=True, - showFixation=True, - showResults=False, - trialDuration=3000, - isLastPractice=False, - isFirstPractice=True, - isLastTest=False, - blockType="practice", - ), - ), - ], - ), - ] + # Add flanker performance task + create_data["activities"].append(activity_flanker_data) response = await self.client.put( self.applet_detail_url.format(pk=response.json()["result"]["id"]), data=create_data, ) assert response.status_code == 200 + flanker = response.json()["result"]["activities"][1] + assert flanker["isPerformanceTask"] + assert flanker["performanceTaskType"] == "flanker" - assert ( - response.json()["result"]["activities"][0]["isPerformanceTask"] - is True - ) - assert ( - response.json()["result"]["activities"][0]["performanceTaskType"] - == "flanker" - ) - + # Check the 'get' method response = await self.client.get( self.applet_workspace_detail_url.format( owner_id="7484f34a-3acc-4ee6-8a94-fd7299502fa1", @@ -2414,11 +2234,6 @@ async def test_create_applet_with_preformance_activity_item(self): ) ) assert response.status_code == 200 - assert ( - response.json()["result"]["activities"][0]["isPerformanceTask"] - is True - ) - assert ( - response.json()["result"]["activities"][0]["performanceTaskType"] - == "flanker" - ) + flanker = response.json()["result"]["activities"][1] + assert flanker["isPerformanceTask"] + assert 
flanker["performanceTaskType"] == "flanker" diff --git a/src/apps/file/api/file.py b/src/apps/file/api/file.py index 629f39a1f4f..e9bead42610 100644 --- a/src/apps/file/api/file.py +++ b/src/apps/file/api/file.py @@ -6,7 +6,7 @@ from functools import partial from urllib.parse import quote -import aiofiles # type: ignore[import] +import aiofiles import pytz from botocore.exceptions import ClientError from fastapi import Body, Depends, File, Query, UploadFile diff --git a/src/apps/healthcheck/api.py b/src/apps/healthcheck/api.py index 151fd443a68..1f421110cb7 100644 --- a/src/apps/healthcheck/api.py +++ b/src/apps/healthcheck/api.py @@ -1,4 +1,8 @@ +import asyncio + +from fastapi import Query from fastapi.responses import Response +from starlette import status def readiness(): @@ -7,3 +11,19 @@ def readiness(): def liveness(): return Response("Liveness - OK!") + + +statuses = { + code for var, code in vars(status).items() if var.startswith("HTTP_") +} +exclude = {301, 302} +supported_statuses = statuses - exclude + + +async def statuscode( + code: int = 200, timeout: float = Query(0.0, ge=0.0, le=60.0) +): + if code not in supported_statuses: + return Response("Wrong status code", status_code=400) + await asyncio.sleep(timeout) + return Response(status_code=code) diff --git a/src/apps/healthcheck/router.py b/src/apps/healthcheck/router.py index a543a3a2035..3f54710b30d 100644 --- a/src/apps/healthcheck/router.py +++ b/src/apps/healthcheck/router.py @@ -1,9 +1,11 @@ from fastapi import status from fastapi.routing import APIRouter -from apps.healthcheck.api import liveness, readiness +from apps.healthcheck.api import liveness, readiness, statuscode router = APIRouter(tags=["Health check"]) router.get("/readiness", status_code=status.HTTP_200_OK)(readiness) router.get("/liveness", status_code=status.HTTP_200_OK)(liveness) +router.get("/statuscode")(statuscode) +router.post("/statuscode")(statuscode) diff --git a/src/apps/invitations/crud.py b/src/apps/invitations/crud.py index 6813a476685..3f899d4e28d 100644 --- a/src/apps/invitations/crud.py +++ b/src/apps/invitations/crud.py @@ -104,6 +104,7 @@ async def get_pending_by_invited_email( first_name=invitation.first_name, last_name=invitation.last_name, created_at=invitation.created_at, + nickname=invitation.nickname, ) ) return results diff --git a/src/apps/invitations/services.py b/src/apps/invitations/services.py index 6cf47039501..e617ad9f0db 100644 --- a/src/apps/invitations/services.py +++ b/src/apps/invitations/services.py @@ -193,9 +193,9 @@ async def send_respondent_invitation( try: await UsersCRUD(self.session).get_by_email(schema.email) except UserNotFound: - path = "invitation_new_user_en" + path = f"invitation_new_user_{schema.language or 'en'}" else: - path = "invitation_registered_user_en" + path = f"invitation_registered_user_{schema.language or 'en'}" # Send email to the user service = MailingService() diff --git a/src/apps/jsonld_converter/dependencies.py b/src/apps/jsonld_converter/dependencies.py index be291800bf4..fb67e45bffb 100644 --- a/src/apps/jsonld_converter/dependencies.py +++ b/src/apps/jsonld_converter/dependencies.py @@ -1,6 +1,6 @@ from typing import Callable -from cachetools import LRUCache # type: ignore[import] +from cachetools import LRUCache from fastapi import Depends from pyld import ContextResolver from pyld.jsonld import requests_document_loader @@ -19,7 +19,7 @@ def get_document_loader() -> Callable: def get_context_resolver( document_loader: Callable = Depends(get_document_loader), ) -> ContextResolver: - 
_resolved_context_cache = LRUCache(maxsize=100) + _resolved_context_cache: LRUCache = LRUCache(maxsize=100) return ContextResolver(_resolved_context_cache, document_loader) diff --git a/src/apps/library/service.py b/src/apps/library/service.py index ba08a74d41f..f6db6d2a242 100644 --- a/src/apps/library/service.py +++ b/src/apps/library/service.py @@ -9,10 +9,6 @@ ActivityHistoriesCRUD, ActivityItemHistoriesCRUD, ) -from apps.activities.domain.custom_validation import ( - validate_is_performance_task, - validate_performance_task_type, -) from apps.activity_flows.crud import FlowItemHistoriesCRUD, FlowsHistoryCRUD from apps.applets.crud import AppletHistoriesCRUD, AppletsCRUD from apps.library.crud import CartCRUD, LibraryCRUD @@ -196,12 +192,8 @@ async def _get_full_library_item( show_all_at_once=activity.show_all_at_once, is_skippable=activity.is_skippable, is_reviewable=activity.is_reviewable, - is_performance_task=validate_is_performance_task( - False, {"items": items} - ), - performance_task_type=validate_performance_task_type( - None, {"items": items} - ), + is_performance_task=activity.is_performance_task, + performance_task_type=activity.performance_task_type, response_is_editable=activity.response_is_editable, is_hidden=activity.is_hidden, scores_and_reports=activity.scores_and_reports, diff --git a/src/apps/mailing/static/templates/applet_create_success_en.html b/src/apps/mailing/static/templates/applet_create_success_en.html index e5700399f4e..de57de08be3 100644 --- a/src/apps/mailing/static/templates/applet_create_success_en.html +++ b/src/apps/mailing/static/templates/applet_create_success_en.html @@ -14,12 +14,12 @@ - You can now send invitations and create schedules. + It is ready for you to send out invitations, set schedules, and configure in other ways. - - Important: Please ensure your applet password is stored securely outside of MindLogger. This password can never be changed or retrieved if forgotten due to security reasons. You will lose all data previously collected if this password is lost. + + Please ensure the Applet password is stored securely outside of MindLogger. Due to security reasons, this password can never be changed or retrieved if forgotten. If you forget this password, you will lose all previously collected data. {% include 'blocks/write_us_en.html' %} diff --git a/src/apps/mailing/static/templates/blocks/write_us_en.html b/src/apps/mailing/static/templates/blocks/write_us_en.html index 03cb2016481..abd3c9ad9b5 100644 --- a/src/apps/mailing/static/templates/blocks/write_us_en.html +++ b/src/apps/mailing/static/templates/blocks/write_us_en.html @@ -1,7 +1,7 @@ - If you have had any issues joining an applet or creating an account, please + If you have had any issues creating an account or joining someone else’s applet, please write - us and we can help you. + to us for help. 
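
The healthcheck changes above add a `/statuscode` endpoint that echoes back an arbitrary supported HTTP status code after an optional delay, which is useful for probing load balancers, proxies, and client retry behaviour. Below is a minimal usage sketch; it is not part of the diff, and it assumes the API is served at http://localhost:8000 and that the `httpx` client is available (both the base URL and the client choice are assumptions, not taken from the repository):

    import httpx

    BASE_URL = "http://localhost:8000"  # assumed local dev address, not from repo config

    def probe_statuscode() -> None:
        # Any supported status code is echoed back after the requested delay (seconds).
        resp = httpx.get(f"{BASE_URL}/statuscode", params={"code": 503, "timeout": 1.5})
        assert resp.status_code == 503

        # Redirect codes 301/302 are excluded from supported_statuses,
        # so the endpoint answers 400 ("Wrong status code") instead of redirecting.
        resp = httpx.get(f"{BASE_URL}/statuscode", params={"code": 301})
        assert resp.status_code == 400

    if __name__ == "__main__":
        probe_statuscode()

Since the router registers both GET and POST for `/statuscode`, the same probe works with `httpx.post` as well; the `timeout` query parameter is capped at 60 seconds by the endpoint's validation.
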
diff --git a/src/apps/migrate/answers/answer_item_service.py b/src/apps/migrate/answers/answer_item_service.py index 11537abffd6..5f7563e4c4f 100644 --- a/src/apps/migrate/answers/answer_item_service.py +++ b/src/apps/migrate/answers/answer_item_service.py @@ -9,26 +9,30 @@ class AnswerItemMigrationService: - async def create_item( - self, - *, - regular_session, - regular_or_arbitary_session, - mongo_answer: dict, - **kwargs, - ): - identifier = mongo_answer["meta"]["subject"].get("identifier", "") + async def get_respondent_id(self, regular_session, mongo_answer): respondent_mongo_id = Profile().findOne( {"_id": mongo_answer["meta"]["subject"].get("@id")} )["userId"] if respondent_mongo_id: - respondent_id = mongoid_to_uuid(respondent_mongo_id) + return mongoid_to_uuid(respondent_mongo_id) else: anon_respondent = await MigrateUsersMCRUD( regular_session ).get_anonymous_respondent() - respondent_id = anon_respondent.id + return anon_respondent.id + async def create_item( + self, + *, + regular_session, + regular_or_arbitary_session, + mongo_answer: dict, + **kwargs, + ): + identifier = mongo_answer["meta"]["subject"].get("identifier", "") + respondent_id = await self.get_respondent_id( + regular_session, mongo_answer + ) answer_item = await AnswerItemsCRUD( regular_or_arbitary_session ).create( @@ -95,3 +99,51 @@ def _fromtimestamp(self, timestamp: int | None): if timestamp is None: return None return datetime.utcfromtimestamp((float(timestamp) / 1000)) + + async def create_or_update_assessment( + self, + regular_session, + regular_or_arbitary_session, + mongo_answer: dict, + **kwargs, + ): + respondent_id = await self.get_respondent_id( + regular_session, mongo_answer + ) + crud = AnswerItemsCRUD(regular_or_arbitary_session) + assessment = await crud.get_assessment( + answer_id=kwargs["answer_id"], user_id=respondent_id + ) + identifier = mongo_answer["meta"]["subject"].get("identifier", "") + data = dict( + created_at=mongo_answer["created"], + updated_at=mongo_answer["updated"], + answer_id=kwargs["answer_id"], + answer=mongo_answer["meta"]["dataSource"], + item_ids=self._get_item_ids(mongo_answer), + events=mongo_answer["meta"].get("events", ""), + respondent_id=respondent_id, + identifier=mongo_answer["meta"]["subject"].get("identifier", None), + user_public_key=str(mongo_answer["meta"]["userPublicKey"]), + scheduled_datetime=self._fromtimestamp( + mongo_answer["meta"].get("scheduledTime") + ), + start_datetime=self._fromtimestamp( + mongo_answer["meta"].get("responseStarted") + ), + end_datetime=self._fromtimestamp( + mongo_answer["meta"].get("responseCompleted") + ), + is_assessment=kwargs["is_assessment"], + migrated_data=self._get_migrated_data(identifier), + assessment_activity_id=mongo_answer["activity_id_version"], + ) + if not assessment: + data["migrated_date"] = datetime.utcnow() + await crud.create(AnswerItemSchema(**data)) + + else: + data["id"] = assessment.id + data["migrated_date"] = assessment.migrated_date + data["migrated_updated"] = datetime.utcnow() + await crud.update(AnswerItemSchema(**data)) diff --git a/src/apps/migrate/answers/run.py b/src/apps/migrate/answers/run.py index 63412dfe237..d03cb06cb7a 100644 --- a/src/apps/migrate/answers/run.py +++ b/src/apps/migrate/answers/run.py @@ -16,6 +16,7 @@ from apps.migrate.answers.user_applet_access import ( MigrateUserAppletAccessService, ) +from apps.migrate.answers.utills import get_arguments from apps.migrate.run import get_applets_ids from apps.migrate.services.mongo import Mongo @@ -23,7 +24,6 @@ 
configure_report, migration_log, mongoid_to_uuid, - get_arguments, intersection, ) from apps.workspaces.crud.user_applet_access import UserAppletAccessCRUD @@ -32,6 +32,7 @@ from apps.activities.crud import ( ActivityHistoriesCRUD, ActivityItemHistoriesCRUD, + ActivitiesCRUD, ) from apps.activities.db.schemas import ( ActivityHistorySchema, @@ -78,7 +79,7 @@ def __init__(self): self.answer_item_migrate_service = AnswerItemMigrationService() self.answer_note_migrate_service = AnswerNoteMigrateService() - async def migrate(self, workspace, applets): + async def migrate(self, workspace, applets, assessments_only, update_data): regular_session = session_manager.get_session() applets_ids = await self._get_allowed_applets_ids(workspace, applets) @@ -88,10 +89,11 @@ async def migrate(self, workspace, applets): if applet_id not in APPLETS_WITH_ISSUES_DONT_MIGRATE_ANSWERS ] - await self._wipe_answers_data(regular_session, applets_ids) + if not update_data: + await self._wipe_answers_data(regular_session, applets_ids) async for answer_with_files in self._collect_migratable_answers( - applets_ids + applets_ids, assessments_only ): self.total_answers += 1 query = answer_with_files["query"] @@ -124,7 +126,7 @@ async def migrate(self, workspace, applets): mongo_answer["meta"]["reviewing"]["responseId"] ) await self._create_reviewer_assessment( - regular_session, mongo_answer + regular_session, mongo_answer, assessments_only ) else: @@ -202,7 +204,7 @@ async def migrate(self, workspace, applets): async with atomic(regular_session): await self._migrate_answers_items( - regular_session, self.answer_items_data + regular_session, self.answer_items_data, assessments_only ) self._log_migration_results() @@ -250,7 +252,9 @@ async def _get_regular_or_arbitary_session(self, session, applet_id): return arbitary_session return session - async def _collect_migratable_answers(self, applets_ids: list[uuid.UUID]): + async def _collect_migratable_answers( + self, applets_ids: list[uuid.UUID], assessments_only: bool = False + ): migratable_data_count = 0 regular_session = session_manager.get_session() @@ -261,8 +265,12 @@ async def _collect_migratable_answers(self, applets_ids: list[uuid.UUID]): ).get_answers_migration_params(applets_ids) for answer_migration_params in answers_migration_params: + kwargs = { + **answer_migration_params, + "assessments_only": assessments_only, + } answer_migration_queries = self.mongo.get_answer_migration_queries( - **answer_migration_params + **kwargs ) anwswers_with_files = self.mongo.get_answers_with_files( @@ -276,7 +284,9 @@ async def _collect_migratable_answers(self, applets_ids: list[uuid.UUID]): migratable_data_count += 1 - async def _migrate_answers_items(self, regular_session, answer_items_data): + async def _migrate_answers_items( + self, regular_session, answer_items_data, assessments_only + ): for i, answer_item_data in enumerate(answer_items_data): migration_log.debug( f"Migrating {i} answer_item of {len(answer_items_data)}" @@ -300,11 +310,18 @@ async def _migrate_answers_items(self, regular_session, answer_items_data): ) try: async with atomic(regular_or_arbitary_session): - await self.answer_item_migrate_service.create_item( - regular_session=regular_session, - regular_or_arbitary_session=regular_or_arbitary_session, - **answer_item_data, - ) + if assessments_only: + await self.answer_item_migrate_service.create_or_update_assessment( + regular_session=regular_session, + regular_or_arbitary_session=regular_or_arbitary_session, + **answer_item_data, + ) + else: + await 
self.answer_item_migrate_service.create_item( + regular_session=regular_session, + regular_or_arbitary_session=regular_or_arbitary_session, + **answer_item_data, + ) except Exception as e: self.error_answers_migration.append((answer_item_data, str(e))) continue @@ -351,7 +368,12 @@ def _log_migration_results(self): f"Anonymous users answers count: {self.anonymous_respondent_answers}" ) - async def _create_reviewer_assessment(self, regular_session, mongo_answer): + async def _create_reviewer_assessment( + self, + regular_session, + mongo_answer, + assessment_only, + ): # check if reviewer assessment activity for this answers applet version exists original_answer = self.mongo.db["item"].find_one( {"_id": mongo_answer["meta"]["reviewing"]["responseId"]} @@ -362,13 +384,9 @@ async def _create_reviewer_assessment(self, regular_session, mongo_answer): ) original_applet_version = original_answer["meta"]["applet"]["version"] - all_assessment_activities = await ActivityHistoriesCRUD( + all_assessment_activities = await ActivitiesCRUD( regular_session - ).retrieve_by_applet_ids( - [ - f"{original_applet_id}_{original_applet_version}", - ] - ) + ).get_by_applet_id(original_applet_id) reviewer_assessment_activities = [ _a for _a in all_assessment_activities if _a.is_reviewable ] @@ -381,7 +399,7 @@ async def _create_reviewer_assessment(self, regular_session, mongo_answer): ) # if not, create it - if not reviewer_assessment_activities: + if not reviewer_assessment_activities and not assessment_only: missing_applet_version = mongo_answer["meta"]["applet"]["version"] duplicating_activity_res = await ActivityHistoriesCRUD( @@ -415,9 +433,29 @@ async def _create_reviewer_assessment(self, regular_session, mongo_answer): item = await ActivityItemHistoriesCRUD( regular_session )._create(ActivityItemHistorySchema(**item)) + elif assessment_only and reviewer_assessment_activities: + activity = reviewer_assessment_activities[0] + id_version = f"{activity.id}_{original_applet_version}" + activity_hist = await ActivityHistoriesCRUD( + regular_session + ).get_by_id(id_version) + if activity_hist: + mongo_answer["activity_id_version"] = activity_hist.id_version + else: + raise Exception( + f"Assessment activity history {id_version} does not " + f"exist for applet {original_applet_id}" + ) if __name__ == "__main__": args = get_arguments() configure_report(migration_log, args.report_file) - asyncio.run(AnswersMigrateFacade().migrate(args.workspace, args.applet)) + asyncio.run( + AnswersMigrateFacade().migrate( + args.workspace, + args.applet, + args.assessments_only, + args.update_data, + ) + ) diff --git a/src/apps/migrate/answers/utills.py b/src/apps/migrate/answers/utills.py new file mode 100644 index 00000000000..5156fe12d76 --- /dev/null +++ b/src/apps/migrate/answers/utills.py @@ -0,0 +1,41 @@ +import argparse + +from pydantic import BaseModel, validator + + +class Params(BaseModel): + class Config: + orm_mode = True + + workspace: str | None = None + applet: list[str] | None = None + report_file: str | None = None + assessments_only: bool = False + update_data: bool = True + + @validator("applet", pre=True) + def to_array(cls, value, values): + if isinstance(value, str): + return value.split(",") + + return value + + +def get_arguments() -> Params: + parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS) + parser.add_argument("-w", "--workspace", type=str, required=False) + parser.add_argument("-a", "--applet", type=str, required=False) + parser.add_argument("-r", "--report_file", type=str, 
required=False) + parser.add_argument("--assessments_only", type=bool, required=False) + parser.add_argument("--update_data", type=bool, required=False) + args = parser.parse_args() + arguments = Params.from_orm(args) + return arguments + + @validator("assessments_only") + def assessments_only_to_bool(values): + return bool(values) + + @validator("update_data") + def update_data_to_bool(values): + return bool(values) diff --git a/src/apps/migrate/services/mongo.py b/src/apps/migrate/services/mongo.py index 233208023eb..0094a5d270f 100644 --- a/src/apps/migrate/services/mongo.py +++ b/src/apps/migrate/services/mongo.py @@ -1412,6 +1412,8 @@ def get_answer_migration_queries(self, **kwargs): "meta.applet.@id": kwargs["applet_id"], "meta.applet.version": kwargs["version"], } + if kwargs.get("assessments_only"): + query["meta.reviewing"] = {"$exists": True} item_collection = db["item"] try: creators_ids = item_collection.find(query).distinct("creatorId") diff --git a/src/apps/schedule/api/schedule.py b/src/apps/schedule/api/schedule.py index 11f09f624bd..f33d37e5381 100644 --- a/src/apps/schedule/api/schedule.py +++ b/src/apps/schedule/api/schedule.py @@ -54,8 +54,8 @@ async def schedule_create( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[schedule.respondent_id] if schedule.respondent_id @@ -147,8 +147,8 @@ async def schedule_delete_all( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, ) except FirebaseError as e: @@ -177,8 +177,8 @@ async def schedule_delete_by_id( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[respondent_id] if respondent_id else None, ) @@ -209,8 +209,8 @@ async def schedule_update( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[schedule.respondent_id] if schedule.respondent_id @@ -258,8 +258,8 @@ async def schedule_delete_by_user( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[respondent_id], ) @@ -356,8 +356,8 @@ async def schedule_remove_individual_calendar( try: await applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[respondent_id], ) @@ -409,8 +409,8 @@ async def schedule_create_individual( try: await 
applet_service.send_notification_to_applet_respondents( applet_id, - "Schedules are updated", - "Schedules are updated", + "Your schedule has been changed, click to update.", + "Your schedule has been changed, click to update.", FirebaseNotificationType.SCHEDULE_UPDATED, respondent_ids=[respondent_id], ) diff --git a/src/apps/schedule/commands/__init__.py b/src/apps/schedule/commands/__init__.py new file mode 100644 index 00000000000..d5dcc72dd58 --- /dev/null +++ b/src/apps/schedule/commands/__init__.py @@ -0,0 +1,3 @@ +from apps.schedule.commands.remove_events import ( # noqa: F401 + app as events_cli, +) diff --git a/src/apps/schedule/commands/remove_events.py b/src/apps/schedule/commands/remove_events.py new file mode 100644 index 00000000000..c0399b38513 --- /dev/null +++ b/src/apps/schedule/commands/remove_events.py @@ -0,0 +1,52 @@ +import asyncio +from functools import wraps + +import typer +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import Query + +from apps.activities.db.schemas.activity import ActivitySchema +from apps.schedule.service import ScheduleService +from infrastructure.database import atomic, session_manager + +app = typer.Typer() + + +def coro(f): + @wraps(f) + def wrapper(*args, **kwargs): + return asyncio.run(f(*args, **kwargs)) + + return wrapper + + +async def get_assessments(session: AsyncSession) -> list[ActivitySchema]: + query: Query = select(ActivitySchema) + query = query.where(ActivitySchema.is_reviewable.is_(True)) + res = await session.execute(query) + return res.scalars().all() # noqa + + +@app.command(short_help="Remove events for assessments") +@coro +async def remove_events(): + session_maker = session_manager.get_session() + try: + async with session_maker() as session: + async with atomic(session): + try: + assessments = await get_assessments(session) + service = ScheduleService(session) + for activity in assessments: + print( + f"Applet: {activity.applet_id} " + f"Activity: {activity.id}" + ) + await service.delete_by_activity_ids( + activity.applet_id, [activity.id] + ) + except Exception as ex: + print(ex) + finally: + await session_maker.remove() diff --git a/src/apps/schedule/crud/events.py b/src/apps/schedule/crud/events.py index e1d47939715..3f07a7b15bc 100644 --- a/src/apps/schedule/crud/events.py +++ b/src/apps/schedule/crud/events.py @@ -358,7 +358,8 @@ async def get_all_by_applet_and_activity( ) query = query.where(EventSchema.applet_id == applet_id) query = query.where(EventSchema.is_deleted == False) # noqa: E712 - query = query.where(UserEventsSchema.user_id == respondent_id) + if respondent_id: + query = query.where(UserEventsSchema.user_id == respondent_id) result = await self._execute(query) return result.scalars().all() @@ -399,7 +400,8 @@ async def get_all_by_applet_and_flow( query = query.where(EventSchema.applet_id == applet_id) query = query.where(EventSchema.is_deleted == False) # noqa: E712 - query = query.where(UserEventsSchema.user_id == respondent_id) + if respondent_id: + query = query.where(UserEventsSchema.user_id == respondent_id) result = await self._execute(query) return result.scalars().all() @@ -740,6 +742,13 @@ async def count_individual_events_by_user( db_result = await self._execute(query) return db_result.scalar() + async def get_all(self, applet_id: uuid.UUID) -> list[EventSchema]: + query: Query = select(EventSchema) + query = query.where(EventSchema.applet_id == applet_id) + query = query.where(EventSchema.is_deleted.is_(False)) + result = await 
self._execute(query) + return result.scalars().all() + class UserEventsCRUD(BaseCRUD[UserEventsSchema]): schema_class = UserEventsSchema @@ -946,6 +955,22 @@ async def get_by_applet_and_user_id( for activity_event in activity_events ] + async def get_missing_events( + self, activity_ids: list[uuid.UUID] + ) -> list[uuid.UUID]: + query: Query = select(ActivityEventsSchema.activity_id) + query.join( + ActivitySchema, + and_( + ActivitySchema.id == ActivityEventsSchema.activity_id, + ActivitySchema.is_reviewable.is_(False), + ), + ) + query.where(ActivityEventsSchema.activity_id.in_(activity_ids)) + res = await self._execute(query) + db_result = res.scalars().all() + return list(set(activity_ids) - set(db_result)) + class FlowEventsCRUD(BaseCRUD[FlowEventsSchema]): schema_class = FlowEventsSchema diff --git a/src/apps/schedule/service/schedule.py b/src/apps/schedule/service/schedule.py index 732082fcb14..d48cf75c7c9 100644 --- a/src/apps/schedule/service/schedule.py +++ b/src/apps/schedule/service/schedule.py @@ -338,7 +338,7 @@ async def delete_all_schedules(self, applet_id: uuid.UUID): event_schemas: list[EventSchema] = await EventCRUD( self.session - ).get_all_by_applet_id_with_filter(applet_id, None) + ).get_all(applet_id) event_ids = [event_schema.id for event_schema in event_schemas] periodicity_ids = [ event_schema.periodicity_id for event_schema in event_schemas @@ -1226,3 +1226,18 @@ async def create_schedule_individual( applet_id, QueryParams(filters={"respondent_id": respondent_id}), ) + + async def create_default_schedules_if_not_exist( + self, + applet_id: uuid.UUID, + activity_ids: list[uuid.UUID], + ) -> None: + """Create default schedules for applet.""" + activities_without_events = await ActivityEventsCRUD( + self.session + ).get_missing_events(activity_ids) + await self.create_default_schedules( + applet_id=applet_id, + activity_ids=activities_without_events, + is_activity=True, + ) diff --git a/src/apps/workspaces/commands/arbitrary_server.py b/src/apps/workspaces/commands/arbitrary_server.py index fd3acd438db..02bf4c2107d 100644 --- a/src/apps/workspaces/commands/arbitrary_server.py +++ b/src/apps/workspaces/commands/arbitrary_server.py @@ -1,6 +1,7 @@ import asyncio import uuid from functools import wraps +from typing import Optional import typer from pydantic import ValidationError @@ -156,19 +157,30 @@ async def add( @app.command(short_help="Show arbitrary server settings") @coro async def show( - owner_id: uuid.UUID = typer.Argument(..., help="Workspace owner id"), + owner_id: Optional[uuid.UUID] = typer.Argument( + None, help="Workspace owner id" + ), ): session_maker = session_manager.get_session() try: async with session_maker() as session: - data = await WorkspaceService( - session, owner_id - ).get_arbitrary_info_by_owner_id(owner_id) - if not data: - print( - "[bold green]Arbitrary server not configured[/bold green]" - ) - return - print_data_table(WorkspaceArbitraryFields.from_orm(data)) + if owner_id: + data = await WorkspaceService( + session, owner_id + ).get_arbitrary_info_by_owner_id(owner_id) + if not data: + print( + "[bold green]" + "Arbitrary server not configured" + "[/bold green]" + ) + return + print_data_table(WorkspaceArbitraryFields.from_orm(data)) + else: + workspaces = await WorkspaceService( + session, uuid.uuid4() + ).get_arbitrary_list() + for data in workspaces: + print_data_table(WorkspaceArbitraryFields.from_orm(data)) finally: await session_maker.remove() diff --git a/src/apps/workspaces/crud/workspaces.py 
b/src/apps/workspaces/crud/workspaces.py index 722f407fbdf..deb6febfb22 100644 --- a/src/apps/workspaces/crud/workspaces.py +++ b/src/apps/workspaces/crud/workspaces.py @@ -180,3 +180,9 @@ async def get_user_answers_db_info( res = db_result.all() return parse_obj_as(list[UserAnswersDBInfo], res) + + async def get_arbitrary_list(self) -> list[UserWorkspaceSchema]: + query: Query = select(UserWorkspaceSchema) + query = query.where(UserWorkspaceSchema.database_uri.isnot(None)) + result: Result = await self._execute(query) + return result.scalars().all() diff --git a/src/apps/workspaces/service/user_applet_access.py b/src/apps/workspaces/service/user_applet_access.py index 580abee25c3..a66042e64c3 100644 --- a/src/apps/workspaces/service/user_applet_access.py +++ b/src/apps/workspaces/service/user_applet_access.py @@ -43,17 +43,11 @@ async def _get_default_role_meta( return meta - async def _get_default_role_meta_for_anonymous_respondent( - self, user_id: uuid.UUID - ) -> dict: + async def _get_default_role_meta_for_anonymous_respondent(self) -> dict: meta: dict = {} - - user = await UsersCRUD(self.session).get_by_id(user_id) meta.update( secretUserId="Guest Account Submission", - nickname=f"{user.first_name} {user.last_name}", ) - return meta async def add_role( @@ -100,10 +94,7 @@ async def add_role_for_anonymous_respondent( ) return UserAppletAccess.from_orm(access_schema) - meta = await self._get_default_role_meta_for_anonymous_respondent( - anonymous_respondent.id, - ) - nickname = meta.pop("nickname") + meta = await self._get_default_role_meta_for_anonymous_respondent() owner_access = await UserAppletAccessCRUD( self.session ).get_applet_owner(applet_id=self._applet_id) @@ -115,7 +106,7 @@ async def add_role_for_anonymous_respondent( owner_id=owner_access.user_id, invitor_id=self._user_id, meta=meta, - nickname=nickname, + nickname=None, ) ) return UserAppletAccess.from_orm(access_schema) diff --git a/src/apps/workspaces/service/workspace.py b/src/apps/workspaces/service/workspace.py index 0ee8ae409aa..b0dee95e852 100644 --- a/src/apps/workspaces/service/workspace.py +++ b/src/apps/workspaces/service/workspace.py @@ -368,3 +368,9 @@ async def set_arbitrary_server( for k, v in data.dict(by_alias=False).items(): setattr(schema, k, v) await repository.update_by_user_id(schema.user_id, schema) + + async def get_arbitrary_list(self) -> list[WorkspaceArbitrary]: + schemas = await UserWorkspaceCRUD(self.session).get_arbitrary_list() + if not schemas: + return [] + return [WorkspaceArbitrary.from_orm(schema) for schema in schemas] diff --git a/src/cli.py b/src/cli.py index a9a351c07a8..1855466f23f 100644 --- a/src/cli.py +++ b/src/cli.py @@ -7,10 +7,12 @@ import typer # noqa: E402 +from apps.answers.commands import convert_assessments # noqa: E402 from apps.workspaces.commands import arbitrary_server_cli # noqa: E402 cli = typer.Typer() cli.add_typer(arbitrary_server_cli, name="arbitrary") +cli.add_typer(convert_assessments, name="assessments") if __name__ == "__main__": diff --git a/src/config/__init__.py b/src/config/__init__.py index 5e394dff373..bc86da13a21 100644 --- a/src/config/__init__.py +++ b/src/config/__init__.py @@ -27,6 +27,7 @@ class Settings(BaseSettings): apps_dir: Path locale_dir: Path default_language: str = "en" + content_length_limit: int | None = 150 * 1024 * 1024 debug: bool = True commit_id: str = "Not assigned" diff --git a/src/infrastructure/app.py b/src/infrastructure/app.py index a2daed520a8..88d3b02dfb7 100644 --- a/src/infrastructure/app.py +++ b/src/infrastructure/app.py
@@ -64,6 +64,13 @@ # Declare your middlewares here middlewares: Iterable[tuple[Type[middlewares_.Middleware], dict]] = ( + ( + middlewares_.ContentLengthLimitMiddleware, + dict( + content_length_limit=settings.content_length_limit, + methods=["POST"], + ), + ), (middlewares_.InternalizationMiddleware, {}), (middlewares_.CORSMiddleware, middlewares_.cors_options), ) diff --git a/src/infrastructure/database/migrations/versions/2023_11_29_17_08-cron_removing_expired_blacklisted_tokens.py b/src/infrastructure/database/migrations/versions/2023_11_29_17_08-cron_removing_expired_blacklisted_tokens.py index 47b9ad8aecb..dddf69081ab 100644 --- a/src/infrastructure/database/migrations/versions/2023_11_29_17_08-cron_removing_expired_blacklisted_tokens.py +++ b/src/infrastructure/database/migrations/versions/2023_11_29_17_08-cron_removing_expired_blacklisted_tokens.py @@ -9,6 +9,8 @@ from alembic import op from sqlalchemy import text +from config import settings + # revision identifiers, used by Alembic. revision = "69b1dfaf3c0d" down_revision = "75c9ca1f506b" @@ -17,19 +19,24 @@ task_name = "clear_token_blacklist" schedule = "0 9 * * *" -query = text("delete from token_blacklist " - "where \"exp\" < now() at time zone 'utc'") +query = text( + "delete from token_blacklist " "where \"exp\" < now() at time zone 'utc'" +) def upgrade() -> None: - op.execute( - text(f"SELECT cron.schedule(:task_name, :schedule, $${query}$$);") - .bindparams(task_name=task_name, schedule=schedule) - ) + if settings.env != "testing": + op.execute( + text( + f"SELECT cron.schedule(:task_name, :schedule, $${query}$$);" + ).bindparams(task_name=task_name, schedule=schedule) + ) def downgrade() -> None: - op.execute( - text(f"SELECT cron.unschedule(:task_name);") - .bindparams(task_name=task_name) - ) + if settings.env != "testing": + op.execute( + text(f"SELECT cron.unschedule(:task_name);").bindparams( + task_name=task_name + ) + ) diff --git a/src/infrastructure/database/migrations/versions/2023_12_04_15_45-remove_nickname_from_guest_account.py b/src/infrastructure/database/migrations/versions/2023_12_04_15_45-remove_nickname_from_guest_account.py new file mode 100644 index 00000000000..50c076b27eb --- /dev/null +++ b/src/infrastructure/database/migrations/versions/2023_12_04_15_45-remove_nickname_from_guest_account.py @@ -0,0 +1,35 @@ +"""Remove nickname from guest account + +Revision ID: 63a2a290c7e6 +Revises: 69b1dfaf3c0d +Create Date: 2023-12-04 15:45:11.543448 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "63a2a290c7e6" +down_revision = "69b1dfaf3c0d" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + conn = op.get_bind() + result = conn.execute( + sa.text( + f""" + UPDATE user_applet_accesses SET nickname=NULL + WHERE user_id in ( + SELECT id + FROM users + WHERE is_anonymous_respondent=TRUE + ); + """ + ) + ) + + +def downgrade() -> None: + pass diff --git a/src/infrastructure/database/migrations/versions/2023_12_06_13_47-add_performance_task_type_column.py b/src/infrastructure/database/migrations/versions/2023_12_06_13_47-add_performance_task_type_column.py new file mode 100644 index 00000000000..29e4c8c11d4 --- /dev/null +++ b/src/infrastructure/database/migrations/versions/2023_12_06_13_47-add_performance_task_type_column.py @@ -0,0 +1,79 @@ +"""Add performance_task_type to the table + +Revision ID: 186481f0c0cc +Revises: 63a2a290c7e6 +Create Date: 2023-12-06 13:47:49.694746 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "186481f0c0cc" +down_revision = "63a2a290c7e6" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "activities", + sa.Column( + "performance_task_type", sa.String(length=255), nullable=True + ), + ) + op.add_column( + "activity_histories", + sa.Column( + "performance_task_type", sa.String(length=255), nullable=True + ), + ) + conn = op.get_bind() + conn.execute( + sa.text( + """ + with + performance as ( + select distinct + activity_id, + case when response_type in ('ABTrails', 'flanker') then response_type + else config->>'user_input_type' + end as performance_task_type + from activity_items + where response_type in ('ABTrails', 'flanker') + or response_type = 'stabilityTracker' and config->>'user_input_type' in ('touch', 'gyroscope') + ) + update activities set performance_task_type = performance.performance_task_type + from performance + where id = performance.activity_id + """ + ) + ) + conn.execute( + sa.text( + """ + with + performance as ( + select distinct + activity_id, + case when response_type in ('ABTrails', 'flanker') then response_type + else config->>'user_input_type' + end as performance_task_type + from activity_item_histories + where response_type in ('ABTrails', 'flanker') + or response_type = 'stabilityTracker' and config->>'user_input_type' in ('touch', 'gyroscope') + ) + update activity_histories set performance_task_type = performance.performance_task_type + from performance + where id_version = performance.activity_id + """ + ) + ) + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("activity_histories", "performance_task_type") + op.drop_column("activities", "performance_task_type") + # ### end Alembic commands ### diff --git a/src/infrastructure/database/migrations/versions/2023_12_08_19_55-add_assessment_activity_version_id_on_.py b/src/infrastructure/database/migrations/versions/2023_12_08_19_55-add_assessment_activity_version_id_on_.py new file mode 100644 index 00000000000..36b0be1ba1d --- /dev/null +++ b/src/infrastructure/database/migrations/versions/2023_12_08_19_55-add_assessment_activity_version_id_on_.py @@ -0,0 +1,40 @@ +"""add assessment activity version id on answers item + +Revision ID: 60528d410fd1 +Revises: 8c59c7363c67 +Create Date: 2023-11-13 19:55:57.797942 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "60528d410fd1" +down_revision = "186481f0c0cc" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "answers_items", + sa.Column("assessment_activity_id", sa.Text(), nullable=True), + ) + op.create_index( + op.f("ix_answers_items_assessment_activity_id"), + "answers_items", + ["assessment_activity_id"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_answers_items_assessment_activity_id"), + table_name="answers_items", + ) + op.drop_column("answers_items", "assessment_activity_id") + # ### end Alembic commands ### diff --git a/src/infrastructure/database/migrations_arbitrary/versions/2023_11_13_19_55-add_assessment_activity_version_id_on_.py b/src/infrastructure/database/migrations_arbitrary/versions/2023_11_13_19_55-add_assessment_activity_version_id_on_.py new file mode 100644 index 00000000000..c10475d2b02 --- /dev/null +++ b/src/infrastructure/database/migrations_arbitrary/versions/2023_11_13_19_55-add_assessment_activity_version_id_on_.py @@ -0,0 +1,40 @@ +"""add assessment activity version id on answers item + +Revision ID: 60528d410fd1 +Revises: 8c59c7363c67 +Create Date: 2023-11-13 19:55:57.797942 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "60528d410fd1" +down_revision = "016848d34c04" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "answers_items", + sa.Column("assessment_activity_id", sa.Text(), nullable=True), + ) + op.create_index( + op.f("ix_answers_items_assessment_activity_id"), + "answers_items", + ["assessment_activity_id"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index( + op.f("ix_answers_items_assessment_activity_id"), + table_name="answers_items", + ) + op.drop_column("answers_items", "assessment_activity_id") + # ### end Alembic commands ### diff --git a/src/infrastructure/dependency/cdn.py b/src/infrastructure/dependency/cdn.py index cd944f9c4ba..404afdb11f6 100644 --- a/src/infrastructure/dependency/cdn.py +++ b/src/infrastructure/dependency/cdn.py @@ -31,6 +31,9 @@ async def get_media_bucket() -> CDNClient: async def get_log_bucket() -> CDNClient: config = CdnConfig( + endpoint_url=settings.cdn.endpoint_url, + access_key=settings.cdn.access_key, + secret_key=settings.cdn.secret_key, region=settings.cdn.region, bucket=settings.cdn.bucket_answer, ttl_signed_urls=settings.cdn.ttl_signed_urls, diff --git a/src/middlewares/__init__.py b/src/middlewares/__init__.py index 5b2f27e3764..9b1922cc1af 100644 --- a/src/middlewares/__init__.py +++ b/src/middlewares/__init__.py @@ -1,3 +1,6 @@ +from middlewares.content_length import ( # noqa: F401, F403 + ContentLengthLimitMiddleware, +) from middlewares.cors import * # noqa: F401, F403 from middlewares.domain import * # noqa: F401, F403 from middlewares.internalization import * # noqa: F401, F403 diff --git a/src/middlewares/content_length.py b/src/middlewares/content_length.py new file mode 100644 index 00000000000..222a65fe300 --- /dev/null +++ b/src/middlewares/content_length.py @@ -0,0 +1,49 @@ +from fastapi import HTTPException +from starlette import status +from starlette.types import ASGIApp + + +class ContentLengthLimitMiddleware: + def __init__( + self, + app: ASGIApp, + content_length_limit: int | None = None, + methods: list | None = None, + ): + self.app = app + self.content_length_limit = content_length_limit + self.methods = methods + + def method_matches(self, method): + if self.methods: + return method in self.methods + return True + + async def __call__(self, scope, receive, send): + if not ( + scope["type"] == "http" + and self.method_matches(scope.get("method")) + and self.content_length_limit is not None + ): + await self.app(scope, receive, send) + return + + def _receiver(): + read_length: int = 0 + + async def _receive(): + nonlocal read_length, receive + + message = await receive() + if message["type"] == "http.request": + read_length += len(message.get("body", b"")) + if read_length > self.content_length_limit: + raise HTTPException( + status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE # noqa: E501 + ) + return message + + return _receive + + _receive = _receiver() + await self.app(scope, _receive, send)
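
For reference, a minimal sketch (not part of this changeset) of how the new ContentLengthLimitMiddleware behaves end to end. It assumes the httpx-based Starlette TestClient, that src/ is on the import path, and an illustrative /upload route with a 1 KiB limit; the HTTPException raised while the body is being streamed is handled by FastAPI's regular exception handling and surfaces as a 413 response.

from fastapi import FastAPI, Request
from starlette.testclient import TestClient

from middlewares.content_length import ContentLengthLimitMiddleware

app = FastAPI()
# Reject POST bodies larger than 1 KiB; other methods pass through untouched.
app.add_middleware(
    ContentLengthLimitMiddleware, content_length_limit=1024, methods=["POST"]
)


@app.post("/upload")
async def upload(request: Request):
    # Reading the body drives the wrapped receive() and triggers the size check.
    body = await request.body()
    return {"received": len(body)}


client = TestClient(app)
assert client.post("/upload", content=b"x" * 512).status_code == 200
assert client.post("/upload", content=b"x" * 2048).status_code == 413

Note that the middleware counts the bytes actually received rather than trusting the Content-Length header, so a client cannot bypass the limit by omitting or spoofing the header.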