From 01860b7af7484073089f6fde2c2028cfcb5c4fe4 Mon Sep 17 00:00:00 2001 From: Nicolas Cantu Date: Fri, 3 Apr 2026 17:55:50 +0200 Subject: [PATCH] chore: remove gitmodules, add docv workspace crates, update systemd README - Drop .gitmodules (ia_dev tracked as submodule pointer without file) - Add services/docv Cargo workspace: docv-back, docv-shared, migrations, sources - Refresh systemd/README.md --- .gitmodules | 3 - services/docv/Cargo.lock | 2419 +++++++++ services/docv/Cargo.toml | 14 + services/docv/docv-back/.env.example | 46 + services/docv/docv-back/Cargo.toml | 34 + .../20260330120000_create_users.sql | 14 + .../20260330120001_seed_demo_user.sql | 9 + .../20260330140000_offices_folders.sql | 30 + ...260401140000_offices_add_siren_address.sql | 3 + .../20260401150000_office_members_role.sql | 2 + .../20260401160000_user_stub_lists.sql | 55 + .../20260401170000_folder_documents.sql | 22 + ...0_folder_documents_storage_pending_idx.sql | 5 + ...0402110000_folders_title_legacy_compat.sql | 22 + ...260402183000_folders_status_if_missing.sql | 2 + .../20260403120000_folders_dp_layout.sql | 9 + .../20260403140000_roles_minimal.sql | 10 + ...260403160000_office_society_extensions.sql | 19 + ...00_folder_sources_notes_tasks_workflow.sql | 61 + ...4140000_folders_purpose_operation_type.sql | 18 + services/docv/docv-back/src/api_auth.rs | 34 + services/docv/docv-back/src/branding.rs | 243 + services/docv/docv-back/src/config/mod.rs | 219 + services/docv/docv-back/src/db/mod.rs | 1576 ++++++ services/docv/docv-back/src/dp_git_sync.rs | 109 + services/docv/docv-back/src/dp_layout_fs.rs | 362 ++ services/docv/docv-back/src/dp_mirror.rs | 64 + services/docv/docv-back/src/main.rs | 74 + .../docv/docv-back/src/server/ai_forward.rs | 45 + services/docv/docv-back/src/server/api_v1.rs | 4545 +++++++++++++++++ services/docv/docv-back/src/server/mod.rs | 568 ++ .../docv/docv-back/src/server/v1_route.rs | 106 + services/docv/docv-back/tenants.default.json | 18 + 
services/docv/docv-shared/Cargo.toml | 16 + .../docv/docv-shared/src/constants/mod.rs | 4 + services/docv/docv-shared/src/format/mod.rs | 1 + services/docv/docv-shared/src/lib.rs | 7 + services/docv/docv-shared/src/rules/mod.rs | 1 + .../docv/docv-shared/src/validation/mod.rs | 5 + systemd/README.md | 8 + 40 files changed, 10799 insertions(+), 3 deletions(-) delete mode 100644 .gitmodules create mode 100644 services/docv/Cargo.lock create mode 100644 services/docv/Cargo.toml create mode 100644 services/docv/docv-back/.env.example create mode 100644 services/docv/docv-back/Cargo.toml create mode 100644 services/docv/docv-back/migrations/20260330120000_create_users.sql create mode 100644 services/docv/docv-back/migrations/20260330120001_seed_demo_user.sql create mode 100644 services/docv/docv-back/migrations/20260330140000_offices_folders.sql create mode 100644 services/docv/docv-back/migrations/20260401140000_offices_add_siren_address.sql create mode 100644 services/docv/docv-back/migrations/20260401150000_office_members_role.sql create mode 100644 services/docv/docv-back/migrations/20260401160000_user_stub_lists.sql create mode 100644 services/docv/docv-back/migrations/20260401170000_folder_documents.sql create mode 100644 services/docv/docv-back/migrations/20260402100000_folder_documents_storage_pending_idx.sql create mode 100644 services/docv/docv-back/migrations/20260402110000_folders_title_legacy_compat.sql create mode 100644 services/docv/docv-back/migrations/20260402183000_folders_status_if_missing.sql create mode 100644 services/docv/docv-back/migrations/20260403120000_folders_dp_layout.sql create mode 100644 services/docv/docv-back/migrations/20260403140000_roles_minimal.sql create mode 100644 services/docv/docv-back/migrations/20260403160000_office_society_extensions.sql create mode 100644 services/docv/docv-back/migrations/20260404120000_folder_sources_notes_tasks_workflow.sql create mode 100644 
services/docv/docv-back/migrations/20260404140000_folders_purpose_operation_type.sql create mode 100644 services/docv/docv-back/src/api_auth.rs create mode 100644 services/docv/docv-back/src/branding.rs create mode 100644 services/docv/docv-back/src/config/mod.rs create mode 100644 services/docv/docv-back/src/db/mod.rs create mode 100644 services/docv/docv-back/src/dp_git_sync.rs create mode 100644 services/docv/docv-back/src/dp_layout_fs.rs create mode 100644 services/docv/docv-back/src/dp_mirror.rs create mode 100644 services/docv/docv-back/src/main.rs create mode 100644 services/docv/docv-back/src/server/ai_forward.rs create mode 100644 services/docv/docv-back/src/server/api_v1.rs create mode 100644 services/docv/docv-back/src/server/mod.rs create mode 100644 services/docv/docv-back/src/server/v1_route.rs create mode 100644 services/docv/docv-back/tenants.default.json create mode 100644 services/docv/docv-shared/Cargo.toml create mode 100644 services/docv/docv-shared/src/constants/mod.rs create mode 100644 services/docv/docv-shared/src/format/mod.rs create mode 100644 services/docv/docv-shared/src/lib.rs create mode 100644 services/docv/docv-shared/src/rules/mod.rs create mode 100644 services/docv/docv-shared/src/validation/mod.rs diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 399fc69..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "ia_dev"] - path = ia_dev - url = https://git.4nkweb.com/4nk/ia_dev.git diff --git a/services/docv/Cargo.lock b/services/docv/Cargo.lock new file mode 100644 index 0000000..b824d04 --- /dev/null +++ b/services/docv/Cargo.lock @@ -0,0 +1,2419 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bcrypt" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7e7c93a3fb23b2fdde989b2c9ec4dd153063ec81f408507f84c090cd91c6641" +dependencies = [ + "base64 0.13.1", + "blowfish", + "getrandom 0.2.17", + "zeroize", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name 
= "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blowfish" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +dependencies = [ + "byteorder", + "cipher", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "cc" +version = "1.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "core-foundation" +version 
= "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-postgres" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836a24a9d49deefe610b8b60c767a7412e9a931d79a89415cd2d2d71630ca8d7" +dependencies = [ + "deadpool", + "log", + "tokio", + "tokio-postgres", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +dependencies = [ + "tokio", +] + +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "digest" 
+version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "docv-back" +version = "0.1.0" +dependencies = [ + "bcrypt", + "deadpool-postgres", + "docv-shared", + "html-escape", + "hyper", + "jsonwebtoken", + "postgres-types", + "reqwest", + "serde", + "serde_json", + "serde_urlencoded", + "tempfile", + "tokio", + "tokio-postgres", + "tracing", + "tracing-subscriber", + "urlencoding", + "uuid", +] + +[[package]] +name = "docv-shared" +version = "0.1.0" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-core", + "futures-macro", + "futures-sink", + "futures-task", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi 5.3.0", + "wasip2", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "html-escape" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d1ad449764d627e22bfd7cd5e8868264fc9236e07c752972b4080cd351cb476" +dependencies = [ + "utf8-width", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http", + "hyper", + "rustls", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "icu_collections" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" +dependencies = [ + "displaydoc", + "potential_utf", + "utf8_iter", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" + +[[package]] +name 
= "icu_properties" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" + +[[package]] +name = "icu_provider" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "js-sys" +version = "0.3.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc4c90f45aa2e6eacbe8645f77fdea542ac97a494bcd117a67df9ff4d611f995" +dependencies = [ + "cfg-if", + "futures-util", + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "8.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +dependencies = [ + "base64 0.21.7", + "pem", + "ring 0.16.20", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.183" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" + +[[package]] +name = "libredox" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" +dependencies = [ + "bitflags 2.11.0", + "libc", + "plain", + "redox_syscall 0.7.3", +] + +[[package]] +name = 
"linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "litemap" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall 0.5.18", + "smallvec", + "windows-link", +] + +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "postgres-protocol" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee9dd5fe15055d2b6806f4736aa0c9637217074e224bbec46d4041b91bb9491" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand 0.9.2", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", + "uuid", +] + +[[package]] +name = "potential_utf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "rand" +version = "0.8.5" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "redox_syscall" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = 
"regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted 0.9.0", + "windows-sys 
0.52.0", +] + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags 2.11.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring 0.17.14", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring 0.17.14", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.14", + "untrusted 0.9.0", +] + +[[package]] +name = "semver" +version = "1.0.27" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = 
"1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d585997b0ac10be3c5ee635f1bab02d512760d14b7c468801ac8a01d9ae5f1d" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time", +] + +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = 
"stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = 
"tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom 0.4.2", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" +dependencies 
= [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.3", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-postgres" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand 0.8.5", + "socket2 0.5.10", + "tokio", + "tokio-util", + "whoami", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-normalization" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies 
= [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "utf8-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1292c0d970b54115d14f2492fe0170adf21d68a1de108eebc51c1df4f346a091" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "serde_core", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.115" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6523d69017b7633e396a89c5efab138161ed5aafcbc8d3e5c5a42ae38f50495a" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d1faf851e778dfa54db7cd438b70758eba9755cb47403f3496edd7c8fc212f0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.115" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e3a6c758eb2f701ed3d052ff5737f5bfe6614326ea7f3bbac7156192dc32e67" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.115" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921de2737904886b52bcbb237301552d05969a6f9c40d261eb0533c8b055fedf" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.115" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a93e946af942b58934c604527337bad9ae33ba1d5c6900bbb41c2c07c2364a93" +dependencies = [ + "unicode-ident", +] + +[[package]] +name 
= "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.11.0", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84cde8507f4d7cfcb1185b8cb5890c494ffea65edbe1ba82cfd63661c805ed94" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + +[[package]] +name = "whoami" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" +dependencies = [ + "libredox", + "wasite", + "web-sys", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 
0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.11.0", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" + +[[package]] +name = "yoke" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" +dependencies = [ + 
"stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/services/docv/Cargo.toml b/services/docv/Cargo.toml new file mode 100644 index 0000000..ed6fbff --- /dev/null +++ b/services/docv/Cargo.toml @@ -0,0 +1,14 @@ +[workspace] +resolver = "2" +members = ["docv-shared", "docv-back"] + +[workspace.lints.clippy] +# Align with fix-lint / docs/REGLES_CODING_PROJET.md: no unwrap in production, typed errors, propagation via ? +unwrap_used = "warn" +expect_used = "warn" +# Encourage explicit types and avoid unclear code +type_complexity = "warn" +# Avoid redundant clones +redundant_clone = "warn" +# Prefer ? over match on Option/Result where clear +question_mark = "warn" diff --git a/services/docv/docv-back/.env.example b/services/docv/docv-back/.env.example new file mode 100644 index 0000000..da80cd4 --- /dev/null +++ b/services/docv/docv-back/.env.example @@ -0,0 +1,46 @@ +# docv-back configuration. Copy to .env and set values. +# Database (docv BDD) +DATABASE_URL=postgres://user:password@localhost:5432/docv + +# Auth +JWT_SECRET=your-secret-min-32-chars +# Durée (secondes) pour : jeton Bearer OAuth, JWT du cookie docv_oauth_session, Max-Age du cookie après POST /oauth/sign-in. Défaut 900 (15 min). Plage 60–86400. +# DOCV_OAUTH_ACCESS_TOKEN_TTL_SEC=900 +# Emails séparés par des virgules : au démarrage, rattachement au premier office (ancienneté) comme rôle client si l’utilisateur n’a aucune société. Laisser vide pour désactiver. 
+# DOCV_DEMO_MEMBER_EMAILS=client@example.com +# Test uniquement : rattacher tous les utilisateurs sans office_members au plus ancien office (évite GET /api/v1/offices vide). Voir docs/docv/AUTH_SESSION.md +# DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE=1 + +# OAuth2 (authorization_code) — comma-separated client ids if several apps share this secret +OAUTH_CLIENT_ID=enso-web +OAUTH_CLIENT_SECRET=change-me +OAUTH_REDIRECT_URIS=https://localhost:3032/auth/docv-callback +# Prefix browsers use before /oauth/... (reverse proxy, e.g. /docv-api). Empty only if OAuth is at site root. +OAUTH_BROWSER_PATH_PREFIX=/docv-api +# Primary key column on table users: `id` (shipped migration) or `uid` (legacy / IMPL-style schema). +# DOCV_USERS_PK_COLUMN=uid + +# Optional: branding for HTML sign-in (defaults to tenants.default.json in repo) +# OAUTH_TENANTS_PATH=/path/to/tenants.json +# OAUTH_TENANTS_JSON={"default":{"heading":"Connexion"},"clients":{}} + +# External APIs (backend only): anchoring (services), IA (submodule ai) +ANCHORING_URL=http://localhost:3016 +IA_API_URL=http://localhost:3022 + +# Server (aligned with nginx proxy → docv-back and /docv-api/; see docs/PORTS_ENSO.md) +HOST=0.0.0.0 +PORT=3038 + +# Optional: directory for binary document uploads (POST /api/v1/folders/:uid/documents/binary). +# When unset, the front falls back to JSON-only metadata (no downloadable file). +# DOCV_FILE_STORAGE_DIR=/var/lib/docv/uploads +# Max body size for binary upload in bytes (default 10485760). +# DOCV_UPLOAD_MAX_BYTES=10485760 + +# Optional: mirror uploads under data/dossiers-permanents/ and git commit/push (see docs/features/DOSSIERS_PERMANENTS_DATA_GIT.md). 
+# DOCV_DP_GIT_SYNC=1 +# DOCV_DP_GIT_REPO_ROOT=/path/to/enso +# DOCV_DP_GIT_DATA_SUBPATH=data/dossiers-permanents +# DOCV_DP_GIT_REMOTE=origin +# DOCV_DP_GIT_BRANCH=test diff --git a/services/docv/docv-back/Cargo.toml b/services/docv/docv-back/Cargo.toml new file mode 100644 index 0000000..70c5ad0 --- /dev/null +++ b/services/docv/docv-back/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "docv-back" +version = "0.1.0" +edition = "2021" +description = "docv backend: HTTP API (Rust), PostgreSQL, auth, OAuth2 for enso-front" + +[lints] +workspace = true + +[[bin]] +name = "docv-back" +path = "src/main.rs" + +[dependencies] +docv-shared = { path = "../docv-shared" } +bcrypt = "0.13" +deadpool-postgres = "0.10" +hyper = { version = "0.14", features = ["full"] } +jsonwebtoken = "8" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_urlencoded = "0.7" +tokio = { version = "1", features = ["full"] } +tokio-postgres = { version = "0.7.8", features = ["with-uuid-1"] } +postgres-types = { version = "0.2.6", features = ["with-uuid-1"] } +urlencoding = "2" +html-escape = "0.2" +uuid = { version = "1", features = ["v4", "serde"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] } + +[dev-dependencies] +tempfile = "3" diff --git a/services/docv/docv-back/migrations/20260330120000_create_users.sql b/services/docv/docv-back/migrations/20260330120000_create_users.sql new file mode 100644 index 0000000..b6b0ced --- /dev/null +++ b/services/docv/docv-back/migrations/20260330120000_create_users.sql @@ -0,0 +1,14 @@ +-- Minimal users table for docv authentication (zone 1, IMPL_01). +-- Compatible with PostgreSQL 13+ (gen_random_uuid). 
+ +CREATE TABLE IF NOT EXISTS users ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + email TEXT NOT NULL UNIQUE, + password_hash TEXT NOT NULL, + name TEXT, + phone TEXT, + preferences JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_users_email ON users (email); diff --git a/services/docv/docv-back/migrations/20260330120001_seed_demo_user.sql b/services/docv/docv-back/migrations/20260330120001_seed_demo_user.sql new file mode 100644 index 0000000..7f91100 --- /dev/null +++ b/services/docv/docv-back/migrations/20260330120001_seed_demo_user.sql @@ -0,0 +1,9 @@ +-- Demo user aligned with Lovable mock (password: demo). Safe on repeated apply. + +INSERT INTO users (email, password_hash, name) +VALUES ( + 'client@example.com', + '$2b$10$mUXYmHFzQhhB3e7OiRR4JOhLwIrQUhXVLM8b2mFBwjeJHFjUywrUq', + 'Jean Dupont' +) +ON CONFLICT (email) DO NOTHING; diff --git a/services/docv/docv-back/migrations/20260330140000_offices_folders.sql b/services/docv/docv-back/migrations/20260330140000_offices_folders.sql new file mode 100644 index 0000000..71a4dee --- /dev/null +++ b/services/docv/docv-back/migrations/20260330140000_offices_folders.sql @@ -0,0 +1,30 @@ +-- Minimal offices / memberships / folders for docv API (zones 5 & 2 subset). +-- user_uid matches users primary key value (UUID as text from JWT sub) without DB FK (supports id or uid PK on users). 
+ +CREATE TABLE IF NOT EXISTS offices ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL, + siren TEXT, + address TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS office_members ( + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + user_uid UUID NOT NULL, + role TEXT NOT NULL DEFAULT 'member', + PRIMARY KEY (office_uid, user_uid) +); + +CREATE INDEX IF NOT EXISTS idx_office_members_user ON office_members(user_uid); + +CREATE TABLE IF NOT EXISTS folders ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + title TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'open', + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_folders_office ON folders(office_uid); diff --git a/services/docv/docv-back/migrations/20260401140000_offices_add_siren_address.sql b/services/docv/docv-back/migrations/20260401140000_offices_add_siren_address.sql new file mode 100644 index 0000000..1fe1cee --- /dev/null +++ b/services/docv/docv-back/migrations/20260401140000_offices_add_siren_address.sql @@ -0,0 +1,3 @@ +-- Align legacy offices table (created before siren/address) with current API queries. +ALTER TABLE offices ADD COLUMN IF NOT EXISTS siren TEXT; +ALTER TABLE offices ADD COLUMN IF NOT EXISTS address TEXT; diff --git a/services/docv/docv-back/migrations/20260401150000_office_members_role.sql b/services/docv/docv-back/migrations/20260401150000_office_members_role.sql new file mode 100644 index 0000000..8e892b1 --- /dev/null +++ b/services/docv/docv-back/migrations/20260401150000_office_members_role.sql @@ -0,0 +1,2 @@ +-- Align legacy office_members (created before membership role) with API JOIN on m.role. 
+ALTER TABLE office_members ADD COLUMN IF NOT EXISTS role TEXT NOT NULL DEFAULT 'member'; diff --git a/services/docv/docv-back/migrations/20260401160000_user_stub_lists.sql b/services/docv/docv-back/migrations/20260401160000_user_stub_lists.sql new file mode 100644 index 0000000..a0c3134 --- /dev/null +++ b/services/docv/docv-back/migrations/20260401160000_user_stub_lists.sql @@ -0,0 +1,55 @@ +-- Dashboard lists: notifications, pending documents, conversations (per user + office scope). +-- No FK to users (same pattern as office_members.user_uid) for legacy PK compatibility. + +CREATE TABLE IF NOT EXISTS user_notifications ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_uid UUID NOT NULL, + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + notif_type TEXT NOT NULL CHECK (notif_type IN ('new_document', 'request_document', 'case_update')), + message TEXT NOT NULL, + case_uid UUID REFERENCES folders(uid) ON DELETE SET NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + is_read BOOLEAN NOT NULL DEFAULT false +); + +CREATE INDEX IF NOT EXISTS idx_user_notifications_user ON user_notifications(user_uid, created_at DESC); + +CREATE TABLE IF NOT EXISTS user_pending_documents ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_uid UUID NOT NULL, + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + name TEXT NOT NULL, + description TEXT NOT NULL DEFAULT '', + case_uid UUID NOT NULL REFERENCES folders(uid) ON DELETE CASCADE, + case_name TEXT NOT NULL DEFAULT '', + requested_at TIMESTAMPTZ NOT NULL DEFAULT now(), + due_date DATE +); + +CREATE INDEX IF NOT EXISTS idx_user_pending_user ON user_pending_documents(user_uid, requested_at DESC); + +CREATE TABLE IF NOT EXISTS user_conversations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_uid UUID NOT NULL, + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + contact_name TEXT NOT NULL, + contact_role TEXT NOT NULL DEFAULT '', + last_message 
TEXT NOT NULL DEFAULT '', + last_message_at TIMESTAMPTZ NOT NULL DEFAULT now(), + unread_count INT NOT NULL DEFAULT 0 +); + +CREATE INDEX IF NOT EXISTS idx_user_conversations_user ON user_conversations(user_uid, last_message_at DESC); + +CREATE TABLE IF NOT EXISTS conversation_messages ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + conversation_id UUID NOT NULL REFERENCES user_conversations(id) ON DELETE CASCADE, + sender_id TEXT NOT NULL DEFAULT '', + sender_name TEXT NOT NULL, + sender_role TEXT NOT NULL CHECK (sender_role IN ('client', 'cabinet')), + content TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + is_read BOOLEAN NOT NULL DEFAULT false +); + +CREATE INDEX IF NOT EXISTS idx_conversation_messages_conv ON conversation_messages(conversation_id, created_at); diff --git a/services/docv/docv-back/migrations/20260401170000_folder_documents.sql b/services/docv/docv-back/migrations/20260401170000_folder_documents.sql new file mode 100644 index 0000000..6c85283 --- /dev/null +++ b/services/docv/docv-back/migrations/20260401170000_folder_documents.sql @@ -0,0 +1,22 @@ +-- Documents rattachés à un dossier (lecture côté fiche dossier ; CRUD via API). 
+ +CREATE TABLE IF NOT EXISTS folder_documents ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + folder_uid UUID NOT NULL REFERENCES folders(uid) ON DELETE CASCADE, + name TEXT NOT NULL, + doc_type TEXT NOT NULL DEFAULT 'autre', + category TEXT NOT NULL DEFAULT 'dossier', + uploaded_by TEXT NOT NULL CHECK (uploaded_by IN ('cabinet', 'client')), + size_label TEXT NOT NULL DEFAULT '—', + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + CONSTRAINT folder_documents_doc_type_chk CHECK ( + doc_type IN ( + 'kbis', 'statuts', 'pv_ag', 'pacte_associes', 'contrat', 'facture', 'autre' + ) + ), + CONSTRAINT folder_documents_category_chk CHECK ( + category IN ('permanent', 'dossier') + ) +); + +CREATE INDEX IF NOT EXISTS idx_folder_documents_folder ON folder_documents(folder_uid); diff --git a/services/docv/docv-back/migrations/20260402100000_folder_documents_storage_pending_idx.sql b/services/docv/docv-back/migrations/20260402100000_folder_documents_storage_pending_idx.sql new file mode 100644 index 0000000..38e2389 --- /dev/null +++ b/services/docv/docv-back/migrations/20260402100000_folder_documents_storage_pending_idx.sql @@ -0,0 +1,5 @@ +-- Métadonnées fichier (URL de stockage / type MIME) pour intégration upload future. +ALTER TABLE folder_documents ADD COLUMN IF NOT EXISTS storage_url TEXT; +ALTER TABLE folder_documents ADD COLUMN IF NOT EXISTS mime_type TEXT; + +CREATE INDEX IF NOT EXISTS idx_user_pending_office ON user_pending_documents(office_uid); diff --git a/services/docv/docv-back/migrations/20260402110000_folders_title_legacy_compat.sql b/services/docv/docv-back/migrations/20260402110000_folders_title_legacy_compat.sql new file mode 100644 index 0000000..f2bfc34 --- /dev/null +++ b/services/docv/docv-back/migrations/20260402110000_folders_title_legacy_compat.sql @@ -0,0 +1,22 @@ +-- Stub seed and API expect folders.title. Some setups only had folders.name. 
+DO $$ +BEGIN + IF EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = 'folders' + ) THEN + IF EXISTS ( + SELECT 1 FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = 'folders' AND column_name = 'title' + ) THEN + NULL; + ELSIF EXISTS ( + SELECT 1 FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = 'folders' AND column_name = 'name' + ) THEN + ALTER TABLE folders RENAME COLUMN name TO title; + ELSE + ALTER TABLE folders ADD COLUMN title TEXT NOT NULL DEFAULT ''; + END IF; + END IF; +END $$; diff --git a/services/docv/docv-back/migrations/20260402183000_folders_status_if_missing.sql b/services/docv/docv-back/migrations/20260402183000_folders_status_if_missing.sql new file mode 100644 index 0000000..0427360 --- /dev/null +++ b/services/docv/docv-back/migrations/20260402183000_folders_status_if_missing.sql @@ -0,0 +1,2 @@ +-- Schémas antérieurs (IMPL) : table folders sans `status`. L’API docv-back et le seed démo l’utilisent. 
+ALTER TABLE folders ADD COLUMN IF NOT EXISTS status TEXT NOT NULL DEFAULT 'open'; diff --git a/services/docv/docv-back/migrations/20260403120000_folders_dp_layout.sql b/services/docv/docv-back/migrations/20260403120000_folders_dp_layout.sql new file mode 100644 index 0000000..d0228ea --- /dev/null +++ b/services/docv/docv-back/migrations/20260403120000_folders_dp_layout.sql @@ -0,0 +1,9 @@ +-- Dossiers permanents types : lien BDD ↔ arborescence data/dossiers-permanents + +ALTER TABLE folders ADD COLUMN IF NOT EXISTS dp_archetype TEXT; +ALTER TABLE folders ADD COLUMN IF NOT EXISTS dp_layout_root TEXT; + +ALTER TABLE folder_documents ADD COLUMN IF NOT EXISTS dp_mirror_path TEXT; + +CREATE INDEX IF NOT EXISTS idx_folders_dp_layout_root ON folders(dp_layout_root) + WHERE dp_layout_root IS NOT NULL; diff --git a/services/docv/docv-back/migrations/20260403140000_roles_minimal.sql b/services/docv/docv-back/migrations/20260403140000_roles_minimal.sql new file mode 100644 index 0000000..e2ad1d4 --- /dev/null +++ b/services/docv/docv-back/migrations/20260403140000_roles_minimal.sql @@ -0,0 +1,10 @@ +-- Rôles par office : requis lorsque office_members référence roles (role_uid IMPL / extensions). +-- Si la table existe déjà avec un autre schéma, cette commande ne fait rien (IF NOT EXISTS). 
+CREATE TABLE IF NOT EXISTS roles ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + office_uid UUID REFERENCES offices(uid) ON DELETE CASCADE, + name TEXT NOT NULL DEFAULT 'Member', + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_roles_office_uid ON roles(office_uid); diff --git a/services/docv/docv-back/migrations/20260403160000_office_society_extensions.sql b/services/docv/docv-back/migrations/20260403160000_office_society_extensions.sql new file mode 100644 index 0000000..691fd7a --- /dev/null +++ b/services/docv/docv-back/migrations/20260403160000_office_society_extensions.sql @@ -0,0 +1,19 @@ +-- Société (office) : hiérarchie, archivage ; dossier : prolonge le DP ; commentaires société (socle SPEC_18). + +ALTER TABLE offices ADD COLUMN IF NOT EXISTS parent_office_uid UUID REFERENCES offices(uid) ON DELETE SET NULL; +ALTER TABLE offices ADD COLUMN IF NOT EXISTS archived_at TIMESTAMPTZ; + +CREATE INDEX IF NOT EXISTS idx_offices_parent ON offices(parent_office_uid) WHERE parent_office_uid IS NOT NULL; + +ALTER TABLE folders ADD COLUMN IF NOT EXISTS extends_permanent_record BOOLEAN NOT NULL DEFAULT false; + +CREATE TABLE IF NOT EXISTS office_comments ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + user_uid UUID NOT NULL, + content TEXT NOT NULL, + access_level TEXT NOT NULL DEFAULT 'internal', + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_office_comments_office ON office_comments(office_uid); diff --git a/services/docv/docv-back/migrations/20260404120000_folder_sources_notes_tasks_workflow.sql b/services/docv/docv-back/migrations/20260404120000_folder_sources_notes_tasks_workflow.sql new file mode 100644 index 0000000..5667703 --- /dev/null +++ b/services/docv/docv-back/migrations/20260404120000_folder_sources_notes_tasks_workflow.sql @@ -0,0 +1,61 @@ +-- Sources (liens vers d'autres pièces), notes dossier, 
tâches, état workflow document. + +ALTER TABLE folder_documents + ADD COLUMN IF NOT EXISTS workflow_state TEXT NOT NULL DEFAULT 'draft'; + +ALTER TABLE folder_documents + DROP CONSTRAINT IF EXISTS folder_documents_workflow_state_chk; + +ALTER TABLE folder_documents + ADD CONSTRAINT folder_documents_workflow_state_chk CHECK ( + workflow_state IN ( + 'draft', + 'requested', + 'submitted', + 'validated', + 'rejected', + 'archived' + ) + ); + +CREATE TABLE IF NOT EXISTS folder_document_sources ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + folder_uid UUID NOT NULL REFERENCES folders(uid) ON DELETE CASCADE, + label TEXT, + target_document_uid UUID NOT NULL REFERENCES folder_documents(uid) ON DELETE CASCADE, + created_by UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + CONSTRAINT folder_document_sources_unique_target UNIQUE (folder_uid, target_document_uid) +); + +CREATE INDEX IF NOT EXISTS idx_folder_document_sources_folder ON folder_document_sources(folder_uid); +CREATE INDEX IF NOT EXISTS idx_folder_document_sources_target ON folder_document_sources(target_document_uid); + +CREATE TABLE IF NOT EXISTS folder_notes ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + folder_uid UUID NOT NULL REFERENCES folders(uid) ON DELETE CASCADE, + content TEXT NOT NULL, + author_user_uid UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_folder_notes_folder ON folder_notes(folder_uid); + +CREATE TABLE IF NOT EXISTS tasks ( + uid UUID PRIMARY KEY DEFAULT gen_random_uuid(), + office_uid UUID NOT NULL REFERENCES offices(uid) ON DELETE CASCADE, + folder_uid UUID REFERENCES folders(uid) ON DELETE CASCADE, + title TEXT NOT NULL, + description TEXT, + status TEXT NOT NULL DEFAULT 'open', + assignee_user_uid UUID REFERENCES users(id) ON DELETE SET NULL, + due_at TIMESTAMPTZ, + created_at 
TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + CONSTRAINT tasks_status_chk CHECK (status IN ('open', 'in_progress', 'done', 'cancelled')) +); + +CREATE INDEX IF NOT EXISTS idx_tasks_office ON tasks(office_uid); +CREATE INDEX IF NOT EXISTS idx_tasks_folder ON tasks(folder_uid); +CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status); diff --git a/services/docv/docv-back/migrations/20260404140000_folders_purpose_operation_type.sql b/services/docv/docv-back/migrations/20260404140000_folders_purpose_operation_type.sql new file mode 100644 index 0000000..edfa547 --- /dev/null +++ b/services/docv/docv-back/migrations/20260404140000_folders_purpose_operation_type.sql @@ -0,0 +1,18 @@ +-- Dossiers : distinction opération client vs structure type démo (DP) ; type métier d'opération. + +ALTER TABLE folders ADD COLUMN IF NOT EXISTS folder_purpose TEXT NOT NULL DEFAULT 'client_operation'; +ALTER TABLE folders ADD COLUMN IF NOT EXISTS operation_type TEXT; + +UPDATE folders +SET folder_purpose = 'dp_structure_demo' +WHERE title LIKE 'Jeu type %' + AND dp_layout_root IS NOT NULL + AND dp_layout_root LIKE 'instances/%'; + +UPDATE offices +SET name = 'Entreprise démo (fictive)' +WHERE lower(btrim(name)) = lower(btrim('Cabinet démo')); + +ALTER TABLE folders DROP CONSTRAINT IF EXISTS folders_folder_purpose_check; +ALTER TABLE folders ADD CONSTRAINT folders_folder_purpose_check + CHECK (folder_purpose IN ('client_operation', 'dp_structure_demo')); diff --git a/services/docv/docv-back/src/api_auth.rs b/services/docv/docv-back/src/api_auth.rs new file mode 100644 index 0000000..e556228 --- /dev/null +++ b/services/docv/docv-back/src/api_auth.rs @@ -0,0 +1,34 @@ +//! Bearer JWT validation for `/api/v1/*` (same secret and claims shape as OAuth access tokens). 
+
+use hyper::header::HeaderMap;
+use jsonwebtoken::{decode, DecodingKey, Validation};
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AccessClaims {
+    pub sub: String,
+    pub email: String,
+    pub name: Option<String>,
+    pub exp: usize,
+    pub iss: String,
+}
+
+pub fn bearer_token(headers: &HeaderMap) -> Option<String> {
+    let hv = headers.get(hyper::header::AUTHORIZATION)?.to_str().ok()?;
+    let rest = hv.strip_prefix("Bearer ")?;
+    let t = rest.trim();
+    if t.is_empty() {
+        return None;
+    }
+    Some(t.to_string())
+}
+
+pub fn decode_access_token(secret: &str, token: &str) -> Result<AccessClaims, ()> {
+    decode::<AccessClaims>(
+        token,
+        &DecodingKey::from_secret(secret.as_bytes()),
+        &Validation::default(),
+    )
+    .map(|d| d.claims)
+    .map_err(|_| ())
+}
diff --git a/services/docv/docv-back/src/branding.rs b/services/docv/docv-back/src/branding.rs
new file mode 100644
index 0000000..86c0161
--- /dev/null
+++ b/services/docv/docv-back/src/branding.rs
@@ -0,0 +1,243 @@
+//! Per–OAuth-client branding for the sign-in HTML (wording + CSS colors).
+
+use html_escape::encode_text;
+use serde::Deserialize;
+use std::collections::HashMap;
+
+const FALLBACK_JSON: &str = include_str!("../tenants.default.json");
+
+#[derive(Debug, Deserialize, Clone, Default)]
+struct BrandingPartial {
+    page_title: Option<String>,
+    heading: Option<String>,
+    subtitle: Option<String>,
+    primary_color: Option<String>,
+    accent_color: Option<String>,
+    surface_color: Option<String>,
+    text_color: Option<String>,
+    submit_label: Option<String>,
+    font_family: Option<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct BrandingResolved {
+    pub page_title: String,
+    pub heading: String,
+    pub subtitle: String,
+    pub primary_color: String,
+    pub accent_color: String,
+    pub surface_color: String,
+    pub text_color: String,
+    pub submit_label: String,
+    pub font_family: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct TenantsFile {
+    #[serde(rename = "default")]
+    fallback: BrandingPartial,
+    #[serde(default)]
+    clients: HashMap<String, BrandingPartial>,
+}
+
+/// Extracts `client_id` from a return URL pointing at `/oauth/authorize?...`.
+pub fn client_id_from_authorize_return_url(return_url: &str) -> Option<String> {
+    let q = return_url.find('?')?;
+    let qs = &return_url[q + 1..];
+    let map: HashMap<String, String> = serde_urlencoded::from_str(qs).ok()?;
+    let id = map.get("client_id")?.trim();
+    if id.is_empty() {
+        return None;
+    }
+    Some(id.to_string())
+}
+
+fn sanitize_hex_color(input: &str, fallback: &str) -> String {
+    let t = input.trim();
+    if !t.starts_with('#') || t.len() < 4 {
+        return fallback.to_string();
+    }
+    if !t.chars().skip(1).all(|c| c.is_ascii_hexdigit()) {
+        return fallback.to_string();
+    }
+    t.to_string()
+}
+
+fn merge_field(opt_client: Option<&String>, opt_def: Option<&String>, hard: &str) -> String {
+    opt_client
+        .or(opt_def)
+        .cloned()
+        .filter(|s| !s.trim().is_empty())
+        .unwrap_or_else(|| hard.to_string())
+}
+
+fn resolve_partial(def: &BrandingPartial, ovr: &BrandingPartial) -> BrandingResolved {
+    let page_title = merge_field(
+        ovr.page_title.as_ref(),
+        def.page_title.as_ref(),
+        "Connexion",
+    );
+    let heading =
merge_field(ovr.heading.as_ref(), def.heading.as_ref(), "Connexion"); + let subtitle = merge_field( + ovr.subtitle.as_ref(), + def.subtitle.as_ref(), + "Saisissez vos identifiants pour continuer.", + ); + let primary = sanitize_hex_color( + merge_field( + ovr.primary_color.as_ref(), + def.primary_color.as_ref(), + "#1e3a5f", + ) + .as_str(), + "#1e3a5f", + ); + let accent = sanitize_hex_color( + merge_field( + ovr.accent_color.as_ref(), + def.accent_color.as_ref(), + "#b45309", + ) + .as_str(), + "#b45309", + ); + let surface = sanitize_hex_color( + merge_field( + ovr.surface_color.as_ref(), + def.surface_color.as_ref(), + "#f8fafc", + ) + .as_str(), + "#f8fafc", + ); + let text = sanitize_hex_color( + merge_field( + ovr.text_color.as_ref(), + def.text_color.as_ref(), + "#0f172a", + ) + .as_str(), + "#0f172a", + ); + let submit_label = merge_field( + ovr.submit_label.as_ref(), + def.submit_label.as_ref(), + "Continuer", + ); + let font_family = merge_field( + ovr.font_family.as_ref(), + def.font_family.as_ref(), + "system-ui, sans-serif", + ); + BrandingResolved { + page_title, + heading, + subtitle, + primary_color: primary, + accent_color: accent, + surface_color: surface, + text_color: text, + submit_label, + font_family, + } +} + +/// Parses `OAUTH_TENANTS_JSON` (or built-in default). Invalid JSON falls back to embedded default. 
+
+fn parse_tenants_json(raw: &str) -> TenantsFile {
+    let use_raw = if raw.trim().is_empty() {
+        FALLBACK_JSON
+    } else {
+        raw
+    };
+    serde_json::from_str::<TenantsFile>(use_raw).unwrap_or_else(|_| {
+        serde_json::from_str(FALLBACK_JSON).expect("tenants.default.json valid")
+    })
+}
+
+pub fn resolve_branding(tenants_json: &str, client_id: Option<&str>) -> BrandingResolved {
+    let file = parse_tenants_json(tenants_json);
+    let ovr = client_id.and_then(|id| file.clients.get(id));
+    resolve_partial(
+        &file.fallback,
+        ovr.unwrap_or(&BrandingPartial::default()),
+    )
+}
+
+fn sanitize_font_css(input: &str) -> String {
+    input
+        .chars()
+        .filter(|&c| !matches!(c, '<' | '>' | '{' | '}' | ';' | '\n' | '\r'))
+        .collect()
+}
+
+/// `form_action_attr_escaped` — full POST target e.g. `/docv-api/oauth/sign-in` (attribute-encoded).
+pub fn sign_in_page_html(
+    return_url_attr_escaped: &str,
+    b: &BrandingResolved,
+    form_action_attr_escaped: &str,
+) -> String {
+    let title = encode_text(&b.page_title);
+    let heading = encode_text(&b.heading);
+    let subtitle = encode_text(&b.subtitle);
+    let submit = encode_text(&b.submit_label);
+    let font = sanitize_font_css(&b.font_family);
+    let p = &b.primary_color;
+    let a = &b.accent_color;
+    let s = &b.surface_color;
+    let t = &b.text_color;
+    format!(
+        r#"{title}
+
+

{heading}

{subtitle}

+
+ + + + +
"#, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_client_id_from_relative_authorize_url() { + let u = "/oauth/authorize?response_type=code&client_id=enso-web&redirect_uri=http%3A%2F%2Flocal"; + assert_eq!( + client_id_from_authorize_return_url(u).as_deref(), + Some("enso-web") + ); + } + + #[test] + fn parses_client_id_when_path_has_reverse_proxy_prefix() { + let u = "/docv-api/oauth/authorize?response_type=code&client_id=enso-web&redirect_uri=x"; + assert_eq!( + client_id_from_authorize_return_url(u).as_deref(), + Some("enso-web") + ); + } + + #[test] + fn resolves_client_override_subtitle() { + let raw = + r#"{"default":{"subtitle":"Def"},"clients":{"x":{"subtitle":"Custom"}}}"#; + let b = resolve_branding(raw, Some("x")); + assert_eq!(b.subtitle, "Custom"); + } +} diff --git a/services/docv/docv-back/src/config/mod.rs b/services/docv/docv-back/src/config/mod.rs new file mode 100644 index 0000000..f1d609a --- /dev/null +++ b/services/docv/docv-back/src/config/mod.rs @@ -0,0 +1,219 @@ +//! Configuration from environment. + +use std::env; +use std::fs; +use std::path::PathBuf; + +/// Optional Git sync for `data/dossiers-permanents` after mirror uploads. +#[derive(Clone)] +pub struct DpGitSyncConfig { + pub enabled: bool, + pub repo_root: Option, + /// Relative to repo root (e.g. `data/dossiers-permanents`). + pub data_subpath: PathBuf, + pub remote: String, + pub branch: Option, +} + +#[derive(Clone)] +pub struct Config { + pub database_url: String, + pub jwt_secret: String, + pub host: String, + pub port: u16, + /// Registered OAuth2 client ids (comma-separated in `OAUTH_CLIENT_ID`). + pub oauth_client_ids: Vec, + pub oauth_client_secret: String, + pub oauth_redirect_uris: Vec, + /// JSON branding config (`OAUTH_TENANTS_JSON`, or file `OAUTH_TENANTS_PATH`, or built-in default). + pub tenants_json: String, + /// Path prefix browsers use to reach this service (e.g. `/docv-api`). Empty if OAuth is at site root. 
+    pub browser_oauth_prefix: String,
+    /// Primary key column on `users` for OAuth sign-in lookup: `id` (default migration) or `uid` (legacy / IMPL_01 style).
+    pub users_pk_column: String,
+    /// When set, `POST .../folders/:uid/documents/binary` stores bytes on disk and sets `storage_url` for download (`GET .../files/:docUid`).
+    pub file_storage_dir: Option<PathBuf>,
+    /// Max body size for binary document upload (bytes).
+    pub upload_max_bytes: usize,
+    pub dp_git_sync: DpGitSyncConfig,
+    /// OAuth2 access token (`Bearer`) lifetime in seconds (JWT `exp`, `expires_in`).
+    pub oauth_access_token_ttl_secs: u64,
+    /// Optional upstream IA HTTP endpoint (`POST` JSON in, JSON out). If unset, `POST /api/v1/ai/*` returns 503.
+    pub ai_service_url: Option<String>,
+    /// Optional `Authorization: Bearer …` sent to the IA service.
+    pub ai_api_key: Option<String>,
+    pub ai_timeout_secs: u64,
+    /// Max total characters for forwarded chat / assist prompts (guardrail).
+    pub ai_max_input_chars: usize,
+}
+
+impl Config {
+    pub fn load_from_env() -> Self {
+        let redirect = env::var("OAUTH_REDIRECT_URIS").unwrap_or_default();
+        let oauth_redirect_uris: Vec<String> = redirect
+            .split(',')
+            .map(|s| s.trim().to_owned())
+            .filter(|s| !s.is_empty())
+            .collect();
+
+        let raw_ids = env::var("OAUTH_CLIENT_ID").unwrap_or_else(|_| "enso-web".into());
+        let mut oauth_client_ids: Vec<String> = raw_ids
+            .split(',')
+            .map(|s| s.trim().to_owned())
+            .filter(|s| !s.is_empty())
+            .collect();
+        if oauth_client_ids.is_empty() {
+            oauth_client_ids.push("enso-web".into());
+        }
+
+        let tenants_json = Self::load_tenants_json();
+        let browser_oauth_prefix = Self::normalize_browser_prefix(
+            &env::var("OAUTH_BROWSER_PATH_PREFIX").unwrap_or_else(|_| "/docv-api".into()),
+        );
+        let users_pk_column = Self::normalize_users_pk_column(
+            &env::var("DOCV_USERS_PK_COLUMN").unwrap_or_else(|_| "id".into()),
+        );
+
+        let file_storage_dir = env::var("DOCV_FILE_STORAGE_DIR").ok().map(|s| s.trim().to_owned()).filter(|s|
!s.is_empty()).map(PathBuf::from); + let upload_max_bytes = env::var("DOCV_UPLOAD_MAX_BYTES") + .ok() + .and_then(|s| s.parse().ok()) + .filter(|&n| n > 0) + .unwrap_or(10_485_760); + + let dp_git_enabled = env::var("DOCV_DP_GIT_SYNC") + .ok() + .map(|s| s.trim().to_ascii_lowercase()) + .map(|s| s == "1" || s == "true" || s == "yes") + .unwrap_or(false); + let dp_git_repo_root = env::var("DOCV_DP_GIT_REPO_ROOT") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) + .map(PathBuf::from); + let dp_sub = env::var("DOCV_DP_GIT_DATA_SUBPATH") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "data/dossiers-permanents".into()); + let dp_remote = env::var("DOCV_DP_GIT_REMOTE") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "origin".into()); + let dp_branch = env::var("DOCV_DP_GIT_BRANCH") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()); + let dp_git_sync = DpGitSyncConfig { + enabled: dp_git_enabled, + repo_root: dp_git_repo_root, + data_subpath: PathBuf::from(dp_sub), + remote: dp_remote, + branch: dp_branch, + }; + + let oauth_access_token_ttl_secs = env::var("DOCV_OAUTH_ACCESS_TOKEN_TTL_SEC") + .ok() + .and_then(|s| s.parse::().ok()) + .filter(|&n| (60..=86400).contains(&n)) + .unwrap_or(900); + + let ai_service_url = env::var("DOCV_AI_SERVICE_URL") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()); + let ai_api_key = env::var("DOCV_AI_API_KEY") + .ok() + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()); + let ai_timeout_secs = env::var("DOCV_AI_TIMEOUT_SECS") + .ok() + .and_then(|s| s.parse::().ok()) + .filter(|&n| n > 0 && n <= 300) + .unwrap_or(60); + let ai_max_input_chars = env::var("DOCV_AI_MAX_INPUT_CHARS") + .ok() + .and_then(|s| s.parse::().ok()) + .filter(|&n| n >= 512 && n <= 500_000) + .unwrap_or(32_000); + + Self { + database_url: env::var("DATABASE_URL") + .unwrap_or_else(|_| 
"postgres://localhost/docv".into()), + jwt_secret: env::var("JWT_SECRET") + .unwrap_or_else(|_| "dev-secret-change-in-production".into()), + host: env::var("HOST").unwrap_or_else(|_| "0.0.0.0".into()), + port: env::var("PORT") + .unwrap_or_else(|_| "3038".into()) + .parse() + .unwrap_or(3038), + oauth_client_ids, + oauth_client_secret: env::var("OAUTH_CLIENT_SECRET") + .unwrap_or_else(|_| "dev-oauth-secret-change-me".into()), + oauth_redirect_uris, + tenants_json, + browser_oauth_prefix, + users_pk_column, + file_storage_dir, + upload_max_bytes, + dp_git_sync, + oauth_access_token_ttl_secs, + ai_service_url, + ai_api_key, + ai_timeout_secs, + ai_max_input_chars, + } + } + + fn normalize_users_pk_column(raw: &str) -> String { + match raw.trim().to_ascii_lowercase().as_str() { + "uid" => "uid".to_string(), + _ => "id".to_string(), + } + } + + fn normalize_browser_prefix(raw: &str) -> String { + let t = raw.trim(); + if t.is_empty() { + return String::new(); + } + let inner = t.trim_matches('/').trim(); + if inner.is_empty() { + return String::new(); + } + format!("/{}", inner) + } + + /// Prepends [`Self::browser_oauth_prefix`] to a path starting with `/oauth/...` for `Location` and forms. 
+ pub fn browser_oauth_path(&self, path_from_oauth_mount: &str) -> String { + if !path_from_oauth_mount.starts_with('/') { + return path_from_oauth_mount.to_string(); + } + let p = self.browser_oauth_prefix.trim(); + if p.is_empty() { + return path_from_oauth_mount.to_string(); + } + format!("{}{}", p, path_from_oauth_mount) + } + + fn load_tenants_json() -> String { + const DEFAULT: &str = include_str!("../../tenants.default.json"); + if let Ok(path) = env::var("OAUTH_TENANTS_PATH") { + if let Ok(s) = fs::read_to_string(path.trim()) { + if !s.trim().is_empty() { + return s; + } + } + } + env::var("OAUTH_TENANTS_JSON").unwrap_or_else(|_| DEFAULT.to_string()) + } + + pub fn redirect_uri_allowed(&self, uri: &str) -> bool { + self.oauth_redirect_uris.iter().any(|a| a == uri) + } + + pub fn oauth_client_id_allowed(&self, id: &str) -> bool { + self.oauth_client_ids.iter().any(|c| c == id) + } +} diff --git a/services/docv/docv-back/src/db/mod.rs b/services/docv/docv-back/src/db/mod.rs new file mode 100644 index 0000000..7335213 --- /dev/null +++ b/services/docv/docv-back/src/db/mod.rs @@ -0,0 +1,1576 @@ +//! PostgreSQL pool. + +use deadpool_postgres::{Manager, Pool}; +use std::collections::HashSet; +use std::env; +use std::ops::Deref; +use tokio_postgres::{Client as PgClient, Config as PgConfig, NoTls}; +use tracing::{error, info, warn}; +use uuid::Uuid; + +pub type DbPool = Pool; + +/// Nom temporaire pendant la migration démo « 1 office + 8 dossiers » → « 9 offices ». +/// Ne doit pas apparaître dans les listes sociétés ; voir `remove_stale_legacy_demo_migration_placeholder_office_if_needed`. +const LEGACY_DEMO_MIGRATION_TMP_NAME: &str = "__docv_migrating_legacy_demo__"; + +/// Nom technique exclu des réponses API sociétés (migration interrompue). +pub fn legacy_demo_migration_placeholder_office_name() -> &'static str { + LEGACY_DEMO_MIGRATION_TMP_NAME +} + +/// IMPL / données réelles : `office_members.role_uid` → `roles`, plus `joined_at`. 
+async fn office_members_has_role_uid(client: &PgClient) -> bool { + match client + .query_opt( + "SELECT 1 FROM information_schema.columns \ + WHERE table_schema = 'public' AND table_name = 'office_members' \ + AND column_name = 'role_uid' LIMIT 1", + &[], + ) + .await + { + Ok(row) => row.is_some(), + Err(e) => { + error!(?e, "office_members role_uid column probe"); + false + } + } +} + +/// Rôle **strictement lié à l’office** (`roles.office_uid = office`). Aucun repli sur un rôle « global ». +async fn require_office_scoped_role_uid( + client: &PgClient, + office_uid: Uuid, +) -> Result { + let row = client + .query_one( + "SELECT uid FROM roles WHERE office_uid = $1 ORDER BY created_at ASC LIMIT 1", + &[&office_uid], + ) + .await?; + Ok(row.get(0)) +} + +async fn roles_table_exists(client: &PgClient) -> bool { + matches!( + client + .query_opt( + "SELECT 1 FROM information_schema.tables \ + WHERE table_schema = 'public' AND table_name = 'roles' LIMIT 1", + &[], + ) + .await, + Ok(Some(_)) + ) +} + +async fn folder_types_table_exists(client: &PgClient) -> bool { + matches!( + client + .query_opt( + "SELECT 1 FROM information_schema.tables \ + WHERE table_schema = 'public' AND table_name = 'folder_types' LIMIT 1", + &[], + ) + .await, + Ok(Some(_)) + ) +} + +/// IMPL : une ligne `roles` d’office avec le libellé métier standard **Membre** si aucune ligne pour cet office. +/// À appeler après chaque création d’`offices` avant `insert_office_member_with_role_label`. +async fn ensure_one_office_scoped_role_row_if_impl( + client: &PgClient, + office_uid: Uuid, +) -> Result<(), tokio_postgres::Error> { + if !office_members_has_role_uid(client).await || !roles_table_exists(client).await { + return Ok(()); + } + let n: i64 = client + .query_one( + "SELECT COUNT(*)::bigint FROM roles WHERE office_uid = $1", + &[&office_uid], + ) + .await? 
+ .get(0); + if n > 0 { + return Ok(()); + } + client + .execute( + "INSERT INTO roles (office_uid, name) VALUES ($1, $2)", + &[&office_uid, &"Membre"], + ) + .await?; + info!( + office_uid = %office_uid, + "inserted default IMPL roles row (office-scoped Membre)" + ); + Ok(()) +} + +/// IMPL : une ligne **`folder_types`** d’office (**Dossier standard**) si la colonne **`folders.folder_type_uid`** existe et qu’aucune ligne pour cet office. +/// À appeler après chaque création d’`offices` avant **`insert_folder_with_dp_layout`** sur ce schéma. +async fn ensure_one_folder_type_row_if_impl( + client: &PgClient, + office_uid: Uuid, +) -> Result<(), tokio_postgres::Error> { + if !folders_has_folder_type_uid_column(client).await || !folder_types_table_exists(client).await { + return Ok(()); + } + let n: i64 = client + .query_one( + "SELECT COUNT(*)::bigint FROM folder_types WHERE office_uid = $1", + &[&office_uid], + ) + .await? + .get(0); + if n > 0 { + return Ok(()); + } + client + .execute( + "INSERT INTO folder_types (office_uid, label) VALUES ($1, $2)", + &[&office_uid, &"Dossier standard"], + ) + .await?; + info!( + office_uid = %office_uid, + "inserted default IMPL folder_types row (office-scoped Dossier standard)" + ); + Ok(()) +} + +/// Après **`INSERT INTO offices`** : lignes IMPL **`roles`** et **`folder_types`** lorsque le schéma les impose. +async fn ensure_auxiliary_rows_for_new_office_if_impl( + client: &PgClient, + office_uid: Uuid, +) -> Result<(), tokio_postgres::Error> { + ensure_one_office_scoped_role_row_if_impl(client, office_uid).await?; + ensure_one_folder_type_row_if_impl(client, office_uid).await?; + Ok(()) +} + +/// Insère une ligne `office_members` compatible schéma minimal **ou** schéma IMPL (`role_uid`, `joined_at`). 
+async fn insert_office_member_with_role_label( + client: &PgClient, + office_uid: Uuid, + user_uid: Uuid, + role_label: &str, +) -> Result<(), tokio_postgres::Error> { + if office_members_has_role_uid(client).await { + let role_uid = require_office_scoped_role_uid(client, office_uid).await?; + client + .execute( + "INSERT INTO office_members (office_uid, user_uid, role_uid, joined_at, role) \ + VALUES ($1, $2, $3, now(), $4) ON CONFLICT DO NOTHING", + &[&office_uid, &user_uid, &role_uid, &role_label], + ) + .await?; + } else { + client + .execute( + "INSERT INTO office_members (office_uid, user_uid, role) \ + VALUES ($1, $2, $3) ON CONFLICT DO NOTHING", + &[&office_uid, &user_uid, &role_label], + ) + .await?; + } + Ok(()) +} + +pub fn create_pool() -> Result> { + let database_url = + env::var("DATABASE_URL").unwrap_or_else(|_| "postgres://localhost/docv".into()); + let pg_config: PgConfig = database_url.parse()?; + let manager = Manager::new(pg_config, NoTls); + let pool = Pool::builder(manager).build()?; + Ok(pool) +} + +pub async fn ensure_users_table(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for migrations check"); + return; + }; + let sql = include_str!("../../migrations/20260330120000_create_users.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "users migration batch_execute failed (may already exist)"); + } +} + +pub async fn ensure_offices_folders_schema(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for offices schema"); + return; + }; + let sql = include_str!("../../migrations/20260330140000_offices_folders.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "offices_folders migration batch_execute failed"); + } +} + +/// Align legacy `folders.name` with canonical `folders.title` for API + stub seed. 
+pub async fn ensure_folders_title_legacy_compat(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folders title compat"); + return; + }; + let sql = include_str!("../../migrations/20260402110000_folders_title_legacy_compat.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folders_title_legacy_compat migration failed"); + } +} + +/// Add columns introduced after early deployments (`CREATE TABLE IF NOT EXISTS` skips them). +pub async fn ensure_offices_extended_columns(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for offices extended columns"); + return; + }; + let sql = include_str!("../../migrations/20260401140000_offices_add_siren_address.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "offices_add_siren_address migration failed"); + } +} + +pub async fn ensure_office_members_role_column(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for office_members role column"); + return; + }; + let sql = include_str!("../../migrations/20260401150000_office_members_role.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "office_members_role migration failed"); + } +} + +/// Table `roles` minimale (`office_uid`, `name`, …) si absente — requis pour `office_members.role_uid` (IMPL). 
+pub async fn ensure_roles_minimal_table(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for roles minimal"); + return; + }; + let sql = include_str!("../../migrations/20260403140000_roles_minimal.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "roles_minimal migration failed"); + } +} + +pub async fn ensure_user_stub_lists_schema(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for user stub lists"); + return; + }; + let sql = include_str!("../../migrations/20260401160000_user_stub_lists.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "user_stub_lists migration failed"); + } +} + +pub async fn ensure_folder_documents_schema(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folder_documents schema"); + return; + }; + let sql = include_str!("../../migrations/20260401170000_folder_documents.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folder_documents migration failed"); + } +} + +pub async fn ensure_folder_documents_storage_columns(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folder_documents storage columns"); + return; + }; + let sql = include_str!("../../migrations/20260402100000_folder_documents_storage_pending_idx.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folder_documents_storage migration failed"); + } +} + +pub async fn ensure_folders_dp_layout_columns(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folders dp layout"); + return; + }; + let sql = include_str!("../../migrations/20260403120000_folders_dp_layout.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folders_dp_layout migration failed"); + } +} + +/// Colonne `status` attendue par `GET/POST /api/v1/folders` (schémas IMPL sans 
cette colonne). +pub async fn ensure_folders_status_column(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folders status column"); + return; + }; + let sql = include_str!("../../migrations/20260402183000_folders_status_if_missing.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folders_status migration failed"); + } +} + +/// Colonnes `folder_purpose` / `operation_type` (opération vs structure type démo, type métier). +pub async fn ensure_folders_purpose_operation_type(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for folders purpose columns"); + return; + }; + let sql = + include_str!("../../migrations/20260404140000_folders_purpose_operation_type.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folders_purpose_operation_type migration failed"); + } +} + +/// Colonnes **`offices.parent_office_uid`**, **`archived_at`** ; **`folders.extends_permanent_record`** ; table **`office_comments`**. +pub async fn ensure_office_society_extensions(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for office society extensions"); + return; + }; + let sql = include_str!("../../migrations/20260403160000_office_society_extensions.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "office_society_extensions migration failed"); + } +} + +/// Sources pièce-à-pièce, notes dossier, tâches, `workflow_state` sur `folder_documents`. 
+pub async fn ensure_folder_sources_notes_tasks_workflow(pool: &DbPool) { + let Ok(client) = pool.get().await else { + error!("db pool get client failed for sources/notes/tasks/workflow"); + return; + }; + let sql = include_str!("../../migrations/20260404120000_folder_sources_notes_tasks_workflow.sql"); + if let Err(e) = client.batch_execute(sql).await { + error!(?e, "folder_sources_notes_tasks_workflow migration failed"); + } +} + +/// Au moins une ligne **`roles`** par **`offices`** lorsque le schéma IMPL utilise **`office_members.role_uid`**. +pub async fn ensure_impl_role_rows_for_all_offices( + pool: &DbPool, +) -> Result<(), Box> { + let client = pool.get().await.map_err(|e| { + error!(?e, "ensure_impl_role_rows: pool get"); + Box::new(e) as Box + })?; + if !office_members_has_role_uid(client.deref()).await || !roles_table_exists(client.deref()).await + { + return Ok(()); + } + let rows = client.query("SELECT uid FROM offices", &[]).await.map_err(|e| { + error!(?e, "ensure_impl_role_rows: list offices"); + Box::new(e) as Box + })?; + for row in rows { + let office_uid: Uuid = row.get(0); + ensure_one_office_scoped_role_row_if_impl(client.deref(), office_uid) + .await + .map_err(|e| { + error!(?e, office_uid = %office_uid, "ensure_impl_role_rows: role row for office"); + Box::new(e) as Box + })?; + } + Ok(()) +} + +/// Au moins une ligne **`folder_types`** par **`offices`** lorsque le schéma IMPL utilise **`folders.folder_type_uid`**. 
+pub async fn ensure_impl_folder_type_rows_for_all_offices( + pool: &DbPool, +) -> Result<(), Box> { + let client = pool.get().await.map_err(|e| { + error!(?e, "ensure_impl_folder_type_rows: pool get"); + Box::new(e) as Box + })?; + if !folders_has_folder_type_uid_column(client.deref()).await + || !folder_types_table_exists(client.deref()).await + { + return Ok(()); + } + let rows = client.query("SELECT uid FROM offices", &[]).await.map_err(|e| { + error!(?e, "ensure_impl_folder_type_rows: list offices"); + Box::new(e) as Box + })?; + for row in rows { + let office_uid: Uuid = row.get(0); + ensure_one_folder_type_row_if_impl(client.deref(), office_uid) + .await + .map_err(|e| { + error!( + ?e, + office_uid = %office_uid, + "ensure_impl_folder_type_rows: folder_types row for office" + ); + Box::new(e) as Box + })?; + } + Ok(()) +} + +/// Rattache **`client@example.com`** aux offices sans **`office_members`** (rattrapage seed / migration démo). +pub async fn repair_seed_demo_user_office_memberships_if_needed( + pool: &DbPool, + users_pk_column: &str, +) { + let Ok(client) = pool.get().await else { + return; + }; + let pk_col = if users_pk_column == "uid" { + "uid" + } else { + "id" + }; + let user_sql = format!( + "SELECT {pk_col}::text FROM users WHERE lower(email) = lower('client@example.com') LIMIT 1" + ); + let row = match client.query_opt(&user_sql, &[]).await { + Ok(r) => r, + Err(e) => { + error!(?e, "repair demo members user lookup"); + return; + } + }; + let Some(row) = row else { + return; + }; + let user_uuid: Uuid = match row.get::<_, String>(0).parse() { + Ok(u) => u, + Err(_) => return, + }; + let offices = match client.query("SELECT uid, name FROM offices", &[]).await { + Ok(r) => r, + Err(e) => { + error!(?e, "repair demo members list offices"); + return; + } + }; + for orow in offices { + let office_uid: Uuid = orow.get(0); + let office_name: String = orow.get(1); + if office_name.trim() == LEGACY_DEMO_MIGRATION_TMP_NAME { + continue; + } + let cnt: 
i64 = match client + .query_one( + "SELECT COUNT(*)::bigint FROM office_members WHERE office_uid = $1 AND user_uid = $2", + &[&office_uid, &user_uuid], + ) + .await + { + Ok(r) => r.get(0), + Err(e) => { + error!(?e, "repair demo members count"); + continue; + } + }; + if cnt > 0 { + continue; + } + if let Err(e) = insert_office_member_with_role_label( + client.deref(), + office_uid, + user_uuid, + "admin", + ) + .await + { + error!(?e, office_uid = %office_uid, "repair demo members insert"); + } else { + info!( + office_uid = %office_uid, + "repaired office_members for client@example.com" + ); + } + } +} + +/// Supprime un office laissé avec le nom de migration si **aucun** dossier ne le référence (interruption après renommage). +pub async fn remove_stale_legacy_demo_migration_placeholder_office_if_needed(pool: &DbPool) { + let Ok(client) = pool.get().await else { + return; + }; + let row = match client + .query_opt( + "SELECT uid FROM offices WHERE btrim(name) = $1", + &[&LEGACY_DEMO_MIGRATION_TMP_NAME], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "stale migration placeholder: lookup"); + return; + } + }; + let Some(row) = row else { + return; + }; + let uid: Uuid = row.get(0); + let n: i64 = match client + .query_one( + "SELECT COUNT(*)::bigint FROM folders WHERE office_uid = $1", + &[&uid], + ) + .await + { + Ok(r) => r.get(0), + Err(e) => { + error!(?e, office_uid = %uid, "stale migration placeholder: folder count"); + return; + } + }; + if n > 0 { + warn!( + office_uid = %uid, + count = n, + "stale migration placeholder office still has folders; skip auto-delete" + ); + return; + } + if let Err(e) = client + .execute( + "DELETE FROM office_members WHERE office_uid = $1", + &[&uid], + ) + .await + { + error!(?e, office_uid = %uid, "stale migration placeholder: delete members"); + return; + } + if let Err(e) = client + .execute("DELETE FROM offices WHERE uid = $1", &[&uid]) + .await + { + error!(?e, office_uid = %uid, "stale migration placeholder: 
delete office"); + return; + } + info!( + office_uid = %uid, + "removed stale legacy demo migration placeholder office" + ); +} + +/// Société cliente **fictive** pour la démo espace client (pas le cabinet d’avocats de l’utilisateur). +pub const DEMO_OFFICE_DISPLAY_NAME: &str = "Entreprise démo (fictive)"; + +/// Dossier démo seul : `data/dossiers-permanents/instances/entreprise_demo/`. +pub const DEMO_ENTREPRISE_INSTANCE_ID: &str = "entreprise_demo"; + +/// Arborescence Git opération cession démo (relatif au répertoire `dossiers-permanents/` du dépôt données). +pub const DEMO_CESSION_DP_LAYOUT_ROOT: &str = "operations/entreprise_demo/cession_demo"; + +/// Jeux type : `data/dossiers-permanents/instances//` dans le dépôt (voir `data/dossiers-permanents/README.md`). +const DEMO_DP_INSTANCE_IDS: &[&str] = &[ + "entreprise_commercial_ir", + "entreprise_commercial_is", + "entreprise_sci_ir", + "entreprise_sci_is", + "groupe_commercial_ir", + "groupe_commercial_is", + "groupe_sci_ir", + "groupe_sci_is", +]; + +/// Libellé « Sociétés » (carte = une société) pour chaque **type** DP — aligné sur le vocabulaire i18n enso-front (`company.dpArchetype*`). +pub fn demo_archetype_office_display_name(archetype: &str) -> Option<&'static str> { + Some(match archetype { + "entreprise_commercial_ir" => "Entreprise · Société commerciale · IR", + "entreprise_commercial_is" => "Entreprise · Société commerciale · IS", + "entreprise_sci_ir" => "Entreprise · SCI · IR", + "entreprise_sci_is" => "Entreprise · SCI · IS", + "groupe_commercial_ir" => "Groupe · Société commerciale · IR", + "groupe_commercial_is" => "Groupe · Société commerciale · IS", + "groupe_sci_ir" => "Groupe · SCI · IR", + "groupe_sci_is" => "Groupe · SCI · IS", + _ => return None, + }) +} + +/// Ancien schéma : un office « Entreprise démo » avec 8 dossiers `dp_structure_demo`. Éclatement en **9 offices** (8 types + démo) pour afficher chaque type comme une société dans l’UX. 
+pub async fn migrate_legacy_demo_single_office_to_demo_and_type_offices_if_needed(pool: &DbPool) { + let Ok(client) = pool.get().await else { + return; + }; + let old_row = match client + .query_opt( + "SELECT uid FROM offices WHERE lower(btrim(name)) = lower(btrim($1::text)) LIMIT 1", + &[&DEMO_OFFICE_DISPLAY_NAME], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "migrate legacy demo: find office"); + return; + } + }; + let Some(old_row) = old_row else { + return; + }; + let old_uid: Uuid = old_row.get(0); + + let folder_rows = match client + .query( + "SELECT uid, COALESCE(dp_archetype, ''), folder_purpose FROM folders WHERE office_uid = $1", + &[&old_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "migrate legacy demo: list folders"); + return; + } + }; + if folder_rows.len() != DEMO_DP_INSTANCE_IDS.len() { + return; + } + let mut seen_arch: HashSet = HashSet::new(); + for row in &folder_rows { + let arch: String = row.get(1); + let purpose: Option = row.get(2); + let arch_trim = arch.trim().to_string(); + if arch_trim.is_empty() + || purpose.as_deref().map(|p| p.trim()) != Some("dp_structure_demo") + { + return; + } + if demo_archetype_office_display_name(&arch_trim).is_none() { + return; + } + if !DEMO_DP_INSTANCE_IDS + .iter() + .any(|&id| id == arch_trim.as_str()) + { + return; + } + seen_arch.insert(arch_trim); + } + if seen_arch.len() != DEMO_DP_INSTANCE_IDS.len() { + return; + } + + let members = match client + .query( + "SELECT user_uid, role FROM office_members WHERE office_uid = $1", + &[&old_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "migrate legacy demo: office_members"); + return; + } + }; + + if let Err(e) = client + .execute( + "UPDATE offices SET name = $1 WHERE uid = $2", + &[&LEGACY_DEMO_MIGRATION_TMP_NAME, &old_uid], + ) + .await + { + error!(?e, "migrate legacy demo: rename old office"); + return; + } + + info!(office_uid = %old_uid, "migrating legacy demo office into 9 offices (8 types + 
demo)"); + + for row in &folder_rows { + let folder_uid: Uuid = row.get(0); + let arch: String = row.get(1); + let arch_trim = arch.trim(); + let Some(label) = demo_archetype_office_display_name(arch_trim) else { + continue; + }; + let new_office: Uuid = match client + .query_one("INSERT INTO offices (name) VALUES ($1) RETURNING uid", &[&label]) + .await + { + Ok(r) => r.get(0), + Err(e) => { + error!(?e, label = %label, "migrate legacy demo: insert type office"); + return; + } + }; + if let Err(e) = ensure_auxiliary_rows_for_new_office_if_impl(client.deref(), new_office).await { + error!( + ?e, + office_uid = %new_office, + "migrate legacy demo: IMPL auxiliary rows for type office" + ); + return; + } + if let Err(e) = client + .execute( + "UPDATE folders SET office_uid = $1 WHERE uid = $2", + &[&new_office, &folder_uid], + ) + .await + { + error!(?e, "migrate legacy demo: move folder"); + return; + } + for mrow in &members { + let user_uid: Uuid = mrow.get(0); + let role: String = mrow.get(1); + if let Err(e) = + insert_office_member_with_role_label(client.deref(), new_office, user_uid, &role) + .await + { + error!(?e, "migrate legacy demo: member for type office"); + return; + } + } + } + + let demo_uid: Uuid = match client + .query_one( + "INSERT INTO offices (name) VALUES ($1) RETURNING uid", + &[&DEMO_OFFICE_DISPLAY_NAME], + ) + .await + { + Ok(r) => r.get(0), + Err(e) => { + error!(?e, "migrate legacy demo: insert demo office"); + return; + } + }; + if let Err(e) = ensure_auxiliary_rows_for_new_office_if_impl(client.deref(), demo_uid).await { + error!( + ?e, + office_uid = %demo_uid, + "migrate legacy demo: IMPL auxiliary rows for demo office" + ); + return; + } + + let layout = format!("instances/{DEMO_ENTREPRISE_INSTANCE_ID}"); + let demo_folder_title = "Dossier permanent — démonstration générale"; + let folder_uid = match insert_folder_with_dp_layout( + client.deref(), + demo_uid, + demo_folder_title, + DEMO_ENTREPRISE_INSTANCE_ID, + &layout, + ) + .await 
+ { + Ok(u) => u, + Err(e) => { + error!(?e, "migrate legacy demo: insert demo folder"); + return; + } + }; + if let Err(e) = client + .execute( + "INSERT INTO folder_documents (folder_uid, name, doc_type, category, uploaded_by, size_label) \ + VALUES ($1, $2, 'autre', 'dossier', 'cabinet', '—')", + &[&folder_uid, &"Note — arborescence type dans data/dossiers-permanents"], + ) + .await + { + error!(?e, "migrate legacy demo: folder_documents"); + } + + for mrow in &members { + let user_uid: Uuid = mrow.get(0); + let role: String = mrow.get(1); + if let Err(e) = + insert_office_member_with_role_label(client.deref(), demo_uid, user_uid, &role).await + { + error!(?e, "migrate legacy demo: demo office member"); + return; + } + } + + if let Err(e) = client + .execute( + "UPDATE user_notifications n SET office_uid = f.office_uid \ + FROM folders f WHERE n.case_uid = f.uid AND n.office_uid = $1", + &[&old_uid], + ) + .await + { + error!(?e, "migrate legacy demo: repoint notifications"); + } + if let Err(e) = client + .execute( + "UPDATE user_notifications SET office_uid = $2 WHERE office_uid = $1 AND case_uid IS NULL", + &[&old_uid, &demo_uid], + ) + .await + { + error!(?e, "migrate legacy demo: repoint orphan notifications"); + } + if let Err(e) = client + .execute( + "UPDATE user_pending_documents p SET office_uid = f.office_uid \ + FROM folders f WHERE p.case_uid = f.uid AND p.office_uid = $1", + &[&old_uid], + ) + .await + { + error!(?e, "migrate legacy demo: repoint pending docs"); + } + if let Err(e) = client + .execute( + "UPDATE user_conversations SET office_uid = $2 WHERE office_uid = $1", + &[&old_uid, &demo_uid], + ) + .await + { + error!(?e, "migrate legacy demo: repoint conversations"); + } + + if let Err(e) = client + .execute( + "DELETE FROM office_members WHERE office_uid = $1", + &[&old_uid], + ) + .await + { + error!(?e, "migrate legacy demo: delete old members"); + return; + } + if let Err(e) = client + .execute("DELETE FROM offices WHERE uid = $1", 
&[&old_uid]) + .await + { + error!(?e, "migrate legacy demo: delete old office"); + } +} + +pub async fn folders_has_folder_type_uid_column(client: &PgClient) -> bool { + matches!( + client + .query_opt( + "SELECT 1 FROM information_schema.columns \ + WHERE table_schema = 'public' AND table_name = 'folders' \ + AND column_name = 'folder_type_uid' LIMIT 1", + &[], + ) + .await, + Ok(Some(_)) + ) +} + +/// Rôle **`folder_types`** strictement lié à l’office (pas de création implicite ici). +async fn require_office_scoped_folder_type_uid( + client: &PgClient, + office_uid: Uuid, +) -> Result { + let row = client + .query_one( + "SELECT uid FROM folder_types WHERE office_uid = $1 ORDER BY created_at ASC LIMIT 1", + &[&office_uid], + ) + .await?; + Ok(row.get(0)) +} + +/// **`POST /api/v1/folders`** sur schéma IMPL : assure **`ensure_one_folder_type_row_if_impl`**, puis **`require_office_scoped_folder_type_uid`**. +/// N’appeler que si **`folders_has_folder_type_uid_column`** est vrai. +pub async fn ensure_folder_type_uid_for_api_create( + client: &PgClient, + office_uid: Uuid, +) -> Result { + ensure_one_folder_type_row_if_impl(client, office_uid).await?; + require_office_scoped_folder_type_uid(client, office_uid).await +} + +/// Crée un dossier avec `dp_archetype` / `dp_layout_root` (schéma minimal ou IMPL avec `folder_type_uid`). 
+async fn insert_folder_with_dp_layout( + client: &PgClient, + office_uid: Uuid, + title: &str, + dp_archetype: &str, + dp_layout_root: &str, +) -> Result { + if folders_has_folder_type_uid_column(client).await { + let ft_uid = require_office_scoped_folder_type_uid(client, office_uid).await?; + let row = client + .query_one( + "INSERT INTO folders (office_uid, folder_type_uid, title, description, status, dp_archetype, dp_layout_root, folder_purpose, operation_type) \ + VALUES ($1, $2, $3, '', 'open', $4, $5, 'dp_structure_demo', NULL) RETURNING uid", + &[&office_uid, &ft_uid, &title, &dp_archetype, &dp_layout_root], + ) + .await?; + Ok(row.get(0)) + } else { + let row = client + .query_one( + "INSERT INTO folders (office_uid, title, status, dp_archetype, dp_layout_root, folder_purpose) \ + VALUES ($1, $2, 'open', $3, $4, 'dp_structure_demo') RETURNING uid", + &[&office_uid, &title, &dp_archetype, &dp_layout_root], + ) + .await?; + Ok(row.get(0)) + } +} + +/// Dossier **`client_operation`** / cession avec `dp_layout_root` (démo ou production). 
+async fn insert_folder_client_operation_cession_demo( + client: &PgClient, + office_uid: Uuid, + title: &str, + dp_archetype: &str, + dp_layout_root: &str, +) -> Result { + if folders_has_folder_type_uid_column(client).await { + let ft_uid = require_office_scoped_folder_type_uid(client, office_uid).await?; + let row = client + .query_one( + "INSERT INTO folders (office_uid, folder_type_uid, title, description, status, dp_archetype, dp_layout_root, folder_purpose, operation_type) \ + VALUES ($1, $2, $3, '', 'open', $4, $5, 'client_operation', 'cession') RETURNING uid", + &[&office_uid, &ft_uid, &title, &dp_archetype, &dp_layout_root], + ) + .await?; + Ok(row.get(0)) + } else { + let row = client + .query_one( + "INSERT INTO folders (office_uid, title, status, dp_archetype, dp_layout_root, folder_purpose, operation_type) \ + VALUES ($1, $2, 'open', $3, $4, 'client_operation', 'cession') RETURNING uid", + &[&office_uid, &title, &dp_archetype, &dp_layout_root], + ) + .await?; + Ok(row.get(0)) + } +} + +async fn insert_demo_dp_folder_doc_stub(client: &PgClient, folder_uid: Uuid) { + if let Err(e) = client + .execute( + "INSERT INTO folder_documents (folder_uid, name, doc_type, category, uploaded_by, size_label) \ + VALUES ($1, $2, 'autre', 'dossier', 'cabinet', '—')", + &[&folder_uid, &"Note — arborescence type dans data/dossiers-permanents"], + ) + .await + { + error!(?e, "seed demo dp folder_documents stub"); + } +} + +/// Une ligne **structure type** par office démo (8 soc. types + office « Entreprise démo » avec `instances/entreprise_demo/`). 
pub async fn seed_demo_dp_folders_for_cabinet_demo_if_needed(pool: &DbPool) {
    let Ok(client) = pool.get().await else {
        return;
    };

    // One "type structure" folder per archetype office; every failure is logged
    // and skipped so seeding stays best-effort.
    for archetype in DEMO_DP_INSTANCE_IDS {
        // Archetypes without a display name are not materialized as offices.
        let Some(label) = demo_archetype_office_display_name(archetype) else {
            continue;
        };
        // Office lookup is case- and whitespace-insensitive on the display name.
        let office_row = match client
            .query_opt(
                "SELECT uid FROM offices WHERE lower(btrim(name)) = lower(btrim($1::text)) LIMIT 1",
                &[&label],
            )
            .await
        {
            Ok(r) => r,
            Err(e) => {
                error!(?e, archetype = %archetype, "seed demo dp: lookup type office");
                continue;
            }
        };
        let Some(office_row) = office_row else {
            continue;
        };
        let office_uid: Uuid = office_row.get(0);
        // Idempotence guard: only seed offices that have no folders at all.
        let n: i64 = match client
            .query_one(
                "SELECT COUNT(*)::bigint FROM folders WHERE office_uid = $1",
                &[&office_uid],
            )
            .await
        {
            Ok(r) => r.get(0),
            Err(e) => {
                error!(?e, archetype = %archetype, "seed demo dp count folders");
                continue;
            }
        };
        if n > 0 {
            continue;
        }
        // Layout root mirrors the on-disk tree under instances/<archetype>.
        let layout = format!("instances/{archetype}");
        let title = format!("Jeu type {archetype}");
        let folder_uid = match insert_folder_with_dp_layout(
            client.deref(),
            office_uid,
            &title,
            archetype,
            &layout,
        )
        .await
        {
            Ok(u) => u,
            Err(e) => {
                error!(?e, archetype = %archetype, "seed demo dp insert type folder");
                continue;
            }
        };
        insert_demo_dp_folder_doc_stub(client.deref(), folder_uid).await;
    }

    // Same one-shot seeding for the general-purpose "Entreprise démo" office.
    let demo_row = match client
        .query_opt(
            "SELECT uid FROM offices WHERE lower(btrim(name)) = lower(btrim($1::text)) LIMIT 1",
            &[&DEMO_OFFICE_DISPLAY_NAME],
        )
        .await
    {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "seed demo dp: lookup demo office");
            return;
        }
    };
    let Some(demo_row) = demo_row else {
        return;
    };
    let demo_uid: Uuid = demo_row.get(0);
    let n: i64 = match client
        .query_one(
            "SELECT COUNT(*)::bigint FROM folders WHERE office_uid = $1",
            &[&demo_uid],
        )
        .await
    {
        Ok(r) => r.get(0),
        Err(e) => {
            error!(?e, "seed demo dp count demo folders");
            return;
        }
    };
    if n > 0 {
        return;
    }
    let layout = format!("instances/{DEMO_ENTREPRISE_INSTANCE_ID}");
    let title = "Dossier permanent — démonstration générale";
    let folder_uid = match insert_folder_with_dp_layout(
        client.deref(),
        demo_uid,
        title,
        DEMO_ENTREPRISE_INSTANCE_ID,
        &layout,
    )
    .await
    {
        Ok(u) => u,
        Err(e) => {
            error!(?e, "seed demo dp insert demo enterprise folder");
            return;
        }
    };
    insert_demo_dp_folder_doc_stub(client.deref(), folder_uid).await;
    info!(
        demo_uid = %demo_uid,
        "seeded demo DP folder for {}",
        DEMO_ENTREPRISE_INSTANCE_ID
    );
}

/// One demo **cession operation** row for the "Entreprise démo" office, with its Git
/// tree under `operations/entreprise_demo/cession_demo/`.
/// Unlike the type-set / lone permanent-folder seed above, this also runs when
/// folders already exist.
pub async fn seed_demo_cession_operation_folder_if_needed(pool: &DbPool) {
    let Ok(client) = pool.get().await else {
        return;
    };
    let demo_row = match client
        .query_opt(
            "SELECT uid FROM offices WHERE lower(btrim(name)) = lower(btrim($1::text)) LIMIT 1",
            &[&DEMO_OFFICE_DISPLAY_NAME],
        )
        .await
    {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "seed demo cession: lookup demo office");
            return;
        }
    };
    let Some(demo_row) = demo_row else {
        return;
    };
    let demo_uid: Uuid = demo_row.get(0);
    // Idempotence: one cession operation folder with a non-empty layout root suffices.
    let exists: bool = match client
        .query_one(
            "SELECT EXISTS(SELECT 1 FROM folders WHERE office_uid = $1 \
             AND btrim(COALESCE(folder_purpose, '')) = 'client_operation' \
             AND lower(btrim(COALESCE(operation_type, ''))) = 'cession' \
             AND btrim(COALESCE(dp_layout_root, '')) <> '')",
            &[&demo_uid],
        )
        .await
    {
        Ok(r) => r.get(0),
        Err(e) => {
            error!(?e, "seed demo cession: exists check");
            return;
        }
    };
    if exists {
        return;
    }
    let title = "Opération de cession (démo)";
    let folder_uid = match insert_folder_client_operation_cession_demo(
        client.deref(),
        demo_uid,
        title,
        DEMO_ENTREPRISE_INSTANCE_ID,
        DEMO_CESSION_DP_LAYOUT_ROOT,
    )
    .await
    {
        Ok(u) => u,
        Err(e) => {
            error!(?e, "seed demo cession insert");
            return;
        }
    };
    insert_demo_dp_folder_doc_stub(client.deref(), folder_uid).await;
    info!(
        demo_uid = %demo_uid,
        folder_uid = %folder_uid,
        "seeded demo cession operation folder"
    );
}

/// When the `offices` table is empty: seeds 9 demo societies (8 types +
/// "Entreprise démo") and makes `client@example.com` an admin member of each.
/// Also invokes the **one office + 8 folders** → **9 offices** migration when a
/// legacy database matches that shape.
pub async fn seed_demo_office_if_needed(pool: &DbPool, users_pk_column: &str) {
    migrate_legacy_demo_single_office_to_demo_and_type_offices_if_needed(pool).await;

    let Ok(client) = pool.get().await else {
        return;
    };
    // Ok(None) encodes "count failed"; it is treated as non-empty below so a
    // transient error can never cause a double seed.
    let count: Result<Option<i64>, tokio_postgres::Error> = client
        .query_one("SELECT COUNT(*)::bigint FROM offices", &[])
        .await
        .map(|r| Some(r.get::<_, i64>(0)))
        .or_else(|e| {
            error!(?e, "count offices");
            Ok(None)
        });
    if count.ok().flatten().unwrap_or(1) > 0 {
        return;
    }
    // Whitelist the identifier before interpolating it into SQL (no injection).
    let pk_col = if users_pk_column == "uid" {
        "uid"
    } else {
        "id"
    };
    let user_sql = format!(
        "SELECT {pk_col}::text FROM users WHERE lower(email) = lower('client@example.com') LIMIT 1"
    );
    let row = match client.query_opt(&user_sql, &[]).await {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "demo user lookup");
            return;
        }
    };
    let Some(row) = row else {
        return;
    };
    // pk is selected as text; silently abort if it is not a UUID.
    let user_uuid: uuid::Uuid = match row.get::<_, String>(0).parse() {
        Ok(u) => u,
        Err(_) => return,
    };

    for archetype in DEMO_DP_INSTANCE_IDS {
        let Some(label) = demo_archetype_office_display_name(archetype) else {
            continue;
        };
        let office_uid: Uuid = match client
            .query_one("INSERT INTO offices (name) VALUES ($1) RETURNING uid", &[&label])
            .await
        {
            Ok(r) => r.get(0),
            Err(e) => {
                error!(?e, label = %label, "insert demo type office");
                return;
            }
        };
        if let Err(e) =
            ensure_auxiliary_rows_for_new_office_if_impl(client.deref(), office_uid).await
        {
            error!(
                ?e,
                office_uid = %office_uid,
                "IMPL auxiliary rows for demo type office"
            );
            return;
        }
        if let Err(e) = insert_office_member_with_role_label(
            client.deref(),
            office_uid,
            user_uuid,
            "admin",
        )
        .await
        {
            error!(?e, "insert office_member demo type office");
            return;
        }
    }

    let demo_uid: Uuid = match client
        .query_one(
            "INSERT INTO offices (name) VALUES ($1) RETURNING uid",
            &[&DEMO_OFFICE_DISPLAY_NAME],
        )
        .await
    {
        Ok(r) => r.get(0),
        Err(e) => {
            error!(?e, "insert demo enterprise office");
            return;
        }
    };
    if let Err(e) = ensure_auxiliary_rows_for_new_office_if_impl(client.deref(), demo_uid).await {
        error!(
            ?e,
            office_uid = %demo_uid,
            "IMPL auxiliary rows for demo enterprise office"
        );
        return;
    }
    if let Err(e) =
        insert_office_member_with_role_label(client.deref(), demo_uid, user_uuid, "admin").await
    {
        error!(?e, "insert office_member demo enterprise");
        return;
    }

    info!(
        "seeded {} demo type offices + demo enterprise office",
        DEMO_DP_INSTANCE_IDS.len()
    );
}

/// If `DOCV_DEMO_MEMBER_EMAILS` is set (comma-separated emails), every listed user
/// without any `office_members` row is attached to the oldest office
/// (`created_at`) with the `client` role.
pub async fn link_listed_users_to_first_office_if_configured(
    pool: &DbPool,
    users_pk_column: &str,
) {
    // Feature is driven entirely by the env var; unset or blank means no-op.
    let Ok(raw) = env::var("DOCV_DEMO_MEMBER_EMAILS") else {
        return;
    };
    if raw.trim().is_empty() {
        return;
    }
    let Ok(client) = pool.get().await else {
        return;
    };
    // Oldest office wins; the legacy-migration placeholder office is excluded.
    let office_row = match client
        .query_opt(
            "SELECT uid FROM offices WHERE btrim(name) <> $1 ORDER BY created_at ASC LIMIT 1",
            &[&LEGACY_DEMO_MIGRATION_TMP_NAME],
        )
        .await
    {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "link demo emails: first office");
            return;
        }
    };
    let Some(office_row) = office_row else {
        return;
    };
    let office_uid: Uuid = office_row.get(0);
    // Whitelist the pk identifier before interpolating into SQL.
    let pk_col = if users_pk_column == "uid" {
        "uid"
    } else {
        "id"
    };

    for email in raw.split(',') {
        let email = email.trim();
        if email.is_empty() {
            continue;
        }
        let user_sql = format!(
            "SELECT {pk_col}::text FROM users WHERE lower(email) = lower($1) LIMIT 1"
        );
        let urow = match client.query_opt(&user_sql, &[&email]).await {
            Ok(r) => r,
            Err(e) => {
                error!(?e, email = %email, "link demo emails user lookup");
                continue;
            }
        };
        let Some(urow) = urow else {
            continue;
        };
        let user_uuid: Uuid = match urow.get::<_, String>(0).parse() {
            Ok(u) => u,
            Err(_) => continue,
        };
        // Only users with zero memberships are linked; existing members are untouched.
        let n: i64 = match client
            .query_one(
                "SELECT COUNT(*)::bigint FROM office_members WHERE user_uid = $1",
                &[&user_uuid],
            )
            .await
        {
            Ok(r) => r.get(0),
            Err(e) => {
                error!(?e, "link demo emails count members");
                continue;
            }
        };
        if n > 0 {
            continue;
        }
        if let Err(e) =
            insert_office_member_with_role_label(client.deref(), office_uid, user_uuid, "client")
                .await
        {
            error!(?e, email = %email, "link demo emails insert member");
        }
    }
}

/// True when the env var is set to `1`, `true`, `yes` or `on` (case-insensitive,
/// surrounding whitespace ignored); false when unset or anything else.
fn env_truthy_docv(var: &str) -> bool {
    match env::var(var) {
        Ok(s) => {
            let t = s.trim().to_ascii_lowercase();
            matches!(t.as_str(), "1" | "true" | "yes" | "on")
        }
        Err(_) => false,
    }
}

/// If `DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE` is `1` / `true` / `yes` / `on`, every
/// user **without any** `office_members` row is attached to the oldest office
/// (`created_at`) with the `client` role.
///
/// Useful when the database already holds `offices` (so the demo seed never runs)
/// but real accounts were never linked — **`GET /api/v1/offices`** would be empty
/// otherwise. **Do not enable in production** without business review (every
/// account would see at least one society).
pub async fn link_orphan_users_to_first_office_if_configured(
    pool: &DbPool,
    users_pk_column: &str,
) {
    if !env_truthy_docv("DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE") {
        return;
    }
    let Ok(client) = pool.get().await else {
        return;
    };
    // Oldest office, excluding the legacy-migration placeholder.
    let office_row = match client
        .query_opt(
            "SELECT uid FROM offices WHERE btrim(name) <> $1 ORDER BY created_at ASC LIMIT 1",
            &[&LEGACY_DEMO_MIGRATION_TMP_NAME],
        )
        .await
    {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "link orphan users: first office");
            return;
        }
    };
    let Some(office_row) = office_row else {
        warn!("DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE is set but offices table is empty");
        return;
    };
    let office_uid: Uuid = office_row.get(0);
    // Whitelist the pk identifier before interpolating into SQL.
    let pk_col = if users_pk_column == "uid" {
        "uid"
    } else {
        "id"
    };
    // Single set-based INSERT…SELECT covering all orphan users at once; the SQL
    // shape depends on whether office_members carries a role_uid column.
    let insert_res = if office_members_has_role_uid(client.deref()).await {
        let role_uid = match require_office_scoped_role_uid(client.deref(), office_uid).await {
            Ok(u) => u,
            Err(e) => {
                error!(
                    ?e,
                    office_uid = %office_uid,
                    "DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE: no office-scoped role row — batch insert aborted"
                );
                return;
            }
        };
        let insert_sql = format!(
            "INSERT INTO office_members (office_uid, user_uid, role_uid, joined_at, role) \
             SELECT $1, u.{pk}::uuid, $2, now(), 'client' FROM users u \
             WHERE NOT EXISTS (SELECT 1 FROM office_members m WHERE m.user_uid = u.{pk}::uuid) \
             ON CONFLICT DO NOTHING",
            pk = pk_col
        );
        client
            .execute(&insert_sql, &[&office_uid, &role_uid])
            .await
    } else {
        let insert_sql = format!(
            "INSERT INTO office_members (office_uid, user_uid, role) \
             SELECT $1, u.{pk}::uuid, 'client' FROM users u \
             WHERE NOT EXISTS (SELECT 1 FROM office_members m WHERE m.user_uid = u.{pk}::uuid) \
             ON CONFLICT DO NOTHING",
            pk = pk_col
        );
        client.execute(&insert_sql, &[&office_uid]).await
    };
    match insert_res {
        Ok(n) => {
            info!(
                rows = n,
                office_uid = %office_uid,
                "DOCV_LINK_ORPHAN_USERS_TO_FIRST_OFFICE: inserted office_members rows"
            );
        }
        Err(e) => error!(?e, "link orphan users batch insert"),
    }
}

/// One notification, pending document, and conversation (with message) for demo data when tables are empty.
pub async fn seed_stub_lists_demo_if_needed(pool: &DbPool, users_pk_column: &str) {
    let Ok(client) = pool.get().await else {
        return;
    };
    // user_notifications doubles as the idempotence marker for the whole stub set.
    let count: i64 = match client
        .query_one("SELECT COUNT(*)::bigint FROM user_notifications", &[])
        .await
    {
        Ok(r) => r.get(0),
        Err(e) => {
            error!(?e, "count user_notifications");
            return;
        }
    };
    if count > 0 {
        return;
    }

    // Whitelist the pk identifier before interpolating into SQL.
    let pk_col = if users_pk_column == "uid" {
        "uid"
    } else {
        "id"
    };
    let user_sql = format!(
        "SELECT {pk_col}::text FROM users WHERE lower(email) = lower('client@example.com') LIMIT 1"
    );
    let row = match client.query_opt(&user_sql, &[]).await {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "stub seed user lookup");
            return;
        }
    };
    let Some(row) = row else {
        return;
    };
    let user_uuid: uuid::Uuid = match row.get::<_, String>(0).parse() {
        Ok(u) => u,
        Err(_) => return,
    };

    // Any folder in an office the demo user belongs to provides the context
    // (office, case) the stub rows reference.
    let ctx = match client
        .query_opt(
            "SELECT f.office_uid, f.uid, f.title FROM folders f \
             INNER JOIN office_members m ON m.office_uid = f.office_uid AND m.user_uid = $1 \
             LIMIT 1",
            &[&user_uuid],
        )
        .await
    {
        Ok(r) => r,
        Err(e) => {
            error!(?e, "stub seed folder lookup");
            return;
        }
    };
    let Some(ctx) = ctx else {
        return;
    };
    let office_uid: uuid::Uuid = ctx.get(0);
    let folder_uid: uuid::Uuid = ctx.get(1);
    let folder_title: String = ctx.get(2);

    let msg = format!("Mise à jour sur « {folder_title} »");
    if let Err(e) = client
        .execute(
            "INSERT INTO user_notifications \
             (user_uid, office_uid, notif_type, message, case_uid, is_read) \
             VALUES ($1, $2, 'case_update', $3, $4, false)",
            &[&user_uuid, &office_uid, &msg, &folder_uid],
        )
        .await
    {
        error!(?e, "insert demo notification");
        return;
    }

    if let Err(e) = client
        .execute(
            "INSERT INTO user_pending_documents \
             (user_uid, office_uid, name, description, case_uid, case_name) \
             VALUES ($1, $2, $3, $4, $5, $6)",
            &[
                &user_uuid,
                &office_uid,
                &"Pièce à fournir (démo)",
                &"Merci de déposer le dernier KBIS signé.",
                &folder_uid,
                &folder_title,
            ],
        )
        .await
    {
        error!(?e, "insert demo pending document");
        return;
    }

    // Conversation first, then its initial message referencing the returned id.
    let conv_id: uuid::Uuid = match client
        .query_one(
            "INSERT INTO user_conversations \
             (user_uid, office_uid, contact_name, contact_role, last_message, last_message_at, unread_count) \
             VALUES ($1, $2, $3, $4, $5, now(), 1) \
             RETURNING id",
            &[
                &user_uuid,
                &office_uid,
                &"Client démo",
                &"client",
                &"Bonjour, pouvez-vous confirmer la date d’AG ?",
            ],
        )
        .await
    {
        Ok(r) => r.get(0),
        Err(e) => {
            error!(?e, "insert demo conversation");
            return;
        }
    };

    if let Err(e) = client
        .execute(
            "INSERT INTO conversation_messages \
             (conversation_id, sender_id, sender_name, sender_role, content, is_read) \
             VALUES ($1, $2, $3, 'client', $4, false)",
            &[
                &conv_id,
                &"client-demo",
                &"Client démo",
                &"Bonjour, pouvez-vous confirmer la date d’AG ?",
            ],
        )
        .await
    {
        error!(?e, "insert demo conversation message");
    }
}
diff --git a/services/docv/docv-back/src/dp_git_sync.rs b/services/docv/docv-back/src/dp_git_sync.rs
new file mode 100644
index 0000000..052c6f0
--- /dev/null
+++ b/services/docv/docv-back/src/dp_git_sync.rs
@@ -0,0 +1,109 @@
Optional Git commit/push after dossier-permanent mirror uploads (see docs/features/DOSSIERS_PERMANENTS_DATA_GIT.md). + +use crate::config::DpGitSyncConfig; +use std::path::{Component, Path, PathBuf}; +use std::process::Command; +use tracing::{info, warn}; + +/// Rejects absolute paths and `..` segments. +pub fn safe_relative_under_repo(path: &str) -> Option { + let trimmed = path.trim(); + if trimmed.is_empty() { + return None; + } + let p = Path::new(trimmed); + if p.is_absolute() { + return None; + } + for c in p.components() { + match c { + Component::ParentDir => return None, + Component::RootDir => return None, + _ => {} + } + } + Some(p.to_path_buf()) +} + +fn git_staged_files_nonempty(repo_root: &Path) -> bool { + let out = match Command::new("git") + .current_dir(repo_root) + .args(["diff", "--cached", "--name-only"]) + .output() + { + Ok(o) => o, + Err(e) => { + warn!(?e, "git diff --cached"); + return false; + } + }; + !out.stdout.is_empty() +} + +/// Runs `git add`, `git commit` (if needed), `git push` for paths relative to repo root. 
/// Runs `git add`, then `git commit` (only when something is staged), then
/// `git push` for `paths_relative_to_repo` inside `cfg.repo_root`. No-op when
/// sync is disabled or the path list is empty; every failure is logged, none
/// is propagated to the caller.
pub fn sync_data_paths(cfg: &DpGitSyncConfig, paths_relative_to_repo: &[PathBuf]) {
    if !cfg.enabled {
        return;
    }
    let Some(ref repo_root) = cfg.repo_root else {
        warn!("DOCV_DP_GIT_SYNC enabled but DOCV_DP_GIT_REPO_ROOT unset");
        return;
    };
    if paths_relative_to_repo.is_empty() {
        return;
    }
    // `git add -- <paths…>`: `--` stops option parsing for path arguments.
    let mut add_cmd = Command::new("git");
    add_cmd.current_dir(repo_root);
    add_cmd.arg("add").arg("--");
    for p in paths_relative_to_repo {
        add_cmd.arg(p);
    }
    match add_cmd.status() {
        Ok(s) if s.success() => {}
        Ok(s) => {
            warn!(?s, "git add failed for DP sync");
            return;
        }
        Err(e) => {
            warn!(?e, "git add spawn failed");
            return;
        }
    }
    // Skip the commit entirely when add produced no staged changes.
    if !git_staged_files_nonempty(repo_root) {
        info!("DP git sync: nothing staged after add; skipping commit");
        return;
    }
    let msg = "docv: sync dossiers-permanents mirror upload";
    let commit = Command::new("git")
        .current_dir(repo_root)
        .args(["commit", "-m", msg])
        .status();
    match commit {
        Ok(s) if s.success() => info!("DP git sync: committed"),
        // NOTE(review): a non-zero commit status does not abort — push still runs
        // below and may publish previously unpushed commits. Confirm intended.
        Ok(s) => warn!(?s, "git commit (may be empty or hook failure)"),
        Err(e) => {
            warn!(?e, "git commit spawn failed");
            return;
        }
    }
    // Push HEAD to an explicit branch ref when configured, else the remote default.
    let push = if let Some(ref b) = cfg.branch {
        Command::new("git")
            .current_dir(repo_root)
            .args([
                "push",
                cfg.remote.as_str(),
                &format!("HEAD:refs/heads/{b}"),
            ])
            .status()
    } else {
        Command::new("git")
            .current_dir(repo_root)
            .args(["push", cfg.remote.as_str()])
            .status()
    };
    match push {
        Ok(s) if s.success() => info!("DP git sync: push OK"),
        Ok(s) => warn!(?s, "git push failed — check remote and branch"),
        Err(e) => warn!(?e, "git push spawn failed"),
    }
}
diff --git a/services/docv/docv-back/src/dp_layout_fs.rs b/services/docv/docv-back/src/dp_layout_fs.rs
new file mode 100644
index 0000000..e76481b
--- /dev/null
+++ b/services/docv/docv-back/src/dp_layout_fs.rs
@@ -0,0 +1,362 @@
Lecture disque sous `data/dossiers-permanents` pour les routes DP layout (instances/, operations/). + +use serde::Serialize; +use std::path::PathBuf; + +#[derive(Serialize)] +pub struct DpLayoutEntryJson { + pub name: String, + pub entry_type: String, +} + +/// Répertoire canonique `repo_root` / `data_subpath` / segments de `dp_layout_root`. +pub fn dp_layout_base_canonical( + repo_root: PathBuf, + data_subpath: PathBuf, + dp_layout_root: &str, +) -> Result { + let mut base = repo_root; + base.push(&data_subpath); + for seg in dp_layout_root.trim().split('/').filter(|s| !s.is_empty()) { + base.push(seg); + } + base.canonicalize() + .map_err(|e| format!("instance_root_missing:{e}")) +} + +pub fn read_dp_layout_dir_entries( + repo_root: PathBuf, + data_subpath: PathBuf, + dp_layout_root: String, + rel: PathBuf, +) -> Result, String> { + let base_canon = dp_layout_base_canonical(repo_root, data_subpath, &dp_layout_root)?; + let target = if rel.as_os_str().is_empty() { + base_canon.clone() + } else { + let joined = base_canon.join(&rel); + joined + .canonicalize() + .map_err(|_| "not_found".to_string())? + }; + if !target.starts_with(&base_canon) { + return Err("invalid_path".into()); + } + let read = std::fs::read_dir(&target).map_err(|e| format!("read_dir:{e}"))?; + let mut entries: Vec = Vec::new(); + for ent in read { + let ent = ent.map_err(|e| format!("dir_entry:{e}"))?; + let name = ent.file_name().to_string_lossy().into_owned(); + if name.starts_with('.') { + continue; + } + let path = ent.path(); + // Follow symlinks: `DirEntry::file_type` does not; symlink→dir would be misclassified as file. 
+ let entry_type = if path.is_dir() { + "dir" + } else if path.is_file() { + "file" + } else { + continue; + }; + entries.push(DpLayoutEntryJson { + name, + entry_type: entry_type.into(), + }); + } + entries.sort_by(|a, b| { + a.entry_type + .cmp(&b.entry_type) + .then_with(|| a.name.to_lowercase().cmp(&b.name.to_lowercase())) + }); + Ok(entries) +} + +pub fn dp_layout_text_file_name_allowed(file_name: &str) -> bool { + let lower = file_name.to_lowercase(); + lower.ends_with(".md") + || lower.ends_with(".txt") + || lower.ends_with(".markdown") +} + +/// Gabarits versionnés sous `data/dossiers-permanents/` : `__GABARIT__.md` par dossier et fichiers `*.__TEMPLATE__.md`. +pub fn dp_layout_gabarit_file_name_allowed(file_name: &str) -> bool { + let lower = file_name.to_lowercase(); + if lower == "__gabarit__.md" { + return true; + } + lower.ends_with(".__template__.md") +} + +/// Overwrites an existing UTF-8 gabarit file (same path rules as [`read_dp_layout_text_file`]). +pub fn write_dp_layout_gabarit_text_file( + repo_root: PathBuf, + data_subpath: PathBuf, + dp_layout_root: String, + rel_file: PathBuf, + text: &str, +) -> Result<(), String> { + const MAX_BYTES: u64 = 512 * 1024; + if text.as_bytes().len() as u64 > MAX_BYTES { + return Err("file_too_large".into()); + } + let base_canon = dp_layout_base_canonical(repo_root, data_subpath, &dp_layout_root)?; + let joined = base_canon.join(&rel_file); + let target = joined + .canonicalize() + .map_err(|_| "not_found".to_string())?; + if !target.starts_with(&base_canon) { + return Err("invalid_path".into()); + } + let meta = std::fs::metadata(&target).map_err(|e| format!("metadata:{e}"))?; + if !meta.is_file() { + return Err("not_a_file".into()); + } + let name = target + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(""); + if !dp_layout_gabarit_file_name_allowed(name) { + return Err("not_a_gabarit_file".into()); + } + std::fs::write(&target, text).map_err(|e| format!("write:{e}")) +} + +/// Reads a small UTF-8 
pub fn read_dp_layout_text_file(
    repo_root: PathBuf,
    data_subpath: PathBuf,
    dp_layout_root: String,
    rel_file: PathBuf,
) -> Result<String, String> {
    // Hard cap so a huge file cannot be slurped into memory (512 KiB).
    const MAX_BYTES: u64 = 512 * 1024;
    let base_canon = dp_layout_base_canonical(repo_root, data_subpath, &dp_layout_root)?;
    let joined = base_canon.join(&rel_file);
    let target = joined
        .canonicalize()
        .map_err(|_| "not_found".to_string())?;
    // Canonicalized target must stay under the canonical base (no `..` escape).
    if !target.starts_with(&base_canon) {
        return Err("invalid_path".into());
    }
    let meta = std::fs::metadata(&target).map_err(|e| format!("metadata:{e}"))?;
    if !meta.is_file() {
        return Err("not_a_file".into());
    }
    if meta.len() > MAX_BYTES {
        return Err("file_too_large".into());
    }
    // Extension whitelist applies to the resolved file name, not the request path.
    let name = target
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("");
    if !dp_layout_text_file_name_allowed(name) {
        return Err("file_type_not_allowed".into());
    }
    std::fs::read_to_string(&target).map_err(|e| format!("read:{e}"))
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use tempfile::tempdir;

    // Builds a small on-disk tree and returns (tempdir guard, repo root, layout root).
    fn write_layout_tree() -> (tempfile::TempDir, PathBuf, String) {
        let repo = tempdir().expect("tempdir");
        let repo_root = repo.path().to_path_buf();
        let data = repo_root.join("data/dossiers-permanents");
        let root = data.join("instances/demo_case");
        fs::create_dir_all(root.join("Z_folder")).expect("mkdir");
        fs::create_dir_all(root.join("a_folder")).expect("mkdir");
        fs::write(root.join("readme.md"), "# hi").expect("write md");
        fs::write(root.join("note.txt"), "plain").expect("write txt");
        fs::write(root.join("a_folder/inner.txt"), "inner").expect("write nested");
        (repo, repo_root, "instances/demo_case".to_string())
    }

    #[test]
    fn dir_entries_sorts_dirs_before_files_then_name() {
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let entries = read_dp_layout_dir_entries(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::new(),
        )
        .expect("ok");
        let names: Vec<&str> = entries.iter().map(|e| e.name.as_str()).collect();
        // Directories first (case-insensitive name order), then files.
        assert_eq!(
            names,
            vec!["a_folder", "Z_folder", "note.txt", "readme.md"]
        );
        assert!(entries.iter().any(|e| e.name == "a_folder" && e.entry_type == "dir"));
        assert!(entries.iter().any(|e| e.name == "readme.md" && e.entry_type == "file"));
    }

    #[test]
    fn dir_entries_nested_path() {
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let entries = read_dp_layout_dir_entries(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::from("a_folder"),
        )
        .expect("ok");
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].name, "inner.txt");
        assert_eq!(entries[0].entry_type, "file");
    }

    // Guards the deliberate Path::is_dir (symlink-following) classification.
    #[cfg(unix)]
    #[test]
    fn dir_entries_symlink_to_dir_is_classified_as_dir() {
        use std::os::unix::fs::symlink;
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let base = dp_layout_base_canonical(
            repo_root.clone(),
            PathBuf::from("data/dossiers-permanents"),
            &dp_root,
        )
        .expect("base");
        let target_dir = base.join("a_folder");
        let link_path = base.join("link_to_a");
        symlink(&target_dir, &link_path).expect("symlink");
        let entries = read_dp_layout_dir_entries(
            repo_root.clone(),
            PathBuf::from("data/dossiers-permanents"),
            dp_root.clone(),
            PathBuf::new(),
        )
        .expect("ok");
        let link = entries
            .iter()
            .find(|e| e.name == "link_to_a")
            .expect("symlink entry");
        assert_eq!(link.entry_type, "dir");
        let nested = read_dp_layout_dir_entries(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::from("link_to_a"),
        )
        .expect("nested");
        assert_eq!(nested.len(), 1);
        assert_eq!(nested[0].name, "inner.txt");
    }

    #[test]
    fn read_text_file_ok() {
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let text = read_dp_layout_text_file(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::from("a_folder/inner.txt"),
        )
        .expect("read");
        assert_eq!(text, "inner");
    }

    #[test]
    fn read_text_file_rejects_wrong_extension() {
        let repo = tempdir().expect("tempdir");
        let data = repo.path().join("data/dossiers-permanents");
        let root = data.join("instances/x");
        fs::create_dir_all(&root).expect("mkdir");
        fs::write(root.join("bad.bin"), "x").expect("write");
        let err = read_dp_layout_text_file(
            repo.path().to_path_buf(),
            PathBuf::from("data/dossiers-permanents"),
            "instances/x".into(),
            PathBuf::from("bad.bin"),
        )
        .expect_err("type");
        assert_eq!(err, "file_type_not_allowed");
    }

    #[test]
    fn base_canonical_matches_joined_roots() {
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let base = dp_layout_base_canonical(
            repo_root.clone(),
            PathBuf::from("data/dossiers-permanents"),
            &dp_root,
        )
        .expect("canon");
        assert!(base.ends_with("demo_case"));
        let entries = read_dp_layout_dir_entries(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::new(),
        )
        .expect("entries");
        assert!(!entries.is_empty());
    }

    // `../..` must never escape the canonical base.
    #[test]
    fn path_escape_rejected() {
        let (_tmp, repo_root, dp_root) = write_layout_tree();
        let res = read_dp_layout_dir_entries(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp_root,
            PathBuf::from("../.."),
        );
        assert!(res.is_err());
    }

    #[test]
    fn gabarit_name_allowed() {
        assert!(dp_layout_gabarit_file_name_allowed("__GABARIT__.md"));
        assert!(dp_layout_gabarit_file_name_allowed("IM.07.__TEMPLATE__.md"));
        assert!(!dp_layout_gabarit_file_name_allowed("readme.md"));
        assert!(!dp_layout_gabarit_file_name_allowed("note.txt"));
    }

    #[test]
    fn write_gabarit_roundtrip() {
        let repo = tempdir().expect("tempdir");
        let data = repo.path().join("data/dossiers-permanents");
        let root = data.join("instances/x");
        fs::create_dir_all(root.join("sub")).expect("mkdir");
        fs::write(root.join("sub/__GABARIT__.md"), "old").expect("write");
        let repo_root = repo.path().to_path_buf();
        let dp = "instances/x".to_string();
        write_dp_layout_gabarit_text_file(
            repo_root.clone(),
            PathBuf::from("data/dossiers-permanents"),
            dp.clone(),
            PathBuf::from("sub/__GABARIT__.md"),
            "new content",
        )
        .expect("write ok");
        let text = read_dp_layout_text_file(
            repo_root,
            PathBuf::from("data/dossiers-permanents"),
            dp,
            PathBuf::from("sub/__GABARIT__.md"),
        )
        .expect("read");
        assert_eq!(text, "new content");
    }

    #[test]
    fn write_gabarit_rejects_plain_md() {
        let repo = tempdir().expect("tempdir");
        let data = repo.path().join("data/dossiers-permanents");
        let root = data.join("instances/x");
        fs::create_dir_all(&root).expect("mkdir");
        fs::write(root.join("readme.md"), "x").expect("write");
        let err = write_dp_layout_gabarit_text_file(
            repo.path().to_path_buf(),
            PathBuf::from("data/dossiers-permanents"),
            "instances/x".into(),
            PathBuf::from("readme.md"),
            "y",
        )
        .expect_err("not gabarit");
        assert_eq!(err, "not_a_gabarit_file");
    }
}
diff --git a/services/docv/docv-back/src/dp_mirror.rs b/services/docv/docv-back/src/dp_mirror.rs
new file mode 100644
index 0000000..8e82dec
--- /dev/null
+++ b/services/docv/docv-back/src/dp_mirror.rs
@@ -0,0 +1,64 @@
//! Mirror-path resolution under `data/dossiers-permanents/` (binary upload, Git sync).
+ +use crate::config::Config; +use hyper::header::HeaderMap; +use std::path::PathBuf; +use uuid::Uuid; + +use crate::dp_git_sync; + +pub fn sanitize_dp_mirror_filename(name: &str) -> String { + let base: String = name + .chars() + .map(|c| match c { + '/' | '\\' | ':' | '\0' => '-', + c if c.is_control() => '-', + c => c, + }) + .collect(); + let t = base.trim(); + if t.is_empty() { + return "document".into(); + } + t.chars().take(240).collect() +} + +pub fn header_dp_mirror_relative_path(hdr: &HeaderMap) -> Option { + hdr.get("x-enso-dp-mirror-relative-path") + .and_then(|v| v.to_str().ok()) + .map(str::trim) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()) +} + +/// Chemin relatif sous `DOCV_DP_GIT_DATA_SUBPATH`, si entête valide ou sync Git activé (défaut `_uploads/...`). +pub fn resolve_dp_mirror_relative( + cfg: &Config, + hdr: &HeaderMap, + folder_uid: Uuid, + name: &str, +) -> Option { + let safe = sanitize_dp_mirror_filename(name); + if let Some(raw) = header_dp_mirror_relative_path(hdr) { + let t = raw.trim(); + if !t.is_empty() { + let is_dir = t.ends_with('/'); + let trimmed = t.trim_end_matches('/').trim(); + if let Some(mut p) = dp_git_sync::safe_relative_under_repo(trimmed) { + if is_dir { + p.push(safe.clone()); + } + return Some(p); + } + } + } + if cfg.dp_git_sync.enabled { + Some( + PathBuf::from("_uploads") + .join(folder_uid.to_string()) + .join(safe), + ) + } else { + None + } +} diff --git a/services/docv/docv-back/src/main.rs b/services/docv/docv-back/src/main.rs new file mode 100644 index 0000000..8bc9af5 --- /dev/null +++ b/services/docv/docv-back/src/main.rs @@ -0,0 +1,74 @@ +//! docv-back: HTTP API, PostgreSQL, OAuth2 authorization server for enso-front. 
mod api_auth;
mod branding;
mod config;
mod db;
mod dp_layout_fs;
mod dp_git_sync;
mod dp_mirror;
mod server;

use crate::config::Config;
use tracing_subscriber::EnvFilter;

/// Entry point: tracing setup, env-driven config, schema ensures and demo seeds
/// (in dependency order), then the HTTP server.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // RUST_LOG controls filtering; "info" is the default directive.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env().add_directive("info".parse()?))
        .init();

    let config = Config::load_from_env();
    // Only lengths of secrets are logged, never their values.
    tracing::info!(
        host = %config.host,
        port = config.port,
        db_len = config.database_url.len(),
        jwt_len = config.jwt_secret.len(),
        oauth_redirects = config.oauth_redirect_uris.len(),
        browser_oauth_prefix = %config.browser_oauth_prefix,
        users_pk_column = %config.users_pk_column,
        file_storage = config.file_storage_dir.as_ref().map(|p| p.display().to_string()).unwrap_or_else(|| "disabled".into()),
        upload_max_bytes = config.upload_max_bytes,
        dp_git_sync = config.dp_git_sync.enabled,
        dp_git_repo = config.dp_git_sync.repo_root.as_ref().map(|p| p.display().to_string()).unwrap_or_else(|| "unset".into()),
        oauth_access_token_ttl_secs = config.oauth_access_token_ttl_secs,
        "docv-back starting"
    );

    let pool = db::create_pool()?;
    db::ensure_users_table(&pool).await;
    // Demo-user seed SQL is embedded at compile time; failure is non-fatal
    // (e.g. already seeded) and only logged.
    if let Ok(client) = pool.get().await {
        let seed = include_str!("../migrations/20260330120001_seed_demo_user.sql");
        if let Err(e) = client.batch_execute(seed).await {
            tracing::warn!(?e, "seed demo user skipped or failed");
        }
    }
    // Schema "ensure" steps first, in dependency order, before any data seeding.
    db::ensure_offices_folders_schema(&pool).await;
    db::ensure_folders_title_legacy_compat(&pool).await;
    db::ensure_offices_extended_columns(&pool).await;
    db::ensure_office_members_role_column(&pool).await;
    db::ensure_roles_minimal_table(&pool).await;
    db::ensure_user_stub_lists_schema(&pool).await;
    db::ensure_folder_documents_schema(&pool).await;
    db::ensure_folder_documents_storage_columns(&pool).await;
    db::ensure_folders_dp_layout_columns(&pool).await;
    db::ensure_folders_status_column(&pool).await;
    db::ensure_folders_purpose_operation_type(&pool).await;
    db::ensure_office_society_extensions(&pool).await;
    db::ensure_folder_sources_notes_tasks_workflow(&pool).await;
    // NOTE(review): the two ensure_impl_* calls run both before and after
    // seed_demo_office_if_needed — presumably so offices created by the seed also
    // receive their IMPL role/folder-type rows; confirm the first pair is still needed.
    db::ensure_impl_role_rows_for_all_offices(&pool).await?;
    db::ensure_impl_folder_type_rows_for_all_offices(&pool).await?;
    db::seed_demo_office_if_needed(&pool, &config.users_pk_column).await;
    db::ensure_impl_role_rows_for_all_offices(&pool).await?;
    db::ensure_impl_folder_type_rows_for_all_offices(&pool).await?;
    db::seed_demo_dp_folders_for_cabinet_demo_if_needed(&pool).await;
    db::seed_demo_cession_operation_folder_if_needed(&pool).await;
    db::link_listed_users_to_first_office_if_configured(&pool, &config.users_pk_column).await;
    db::link_orphan_users_to_first_office_if_configured(&pool, &config.users_pk_column).await;
    db::repair_seed_demo_user_office_memberships_if_needed(&pool, &config.users_pk_column).await;
    db::remove_stale_legacy_demo_migration_placeholder_office_if_needed(&pool).await;
    db::seed_stub_lists_demo_if_needed(&pool, &config.users_pk_column).await;

    // Blocks until the server shuts down.
    server::serve(config, pool).await?;
    Ok(())
}
diff --git a/services/docv/docv-back/src/server/ai_forward.rs b/services/docv/docv-back/src/server/ai_forward.rs
new file mode 100644
index 0000000..9792e19
--- /dev/null
+++ b/services/docv/docv-back/src/server/ai_forward.rs
@@ -0,0 +1,45 @@
//! Forward JSON to an optional upstream IA HTTP service (server-side only).
+ +use crate::config::Config; +use serde_json::Value; +use std::time::Duration; + +#[derive(Debug)] +pub enum AiForwardError { + NotConfigured, + Request(String), + UpstreamStatus(u16), +} + +pub async fn post_ai_json(config: &Config, body: &Value) -> Result { + let Some(url) = config.ai_service_url.as_deref() else { + return Err(AiForwardError::NotConfigured); + }; + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(config.ai_timeout_secs)) + .build() + .map_err(|e| AiForwardError::Request(e.to_string()))?; + let mut req = client.post(url).json(body); + if let Some(ref key) = config.ai_api_key { + req = req.header( + reqwest::header::AUTHORIZATION, + format!("Bearer {}", key.trim()), + ); + } + let res = req + .send() + .await + .map_err(|e| AiForwardError::Request(e.to_string()))?; + let status = res.status(); + let text = res + .text() + .await + .map_err(|e| AiForwardError::Request(e.to_string()))?; + if !status.is_success() { + return Err(AiForwardError::UpstreamStatus(status.as_u16())); + } + match serde_json::from_str::(&text) { + Ok(v) => Ok(v), + Err(_) => Ok(serde_json::json!({ "text": text.trim() })), + } +} diff --git a/services/docv/docv-back/src/server/api_v1.rs b/services/docv/docv-back/src/server/api_v1.rs new file mode 100644 index 0000000..f699e49 --- /dev/null +++ b/services/docv/docv-back/src/server/api_v1.rs @@ -0,0 +1,4545 @@ +//! JSON API `/api/v1/*` (Bearer access token from OAuth). 
+ +use super::ai_forward::{post_ai_json, AiForwardError}; +use super::AppState; +use crate::api_auth::{bearer_token, decode_access_token, AccessClaims}; +use hyper::body::to_bytes; +use hyper::header::HeaderMap; +use hyper::{Body, Method, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::ops::Deref; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use tokio_postgres::Row; +use tracing::{error, warn}; +use uuid::Uuid; + +use crate::dp_layout_fs::{read_dp_layout_dir_entries, read_dp_layout_text_file}; +use crate::{db, dp_git_sync, dp_mirror}; + +fn json_response(status: StatusCode, body: serde_json::Value) -> Response { + Response::builder() + .status(status) + .header(hyper::header::CONTENT_TYPE, "application/json") + .body(Body::from(body.to_string())) + .unwrap() +} + +fn unauthorized_json() -> Response { + json_response( + StatusCode::UNAUTHORIZED, + json!({ "error": "unauthorized", "detail": "invalid or missing bearer token" }), + ) +} + +fn forbidden_json() -> Response { + json_response( + StatusCode::FORBIDDEN, + json!({ "error": "forbidden", "detail": "not a member of this resource" }), + ) +} + +fn not_found_json() -> Response { + json_response( + StatusCode::NOT_FOUND, + json!({ "error": "not_found" }), + ) +} + +/// Relative path under `dp-layout-entries` / `dp-layout-file` from extra URI segments (each segment +/// percent-decoded), or from the legacy `?path=` query when there are no extra segments. +/// Using path segments avoids encoded slashes in the query string (`path=a%2Fb`), which some +/// reverse proxies normalize or block, causing empty `path` and a stuck “root only” listing. +fn dp_layout_rel_from_uri_segments_and_query( + segments: &[&str], + first_rel_idx: usize, + query_path: &str, +) -> String { + if segments.len() > first_rel_idx { + segments[first_rel_idx..] 
+ .iter() + .map(|s| { + urlencoding::decode(s) + .unwrap_or(Cow::Borrowed(*s)) + .into_owned() + }) + .collect::>() + .join("/") + } else { + query_path.to_string() + } +} + +fn server_error_json(msg: &str) -> Response { + json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "server_error", "detail": msg }), + ) +} + +#[derive(Serialize)] +struct MeBody { + id: String, + email: String, + name: Option, + phone: Option, +} + +async fn handle_me(state: &AppState, claims: &AccessClaims) -> Response { + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let col = if state.config.users_pk_column == "uid" { + "uid" + } else { + "id" + }; + let sql = format!( + "SELECT {}::text, email, name, phone FROM users WHERE {}::text = $1", + col, col + ); + let row = match client.query_opt(&sql, &[&claims.sub]).await { + Ok(r) => r, + Err(e) => { + error!(?e, "me query"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let me = MeBody { + id: row.get(0), + email: row.get(1), + name: row.get(2), + phone: row.get(3), + }; + json_response(StatusCode::OK, serde_json::to_value(&me).unwrap()) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct FolderDocumentJson { + id: String, + name: String, + #[serde(rename = "type")] + doc_type: String, + category: String, + uploaded_by: String, + uploaded_at: String, + size: String, + #[serde(skip_serializing_if = "Option::is_none")] + storage_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + mime_type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + dp_mirror_path: Option, + workflow_state: String, +} + +fn row_folder_document(r: &Row) -> FolderDocumentJson { + let uid: Uuid = r.get("uid"); + let storage_url: Option = r.get::<_, Option>("storage_url"); + let mime_type: Option = r.get::<_, Option>("mime_type"); + let dp_mirror_path: Option = r.get::<_, 
Option>("dp_mirror_path"); + let workflow_state: String = r + .try_get::<_, String>("workflow_state") + .unwrap_or_else(|_| "draft".to_string()); + FolderDocumentJson { + id: uid.to_string(), + name: r.get("name"), + doc_type: r.get("doc_type"), + category: r.get("category"), + uploaded_by: r.get("uploaded_by"), + uploaded_at: r.get::<_, String>("created_at"), + size: r.get("size_label"), + storage_url, + mime_type, + dp_mirror_path, + workflow_state, + } +} + +#[derive(Serialize)] +struct OfficeJson { + uid: String, + name: String, + siren: Option, + address: Option, + role: String, + created_at: String, + #[serde(skip_serializing_if = "Option::is_none")] + parent_office_uid: Option, + #[serde(skip_serializing_if = "Option::is_none")] + archived_at: Option, + #[serde(skip_serializing_if = "Vec::is_empty")] + permanent_documents: Vec, +} + +fn row_office(r: &Row) -> OfficeJson { + let uid: Uuid = r.get("uid"); + let parent_office_uid: Option = r.get("parent_office_uid"); + let archived_at: Option = r.get("archived_at"); + OfficeJson { + uid: uid.to_string(), + name: r.get("name"), + siren: r.get("siren"), + address: r.get("address"), + role: r.get("role"), + created_at: r.get::<_, String>("created_at"), + parent_office_uid: parent_office_uid.map(|u| u.to_string()), + archived_at, + permanent_documents: vec![], + } +} + +async fn handle_offices_list(state: &AppState, claims: &AccessClaims) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let ph = db::legacy_demo_migration_placeholder_office_name(); + let sql = "SELECT o.uid, o.name, o.siren, o.address, o.created_at::text AS created_at, m.role, \ + o.parent_office_uid, o.archived_at::text AS archived_at \ + FROM offices o \ + INNER JOIN office_members m ON m.office_uid = o.uid \ + WHERE m.user_uid = $1 AND btrim(o.name) <> $2 AND o.archived_at 
IS NULL \ + ORDER BY o.name"; + let rows = match client.query(sql, &[&user_uid, &ph]).await { + Ok(r) => r, + Err(e) => { + error!(?e, "offices list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows.iter().map(|r| row_office(r)).collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +fn bad_claims_sub() -> Response { + json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_sub", "detail": "subject must be a UUID" }), + ) +} + +fn invalid_uuid_json() -> Response { + json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_uuid" }), + ) +} + +async fn user_member_of_office( + client: &deadpool_postgres::Client, + user_uid: Uuid, + office_uid: Uuid, +) -> bool { + match client + .query_one( + "SELECT COUNT(*)::bigint FROM office_members WHERE office_uid = $1 AND user_uid = $2", + &[&office_uid, &user_uid], + ) + .await + { + Ok(row) => row.get::<_, i64>(0) > 0, + Err(e) => { + error!(?e, "member check"); + false + } + } +} + +async fn handle_office_detail( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let ph = db::legacy_demo_migration_placeholder_office_name(); + let row = match client + .query_opt( + "SELECT o.uid, o.name, o.siren, o.address, o.created_at::text AS created_at, m.role, \ + o.parent_office_uid, o.archived_at::text AS archived_at \ + FROM offices o \ + INNER JOIN office_members m ON m.office_uid = o.uid AND m.user_uid = $2 \ + WHERE o.uid = $1 AND btrim(o.name) <> $3", + &[&office_uid, &user_uid, &ph], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "office detail"); + return server_error_json("database error"); + } + 
}; + let Some(row) = row else { + return not_found_json(); + }; + let mut o = row_office(&row); + let doc_rows = match client + .query( + "SELECT d.uid, d.name, d.doc_type, d.category, d.uploaded_by, d.size_label, \ + d.created_at::text AS created_at, d.storage_url, d.mime_type, d.dp_mirror_path, d.workflow_state \ + FROM folder_documents d \ + INNER JOIN folders f ON f.uid = d.folder_uid \ + WHERE f.office_uid = $1 AND d.category = $2 \ + ORDER BY d.created_at DESC", + &[&office_uid, &"permanent"], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "office permanent documents"); + return server_error_json("database error"); + } + }; + o.permanent_documents = doc_rows.iter().map(row_folder_document).collect(); + json_response(StatusCode::OK, serde_json::to_value(&o).unwrap()) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct OfficeCommentJson { + id: String, + user_uid: String, + content: String, + access_level: String, + created_at: String, +} + +async fn handle_office_comments_list( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let rows = match client + .query( + "SELECT uid, user_uid, content, access_level, created_at::text AS created_at \ + FROM office_comments WHERE office_uid = $1 ORDER BY created_at ASC", + &[&office_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "office_comments list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| { + let id: Uuid = r.get(0); + let u: Uuid = r.get(1); + OfficeCommentJson { + id: id.to_string(), + user_uid: u.to_string(), + content: r.get(2), + access_level: r.get(3), + created_at: 
r.get(4), + } + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +#[derive(Deserialize)] +struct PostOfficeCommentBody { + content: String, + #[serde(default)] + access_level: Option, +} + +async fn handle_office_comments_create( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: PostOfficeCommentBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let content = parsed.content.trim(); + if content.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "empty_content" }), + ); + } + let access = parsed + .access_level + .as_deref() + .map(str::trim) + .filter(|s| !s.is_empty()) + .unwrap_or("internal"); + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let row = match client + .query_one( + "INSERT INTO office_comments (office_uid, user_uid, content, access_level) \ + VALUES ($1, $2, $3, $4) \ + RETURNING uid, user_uid, content, access_level, created_at::text AS created_at", + &[&office_uid, &user_uid, &content, &access], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "office_comments insert"); + return server_error_json("database error"); + } + }; + let id: Uuid = row.get(0); + let u: Uuid = row.get(1); + let c = OfficeCommentJson { + id: id.to_string(), + user_uid: u.to_string(), + content: row.get(2), + access_level: row.get(3), + created_at: row.get(4), + }; + json_response(StatusCode::CREATED, serde_json::to_value(&c).unwrap()) +} + +#[derive(Serialize)] +struct FolderJson { + uid: String, + office_uid: String, + title: String, + status: String, + 
created_at: String, + updated_at: String, + folder_purpose: String, + #[serde(skip_serializing_if = "Option::is_none")] + operation_type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + dp_archetype: Option, + #[serde(skip_serializing_if = "Option::is_none")] + dp_layout_root: Option, + #[serde(skip_serializing_if = "std::ops::Not::not")] + extends_permanent_record: bool, +} + +fn row_folder(r: &Row) -> FolderJson { + let uid: Uuid = r.get("uid"); + let office_uid: Uuid = r.get("office_uid"); + let dp_archetype: Option = r.get("dp_archetype"); + let dp_layout_root: Option = r.get("dp_layout_root"); + let folder_purpose: String = r + .try_get::<_, String>("folder_purpose") + .unwrap_or_else(|_| "client_operation".to_string()); + let folder_purpose = match folder_purpose.as_str() { + "client_operation" | "dp_structure_demo" => folder_purpose, + _ => "client_operation".to_string(), + }; + let operation_type: Option = r + .try_get::<_, Option>("operation_type") + .ok() + .flatten(); + let extends_permanent_record: bool = r + .try_get::<_, bool>("extends_permanent_record") + .unwrap_or(false); + FolderJson { + uid: uid.to_string(), + office_uid: office_uid.to_string(), + title: r.get("title"), + status: r.get("status"), + created_at: r.get::<_, String>("created_at"), + updated_at: r.get::<_, String>("updated_at"), + folder_purpose, + operation_type, + dp_archetype, + dp_layout_root, + extends_permanent_record, + } +} + +#[derive(Serialize)] +struct FolderDetailJson { + uid: String, + office_uid: String, + title: String, + status: String, + created_at: String, + updated_at: String, + folder_purpose: String, + #[serde(skip_serializing_if = "Option::is_none")] + operation_type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + dp_archetype: Option, + #[serde(skip_serializing_if = "Option::is_none")] + dp_layout_root: Option, + #[serde(skip_serializing_if = "std::ops::Not::not")] + extends_permanent_record: bool, + documents: Vec, +} + +async fn 
folder_access_office_uid( + client: &deadpool_postgres::Client, + user_uid: Uuid, + folder_uid: Uuid, +) -> Option { + let row = client + .query_opt( + "SELECT f.office_uid FROM folders f \ + INNER JOIN office_members m ON m.office_uid = f.office_uid AND m.user_uid = $2 \ + WHERE f.uid = $1", + &[&folder_uid, &user_uid], + ) + .await + .ok()??; + Some(row.get(0)) +} + +#[derive(Clone, Copy)] +enum NotifyAudience { + /// Rôles autres que `client` (cabinet : member, admin, etc.). + CabinetPeers, + /// Rôle `client` uniquement. + ClientPeers, +} + +async fn member_role_lower( + client: &deadpool_postgres::Client, + office_uid: Uuid, + user_uid: Uuid, +) -> Option { + let row = client + .query_opt( + "SELECT lower(trim(m.role)) FROM office_members m \ + WHERE m.office_uid = $1 AND m.user_uid = $2", + &[&office_uid, &user_uid], + ) + .await + .ok()??; + Some(row.get::<_, String>(0)) +} + +fn audience_for_case_side(actor_role_lower: &str) -> NotifyAudience { + if actor_role_lower == "client" { + NotifyAudience::CabinetPeers + } else { + NotifyAudience::ClientPeers + } +} + +fn audience_for_document_upload(uploaded_by_lower: &str) -> NotifyAudience { + if uploaded_by_lower == "client" { + NotifyAudience::CabinetPeers + } else { + NotifyAudience::ClientPeers + } +} + +async fn insert_notifications_for_peers( + client: &deadpool_postgres::Client, + office_uid: Uuid, + actor_uid: Uuid, + notif_type: &str, + message: &str, + case_uid: Option, + audience: NotifyAudience, +) -> Result { + let sql = match audience { + NotifyAudience::CabinetPeers => { + "INSERT INTO user_notifications (user_uid, office_uid, notif_type, message, case_uid, is_read) \ + SELECT m.user_uid, $1, $2, $3, $4, false \ + FROM office_members m \ + WHERE m.office_uid = $1 AND m.user_uid <> $5 AND lower(trim(m.role)) <> 'client'" + } + NotifyAudience::ClientPeers => { + "INSERT INTO user_notifications (user_uid, office_uid, notif_type, message, case_uid, is_read) \ + SELECT m.user_uid, $1, $2, $3, $4, false 
\ + FROM office_members m \ + WHERE m.office_uid = $1 AND m.user_uid <> $5 AND lower(trim(m.role)) = 'client'" + } + }; + client + .execute( + sql, + &[&office_uid, ¬if_type, &message, &case_uid, &actor_uid], + ) + .await +} + +async fn handle_folders_list( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let rows = match client + .query( + "SELECT uid, office_uid, title, status, created_at::text AS created_at, updated_at::text AS updated_at, \ + folder_purpose, operation_type, dp_archetype, dp_layout_root, extends_permanent_record \ + FROM folders WHERE office_uid = $1 ORDER BY updated_at DESC", + &[&office_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folders list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows.iter().map(|r| row_folder(r)).collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +async fn handle_folder_detail( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT f.uid, f.office_uid, f.title, f.status, \ + f.created_at::text AS created_at, f.updated_at::text AS updated_at, \ + f.folder_purpose, f.operation_type, f.dp_archetype, f.dp_layout_root, f.extends_permanent_record \ + FROM folders f \ + INNER JOIN office_members m ON m.office_uid = f.office_uid AND m.user_uid = $2 \ + WHERE f.uid = $1", + &[&folder_uid, &user_uid], + ) + .await + { + Ok(r) => 
r, + Err(e) => { + error!(?e, "folder detail"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let f = row_folder(&row); + let doc_rows = match client + .query( + "SELECT uid, name, doc_type, category, uploaded_by, size_label, created_at::text AS created_at, \ + storage_url, mime_type, dp_mirror_path, workflow_state \ + FROM folder_documents WHERE folder_uid = $1 ORDER BY created_at DESC", + &[&folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder documents"); + return server_error_json("database error"); + } + }; + let documents: Vec = doc_rows.iter().map(row_folder_document).collect(); + let detail = FolderDetailJson { + uid: f.uid, + office_uid: f.office_uid, + title: f.title, + status: f.status, + created_at: f.created_at, + updated_at: f.updated_at, + folder_purpose: f.folder_purpose.clone(), + operation_type: f.operation_type.clone(), + dp_archetype: f.dp_archetype.clone(), + dp_layout_root: f.dp_layout_root.clone(), + extends_permanent_record: f.extends_permanent_record, + documents, + }; + json_response(StatusCode::OK, serde_json::to_value(&detail).unwrap()) +} + +/// Returns `dp_layout_root` when the folder may use DP layout APIs (structure démo or cession). 
+async fn resolve_dp_layout_folder_dp_root( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, +) -> Result> { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return Err(bad_claims_sub()), + }; + let Ok(client) = state.pool.get().await else { + return Err(server_error_json("database unavailable")); + }; + let row = match client + .query_opt( + "SELECT office_uid, btrim(COALESCE(folder_purpose, '')), btrim(COALESCE(dp_layout_root, '')), \ + lower(btrim(COALESCE(operation_type, ''))) \ + FROM folders WHERE uid = $1", + &[&folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "dp-layout folder lookup"); + return Err(server_error_json("database error")); + } + }; + let Some(row) = row else { + return Err(not_found_json()); + }; + let office_uid: Uuid = row.get(0); + let purpose: String = row.get(1); + let dp_root: String = row.get(2); + let operation_type_lc: String = row.get(3); + let allow_layout = (purpose == "dp_structure_demo") + || (purpose == "client_operation" && operation_type_lc == "cession"); + if !allow_layout || dp_root.is_empty() { + return Err(json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "folder_not_dp_layout_eligible", + "detail": "dp-layout-entries require dp_layout_root and (dp_structure_demo or client_operation/cession)" + }), + )); + } + if !user_member_of_office(&client, user_uid, office_uid).await { + return Err(forbidden_json()); + } + Ok(dp_root) +} + +/// Single path segment safe under `instances/` (archetype id from seed, e.g. `groupe_sci_is`). +fn dp_instance_archetype_segment_safe(seg: &str) -> bool { + let s = seg.trim(); + !s.is_empty() + && s.len() <= 240 + && !s.contains('/') + && !s.contains("..") + && s + .chars() + .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-' || c == '.') +} + +/// `dp_layout_root` for company-level browsing under `data/dossiers-permanents/instances/`. 
+/// +/// Resolution (first match): any folder of the office with `dp_layout_root` under `instances`; +/// else first non-empty `dp_archetype` → `instances/`; else **`instances`** (full type +/// tree). Any office member may list this shared referential — no `dp_structure_demo`-only gate. +async fn resolve_office_instance_dp_root( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, +) -> Result> { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return Err(bad_claims_sub()), + }; + let Ok(client) = state.pool.get().await else { + return Err(server_error_json("database unavailable")); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return Err(forbidden_json()); + } + let row = match client + .query_opt( + "SELECT btrim(COALESCE(dp_layout_root, '')) FROM folders \ + WHERE office_uid = $1 \ + AND btrim(COALESCE(dp_layout_root, '')) <> '' \ + AND (btrim(COALESCE(dp_layout_root, '')) LIKE 'instances/%' \ + OR lower(btrim(COALESCE(dp_layout_root, ''))) = 'instances') \ + ORDER BY created_at ASC LIMIT 1", + &[&office_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, office_uid = %office_uid, "office instance-layout lookup"); + return Err(server_error_json("database error")); + } + }; + if let Some(row) = row { + let dp_root: String = row.get(0); + return Ok(dp_root); + } + let row_arch = match client + .query_opt( + "SELECT btrim(COALESCE(dp_archetype, '')) FROM folders \ + WHERE office_uid = $1 AND btrim(COALESCE(dp_archetype, '')) <> '' \ + ORDER BY created_at ASC LIMIT 1", + &[&office_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, office_uid = %office_uid, "office instance-layout archetype lookup"); + return Err(server_error_json("database error")); + } + }; + if let Some(row) = row_arch { + let arch: String = row.get(0); + if dp_instance_archetype_segment_safe(&arch) { + return Ok(format!("instances/{arch}")); + } + } + Ok("instances".to_string()) +} + +async fn 
handle_office_instance_layout_entries( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, + path_query: &str, +) -> Response { + let Some(ref repo_root) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the monorepo root containing data/dossiers-permanents" + }), + ); + }; + let dp_root = match resolve_office_instance_dp_root(state, claims, office_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + let rel_pb = if path_query.trim().is_empty() { + PathBuf::new() + } else { + match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + } + }; + let repo_root = repo_root.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let entries = match tokio::task::spawn_blocking(move || { + read_dp_layout_dir_entries(repo_root, data_sub, dp_root, rel_pb) + }) + .await + { + Ok(Ok(e)) => e, + Ok(Err(msg)) => { + if msg == "not_found" { + return not_found_json(); + } + error!(%msg, office_uid = %office_uid, "office instance-layout read"); + return json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_read_failed", "detail": msg }), + ); + } + Err(e) => { + error!(?e, "office instance-layout spawn_blocking"); + return server_error_json("database error"); + } + }; + json_response( + StatusCode::OK, + serde_json::json!({ + "path": path_query, + "entries": entries, + }), + ) +} + +async fn handle_office_instance_layout_file( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, + path_query: &str, +) -> Response { + let Some(ref repo_root) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the 
monorepo root containing data/dossiers-permanents" + }), + ); + }; + let dp_root = match resolve_office_instance_dp_root(state, claims, office_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + if path_query.trim().is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "path_required", + "detail": "path must reference a file under the office instance layout root" + }), + ); + } + let rel_pb = match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + }; + let repo_root = repo_root.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let path_owned = path_query.to_string(); + let text_res = match tokio::task::spawn_blocking(move || { + read_dp_layout_text_file(repo_root, data_sub, dp_root, rel_pb) + }) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "office instance-layout file spawn_blocking"); + return server_error_json("database error"); + } + }; + match text_res { + Ok(text) => json_response( + StatusCode::OK, + json!({ "path": path_owned, "text": text }), + ), + Err(ref msg) if msg == "not_found" => not_found_json(), + Err(ref msg) if msg == "invalid_path" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "invalid_path" })) + } + Err(ref msg) if msg == "not_a_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_file" })) + } + Err(ref msg) if msg == "file_too_large" => json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ), + Err(ref msg) if msg == "file_type_not_allowed" => json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "file_type_not_allowed" }), + ), + Err(msg) => { + error!(%msg, office_uid = %office_uid, "office instance-layout file read"); + json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_read_failed", "detail": msg }), + ) + } + } +} + +async fn 
handle_folder_dp_layout_entries( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + path_query: &str, +) -> Response { + let Some(ref repo_root) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the monorepo root containing data/dossiers-permanents" + }), + ); + }; + let dp_root = match resolve_dp_layout_folder_dp_root(state, claims, folder_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + let rel_pb = if path_query.trim().is_empty() { + PathBuf::new() + } else { + match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + } + }; + let repo_root = repo_root.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let entries = match tokio::task::spawn_blocking(move || { + read_dp_layout_dir_entries(repo_root, data_sub, dp_root, rel_pb) + }) + .await + { + Ok(Ok(e)) => e, + Ok(Err(msg)) => { + if msg == "not_found" { + return not_found_json(); + } + error!(%msg, folder_uid = %folder_uid, "dp-layout read"); + return json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_read_failed", "detail": msg }), + ); + } + Err(e) => { + error!(?e, "dp-layout spawn_blocking"); + return server_error_json("database error"); + } + }; + json_response( + StatusCode::OK, + serde_json::json!({ + "path": path_query, + "entries": entries, + }), + ) +} + +async fn handle_folder_dp_layout_file( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + path_query: &str, +) -> Response { + let Some(ref repo_root) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the monorepo root containing 
data/dossiers-permanents" + }), + ); + }; + let dp_root = match resolve_dp_layout_folder_dp_root(state, claims, folder_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + if path_query.trim().is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "path_required", + "detail": "query parameter path must reference a file under dp_layout_root" + }), + ); + } + let rel_pb = match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + }; + let repo_root = repo_root.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let path_owned = path_query.to_string(); + let text_res = match tokio::task::spawn_blocking(move || { + read_dp_layout_text_file(repo_root, data_sub, dp_root, rel_pb) + }) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "dp-layout file spawn_blocking"); + return server_error_json("database error"); + } + }; + match text_res { + Ok(text) => json_response( + StatusCode::OK, + json!({ "path": path_owned, "text": text }), + ), + Err(ref msg) if msg == "not_found" => not_found_json(), + Err(ref msg) if msg == "invalid_path" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "invalid_path" })) + } + Err(ref msg) if msg == "not_a_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_file" })) + } + Err(ref msg) if msg == "file_too_large" => json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ), + Err(ref msg) if msg == "file_type_not_allowed" => json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "file_type_not_allowed" }), + ), + Err(msg) => { + error!(%msg, folder_uid = %folder_uid, "dp-layout file read"); + json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_read_failed", "detail": msg }), + ) + } + } +} + +#[derive(Deserialize)] +struct PutDpGabaritBody { + text: 
String, +} + +/// Path relative to the Git repo root for `git add` after a gabarit write. +fn dp_layout_file_repo_relative(data_subpath: &Path, dp_layout_root: &str, rel_file: &Path) -> PathBuf { + let mut p = data_subpath.to_path_buf(); + for seg in dp_layout_root.trim().split('/').filter(|s| !s.is_empty()) { + p.push(seg); + } + p.push(rel_file); + p +} + +async fn handle_put_office_instance_layout_gabarit_file( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, + path_query: &str, + body_bytes: &[u8], +) -> Response { + use crate::dp_layout_fs::{dp_layout_gabarit_file_name_allowed, write_dp_layout_gabarit_text_file}; + + let Some(ref repo_root_cfg) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the monorepo root containing data/dossiers-permanents" + }), + ); + }; + let parsed: PutDpGabaritBody = match serde_json::from_slice(body_bytes) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json", "detail": "expected JSON body { \"text\": \"...\" }" }), + ); + } + }; + const MAX_BYTES: usize = 512 * 1024; + if parsed.text.as_bytes().len() > MAX_BYTES { + return json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ); + } + let dp_root = match resolve_office_instance_dp_root(state, claims, office_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + if path_query.trim().is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "path_required", + "detail": "path must reference a gabarit file under the office instance layout root" + }), + ); + } + let rel_pb = match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + }; + let file_name = rel_pb + .file_name() + 
.and_then(|n| n.to_str()) + .unwrap_or(""); + if file_name.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "not_a_file" }), + ); + } + if !dp_layout_gabarit_file_name_allowed(file_name) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "not_a_gabarit_file", + "detail": "only __GABARIT__.md and *.__TEMPLATE__.md may be edited via this API" + }), + ); + } + + let repo_root = repo_root_cfg.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let dp_owned = dp_root.clone(); + let rel_owned = rel_pb.clone(); + let text_owned = parsed.text; + let path_owned = path_query.to_string(); + let rel_git = dp_layout_file_repo_relative(&data_sub, &dp_root, &rel_pb); + + let _guard = state.dp_git_serial.lock().await; + let write_res = match tokio::task::spawn_blocking(move || { + write_dp_layout_gabarit_text_file(repo_root, data_sub, dp_owned, rel_owned, &text_owned) + }) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "put office instance-layout gabarit spawn_blocking"); + return server_error_json("database error"); + } + }; + match write_res { + Ok(()) => { + if state.config.dp_git_sync.enabled { + dp_git_sync::sync_data_paths(&state.config.dp_git_sync, &[rel_git]); + } + json_response( + StatusCode::OK, + json!({ "path": path_owned, "saved": true }), + ) + } + Err(ref msg) if msg == "not_found" => not_found_json(), + Err(ref msg) if msg == "invalid_path" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "invalid_path" })) + } + Err(ref msg) if msg == "not_a_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_file" })) + } + Err(ref msg) if msg == "not_a_gabarit_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_gabarit_file" })) + } + Err(ref msg) if msg == "file_too_large" => json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ), + Err(msg) => { + error!(%msg, office_uid = %office_uid, "put 
office instance-layout gabarit write"); + json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_write_failed", "detail": msg }), + ) + } + } +} + +async fn handle_put_folder_dp_layout_gabarit_file( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + path_query: &str, + body_bytes: &[u8], +) -> Response { + use crate::dp_layout_fs::{dp_layout_gabarit_file_name_allowed, write_dp_layout_gabarit_text_file}; + + let Some(ref repo_root_cfg) = state.config.dp_git_sync.repo_root else { + return json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ + "error": "dp_repo_root_unconfigured", + "detail": "Set DOCV_DP_GIT_REPO_ROOT to the monorepo root containing data/dossiers-permanents" + }), + ); + }; + let parsed: PutDpGabaritBody = match serde_json::from_slice(body_bytes) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json", "detail": "expected JSON body { \"text\": \"...\" }" }), + ); + } + }; + const MAX_BYTES: usize = 512 * 1024; + if parsed.text.as_bytes().len() > MAX_BYTES { + return json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ); + } + let dp_root = match resolve_dp_layout_folder_dp_root(state, claims, folder_uid).await { + Ok(d) => d, + Err(r) => return r, + }; + if path_query.trim().is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "path_required", + "detail": "query parameter path must reference a gabarit file under dp_layout_root" + }), + ); + } + let rel_pb = match dp_git_sync::safe_relative_under_repo(path_query) { + Some(p) => p, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_path" }), + ); + } + }; + let file_name = rel_pb + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(""); + if file_name.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "not_a_file" }), + ); + } + if 
!dp_layout_gabarit_file_name_allowed(file_name) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ + "error": "not_a_gabarit_file", + "detail": "only __GABARIT__.md and *.__TEMPLATE__.md may be edited via this API" + }), + ); + } + + let repo_root = repo_root_cfg.clone(); + let data_sub = state.config.dp_git_sync.data_subpath.clone(); + let dp_owned = dp_root.clone(); + let rel_owned = rel_pb.clone(); + let text_owned = parsed.text; + let path_owned = path_query.to_string(); + let rel_git = dp_layout_file_repo_relative(&data_sub, &dp_root, &rel_pb); + + let _guard = state.dp_git_serial.lock().await; + let write_res = match tokio::task::spawn_blocking(move || { + write_dp_layout_gabarit_text_file(repo_root, data_sub, dp_owned, rel_owned, &text_owned) + }) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "put folder dp-layout gabarit spawn_blocking"); + return server_error_json("database error"); + } + }; + match write_res { + Ok(()) => { + if state.config.dp_git_sync.enabled { + dp_git_sync::sync_data_paths(&state.config.dp_git_sync, &[rel_git]); + } + json_response( + StatusCode::OK, + json!({ "path": path_owned, "saved": true }), + ) + } + Err(ref msg) if msg == "not_found" => not_found_json(), + Err(ref msg) if msg == "invalid_path" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "invalid_path" })) + } + Err(ref msg) if msg == "not_a_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_file" })) + } + Err(ref msg) if msg == "not_a_gabarit_file" => { + json_response(StatusCode::BAD_REQUEST, json!({ "error": "not_a_gabarit_file" })) + } + Err(ref msg) if msg == "file_too_large" => json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "file_too_large" }), + ), + Err(msg) => { + error!(%msg, folder_uid = %folder_uid, "put folder dp-layout gabarit write"); + json_response( + StatusCode::INTERNAL_SERVER_ERROR, + json!({ "error": "dp_layout_write_failed", "detail": msg }), + ) + } + } +} + 
+#[derive(Deserialize)] +struct CreateFolderBody { + office_uid: Uuid, + title: String, + #[serde(default)] + operation_type: Option, +} + +async fn handle_create_folder( + state: &AppState, + claims: &AccessClaims, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: CreateFolderBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let title = parsed.title.trim(); + if title.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "title_required" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, parsed.office_uid).await { + return forbidden_json(); + } + let op_store: Option = parsed + .operation_type + .as_ref() + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .map(|s| s.chars().take(500).collect::()); + let row = if db::folders_has_folder_type_uid_column(client.deref()).await { + let ft_uid = match db::ensure_folder_type_uid_for_api_create(client.deref(), parsed.office_uid).await + { + Ok(u) => u, + Err(e) => { + error!( + ?e, + office_uid = %parsed.office_uid, + "create folder: IMPL folder_type_uid (ensure + require)" + ); + return server_error_json("database error"); + } + }; + match client + .query_one( + "INSERT INTO folders (office_uid, folder_type_uid, title, description, status, \ + dp_archetype, dp_layout_root, folder_purpose, operation_type) \ + VALUES ($1, $2, $3, '', 'open', NULL, NULL, 'client_operation', $4) \ + RETURNING uid, office_uid, title, status, created_at::text AS created_at, \ + updated_at::text AS updated_at, folder_purpose, operation_type, dp_archetype, dp_layout_root, extends_permanent_record", + &[&parsed.office_uid, &ft_uid, &title, &op_store], + ) + .await + { + Ok(r) => r, + 
Err(e) => { + error!(?e, "folder insert (IMPL folder_type_uid)"); + return server_error_json("database error"); + } + } + } else { + match client + .query_one( + "INSERT INTO folders (office_uid, title, status, operation_type) VALUES ($1, $2, 'open', $3) \ + RETURNING uid, office_uid, title, status, created_at::text AS created_at, updated_at::text AS updated_at, \ + folder_purpose, operation_type, dp_archetype, dp_layout_root, extends_permanent_record", + &[&parsed.office_uid, &title, &op_store], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder insert"); + return server_error_json("database error"); + } + } + }; + let new_folder_uid: Uuid = row.get("uid"); + let f = row_folder(&row); + let msg = format!("Nouveau dossier : {title}"); + let actor_role = member_role_lower(&client, parsed.office_uid, user_uid) + .await + .unwrap_or_else(|| "member".into()); + let aud = audience_for_case_side(actor_role.trim()); + if let Err(e) = insert_notifications_for_peers( + &client, + parsed.office_uid, + user_uid, + "case_update", + &msg, + Some(new_folder_uid), + aud, + ) + .await + { + error!(?e, "notify create folder"); + } + json_response(StatusCode::CREATED, serde_json::to_value(&f).unwrap()) +} + +#[derive(Deserialize)] +struct PatchFolderBody { + title: Option, + status: Option, + #[serde(default)] + operation_type: Option, + #[serde(default)] + extends_permanent_record: Option, +} + +async fn handle_patch_folder( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: PatchFolderBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + if parsed.title.is_none() + && parsed.status.is_none() + && parsed.operation_type.is_none() + && parsed.extends_permanent_record.is_none() + { + return 
json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "no_fields" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let Some(office_uid) = folder_access_office_uid(&client, user_uid, folder_uid).await else { + return not_found_json(); + }; + let prev = match client + .query_one( + "SELECT title, status, operation_type, extends_permanent_record FROM folders WHERE uid = $1", + &[&folder_uid], + ) + .await + { + Ok(r) => r, + Err(_) => return not_found_json(), + }; + let prev_title: String = prev.get(0); + let prev_status: String = prev.get(1); + let prev_op: Option = prev.get(2); + let prev_ext: bool = prev.get(3); + + let new_title = parsed + .title + .as_ref() + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()); + let new_status = parsed.status.as_ref().map(|s| s.trim().to_string()); + + if let Some(ref s) = new_status { + let lower = s.to_lowercase(); + if lower != "open" && lower != "closed" && lower != "pending" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_status" }), + ); + } + } + + let title_param: String = new_title.clone().unwrap_or(prev_title.clone()); + let status_param: String = new_status.clone().unwrap_or(prev_status.clone()); + let op_param: Option = if let Some(ref s) = parsed.operation_type { + let t = s.trim(); + if t.is_empty() { + None + } else { + Some(t.chars().take(500).collect()) + } + } else { + prev_op.clone() + }; + + let ext_param: bool = parsed + .extends_permanent_record + .unwrap_or(prev_ext); + + let row = match client + .query_one( + "UPDATE folders SET title = $2, status = $3, operation_type = $4, extends_permanent_record = $5, updated_at = now() \ + WHERE uid = $1 \ + RETURNING uid, office_uid, title, status, created_at::text AS created_at, updated_at::text AS updated_at, \ + folder_purpose, operation_type, dp_archetype, dp_layout_root, extends_permanent_record", + &[&folder_uid, &title_param, &status_param, 
&op_param, &ext_param], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder patch"); + return server_error_json("database error"); + } + }; + + let actor_role = member_role_lower(&client, office_uid, user_uid) + .await + .unwrap_or_else(|| "member".into()); + let aud_case = audience_for_case_side(actor_role.trim()); + + if new_status.is_some() && new_status.as_ref().unwrap().to_lowercase() != prev_status.to_lowercase() + { + let msg = format!("Statut du dossier « {title_param} » mis à jour"); + if let Err(e) = insert_notifications_for_peers( + &client, + office_uid, + user_uid, + "case_update", + &msg, + Some(folder_uid), + aud_case, + ) + .await + { + error!(?e, "notify folder status"); + } + } else if new_title.is_some() && new_title.as_deref() != Some(prev_title.as_str()) { + let msg = format!("Dossier renommé : {title_param}"); + if let Err(e) = insert_notifications_for_peers( + &client, + office_uid, + user_uid, + "case_update", + &msg, + Some(folder_uid), + aud_case, + ) + .await + { + error!(?e, "notify folder title"); + } + } else if parsed.operation_type.is_some() && prev_op != op_param { + let msg = format!("Type d’opération mis à jour : {title_param}"); + if let Err(e) = insert_notifications_for_peers( + &client, + office_uid, + user_uid, + "case_update", + &msg, + Some(folder_uid), + aud_case, + ) + .await + { + error!(?e, "notify folder operation_type"); + } + } + + let f = row_folder(&row); + json_response(StatusCode::OK, serde_json::to_value(&f).unwrap()) +} + +fn binary_upload_disabled_json() -> Response { + json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ "error": "binary_upload_disabled", "detail": "set DOCV_FILE_STORAGE_DIR" }), + ) +} + +fn payload_too_large_json() -> Response { + json_response( + StatusCode::PAYLOAD_TOO_LARGE, + json!({ "error": "payload_too_large" }), + ) +} + +fn public_document_file_url(config: &crate::config::Config, doc_uid: &Uuid) -> String { + let base = 
config.browser_oauth_prefix.trim().trim_end_matches('/'); + if base.is_empty() { + format!("/api/v1/files/{}", doc_uid) + } else { + format!("{}/api/v1/files/{}", base, doc_uid) + } +} + +fn format_upload_size_label(len: usize) -> String { + if len == 0 { + return "0 o".into(); + } + let mut n = len as f64; + let units = ["o", "Ko", "Mo", "Go"]; + let mut i = 0usize; + while n >= 1024.0 && i + 1 < units.len() { + n /= 1024.0; + i += 1; + } + let rounded = if i > 0 && n < 10.0 { + let s = format!("{:.1}", n); + if let Some(stripped) = s.strip_suffix(".0") { + stripped.to_string() + } else { + s + } + } else { + format!("{:.0}", n) + }; + format!("{} {}", rounded, units[i]) +} + +fn header_document_name(hdr: &HeaderMap) -> Option { + let raw = hdr + .get("x-enso-document-name") + .and_then(|v| v.to_str().ok()) + .map(str::trim) + .filter(|s| !s.is_empty())?; + match urlencoding::decode(raw) { + Ok(c) => { + let t = c.trim(); + if t.is_empty() { + None + } else { + Some(t.to_string()) + } + } + Err(_) => Some(raw.to_string()), + } +} + +fn header_content_type_mime(hdr: &HeaderMap) -> String { + hdr.get(hyper::header::CONTENT_TYPE) + .and_then(|v| v.to_str().ok()) + .map(|s| { + s.split(';') + .next() + .unwrap_or(s) + .trim() + .to_lowercase() + }) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "application/octet-stream".into()) +} + +async fn remove_stored_document_file_if_any(state: &AppState, doc_uid: &Uuid) { + let Some(ref dir) = state.config.file_storage_dir else { + return; + }; + let path = dir.join(doc_uid.to_string()); + if let Err(e) = tokio::fs::remove_file(&path).await { + tracing::debug!(?e, path = %path.display(), "remove stored file (optional)"); + } +} + +async fn handle_get_folder_document_file( + state: &AppState, + claims: &AccessClaims, + doc_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Some(ref base_dir) = state.config.file_storage_dir else { + return 
not_found_json(); + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT folder_uid, mime_type FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "file meta lookup"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let folder_uid: Uuid = row.get(0); + let mime_db: Option = row.get(1); + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let path = base_dir.join(doc_uid.to_string()); + let bytes = match tokio::fs::read(&path).await { + Ok(b) => b, + Err(e) => { + error!(?e, path = %path.display(), "read stored file"); + return not_found_json(); + } + }; + let ct = mime_db + .as_ref() + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .unwrap_or("application/octet-stream"); + Response::builder() + .status(StatusCode::OK) + .header(hyper::header::CONTENT_TYPE, ct) + .header(hyper::header::CACHE_CONTROL, "private, no-store") + .body(Body::from(bytes)) + .unwrap() +} + +async fn handle_upload_folder_document_binary( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + hdr: &HeaderMap, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Some(ref base_dir) = state.config.file_storage_dir else { + return binary_upload_disabled_json(); + }; + if body.len() > state.config.upload_max_bytes { + return payload_too_large_json(); + } + let name = match header_document_name(hdr) { + Some(n) => n, + None => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "name_required", "detail": "header X-Enso-Document-Name" }), + ); + } + }; + let name = name.trim(); + if name.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "name_required" 
}), + ); + } + let kind = hdr + .get("x-enso-document-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("autre") + .trim() + .to_lowercase(); + let allowed_kind = matches!( + kind.as_str(), + "kbis" | "statuts" | "pv_ag" | "pacte_associes" | "contrat" | "facture" | "autre" + ); + if !allowed_kind { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_document_type" }), + ); + } + let cat = hdr + .get("x-enso-category") + .and_then(|v| v.to_str().ok()) + .unwrap_or("dossier") + .trim() + .to_lowercase(); + if cat != "permanent" && cat != "dossier" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_category" }), + ); + } + let upl = hdr + .get("x-enso-uploaded-by") + .and_then(|v| v.to_str().ok()) + .unwrap_or("client") + .trim() + .to_lowercase(); + if upl != "cabinet" && upl != "client" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_uploaded_by" }), + ); + } + let mime_type = header_content_type_mime(hdr); + let size_label = format_upload_size_label(body.len()); + + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let Some(office_uid) = folder_access_office_uid(&client, user_uid, folder_uid).await else { + return not_found_json(); + }; + + let dp_mirror_rel = dp_mirror::resolve_dp_mirror_relative(&state.config, hdr, folder_uid, name); + let dp_mirror_db: Option = dp_mirror_rel + .as_ref() + .map(|p| p.to_string_lossy().to_string()); + + let row = match client + .query_one( + "INSERT INTO folder_documents (folder_uid, name, doc_type, category, uploaded_by, size_label, storage_url, mime_type, dp_mirror_path) \ + VALUES ($1, $2, $3, $4, $5, $6, NULL, $7, $8) \ + RETURNING uid, name, doc_type, category, uploaded_by, size_label, created_at::text AS created_at, storage_url, mime_type, dp_mirror_path, workflow_state", + &[ + &folder_uid, + &name, + &kind, + &cat, + &upl, + &size_label, + &mime_type, + &dp_mirror_db, + ], + ) 
+ .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder document insert binary"); + return server_error_json("database error"); + } + }; + let doc_uid: Uuid = row.get("uid"); + + if let Err(e) = tokio::fs::create_dir_all(base_dir).await { + error!(?e, "create upload dir"); + let _ = client + .execute( + "DELETE FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await; + return server_error_json("storage unavailable"); + } + let fs_path = base_dir.join(doc_uid.to_string()); + if let Err(e) = tokio::fs::write(&fs_path, body).await { + error!(?e, path = %fs_path.display(), "write upload file"); + let _ = client + .execute( + "DELETE FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await; + return server_error_json("storage write failed"); + } + + let storage_url = public_document_file_url(&state.config, &doc_uid); + if let Err(e) = client + .execute( + "UPDATE folder_documents SET storage_url = $1 WHERE uid = $2", + &[&storage_url, &doc_uid], + ) + .await + { + error!(?e, "folder document set storage_url"); + let _ = tokio::fs::remove_file(&fs_path).await; + let _ = client + .execute( + "DELETE FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await; + return server_error_json("database error"); + } + + if let (Some(ref mrel), Some(ref repo)) = (&dp_mirror_rel, &state.config.dp_git_sync.repo_root) { + let _dp_git_serial = state.dp_git_serial.lock().await; + let dest = repo + .join(&state.config.dp_git_sync.data_subpath) + .join(mrel); + if let Some(parent) = dest.parent() { + if let Err(e) = tokio::fs::create_dir_all(parent).await { + warn!(?e, path = %parent.display(), "DP mirror mkdir"); + } + } + match tokio::fs::copy(&fs_path, &dest).await { + Ok(_) => { + if state.config.dp_git_sync.enabled { + let rel = state.config.dp_git_sync.data_subpath.join(mrel); + dp_git_sync::sync_data_paths(&state.config.dp_git_sync, &[rel]); + } + } + Err(e) => warn!(?e, path = %dest.display(), "DP mirror copy failed"), + } + } else if 
state.config.dp_git_sync.enabled && dp_mirror_rel.is_some() { + warn!("DOCV_DP_GIT_SYNC enabled but DOCV_DP_GIT_REPO_ROOT unset; mirror skipped"); + } + + let row_out = match client + .query_one( + "SELECT uid, name, doc_type, category, uploaded_by, size_label, created_at::text AS created_at, storage_url, mime_type, dp_mirror_path, workflow_state \ + FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "reload document after binary upload"); + return server_error_json("database error"); + } + }; + let doc = row_folder_document(&row_out); + + let msg = format!("Nouveau document : {name}"); + let aud_doc = audience_for_document_upload(&upl); + if let Err(e) = insert_notifications_for_peers( + &client, + office_uid, + user_uid, + "new_document", + &msg, + Some(folder_uid), + aud_doc, + ) + .await + { + error!(?e, "notify new document"); + } + + json_response(StatusCode::CREATED, serde_json::to_value(&doc).unwrap()) +} + +#[derive(Deserialize)] +struct CreateDocumentBody { + name: String, + #[serde(rename = "type", default = "default_doc_kind")] + kind: String, + #[serde(default = "default_doc_category")] + category: String, + #[serde(default = "default_doc_uploader")] + uploaded_by: String, + #[serde(default)] + size: Option, + #[serde(default)] + storage_url: Option, + #[serde(default)] + mime_type: Option, +} + +fn default_doc_kind() -> String { + "autre".to_string() +} + +fn default_doc_category() -> String { + "dossier".to_string() +} + +fn default_doc_uploader() -> String { + "client".to_string() +} + +async fn handle_create_folder_document( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: CreateDocumentBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": 
"invalid_json" }), + ); + } + }; + let name = parsed.name.trim(); + if name.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "name_required" }), + ); + } + let kind = parsed.kind.to_lowercase(); + let allowed_kind = matches!( + kind.as_str(), + "kbis" | "statuts" | "pv_ag" | "pacte_associes" | "contrat" | "facture" | "autre" + ); + if !allowed_kind { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_document_type" }), + ); + } + let cat = parsed.category.to_lowercase(); + if cat != "permanent" && cat != "dossier" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_category" }), + ); + } + let upl = parsed.uploaded_by.to_lowercase(); + if upl != "cabinet" && upl != "client" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_uploaded_by" }), + ); + } + let size_label = parsed.size.unwrap_or_else(|| "—".to_string()); + + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let Some(office_uid) = folder_access_office_uid(&client, user_uid, folder_uid).await else { + return not_found_json(); + }; + let row = match client + .query_one( + "INSERT INTO folder_documents (folder_uid, name, doc_type, category, uploaded_by, size_label, storage_url, mime_type, dp_mirror_path) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NULL) \ + RETURNING uid, name, doc_type, category, uploaded_by, size_label, created_at::text AS created_at, storage_url, mime_type, dp_mirror_path, workflow_state", + &[ + &folder_uid, + &name, + &kind, + &cat, + &upl, + &size_label, + &parsed.storage_url, + &parsed.mime_type, + ], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder document insert"); + return server_error_json("database error"); + } + }; + let doc = row_folder_document(&row); + + let msg = format!("Nouveau document : {name}"); + let aud_doc = audience_for_document_upload(&upl); + if let Err(e) = 
insert_notifications_for_peers( + &client, + office_uid, + user_uid, + "new_document", + &msg, + Some(folder_uid), + aud_doc, + ) + .await + { + error!(?e, "notify new document"); + } + + json_response(StatusCode::CREATED, serde_json::to_value(&doc).unwrap()) +} + +async fn handle_delete_folder_document( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + doc_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let n = match client + .execute( + "DELETE FROM folder_documents WHERE uid = $1 AND folder_uid = $2", + &[&doc_uid, &folder_uid], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "folder document delete"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + remove_stored_document_file_if_any(state, &doc_uid).await; + json_response(StatusCode::OK, json!({ "ok": true })) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct OfficeMemberJson { + user_uid: String, + email: String, + name: Option, + role: String, +} + +async fn handle_office_members( + state: &AppState, + claims: &AccessClaims, + office_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let col = if state.config.users_pk_column == "uid" { + "uid" + } else { + "id" + }; + let sql = format!( + "SELECT m.user_uid::text, u.email, u.name, m.role \ + FROM office_members m \ + INNER JOIN users u ON u.{col}::text = m.user_uid::text \ + 
WHERE m.office_uid = $1 \ + ORDER BY lower(u.email)" + ); + let rows = match client.query(&sql, &[&office_uid]).await { + Ok(r) => r, + Err(e) => { + error!(?e, "office members list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| OfficeMemberJson { + user_uid: r.get(0), + email: r.get(1), + name: r.get(2), + role: r.get(3), + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +#[derive(Deserialize)] +struct CreatePendingDocumentBody { + target_user_uid: Uuid, + office_uid: Uuid, + case_uid: Uuid, + name: String, + description: String, + #[serde(default)] + case_name: Option, + #[serde(default)] + due_date: Option, +} + +async fn handle_create_pending_document( + state: &AppState, + claims: &AccessClaims, + body: &[u8], +) -> Response { + let actor_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: CreatePendingDocumentBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let name = parsed.name.trim(); + if name.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "name_required" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, actor_uid, parsed.office_uid).await { + return forbidden_json(); + } + let actor_role = member_role_lower(&client, parsed.office_uid, actor_uid) + .await + .unwrap_or_else(|| "member".into()); + if actor_role.trim() == "client" { + return forbidden_json(); + } + if !user_member_of_office(&client, parsed.target_user_uid, parsed.office_uid).await { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "target_not_in_office" }), + ); + } + let target_role = member_role_lower(&client, parsed.office_uid, parsed.target_user_uid) + 
.await + .unwrap_or_else(|| "member".into()); + if target_role.trim() != "client" { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "target_must_be_client_role" }), + ); + } + let folder_office = match client + .query_opt( + "SELECT office_uid FROM folders WHERE uid = $1", + &[&parsed.case_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "pending folder lookup"); + return server_error_json("database error"); + } + }; + let Some(fo) = folder_office else { + return not_found_json(); + }; + let fo_uid: Uuid = fo.get(0); + if fo_uid != parsed.office_uid { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "case_office_mismatch" }), + ); + } + let case_name = parsed + .case_name + .clone() + .unwrap_or_else(|| name.to_string()); + let due_cl = parsed.due_date.as_ref().and_then(|s| { + let t = s.trim(); + if t.is_empty() { + None + } else { + Some(t.to_string()) + } + }); + let pending_id: Uuid = match client + .query_one( + "INSERT INTO user_pending_documents \ + (user_uid, office_uid, name, description, case_uid, case_name, due_date) \ + VALUES ($1, $2, $3, $4, $5, $6, $7::date) \ + RETURNING id", + &[ + &parsed.target_user_uid, + &parsed.office_uid, + &name, + &parsed.description, + &parsed.case_uid, + &case_name, + &due_cl, + ], + ) + .await + { + Ok(r) => r.get(0), + Err(e) => { + error!(?e, "insert pending document"); + return server_error_json("database error"); + } + }; + let nmsg = format!("Pièce demandée : {name}"); + if let Err(e) = client + .execute( + "INSERT INTO user_notifications (user_uid, office_uid, notif_type, message, case_uid, is_read) \ + VALUES ($1, $2, 'request_document', $3, $4, false)", + &[ + &parsed.target_user_uid, + &parsed.office_uid, + &nmsg, + &parsed.case_uid, + ], + ) + .await + { + error!(?e, "notify pending request"); + } + json_response( + StatusCode::CREATED, + json!({ "id": pending_id.to_string() }), + ) +} + +async fn handle_delete_pending_document( + state: &AppState, + 
claims: &AccessClaims, + pending_id: Uuid, +) -> Response { + let actor_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT user_uid, office_uid FROM user_pending_documents WHERE id = $1", + &[&pending_id], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "pending lookup"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let recipient: Uuid = row.get(0); + let office_uid: Uuid = row.get(1); + if recipient != actor_uid { + if !user_member_of_office(&client, actor_uid, office_uid).await { + return forbidden_json(); + } + let role = member_role_lower(&client, office_uid, actor_uid) + .await + .unwrap_or_else(|| "client".into()); + if role.trim() == "client" { + return forbidden_json(); + } + } else if !user_member_of_office(&client, actor_uid, office_uid).await { + return forbidden_json(); + } + let n = match client + .execute( + "DELETE FROM user_pending_documents WHERE id = $1", + &[&pending_id], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "pending delete"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + json_response(StatusCode::OK, json!({ "ok": true })) +} + +#[derive(Deserialize)] +struct PostConversationMessageBody { + content: String, +} + +async fn handle_post_conversation_message( + state: &AppState, + claims: &AccessClaims, + conversation_id: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: PostConversationMessageBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let content = 
parsed.content.trim(); + if content.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "content_required" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT office_uid FROM user_conversations WHERE id = $1 AND user_uid = $2", + &[&conversation_id, &user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "conversation lookup"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let office_uid: Uuid = row.get(0); + let role = member_role_lower(&client, office_uid, user_uid) + .await + .unwrap_or_else(|| "member".into()); + let sender_role = if role.trim() == "client" { + "client" + } else { + "cabinet" + }; + let sender_name = claims + .name + .clone() + .filter(|s| !s.trim().is_empty()) + .unwrap_or_else(|| claims.email.clone()); + let ins_row = match client + .query_one( + "INSERT INTO conversation_messages \ + (conversation_id, sender_id, sender_name, sender_role, content, is_read) \ + VALUES ($1, $2, $3, $4, $5, true) \ + RETURNING id::text, created_at::text", + &[ + &conversation_id, + &claims.sub, + &sender_name, + &sender_role, + &content, + ], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "insert message"); + return server_error_json("database error"); + } + }; + let mid: String = ins_row.get(0); + let created_at: String = ins_row.get(1); + if let Err(e) = client + .execute( + "UPDATE user_conversations SET last_message = $1, last_message_at = now(), unread_count = 0 \ + WHERE id = $2", + &[&content, &conversation_id], + ) + .await + { + error!(?e, "update conversation preview"); + } + json_response( + StatusCode::CREATED, + json!({ + "id": mid, + "senderId": claims.sub, + "senderName": sender_name, + "senderRole": sender_role, + "content": content, + "createdAt": created_at, + "isRead": true + }), + ) +} + 
+#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct NotificationJson { + id: String, + #[serde(rename = "type")] + kind: String, + message: String, + company_id: String, + company_name: String, + #[serde(skip_serializing_if = "Option::is_none")] + case_id: Option, + created_at: String, + is_read: bool, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +struct NotificationPatchBody { + #[serde(default)] + is_read: Option, +} + +async fn handle_patch_notification_read( + state: &AppState, + claims: &AccessClaims, + notification_id: Uuid, + body_bytes: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let body: NotificationPatchBody = if body_bytes.is_empty() { + NotificationPatchBody { is_read: None } + } else { + match serde_json::from_slice(body_bytes) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + } + }; + if body.is_read == Some(false) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_body", "detail": "isRead must be true" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let updated = match client + .execute( + "UPDATE user_notifications SET is_read = true \ + WHERE id = $1 AND user_uid = $2", + &[¬ification_id, &user_uid], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "patch notification read"); + return server_error_json("database error"); + } + }; + if updated == 0 { + return not_found_json(); + } + json_response( + StatusCode::OK, + json!({ + "id": notification_id.to_string(), + "isRead": true + }), + ) +} + +async fn handle_notifications_list(state: &AppState, claims: &AccessClaims) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + 
return server_error_json("database unavailable"); + }; + let rows = match client + .query( + "SELECT n.id::text, n.notif_type, n.message, n.office_uid::text, o.name, n.case_uid, \ + n.created_at::text, n.is_read \ + FROM user_notifications n \ + INNER JOIN offices o ON o.uid = n.office_uid \ + INNER JOIN office_members m ON m.office_uid = n.office_uid AND m.user_uid = $1 \ + WHERE n.user_uid = $1 ORDER BY n.created_at DESC LIMIT 100", + &[&user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "notifications list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| { + let case_uid: Option = r.get(5); + NotificationJson { + id: r.get(0), + kind: r.get(1), + message: r.get(2), + company_id: r.get(3), + company_name: r.get(4), + case_id: case_uid.map(|u| Uuid::to_string(&u)), + created_at: r.get(6), + is_read: r.get(7), + } + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct PendingDocumentJson { + id: String, + name: String, + description: String, + company_id: String, + company_name: String, + case_id: String, + case_name: String, + requested_at: String, + #[serde(skip_serializing_if = "Option::is_none")] + due_date: Option, +} + +async fn handle_pending_documents_list(state: &AppState, claims: &AccessClaims) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let rows = match client + .query( + "SELECT p.id::text, p.name, p.description, p.office_uid::text, o.name, p.case_uid::text, \ + p.case_name, p.requested_at::text, p.due_date::text \ + FROM user_pending_documents p \ + INNER JOIN offices o ON o.uid = p.office_uid \ + INNER JOIN office_members m ON m.office_uid = p.office_uid AND m.user_uid = $1 \ + WHERE p.user_uid = $1 
ORDER BY p.requested_at DESC LIMIT 100", + &[&user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "pending documents list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| PendingDocumentJson { + id: r.get(0), + name: r.get(1), + description: r.get(2), + company_id: r.get(3), + company_name: r.get(4), + case_id: r.get(5), + case_name: r.get(6), + requested_at: r.get(7), + due_date: r.get(8), + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct MessageJson { + id: String, + sender_id: String, + sender_name: String, + sender_role: String, + content: String, + created_at: String, + is_read: bool, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ConversationJson { + id: String, + contact_name: String, + contact_role: String, + last_message: String, + last_message_at: String, + unread_count: i32, + messages: Vec, +} + +async fn handle_conversations_list(state: &AppState, claims: &AccessClaims) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let conv_rows = match client + .query( + "SELECT c.id, c.contact_name, c.contact_role, c.last_message, c.last_message_at::text, c.unread_count \ + FROM user_conversations c \ + INNER JOIN office_members om ON om.office_uid = c.office_uid AND om.user_uid = $1 \ + WHERE c.user_uid = $1 ORDER BY c.last_message_at DESC LIMIT 50", + &[&user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "conversations list"); + return server_error_json("database error"); + } + }; + + let msg_rows = match client + .query( + "SELECT m.conversation_id, m.id::text, m.sender_id, m.sender_name, m.sender_role, m.content, \ + m.created_at::text, m.is_read \ + FROM 
conversation_messages m \ + INNER JOIN user_conversations c ON c.id = m.conversation_id \ + INNER JOIN office_members om ON om.office_uid = c.office_uid AND om.user_uid = $1 \ + WHERE c.user_uid = $1 ORDER BY m.conversation_id, m.created_at ASC", + &[&user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "conversation messages list"); + return server_error_json("database error"); + } + }; + + let mut by_conv: HashMap> = HashMap::new(); + for r in msg_rows { + let conv_id: Uuid = r.get(0); + let msg = MessageJson { + id: r.get(1), + sender_id: r.get(2), + sender_name: r.get(3), + sender_role: r.get(4), + content: r.get(5), + created_at: r.get(6), + is_read: r.get(7), + }; + by_conv.entry(conv_id).or_default().push(msg); + } + + let list: Vec = conv_rows + .iter() + .map(|row: &Row| { + let cid: Uuid = row.get(0); + let messages = by_conv.remove(&cid).unwrap_or_default(); + ConversationJson { + id: cid.to_string(), + contact_name: row.get(1), + contact_role: row.get(2), + last_message: row.get(3), + last_message_at: row.get(4), + unread_count: row.get::<_, i32>(5), + messages, + } + }) + .collect(); + + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +fn service_unavailable_json(detail: &str) -> Response { + json_response( + StatusCode::SERVICE_UNAVAILABLE, + json!({ "error": "service_unavailable", "detail": detail }), + ) +} + +async fn office_uids_visible_for_downstream( + client: &deadpool_postgres::Client, + user_uid: Uuid, +) -> Result, tokio_postgres::Error> { + let mut set: HashSet = HashSet::new(); + let rows = client + .query( + "SELECT office_uid FROM office_members WHERE user_uid = $1", + &[&user_uid], + ) + .await?; + let mut frontier: Vec = Vec::new(); + for row in rows { + let u: Uuid = row.get(0); + if set.insert(u) { + frontier.push(u); + } + } + while !frontier.is_empty() { + let mut next: Vec = Vec::new(); + for p in frontier { + let children = client + .query( + "SELECT uid FROM offices \ + WHERE 
parent_office_uid = $1 AND archived_at IS NULL", + &[&p], + ) + .await?; + for row in children { + let uid: Uuid = row.get(0); + if set.insert(uid) { + next.push(uid); + } + } + } + frontier = next; + } + Ok(set.into_iter().collect()) +} + +async fn user_may_reference_document_as_source( + client: &deadpool_postgres::Client, + user_uid: Uuid, + target_doc_uid: Uuid, +) -> Result { + let row = client + .query_opt( + "SELECT f.office_uid FROM folder_documents d \ + INNER JOIN folders f ON f.uid = d.folder_uid WHERE d.uid = $1", + &[&target_doc_uid], + ) + .await?; + let Some(row) = row else { + return Ok(false); + }; + let target_office: Uuid = row.get(0); + let visible = office_uids_visible_for_downstream(client, user_uid).await?; + Ok(visible.contains(&target_office)) +} + +fn workflow_transition_allowed(current: &str, next: &str, is_client: bool) -> bool { + if current == next { + return true; + } + let cabinet = !is_client; + match (current, next) { + ("draft", "requested") => cabinet, + ("requested", "submitted") => is_client, + ("submitted", "validated") => cabinet, + ("submitted", "rejected") => cabinet, + ("draft", "archived") | ("requested", "archived") => cabinet, + ("submitted", "archived") => cabinet, + ("validated", "archived") | ("rejected", "archived") => cabinet, + _ => false, + } +} + +async fn handle_document_search( + state: &AppState, + claims: &AccessClaims, + q_raw: Option<&str>, + limit: i64, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let visible = match office_uids_visible_for_downstream(&client, user_uid).await { + Ok(v) => v, + Err(e) => { + error!(?e, "document search visible offices"); + return server_error_json("database error"); + } + }; + if visible.is_empty() { + return json_response(StatusCode::OK, json!([])); + } + let needle = 
q_raw.unwrap_or("").trim(); + let like = if needle.is_empty() { + "%".to_string() + } else { + format!("%{}%", needle.replace('%', "\\%").replace('_', "\\_")) + }; + let lim = limit.clamp(1, 100); + let rows = match client + .query( + "SELECT d.uid::text, d.name, d.doc_type::text, d.folder_uid::text, f.title, f.office_uid::text \ + FROM folder_documents d \ + INNER JOIN folders f ON f.uid = d.folder_uid \ + WHERE f.office_uid = ANY($1) AND d.name ILIKE $2 ESCAPE '\\' \ + ORDER BY d.name ASC LIMIT $3", + &[&&visible[..], &like, &lim], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "document search query"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| { + json!({ + "id": r.get::<_, String>(0), + "name": r.get::<_, String>(1), + "type": r.get::<_, String>(2), + "folderUid": r.get::<_, String>(3), + "folderTitle": r.get::<_, String>(4), + "officeUid": r.get::<_, String>(5), + }) + }) + .collect(); + json_response(StatusCode::OK, serde_json::Value::Array(list)) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct DocumentSourceJson { + id: String, + folder_uid: String, + label: Option, + target_document_uid: String, + #[serde(skip_serializing_if = "Option::is_none")] + target_document_name: Option, + created_by: String, + created_at: String, +} + +async fn handle_folder_document_sources_list( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let rows = match client + .query( + "SELECT s.uid::text, s.folder_uid::text, s.label, s.target_document_uid::text, \ + s.created_by::text, s.created_at::text, d.name \ + FROM 
folder_document_sources s \ + LEFT JOIN folder_documents d ON d.uid = s.target_document_uid \ + WHERE s.folder_uid = $1 ORDER BY s.created_at DESC", + &[&folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "document sources list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| DocumentSourceJson { + id: r.get(0), + folder_uid: r.get(1), + label: r.get(2), + target_document_uid: r.get(3), + target_document_name: r.get(6), + created_by: r.get(4), + created_at: r.get(5), + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateDocumentSourceBody { + target_document_uid: String, + #[serde(default)] + label: Option, +} + +async fn handle_folder_document_sources_create( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: CreateDocumentSourceBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let target = match Uuid::parse_str(parsed.target_document_uid.trim()) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let ok = match user_may_reference_document_as_source(&client, user_uid, target).await { + Ok(b) => b, + Err(e) => { + error!(?e, "source target visibility"); + return server_error_json("database error"); + } + }; + if !ok { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "target_document_not_visible" }), + ); + } + let row = match client + .query_one( 
+ "INSERT INTO folder_document_sources (folder_uid, label, target_document_uid, created_by) \ + VALUES ($1, $2, $3, $4) \ + RETURNING uid::text, folder_uid::text, label, target_document_uid::text, created_by::text, created_at::text", + &[ + &folder_uid, + &parsed.label, + &target, + &user_uid, + ], + ) + .await + { + Ok(r) => r, + Err(e) => { + if e.code() == Some(&tokio_postgres::error::SqlState::UNIQUE_VIOLATION) { + return json_response( + StatusCode::CONFLICT, + json!({ "error": "source_already_exists" }), + ); + } + error!(?e, "document source insert"); + return server_error_json("database error"); + } + }; + let target_name = match client + .query_opt("SELECT name FROM folder_documents WHERE uid = $1", &[&target]) + .await + { + Ok(Some(r)) => Some(r.get::<_, String>(0)), + _ => None, + }; + let out = DocumentSourceJson { + id: row.get(0), + folder_uid: row.get(1), + label: row.get(2), + target_document_uid: row.get(3), + target_document_name: target_name, + created_by: row.get(4), + created_at: row.get(5), + }; + json_response(StatusCode::CREATED, serde_json::to_value(&out).unwrap()) +} + +async fn handle_folder_document_sources_delete( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + source_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let n = match client + .execute( + "DELETE FROM folder_document_sources WHERE uid = $1 AND folder_uid = $2", + &[&source_uid, &folder_uid], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "document source delete"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + json_response(StatusCode::OK, json!({ "ok": true })) +} + +#[derive(Serialize)] 
+#[serde(rename_all = "camelCase")] +struct FolderNoteJson { + id: String, + folder_uid: String, + content: String, + author_user_uid: String, + created_at: String, + updated_at: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct FolderNoteCreateBody { + content: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct FolderNotePatchBody { + content: String, +} + +async fn handle_folder_notes_list( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let rows = match client + .query( + "SELECT uid::text, folder_uid::text, content, author_user_uid::text, \ + created_at::text, updated_at::text FROM folder_notes \ + WHERE folder_uid = $1 ORDER BY created_at DESC", + &[&folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder notes list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows + .iter() + .map(|r| FolderNoteJson { + id: r.get(0), + folder_uid: r.get(1), + content: r.get(2), + author_user_uid: r.get(3), + created_at: r.get(4), + updated_at: r.get(5), + }) + .collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +async fn handle_folder_notes_create( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: FolderNoteCreateBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let content = 
parsed.content.trim(); + if content.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "content_required" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let row = match client + .query_one( + "INSERT INTO folder_notes (folder_uid, content, author_user_uid) \ + VALUES ($1, $2, $3) \ + RETURNING uid::text, folder_uid::text, content, author_user_uid::text, created_at::text, updated_at::text", + &[&folder_uid, &content, &user_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder note insert"); + return server_error_json("database error"); + } + }; + let out = FolderNoteJson { + id: row.get(0), + folder_uid: row.get(1), + content: row.get(2), + author_user_uid: row.get(3), + created_at: row.get(4), + updated_at: row.get(5), + }; + json_response(StatusCode::CREATED, serde_json::to_value(&out).unwrap()) +} + +async fn handle_folder_notes_patch( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + note_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: FolderNotePatchBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let content = parsed.content.trim(); + if content.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "content_required" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let n = match client + .execute( + "UPDATE folder_notes SET content = $1, updated_at = now() 
\ + WHERE uid = $2 AND folder_uid = $3 AND author_user_uid = $4", + &[&content, ¬e_uid, &folder_uid, &user_uid], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "folder note patch"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + let row = match client + .query_one( + "SELECT uid::text, folder_uid::text, content, author_user_uid::text, created_at::text, updated_at::text \ + FROM folder_notes WHERE uid = $1", + &[¬e_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "folder note reload"); + return server_error_json("database error"); + } + }; + let out = FolderNoteJson { + id: row.get(0), + folder_uid: row.get(1), + content: row.get(2), + author_user_uid: row.get(3), + created_at: row.get(4), + updated_at: row.get(5), + }; + json_response(StatusCode::OK, serde_json::to_value(&out).unwrap()) +} + +async fn handle_folder_notes_delete( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + note_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let n = match client + .execute( + "DELETE FROM folder_notes WHERE uid = $1 AND folder_uid = $2 AND author_user_uid = $3", + &[¬e_uid, &folder_uid, &user_uid], + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "folder note delete"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + json_response(StatusCode::OK, json!({ "ok": true })) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct TaskJson { + id: String, + office_uid: String, + folder_uid: Option, + title: String, + description: Option, + status: String, + assignee_user_uid: Option, + due_at: Option, + 
created_at: String, + updated_at: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct TaskCreateBody { + office_uid: String, + #[serde(default)] + folder_uid: Option, + title: String, + #[serde(default)] + description: Option, + #[serde(default)] + status: Option, + #[serde(default)] + assignee_user_uid: Option, + #[serde(default)] + due_at: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct TaskPatchBody { + #[serde(default)] + title: Option, + #[serde(default)] + description: Option>, + #[serde(default)] + status: Option, + #[serde(default)] + assignee_user_uid: Option>, + #[serde(default)] + due_at: Option>, +} + +fn row_task(r: &Row) -> TaskJson { + let folder_uid: Option = r.get("folder_uid"); + let assignee: Option = r.get("assignee_user_uid"); + let due: Option = r.get("due_at"); + TaskJson { + id: r.get::<_, Uuid>("uid").to_string(), + office_uid: r.get::<_, Uuid>("office_uid").to_string(), + folder_uid: folder_uid.map(|u| u.to_string()), + title: r.get("title"), + description: r.get("description"), + status: r.get("status"), + assignee_user_uid: assignee.map(|u| u.to_string()), + due_at: due, + created_at: r.get::<_, String>("created_at"), + updated_at: r.get::<_, String>("updated_at"), + } +} + +async fn handle_tasks_list( + state: &AppState, + claims: &AccessClaims, + office_uid: Option, + folder_uid: Option, + status_f: Option<&str>, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Some(office_uid) = office_uid else { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "office_uid_required" }), + ); + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let status_s = status_f.map(str::trim).filter(|s| !s.is_empty()); + let rows = match (folder_uid, 
status_s) { + (None, None) => { + client + .query( + "SELECT uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text FROM tasks WHERE office_uid = $1 \ + ORDER BY updated_at DESC LIMIT 200", + &[&office_uid], + ) + .await + } + (Some(fu), None) => { + client + .query( + "SELECT uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text FROM tasks WHERE office_uid = $1 AND folder_uid = $2 \ + ORDER BY updated_at DESC LIMIT 200", + &[&office_uid, &fu], + ) + .await + } + (None, Some(st)) => { + client + .query( + "SELECT uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text FROM tasks WHERE office_uid = $1 AND status = $2 \ + ORDER BY updated_at DESC LIMIT 200", + &[&office_uid, &st], + ) + .await + } + (Some(fu), Some(st)) => { + client + .query( + "SELECT uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text FROM tasks WHERE office_uid = $1 AND folder_uid = $2 AND status = $3 \ + ORDER BY updated_at DESC LIMIT 200", + &[&office_uid, &fu, &st], + ) + .await + } + }; + let rows = match rows { + Ok(r) => r, + Err(e) => { + error!(?e, "tasks list"); + return server_error_json("database error"); + } + }; + let list: Vec = rows.iter().map(row_task).collect(); + json_response(StatusCode::OK, serde_json::to_value(&list).unwrap()) +} + +async fn handle_tasks_create( + state: &AppState, + claims: &AccessClaims, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: TaskCreateBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let office_uid = match 
Uuid::parse_str(parsed.office_uid.trim()) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let title = parsed.title.trim(); + if title.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "title_required" }), + ); + } + let folder_uid = match parsed.folder_uid.as_deref() { + Some(s) if !s.trim().is_empty() => match Uuid::parse_str(s.trim()) { + Ok(u) => Some(u), + Err(_) => return invalid_uuid_json(), + }, + _ => None, + }; + let status = parsed + .status + .unwrap_or_else(|| "open".to_string()) + .to_lowercase(); + if !matches!( + status.as_str(), + "open" | "in_progress" | "done" | "cancelled" + ) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_task_status" }), + ); + } + let assignee = match parsed.assignee_user_uid.as_deref() { + Some(s) if !s.trim().is_empty() => match Uuid::parse_str(s.trim()) { + Ok(u) => Some(u), + Err(_) => return invalid_uuid_json(), + }, + _ => None, + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + if let Some(fu) = folder_uid { + let fo = match client + .query_opt("SELECT office_uid FROM folders WHERE uid = $1", &[&fu]) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "task create folder lookup"); + return server_error_json("database error"); + } + }; + let Some(fo) = fo else { + return not_found_json(); + }; + let f_office: Uuid = fo.get(0); + if f_office != office_uid { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "folder_office_mismatch" }), + ); + } + if folder_access_office_uid(&client, user_uid, fu) + .await + .is_none() + { + return not_found_json(); + } + } + let row = match client + .query_one( + "INSERT INTO tasks (office_uid, folder_uid, title, description, status, assignee_user_uid, due_at) \ + VALUES ($1, $2, $3, $4, $5, $6, $7) \ + RETURNING uid, office_uid, 
folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text", + &[ + &office_uid, + &folder_uid, + &title, + &parsed.description, + &status, + &assignee, + &parsed.due_at, + ], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "task insert"); + return server_error_json("database error"); + } + }; + json_response(StatusCode::CREATED, serde_json::to_value(&row_task(&row)).unwrap()) +} + +async fn handle_tasks_patch( + state: &AppState, + claims: &AccessClaims, + task_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: TaskPatchBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text FROM tasks WHERE uid = $1", + &[&task_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "task patch load"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let office_uid: Uuid = row.get("office_uid"); + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let mut title: String = row.get("title"); + let mut description: Option = row.get("description"); + let mut status: String = row.get("status"); + let mut assignee: Option = row.get("assignee_user_uid"); + let mut due_at: Option = row.get("due_at"); + if let Some(t) = parsed.title { + let t = t.trim(); + if t.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "title_required" }), + ); + } + title = t.to_string(); + } + if 
let Some(d) = parsed.description { + description = d; + } + if let Some(st) = parsed.status { + let st = st.to_lowercase(); + if !matches!( + st.as_str(), + "open" | "in_progress" | "done" | "cancelled" + ) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_task_status" }), + ); + } + status = st; + } + if let Some(a) = parsed.assignee_user_uid { + assignee = match a { + Some(s) if !s.trim().is_empty() => match Uuid::parse_str(s.trim()) { + Ok(u) => Some(u), + Err(_) => return invalid_uuid_json(), + }, + _ => None, + }; + } + if let Some(d) = parsed.due_at { + due_at = d; + } + let row2 = match client + .query_one( + "UPDATE tasks SET title = $1, description = $2, status = $3, assignee_user_uid = $4, due_at = $5, updated_at = now() \ + WHERE uid = $6 \ + RETURNING uid, office_uid, folder_uid, title, description, status, assignee_user_uid, \ + due_at::text, created_at::text, updated_at::text", + &[&title, &description, &status, &assignee, &due_at, &task_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "task update"); + return server_error_json("database error"); + } + }; + json_response(StatusCode::OK, serde_json::to_value(&row_task(&row2)).unwrap()) +} + +async fn handle_tasks_delete( + state: &AppState, + claims: &AccessClaims, + task_uid: Uuid, +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT office_uid FROM tasks WHERE uid = $1", + &[&task_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "task delete lookup"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let office_uid: Uuid = row.get(0); + if !user_member_of_office(&client, user_uid, office_uid).await { + return forbidden_json(); + } + let n = match client + 
.execute("DELETE FROM tasks WHERE uid = $1", &[&task_uid]) + .await + { + Ok(n) => n, + Err(e) => { + error!(?e, "task delete"); + return server_error_json("database error"); + } + }; + if n == 0 { + return not_found_json(); + } + json_response(StatusCode::OK, json!({ "ok": true })) +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct PatchWorkflowBody { + workflow_state: String, +} + +async fn handle_patch_document_workflow( + state: &AppState, + claims: &AccessClaims, + folder_uid: Uuid, + doc_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: PatchWorkflowBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let next = parsed.workflow_state.trim().to_lowercase(); + let allowed_next = matches!( + next.as_str(), + "draft" + | "requested" + | "submitted" + | "validated" + | "rejected" + | "archived" + ); + if !allowed_next { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_workflow_state" }), + ); + } + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let Some(office_uid) = folder_access_office_uid(&client, user_uid, folder_uid).await else { + return not_found_json(); + }; + let role_lc = member_role_lower(&client, office_uid, user_uid) + .await + .unwrap_or_default(); + let is_client = role_lc == "client"; + let row = match client + .query_opt( + "SELECT workflow_state FROM folder_documents WHERE uid = $1 AND folder_uid = $2", + &[&doc_uid, &folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "workflow load"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let current: String = row.get(0); + let cur = current.to_lowercase(); + if 
!workflow_transition_allowed(cur.as_str(), next.as_str(), is_client) { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "workflow_transition_not_allowed" }), + ); + } + let row2 = match client + .query_one( + "UPDATE folder_documents SET workflow_state = $1 WHERE uid = $2 AND folder_uid = $3 \ + RETURNING uid, name, doc_type, category, uploaded_by, size_label, created_at::text AS created_at, \ + storage_url, mime_type, dp_mirror_path, workflow_state", + &[&next, &doc_uid, &folder_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "workflow update"); + return server_error_json("database error"); + } + }; + json_response( + StatusCode::OK, + serde_json::to_value(&row_folder_document(&row2)).unwrap(), + ) +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct AiChatBody { + messages: serde_json::Value, + #[serde(default)] + context: Option, +} + +async fn handle_ai_chat( + state: &AppState, + claims: &AccessClaims, + body: &[u8], +) -> Response { + let _user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: AiChatBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let forward = json!({ + "messages": parsed.messages, + "context": parsed.context, + }); + let s = forward.to_string(); + if s.len() > state.config.ai_max_input_chars { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "prompt_too_large" }), + ); + } + match post_ai_json(&state.config, &forward).await { + Ok(v) => json_response(StatusCode::OK, v), + Err(AiForwardError::NotConfigured) => { + service_unavailable_json("IA service is not configured") + } + Err(AiForwardError::Request(msg)) => { + error!(%msg, "ai chat forward"); + service_unavailable_json("IA request failed") + } + Err(AiForwardError::UpstreamStatus(c)) => { + json_response( + 
StatusCode::BAD_GATEWAY, + json!({ "error": "ai_upstream_error", "status": c }), + ) + } + } +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct AiAssistBody { + prompt: String, + #[serde(default)] + mode: Option, +} + +async fn handle_ai_document_assist( + state: &AppState, + claims: &AccessClaims, + doc_uid: Uuid, + body: &[u8], +) -> Response { + let user_uid = match claims.sub.parse::() { + Ok(u) => u, + Err(_) => return bad_claims_sub(), + }; + let parsed: AiAssistBody = match serde_json::from_slice(body) { + Ok(b) => b, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_json" }), + ); + } + }; + let prompt = parsed.prompt.trim(); + if prompt.is_empty() { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "prompt_required" }), + ); + } + let mode = parsed.mode.unwrap_or_else(|| "summary".to_string()); + let Ok(client) = state.pool.get().await else { + return server_error_json("database unavailable"); + }; + let row = match client + .query_opt( + "SELECT folder_uid, name, mime_type FROM folder_documents WHERE uid = $1", + &[&doc_uid], + ) + .await + { + Ok(r) => r, + Err(e) => { + error!(?e, "ai assist doc lookup"); + return server_error_json("database error"); + } + }; + let Some(row) = row else { + return not_found_json(); + }; + let folder_uid: Uuid = row.get(0); + let doc_name: String = row.get(1); + let mime: Option = row.get(2); + if folder_access_office_uid(&client, user_uid, folder_uid) + .await + .is_none() + { + return not_found_json(); + } + let mut excerpt = String::new(); + if let Some(ref base_dir) = state.config.file_storage_dir { + let path = base_dir.join(doc_uid.to_string()); + if let Ok(bytes) = tokio::fs::read(&path).await { + let max = 24_000usize; + let slice = if bytes.len() > max { + &bytes[..max] + } else { + bytes.as_slice() + }; + excerpt = String::from_utf8_lossy(slice).to_string(); + } + } + let forward = json!({ + "mode": mode, + "documentUid": 
doc_uid.to_string(), + "documentName": doc_name, + "mimeType": mime, + "userPrompt": prompt, + "excerpt": excerpt, + }); + let s = forward.to_string(); + if s.len() > state.config.ai_max_input_chars { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "prompt_too_large" }), + ); + } + match post_ai_json(&state.config, &forward).await { + Ok(v) => json_response(StatusCode::OK, v), + Err(AiForwardError::NotConfigured) => { + service_unavailable_json("IA service is not configured") + } + Err(AiForwardError::Request(msg)) => { + error!(%msg, "ai assist forward"); + service_unavailable_json("IA request failed") + } + Err(AiForwardError::UpstreamStatus(c)) => { + json_response( + StatusCode::BAD_GATEWAY, + json!({ "error": "ai_upstream_error", "status": c }), + ) + } + } +} + +/// Handle `path` starting with `/api/v1` (full request URI path). +pub async fn handle(state: Arc, req: Request, path: &str) -> Response { + let (parts, body) = req.into_parts(); + let hdr = &parts.headers; + let method = parts.method.clone(); + let uri = parts.uri; + + let body_bytes = match to_bytes(body).await { + Ok(b) => b, + Err(e) => { + error!(?e, "api_v1 read body"); + return server_error_json("read body"); + } + }; + + let rest = path.trim_start_matches("/api/v1").trim_start_matches('/'); + let segments: Vec<&str> = rest.split('/').filter(|s| !s.is_empty()).collect(); + + let token = match bearer_token(hdr) { + Some(t) => t, + None => return unauthorized_json(), + }; + let claims = match decode_access_token(&state.config.jwt_secret, &token) { + Ok(c) => c, + Err(()) => return unauthorized_json(), + }; + if claims.iss != "docv-back" { + return unauthorized_json(); + } + + if method == Method::POST { + use super::v1_route::{match_post, PostAction}; + match match_post(&segments) { + Some(PostAction::CreateFolder) => { + return handle_create_folder(&state, &claims, &body_bytes).await; + } + Some(PostAction::UploadFolderDocumentBinary(folder_uid)) => { + return 
handle_upload_folder_document_binary( + &state, + &claims, + folder_uid, + hdr, + &body_bytes, + ) + .await; + } + Some(PostAction::CreateFolderDocument(folder_uid)) => { + return handle_create_folder_document( + &state, + &claims, + folder_uid, + &body_bytes, + ) + .await; + } + Some(PostAction::CreatePendingDocument) => { + return handle_create_pending_document(&state, &claims, &body_bytes).await; + } + Some(PostAction::PostConversationMessage(cid)) => { + return handle_post_conversation_message(&state, &claims, cid, &body_bytes).await; + } + Some(PostAction::CreateOfficeComment(office_uid)) => { + return handle_office_comments_create(&state, &claims, office_uid, &body_bytes).await; + } + Some(PostAction::CreateDocumentSource(folder_uid)) => { + return handle_folder_document_sources_create( + &state, + &claims, + folder_uid, + &body_bytes, + ) + .await; + } + Some(PostAction::CreateFolderNote(folder_uid)) => { + return handle_folder_notes_create(&state, &claims, folder_uid, &body_bytes).await; + } + Some(PostAction::CreateTask) => { + return handle_tasks_create(&state, &claims, &body_bytes).await; + } + Some(PostAction::AiChat) => { + return handle_ai_chat(&state, &claims, &body_bytes).await; + } + Some(PostAction::AiDocumentAssist(doc_uid)) => { + return handle_ai_document_assist(&state, &claims, doc_uid, &body_bytes).await; + } + None => return not_found_json(), + } + } + + if method == Method::PATCH { + if segments.len() == 2 && segments[0] == "notifications" { + let notification_id = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_patch_notification_read(&state, &claims, notification_id, &body_bytes) + .await; + } + if segments.len() == 2 && segments[0] == "folders" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_patch_folder(&state, &claims, folder_uid, &body_bytes).await; + } + if segments.len() == 4 && 
segments[0] == "folders" && segments[2] == "notes" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let note_uid = match Uuid::parse_str(segments[3]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_folder_notes_patch(&state, &claims, folder_uid, note_uid, &body_bytes) + .await; + } + if segments.len() == 2 && segments[0] == "tasks" { + let task_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_tasks_patch(&state, &claims, task_uid, &body_bytes).await; + } + if segments.len() == 5 + && segments[0] == "folders" + && segments[2] == "documents" + && segments[4] == "workflow" + { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let doc_uid = match Uuid::parse_str(segments[3]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_patch_document_workflow(&state, &claims, folder_uid, doc_uid, &body_bytes) + .await; + } + return not_found_json(); + } + + if method == Method::PUT { + if segments.len() >= 3 && segments[0] == "offices" && segments[2] == "instance-layout-file" { + let office_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_put_office_instance_layout_gabarit_file( + &state, + &claims, + office_uid, + path_merged.as_str(), + body_bytes.as_ref(), + ) + .await; + } + if segments.len() >= 3 && segments[0] == "folders" && segments[2] == "dp-layout-file" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + 
let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_put_folder_dp_layout_gabarit_file( + &state, + &claims, + folder_uid, + path_merged.as_str(), + body_bytes.as_ref(), + ) + .await; + } + return not_found_json(); + } + + if method == Method::DELETE { + use super::v1_route::{match_delete, DeleteAction}; + match match_delete(&segments) { + Some(DeleteAction::FolderDocument { folder_uid, doc_uid }) => { + return handle_delete_folder_document(&state, &claims, folder_uid, doc_uid).await; + } + Some(DeleteAction::PendingDocument(pid)) => { + return handle_delete_pending_document(&state, &claims, pid).await; + } + Some(DeleteAction::DocumentSource { + folder_uid, + source_uid, + }) => { + return handle_folder_document_sources_delete( + &state, + &claims, + folder_uid, + source_uid, + ) + .await; + } + Some(DeleteAction::FolderNote { + folder_uid, + note_uid, + }) => { + return handle_folder_notes_delete(&state, &claims, folder_uid, note_uid).await; + } + Some(DeleteAction::Task(task_uid)) => { + return handle_tasks_delete(&state, &claims, task_uid).await; + } + None => return not_found_json(), + } + } + + if method != Method::GET { + return json_response( + StatusCode::METHOD_NOT_ALLOWED, + json!({ "error": "method_not_allowed" }), + ); + } + + if segments.len() == 2 && segments[0] == "files" { + let doc_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_get_folder_document_file(&state, &claims, doc_uid).await; + } + + if rest == "me" { + return handle_me(&state, &claims).await; + } + if rest == "offices" { + return handle_offices_list(&state, &claims).await; + } + if rest == "notifications" { + return handle_notifications_list(&state, &claims).await; + } + if rest == 
"pending-documents" { + return handle_pending_documents_list(&state, &claims).await; + } + if rest == "conversations" { + return handle_conversations_list(&state, &claims).await; + } + + if segments.len() == 1 && segments[0] == "document-search" { + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let q_raw = map.get("q").map(|s| s.as_str()); + let limit = map + .get("limit") + .and_then(|s| s.parse::().ok()) + .unwrap_or(40); + return handle_document_search(&state, &claims, q_raw, limit).await; + } + + if segments.len() == 1 && segments[0] == "tasks" { + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let office_uid = map + .get("office_uid") + .and_then(|s| Uuid::parse_str(s.trim()).ok()); + let folder_uid = map + .get("folder_uid") + .and_then(|s| Uuid::parse_str(s.trim()).ok()); + let status_f = map.get("status").map(|s| s.as_str()); + return handle_tasks_list(&state, &claims, office_uid, folder_uid, status_f).await; + } + + if segments.len() == 3 && segments[0] == "folders" && segments[2] == "document-sources" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_folder_document_sources_list(&state, &claims, folder_uid).await; + } + + if segments.len() == 3 && segments[0] == "folders" && segments[2] == "notes" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_folder_notes_list(&state, &claims, folder_uid).await; + } + + if segments.len() == 3 && segments[0] == "offices" && segments[2] == "members" { + let uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_office_members(&state, &claims, uid).await; + } + if segments.len() == 3 && segments[0] == "offices" && segments[2] == "comments" { + let office_uid = match 
Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_office_comments_list(&state, &claims, office_uid).await; + } + if segments.len() >= 3 && segments[0] == "offices" && segments[2] == "instance-layout-entries" { + let office_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_office_instance_layout_entries(&state, &claims, office_uid, path_merged.as_str()) + .await; + } + if segments.len() >= 3 && segments[0] == "offices" && segments[2] == "instance-layout-file" { + let office_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_office_instance_layout_file(&state, &claims, office_uid, path_merged.as_str()) + .await; + } + if segments.len() == 2 && segments[0] == "offices" { + let uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_office_detail(&state, &claims, uid).await; + } + if segments.len() >= 3 && segments[0] == "folders" && segments[2] == "dp-layout-entries" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let 
path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_folder_dp_layout_entries(&state, &claims, folder_uid, path_merged.as_str()) + .await; + } + if segments.len() >= 3 && segments[0] == "folders" && segments[2] == "dp-layout-file" { + let folder_uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + let q = uri.query().unwrap_or(""); + let map: HashMap = serde_urlencoded::from_str(q).unwrap_or_default(); + let path_from_query = map.get("path").map(|s| s.as_str()).unwrap_or(""); + let path_merged = + dp_layout_rel_from_uri_segments_and_query(&segments, 3, path_from_query); + return handle_folder_dp_layout_file(&state, &claims, folder_uid, path_merged.as_str()) + .await; + } + if segments.len() == 2 && segments[0] == "folders" { + let uid = match Uuid::parse_str(segments[1]) { + Ok(u) => u, + Err(_) => return invalid_uuid_json(), + }; + return handle_folder_detail(&state, &claims, uid).await; + } + if segments.len() == 1 && segments[0] == "folders" { + let q = uri.query().unwrap_or(""); + let map: std::collections::HashMap = + serde_urlencoded::from_str(q).unwrap_or_default(); + let office_s = match map.get("office_uid") { + Some(s) if !s.is_empty() => s.as_str(), + _ => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "office_uid_required" }), + ); + } + }; + let office_uid = match Uuid::parse_str(office_s) { + Ok(u) => u, + Err(_) => { + return json_response( + StatusCode::BAD_REQUEST, + json!({ "error": "invalid_office_uid" }), + ); + } + }; + return handle_folders_list(&state, &claims, office_uid).await; + } + + not_found_json() +} diff --git a/services/docv/docv-back/src/server/mod.rs b/services/docv/docv-back/src/server/mod.rs new file mode 100644 index 0000000..8d5368c --- /dev/null +++ b/services/docv/docv-back/src/server/mod.rs @@ -0,0 +1,568 @@ +//! 
HTTP (hyper 0.14): health, OAuth2 authorization_code, docv sign-in (users en BDD docv). + +mod ai_forward; +mod api_v1; +mod v1_route; + +use crate::api_auth::AccessClaims; +use crate::config::Config; +use crate::db::DbPool; +use hyper::body::to_bytes; +use hyper::service::{make_service_fn, service_fn}; +use hyper::{Body, Method, Request, Response, Server, StatusCode}; +use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::convert::Infallible; +use std::net::SocketAddr; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; +use tokio::sync::Mutex as AsyncMutex; +use tokio_postgres::Row; +use tracing::{error, warn}; +use uuid::Uuid; + +const COOKIE_SESSION: &str = "docv_oauth_session"; +const CODE_TTL: Duration = Duration::from_secs(300); + +#[derive(Clone)] +pub struct AppState { + pub config: Config, + pub pool: DbPool, + pub codes: Arc>>, + /// Sérialise miroir disque + `git add/commit/push` pour limiter les conflits concurrents. 
+ pub dp_git_serial: Arc>, +} + +#[derive(Clone)] +pub struct OauthCodeEntry { + pub sub: String, + pub email: String, + pub name: Option, + pub expires: Instant, +} + +#[derive(Debug, Serialize, Deserialize)] +struct SessionClaims { + sub: String, + email: String, + name: Option, + exp: usize, +} + +#[derive(Deserialize)] +struct TokenForm { + grant_type: String, + code: String, + redirect_uri: String, + client_id: String, + client_secret: String, +} + +fn unix_now() -> usize { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs() as usize) + .unwrap_or(0) +} + +fn session_cookie_value( + secret: &str, + sub: &str, + email: &str, + name: &Option, + ttl_secs: u64, +) -> Result { + let exp = unix_now() + ttl_secs as usize; + let claims = SessionClaims { + sub: sub.to_string(), + email: email.to_string(), + name: name.clone(), + exp, + }; + encode( + &Header::default(), + &claims, + &EncodingKey::from_secret(secret.as_bytes()), + ) +} + +fn read_session(secret: &str, cookie_header: Option<&str>) -> Option { + let raw = extract_cookie(cookie_header?, COOKIE_SESSION)?; + decode::( + &raw, + &DecodingKey::from_secret(secret.as_bytes()), + &Validation::default(), + ) + .ok() + .map(|d| d.claims) +} + +fn extract_cookie(header_val: &str, name: &str) -> Option { + for part in header_val.split(';') { + let part = part.trim(); + if let Some(rest) = part.strip_prefix(name) { + if let Some(v) = rest.strip_prefix('=') { + return Some(v.trim().to_string()); + } + } + } + None +} + +fn redirect(status: StatusCode, loc: &str) -> Response { + Response::builder() + .status(status) + .header(hyper::header::LOCATION, loc) + .body(Body::empty()) + .unwrap() +} + +/// Origin `scheme://host[:port]` without path (no dependency on `url` crate). 
+fn uri_origin(uri: &str) -> Option { + let u = uri.trim(); + let pos = u.find("://")?; + let rest = &u[pos + 3..]; + let path_i = rest.find('/').unwrap_or(rest.len()); + Some(u[..pos + 3 + path_i].to_string()) +} + +/// `return_url` must share the same origin as at least one registered OAuth `redirect_uri`. +fn sign_out_return_allowed(config: &Config, return_url: &str) -> bool { + let rt = return_url.trim(); + if rt.is_empty() { + return false; + } + config.oauth_redirect_uris.iter().any(|r| { + uri_origin(r) + .map(|o| rt.starts_with(&format!("{}/", o)) || rt == o) + .unwrap_or(false) + }) +} + +fn response_clear_session_redirect(loc: &str) -> Response { + let clear = format!( + "{}=; Path=/; HttpOnly; SameSite=Lax; Max-Age=0", + COOKIE_SESSION + ); + let hv = hyper::header::HeaderValue::from_str(&clear).unwrap_or_else(|_| { + hyper::header::HeaderValue::from_static("docv_oauth_session=; Path=/; Max-Age=0") + }); + let loc_h = hyper::header::HeaderValue::from_str(loc).unwrap_or_else(|_| { + hyper::header::HeaderValue::from_static("/") + }); + Response::builder() + .status(StatusCode::FOUND) + .header(hyper::header::LOCATION, loc_h) + .header(hyper::header::SET_COOKIE, hv) + .body(Body::empty()) + .unwrap() +} + +async fn handle_sign_out_get(state: Arc, req: &Request) -> Response { + let Some(qs) = req.uri().query() else { + return bad_request(); + }; + let map: HashMap = match serde_urlencoded::from_str(qs) { + Ok(m) => m, + Err(_) => return bad_request(), + }; + let return_url = match map.get("return_url") { + Some(s) if !s.is_empty() => s.as_str(), + _ => return bad_request(), + }; + if !sign_out_return_allowed(&state.config, return_url) { + warn!(return_url = %return_url, "sign-out return_url rejected"); + return bad_request(); + } + response_clear_session_redirect(return_url) +} + +fn redirect_with_set_cookie(status: StatusCode, loc: &str, set_cookie: &str) -> Response { + let hv = hyper::header::HeaderValue::from_str(set_cookie).unwrap_or_else(|_| { + 
hyper::header::HeaderValue::from_static("docv_oauth_session=invalid; Path=/") + }); + Response::builder() + .status(status) + .header(hyper::header::LOCATION, loc) + .header(hyper::header::SET_COOKIE, hv) + .body(Body::empty()) + .unwrap() +} + +fn bad_request() -> Response { + Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(Body::from("bad request")) + .unwrap() +} + +fn unauthorized() -> Response { + Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body(Body::from("unauthorized")) + .unwrap() +} + +fn html_response(html: String) -> Response { + Response::builder() + .status(StatusCode::OK) + .header(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8") + .body(Body::from(html)) + .unwrap() +} + +async fn row_by_email( + client: &deadpool_postgres::Client, + email: &str, + users_pk_column: &str, +) -> Result, tokio_postgres::Error> { + let col = if users_pk_column == "uid" { + "uid" + } else { + "id" + }; + let sql = format!( + "SELECT {}::text AS id, email, password_hash, name FROM users WHERE lower(email) = lower($1)", + col + ); + client.query_opt(&sql, &[&email]).await +} + +fn sign_in_server_error_html(title: &str, detail: &str) -> Response { + let title_esc = html_escape::encode_text(title); + let detail_esc = html_escape::encode_text(detail); + let html = format!( + r#"{title_esc} + +

{title_esc}

{detail_esc}

"# + ); + Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8") + .body(Body::from(html)) + .unwrap() +} + +async fn handle_authorize(state: Arc, req: &Request) -> Response { + let Some(qs) = req.uri().query() else { + return bad_request(); + }; + let map: HashMap = match serde_urlencoded::from_str(qs) { + Ok(m) => m, + Err(_) => return bad_request(), + }; + let response_type = match map.get("response_type") { + Some(s) => s.as_str(), + None => return bad_request(), + }; + if response_type != "code" { + return bad_request(); + } + let client_id = match map.get("client_id") { + Some(s) => s.as_str(), + None => return bad_request(), + }; + let redirect_uri = match map.get("redirect_uri") { + Some(s) => s.as_str(), + None => return bad_request(), + }; + if !state.config.oauth_client_id_allowed(client_id) { + return bad_request(); + } + if !state.config.redirect_uri_allowed(redirect_uri) { + warn!(uri = %redirect_uri, "redirect_uri not allowed"); + return bad_request(); + } + + let cookie_header = req + .headers() + .get(hyper::header::COOKIE) + .and_then(|v| v.to_str().ok()); + + if read_session(&state.config.jwt_secret, cookie_header).is_none() { + let mut current = format!( + "/oauth/authorize?response_type=code&client_id={}&redirect_uri={}", + urlencoding::encode(client_id), + urlencoding::encode(redirect_uri), + ); + if let Some(st) = map.get("state") { + current.push_str(&format!("&state={}", urlencoding::encode(st))); + } + if let Some(sc) = map.get("scope") { + current.push_str(&format!("&scope={}", urlencoding::encode(sc))); + } + let return_for_browser = state.config.browser_oauth_path(¤t); + let sign_in = state.config.browser_oauth_path("/oauth/sign-in"); + let to = format!( + "{}?return_url={}", + sign_in, + urlencoding::encode(&return_for_browser) + ); + return redirect(StatusCode::FOUND, &to); + } + + let s = read_session(&state.config.jwt_secret, cookie_header).unwrap(); + let 
code = Uuid::new_v4().to_string(); + { + let mut cmap = state.codes.lock().expect("codes"); + cmap.retain(|_, v| v.expires > Instant::now()); + cmap.insert( + code.clone(), + OauthCodeEntry { + sub: s.sub.clone(), + email: s.email.clone(), + name: s.name.clone(), + expires: Instant::now() + CODE_TTL, + }, + ); + } + + let mut loc = format!( + "{}?code={}", + redirect_uri, + urlencoding::encode(&code) + ); + if let Some(st) = map.get("state") { + loc.push_str(&format!("&state={}", urlencoding::encode(st))); + } + redirect(StatusCode::FOUND, &loc) +} + +async fn handle_sign_in_get(state: Arc, req: &Request) -> Response { + let Some(qs) = req.uri().query() else { + return bad_request(); + }; + let map: HashMap = match serde_urlencoded::from_str(qs) { + Ok(m) => m, + Err(_) => return bad_request(), + }; + let return_url = match map.get("return_url") { + Some(s) => s.as_str(), + None => return bad_request(), + }; + if return_url.is_empty() || !return_url.contains("/oauth/authorize") { + return bad_request(); + } + let ru = html_escape::encode_double_quoted_attribute(return_url); + let cid = crate::branding::client_id_from_authorize_return_url(return_url); + let branding = crate::branding::resolve_branding(&state.config.tenants_json, cid.as_deref()); + let form_action = state.config.browser_oauth_path("/oauth/sign-in"); + let form_action_esc = html_escape::encode_double_quoted_attribute(&form_action); + let html = crate::branding::sign_in_page_html( + ru.as_ref(), + &branding, + form_action_esc.as_ref(), + ); + html_response(html) +} + +async fn handle_sign_in_post(state: Arc, req: Request) -> Response { + let body = match to_bytes(req.into_body()).await { + Ok(b) => b, + Err(_) => return bad_request(), + }; + let map: HashMap = match serde_urlencoded::from_bytes(&body) { + Ok(m) => m, + Err(_) => return bad_request(), + }; + let return_url = match map.get("return_url") { + Some(s) => s.clone(), + None => return bad_request(), + }; + if return_url.is_empty() || 
!return_url.contains("/oauth/authorize") { + return bad_request(); + } + let email = match map.get("email") { + Some(s) => s.clone(), + None => return bad_request(), + }; + let password = match map.get("password") { + Some(s) => s.clone(), + None => return bad_request(), + }; + + let client = match state.pool.get().await { + Ok(c) => c, + Err(e) => { + error!(?e, "pool"); + return sign_in_server_error_html( + "Connexion indisponible", + "Impossible de joindre la base de données. Réessayez plus tard.", + ); + } + }; + + let row = match row_by_email(&client, &email, &state.config.users_pk_column).await { + Ok(r) => r, + Err(e) => { + error!(?e, "query"); + return sign_in_server_error_html( + "Erreur serveur", + "Une erreur technique est survenue pendant la connexion.", + ); + } + }; + + let Some(row) = row else { + warn!(email = %email, "unknown user"); + return unauthorized(); + }; + + let id: String = row.get("id"); + let hash: String = row.get("password_hash"); + let name: Option = row.get("name"); + + let ok = bcrypt::verify(password.as_bytes(), &hash).unwrap_or(false); + if !ok { + return unauthorized(); + } + + let ttl = state.config.oauth_access_token_ttl_secs; + let token = match session_cookie_value( + &state.config.jwt_secret, + &id, + &email, + &name, + ttl, + ) { + Ok(t) => t, + Err(_) => { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(Body::empty()) + .unwrap(); + } + }; + + let cookie = format!( + "{}={}; Path=/; HttpOnly; SameSite=Lax; Max-Age={}", + COOKIE_SESSION, + token, + ttl + ); + redirect_with_set_cookie(StatusCode::FOUND, &return_url, &cookie) +} + +async fn handle_token(state: Arc, req: Request) -> Response { + let body = match to_bytes(req.into_body()).await { + Ok(b) => b, + Err(_) => return bad_request(), + }; + let form: TokenForm = match serde_urlencoded::from_bytes(&body) { + Ok(f) => f, + Err(_) => return bad_request(), + }; + if form.grant_type != "authorization_code" { + return bad_request(); + } + 
if !state.config.oauth_client_id_allowed(&form.client_id) + || form.client_secret != state.config.oauth_client_secret + { + return unauthorized(); + } + if !state.config.redirect_uri_allowed(&form.redirect_uri) { + return bad_request(); + } + + let entry = { + let mut cmap = state.codes.lock().expect("codes"); + cmap.retain(|_, v| v.expires > Instant::now()); + cmap.remove(&form.code) + }; + + let Some(entry) = entry else { + return bad_request(); + }; + + let ttl = state.config.oauth_access_token_ttl_secs as usize; + let exp = unix_now() + ttl; + let claims = AccessClaims { + sub: entry.sub.clone(), + email: entry.email.clone(), + name: entry.name.clone(), + exp, + iss: "docv-back".to_string(), + }; + + let access_token = match encode( + &Header::default(), + &claims, + &EncodingKey::from_secret(state.config.jwt_secret.as_bytes()), + ) { + Ok(t) => t, + Err(_) => { + return Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(Body::empty()) + .unwrap(); + } + }; + + let body = serde_json::json!({ + "access_token": access_token, + "token_type": "Bearer", + "expires_in": ttl, + }); + Response::builder() + .status(StatusCode::OK) + .header(hyper::header::CONTENT_TYPE, "application/json") + .body(Body::from(body.to_string())) + .unwrap() +} + +async fn router(state: Arc, req: Request) -> Result, Infallible> { + let path = req.uri().path().to_string(); + if path.starts_with("/api/v1") { + return Ok(api_v1::handle(state, req, &path).await); + } + let method = req.method(); + + let res = if path == "/" && method == Method::GET { + Response::builder() + .status(StatusCode::OK) + .body(Body::from("ok")) + .unwrap() + } else if path == "/oauth/authorize" && method == Method::GET { + handle_authorize(state.clone(), &req).await + } else if path == "/oauth/sign-in" && method == Method::GET { + handle_sign_in_get(state.clone(), &req).await + } else if path == "/oauth/sign-in" && method == Method::POST { + handle_sign_in_post(state.clone(), req).await + } 
else if path == "/oauth/token" && method == Method::POST { + handle_token(state, req).await + } else if path == "/oauth/sign-out" && method == Method::GET { + handle_sign_out_get(state.clone(), &req).await + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("not found")) + .unwrap() + }; + Ok(res) +} + +pub async fn serve(config: Config, pool: DbPool) -> Result<(), Box> { + let state = Arc::new(AppState { + config: config.clone(), + pool, + codes: Arc::new(Mutex::new(HashMap::new())), + dp_git_serial: Arc::new(AsyncMutex::new(())), + }); + + let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?; + let make_svc = make_service_fn(move |_| { + let st = state.clone(); + async move { + Ok::<_, Infallible>(service_fn(move |req| { + let st = st.clone(); + async move { router(st, req).await } + })) + } + }); + + let server = Server::bind(&addr).serve(make_svc); + tracing::info!("docv-back listening http://{}", addr); + server.await?; + Ok(()) +} diff --git a/services/docv/docv-back/src/server/v1_route.rs b/services/docv/docv-back/src/server/v1_route.rs new file mode 100644 index 0000000..29f61d0 --- /dev/null +++ b/services/docv/docv-back/src/server/v1_route.rs @@ -0,0 +1,106 @@ +//! Pure routing helpers for `/api/v1/*` (factorisation du dispatch). + +use uuid::Uuid; + +/// Corps `POST` reconnus (segmentation après `/api/v1`). +#[derive(Debug, Clone)] +pub enum PostAction { + CreateFolder, + CreateFolderDocument(Uuid), + /// Raw body = file bytes; headers `X-Enso-*` for metadata. 
+ UploadFolderDocumentBinary(Uuid), + CreatePendingDocument, + PostConversationMessage(Uuid), + CreateOfficeComment(Uuid), + CreateDocumentSource(Uuid), + CreateFolderNote(Uuid), + CreateTask, + AiChat, + AiDocumentAssist(Uuid), +} + +pub fn match_post(segments: &[&str]) -> Option { + match segments.len() { + 1 if segments[0] == "folders" => Some(PostAction::CreateFolder), + 1 if segments[0] == "pending-documents" => Some(PostAction::CreatePendingDocument), + 1 if segments[0] == "tasks" => Some(PostAction::CreateTask), + 2 if segments[0] == "ai" && segments[1] == "chat" => Some(PostAction::AiChat), + 4 if segments[0] == "folders" + && segments[2] == "documents" + && segments[3] == "binary" => + { + Uuid::parse_str(segments[1]) + .ok() + .map(PostAction::UploadFolderDocumentBinary) + } + 3 if segments[0] == "folders" && segments[2] == "document-sources" => { + Uuid::parse_str(segments[1]) + .ok() + .map(PostAction::CreateDocumentSource) + } + 3 if segments[0] == "folders" && segments[2] == "notes" => { + Uuid::parse_str(segments[1]).ok().map(PostAction::CreateFolderNote) + } + 3 if segments[0] == "folders" && segments[2] == "documents" => { + Uuid::parse_str(segments[1]).ok().map(PostAction::CreateFolderDocument) + } + 3 if segments[0] == "conversations" && segments[2] == "messages" => Uuid::parse_str( + segments[1], + ) + .ok() + .map(PostAction::PostConversationMessage), + 3 if segments[0] == "offices" && segments[2] == "comments" => { + Uuid::parse_str(segments[1]).ok().map(PostAction::CreateOfficeComment) + } + 4 if segments[0] == "ai" && segments[1] == "documents" && segments[3] == "assist" => { + Uuid::parse_str(segments[2]).ok().map(PostAction::AiDocumentAssist) + } + _ => None, + } +} + +#[derive(Debug, Clone)] +pub enum DeleteAction { + FolderDocument { folder_uid: Uuid, doc_uid: Uuid }, + PendingDocument(Uuid), + DocumentSource { + folder_uid: Uuid, + source_uid: Uuid, + }, + FolderNote { + folder_uid: Uuid, + note_uid: Uuid, + }, + Task(Uuid), +} + +pub 
fn match_delete(segments: &[&str]) -> Option { + match segments.len() { + 4 if segments[0] == "folders" && segments[2] == "documents" => { + let folder_uid = Uuid::parse_str(segments[1]).ok()?; + let doc_uid = Uuid::parse_str(segments[3]).ok()?; + Some(DeleteAction::FolderDocument { folder_uid, doc_uid }) + } + 4 if segments[0] == "folders" && segments[2] == "document-sources" => { + let folder_uid = Uuid::parse_str(segments[1]).ok()?; + let source_uid = Uuid::parse_str(segments[3]).ok()?; + Some(DeleteAction::DocumentSource { + folder_uid, + source_uid, + }) + } + 4 if segments[0] == "folders" && segments[2] == "notes" => { + let folder_uid = Uuid::parse_str(segments[1]).ok()?; + let note_uid = Uuid::parse_str(segments[3]).ok()?; + Some(DeleteAction::FolderNote { + folder_uid, + note_uid, + }) + } + 2 if segments[0] == "pending-documents" => { + Uuid::parse_str(segments[1]).ok().map(DeleteAction::PendingDocument) + } + 2 if segments[0] == "tasks" => Uuid::parse_str(segments[1]).ok().map(DeleteAction::Task), + _ => None, + } +} diff --git a/services/docv/docv-back/tenants.default.json b/services/docv/docv-back/tenants.default.json new file mode 100644 index 0000000..e38e5f2 --- /dev/null +++ b/services/docv/docv-back/tenants.default.json @@ -0,0 +1,18 @@ +{ + "default": { + "page_title": "Connexion", + "heading": "Connexion", + "subtitle": "Saisissez vos identifiants pour continuer.", + "primary_color": "#1e3a5f", + "accent_color": "#b45309", + "surface_color": "#f8fafc", + "text_color": "#0f172a", + "submit_label": "Continuer", + "font_family": "system-ui, -apple-system, \"Segoe UI\", sans-serif" + }, + "clients": { + "enso-web": { + "subtitle": "Accédez à votre espace Enso Avocats." 
+ } + } +} diff --git a/services/docv/docv-shared/Cargo.toml b/services/docv/docv-shared/Cargo.toml new file mode 100644 index 0000000..735254f --- /dev/null +++ b/services/docv/docv-shared/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "docv-shared" +version = "0.1.0" +edition = "2021" +description = "Shared crate for docv: validation, format, constants, business rules (natif + WASM)" + +[lints] +workspace = true + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] + +[target.'cfg(target_arch = "wasm32")'.dependencies] +# wasm-bindgen when WASM enabled diff --git a/services/docv/docv-shared/src/constants/mod.rs b/services/docv/docv-shared/src/constants/mod.rs new file mode 100644 index 0000000..e61c68a --- /dev/null +++ b/services/docv/docv-shared/src/constants/mod.rs @@ -0,0 +1,4 @@ +//! Shared constants: error codes, limits, business parameters. + +pub const MAX_EMAIL_LEN: usize = 255; +pub const MIN_PASSWORD_LEN: usize = 8; diff --git a/services/docv/docv-shared/src/format/mod.rs b/services/docv/docv-shared/src/format/mod.rs new file mode 100644 index 0000000..5f6c6d4 --- /dev/null +++ b/services/docv/docv-shared/src/format/mod.rs @@ -0,0 +1 @@ +//! Formatting helpers (dates, amounts). Used by back and optionally by front via WASM. diff --git a/services/docv/docv-shared/src/lib.rs b/services/docv/docv-shared/src/lib.rs new file mode 100644 index 0000000..d65d57c --- /dev/null +++ b/services/docv/docv-shared/src/lib.rs @@ -0,0 +1,7 @@ +//! docv-shared: validation, format, constants, business rules. +//! Consumed by docv-back (natif) and optionally by docv-front via WASM. + +pub mod constants; +pub mod format; +pub mod rules; +pub mod validation; diff --git a/services/docv/docv-shared/src/rules/mod.rs b/services/docv/docv-shared/src/rules/mod.rs new file mode 100644 index 0000000..0c89806 --- /dev/null +++ b/services/docv/docv-shared/src/rules/mod.rs @@ -0,0 +1 @@ +//! Pure business rules (no I/O). Used by back and optionally by front via WASM. 
diff --git a/services/docv/docv-shared/src/validation/mod.rs b/services/docv/docv-shared/src/validation/mod.rs new file mode 100644 index 0000000..43ba7f5 --- /dev/null +++ b/services/docv/docv-shared/src/validation/mod.rs @@ -0,0 +1,5 @@ +//! Validation helpers (email, password, lengths). Used by back and optionally by front via WASM. + +pub fn is_valid_email(s: &str) -> bool { + !s.is_empty() && s.contains('@') && s.len() <= 255 +} diff --git a/systemd/README.md b/systemd/README.md index b9dbb78..4e800ae 100644 --- a/systemd/README.md +++ b/systemd/README.md @@ -37,3 +37,11 @@ journalctl -u anythingllm -f ``` Requires `docker.service`, user `ncantu` in group `docker`, and paths in `/etc/default/anythingllm` for non-default storage. + +## Git pull des clones projet (utilisateur, pas root) + +Tirage **périodique** des dépôts listés dans `projects/*/conf.json` (voir [`../cron/README.md`](../cron/README.md)) : + +- Unités : [`user/git-pull-project-clones.service.in`](./user/git-pull-project-clones.service.in) + [`user/git-pull-project-clones.timer`](./user/git-pull-project-clones.timer) +- Installation : [`../scripts/install-git-pull-systemd-user.sh`](../scripts/install-git-pull-systemd-user.sh) (écrit dans `~/.config/systemd/user/`, **sans** `sudo`) +- Configuration : [`../cron/config.env`](../cron/config.env)