diff --git a/README.md b/README.md index f11196f77..a4bfb9fe8 100644 --- a/README.md +++ b/README.md @@ -53,9 +53,9 @@ Pangolin is an open-source, identity-based remote access platform built on WireG ## Deployment Options -- **Pangolin Cloud** — Fully managed service - no infrastructure required. -- **Self-Host: Community Edition** — Free, open source, and licensed under AGPL-3. -- **Self-Host: Enterprise Edition** — Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue. +- **Pangolin Cloud** - Fully managed service - no infrastructure required. +- **Self-Host: Community Edition** - Free, open source, and licensed under AGPL-3. +- **Self-Host: Enterprise Edition** - Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue. ## Key Features diff --git a/bruno/API Keys/Create API Key.bru b/bruno/API Keys/Create API Key.bru deleted file mode 100644 index 009b4b049..000000000 --- a/bruno/API Keys/Create API Key.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: Create API Key - type: http - seq: 1 -} - -put { - url: http://localhost:3000/api/v1/api-key - body: json - auth: inherit -} - -body:json { - { - "isRoot": true - } -} diff --git a/bruno/API Keys/Delete API Key.bru b/bruno/API Keys/Delete API Key.bru deleted file mode 100644 index 9285f7889..000000000 --- a/bruno/API Keys/Delete API Key.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Delete API Key - type: http - seq: 2 -} - -delete { - url: http://localhost:3000/api/v1/api-key/dm47aacqxxn3ubj - body: none - auth: inherit -} diff --git a/bruno/API Keys/List API Key Actions.bru b/bruno/API Keys/List API Key Actions.bru deleted file mode 100644 index ae5b721e1..000000000 --- a/bruno/API Keys/List API Key Actions.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: List API Key Actions - type: http - seq: 6 -} - -get { - url: 
http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions - body: none - auth: inherit -} diff --git a/bruno/API Keys/List Org API Keys.bru b/bruno/API Keys/List Org API Keys.bru deleted file mode 100644 index 468e964b9..000000000 --- a/bruno/API Keys/List Org API Keys.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: List Org API Keys - type: http - seq: 4 -} - -get { - url: http://localhost:3000/api/v1/org/home-lab/api-keys - body: none - auth: inherit -} diff --git a/bruno/API Keys/List Root API Keys.bru b/bruno/API Keys/List Root API Keys.bru deleted file mode 100644 index 8ef31b68c..000000000 --- a/bruno/API Keys/List Root API Keys.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: List Root API Keys - type: http - seq: 3 -} - -get { - url: http://localhost:3000/api/v1/root/api-keys - body: none - auth: inherit -} diff --git a/bruno/API Keys/Set API Key Actions.bru b/bruno/API Keys/Set API Key Actions.bru deleted file mode 100644 index 54a35c438..000000000 --- a/bruno/API Keys/Set API Key Actions.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: Set API Key Actions - type: http - seq: 5 -} - -post { - url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions - body: json - auth: inherit -} - -body:json { - { - "actionIds": ["listSites"] - } -} diff --git a/bruno/API Keys/Set API Key Orgs.bru b/bruno/API Keys/Set API Key Orgs.bru deleted file mode 100644 index 3f0676c5b..000000000 --- a/bruno/API Keys/Set API Key Orgs.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: Set API Key Orgs - type: http - seq: 7 -} - -post { - url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/orgs - body: json - auth: inherit -} - -body:json { - { - "orgIds": ["home-lab"] - } -} diff --git a/bruno/API Keys/folder.bru b/bruno/API Keys/folder.bru deleted file mode 100644 index bb8cd5c73..000000000 --- a/bruno/API Keys/folder.bru +++ /dev/null @@ -1,3 +0,0 @@ -meta { - name: API Keys -} diff --git a/bruno/Auth/2fa-disable.bru b/bruno/Auth/2fa-disable.bru deleted 
file mode 100644 index c98539c73..000000000 --- a/bruno/Auth/2fa-disable.bru +++ /dev/null @@ -1,18 +0,0 @@ -meta { - name: 2fa-disable - type: http - seq: 6 -} - -post { - url: http://localhost:3000/api/v1/auth/2fa/disable - body: json - auth: none -} - -body:json { - { - "password": "aaaaa-1A", - "code": "377289" - } -} diff --git a/bruno/Auth/2fa-enable.bru b/bruno/Auth/2fa-enable.bru deleted file mode 100644 index a3a01d177..000000000 --- a/bruno/Auth/2fa-enable.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: 2fa-enable - type: http - seq: 4 -} - -post { - url: http://localhost:3000/api/v1/auth/2fa/enable - body: json - auth: none -} - -body:json { - { - "code": "374138" - } -} diff --git a/bruno/Auth/2fa-request.bru b/bruno/Auth/2fa-request.bru deleted file mode 100644 index fcf0c9862..000000000 --- a/bruno/Auth/2fa-request.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: 2fa-request - type: http - seq: 5 -} - -post { - url: http://localhost:3000/api/v1/auth/2fa/request - body: json - auth: none -} - -body:json { - { - "password": "aaaaa-1A" - } -} diff --git a/bruno/Auth/change-password.bru b/bruno/Auth/change-password.bru deleted file mode 100644 index 7d1c707e5..000000000 --- a/bruno/Auth/change-password.bru +++ /dev/null @@ -1,18 +0,0 @@ -meta { - name: change-password - type: http - seq: 9 -} - -post { - url: http://localhost:3000/api/v1/auth/change-password - body: json - auth: none -} - -body:json { - { - "oldPassword": "", - "newPassword": "" - } -} diff --git a/bruno/Auth/login.bru b/bruno/Auth/login.bru deleted file mode 100644 index 3825a2525..000000000 --- a/bruno/Auth/login.bru +++ /dev/null @@ -1,18 +0,0 @@ -meta { - name: login - type: http - seq: 1 -} - -post { - url: http://localhost:3000/api/v1/auth/login - body: json - auth: none -} - -body:json { - { - "email": "admin@fosrl.io", - "password": "Password123!" 
- } -} diff --git a/bruno/Auth/logout.bru b/bruno/Auth/logout.bru deleted file mode 100644 index 623cd47fe..000000000 --- a/bruno/Auth/logout.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: logout - type: http - seq: 3 -} - -post { - url: http://localhost:4000/api/v1/auth/logout - body: none - auth: none -} diff --git a/bruno/Auth/reset-password-request.bru b/bruno/Auth/reset-password-request.bru deleted file mode 100644 index 29c3b89d1..000000000 --- a/bruno/Auth/reset-password-request.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: reset-password-request - type: http - seq: 10 -} - -post { - url: http://localhost:3000/api/v1/auth/reset-password/request - body: json - auth: none -} - -body:json { - { - "email": "milo@pangolin.net" - } -} diff --git a/bruno/Auth/reset-password.bru b/bruno/Auth/reset-password.bru deleted file mode 100644 index 8d567b164..000000000 --- a/bruno/Auth/reset-password.bru +++ /dev/null @@ -1,19 +0,0 @@ -meta { - name: reset-password - type: http - seq: 11 -} - -post { - url: http://localhost:3000/api/v1/auth/reset-password - body: json - auth: none -} - -body:json { - { - "token": "3uhsbom72dwdhboctwrtntyd6jrlg4jtf5oaxy4k", - "newPassword": "aaaaa-1A", - "code": "6irqCGR3" - } -} diff --git a/bruno/Auth/signup.bru b/bruno/Auth/signup.bru deleted file mode 100644 index bec59235e..000000000 --- a/bruno/Auth/signup.bru +++ /dev/null @@ -1,18 +0,0 @@ -meta { - name: signup - type: http - seq: 2 -} - -put { - url: http://localhost:3000/api/v1/auth/signup - body: json - auth: none -} - -body:json { - { - "email": "numbat@pangolin.net", - "password": "Password123!" 
- } -} diff --git a/bruno/Auth/verify-email-request.bru b/bruno/Auth/verify-email-request.bru deleted file mode 100644 index 72189d1b2..000000000 --- a/bruno/Auth/verify-email-request.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: verify-email-request - type: http - seq: 8 -} - -post { - url: http://localhost:3000/api/v1/auth/verify-email/request - body: none - auth: none -} diff --git a/bruno/Auth/verify-email.bru b/bruno/Auth/verify-email.bru deleted file mode 100644 index a06a7108c..000000000 --- a/bruno/Auth/verify-email.bru +++ /dev/null @@ -1,17 +0,0 @@ -meta { - name: verify-email - type: http - seq: 7 -} - -post { - url: http://localhost:3000/api/v1/auth/verify-email - body: json - auth: none -} - -body:json { - { - "code": "50317187" - } -} diff --git a/bruno/Auth/verify-user.bru b/bruno/Auth/verify-user.bru deleted file mode 100644 index 38955449d..000000000 --- a/bruno/Auth/verify-user.bru +++ /dev/null @@ -1,15 +0,0 @@ -meta { - name: verify-user - type: http - seq: 4 -} - -get { - url: http://localhost:3001/api/v1/badger/verify-user?sessionId=mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e - body: none - auth: none -} - -params:query { - sessionId: mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e -} diff --git a/bruno/Clients/createClient.bru b/bruno/Clients/createClient.bru deleted file mode 100644 index 7577bb280..000000000 --- a/bruno/Clients/createClient.bru +++ /dev/null @@ -1,22 +0,0 @@ -meta { - name: createClient - type: http - seq: 1 -} - -put { - url: http://localhost:3000/api/v1/site/1/client - body: json - auth: none -} - -body:json { - { - "siteId": 1, - "name": "test", - "type": "olm", - "subnet": "100.90.129.4/30", - "olmId": "029yzunhx6nh3y5", - "secret": "l0ymp075y3d4rccb25l6sqpgar52k09etunui970qq5gj7x6" - } -} diff --git a/bruno/Clients/pickClientDefaults.bru b/bruno/Clients/pickClientDefaults.bru deleted file mode 100644 index 61509c112..000000000 --- a/bruno/Clients/pickClientDefaults.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: 
pickClientDefaults - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/site/1/pick-client-defaults - body: none - auth: none -} diff --git a/bruno/IDP/Create OIDC Provider.bru b/bruno/IDP/Create OIDC Provider.bru deleted file mode 100644 index 23e807cf9..000000000 --- a/bruno/IDP/Create OIDC Provider.bru +++ /dev/null @@ -1,22 +0,0 @@ -meta { - name: Create OIDC Provider - type: http - seq: 1 -} - -put { - url: http://localhost:3000/api/v1/org/home-lab/idp/oidc - body: json - auth: inherit -} - -body:json { - { - "clientId": "JJoSvHCZcxnXT2sn6CObj6a21MuKNRXs3kN5wbys", - "clientSecret": "2SlGL2wOGgMEWLI9yUuMAeFxre7qSNJVnXMzyepdNzH1qlxYnC4lKhhQ6a157YQEkYH3vm40KK4RCqbYiF8QIweuPGagPX3oGxEj2exwutoXFfOhtq4hHybQKoFq01Z3", - "authUrl": "http://localhost:9000/application/o/authorize/", - "tokenUrl": "http://localhost:9000/application/o/token/", - "scopes": ["email", "openid", "profile"], - "userIdentifier": "email" - } -} diff --git a/bruno/IDP/Generate OIDC URL.bru b/bruno/IDP/Generate OIDC URL.bru deleted file mode 100644 index 90443096f..000000000 --- a/bruno/IDP/Generate OIDC URL.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Generate OIDC URL - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1 - body: none - auth: inherit -} diff --git a/bruno/IDP/folder.bru b/bruno/IDP/folder.bru deleted file mode 100644 index fc1369159..000000000 --- a/bruno/IDP/folder.bru +++ /dev/null @@ -1,3 +0,0 @@ -meta { - name: IDP -} diff --git a/bruno/Internal/Traefik Config.bru b/bruno/Internal/Traefik Config.bru deleted file mode 100644 index 9fc1c1dcb..000000000 --- a/bruno/Internal/Traefik Config.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Traefik Config - type: http - seq: 1 -} - -get { - url: http://localhost:3001/api/v1/traefik-config - body: none - auth: inherit -} diff --git a/bruno/Internal/folder.bru b/bruno/Internal/folder.bru deleted file mode 100644 index 702931ec4..000000000 --- a/bruno/Internal/folder.bru +++ /dev/null @@ -1,3 
+0,0 @@ -meta { - name: Internal -} diff --git a/bruno/Newt/Create Newt.bru b/bruno/Newt/Create Newt.bru deleted file mode 100644 index 56baf89bd..000000000 --- a/bruno/Newt/Create Newt.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Create Newt - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/newt - body: none - auth: none -} diff --git a/bruno/Newt/Get Token.bru b/bruno/Newt/Get Token.bru deleted file mode 100644 index 93d91cc5d..000000000 --- a/bruno/Newt/Get Token.bru +++ /dev/null @@ -1,18 +0,0 @@ -meta { - name: Get Token - type: http - seq: 1 -} - -get { - url: http://localhost:3000/api/v1/auth/newt/get-token - body: json - auth: none -} - -body:json { - { - "newtId": "o0d4rdxq3stnz7b", - "secret": "sy7l09fnaesd03iwrfp9m3qf0ryn19g0zf3dqieaazb4k7vk" - } -} diff --git a/bruno/Olm/createOlm.bru b/bruno/Olm/createOlm.bru deleted file mode 100644 index ca755dea8..000000000 --- a/bruno/Olm/createOlm.bru +++ /dev/null @@ -1,15 +0,0 @@ -meta { - name: createOlm - type: http - seq: 1 -} - -put { - url: http://localhost:3000/api/v1/olm - body: none - auth: inherit -} - -settings { - encodeUrl: true -} diff --git a/bruno/Olm/folder.bru b/bruno/Olm/folder.bru deleted file mode 100644 index d245e6d1c..000000000 --- a/bruno/Olm/folder.bru +++ /dev/null @@ -1,8 +0,0 @@ -meta { - name: Olm - seq: 15 -} - -auth { - mode: inherit -} diff --git a/bruno/Orgs/Check Id.bru b/bruno/Orgs/Check Id.bru deleted file mode 100644 index 17b63953c..000000000 --- a/bruno/Orgs/Check Id.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Check Id - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/org/checkId - body: none - auth: none -} diff --git a/bruno/Orgs/listOrgs.bru b/bruno/Orgs/listOrgs.bru deleted file mode 100644 index 89c34d0cb..000000000 --- a/bruno/Orgs/listOrgs.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: listOrgs - type: http - seq: 1 -} - -get { - url: - body: none - auth: none -} diff --git a/bruno/Remote Exit 
Node/createRemoteExitNode.bru b/bruno/Remote Exit Node/createRemoteExitNode.bru deleted file mode 100644 index 1c749a311..000000000 --- a/bruno/Remote Exit Node/createRemoteExitNode.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: createRemoteExitNode - type: http - seq: 1 -} - -put { - url: http://localhost:4000/api/v1/org/org_i21aifypnlyxur2/remote-exit-node - body: none - auth: none -} diff --git a/bruno/Resources/listResourcesByOrg.bru b/bruno/Resources/listResourcesByOrg.bru deleted file mode 100644 index 6efce1b20..000000000 --- a/bruno/Resources/listResourcesByOrg.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: listResourcesByOrg - type: http - seq: 1 -} - -get { - url: - body: none - auth: none -} diff --git a/bruno/Resources/listResourcesBySite.bru b/bruno/Resources/listResourcesBySite.bru deleted file mode 100644 index 81c9cf99b..000000000 --- a/bruno/Resources/listResourcesBySite.bru +++ /dev/null @@ -1,16 +0,0 @@ -meta { - name: listResourcesBySite - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/site/1/resources?limit=10&offset=0 - body: none - auth: none -} - -params:query { - limit: 10 - offset: 0 -} diff --git a/bruno/Sites/Get Site.bru b/bruno/Sites/Get Site.bru deleted file mode 100644 index fc2f7e62b..000000000 --- a/bruno/Sites/Get Site.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Get Site - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/org/test/sites/mexican-mole-lizard-windy - body: none - auth: none -} diff --git a/bruno/Sites/listSites.bru b/bruno/Sites/listSites.bru deleted file mode 100644 index b7912330a..000000000 --- a/bruno/Sites/listSites.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: listSites - type: http - seq: 1 -} - -get { - url: - body: none - auth: none -} diff --git a/bruno/Targets/listTargets.bru b/bruno/Targets/listTargets.bru deleted file mode 100644 index 7981eb453..000000000 --- a/bruno/Targets/listTargets.bru +++ /dev/null @@ -1,16 +0,0 @@ -meta { - name: 
listTargets - type: http - seq: 1 -} - -get { - url: http://localhost:3000/api/v1/resource/web.main.localhost/targets?limit=10&offset=0 - body: none - auth: none -} - -params:query { - limit: 10 - offset: 0 -} diff --git a/bruno/Test.bru b/bruno/Test.bru deleted file mode 100644 index 16286ec8c..000000000 --- a/bruno/Test.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: Test - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1 - body: none - auth: inherit -} diff --git a/bruno/Traefik/traefik-config.bru b/bruno/Traefik/traefik-config.bru deleted file mode 100644 index a50b7aa15..000000000 --- a/bruno/Traefik/traefik-config.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: traefik-config - type: http - seq: 1 -} - -get { - url: http://localhost:3001/api/v1/traefik-config - body: none - auth: none -} diff --git a/bruno/Users/adminListUsers.bru b/bruno/Users/adminListUsers.bru deleted file mode 100644 index cdc410956..000000000 --- a/bruno/Users/adminListUsers.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: adminListUsers - type: http - seq: 2 -} - -get { - url: http://localhost:3000/api/v1/users - body: none - auth: none -} diff --git a/bruno/Users/adminRemoveUser.bru b/bruno/Users/adminRemoveUser.bru deleted file mode 100644 index 9e9f35079..000000000 --- a/bruno/Users/adminRemoveUser.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: adminRemoveUser - type: http - seq: 3 -} - -delete { - url: http://localhost:3000/api/v1/user/ky5r7ivqs8wc7u4 - body: none - auth: none -} diff --git a/bruno/Users/getUser.bru b/bruno/Users/getUser.bru deleted file mode 100644 index d86372527..000000000 --- a/bruno/Users/getUser.bru +++ /dev/null @@ -1,11 +0,0 @@ -meta { - name: getUser - type: http - seq: 1 -} - -get { - url: - body: none - auth: none -} diff --git a/bruno/bruno.json b/bruno/bruno.json deleted file mode 100644 index f19d936a8..000000000 --- a/bruno/bruno.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "version": "1", - "name": "Pangolin", - "type": 
"collection", - "ignore": [ - "node_modules", - ".git" - ], - "presets": { - "requestType": "http", - "requestUrl": "http://localhost:3000/api/v1" - } -} \ No newline at end of file diff --git a/license_header_checker.py b/license_header_checker.py index c173d693b..ab7ddf4d5 100644 --- a/license_header_checker.py +++ b/license_header_checker.py @@ -96,7 +96,7 @@ def process_directory(root_dir): if has_correct_header: print(f"Header up-to-date: {file_path}") else: - # Either no header exists or the header is outdated — write + # Either no header exists or the header is outdated - write # the correct one. action = "Replaced header in" if has_any_header else "Added header to" new_content = HEADER_NORMALIZED + '\n\n' + body @@ -106,7 +106,7 @@ def process_directory(root_dir): files_modified += 1 else: if has_any_header: - # Remove the header — it shouldn't be here. + # Remove the header - it shouldn't be here. with open(file_path, 'w', encoding='utf-8') as f: f.write(body) print(f"Removed header from: {file_path}") @@ -134,4 +134,4 @@ if __name__ == "__main__": print(f"Error: Directory '{target_directory}' not found.") sys.exit(1) - process_directory(os.path.abspath(target_directory)) \ No newline at end of file + process_directory(os.path.abspath(target_directory)) diff --git a/messages/bg-BG.json b/messages/bg-BG.json index aec6f718c..2d6fead50 100644 --- a/messages/bg-BG.json +++ b/messages/bg-BG.json @@ -1994,7 +1994,7 @@ "description": "По-надежден и по-нисък поддръжка на Самостоятелно-хостван Панголиин сървър с допълнителни екстри", "introTitle": "Управлявано Самостоятелно-хостван Панголиин", "introDescription": "е опция за внедряване, предназначена за хора, които искат простота и допълнителна надеждност, като същевременно запазят данните си частни и самостоятелно-хоствани.", - "introDetail": "С тази опция все още управлявате свой собствен Панголиин възел — вашите тунели, SSL терминатора и трафик остават на вашия сървър. 
Разликата е, че управлението и мониторингът се обработват чрез нашия облачен панел за контрол, който отключва редица предимства:", + "introDetail": "С тази опция все още управлявате свой собствен Панголиин възел - вашите тунели, SSL терминатора и трафик остават на вашия сървър. Разликата е, че управлението и мониторингът се обработват чрез нашия облачен панел за контрол, който отключва редица предимства:", "benefitSimplerOperations": { "title": "По-прости операции", "description": "Няма нужда да управлявате свой собствен имейл сървър или да настройвате сложни аларми. Ще получите проверки и предупреждения при прекъсване от самото начало." @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Разкриване на употреба", - "description": "Изберете лицензионен клас, който точно отразява вашата целена употреба. Персоналният лиценз позволява безплатно ползване на софтуера за индивидуална, некомерсиална или маломащабна комерсиална дейност с годишен брутен приход под 100,000 USD. Всяко ползване извън тези граници — включително ползване във фирма, организация или друга доходоносна среда — изисква валиден корпоративен лиценз и плащане на съответната лицензионна такса. Всички потребители, независимо дали са лични или корпоративни, трябва да спазват Условията на Fossorial Commercial License." + "description": "Изберете лицензионен клас, който точно отразява вашата целена употреба. Персоналният лиценз позволява безплатно ползване на софтуера за индивидуална, некомерсиална или маломащабна комерсиална дейност с годишен брутен приход под 100,000 USD. Всяко ползване извън тези граници - включително ползване във фирма, организация или друга доходоносна среда - изисква валиден корпоративен лиценз и плащане на съответната лицензионна такса. Всички потребители, независимо дали са лични или корпоративни, трябва да спазват Условията на Fossorial Commercial License." 
}, "trialPeriodInformation": { "title": "Информация за пробен период", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON масив", "httpDestFormatJsonArrayDescription": "Една заявка на партида, тялото е JSON масив. Съвместим с повечето общи уеб куки и Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Една заявка на партида, тялото е ново линии отделени JSON — един обект на ред, няма външен масив. Изисквано от Splunk HEC, Elastic / OpenSearch и Grafana.", + "httpDestFormatNdjsonDescription": "Една заявка на партида, тялото е ново линии отделени JSON - един обект на ред, няма външен масив. Изисквано от Splunk HEC, Elastic / OpenSearch и Grafana.", "httpDestFormatSingleTitle": "Едно събитие на заявка", "httpDestFormatSingleDescription": "Изпращат се отделни HTTP POST за всяко индивидуално събитие. Използвайте само за крайни точки, които не могат да обработват партиди.", "httpDestLogTypesTitle": "Видове логове", diff --git a/messages/cs-CZ.json b/messages/cs-CZ.json index 7919aa02c..e6e952e4b 100644 --- a/messages/cs-CZ.json +++ b/messages/cs-CZ.json @@ -1994,7 +1994,7 @@ "description": "Spolehlivější a nízko udržovaný Pangolinův server s dalšími zvony a bičkami", "introTitle": "Spravovaný Pangolin", "introDescription": "je možnost nasazení určená pro lidi, kteří chtějí jednoduchost a spolehlivost při zachování soukromých a samoobslužných dat.", - "introDetail": "Pomocí této volby stále provozujete vlastní uzel Pangolin — tunely, SSL terminály a provoz všech pobytů na vašem serveru. Rozdíl spočívá v tom, že řízení a monitorování se řeší prostřednictvím našeho cloudového panelu, který odemkne řadu výhod:", + "introDetail": "Pomocí této volby stále provozujete vlastní uzel Pangolin - tunely, SSL terminály a provoz všech pobytů na vašem serveru. 
Rozdíl spočívá v tom, že řízení a monitorování se řeší prostřednictvím našeho cloudového panelu, který odemkne řadu výhod:", "benefitSimplerOperations": { "title": "Jednoduchý provoz", "description": "Není třeba spouštět svůj vlastní poštovní server nebo nastavit komplexní upozornění. Ze schránky dostanete upozornění na zdravotní kontrolu a výpadek." diff --git a/messages/de-DE.json b/messages/de-DE.json index a041ee694..43e055c3b 100644 --- a/messages/de-DE.json +++ b/messages/de-DE.json @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Verwendungsanzeige", - "description": "Wählen Sie die Lizenz-Ebene, die Ihre beabsichtigte Nutzung genau widerspiegelt. Die Persönliche Lizenz erlaubt die freie Nutzung der Software für individuelle, nicht-kommerzielle oder kleine kommerzielle Aktivitäten mit jährlichen Brutto-Einnahmen von 100.000 USD. Über diese Grenzen hinausgehende Verwendungszwecke – einschließlich der Verwendung innerhalb eines Unternehmens, einer Organisation, oder eine andere umsatzgenerierende Umgebung — erfordert eine gültige Enterprise-Lizenz und die Zahlung der Lizenzgebühr. Alle Benutzer, ob Personal oder Enterprise, müssen die Fossorial Commercial License Bedingungen einhalten." + "description": "Wählen Sie die Lizenz-Ebene, die Ihre beabsichtigte Nutzung genau widerspiegelt. Die Persönliche Lizenz erlaubt die freie Nutzung der Software für individuelle, nicht-kommerzielle oder kleine kommerzielle Aktivitäten mit jährlichen Brutto-Einnahmen von 100.000 USD. Über diese Grenzen hinausgehende Verwendungszwecke – einschließlich der Verwendung innerhalb eines Unternehmens, einer Organisation, oder eine andere umsatzgenerierende Umgebung - erfordert eine gültige Enterprise-Lizenz und die Zahlung der Lizenzgebühr. Alle Benutzer, ob Personal oder Enterprise, müssen die Fossorial Commercial License Bedingungen einhalten." 
}, "trialPeriodInformation": { "title": "Testperiode Information", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON Array", "httpDestFormatJsonArrayDescription": "Eine Anfrage pro Stapel ist ein JSON-Array. Kompatibel mit den meisten generischen Webhooks und Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Eine Anfrage pro Batch, der Körper ist newline-getrenntes JSON — ein Objekt pro Zeile, kein äußeres Array. Benötigt von Splunk HEC, Elastic / OpenSearch, und Grafana Loki.", + "httpDestFormatNdjsonDescription": "Eine Anfrage pro Batch, der Körper ist newline-getrenntes JSON - ein Objekt pro Zeile, kein äußeres Array. Benötigt von Splunk HEC, Elastic / OpenSearch, und Grafana Loki.", "httpDestFormatSingleTitle": "Ein Ereignis pro Anfrage", "httpDestFormatSingleDescription": "Sendet eine separate HTTP-POST für jedes einzelne Ereignis. Nur für Endpunkte, die Batches nicht handhaben können.", "httpDestLogTypesTitle": "Log-Typen", diff --git a/messages/en-US.json b/messages/en-US.json index ba9b1e5a0..a2a23ea51 100644 --- a/messages/en-US.json +++ b/messages/en-US.json @@ -1,4 +1,8 @@ { + "contactSalesEnable": "Contact sales to enable this feature.", + "contactSalesBookDemo": "Book a demo", + "contactSalesOr": "or", + "contactSalesContactUs": "contact us", "setupCreate": "Create the organization, site, and resources", "headerAuthCompatibilityInfo": "Enable this to force a 401 Unauthorized response when an authentication token is missing. 
This is required for browsers or specific HTTP libraries that do not send credentials without a server challenge.", "headerAuthCompatibility": "Extended compatibility", @@ -1260,6 +1264,7 @@ "actionViewLogs": "View Logs", "noneSelected": "None selected", "orgNotFound2": "No organizations found.", + "search": "Search…", "searchPlaceholder": "Search...", "emptySearchOptions": "No options found", "create": "Create", @@ -1344,10 +1349,138 @@ "sidebarGeneral": "Manage", "sidebarLogAndAnalytics": "Log & Analytics", "sidebarBluePrints": "Blueprints", + "sidebarAlerting": "Alerting", "sidebarOrganization": "Organization", "sidebarManagement": "Management", "sidebarBillingAndLicenses": "Billing & Licenses", "sidebarLogsAnalytics": "Analytics", + "alertingTitle": "Alerting", + "alertingDescription": "Define sources, triggers, and actions for notifications", + "alertingRules": "Alert rules", + "alertingSearchRules": "Search rules…", + "alertingAddRule": "Create Rule", + "alertingColumnSource": "Source", + "alertingColumnTrigger": "Trigger", + "alertingColumnActions": "Actions", + "alertingColumnEnabled": "Enabled", + "alertingDeleteQuestion": "Delete this alert rule? 
This cannot be undone.", + "alertingDeleteRule": "Delete alert rule", + "alertingRuleDeleted": "Alert rule deleted", + "alertingRuleSaved": "Alert rule saved", + "alertingEditRule": "Edit Alert Rule", + "alertingCreateRule": "Create Alert Rule", + "alertingRuleCredenzaDescription": "Choose what to watch, when to fire, and how to notify your team.", + "alertingRuleNamePlaceholder": "Production site down", + "alertingRuleEnabled": "Rule enabled", + "alertingSectionSource": "Source", + "alertingSourceType": "Source type", + "alertingSourceSite": "Site", + "alertingSourceHealthCheck": "Health check", + "alertingPickSites": "Sites", + "alertingPickHealthChecks": "Health checks", + "alertingPickResources": "Resources", + "alertingAllSites": "All Sites", + "alertingAllSitesDescription": "Alert fires for any site", + "alertingSpecificSites": "Specific Sites", + "alertingSpecificSitesDescription": "Choose specific sites to watch", + "alertingAllHealthChecks": "All Health Checks", + "alertingAllHealthChecksDescription": "Alert fires for any health check", + "alertingSpecificHealthChecks": "Specific Health Checks", + "alertingSpecificHealthChecksDescription": "Choose specific health checks to watch", + "alertingAllResources": "All Resources", + "alertingAllResourcesDescription": "Alert fires for any resource", + "alertingSpecificResources": "Specific resources", + "alertingSpecificResourcesDescription": "Choose specific resources to watch", + "alertingSelectResources": "Select resources…", + "alertingResourcesSelected": "{count} resources selected", + "alertingResourcesEmpty": "No resources with targets in the first 10 results.", + "alertingSectionTrigger": "Trigger", + "alertingTrigger": "When to alert", + "alertingTriggerSiteOnline": "Site online", + "alertingTriggerSiteOffline": "Site offline", + "alertingTriggerSiteToggle": "Site status changes", + "alertingTriggerHcHealthy": "Health check healthy", + "alertingTriggerHcUnhealthy": "Health check unhealthy", + 
"alertingTriggerHcToggle": "Health check status changes", + "alertingTriggerResourceHealthy": "Resource healthy", + "alertingTriggerResourceUnhealthy": "Resource unhealthy", + "alertingSearchHealthChecks": "Search health checks…", + "alertingHealthChecksEmpty": "No health checks available.", + "alertingTriggerResourceToggle": "Resource status changes", + "alertingSourceResource": "Resource", + "alertingSectionActions": "Actions", + "alertingAddAction": "Add action", + "alertingActionNotify": "Email", + "alertingActionNotifyDescription": "Send email notifications to users or roles", + "alertingActionWebhook": "Webhook", + "alertingActionWebhookDescription": "Send an HTTP request to a custom endpoint", + "alertingExternalIntegration": "External Integration", + "alertingExternalPagerDutyDescription": "Send alerts to PagerDuty for incident management", + "alertingExternalOpsgenieDescription": "Route alerts to Opsgenie for on-call management", + "alertingExternalServiceNowDescription": "Create ServiceNow incidents from alert events", + "alertingExternalIncidentIoDescription": "Trigger Incident.io workflows from alert events", + "alertingActionType": "Action type", + "alertingNotifyUsers": "Users", + "alertingNotifyRoles": "Roles", + "alertingNotifyEmails": "Email addresses", + "alertingEmailPlaceholder": "Add email and press Enter", + "alertingWebhookMethod": "HTTP method", + "alertingWebhookSecret": "Signing secret (optional)", + "alertingWebhookSecretPlaceholder": "HMAC secret", + "alertingWebhookHeaders": "Headers", + "alertingAddHeader": "Add header", + "alertingSelectSites": "Select sites…", + "alertingSitesSelected": "{count} sites selected", + "alertingSelectHealthChecks": "Select health checks…", + "alertingHealthChecksSelected": "{count} health checks selected", + "alertingNoHealthChecks": "No targets with health checks enabled", + "alertingHealthCheckStub": "Health check source selection is not wired up yet - you can still configure triggers and actions.", + 
"alertingSelectUsers": "Select users…", + "alertingUsersSelected": "{count} users selected", + "alertingSelectRoles": "Select roles…", + "alertingRolesSelected": "{count} roles selected", + "alertingSummarySites": "Sites ({count})", + "alertingSummaryHealthChecks": "Health checks ({count})", + "alertingSummaryResources": "Resources ({count})", + "alertingErrorNameRequired": "Enter a name", + "alertingErrorActionsMin": "Add at least one action", + "alertingErrorPickSites": "Select at least one site", + "alertingErrorPickHealthChecks": "Select at least one health check", + "alertingErrorPickResources": "Select at least one resource", + "alertingErrorTriggerSite": "Choose a site trigger", + "alertingErrorTriggerHealth": "Choose a health check trigger", + "alertingErrorTriggerResource": "Choose a resource trigger", + "alertingErrorNotifyRecipients": "Pick users, roles, or at least one email", + "alertingConfigureSource": "Configure Source", + "alertingConfigureTrigger": "Configure Trigger", + "alertingConfigureActions": "Configure Actions", + "alertingBackToRules": "Back to Rules", + "alertingDraftBadge": "Draft - save to store this rule", + "alertingSidebarHint": "Click a step on the canvas to edit it here.", + "alertingGraphCanvasTitle": "Rule Flow", + "alertingGraphCanvasDescription": "Visual overview of source, trigger, and actions. 
Select a node to edit it in the panel.", + "alertingNodeNotConfigured": "Not configured yet", + "alertingNodeActionsCount": "{count, plural, one {# action} other {# actions}}", + "alertingNodeRoleSource": "Source", + "alertingNodeRoleTrigger": "Trigger", + "alertingNodeRoleAction": "Action", + "alertingTabRules": "Alert Rules", + "alertingTabHealthChecks": "Health Checks", + "standaloneHcTableTitle": "Health Checks", + "standaloneHcSearchPlaceholder": "Search health checks…", + "standaloneHcAddButton": "Create Health Check", + "standaloneHcCreateTitle": "Create Health Check", + "standaloneHcEditTitle": "Edit Health Check", + "standaloneHcDescription": "Configure an HTTP or TCP health check for use in alert rules.", + "standaloneHcNameLabel": "Name", + "standaloneHcNamePlaceholder": "My HTTP Monitor", + "standaloneHcDeleteTitle": "Delete health check", + "standaloneHcDeleteQuestion": "Delete this health check? This cannot be undone.", + "standaloneHcDeleted": "Health check deleted", + "standaloneHcSaved": "Health check saved", + "standaloneHcColumnHealth": "Health", + "standaloneHcColumnMode": "Mode", + "standaloneHcColumnTarget": "Target", "blueprints": "Blueprints", "blueprintsDescription": "Apply declarative configurations and view previous runs", "blueprintAdd": "Add Blueprint", @@ -1753,8 +1886,8 @@ "retryAttempts": "Retry Attempts", "expectedResponseCodes": "Expected Response Codes", "expectedResponseCodesDescription": "HTTP status code that indicates healthy status. If left blank, 200-300 is considered healthy.", - "customHeaders": "Custom Headers", - "customHeadersDescription": "Headers new line separated: Header-Name: value", + "customHeaders": "Custom Request Headers", + "customHeadersDescription": "Request headers sent to the downstream targets. 
Headers new line separated: Header-Name: value", "headersValidationError": "Headers must be in the format: Header-Name: value", "saveHealthCheck": "Save Health Check", "healthCheckSaved": "Health Check Saved", @@ -1766,8 +1899,17 @@ "healthCheckIntervalMin": "Check interval must be at least 5 seconds", "healthCheckTimeoutMin": "Timeout must be at least 1 second", "healthCheckRetryMin": "Retry attempts must be at least 1", - "httpMethod": "HTTP Method", - "selectHttpMethod": "Select HTTP method", + "healthCheckMode": "Check Mode", + "healthCheckStrategy": "Strategy", + "healthCheckModeDescription": "TCP mode verifies connectivity only. HTTP mode validates the HTTP response.", + "healthyThreshold": "Healthy Threshold", + "healthyThresholdDescription": "Consecutive successes required before marking as healthy.", + "unhealthyThreshold": "Unhealthy Threshold", + "unhealthyThresholdDescription": "Consecutive failures required before marking as unhealthy.", + "healthCheckHealthyThresholdMin": "Healthy threshold must be at least 1", + "healthCheckUnhealthyThresholdMin": "Unhealthy threshold must be at least 1", + "httpMethod": "Scheme", + "selectHttpMethod": "Select scheme", "domainPickerSubdomainLabel": "Subdomain", "domainPickerBaseDomainLabel": "Base Domain", "domainPickerSearchDomains": "Search domains...", @@ -1825,6 +1967,11 @@ "editInternalResourceDialogModePort": "Port", "editInternalResourceDialogModeHost": "Host", "editInternalResourceDialogModeCidr": "CIDR", + "editInternalResourceDialogModeHttp": "HTTP", + "editInternalResourceDialogModeHttps": "HTTPS", + "editInternalResourceDialogScheme": "Scheme", + "editInternalResourceDialogEnableSsl": "Enable SSL", + "editInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.", "editInternalResourceDialogDestination": "Destination", "editInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.", 
"editInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.", @@ -1840,6 +1987,7 @@ "createInternalResourceDialogName": "Name", "createInternalResourceDialogSite": "Site", "selectSite": "Select site...", + "multiSitesSelectorSitesCount": "{count, plural, one {# site} other {# sites}}", "noSitesFound": "No sites found.", "createInternalResourceDialogProtocol": "Protocol", "createInternalResourceDialogTcp": "TCP", @@ -1868,11 +2016,19 @@ "createInternalResourceDialogModePort": "Port", "createInternalResourceDialogModeHost": "Host", "createInternalResourceDialogModeCidr": "CIDR", + "createInternalResourceDialogModeHttp": "HTTP", + "createInternalResourceDialogModeHttps": "HTTPS", + "scheme": "Scheme", + "createInternalResourceDialogScheme": "Scheme", + "createInternalResourceDialogEnableSsl": "Enable SSL", + "createInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.", "createInternalResourceDialogDestination": "Destination", "createInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.", "createInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.", "createInternalResourceDialogAlias": "Alias", "createInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.", + "internalResourceDownstreamSchemeRequired": "Scheme is required for HTTP resources", + "internalResourceHttpPortRequired": "Destination port is required for HTTP resources", "siteConfiguration": "Configuration", "siteAcceptClientConnections": "Accept Client Connections", "siteAcceptClientConnectionsDescription": "Allow user devices and clients to access resources on this site. 
This can be changed later.", @@ -1997,7 +2153,7 @@ "description": "More reliable and low-maintenance self-hosted Pangolin server with extra bells and whistles", "introTitle": "Managed Self-Hosted Pangolin", "introDescription": "is a deployment option designed for people who want simplicity and extra reliability while still keeping their data private and self-hosted.", - "introDetail": "With this option, you still run your own Pangolin node — your tunnels, SSL termination, and traffic all stay on your server. The difference is that management and monitoring are handled through our cloud dashboard, which unlocks a number of benefits:", + "introDetail": "With this option, you still run your own Pangolin node - your tunnels, SSL termination, and traffic all stay on your server. The difference is that management and monitoring are handled through our cloud dashboard, which unlocks a number of benefits:", "benefitSimplerOperations": { "title": "Simpler operations", "description": "No need to run your own mail server or set up complex alerting. You'll get health checks and downtime alerts out of the box." @@ -2122,7 +2278,7 @@ "selectDomainForOrgAuthPage": "Select a domain for the organization's authentication page", "domainPickerProvidedDomain": "Provided Domain", "domainPickerFreeProvidedDomain": "Provided Domain", - "domainPickerFreeDomainsPaidFeature": "Provided domains are a paid feature. Subscribe to get a domain included with your plan — no need to bring your own.", + "domainPickerFreeDomainsPaidFeature": "Provided domains are a paid feature. Subscribe to get a domain included with your plan - no need to bring your own.", "domainPickerVerified": "Verified", "domainPickerUnverified": "Unverified", "domainPickerManual": "Manual", @@ -2300,7 +2456,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Usage Disclosure", - "description": "Select the license tier that accurately reflects your intended use. 
The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits — including use within a business, organization, or other revenue-generating environment — requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms." + "description": "Select the license tier that accurately reflects your intended use. The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits - including use within a business, organization, or other revenue-generating environment - requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms." }, "trialPeriodInformation": { "title": "Trial Period Information", @@ -2432,6 +2588,7 @@ "validPassword": "Valid Password", "validEmail": "Valid email", "validSSO": "Valid SSO", + "connectedClient": "Connected Client", "resourceBlocked": "Resource Blocked", "droppedByRule": "Dropped by Rule", "noSessions": "No Sessions", @@ -2669,8 +2826,12 @@ "editInternalResourceDialogAddUsers": "Add Users", "editInternalResourceDialogAddClients": "Add Clients", "editInternalResourceDialogDestinationLabel": "Destination", - "editInternalResourceDialogDestinationDescription": "Specify the destination address for the internal resource. This can be a hostname, IP address, or CIDR range depending on the selected mode. Optionally set an internal DNS alias for easier identification.", + "editInternalResourceDialogDestinationDescription": "Choose where this resource runs and how clients reach it. 
Selecting multiple sites will create a high availability resource that can be accessed from any of the selected sites.", "editInternalResourceDialogPortRestrictionsDescription": "Restrict access to specific TCP/UDP ports or allow/block all ports.", + "createInternalResourceDialogHttpConfiguration": "HTTP configuration", + "createInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.", + "editInternalResourceDialogHttpConfiguration": "HTTP configuration", + "editInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.", "editInternalResourceDialogTcp": "TCP", "editInternalResourceDialogUdp": "UDP", "editInternalResourceDialogIcmp": "ICMP", @@ -2709,6 +2870,8 @@ "maintenancePageMessagePlaceholder": "We'll be back soon! Our site is currently undergoing scheduled maintenance.", "maintenancePageMessageDescription": "Detailed message explaining the maintenance", "maintenancePageTimeTitle": "Estimated Completion Time (Optional)", + "privateMaintenanceScreenTitle": "Private Placeholder Screen", + "privateMaintenanceScreenMessage": "This domain is being used on a private resource. Please connect using the Pangolin client to access this resource.", "maintenanceTime": "e.g., 2 hours, Nov 1 at 5:00 PM", "maintenanceEstimatedTimeDescription": "When you expect maintenance to be completed", "editDomain": "Edit Domain", @@ -2828,9 +2991,9 @@ "streamingHttpWebhookTitle": "HTTP Webhook", "streamingHttpWebhookDescription": "Send events to any HTTP endpoint with flexible authentication and templating.", "streamingS3Title": "Amazon S3", - "streamingS3Description": "Stream events to an S3-compatible object storage bucket. 
Contact support to enable this destination.", + "streamingS3Description": "Stream events to an S3-compatible object storage bucket.", "streamingDatadogTitle": "Datadog", - "streamingDatadogDescription": "Forward events directly to your Datadog account. Contact support to enable this destination.", + "streamingDatadogDescription": "Forward events directly to your Datadog account.", "streamingTypePickerDescription": "Choose a destination type to get started.", "streamingFailedToLoad": "Failed to load destinations", "streamingUnexpectedError": "An unexpected error occurred.", @@ -2846,6 +3009,14 @@ "httpDestAddTitle": "Add HTTP Destination", "httpDestEditDescription": "Update the configuration for this HTTP event streaming destination.", "httpDestAddDescription": "Configure a new HTTP endpoint to receive your organization's events.", + "S3DestEditTitle": "Edit Destination", + "S3DestAddTitle": "Add S3 Destination", + "S3DestEditDescription": "Update the configuration for this S3 event streaming destination.", + "S3DestAddDescription": "Configure a new S3 endpoint to receive your organization's events.", + "datadogDestEditTitle": "Edit Destination", + "datadogDestAddTitle": "Add Datadog Destination", + "datadogDestEditDescription": "Update the configuration for this Datadog event streaming destination.", + "datadogDestAddDescription": "Configure a new Datadog endpoint to receive your organization's events.", "httpDestTabSettings": "Settings", "httpDestTabHeaders": "Headers", "httpDestTabBody": "Body", @@ -2885,7 +3056,7 @@ "httpDestFormatJsonArrayTitle": "JSON Array", "httpDestFormatJsonArrayDescription": "One request per batch, body is a JSON array. Compatible with most generic webhooks and Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "One request per batch, body is newline-delimited JSON — one object per line, no outer array. 
Required by Splunk HEC, Elastic / OpenSearch, and Grafana Loki.", + "httpDestFormatNdjsonDescription": "One request per batch, body is newline-delimited JSON - one object per line, no outer array. Required by Splunk HEC, Elastic / OpenSearch, and Grafana Loki.", "httpDestFormatSingleTitle": "One Event Per Request", "httpDestFormatSingleDescription": "Sends a separate HTTP POST for each individual event. Use only for endpoints that cannot handle batches.", "httpDestLogTypesTitle": "Log Types", @@ -2904,6 +3075,18 @@ "httpDestCreatedSuccess": "Destination created successfully", "httpDestUpdateFailed": "Failed to update destination", "httpDestCreateFailed": "Failed to create destination", + "followRedirects": "Follow Redirects", + "followRedirectsDescription": "Automatically follow HTTP redirects for requests.", + "alertingErrorWebhookUrl": "Please enter a valid URL for the webhook.", + "healthCheckStrategyHttp": "Validates connectivity and checks the HTTP response status.", + "healthCheckStrategyTcp": "Verifies TCP connectivity only, without inspecting the response.", + "healthCheckStrategySnmp": "Makes an SNMP get request to check the health of network devices and infrastructure.", + "healthCheckStrategyIcmp": "Uses ICMP echo requests (pings) to check if a resource is reachable and responsive.", + "healthCheckTabStrategy": "Strategy", + "healthCheckTabConnection": "Connection", + "healthCheckTabAdvanced": "Advanced", + "healthCheckStrategyNotAvailable": "This strategy is not available. 
Please contact sales to enable this feature.", + "uptime30d": "Uptime (30d)", "idpAddActionCreateNew": "Create new identity provider", "idpAddActionImportFromOrg": "Import from another organization", "idpImportDialogTitle": "Import Identity Provider", diff --git a/messages/es-ES.json b/messages/es-ES.json index 89a42fb96..b370ee7dc 100644 --- a/messages/es-ES.json +++ b/messages/es-ES.json @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Seleccione un dominio para la página de autenticación de la organización", "domainPickerProvidedDomain": "Dominio proporcionado", "domainPickerFreeProvidedDomain": "Dominio proporcionado gratis", - "domainPickerFreeDomainsPaidFeature": "Los dominios proporcionados son una función de pago. Suscríbete para obtener un dominio incluido con tu plan — no necesitas traer el tuyo propio.", + "domainPickerFreeDomainsPaidFeature": "Los dominios proporcionados son una función de pago. Suscríbete para obtener un dominio incluido con tu plan - no necesitas traer el tuyo propio.", "domainPickerVerified": "Verificado", "domainPickerUnverified": "Sin verificar", "domainPickerManual": "Manual", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Divulgación de uso", - "description": "Seleccione el nivel de licencia que refleje con precisión su uso previsto. La Licencia Personal permite el uso libre del Software para actividades comerciales individuales, no comerciales o de pequeña escala con ingresos brutos anuales inferiores a $100,000 USD. Cualquier uso más allá de estos límites — incluyendo el uso dentro de una empresa, organización, u otro entorno de generación de ingresos — requiere una Licencia Empresarial válida y el pago de la cuota de licencia aplicable. Todos los usuarios, ya sean personales o empresariales, deben cumplir con las Condiciones de Licencia Comercial Fossorial." + "description": "Seleccione el nivel de licencia que refleje con precisión su uso previsto. 
La Licencia Personal permite el uso libre del Software para actividades comerciales individuales, no comerciales o de pequeña escala con ingresos brutos anuales inferiores a $100,000 USD. Cualquier uso más allá de estos límites - incluyendo el uso dentro de una empresa, organización, u otro entorno de generación de ingresos - requiere una Licencia Empresarial válida y el pago de la cuota de licencia aplicable. Todos los usuarios, ya sean personales o empresariales, deben cumplir con las Condiciones de Licencia Comercial Fossorial." }, "trialPeriodInformation": { "title": "Información del período de prueba", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "Matriz JSON", "httpDestFormatJsonArrayDescription": "Una petición por lote, cuerpo es una matriz JSON. Compatible con la mayoría de los webhooks y Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Una petición por lote, el cuerpo es JSON delimitado por línea — un objeto por línea, sin arrays externos. Requerido por Splunk HEC, Elastic / OpenSearch, y Grafana Loki.", + "httpDestFormatNdjsonDescription": "Una petición por lote, el cuerpo es JSON delimitado por línea - un objeto por línea, sin arrays externos. Requerido por Splunk HEC, Elastic / OpenSearch, y Grafana Loki.", "httpDestFormatSingleTitle": "Un evento por solicitud", "httpDestFormatSingleDescription": "Envía un HTTP POST separado para cada evento individual. 
Úsalo sólo para los extremos que no pueden manejar lotes.", "httpDestLogTypesTitle": "Tipos de Log", diff --git a/messages/fr-FR.json b/messages/fr-FR.json index c5ab2ba4a..98b769366 100644 --- a/messages/fr-FR.json +++ b/messages/fr-FR.json @@ -1994,7 +1994,7 @@ "description": "Serveur Pangolin auto-hébergé avec des cloches et des sifflets supplémentaires", "introTitle": "Pangolin auto-hébergé géré", "introDescription": "est une option de déploiement conçue pour les personnes qui veulent de la simplicité et de la fiabilité tout en gardant leurs données privées et auto-hébergées.", - "introDetail": "Avec cette option, vous exécutez toujours votre propre nœud Pangolin — vos tunnels, la terminaison SSL et le trafic restent sur votre serveur. La différence est que la gestion et la surveillance sont gérées via notre tableau de bord du cloud, qui déverrouille un certain nombre d'avantages :", + "introDetail": "Avec cette option, vous exécutez toujours votre propre nœud Pangolin - vos tunnels, la terminaison SSL et le trafic restent sur votre serveur. La différence est que la gestion et la surveillance sont gérées via notre tableau de bord du cloud, qui déverrouille un certain nombre d'avantages :", "benefitSimplerOperations": { "title": "Opérations plus simples", "description": "Pas besoin de faire tourner votre propre serveur de messagerie ou de configurer des alertes complexes. Vous obtiendrez des contrôles de santé et des alertes de temps d'arrêt par la suite." @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Sélectionnez un domaine pour la page d'authentification de l'organisation", "domainPickerProvidedDomain": "Domaine fourni", "domainPickerFreeProvidedDomain": "Domaine fourni gratuitement", - "domainPickerFreeDomainsPaidFeature": "Les domaines fournis sont une fonctionnalité payante. 
Abonnez-vous pour obtenir un domaine inclus avec votre plan — plus besoin de fournir le vôtre.", + "domainPickerFreeDomainsPaidFeature": "Les domaines fournis sont une fonctionnalité payante. Abonnez-vous pour obtenir un domaine inclus avec votre plan - plus besoin de fournir le vôtre.", "domainPickerVerified": "Vérifié", "domainPickerUnverified": "Non vérifié", "domainPickerManual": "Manuel", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Divulgation d'utilisation", - "description": "Sélectionnez le niveau de licence qui correspond exactement à votre utilisation prévue. La Licence Personnelle autorise l'utilisation libre du Logiciel pour des activités commerciales individuelles, non commerciales ou à petite échelle avec un revenu annuel brut inférieur à 100 000 USD. Toute utilisation au-delà de ces limites — y compris l'utilisation au sein d'une entreprise, d'une organisation, ou tout autre environnement générateur de revenus — nécessite une licence d’entreprise valide et le paiement des droits de licence applicables. Tous les utilisateurs, qu'ils soient personnels ou d'entreprise, doivent se conformer aux conditions de licence commerciale Fossorial." + "description": "Sélectionnez le niveau de licence qui correspond exactement à votre utilisation prévue. La Licence Personnelle autorise l'utilisation libre du Logiciel pour des activités commerciales individuelles, non commerciales ou à petite échelle avec un revenu annuel brut inférieur à 100 000 USD. Toute utilisation au-delà de ces limites - y compris l'utilisation au sein d'une entreprise, d'une organisation, ou tout autre environnement générateur de revenus - nécessite une licence d’entreprise valide et le paiement des droits de licence applicables. Tous les utilisateurs, qu'ils soient personnels ou d'entreprise, doivent se conformer aux conditions de licence commerciale Fossorial." 
}, "trialPeriodInformation": { "title": "Informations sur la période d'essai", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "Tableau JSON", "httpDestFormatJsonArrayDescription": "Une requête par lot, le corps est un tableau JSON. Compatible avec la plupart des webhooks génériques et des datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Une requête par lot, body est un JSON délimité par une nouvelle ligne — un objet par ligne, pas de tableau extérieur. Requis par Splunk HEC, Elastic / OpenSearch, et Grafana Loki.", + "httpDestFormatNdjsonDescription": "Une requête par lot, body est un JSON délimité par une nouvelle ligne - un objet par ligne, pas de tableau extérieur. Requis par Splunk HEC, Elastic / OpenSearch, et Grafana Loki.", "httpDestFormatSingleTitle": "Un événement par demande", "httpDestFormatSingleDescription": "Envoie un POST HTTP séparé pour chaque événement individuel. Utilisé uniquement pour les terminaux qui ne peuvent pas gérer des lots.", "httpDestLogTypesTitle": "Types de logs", diff --git a/messages/it-IT.json b/messages/it-IT.json index b76e1b3c7..babe33b59 100644 --- a/messages/it-IT.json +++ b/messages/it-IT.json @@ -1994,7 +1994,7 @@ "description": "Server Pangolin self-hosted più affidabile e a bassa manutenzione con campanelli e fischietti extra", "introTitle": "Managed Self-Hosted Pangolin", "introDescription": "è un'opzione di distribuzione progettata per le persone che vogliono la semplicità e l'affidabilità extra mantenendo i loro dati privati e self-hosted.", - "introDetail": "Con questa opzione, esegui ancora il tuo nodo Pangolin — i tunnel, la terminazione SSL e il traffico rimangono tutti sul tuo server. La differenza è che la gestione e il monitoraggio sono gestiti attraverso il nostro cruscotto cloud, che sblocca una serie di vantaggi:", + "introDetail": "Con questa opzione, esegui ancora il tuo nodo Pangolin - i tunnel, la terminazione SSL e il traffico rimangono tutti sul tuo server. 
La differenza è che la gestione e il monitoraggio sono gestiti attraverso il nostro cruscotto cloud, che sblocca una serie di vantaggi:", "benefitSimplerOperations": { "title": "Operazioni più semplici", "description": "Non è necessario eseguire il proprio server di posta o impostare un avviso complesso. Otterrai controlli di salute e avvisi di inattività fuori dalla casella." @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Seleziona un dominio per la pagina di autenticazione dell'organizzazione", "domainPickerProvidedDomain": "Dominio Fornito", "domainPickerFreeProvidedDomain": "Dominio Fornito Gratuito", - "domainPickerFreeDomainsPaidFeature": "I domini forniti sono una funzionalità a pagamento. Abbonati per ricevere un dominio incluso con il tuo piano — non è necessario portare il proprio.", + "domainPickerFreeDomainsPaidFeature": "I domini forniti sono una funzionalità a pagamento. Abbonati per ricevere un dominio incluso con il tuo piano - non è necessario portare il proprio.", "domainPickerVerified": "Verificato", "domainPickerUnverified": "Non Verificato", "domainPickerManual": "Manuale", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Trasparenza Di Utilizzo", - "description": "Seleziona il livello di licenza che rispecchia accuratamente il tuo utilizzo previsto. La Licenza Personale consente l'uso gratuito del Software per le attività commerciali individuali, non commerciali o su piccola scala con entrate lorde annue inferiori a $100.000 USD. Qualsiasi uso oltre questi limiti — compreso l'uso all'interno di un'azienda, organizzazione, o altro ambiente generatore di entrate — richiede una licenza Enterprise valida e il pagamento della tassa di licenza applicabile. Tutti gli utenti, siano essi personali o aziendali, devono rispettare i termini di licenza commerciale Fossorial." + "description": "Seleziona il livello di licenza che rispecchia accuratamente il tuo utilizzo previsto. 
La Licenza Personale consente l'uso gratuito del Software per le attività commerciali individuali, non commerciali o su piccola scala con entrate lorde annue inferiori a $100.000 USD. Qualsiasi uso oltre questi limiti - compreso l'uso all'interno di un'azienda, organizzazione, o altro ambiente generatore di entrate - richiede una licenza Enterprise valida e il pagamento della tassa di licenza applicabile. Tutti gli utenti, siano essi personali o aziendali, devono rispettare i termini di licenza commerciale Fossorial." }, "trialPeriodInformation": { "title": "Informazioni Periodo Di Prova", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON Array", "httpDestFormatJsonArrayDescription": "Una richiesta per lotto, corpo è un array JSON. Compatibile con la maggior parte dei webhooks generici e Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Una richiesta per lotto, corpo è newline-delimited JSON — un oggetto per linea, nessun array esterno. Richiesto da Splunk HEC, Elastic / OpenSearch, e Grafana Loki.", + "httpDestFormatNdjsonDescription": "Una richiesta per lotto, corpo è newline-delimited JSON - un oggetto per linea, nessun array esterno. Richiesto da Splunk HEC, Elastic / OpenSearch, e Grafana Loki.", "httpDestFormatSingleTitle": "Un Evento Per Richiesta", "httpDestFormatSingleDescription": "Invia un HTTP POST separato per ogni singolo evento. Usa solo per gli endpoint che non possono gestire i batch.", "httpDestLogTypesTitle": "Tipi Di Log", diff --git a/messages/ko-KR.json b/messages/ko-KR.json index cce7adeb8..9e55b0d32 100644 --- a/messages/ko-KR.json +++ b/messages/ko-KR.json @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "조직 인증 페이지에 대한 도메인을 선택하세요.", "domainPickerProvidedDomain": "제공된 도메인", "domainPickerFreeProvidedDomain": "무료 제공된 도메인", - "domainPickerFreeDomainsPaidFeature": "제공된 도메인은 유료 기능입니다. 요금제에 도메인이 포함되도록 구독하세요. — 별도로 도메인을 준비할 필요 없습니다.", + "domainPickerFreeDomainsPaidFeature": "제공된 도메인은 유료 기능입니다. 
요금제에 도메인이 포함되도록 구독하세요. - 별도로 도메인을 준비할 필요 없습니다.", "domainPickerVerified": "검증됨", "domainPickerUnverified": "검증되지 않음", "domainPickerManual": "수동", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "사용 공개", - "description": "당신의 의도된 사용에 정확히 맞는 라이선스 등급을 선택하세요. 개인 라이선스는 연간 총 수익 100,000 USD 이하의 개인, 비상업적 또는 소규모 상업 활동을 위한 소프트웨어의 무료 사용을 허용합니다. 이러한 제한을 넘는 모든 사용 — 비즈니스, 조직 또는 기타 수익 창출 환경 내에서의 사용 — 은 유효한 엔터프라이즈 라이선스 및 해당 라이선스 수수료의 지불이 필요합니다. 개인 또는 기업 사용자는 모두 Fossorial 상용 라이선스 조건을 준수해야 합니다." + "description": "당신의 의도된 사용에 정확히 맞는 라이선스 등급을 선택하세요. 개인 라이선스는 연간 총 수익 100,000 USD 이하의 개인, 비상업적 또는 소규모 상업 활동을 위한 소프트웨어의 무료 사용을 허용합니다. 이러한 제한을 넘는 모든 사용 - 비즈니스, 조직 또는 기타 수익 창출 환경 내에서의 사용 - 은 유효한 엔터프라이즈 라이선스 및 해당 라이선스 수수료의 지불이 필요합니다. 개인 또는 기업 사용자는 모두 Fossorial 상용 라이선스 조건을 준수해야 합니다." }, "trialPeriodInformation": { "title": "시험 기간 정보", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON 배열", "httpDestFormatJsonArrayDescription": "각 배치마다 요청 하나씩, 본문은 JSON 배열입니다. 대부분의 일반 웹훅 및 Datadog과 호환됩니다.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "각 배치마다 요청 하나씩, 본문은 줄 구분 JSON — 한 라인에 하나의 객체가 있으며 외부 배열이 없습니다. Splunk HEC, Elastic / OpenSearch, Grafana Loki에 필요합니다.", + "httpDestFormatNdjsonDescription": "각 배치마다 요청 하나씩, 본문은 줄 구분 JSON - 한 라인에 하나의 객체가 있으며 외부 배열이 없습니다. Splunk HEC, Elastic / OpenSearch, Grafana Loki에 필요합니다.", "httpDestFormatSingleTitle": "각 요청 당 하나의 이벤트", "httpDestFormatSingleDescription": "각 개별 이벤트에 대해 별도의 HTTP POST를 전송합니다. 배치를 처리할 수 없는 엔드포인트에만 사용하세요.", "httpDestLogTypesTitle": "로그 유형", diff --git a/messages/nb-NO.json b/messages/nb-NO.json index ab9334ee3..913d7ca94 100644 --- a/messages/nb-NO.json +++ b/messages/nb-NO.json @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON liste", "httpDestFormatJsonArrayDescription": "Én forespørsel per batch, innholdet er en JSON-liste. 
Kompatibel med de mest generiske webhooks og Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Én forespørsel per sats, innholdet er nytt avgrenset JSON — et objekt per linje, ingen ytterarray. Kreves av Splunk HEC, Elastisk/OpenSearch, og Grafana Loki.", + "httpDestFormatNdjsonDescription": "Én forespørsel per sats, innholdet er nytt avgrenset JSON - et objekt per linje, ingen ytterarray. Kreves av Splunk HEC, Elastisk/OpenSearch, og Grafana Loki.", "httpDestFormatSingleTitle": "En hendelse per forespørsel", "httpDestFormatSingleDescription": "Sender en separat HTTP POST for hver enkelt hendelse. Bruk bare for endepunkter som ikke kan håndtere batcher.", "httpDestLogTypesTitle": "Logg typer", diff --git a/messages/nl-NL.json b/messages/nl-NL.json index 403113ff3..f3803d445 100644 --- a/messages/nl-NL.json +++ b/messages/nl-NL.json @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Selecteer een domein voor de authenticatiepagina van de organisatie", "domainPickerProvidedDomain": "Opgegeven domein", "domainPickerFreeProvidedDomain": "Gratis verstrekt domein", - "domainPickerFreeDomainsPaidFeature": "Geleverde domeinen zijn een betaalde functie. Abonneer je om een domein bij je plan te krijgen — je hoeft er zelf geen mee te brengen.", + "domainPickerFreeDomainsPaidFeature": "Geleverde domeinen zijn een betaalde functie. 
Abonneer je om een domein bij je plan te krijgen - je hoeft er zelf geen mee te brengen.", "domainPickerVerified": "Geverifieerd", "domainPickerUnverified": "Ongeverifieerd", "domainPickerManual": "Handleiding", diff --git a/messages/pl-PL.json b/messages/pl-PL.json index d6a454120..2e55ad2a8 100644 --- a/messages/pl-PL.json +++ b/messages/pl-PL.json @@ -1994,7 +1994,7 @@ "description": "Większa niezawodność i niska konserwacja serwera Pangolin z dodatkowymi dzwonkami i sygnałami", "introTitle": "Zarządzany samowystarczalny Pangolin", "introDescription": "jest opcją wdrażania zaprojektowaną dla osób, które chcą prostoty i dodatkowej niezawodności, przy jednoczesnym utrzymaniu swoich danych prywatnych i samodzielnych.", - "introDetail": "Z tą opcją nadal obsługujesz swój własny węzeł Pangolin — tunele, zakończenie SSL i ruch na Twoim serwerze. Różnica polega na tym, że zarządzanie i monitorowanie odbywa się za pomocą naszej tablicy rozdzielczej, która odblokowuje szereg korzyści:", + "introDetail": "Z tą opcją nadal obsługujesz swój własny węzeł Pangolin - tunele, zakończenie SSL i ruch na Twoim serwerze. Różnica polega na tym, że zarządzanie i monitorowanie odbywa się za pomocą naszej tablicy rozdzielczej, która odblokowuje szereg korzyści:", "benefitSimplerOperations": { "title": "Uproszczone operacje", "description": "Nie ma potrzeby uruchamiania własnego serwera pocztowego lub ustawiania skomplikowanych powiadomień. Będziesz mieć kontrolę zdrowia i powiadomienia o przestoju." @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Wybierz domenę dla strony uwierzytelniania organizacji", "domainPickerProvidedDomain": "Dostarczona domena", "domainPickerFreeProvidedDomain": "Darmowa oferowana domena", - "domainPickerFreeDomainsPaidFeature": "Dostarczane domeny to funkcja płatna. Subskrybuj, aby uzyskać domenę w ramach swojego planu — nie ma potrzeby przynoszenia własnej.", + "domainPickerFreeDomainsPaidFeature": "Dostarczane domeny to funkcja płatna. 
Subskrybuj, aby uzyskać domenę w ramach swojego planu - nie ma potrzeby przynoszenia własnej.", "domainPickerVerified": "Zweryfikowano", "domainPickerUnverified": "Niezweryfikowane", "domainPickerManual": "Podręcznik", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "Tablica JSON", "httpDestFormatJsonArrayDescription": "Jedna prośba na partię, treść jest tablicą JSON. Kompatybilna z najbardziej ogólnymi webhookami i Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Jedno żądanie na partię, ciałem jest plik JSON rozdzielony na newline-delimited — jeden obiekt na wiersz, bez tablicy zewnętrznej. Wymagane przez Splunk HEC, Elastic / OpenSesearch i Grafana Loki.", + "httpDestFormatNdjsonDescription": "Jedno żądanie na partię, ciałem jest plik JSON rozdzielony na newline-delimited - jeden obiekt na wiersz, bez tablicy zewnętrznej. Wymagane przez Splunk HEC, Elastic / OpenSesearch i Grafana Loki.", "httpDestFormatSingleTitle": "Jedno wydarzenie na żądanie", "httpDestFormatSingleDescription": "Wysyła oddzielny POST HTTP dla każdego zdarzenia. Użyj tylko dla punktów końcowych, które nie mogą obsługiwać partii.", "httpDestLogTypesTitle": "Typy logów", diff --git a/messages/pt-PT.json b/messages/pt-PT.json index 86ee54d61..2fa228639 100644 --- a/messages/pt-PT.json +++ b/messages/pt-PT.json @@ -1994,7 +1994,7 @@ "description": "Servidor Pangolin auto-hospedado mais confiável e com baixa manutenção com sinos extras e assobiamentos", "introTitle": "Pangolin Auto-Hospedado Gerenciado", "introDescription": "é uma opção de implantação projetada para pessoas que querem simplicidade e confiança adicional, mantendo os seus dados privados e auto-hospedados.", - "introDetail": "Com esta opção, você ainda roda seu próprio nó Pangolin — seus túneis, terminação SSL e tráfego todos permanecem no seu servidor. 
A diferença é que a gestão e a monitorização são geridos através do nosso painel de nuvem, que desbloqueia vários benefícios:", + "introDetail": "Com esta opção, você ainda roda seu próprio nó Pangolin - seus túneis, terminação SSL e tráfego todos permanecem no seu servidor. A diferença é que a gestão e a monitorização são geridos através do nosso painel de nuvem, que desbloqueia vários benefícios:", "benefitSimplerOperations": { "title": "Operações simples", "description": "Não é necessário executar o seu próprio servidor de e-mail ou configurar um alerta complexo. Você receberá fora de caixa verificações de saúde e alertas de tempo de inatividade." @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Selecione um domínio para a página de autenticação da organização", "domainPickerProvidedDomain": "Domínio fornecido", "domainPickerFreeProvidedDomain": "Domínio fornecido grátis", - "domainPickerFreeDomainsPaidFeature": "Os domínios fornecidos são um recurso pago. Assine para obter um domínio incluído no seu plano — não há necessidade de trazer o seu próprio.", + "domainPickerFreeDomainsPaidFeature": "Os domínios fornecidos são um recurso pago. Assine para obter um domínio incluído no seu plano - não há necessidade de trazer o seu próprio.", "domainPickerVerified": "Verificada", "domainPickerUnverified": "Não verificado", "domainPickerManual": "Manual", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Divulgação de uso", - "description": "Selecione o nível de licença que reflete corretamente seu uso pretendido. A Licença Pessoal permite o uso livre do Software para atividades comerciais individuais, não comerciais ou em pequena escala com rendimento bruto anual inferior a 100.000 USD. Qualquer uso além destes limites — incluindo uso dentro de um negócio, organização, ou outro ambiente gerador de receitas — requer uma Licença Enterprise válida e o pagamento da taxa aplicável de licenciamento. 
Todos os usuários, pessoais ou empresariais, devem cumprir os Termos da Licença Comercial Fossorial." + "description": "Selecione o nível de licença que reflete corretamente seu uso pretendido. A Licença Pessoal permite o uso livre do Software para atividades comerciais individuais, não comerciais ou em pequena escala com rendimento bruto anual inferior a 100.000 USD. Qualquer uso além destes limites - incluindo uso dentro de um negócio, organização, ou outro ambiente gerador de receitas - requer uma Licença Enterprise válida e o pagamento da taxa aplicável de licenciamento. Todos os usuários, pessoais ou empresariais, devem cumprir os Termos da Licença Comercial Fossorial." }, "trialPeriodInformation": { "title": "Informações do Período de Avaliação", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "Matriz JSON", "httpDestFormatJsonArrayDescription": "Um pedido por lote, o corpo é um array JSON. Compatível com a maioria dos webhooks genéricos e Datadog.", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "Um pedido por lote, o corpo é um JSON delimitado por nova-linha — um objeto por linha, sem array exterior. Requerido pelo Splunk HEC, Elástico / OpenSearch, e Grafana Loki.", + "httpDestFormatNdjsonDescription": "Um pedido por lote, o corpo é um JSON delimitado por nova-linha - um objeto por linha, sem array exterior. Requerido pelo Splunk HEC, Elástico / OpenSearch, e Grafana Loki.", "httpDestFormatSingleTitle": "Um Evento por Requisição", "httpDestFormatSingleDescription": "Envia um POST HTTP separado para cada evento. 
Utilize apenas para endpoints que não podem manipular lotes.", "httpDestLogTypesTitle": "Tipos de log", diff --git a/messages/ru-RU.json b/messages/ru-RU.json index 30596846a..871b292d9 100644 --- a/messages/ru-RU.json +++ b/messages/ru-RU.json @@ -56,7 +56,7 @@ "siteManageSites": "Управление сайтами", "siteDescription": "Создание и управление сайтами, чтобы включить подключение к приватным сетям", "sitesBannerTitle": "Подключить любую сеть", - "sitesBannerDescription": "Сайт — это соединение с удаленной сетью, которое позволяет Pangolin предоставлять доступ к ресурсам, будь они общедоступными или частными, пользователям в любом месте. Установите сетевой коннектор сайта (Newt) там, где можно запустить исполняемый файл или контейнер, чтобы установить соединение.", + "sitesBannerDescription": "Сайт - это соединение с удаленной сетью, которое позволяет Pangolin предоставлять доступ к ресурсам, будь они общедоступными или частными, пользователям в любом месте. Установите сетевой коннектор сайта (Newt) там, где можно запустить исполняемый файл или контейнер, чтобы установить соединение.", "sitesBannerButtonText": "Установить сайт", "approvalsBannerTitle": "Одобрить или запретить доступ к устройству", "approvalsBannerDescription": "Просмотрите и подтвердите или отклоните запросы на доступ к устройству от пользователей. Когда требуется подтверждение устройства, пользователи должны получить одобрение администратора, прежде чем их устройства смогут подключиться к ресурсам вашей организации.", @@ -163,7 +163,7 @@ "proxyResourceTitle": "Управление публичными ресурсами", "proxyResourceDescription": "Создание и управление ресурсами, которые доступны через веб-браузер", "proxyResourcesBannerTitle": "Общедоступный доступ через веб", - "proxyResourcesBannerDescription": "Общедоступные ресурсы — это прокси-по HTTPS или TCP/UDP, доступные любому пользователю в Интернете через веб-браузер. 
В отличие от частных ресурсов, они не требуют программного обеспечения на стороне клиента и могут включать политики доступа на основе идентификации и контекста.", + "proxyResourcesBannerDescription": "Общедоступные ресурсы - это прокси-по HTTPS или TCP/UDP, доступные любому пользователю в Интернете через веб-браузер. В отличие от частных ресурсов, они не требуют программного обеспечения на стороне клиента и могут включать политики доступа на основе идентификации и контекста.", "clientResourceTitle": "Управление приватными ресурсами", "clientResourceDescription": "Создание и управление ресурсами, которые доступны только через подключенный клиент", "privateResourcesBannerTitle": "Частный доступ с нулевым доверием", @@ -371,7 +371,7 @@ "provisioningKeysUpdated": "Ключ подготовки обновлен", "provisioningKeysUpdatedDescription": "Ваши изменения были сохранены.", "provisioningKeysBannerTitle": "Ключи подготовки сайта", - "provisioningKeysBannerDescription": "Создайте ключ настройки и используйте его с соединителем Newt для автоматического создания сайтов при первом запуске — нет необходимости настраивать отдельные учетные данные для каждого сайта.", + "provisioningKeysBannerDescription": "Создайте ключ настройки и используйте его с соединителем Newt для автоматического создания сайтов при первом запуске - нет необходимости настраивать отдельные учетные данные для каждого сайта.", "provisioningKeysBannerButtonText": "Узнать больше", "pendingSitesBannerTitle": "Ожидающие сайты", "pendingSitesBannerDescription": "Сайты, подключающиеся с помощью ключа настройки, отображаются здесь для проверки.", @@ -1994,7 +1994,7 @@ "description": "Более надежный и низко обслуживаемый сервер Pangolin с дополнительными колокольнями и свистками", "introTitle": "Управляемый Само-Хост Панголина", "introDescription": "- это вариант развертывания, предназначенный для людей, которые хотят простоты и надёжности, сохраняя при этом свои данные конфиденциальными и самостоятельными.", - "introDetail": 
"С помощью этой опции вы по-прежнему используете узел Pangolin — туннели, SSL, и весь остающийся на вашем сервере. Разница заключается в том, что управление и мониторинг осуществляются через нашу панель инструментов из облака, которая открывает ряд преимуществ:", + "introDetail": "С помощью этой опции вы по-прежнему используете узел Pangolin - туннели, SSL, и весь остающийся на вашем сервере. Разница заключается в том, что управление и мониторинг осуществляются через нашу панель инструментов из облака, которая открывает ряд преимуществ:", "benefitSimplerOperations": { "title": "Более простые операции", "description": "Не нужно запускать свой собственный почтовый сервер или настроить комплексное оповещение. Вы будете получать проверки состояния здоровья и оповещения о неисправностях из коробки." @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "Выберите домен для страницы аутентификации организации", "domainPickerProvidedDomain": "Домен предоставлен", "domainPickerFreeProvidedDomain": "Бесплатный домен", - "domainPickerFreeDomainsPaidFeature": "Предоставленные домены являются платной функцией. Подпишитесь, чтобы получить домен, включенный в ваш план — не нужно приносить свой собственный.", + "domainPickerFreeDomainsPaidFeature": "Предоставленные домены являются платной функцией. Подпишитесь, чтобы получить домен, включенный в ваш план - не нужно приносить свой собственный.", "domainPickerVerified": "Подтверждено", "domainPickerUnverified": "Не подтверждено", "domainPickerManual": "Ручной", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Раскрытие", - "description": "Выберите уровень лицензии, который точно отражает ваше предполагаемое использование. Личная Лицензия разрешает свободное использование Программного Обеспечения для частной, некоммерческой или малой коммерческой деятельности с годовым валовым доходом до $100 000 USD. 
Любое использование сверх этих пределов — включая использование в бизнесе, организацию, или другой приносящей доход среде — требует действительной лицензии предприятия и уплаты соответствующей лицензионной платы. Все пользователи, будь то Личные или Предприятия, обязаны соблюдать условия коммерческой лицензии Fossoral." + "description": "Выберите уровень лицензии, который точно отражает ваше предполагаемое использование. Личная Лицензия разрешает свободное использование Программного Обеспечения для частной, некоммерческой или малой коммерческой деятельности с годовым валовым доходом до $100 000 USD. Любое использование сверх этих пределов - включая использование в бизнесе, организацию, или другой приносящей доход среде - требует действительной лицензии предприятия и уплаты соответствующей лицензионной платы. Все пользователи, будь то Личные или Предприятия, обязаны соблюдать условия коммерческой лицензии Fossoral." }, "trialPeriodInformation": { "title": "Информация о пробном периоде", diff --git a/messages/tr-TR.json b/messages/tr-TR.json index 3055d6597..754b529ac 100644 --- a/messages/tr-TR.json +++ b/messages/tr-TR.json @@ -1994,7 +1994,7 @@ "description": "Daha güvenilir ve düşük bakım gerektiren, ekstra özelliklere sahip kendi kendine barındırabileceğiniz Pangolin sunucusu", "introTitle": "Yönetilen Kendi Kendine Barındırılan Pangolin", "introDescription": "Bu, basitlik ve ekstra güvenilirlik arayan, ancak verilerini gizli tutmak ve kendi sunucularında barındırmak isteyen kişiler için tasarlanmış bir dağıtım seçeneğidir.", - "introDetail": "Bu seçenekle, kendi Pangolin düğümünüzü çalıştırmaya devam edersiniz — tünelleriniz, SSL bitişiniz ve trafiğiniz tamamen sunucunuzda kalır. Fark, yönetim ve izlemeyi bulut panomuz üzerinden gerçekleştiririz, bu da bir dizi avantaj sağlar:", + "introDetail": "Bu seçenekle, kendi Pangolin düğümünüzü çalıştırmaya devam edersiniz - tünelleriniz, SSL bitişiniz ve trafiğiniz tamamen sunucunuzda kalır. 
Fark, yönetim ve izlemeyi bulut panomuz üzerinden gerçekleştiririz, bu da bir dizi avantaj sağlar:", "benefitSimplerOperations": { "title": "Daha basit işlemler", "description": "Kendi e-posta sunucunuzu çalıştırmanıza veya karmaşık uyarılar kurmanıza gerek yok. Sağlık kontrolleri ve kesinti uyarılarını kutudan çıktığı gibi alırsınız." @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "Kullanım Açıklaması", - "description": "Kullanım amacınızı doğru bir şekilde yansıtan lisans seviyesini seçin. Kişisel Lisans, yazılımın bireysel, ticari olmayan veya yıllık geliri 100,000 ABD Dolarının altında olan küçük ölçekli ticari faaliyetlerde ücretsiz kullanılmasına izin verir. Bu sınırların ötesinde kullanım — bir işletme, organizasyon veya diğer gelir getirici ortamlarda kullanım dahil olmak üzere — geçerli bir Kurumsal Lisans ve ilgili lisans ücretinin ödenmesini gerektirir. Tüm kullanıcılar, ister Kişisel ister Kurumsal, Fossorial Ticari Lisans Şartlarına uymalıdır." + "description": "Kullanım amacınızı doğru bir şekilde yansıtan lisans seviyesini seçin. Kişisel Lisans, yazılımın bireysel, ticari olmayan veya yıllık geliri 100,000 ABD Dolarının altında olan küçük ölçekli ticari faaliyetlerde ücretsiz kullanılmasına izin verir. Bu sınırların ötesinde kullanım - bir işletme, organizasyon veya diğer gelir getirici ortamlarda kullanım dahil olmak üzere - geçerli bir Kurumsal Lisans ve ilgili lisans ücretinin ödenmesini gerektirir. Tüm kullanıcılar, ister Kişisel ister Kurumsal, Fossorial Ticari Lisans Şartlarına uymalıdır." 
}, "trialPeriodInformation": { "title": "Deneme Süresi Bilgileri", diff --git a/messages/zh-CN.json b/messages/zh-CN.json index 5b446a0f3..038d4cb01 100644 --- a/messages/zh-CN.json +++ b/messages/zh-CN.json @@ -1994,7 +1994,7 @@ "description": "更可靠和低维护自我托管的 Pangolin 服务器,带有额外的铃声和告密器", "introTitle": "托管自托管的潘戈林公司", "introDescription": "这是一种部署选择,为那些希望简洁和额外可靠的人设计,同时仍然保持他们的数据的私密性和自我托管性。", - "introDetail": "通过此选项,您仍然运行您自己的 Pangolin 节点 — — 您的隧道、SSL 终止,并且流量在您的服务器上保持所有状态。 不同之处在于,管理和监测是通过我们的云层仪表板进行的,该仪表板开启了一些好处:", + "introDetail": "通过此选项,您仍然运行您自己的 Pangolin 节点 - - 您的隧道、SSL 终止,并且流量在您的服务器上保持所有状态。 不同之处在于,管理和监测是通过我们的云层仪表板进行的,该仪表板开启了一些好处:", "benefitSimplerOperations": { "title": "简单的操作", "description": "无需运行您自己的邮件服务器或设置复杂的警报。您将从方框中获得健康检查和下限提醒。" @@ -2119,7 +2119,7 @@ "selectDomainForOrgAuthPage": "选择组织认证页面的域", "domainPickerProvidedDomain": "提供的域", "domainPickerFreeProvidedDomain": "免费提供的域", - "domainPickerFreeDomainsPaidFeature": "提供的域名是付费功能。订阅即可将域名包含在您的计划中—无需自带域名。", + "domainPickerFreeDomainsPaidFeature": "提供的域名是付费功能。订阅即可将域名包含在您的计划中-无需自带域名。", "domainPickerVerified": "已验证", "domainPickerUnverified": "未验证", "domainPickerManual": "手动", @@ -2297,7 +2297,7 @@ "alerts": { "commercialUseDisclosure": { "title": "使用情况披露", - "description": "选择能准确反映您预定用途的许可等级。 个人许可证允许对个人、非商业性或小型商业活动免费使用软件,年收入毛额不到100 000美元。 超出这些限度的任何用途,包括在企业、组织内的用途。 或其他创收环境——需要有效的企业许可证和支付适用的许可证费用。 所有用户,不论是个人还是企业,都必须遵守寄养商业许可证条款。" + "description": "选择能准确反映您预定用途的许可等级。 个人许可证允许对个人、非商业性或小型商业活动免费使用软件,年收入毛额不到100 000美元。 超出这些限度的任何用途,包括在企业、组织内的用途。 或其他创收环境--需要有效的企业许可证和支付适用的许可证费用。 所有用户,不论是个人还是企业,都必须遵守寄养商业许可证条款。" }, "trialPeriodInformation": { "title": "试用期信息", @@ -2882,7 +2882,7 @@ "httpDestFormatJsonArrayTitle": "JSON 数组", "httpDestFormatJsonArrayDescription": "每批一个请求,实体是一个 JSON 数组。与大多数通用的 Web 钩子和数据兼容。", "httpDestFormatNdjsonTitle": "NDJSON", - "httpDestFormatNdjsonDescription": "每批有一个请求,物体是换行符限制的 JSON ——每行一个对象,不是外部数组。 Sluk HEC、Elastic / OpenSearch和Grafana Loki所需。", + "httpDestFormatNdjsonDescription": "每批有一个请求,物体是换行符限制的 JSON 
--每行一个对象,不是外部数组。 Sluk HEC、Elastic / OpenSearch和Grafana Loki所需。", "httpDestFormatSingleTitle": "每个请求一个事件", "httpDestFormatSingleDescription": "为每个事件单独发送一个 HTTP POST。仅用于无法处理批量的端点。", "httpDestLogTypesTitle": "日志类型", diff --git a/messages/zh-TW.json b/messages/zh-TW.json index cf7c25ced..1ef8061e2 100644 --- a/messages/zh-TW.json +++ b/messages/zh-TW.json @@ -1763,7 +1763,7 @@ "description": "更可靠、維護成本更低的自架 Pangolin 伺服器,並附帶額外的附加功能", "introTitle": "託管式自架 Pangolin", "introDescription": "這是一種部署選擇,為那些希望簡潔和額外可靠的人設計,同時仍然保持他們的數據的私密性和自我託管性。", - "introDetail": "通過此選項,您仍然運行您自己的 Pangolin 節點 — — 您的隧道、SSL 終止,並且流量在您的伺服器上保持所有狀態。 不同之處在於,管理和監測是通過我們的雲層儀錶板進行的,該儀錶板開啟了一些好處:", + "introDetail": "通過此選項,您仍然運行您自己的 Pangolin 節點 - - 您的隧道、SSL 終止,並且流量在您的伺服器上保持所有狀態。 不同之處在於,管理和監測是通過我們的雲層儀錶板進行的,該儀錶板開啟了一些好處:", "benefitSimplerOperations": { "title": "簡單的操作", "description": "無需運行您自己的郵件伺服器或設置複雜的警報。您將從方框中獲得健康檢查和下限提醒。" @@ -2035,7 +2035,7 @@ "alerts": { "commercialUseDisclosure": { "title": "使用情況披露", - "description": "選擇能準確反映您預定用途的許可等級。 個人許可證允許對個人、非商業性或小型商業活動免費使用軟體,年收入毛額不到 100,000 美元。 超出這些限度的任何用途,包括在企業、組織內的用途。 或其他創收環境——需要有效的企業許可證和支付適用的許可證費用。 所有用戶,不論是個人還是企業,都必須遵守寄養商業許可證條款。" + "description": "選擇能準確反映您預定用途的許可等級。 個人許可證允許對個人、非商業性或小型商業活動免費使用軟體,年收入毛額不到 100,000 美元。 超出這些限度的任何用途,包括在企業、組織內的用途。 或其他創收環境--需要有效的企業許可證和支付適用的許可證費用。 所有用戶,不論是個人還是企業,都必須遵守寄養商業許可證條款。" }, "trialPeriodInformation": { "title": "試用期資訊", diff --git a/package-lock.json b/package-lock.json index a6ce609ae..f5b422b89 100644 --- a/package-lock.json +++ b/package-lock.json @@ -44,6 +44,7 @@ "@tailwindcss/forms": "0.5.11", "@tanstack/react-query": "5.90.21", "@tanstack/react-table": "8.21.3", + "@xyflow/react": "^12.8.4", "arctic": "3.7.0", "axios": "1.13.5", "better-sqlite3": "11.9.1", @@ -89,13 +90,13 @@ "reodotdev": "1.1.0", "resend": "6.9.2", "semver": "7.7.4", - "sshpk": "^1.18.0", + "sshpk": "1.18.0", "stripe": "20.4.1", "swagger-ui-express": "5.0.1", "tailwind-merge": "3.5.0", "topojson-client": "3.1.0", "tw-animate-css": "1.4.0", - "use-debounce": 
"^10.1.0", + "use-debounce": "10.1.0", "uuid": "13.0.0", "vaul": "1.1.2", "visionscarto-world-atlas": "1.0.0", @@ -130,7 +131,7 @@ "@types/react": "19.2.14", "@types/react-dom": "19.2.3", "@types/semver": "7.7.1", - "@types/sshpk": "^1.17.4", + "@types/sshpk": "1.17.4", "@types/swagger-ui-express": "4.1.8", "@types/topojson-client": "3.1.5", "@types/ws": "8.18.1", @@ -1058,6 +1059,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -2353,6 +2355,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2375,6 +2378,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2397,6 +2401,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2413,6 +2418,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2429,6 +2435,7 @@ "cpu": [ "arm" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2445,6 +2452,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2461,6 +2469,7 @@ "cpu": [ "ppc64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2477,6 +2486,7 @@ "cpu": [ "s390x" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2493,6 +2503,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2509,6 +2520,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2525,6 +2537,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "LGPL-3.0-or-later", "optional": true, "os": [ @@ -2541,6 +2554,7 @@ "cpu": [ "arm" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2563,6 +2577,7 @@ 
"cpu": [ "arm64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2585,6 +2600,7 @@ "cpu": [ "ppc64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2607,6 +2623,7 @@ "cpu": [ "s390x" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2629,6 +2646,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2651,6 +2669,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2673,6 +2692,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "Apache-2.0", "optional": true, "os": [ @@ -2695,6 +2715,7 @@ "cpu": [ "wasm32" ], + "dev": true, "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", "optional": true, "dependencies": { @@ -2714,6 +2735,7 @@ "cpu": [ "arm64" ], + "dev": true, "license": "Apache-2.0 AND LGPL-3.0-or-later", "optional": true, "os": [ @@ -2733,6 +2755,7 @@ "cpu": [ "ia32" ], + "dev": true, "license": "Apache-2.0 AND LGPL-3.0-or-later", "optional": true, "os": [ @@ -2752,6 +2775,7 @@ "cpu": [ "x64" ], + "dev": true, "license": "Apache-2.0 AND LGPL-3.0-or-later", "optional": true, "os": [ @@ -3011,6 +3035,7 @@ "integrity": "sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": "^14.21.3 || >=16" }, @@ -6957,6 +6982,7 @@ "resolved": "https://registry.npmjs.org/@react-email/text/-/text-0.1.6.tgz", "integrity": "sha512-TYqkioRS45wTR5il3dYk/SbUjjEdhSwh9BtRNB99qNH1pXAwA45H7rAuxehiu8iJQJH0IyIr+6n62gBz9ezmsw==", "license": "MIT", + "peer": true, "engines": { "node": ">=20.0.0" }, @@ -8417,6 +8443,7 @@ "version": "5.90.21", "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.21.tgz", "integrity": "sha512-0Lu6y5t+tvlTJMTO7oh5NSpJfpg/5D41LlThfepTixPYkJ0sE2Jj0m0f6yYqujBwIXlId87e234+MxG3D3g7kg==", + "peer": true, "dependencies": { "@tanstack/query-core": "5.90.20" }, @@ -8532,6 +8559,7 
@@ "integrity": "sha512-NMv9ASNARoKksWtsq/SHakpYAYnhBrQgGD8zkLYk/jaK8jUGn08CfEdTRgYhMypUQAfzSP8W6gNLe0q19/t4VA==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@types/node": "*" } @@ -8691,7 +8719,6 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", - "dev": true, "license": "MIT", "dependencies": { "@types/d3-selection": "*" @@ -8807,7 +8834,6 @@ "version": "3.0.11", "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", - "dev": true, "license": "MIT" }, "node_modules/@types/d3-shape": { @@ -8842,7 +8868,6 @@ "version": "3.0.9", "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", - "dev": true, "license": "MIT", "dependencies": { "@types/d3-selection": "*" @@ -8852,7 +8877,6 @@ "version": "3.0.8", "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", - "dev": true, "license": "MIT", "dependencies": { "@types/d3-interpolate": "*", @@ -8879,6 +8903,7 @@ "integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^5.0.0", @@ -8974,6 +8999,7 @@ "integrity": "sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.18.0" } @@ -9001,6 +9027,7 @@ 
"integrity": "sha512-gT+oueVQkqnj6ajGJXblFR4iavIXWsGAFCk3dP4Kki5+a9R4NMt0JARdk6s8cUKcfUoqP5dAtDSLU8xYUTFV+Q==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@types/node": "*", "pg-protocol": "*", @@ -9026,6 +9053,7 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "devOptional": true, + "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -9036,6 +9064,7 @@ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "devOptional": true, "license": "MIT", + "peer": true, "peerDependencies": { "@types/react": "^19.2.0" } @@ -9122,8 +9151,7 @@ "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", "license": "MIT", - "optional": true, - "peer": true + "optional": true }, "node_modules/@types/ws": { "version": "8.18.1", @@ -9197,6 +9225,7 @@ "integrity": "sha512-klQbnPAAiGYFyI02+znpBRLyjL4/BrBd0nyWkdC0s/6xFLkXYQ8OoRrSkqacS1ddVxf/LDyODIKbQ5TgKAf/Fg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.56.1", "@typescript-eslint/types": "8.56.1", @@ -9651,6 +9680,38 @@ "win32" ] }, + "node_modules/@xyflow/react": { + "version": "12.8.4", + "resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.8.4.tgz", + "integrity": "sha512-bqUu4T5QSHiCFPkoH+b+LROKwQJdLvcjhGbNW9c1dLafCBRjmH1IYz0zPE+lRDXCtQ9kRyFxz3tG19+8VORJ1w==", + "license": "MIT", + "dependencies": { + "@xyflow/system": "0.0.68", + "classcat": "^5.0.3", + "zustand": "^4.4.0" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@xyflow/system": { + "version": "0.0.68", + "resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.68.tgz", + 
"integrity": "sha512-QDG2wxIG4qX+uF8yzm1ULVZrcXX3MxPBoxv7O52FWsX87qIImOqifUhfa/TwsvLdzn7ic2DDBH1uI8TKbdNTYA==", + "license": "MIT", + "dependencies": { + "@types/d3-drag": "^3.0.7", + "@types/d3-interpolate": "^3.0.4", + "@types/d3-selection": "^3.0.10", + "@types/d3-transition": "^3.0.8", + "@types/d3-zoom": "^3.0.8", + "d3-drag": "^3.0.0", + "d3-interpolate": "^3.0.1", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0" + } + }, "node_modules/accepts": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", @@ -9670,6 +9731,7 @@ "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -10118,6 +10180,7 @@ "integrity": "sha512-Ixm8tFfoKKIPYdCCKYTsqv+Fd4IJ0DQqMyEimo+pxUOMUR9cVPlwTrFt9Avu+3cb6Zp3mAzl+t1MrG2fxxKsxw==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/types": "^7.26.0" } @@ -10189,6 +10252,7 @@ "integrity": "sha512-Ba0KR+Fzxh2jDRhdg6TSH0SJGzb8C0aBY4hR8w8madIdIzzC6Y1+kx5qR6eS1Z+Gy20h6ZU28aeyg0z1VIrShQ==", "hasInstallScript": true, "license": "MIT", + "peer": true, "dependencies": { "bindings": "^1.5.0", "prebuild-install": "^7.1.1" @@ -10317,6 +10381,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -10499,6 +10564,12 @@ "url": "https://polar.sh/cva" } }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, "node_modules/cli-spinners": { "version": "2.9.2", "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", @@ -11223,6 +11294,7 @@ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": 
"sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "license": "ISC", + "peer": true, "engines": { "node": ">=12" } @@ -11663,7 +11735,6 @@ "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.2.tgz", "integrity": "sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==", "license": "(MPL-2.0 OR Apache-2.0)", - "peer": true, "engines": { "node": ">=20" }, @@ -12298,6 +12369,7 @@ "dev": true, "hasInstallScript": true, "license": "MIT", + "peer": true, "bin": { "esbuild": "bin/esbuild" }, @@ -12383,6 +12455,7 @@ "integrity": "sha512-COV33RzXZkqhG9P2rZCFl9ZmJ7WL+gQSCRzE7RhkbclbQPtLAWReL7ysA0Sh4c8Im2U9ynybdR56PV0XcKvqaQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.2", @@ -12519,6 +12592,7 @@ "integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@rtsao/scc": "^1.1.0", "array-includes": "^3.1.9", @@ -12912,6 +12986,7 @@ "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", "license": "MIT", + "peer": true, "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", @@ -15329,7 +15404,6 @@ "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.55.1.tgz", "integrity": "sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A==", "license": "MIT", - "peer": true, "dependencies": { "dompurify": "3.2.7", "marked": "14.0.0" @@ -15340,7 +15414,6 @@ "resolved": "https://registry.npmjs.org/marked/-/marked-14.0.0.tgz", "integrity": "sha512-uIj4+faQ+MgHgwUW1l2PsPglZLOLOT1uErt06dAPtx2kjteLAkbsd/0FiYg/MGS+i7ZKLb7w2WClxHkzOOuryQ==", "license": "MIT", - "peer": true, "bin": { 
"marked": "bin/marked.js" }, @@ -15429,6 +15502,7 @@ "resolved": "https://registry.npmjs.org/next/-/next-15.5.14.tgz", "integrity": "sha512-M6S+4JyRjmKic2Ssm7jHUPkE6YUJ6lv4507jprsSZLulubz0ihO2E+S4zmQK3JZ2ov81JrugukKU4Tz0ivgqqQ==", "license": "MIT", + "peer": true, "dependencies": { "@next/env": "15.5.14", "@swc/helpers": "0.5.15", @@ -16388,6 +16462,7 @@ "resolved": "https://registry.npmjs.org/pg/-/pg-8.20.0.tgz", "integrity": "sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==", "license": "MIT", + "peer": true, "dependencies": { "pg-connection-string": "^2.12.0", "pg-pool": "^3.13.0", @@ -16892,6 +16967,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -16923,6 +16999,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", "license": "MIT", + "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -17215,6 +17292,7 @@ "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.71.2.tgz", "integrity": "sha512-1CHvcDYzuRUNOflt4MOq3ZM46AronNJtQ1S7tnX6YN4y72qhgiUItpacZUAQ0TyWYci3yz1X+rXaSxiuEm86PA==", "license": "MIT", + "peer": true, "engines": { "node": ">=18.0.0" }, @@ -18676,7 +18754,8 @@ "version": "4.2.2", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.2.tgz", "integrity": "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==", - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/tapable": { "version": "2.3.2", @@ -19151,6 +19230,7 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "devOptional": true, "license": 
"Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -19578,6 +19658,7 @@ "resolved": "https://registry.npmjs.org/winston/-/winston-3.19.0.tgz", "integrity": "sha512-LZNJgPzfKR+/J3cHkxcpHKpKKvGfDZVPS4hfJCc4cCG0CgYzvlD6yE/S3CIL/Yt91ak327YCpiF/0MyeZHEHKA==", "license": "MIT", + "peer": true, "dependencies": { "@colors/colors": "^1.6.0", "@dabh/diagnostics": "^2.0.8", @@ -19784,6 +19865,7 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } @@ -19799,6 +19881,34 @@ "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } } } } diff --git a/public/third-party/incidentio.png b/public/third-party/incidentio.png new file mode 100644 index 000000000..e567d31fb Binary files /dev/null and b/public/third-party/incidentio.png differ diff --git a/public/third-party/opsgenie.png b/public/third-party/opsgenie.png new file mode 100644 index 000000000..3a1f5a849 Binary files /dev/null and b/public/third-party/opsgenie.png differ diff --git a/public/third-party/pgd.png b/public/third-party/pgd.png new file mode 100644 index 000000000..b084406a0 Binary files /dev/null and b/public/third-party/pgd.png differ diff --git 
a/public/third-party/servicenow.png b/public/third-party/servicenow.png new file mode 100644 index 000000000..b3fcca4dc Binary files /dev/null and b/public/third-party/servicenow.png differ diff --git a/server/auth/actions.ts b/server/auth/actions.ts index 213dab9d3..51804fd64 100644 --- a/server/auth/actions.ts +++ b/server/auth/actions.ts @@ -144,7 +144,19 @@ export enum ActionsEnum { createEventStreamingDestination = "createEventStreamingDestination", updateEventStreamingDestination = "updateEventStreamingDestination", deleteEventStreamingDestination = "deleteEventStreamingDestination", - listEventStreamingDestinations = "listEventStreamingDestinations" + listEventStreamingDestinations = "listEventStreamingDestinations", + createAlertRule = "createAlertRule", + updateAlertRule = "updateAlertRule", + deleteAlertRule = "deleteAlertRule", + listAlertRules = "listAlertRules", + getAlertRule = "getAlertRule", + createHealthCheck = "createHealthCheck", + updateHealthCheck = "updateHealthCheck", + deleteHealthCheck = "deleteHealthCheck", + listHealthChecks = "listHealthChecks", + triggerSiteAlert = "triggerSiteAlert", + triggerResourceAlert = "triggerResourceAlert", + triggerHealthCheckAlert = "triggerHealthCheckAlert" } export async function checkUserActionPermission( diff --git a/server/db/pg/schema/privateSchema.ts b/server/db/pg/schema/privateSchema.ts index 4122fb5b5..1f8085c35 100644 --- a/server/db/pg/schema/privateSchema.ts +++ b/server/db/pg/schema/privateSchema.ts @@ -16,11 +16,14 @@ import { domains, orgs, targets, + roles, users, exitNodes, sessions, clients, + resources, siteResources, + targetHealthCheck, sites } from "./schema"; @@ -425,7 +428,9 @@ export const eventStreamingDestinations = pgTable( orgId: varchar("orgId", { length: 255 }) .notNull() .references(() => orgs.orgId, { onDelete: "cascade" }), - sendConnectionLogs: boolean("sendConnectionLogs").notNull().default(false), + sendConnectionLogs: boolean("sendConnectionLogs") + .notNull() + 
.default(false), sendRequestLogs: boolean("sendRequestLogs").notNull().default(false), sendActionLogs: boolean("sendActionLogs").notNull().default(false), sendAccessLogs: boolean("sendAccessLogs").notNull().default(false), @@ -447,7 +452,9 @@ export const eventStreamingCursors = pgTable( onDelete: "cascade" }), logType: varchar("logType", { length: 50 }).notNull(), // "request" | "action" | "access" | "connection" - lastSentId: bigint("lastSentId", { mode: "number" }).notNull().default(0), + lastSentId: bigint("lastSentId", { mode: "number" }) + .notNull() + .default(0), lastSentAt: bigint("lastSentAt", { mode: "number" }) // epoch milliseconds, null if never sent }, (table) => [ @@ -458,6 +465,104 @@ export const eventStreamingCursors = pgTable( ] ); +export const alertRules = pgTable("alertRules", { + alertRuleId: serial("alertRuleId").primaryKey(), + orgId: varchar("orgId", { length: 255 }) + .notNull() + .references(() => orgs.orgId, { onDelete: "cascade" }), + name: varchar("name", { length: 255 }).notNull(), + // Single field encodes both source and trigger - no redundancy + eventType: varchar("eventType", { length: 100 }) + .$type< + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | "resource_unhealthy" + | "resource_toggle" + >() + .notNull(), + // Nullable depending on eventType + enabled: boolean("enabled").notNull().default(true), + cooldownSeconds: integer("cooldownSeconds").notNull().default(300), + allSites: boolean("allSites").notNull().default(false), + allHealthChecks: boolean("allHealthChecks").notNull().default(false), + allResources: boolean("allResources").notNull().default(false), + lastTriggeredAt: bigint("lastTriggeredAt", { mode: "number" }), // nullable + createdAt: bigint("createdAt", { mode: "number" }).notNull(), + updatedAt: bigint("updatedAt", { mode: "number" }).notNull() +}); + +export const alertSites = 
pgTable("alertSites", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + siteId: integer("siteId") + .notNull() + .references(() => sites.siteId, { onDelete: "cascade" }) +}); + +export const alertHealthChecks = pgTable("alertHealthChecks", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + healthCheckId: integer("healthCheckId") + .notNull() + .references(() => targetHealthCheck.targetHealthCheckId, { + onDelete: "cascade" + }) +}); + +export const alertResources = pgTable("alertResources", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + resourceId: integer("resourceId") + .notNull() + .references(() => resources.resourceId, { onDelete: "cascade" }) +}); + +// Separating channels by type avoids the mixed-shape problem entirely +export const alertEmailActions = pgTable("alertEmailActions", { + emailActionId: serial("emailActionId").primaryKey(), + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + enabled: boolean("enabled").notNull().default(true), + lastSentAt: bigint("lastSentAt", { mode: "number" }) // nullable +}); + +export const alertEmailRecipients = pgTable("alertEmailRecipients", { + recipientId: serial("recipientId").primaryKey(), + emailActionId: integer("emailActionId") + .notNull() + .references(() => alertEmailActions.emailActionId, { + onDelete: "cascade" + }), + // At least one of these should be set - enforced at app level + userId: varchar("userId").references(() => users.userId, { + onDelete: "cascade" + }), + roleId: integer("roleId").references(() => roles.roleId, { + onDelete: "cascade" + }), + email: varchar("email", { length: 255 }) // external emails not tied to a user +}); + +export const alertWebhookActions = pgTable("alertWebhookActions", { + 
webhookActionId: serial("webhookActionId").primaryKey(), + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + webhookUrl: text("webhookUrl").notNull(), + config: text("config"), // encrypted JSON with auth config (authType, credentials) + enabled: boolean("enabled").notNull().default(true), + lastSentAt: bigint("lastSentAt", { mode: "number" }) // nullable +}); + export type Approval = InferSelectModel; export type Limit = InferSelectModel; export type Account = InferSelectModel; @@ -495,3 +600,4 @@ export type EventStreamingDestination = InferSelectModel< export type EventStreamingCursor = InferSelectModel< typeof eventStreamingCursors >; +export type AlertResources = InferSelectModel; diff --git a/server/db/pg/schema/schema.ts b/server/db/pg/schema/schema.ts index acc3bb17f..b61cfcf19 100644 --- a/server/db/pg/schema/schema.ts +++ b/server/db/pg/schema/schema.ts @@ -57,7 +57,9 @@ export const orgs = pgTable("orgs", { settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year .notNull() .default(0), - settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year + settingsLogRetentionDaysConnection: integer( + "settingsLogRetentionDaysConnection" + ) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year .notNull() .default(0), sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format) @@ -101,7 +103,9 @@ export const sites = pgTable("sites", { lastHolePunch: bigint("lastHolePunch", { mode: "number" }), listenPort: integer("listenPort"), dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true), - status: varchar("status").$type<"pending" | "approved">().default("approved") + status: varchar("status") + 
.$type<"pending" | "approved">() + .default("approved") }); export const resources = pgTable("resources", { @@ -182,9 +186,18 @@ export const targets = pgTable("targets", { export const targetHealthCheck = pgTable("targetHealthCheck", { targetHealthCheckId: serial("targetHealthCheckId").primaryKey(), - targetId: integer("targetId") - .notNull() - .references(() => targets.targetId, { onDelete: "cascade" }), + targetId: integer("targetId").references(() => targets.targetId, { + onDelete: "cascade" + }), + orgId: varchar("orgId") + .references(() => orgs.orgId, { + onDelete: "cascade" + }) + .notNull(), + siteId: integer("siteId").references(() => sites.siteId, { + onDelete: "cascade" + }).notNull(), + name: varchar("name"), hcEnabled: boolean("hcEnabled").notNull().default(false), hcPath: varchar("hcPath"), hcScheme: varchar("hcScheme"), @@ -201,7 +214,9 @@ export const targetHealthCheck = pgTable("targetHealthCheck", { hcHealth: text("hcHealth") .$type<"unknown" | "healthy" | "unhealthy">() .default("unknown"), // "unknown", "healthy", "unhealthy" - hcTlsServerName: text("hcTlsServerName") + hcTlsServerName: text("hcTlsServerName"), + hcHealthyThreshold: integer("hcHealthyThreshold").default(1), + hcUnhealthyThreshold: integer("hcUnhealthyThreshold").default(1) }); export const exitNodes = pgTable("exitNodes", { @@ -222,16 +237,23 @@ export const exitNodes = pgTable("exitNodes", { export const siteResources = pgTable("siteResources", { // this is for the clients siteResourceId: serial("siteResourceId").primaryKey(), - siteId: integer("siteId") - .notNull() - .references(() => sites.siteId, { onDelete: "cascade" }), orgId: varchar("orgId") .notNull() .references(() => orgs.orgId, { onDelete: "cascade" }), + networkId: integer("networkId").references(() => networks.networkId, { + onDelete: "set null" + }), + defaultNetworkId: integer("defaultNetworkId").references( + () => networks.networkId, + { + onDelete: "restrict" + } + ), niceId: varchar("niceId").notNull(), 
name: varchar("name").notNull(), - mode: varchar("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port" - protocol: varchar("protocol"), // only for port mode + ssl: boolean("ssl").notNull().default(false), + mode: varchar("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http" + scheme: varchar("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode proxyPort: integer("proxyPort"), // only for port mode destinationPort: integer("destinationPort"), // only for port mode destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode @@ -244,7 +266,38 @@ export const siteResources = pgTable("siteResources", { authDaemonPort: integer("authDaemonPort").default(22123), authDaemonMode: varchar("authDaemonMode", { length: 32 }) .$type<"site" | "remote">() - .default("site") + .default("site"), + domainId: varchar("domainId").references(() => domains.domainId, { + onDelete: "set null" + }), + subdomain: varchar("subdomain"), + fullDomain: varchar("fullDomain") +}); + +export const networks = pgTable("networks", { + networkId: serial("networkId").primaryKey(), + niceId: text("niceId"), + name: text("name"), + scope: varchar("scope") + .$type<"global" | "resource">() + .notNull() + .default("global"), + orgId: varchar("orgId") + .references(() => orgs.orgId, { + onDelete: "cascade" + }) + .notNull() +}); + +export const siteNetworks = pgTable("siteNetworks", { + siteId: integer("siteId") + .notNull() + .references(() => sites.siteId, { + onDelete: "cascade" + }), + networkId: integer("networkId") + .notNull() + .references(() => networks.networkId, { onDelete: "cascade" }) }); export const clientSiteResources = pgTable("clientSiteResources", { @@ -994,6 +1047,7 @@ export const requestAuditLog = pgTable( actor: text("actor"), actorId: text("actorId"), resourceId: integer("resourceId"), + siteResourceId: integer("siteResourceId"), ip: text("ip"), location: text("location"), 
userAgent: text("userAgent"), @@ -1041,6 +1095,20 @@ export const roundTripMessageTracker = pgTable("roundTripMessageTracker", { complete: boolean("complete").notNull().default(false) }); +export const statusHistory = pgTable("statusHistory", { + id: serial("id").primaryKey(), + entityType: varchar("entityType").notNull(), + entityId: integer("entityId").notNull(), + orgId: varchar("orgId") + .notNull() + .references(() => orgs.orgId, { onDelete: "cascade" }), + status: varchar("status").notNull(), + timestamp: integer("timestamp").notNull(), +}, (table) => [ + index("idx_statusHistory_entity").on(table.entityType, table.entityId, table.timestamp), + index("idx_statusHistory_org_timestamp").on(table.orgId, table.timestamp), +]); + export type Org = InferSelectModel; export type User = InferSelectModel; export type Site = InferSelectModel; @@ -1107,3 +1175,5 @@ export type RequestAuditLog = InferSelectModel; export type RoundTripMessageTracker = InferSelectModel< typeof roundTripMessageTracker >; +export type Network = InferSelectModel; +export type StatusHistory = InferSelectModel; diff --git a/server/db/sqlite/schema/privateSchema.ts b/server/db/sqlite/schema/privateSchema.ts index c1aa084a2..a3168360f 100644 --- a/server/db/sqlite/schema/privateSchema.ts +++ b/server/db/sqlite/schema/privateSchema.ts @@ -13,9 +13,12 @@ import { domains, exitNodes, orgs, + resources, + roles, sessions, siteResources, sites, + targetHealthCheck, users } from "./schema"; @@ -455,6 +458,94 @@ export const eventStreamingCursors = sqliteTable( ] ); +export const alertRules = sqliteTable("alertRules", { + alertRuleId: integer("alertRuleId").primaryKey({ autoIncrement: true }), + orgId: text("orgId") + .notNull() + .references(() => orgs.orgId, { onDelete: "cascade" }), + name: text("name").notNull(), + eventType: text("eventType") + .$type< + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | 
"resource_healthy" + | "resource_unhealthy" + | "resource_toggle" + >() + .notNull(), + enabled: integer("enabled", { mode: "boolean" }).notNull().default(true), + cooldownSeconds: integer("cooldownSeconds").notNull().default(300), + allSites: integer("allSites", { mode: "boolean" }).notNull().default(false), + allHealthChecks: integer("allHealthChecks", { mode: "boolean" }).notNull().default(false), + allResources: integer("allResources", { mode: "boolean" }).notNull().default(false), + lastTriggeredAt: integer("lastTriggeredAt"), + createdAt: integer("createdAt").notNull(), + updatedAt: integer("updatedAt").notNull() +}); + +export const alertSites = sqliteTable("alertSites", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + siteId: integer("siteId") + .notNull() + .references(() => sites.siteId, { onDelete: "cascade" }) +}); + +export const alertHealthChecks = sqliteTable("alertHealthChecks", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + healthCheckId: integer("healthCheckId") + .notNull() + .references(() => targetHealthCheck.targetHealthCheckId, { + onDelete: "cascade" + }) +}); + +export const alertResources = sqliteTable("alertResources", { + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + resourceId: integer("resourceId") + .notNull() + .references(() => resources.resourceId, { onDelete: "cascade" }) +}); + +export const alertEmailActions = sqliteTable("alertEmailActions", { + emailActionId: integer("emailActionId").primaryKey({ autoIncrement: true }), + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + enabled: integer("enabled", { mode: "boolean" }).notNull().default(true), + lastSentAt: integer("lastSentAt") +}); + +export const alertEmailRecipients = 
sqliteTable("alertEmailRecipients", { + recipientId: integer("recipientId").primaryKey({ autoIncrement: true }), + emailActionId: integer("emailActionId") + .notNull() + .references(() => alertEmailActions.emailActionId, { onDelete: "cascade" }), + userId: text("userId").references(() => users.userId, { onDelete: "cascade" }), + roleId: integer("roleId").references(() => roles.roleId, { onDelete: "cascade" }), + email: text("email") +}); + +export const alertWebhookActions = sqliteTable("alertWebhookActions", { + webhookActionId: integer("webhookActionId").primaryKey({ autoIncrement: true }), + alertRuleId: integer("alertRuleId") + .notNull() + .references(() => alertRules.alertRuleId, { onDelete: "cascade" }), + webhookUrl: text("webhookUrl").notNull(), + config: text("config"), // encrypted JSON with auth config (authType, credentials) + enabled: integer("enabled", { mode: "boolean" }).notNull().default(true), + lastSentAt: integer("lastSentAt") +}); + export type Approval = InferSelectModel; export type Limit = InferSelectModel; export type Account = InferSelectModel; @@ -486,3 +577,4 @@ export type EventStreamingDestination = InferSelectModel< export type EventStreamingCursor = InferSelectModel< typeof eventStreamingCursors >; +export type AlertResources = InferSelectModel; diff --git a/server/db/sqlite/schema/schema.ts b/server/db/sqlite/schema/schema.ts index 1fb04ef14..c5600b756 100644 --- a/server/db/sqlite/schema/schema.ts +++ b/server/db/sqlite/schema/schema.ts @@ -54,7 +54,9 @@ export const orgs = sqliteTable("orgs", { settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year .notNull() .default(0), - settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year + settingsLogRetentionDaysConnection: integer( + "settingsLogRetentionDaysConnection" + 
) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year .notNull() .default(0), sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format) @@ -92,6 +94,9 @@ export const sites = sqliteTable("sites", { exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, { onDelete: "set null" }), + networkId: integer("networkId").references(() => networks.networkId, { + onDelete: "set null" + }), name: text("name").notNull(), pubKey: text("pubKey"), subnet: text("subnet"), @@ -204,9 +209,18 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", { targetHealthCheckId: integer("targetHealthCheckId").primaryKey({ autoIncrement: true }), - targetId: integer("targetId") - .notNull() - .references(() => targets.targetId, { onDelete: "cascade" }), + targetId: integer("targetId").references(() => targets.targetId, { + onDelete: "cascade" + }), + orgId: text("orgId") + .references(() => orgs.orgId, { + onDelete: "cascade" + }) + .notNull(), + siteId: integer("siteId").references(() => sites.siteId, { + onDelete: "cascade" + }).notNull(), + name: text("name"), hcEnabled: integer("hcEnabled", { mode: "boolean" }) .notNull() .default(false), @@ -227,7 +241,9 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", { hcHealth: text("hcHealth") .$type<"unknown" | "healthy" | "unhealthy">() .default("unknown"), // "unknown", "healthy", "unhealthy" - hcTlsServerName: text("hcTlsServerName") + hcTlsServerName: text("hcTlsServerName"), + hcHealthyThreshold: integer("hcHealthyThreshold").default(1), + hcUnhealthyThreshold: integer("hcUnhealthyThreshold").default(1) }); export const exitNodes = sqliteTable("exitNodes", { @@ -250,16 +266,21 @@ export const siteResources = sqliteTable("siteResources", { siteResourceId: integer("siteResourceId").primaryKey({ autoIncrement: true }), - siteId: integer("siteId") - .notNull() - .references(() => sites.siteId, { onDelete: "cascade" }), orgId: text("orgId") 
.notNull() .references(() => orgs.orgId, { onDelete: "cascade" }), + networkId: integer("networkId").references(() => networks.networkId, { + onDelete: "set null" + }), + defaultNetworkId: integer("defaultNetworkId").references( + () => networks.networkId, + { onDelete: "restrict" } + ), niceId: text("niceId").notNull(), name: text("name").notNull(), - mode: text("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port" - protocol: text("protocol"), // only for port mode + ssl: integer("ssl", { mode: "boolean" }).notNull().default(false), + mode: text("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http" + scheme: text("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode proxyPort: integer("proxyPort"), // only for port mode destinationPort: integer("destinationPort"), // only for port mode destination: text("destination").notNull(), // ip, cidr, hostname @@ -274,7 +295,36 @@ export const siteResources = sqliteTable("siteResources", { authDaemonPort: integer("authDaemonPort").default(22123), authDaemonMode: text("authDaemonMode") .$type<"site" | "remote">() - .default("site") + .default("site"), + domainId: text("domainId").references(() => domains.domainId, { + onDelete: "set null" + }), + subdomain: text("subdomain"), + fullDomain: text("fullDomain") +}); + +export const networks = sqliteTable("networks", { + networkId: integer("networkId").primaryKey({ autoIncrement: true }), + niceId: text("niceId"), + name: text("name"), + scope: text("scope") + .$type<"global" | "resource">() + .notNull() + .default("global"), + orgId: text("orgId") + .notNull() + .references(() => orgs.orgId, { onDelete: "cascade" }) +}); + +export const siteNetworks = sqliteTable("siteNetworks", { + siteId: integer("siteId") + .notNull() + .references(() => sites.siteId, { + onDelete: "cascade" + }), + networkId: integer("networkId") + .notNull() + .references(() => networks.networkId, { onDelete: "cascade" }) }); 
export const clientSiteResources = sqliteTable("clientSiteResources", { @@ -1096,6 +1146,7 @@ export const requestAuditLog = sqliteTable( actor: text("actor"), actorId: text("actorId"), resourceId: integer("resourceId"), + siteResourceId: integer("siteResourceId"), ip: text("ip"), location: text("location"), userAgent: text("userAgent"), @@ -1143,6 +1194,20 @@ export const roundTripMessageTracker = sqliteTable("roundTripMessageTracker", { complete: integer("complete", { mode: "boolean" }).notNull().default(false) }); +export const statusHistory = sqliteTable("statusHistory", { + id: integer("id").primaryKey({ autoIncrement: true }), + entityType: text("entityType").notNull(), // "site" | "healthCheck" + entityId: integer("entityId").notNull(), // siteId or targetHealthCheckId + orgId: text("orgId") + .notNull() + .references(() => orgs.orgId, { onDelete: "cascade" }), + status: text("status").notNull(), // "online"/"offline" for sites; "healthy"/"unhealthy"/"unknown" for healthChecks + timestamp: integer("timestamp").notNull(), // unix epoch seconds +}, (table) => [ + index("idx_statusHistory_entity").on(table.entityType, table.entityId, table.timestamp), + index("idx_statusHistory_org_timestamp").on(table.orgId, table.timestamp), +]); + export type Org = InferSelectModel; export type User = InferSelectModel; export type Site = InferSelectModel; @@ -1195,6 +1260,7 @@ export type ApiKey = InferSelectModel; export type ApiKeyAction = InferSelectModel; export type ApiKeyOrg = InferSelectModel; export type SiteResource = InferSelectModel; +export type Network = InferSelectModel; export type OrgDomains = InferSelectModel; export type SetupToken = InferSelectModel; export type HostMeta = InferSelectModel; @@ -1209,3 +1275,4 @@ export type DeviceWebAuthCode = InferSelectModel; export type RoundTripMessageTracker = InferSelectModel< typeof roundTripMessageTracker >; +export type StatusHistory = InferSelectModel; diff --git a/server/emails/templates/AlertNotification.tsx 
b/server/emails/templates/AlertNotification.tsx new file mode 100644 index 000000000..418924650 --- /dev/null +++ b/server/emails/templates/AlertNotification.tsx @@ -0,0 +1,201 @@ +import React from "react"; +import { Body, Head, Html, Preview, Tailwind } from "@react-email/components"; +import { themeColors } from "./lib/theme"; +import { + EmailContainer, + EmailFooter, + EmailGreeting, + EmailHeading, + EmailInfoSection, + EmailLetterHead, + EmailSignature, + EmailText +} from "./components/Email"; + +export type AlertEventType = + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | "resource_unhealthy" + | "resource_toggle"; + +interface Props { + eventType: AlertEventType; + orgId: string; + data: Record; +} + +function getEventMeta(eventType: AlertEventType): { + heading: string; + previewText: string; + summary: string; + statusLabel: string; + statusColor: string; +} { + switch (eventType) { + case "site_online": + return { + heading: "Site Back Online", + previewText: "A site in your organization is back online.", + summary: + "Good news – a site in your organization has come back online and is now reachable.", + statusLabel: "Online", + statusColor: "#16a34a" + }; + case "site_offline": + return { + heading: "Site Offline", + previewText: "A site in your organization has gone offline.", + summary: + "A site in your organization has gone offline and is no longer reachable. Please investigate as soon as possible.", + statusLabel: "Offline", + statusColor: "#dc2626" + }; + case "site_toggle": + return { + heading: "Site Status Changed", + previewText: "A site in your organization has changed status.", + summary: + "A site in your organization has changed status. 
Please review the details below and take action if needed.", + statusLabel: "Status Changed", + statusColor: "#f59e0b" + }; + case "health_check_healthy": + return { + heading: "Health Check Recovered", + previewText: + "A health check in your organization is now healthy.", + summary: + "A health check in your organization has recovered and is now reporting a healthy status.", + statusLabel: "Healthy", + statusColor: "#16a34a" + }; + case "health_check_unhealthy": + return { + heading: "Health Check Failing", + previewText: + "A health check in your organization is not healthy.", + summary: + "A health check in your organization is currently failing. Please review the details below and take action if needed.", + statusLabel: "Not Healthy", + statusColor: "#dc2626" + }; + case "health_check_toggle": + return { + heading: "Health Check Status Changed", + previewText: + "A health check in your organization has changed status.", + summary: + "A health check in your organization has changed status. Please review the details below and take action if needed.", + statusLabel: "Status Changed", + statusColor: "#f59e0b" + }; + case "resource_healthy": + return { + heading: "Resource Healthy", + previewText: "A resource in your organization is now healthy.", + summary: + "A resource in your organization has recovered and is now reporting a healthy status.", + statusLabel: "Healthy", + statusColor: "#16a34a" + }; + case "resource_unhealthy": + return { + heading: "Resource Unhealthy", + previewText: "A resource in your organization is not healthy.", + summary: + "A resource in your organization is currently unhealthy. Please review the details below and take action if needed.", + statusLabel: "Unhealthy", + statusColor: "#dc2626" + }; + case "resource_toggle": + return { + heading: "Resource Status Changed", + previewText: + "A resource in your organization has changed status.", + summary: + "A resource in your organization has changed status. 
Please review the details below and take action if needed.", + statusLabel: "Status Changed", + statusColor: "#f59e0b" + }; + default: + return { + heading: "Alert Notification", + previewText: "An alert event has occurred in your organization.", + summary: + "An alert event has occurred in your organization. Please review the details below and take action if needed.", + statusLabel: "Alert", + statusColor: "#f59e0b" + }; + } +} + +function formatDataItems( + data: Record +): { label: string; value: React.ReactNode }[] { + return Object.entries(data) + .filter(([key]) => key !== "orgId") + .map(([key, value]) => ({ + label: key + .replace(/([A-Z])/g, " $1") + .replace(/^./, (s) => s.toUpperCase()) + .trim(), + value: String(value ?? "-") + })); +} + +export const AlertNotification = ({ eventType, orgId, data }: Props) => { + const meta = getEventMeta(eventType); + const dataItems = formatDataItems(data); + + const allItems: { label: string; value: React.ReactNode }[] = [ + { label: "Organization", value: orgId }, + { label: "Status", value: ( + + {meta.statusLabel} + + )}, + { label: "Time", value: new Date().toUTCString() }, + ...dataItems + ]; + + return ( + + + {meta.previewText} + + + + + + {meta.heading} + + Hi there, + + {meta.summary} + + + + + Log in to your dashboard to view more details and + manage your alert rules. + + + + + + + + + + ); +}; + +export default AlertNotification; diff --git a/server/emails/templates/EnterpriseEditionKeyGenerated.tsx b/server/emails/templates/EnterpriseEditionKeyGenerated.tsx index 44472c8a6..82154ab7d 100644 --- a/server/emails/templates/EnterpriseEditionKeyGenerated.tsx +++ b/server/emails/templates/EnterpriseEditionKeyGenerated.tsx @@ -32,7 +32,7 @@ export const EnterpriseEditionKeyGenerated = ({ }: EnterpriseEditionKeyGeneratedProps) => { const previewText = personalUseOnly ? 
"Your Enterprise Edition key for personal use is ready" - : "Thank you for your purchase — your Enterprise Edition key is ready"; + : "Thank you for your purchase - your Enterprise Edition key is ready"; return ( diff --git a/server/index.ts b/server/index.ts index 0fc44c279..e3a6ba049 100644 --- a/server/index.ts +++ b/server/index.ts @@ -22,6 +22,7 @@ import { TraefikConfigManager } from "@server/lib/traefik/TraefikConfigManager"; import { initCleanup } from "#dynamic/cleanup"; import license from "#dynamic/license/license"; import { initLogCleanupInterval } from "@server/lib/cleanupLogs"; +import { initAcmeCertSync } from "#dynamic/lib/acmeCertSync"; import { fetchServerIp } from "@server/lib/serverIpService"; async function startServers() { @@ -39,6 +40,7 @@ async function startServers() { initTelemetryClient(); initLogCleanupInterval(); + initAcmeCertSync(); // Start all servers const apiServer = createApiServer(); diff --git a/server/lib/acmeCertSync.ts b/server/lib/acmeCertSync.ts new file mode 100644 index 000000000..d8fbd6368 --- /dev/null +++ b/server/lib/acmeCertSync.ts @@ -0,0 +1,3 @@ +export function initAcmeCertSync(): void { + // stub +} \ No newline at end of file diff --git a/server/lib/alerts/events/healthCheckEvents.ts b/server/lib/alerts/events/healthCheckEvents.ts new file mode 100644 index 000000000..dacb5287a --- /dev/null +++ b/server/lib/alerts/events/healthCheckEvents.ts @@ -0,0 +1,19 @@ +// stub + +export async function fireHealthCheckHealthyAlert( + orgId: string, + healthCheckId: number, + healthCheckName?: string, + extra?: Record +): Promise { + return; +} + +export async function fireHealthCheckNotHealthyAlert( + orgId: string, + healthCheckId: number, + healthCheckName?: string, + extra?: Record +): Promise { + return; +} diff --git a/server/lib/alerts/events/siteEvents.ts b/server/lib/alerts/events/siteEvents.ts new file mode 100644 index 000000000..8426fa9c2 --- /dev/null +++ b/server/lib/alerts/events/siteEvents.ts @@ -0,0 +1,19 
@@ +// stub + +export async function fireSiteOnlineAlert( + orgId: string, + siteId: number, + siteName?: string, + extra?: Record +): Promise { + return; +} + +export async function fireSiteOfflineAlert( + orgId: string, + siteId: number, + siteName?: string, + extra?: Record +): Promise { + return; +} diff --git a/server/lib/alerts/index.ts b/server/lib/alerts/index.ts new file mode 100644 index 000000000..017603253 --- /dev/null +++ b/server/lib/alerts/index.ts @@ -0,0 +1,2 @@ +export * from "./events/siteEvents"; +export * from "./events/healthCheckEvents"; diff --git a/server/lib/billing/tierMatrix.ts b/server/lib/billing/tierMatrix.ts index 0756ea665..5ae57c8a7 100644 --- a/server/lib/billing/tierMatrix.ts +++ b/server/lib/billing/tierMatrix.ts @@ -20,7 +20,10 @@ export enum TierFeature { FullRbac = "fullRbac", SiteProvisioningKeys = "siteProvisioningKeys", // handle downgrade by revoking keys if needed SIEM = "siem", // handle downgrade by disabling SIEM integrations - DomainNamespaces = "domainNamespaces" // handle downgrade by removing custom domain namespaces + HTTPPrivateResources = "httpPrivateResources", // handle downgrade by disabling HTTP private resources + DomainNamespaces = "domainNamespaces", // handle downgrade by removing custom domain namespaces + StandaloneHealthChecks = "standaloneHealthChecks", + AlertingRules = "alertingRules" } export const tierMatrix: Record = { @@ -58,5 +61,8 @@ export const tierMatrix: Record = { [TierFeature.FullRbac]: ["tier1", "tier2", "tier3", "enterprise"], [TierFeature.SiteProvisioningKeys]: ["tier3", "enterprise"], [TierFeature.SIEM]: ["enterprise"], - [TierFeature.DomainNamespaces]: ["tier1", "tier2", "tier3", "enterprise"] + [TierFeature.HTTPPrivateResources]: ["tier3", "enterprise"], + [TierFeature.DomainNamespaces]: ["tier1", "tier2", "tier3", "enterprise"], + [TierFeature.StandaloneHealthChecks]: ["tier2", "tier3", "enterprise"], + [TierFeature.AlertingRules]: ["tier2", "tier3", "enterprise"] }; diff --git 
a/server/lib/blueprints/applyBlueprint.ts b/server/lib/blueprints/applyBlueprint.ts index a304bb392..fd189e6ca 100644 --- a/server/lib/blueprints/applyBlueprint.ts +++ b/server/lib/blueprints/applyBlueprint.ts @@ -121,8 +121,8 @@ export async function applyBlueprint({ for (const result of clientResourcesResults) { if ( result.oldSiteResource && - result.oldSiteResource.siteId != - result.newSiteResource.siteId + JSON.stringify(result.newSites?.sort()) !== + JSON.stringify(result.oldSites?.sort()) ) { // query existing associations const existingRoleIds = await trx @@ -222,38 +222,46 @@ export async function applyBlueprint({ trx ); } else { - const [newSite] = await trx - .select() - .from(sites) - .innerJoin(newts, eq(sites.siteId, newts.siteId)) - .where( - and( - eq(sites.siteId, result.newSiteResource.siteId), - eq(sites.orgId, orgId), - eq(sites.type, "newt"), - isNotNull(sites.pubKey) + let good = true; + for (const newSite of result.newSites) { + const [site] = await trx + .select() + .from(sites) + .innerJoin(newts, eq(sites.siteId, newts.siteId)) + .where( + and( + eq(sites.siteId, newSite.siteId), + eq(sites.orgId, orgId), + eq(sites.type, "newt"), + isNotNull(sites.pubKey) + ) ) - ) - .limit(1); + .limit(1); + + if (!site) { + logger.debug( + `No newt sites found for client resource ${result.newSiteResource.siteResourceId}, skipping target update` + ); + good = false; + break; + } - if (!newSite) { logger.debug( - `No newt site found for client resource ${result.newSiteResource.siteResourceId}, skipping target update` + `Updating client resource ${result.newSiteResource.siteResourceId} on site ${newSite.siteId}` ); - continue; } - logger.debug( - `Updating client resource ${result.newSiteResource.siteResourceId} on site ${newSite.sites.siteId}` - ); + if (!good) { + continue; + } await handleMessagingForUpdatedSiteResource( result.oldSiteResource, result.newSiteResource, - { - siteId: newSite.sites.siteId, - orgId: newSite.sites.orgId - }, + 
result.newSites.map((site) => ({ + siteId: site.siteId, + orgId: result.newSiteResource.orgId + })), trx ); } diff --git a/server/lib/blueprints/clientResources.ts b/server/lib/blueprints/clientResources.ts index 80c691c63..df1fd0cfb 100644 --- a/server/lib/blueprints/clientResources.ts +++ b/server/lib/blueprints/clientResources.ts @@ -1,24 +1,104 @@ import { clients, clientSiteResources, + domains, + orgDomains, roles, roleSiteResources, + Site, SiteResource, + siteNetworks, siteResources, Transaction, userOrgs, users, - userSiteResources + userSiteResources, + networks } from "@server/db"; import { sites } from "@server/db"; -import { eq, and, ne, inArray, or } from "drizzle-orm"; +import { eq, and, ne, inArray, or, isNotNull } from "drizzle-orm"; import { Config } from "./types"; import logger from "@server/logger"; import { getNextAvailableAliasAddress } from "../ip"; +import { createCertificate } from "#dynamic/routers/certificates/createCertificate"; + +async function getDomainForSiteResource( + siteResourceId: number | undefined, + fullDomain: string, + orgId: string, + trx: Transaction +): Promise<{ subdomain: string | null; domainId: string }> { + const [fullDomainExists] = await trx + .select({ siteResourceId: siteResources.siteResourceId }) + .from(siteResources) + .where( + and( + eq(siteResources.fullDomain, fullDomain), + eq(siteResources.orgId, orgId), + siteResourceId + ? 
ne(siteResources.siteResourceId, siteResourceId) + : isNotNull(siteResources.siteResourceId) + ) + ) + .limit(1); + + if (fullDomainExists) { + throw new Error( + `Site resource already exists with domain: ${fullDomain} in org ${orgId}` + ); + } + + const possibleDomains = await trx + .select() + .from(domains) + .innerJoin(orgDomains, eq(domains.domainId, orgDomains.domainId)) + .where(and(eq(orgDomains.orgId, orgId), eq(domains.verified, true))) + .execute(); + + if (possibleDomains.length === 0) { + throw new Error( + `Domain not found for full-domain: ${fullDomain} in org ${orgId}` + ); + } + + const validDomains = possibleDomains.filter((domain) => { + if (domain.domains.type == "ns" || domain.domains.type == "wildcard") { + return ( + fullDomain === domain.domains.baseDomain || + fullDomain.endsWith(`.${domain.domains.baseDomain}`) + ); + } else if (domain.domains.type == "cname") { + return fullDomain === domain.domains.baseDomain; + } + }); + + if (validDomains.length === 0) { + throw new Error( + `Domain not found for full-domain: ${fullDomain} in org ${orgId}` + ); + } + + const domainSelection = validDomains[0].domains; + const baseDomain = domainSelection.baseDomain; + + let subdomain: string | null = null; + if (fullDomain !== baseDomain) { + subdomain = fullDomain.replace(`.${baseDomain}`, ""); + } + + await createCertificate(domainSelection.domainId, fullDomain, trx); + + return { + subdomain, + domainId: domainSelection.domainId + }; +} export type ClientResourcesResults = { newSiteResource: SiteResource; oldSiteResource?: SiteResource; + newSites: { siteId: number }[]; + oldSites: { siteId: number }[]; }[]; export async function updateClientResources( @@ -43,53 +123,104 @@ export async function updateClientResources( ) .limit(1); - const resourceSiteId = resourceData.site; - let site; + const existingSiteIds = existingResource?.networkId + ? 
await trx + .select({ siteId: sites.siteId }) + .from(siteNetworks) + .where(eq(siteNetworks.networkId, existingResource.networkId)) + : []; - if (resourceSiteId) { - // Look up site by niceId - [site] = await trx - .select({ siteId: sites.siteId }) - .from(sites) - .where( - and( - eq(sites.niceId, resourceSiteId), - eq(sites.orgId, orgId) + let allSites: { siteId: number }[] = []; + if (resourceData.site) { + let siteSingle; + const resourceSiteId = resourceData.site; + + if (resourceSiteId) { + // Look up site by niceId + [siteSingle] = await trx + .select({ siteId: sites.siteId }) + .from(sites) + .where( + and( + eq(sites.niceId, resourceSiteId), + eq(sites.orgId, orgId) + ) ) - ) - .limit(1); - } else if (siteId) { - // Use the provided siteId directly, but verify it belongs to the org - [site] = await trx - .select({ siteId: sites.siteId }) - .from(sites) - .where(and(eq(sites.siteId, siteId), eq(sites.orgId, orgId))) - .limit(1); - } else { - throw new Error(`Target site is required`); + .limit(1); + } else if (siteId) { + // Use the provided siteId directly, but verify it belongs to the org + [siteSingle] = await trx + .select({ siteId: sites.siteId }) + .from(sites) + .where( + and(eq(sites.siteId, siteId), eq(sites.orgId, orgId)) + ) + .limit(1); + } else { + throw new Error(`Target site is required`); + } + + if (!siteSingle) { + throw new Error( + `Site not found: ${resourceSiteId} in org ${orgId}` + ); + } + allSites.push(siteSingle); } - if (!site) { - throw new Error( - `Site not found: ${resourceSiteId} in org ${orgId}` - ); + if (resourceData.sites) { + for (const siteNiceId of resourceData.sites) { + const [site] = await trx + .select({ siteId: sites.siteId }) + .from(sites) + .where( + and( + eq(sites.niceId, siteNiceId), + eq(sites.orgId, orgId) + ) + ) + .limit(1); + if (!site) { + throw new Error( + `Site not found: ${siteId} in org ${orgId}` + ); + } + allSites.push(site); + } } if (existingResource) { + let domainInfo: + | { subdomain: 
string | null; domainId: string } + | undefined; + if (resourceData["full-domain"] && resourceData.mode === "http") { + domainInfo = await getDomainForSiteResource( + existingResource.siteResourceId, + resourceData["full-domain"], + orgId, + trx + ); + } + // Update existing resource const [updatedResource] = await trx .update(siteResources) .set({ name: resourceData.name || resourceNiceId, - siteId: site.siteId, mode: resourceData.mode, + ssl: resourceData.ssl, + scheme: resourceData.scheme, destination: resourceData.destination, + destinationPort: resourceData["destination-port"], enabled: true, // hardcoded for now // enabled: resourceData.enabled ?? true, alias: resourceData.alias || null, disableIcmp: resourceData["disable-icmp"], tcpPortRangeString: resourceData["tcp-ports"], - udpPortRangeString: resourceData["udp-ports"] + udpPortRangeString: resourceData["udp-ports"], + fullDomain: resourceData["full-domain"] || null, + subdomain: domainInfo ? domainInfo.subdomain : null, + domainId: domainInfo ? 
domainInfo.domainId : null }) .where( eq( @@ -100,7 +231,21 @@ export async function updateClientResources( .returning(); const siteResourceId = existingResource.siteResourceId; - const orgId = existingResource.orgId; + + if (updatedResource.networkId) { + await trx + .delete(siteNetworks) + .where( + eq(siteNetworks.networkId, updatedResource.networkId) + ); + + for (const site of allSites) { + await trx.insert(siteNetworks).values({ + siteId: site.siteId, + networkId: updatedResource.networkId + }); + } + } await trx .delete(clientSiteResources) @@ -204,37 +349,72 @@ export async function updateClientResources( results.push({ newSiteResource: updatedResource, - oldSiteResource: existingResource + oldSiteResource: existingResource, + newSites: allSites, + oldSites: existingSiteIds }); } else { let aliasAddress: string | null = null; - if (resourceData.mode == "host") { - // we can only have an alias on a host + if (resourceData.mode === "host" || resourceData.mode === "http") { aliasAddress = await getNextAvailableAliasAddress(orgId); } + let domainInfo: + | { subdomain: string | null; domainId: string } + | undefined; + if (resourceData["full-domain"] && resourceData.mode === "http") { + domainInfo = await getDomainForSiteResource( + undefined, + resourceData["full-domain"], + orgId, + trx + ); + } + + const [network] = await trx + .insert(networks) + .values({ + scope: "resource", + orgId: orgId + }) + .returning(); + // Create new resource const [newResource] = await trx .insert(siteResources) .values({ orgId: orgId, - siteId: site.siteId, niceId: resourceNiceId, + networkId: network.networkId, + defaultNetworkId: network.networkId, name: resourceData.name || resourceNiceId, mode: resourceData.mode, + ssl: resourceData.ssl, + scheme: resourceData.scheme, destination: resourceData.destination, + destinationPort: resourceData["destination-port"], enabled: true, // hardcoded for now // enabled: resourceData.enabled ?? 
true, alias: resourceData.alias || null, aliasAddress: aliasAddress, disableIcmp: resourceData["disable-icmp"], tcpPortRangeString: resourceData["tcp-ports"], - udpPortRangeString: resourceData["udp-ports"] + udpPortRangeString: resourceData["udp-ports"], + fullDomain: resourceData["full-domain"] || null, + subdomain: domainInfo ? domainInfo.subdomain : null, + domainId: domainInfo ? domainInfo.domainId : null }) .returning(); const siteResourceId = newResource.siteResourceId; + for (const site of allSites) { + await trx.insert(siteNetworks).values({ + siteId: site.siteId, + networkId: network.networkId + }); + } + const [adminRole] = await trx .select() .from(roles) @@ -324,7 +504,11 @@ export async function updateClientResources( `Created new client resource ${newResource.name} (${newResource.siteResourceId}) for org ${orgId}` ); - results.push({ newSiteResource: newResource }); + results.push({ + newSiteResource: newResource, + newSites: allSites, + oldSites: existingSiteIds + }); } } diff --git a/server/lib/blueprints/proxyResources.ts b/server/lib/blueprints/proxyResources.ts index e16da2ea5..175c8c79f 100644 --- a/server/lib/blueprints/proxyResources.ts +++ b/server/lib/blueprints/proxyResources.ts @@ -140,7 +140,10 @@ export async function updateProxyResources( const [newHealthcheck] = await trx .insert(targetHealthCheck) .values({ + name: `${targetData.hostname}:${targetData.port}`, + siteId: site.siteId, targetId: newTarget.targetId, + orgId: orgId, hcEnabled: healthcheckData?.enabled || false, hcPath: healthcheckData?.path, hcScheme: healthcheckData?.scheme, @@ -158,7 +161,9 @@ export async function updateProxyResources( healthcheckData?.["follow-redirects"], hcMethod: healthcheckData?.method, hcStatus: healthcheckData?.status, - hcHealth: "unknown" + hcHealth: "unknown", + hcHealthyThreshold: healthcheckData?.["healthy-threshold"], + hcUnhealthyThreshold: healthcheckData?.["unhealthy-threshold"] }) .returning(); @@ -522,7 +527,9 @@ export async function 
updateProxyResources( healthcheckData?.followRedirects || healthcheckData?.["follow-redirects"], hcMethod: healthcheckData?.method, - hcStatus: healthcheckData?.status + hcStatus: healthcheckData?.status, + hcHealthyThreshold: healthcheckData?.["healthy-threshold"], + hcUnhealthyThreshold: healthcheckData?.["unhealthy-threshold"] }) .where( eq( @@ -1081,6 +1088,8 @@ function checkIfHealthcheckChanged( JSON.stringify(incoming.hcHeaders) ) return true; + if (existing.hcHealthyThreshold !== incoming.hcHealthyThreshold) return true; + if (existing.hcUnhealthyThreshold !== incoming.hcUnhealthyThreshold) return true; return false; } @@ -1100,7 +1109,7 @@ function checkIfTargetChanged( return false; } -async function getDomain( +export async function getDomain( resourceId: number | undefined, fullDomain: string, orgId: string, diff --git a/server/lib/blueprints/types.ts b/server/lib/blueprints/types.ts index 6ebc509b8..913cf31ed 100644 --- a/server/lib/blueprints/types.ts +++ b/server/lib/blueprints/types.ts @@ -12,7 +12,7 @@ export const TargetHealthCheckSchema = z.object({ hostname: z.string(), port: z.int().min(1).max(65535), enabled: z.boolean().optional().default(true), - path: z.string().optional().default("/"), + path: z.string().optional(), scheme: z.string().optional(), mode: z.string().default("http"), interval: z.int().default(30), @@ -26,8 +26,10 @@ export const TargetHealthCheckSchema = z.object({ .default(null), "follow-redirects": z.boolean().default(true), followRedirects: z.boolean().optional(), // deprecated alias - method: z.string().default("GET"), - status: z.int().optional() + method: z.string().optional(), + status: z.int().optional(), + "healthy-threshold": z.int().min(1).optional().default(1), + "unhealthy-threshold": z.int().min(1).optional().default(1) }); // Schema for individual target within a resource @@ -164,6 +166,7 @@ export const ResourceSchema = z name: z.string().optional(), protocol: z.enum(["http", "tcp", "udp"]).optional(), ssl: 
z.boolean().optional(), + scheme: z.enum(["http", "https"]).optional(), "full-domain": z.string().optional(), "proxy-port": z.int().min(1).max(65535).optional(), enabled: z.boolean().optional(), @@ -325,16 +328,20 @@ export function isTargetsOnlyResource(resource: any): boolean { export const ClientResourceSchema = z .object({ name: z.string().min(1).max(255), - mode: z.enum(["host", "cidr"]), - site: z.string(), + mode: z.enum(["host", "cidr", "http"]), + site: z.string(), // DEPRECATED IN FAVOR OF sites + sites: z.array(z.string()).optional().default([]), // protocol: z.enum(["tcp", "udp"]).optional(), // proxyPort: z.int().positive().optional(), - // destinationPort: z.int().positive().optional(), + "destination-port": z.int().positive().optional(), destination: z.string().min(1), // enabled: z.boolean().default(true), "tcp-ports": portRangeStringSchema.optional().default("*"), "udp-ports": portRangeStringSchema.optional().default("*"), "disable-icmp": z.boolean().optional().default(false), + "full-domain": z.string().optional(), + ssl: z.boolean().optional(), + scheme: z.enum(["http", "https"]).optional().nullable(), alias: z .string() .regex( @@ -477,6 +484,39 @@ export const ConfigSchema = z }); } + // Enforce the full-domain uniqueness across client-resources in the same stack + const clientFullDomainMap = new Map(); + + Object.entries(config["client-resources"]).forEach( + ([resourceKey, resource]) => { + const fullDomain = resource["full-domain"]; + if (fullDomain) { + if (!clientFullDomainMap.has(fullDomain)) { + clientFullDomainMap.set(fullDomain, []); + } + clientFullDomainMap.get(fullDomain)!.push(resourceKey); + } + } + ); + + const clientFullDomainDuplicates = Array.from( + clientFullDomainMap.entries() + ) + .filter(([_, resourceKeys]) => resourceKeys.length > 1) + .map( + ([fullDomain, resourceKeys]) => + `'${fullDomain}' used by resources: ${resourceKeys.join(", ")}` + ) + .join("; "); + + if (clientFullDomainDuplicates.length !== 0) { + 
ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: ["client-resources"], + message: `Duplicate 'full-domain' values found: ${clientFullDomainDuplicates}` + }); + } + // Enforce proxy-port uniqueness within proxy-resources per protocol const protocolPortMap = new Map(); diff --git a/server/lib/encryption.ts b/server/lib/encryption.ts deleted file mode 100644 index 79caecd1a..000000000 --- a/server/lib/encryption.ts +++ /dev/null @@ -1,39 +0,0 @@ -import crypto from "crypto"; - -export function encryptData(data: string, key: Buffer): string { - const algorithm = "aes-256-gcm"; - const iv = crypto.randomBytes(16); - const cipher = crypto.createCipheriv(algorithm, key, iv); - - let encrypted = cipher.update(data, "utf8", "hex"); - encrypted += cipher.final("hex"); - - const authTag = cipher.getAuthTag(); - - // Combine IV, auth tag, and encrypted data - return iv.toString("hex") + ":" + authTag.toString("hex") + ":" + encrypted; -} - -// Helper function to decrypt data (you'll need this to read certificates) -export function decryptData(encryptedData: string, key: Buffer): string { - const algorithm = "aes-256-gcm"; - const parts = encryptedData.split(":"); - - if (parts.length !== 3) { - throw new Error("Invalid encrypted data format"); - } - - const iv = Buffer.from(parts[0], "hex"); - const authTag = Buffer.from(parts[1], "hex"); - const encrypted = parts[2]; - - const decipher = crypto.createDecipheriv(algorithm, key, iv); - decipher.setAuthTag(authTag); - - let decrypted = decipher.update(encrypted, "hex", "utf8"); - decrypted += decipher.final("utf8"); - - return decrypted; -} - -// openssl rand -hex 32 > config/encryption.key diff --git a/server/lib/ip.ts b/server/lib/ip.ts index 633983629..3e57e8c94 100644 --- a/server/lib/ip.ts +++ b/server/lib/ip.ts @@ -5,6 +5,7 @@ import config from "@server/lib/config"; import z from "zod"; import logger from "@server/logger"; import semver from "semver"; +import { getValidCertificatesForDomains } from 
"#dynamic/lib/certificates"; interface IPRange { start: bigint; @@ -477,9 +478,9 @@ export type Alias = { alias: string | null; aliasAddress: string | null }; export function generateAliasConfig(allSiteResources: SiteResource[]): Alias[] { return allSiteResources - .filter((sr) => sr.alias && sr.aliasAddress && sr.mode == "host") + .filter((sr) => sr.aliasAddress && ((sr.alias && sr.mode == "host") || (sr.fullDomain && sr.mode == "http"))) .map((sr) => ({ - alias: sr.alias, + alias: sr.alias || sr.fullDomain, aliasAddress: sr.aliasAddress })); } @@ -582,16 +583,26 @@ export type SubnetProxyTargetV2 = { protocol: "tcp" | "udp"; }[]; resourceId?: number; + protocol?: "http" | "https"; // if set, this target only applies to the specified protocol + httpTargets?: HTTPTarget[]; + tlsCert?: string; + tlsKey?: string; }; -export function generateSubnetProxyTargetV2( +export type HTTPTarget = { + destAddr: string; // must be an IP or hostname + destPort: number; + scheme: "http" | "https"; +}; + +export async function generateSubnetProxyTargetV2( siteResource: SiteResource, clients: { clientId: number; pubKey: string | null; subnet: string | null; }[] -): SubnetProxyTargetV2[] | undefined { +): Promise { if (clients.length === 0) { logger.debug( `No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.` @@ -642,6 +653,67 @@ export function generateSubnetProxyTargetV2( disableIcmp, resourceId: siteResource.siteResourceId }); + } else if (siteResource.mode == "http") { + let destination = siteResource.destination; + // check if this is a valid ip + const ipSchema = z.union([z.ipv4(), z.ipv6()]); + if (ipSchema.safeParse(destination).success) { + destination = `${destination}/32`; + } + + if ( + !siteResource.aliasAddress || + !siteResource.destinationPort || + !siteResource.scheme || + !siteResource.fullDomain + ) { + logger.debug( + `Site resource ${siteResource.siteResourceId} is in HTTP mode but is missing alias or alias address 
or destinationPort or scheme, skipping alias target generation.` + ); + return; + } + // also push a match for the alias address + let tlsCert: string | undefined; + let tlsKey: string | undefined; + + if (siteResource.ssl && siteResource.fullDomain) { + try { + const certs = await getValidCertificatesForDomains( + new Set([siteResource.fullDomain]), + true + ); + if (certs.length > 0 && certs[0].certFile && certs[0].keyFile) { + tlsCert = certs[0].certFile; + tlsKey = certs[0].keyFile; + } else { + logger.warn( + `No valid certificate found for SSL site resource ${siteResource.siteResourceId} with domain ${siteResource.fullDomain}` + ); + } + } catch (err) { + logger.error( + `Failed to retrieve certificate for site resource ${siteResource.siteResourceId} domain ${siteResource.fullDomain}: ${err}` + ); + } + } + + targets.push({ + sourcePrefixes: [], + destPrefix: `${siteResource.aliasAddress}/32`, + rewriteTo: destination, + portRange, + disableIcmp, + resourceId: siteResource.siteResourceId, + protocol: siteResource.ssl ? "https" : "http", + httpTargets: [ + { + destAddr: siteResource.destination, + destPort: siteResource.destinationPort, + scheme: siteResource.scheme + } + ], + ...(tlsCert && tlsKey ? 
{ tlsCert, tlsKey } : {}) + }); } if (targets.length == 0) { diff --git a/server/lib/rebuildClientAssociations.ts b/server/lib/rebuildClientAssociations.ts index d636a2f2b..40c5a5bf7 100644 --- a/server/lib/rebuildClientAssociations.ts +++ b/server/lib/rebuildClientAssociations.ts @@ -11,17 +11,16 @@ import { roleSiteResources, Site, SiteResource, + siteNetworks, siteResources, sites, Transaction, userOrgRoles, - userOrgs, userSiteResources } from "@server/db"; import { and, eq, inArray, ne } from "drizzle-orm"; import { - addPeer as newtAddPeer, deletePeer as newtDeletePeer } from "@server/routers/newt/peers"; import { @@ -35,7 +34,6 @@ import { generateRemoteSubnets, generateSubnetProxyTargetV2, parseEndpoint, - formatEndpoint } from "@server/lib/ip"; import { addPeerData, @@ -48,15 +46,27 @@ export async function getClientSiteResourceAccess( siteResource: SiteResource, trx: Transaction | typeof db = db ) { - // get the site - const [site] = await trx - .select() - .from(sites) - .where(eq(sites.siteId, siteResource.siteId)) - .limit(1); + // get all sites associated with this siteResource via its network + const sitesList = siteResource.networkId + ? 
await trx + .select() + .from(sites) + .innerJoin( + siteNetworks, + eq(siteNetworks.siteId, sites.siteId) + ) + .where(eq(siteNetworks.networkId, siteResource.networkId)) + .then((rows) => rows.map((row) => row.sites)) + : []; - if (!site) { - throw new Error(`Site with ID ${siteResource.siteId} not found`); + logger.debug( + `rebuildClientAssociations: [getClientSiteResourceAccess] siteResourceId=${siteResource.siteResourceId} networkId=${siteResource.networkId} siteCount=${sitesList.length} siteIds=[${sitesList.map((s) => s.siteId).join(", ")}]` + ); + + if (sitesList.length === 0) { + logger.warn( + `No sites found for siteResource ${siteResource.siteResourceId} with networkId ${siteResource.networkId}` + ); } const roleIds = await trx @@ -136,8 +146,12 @@ export async function getClientSiteResourceAccess( const mergedAllClients = Array.from(allClientsMap.values()); const mergedAllClientIds = mergedAllClients.map((c) => c.clientId); + logger.debug( + `rebuildClientAssociations: [getClientSiteResourceAccess] siteResourceId=${siteResource.siteResourceId} mergedClientCount=${mergedAllClientIds.length} clientIds=[${mergedAllClientIds.join(", ")}] (userBased=${newAllClients.length} direct=${directClients.length})` + ); + return { - site, + sitesList, mergedAllClients, mergedAllClientIds }; @@ -153,40 +167,59 @@ export async function rebuildClientAssociationsFromSiteResource( subnet: string | null; }[]; }> { - const siteId = siteResource.siteId; + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] START siteResourceId=${siteResource.siteResourceId} networkId=${siteResource.networkId} orgId=${siteResource.orgId}` + ); - const { site, mergedAllClients, mergedAllClientIds } = + const { sitesList, mergedAllClients, mergedAllClientIds } = await getClientSiteResourceAccess(siteResource, trx); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] access resolved 
siteResourceId=${siteResource.siteResourceId} siteCount=${sitesList.length} siteIds=[${sitesList.map((s) => s.siteId).join(", ")}] mergedClientCount=${mergedAllClients.length} clientIds=[${mergedAllClientIds.join(", ")}]` + ); + /////////// process the client-siteResource associations /////////// - // get all of the clients associated with other resources on this site - const allUpdatedClientsFromOtherResourcesOnThisSite = await trx - .select({ - clientId: clientSiteResourcesAssociationsCache.clientId - }) - .from(clientSiteResourcesAssociationsCache) - .innerJoin( - siteResources, - eq( - clientSiteResourcesAssociationsCache.siteResourceId, - siteResources.siteResourceId - ) - ) - .where( - and( - eq(siteResources.siteId, siteId), - ne(siteResources.siteResourceId, siteResource.siteResourceId) - ) - ); + // get all of the clients associated with other resources in the same network, + // joined through siteNetworks so we know which siteId each client belongs to + const allUpdatedClientsFromOtherResourcesOnThisSite = siteResource.networkId + ? await trx + .select({ + clientId: clientSiteResourcesAssociationsCache.clientId, + siteId: siteNetworks.siteId + }) + .from(clientSiteResourcesAssociationsCache) + .innerJoin( + siteResources, + eq( + clientSiteResourcesAssociationsCache.siteResourceId, + siteResources.siteResourceId + ) + ) + .innerJoin( + siteNetworks, + eq(siteNetworks.networkId, siteResources.networkId) + ) + .where( + and( + eq(siteResources.networkId, siteResource.networkId), + ne( + siteResources.siteResourceId, + siteResource.siteResourceId + ) + ) + ) + : []; - const allClientIdsFromOtherResourcesOnThisSite = Array.from( - new Set( - allUpdatedClientsFromOtherResourcesOnThisSite.map( - (row) => row.clientId - ) - ) - ); + // Build a per-site map so the loop below can check by siteId rather than + // across the entire network. 
+ const clientsFromOtherResourcesBySite = new Map>(); + for (const row of allUpdatedClientsFromOtherResourcesOnThisSite) { + if (!clientsFromOtherResourcesBySite.has(row.siteId)) { + clientsFromOtherResourcesBySite.set(row.siteId, new Set()); + } + clientsFromOtherResourcesBySite.get(row.siteId)!.add(row.clientId); + } const existingClientSiteResources = await trx .select({ @@ -204,6 +237,10 @@ export async function rebuildClientAssociationsFromSiteResource( (row) => row.clientId ); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} existingResourceClientIds=[${existingClientSiteResourceIds.join(", ")}]` + ); + // Get full client details for existing resource clients (needed for sending delete messages) const existingResourceClients = existingClientSiteResourceIds.length > 0 @@ -223,6 +260,10 @@ export async function rebuildClientAssociationsFromSiteResource( (clientId) => !existingClientSiteResourceIds.includes(clientId) ); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} resourceClients toAdd=[${clientSiteResourcesToAdd.join(", ")}]` + ); + const clientSiteResourcesToInsert = clientSiteResourcesToAdd.map( (clientId) => ({ clientId, @@ -231,17 +272,34 @@ export async function rebuildClientAssociationsFromSiteResource( ); if (clientSiteResourcesToInsert.length > 0) { + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} inserting ${clientSiteResourcesToInsert.length} clientSiteResource association(s)` + ); await trx .insert(clientSiteResourcesAssociationsCache) .values(clientSiteResourcesToInsert) .returning(); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} inserted clientSiteResource associations` + ); + } else { + 
logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} no clientSiteResource associations to insert` + ); } const clientSiteResourcesToRemove = existingClientSiteResourceIds.filter( (clientId) => !mergedAllClientIds.includes(clientId) ); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} resourceClients toRemove=[${clientSiteResourcesToRemove.join(", ")}]` + ); + if (clientSiteResourcesToRemove.length > 0) { + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} deleting ${clientSiteResourcesToRemove.length} clientSiteResource association(s)` + ); await trx .delete(clientSiteResourcesAssociationsCache) .where( @@ -260,82 +318,127 @@ export async function rebuildClientAssociationsFromSiteResource( /////////// process the client-site associations /////////// - const existingClientSites = await trx - .select({ - clientId: clientSitesAssociationsCache.clientId - }) - .from(clientSitesAssociationsCache) - .where(eq(clientSitesAssociationsCache.siteId, siteResource.siteId)); - - const existingClientSiteIds = existingClientSites.map( - (row) => row.clientId + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} beginning client-site association loop over ${sitesList.length} site(s)` ); - // Get full client details for existing clients (needed for sending delete messages) - const existingClients = await trx - .select({ - clientId: clients.clientId, - pubKey: clients.pubKey, - subnet: clients.subnet - }) - .from(clients) - .where(inArray(clients.clientId, existingClientSiteIds)); + for (const site of sitesList) { + const siteId = site.siteId; - const clientSitesToAdd = mergedAllClientIds.filter( - (clientId) => - !existingClientSiteIds.includes(clientId) 
&& - !allClientIdsFromOtherResourcesOnThisSite.includes(clientId) // dont remove if there is still another connection for another site resource - ); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] processing siteId=${siteId} for siteResourceId=${siteResource.siteResourceId}` + ); - const clientSitesToInsert = clientSitesToAdd.map((clientId) => ({ - clientId, - siteId - })); + const existingClientSites = await trx + .select({ + clientId: clientSitesAssociationsCache.clientId + }) + .from(clientSitesAssociationsCache) + .where(eq(clientSitesAssociationsCache.siteId, siteId)); - if (clientSitesToInsert.length > 0) { - await trx - .insert(clientSitesAssociationsCache) - .values(clientSitesToInsert) - .returning(); - } + const existingClientSiteIds = existingClientSites.map( + (row) => row.clientId + ); - // Now remove any client-site associations that should no longer exist - const clientSitesToRemove = existingClientSiteIds.filter( - (clientId) => - !mergedAllClientIds.includes(clientId) && - !allClientIdsFromOtherResourcesOnThisSite.includes(clientId) // dont remove if there is still another connection for another site resource - ); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} existingClientSiteIds=[${existingClientSiteIds.join(", ")}]` + ); - if (clientSitesToRemove.length > 0) { - await trx - .delete(clientSitesAssociationsCache) - .where( - and( - eq(clientSitesAssociationsCache.siteId, siteId), - inArray( - clientSitesAssociationsCache.clientId, - clientSitesToRemove - ) - ) + // Get full client details for existing clients (needed for sending delete messages) + const existingClients = + existingClientSiteIds.length > 0 + ? 
await trx + .select({ + clientId: clients.clientId, + pubKey: clients.pubKey, + subnet: clients.subnet + }) + .from(clients) + .where(inArray(clients.clientId, existingClientSiteIds)) + : []; + + const otherResourceClientIds = clientsFromOtherResourcesBySite.get(siteId) ?? new Set(); + + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} otherResourceClientIds=[${[...otherResourceClientIds].join(", ")}] mergedAllClientIds=[${mergedAllClientIds.join(", ")}]` + ); + + const clientSitesToAdd = mergedAllClientIds.filter( + (clientId) => + !existingClientSiteIds.includes(clientId) && + !otherResourceClientIds.has(clientId) // dont add if already connected via another site resource + ); + + const clientSitesToInsert = clientSitesToAdd.map((clientId) => ({ + clientId, + siteId + })); + + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} clientSites toAdd=[${clientSitesToAdd.join(", ")}]` + ); + + if (clientSitesToInsert.length > 0) { + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} inserting ${clientSitesToInsert.length} clientSite association(s)` ); + await trx + .insert(clientSitesAssociationsCache) + .values(clientSitesToInsert) + .returning(); + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} inserted clientSite associations` + ); + } else { + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} no clientSite associations to insert` + ); + } + + // Now remove any client-site associations that should no longer exist + const clientSitesToRemove = existingClientSiteIds.filter( + (clientId) => + !mergedAllClientIds.includes(clientId) && + !otherResourceClientIds.has(clientId) // dont remove if there is still another connection for another site resource + ); + + logger.debug( + 
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} clientSites toRemove=[${clientSitesToRemove.join(", ")}]` + ); + + if (clientSitesToRemove.length > 0) { + logger.debug( + `rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} deleting ${clientSitesToRemove.length} clientSite association(s)` + ); + await trx + .delete(clientSitesAssociationsCache) + .where( + and( + eq(clientSitesAssociationsCache.siteId, siteId), + inArray( + clientSitesAssociationsCache.clientId, + clientSitesToRemove + ) + ) + ); + } + + // Now handle the messages to add/remove peers on both the newt and olm sides + await handleMessagesForSiteClients( + site, + siteId, + mergedAllClients, + existingClients, + clientSitesToAdd, + clientSitesToRemove, + trx + ); } - /////////// send the messages /////////// - - // Now handle the messages to add/remove peers on both the newt and olm sides - await handleMessagesForSiteClients( - site, - siteId, - mergedAllClients, - existingClients, - clientSitesToAdd, - clientSitesToRemove, - trx - ); - // Handle subnet proxy target updates for the resource associations await handleSubnetProxyTargetUpdates( siteResource, + sitesList, mergedAllClients, existingResourceClients, clientSiteResourcesToAdd, @@ -624,6 +727,7 @@ export async function updateClientSiteDestinations( async function handleSubnetProxyTargetUpdates( siteResource: SiteResource, + sitesList: Site[], allClients: { clientId: number; pubKey: string | null; @@ -638,125 +742,138 @@ async function handleSubnetProxyTargetUpdates( clientSiteResourcesToRemove: number[], trx: Transaction | typeof db = db ): Promise { - // Get the newt for this site - const [newt] = await trx - .select() - .from(newts) - .where(eq(newts.siteId, siteResource.siteId)) - .limit(1); + const proxyJobs: Promise[] = []; + const olmJobs: Promise[] = []; - if (!newt) { - logger.warn( - `Newt not found for site ${siteResource.siteId}, skipping subnet proxy target 
updates` - ); - return; - } + for (const siteData of sitesList) { + const siteId = siteData.siteId; - const proxyJobs = []; - const olmJobs = []; - // Generate targets for added associations - if (clientSiteResourcesToAdd.length > 0) { - const addedClients = allClients.filter((client) => - clientSiteResourcesToAdd.includes(client.clientId) - ); + // Get the newt for this site + const [newt] = await trx + .select() + .from(newts) + .where(eq(newts.siteId, siteId)) + .limit(1); - if (addedClients.length > 0) { - const targetsToAdd = generateSubnetProxyTargetV2( - siteResource, - addedClients + if (!newt) { + logger.warn( + `Newt not found for site ${siteId}, skipping subnet proxy target updates` ); - - if (targetsToAdd) { - proxyJobs.push( - addSubnetProxyTargets( - newt.newtId, - targetsToAdd, - newt.version - ) - ); - } - - for (const client of addedClients) { - olmJobs.push( - addPeerData( - client.clientId, - siteResource.siteId, - generateRemoteSubnets([siteResource]), - generateAliasConfig([siteResource]) - ) - ); - } + continue; } - } - // here we use the existingSiteResource from BEFORE we updated the destination so we dont need to worry about updating destinations here - - // Generate targets for removed associations - if (clientSiteResourcesToRemove.length > 0) { - const removedClients = existingClients.filter((client) => - clientSiteResourcesToRemove.includes(client.clientId) - ); - - if (removedClients.length > 0) { - const targetsToRemove = generateSubnetProxyTargetV2( - siteResource, - removedClients + // Generate targets for added associations + if (clientSiteResourcesToAdd.length > 0) { + const addedClients = allClients.filter((client) => + clientSiteResourcesToAdd.includes(client.clientId) ); - if (targetsToRemove) { - proxyJobs.push( - removeSubnetProxyTargets( - newt.newtId, - targetsToRemove, - newt.version - ) + if (addedClients.length > 0) { + const targetsToAdd = await generateSubnetProxyTargetV2( + siteResource, + addedClients ); - } - for 
(const client of removedClients) { - // Check if this client still has access to another resource on this site with the same destination - const destinationStillInUse = await trx - .select() - .from(siteResources) - .innerJoin( - clientSiteResourcesAssociationsCache, - eq( - clientSiteResourcesAssociationsCache.siteResourceId, - siteResources.siteResourceId - ) - ) - .where( - and( - eq( - clientSiteResourcesAssociationsCache.clientId, - client.clientId - ), - eq(siteResources.siteId, siteResource.siteId), - eq( - siteResources.destination, - siteResource.destination - ), - ne( - siteResources.siteResourceId, - siteResource.siteResourceId - ) + if (targetsToAdd) { + proxyJobs.push( + addSubnetProxyTargets( + newt.newtId, + targetsToAdd, + newt.version ) ); + } - // Only remove remote subnet if no other resource uses the same destination - const remoteSubnetsToRemove = - destinationStillInUse.length > 0 - ? [] - : generateRemoteSubnets([siteResource]); + for (const client of addedClients) { + olmJobs.push( + addPeerData( + client.clientId, + siteId, + generateRemoteSubnets([siteResource]), + generateAliasConfig([siteResource]) + ) + ); + } + } + } - olmJobs.push( - removePeerData( - client.clientId, - siteResource.siteId, - remoteSubnetsToRemove, - generateAliasConfig([siteResource]) - ) + // here we use the existingSiteResource from BEFORE we updated the destination so we dont need to worry about updating destinations here + + // Generate targets for removed associations + if (clientSiteResourcesToRemove.length > 0) { + const removedClients = existingClients.filter((client) => + clientSiteResourcesToRemove.includes(client.clientId) + ); + + if (removedClients.length > 0) { + const targetsToRemove = await generateSubnetProxyTargetV2( + siteResource, + removedClients ); + + if (targetsToRemove) { + proxyJobs.push( + removeSubnetProxyTargets( + newt.newtId, + targetsToRemove, + newt.version + ) + ); + } + + for (const client of removedClients) { + // Check if this 
client still has access to another resource + // on this specific site with the same destination. We scope + // by siteId (via siteNetworks) rather than networkId because + // removePeerData operates per-site - a resource on a different + // site sharing the same network should not block removal here. + const destinationStillInUse = await trx + .select() + .from(siteResources) + .innerJoin( + clientSiteResourcesAssociationsCache, + eq( + clientSiteResourcesAssociationsCache.siteResourceId, + siteResources.siteResourceId + ) + ) + .innerJoin( + siteNetworks, + eq(siteNetworks.networkId, siteResources.networkId) + ) + .where( + and( + eq( + clientSiteResourcesAssociationsCache.clientId, + client.clientId + ), + eq(siteNetworks.siteId, siteId), + eq( + siteResources.destination, + siteResource.destination + ), + ne( + siteResources.siteResourceId, + siteResource.siteResourceId + ) + ) + ); + + // Only remove remote subnet if no other resource uses the same destination + const remoteSubnetsToRemove = + destinationStillInUse.length > 0 + ? [] + : generateRemoteSubnets([siteResource]); + + olmJobs.push( + removePeerData( + client.clientId, + siteId, + remoteSubnetsToRemove, + generateAliasConfig([siteResource]) + ) + ); + } } } } @@ -863,10 +980,25 @@ export async function rebuildClientAssociationsFromClient( ) : []; - // Group by siteId for site-level associations - const newSiteIds = Array.from( - new Set(newSiteResources.map((sr) => sr.siteId)) + // Group by siteId for site-level associations - look up via siteNetworks since + // siteResources no longer carries a direct siteId column. + const networkIds = Array.from( + new Set( + newSiteResources + .map((sr) => sr.networkId) + .filter((id): id is number => id !== null) + ) ); + const newSiteIds = + networkIds.length > 0 + ? 
await trx + .select({ siteId: siteNetworks.siteId }) + .from(siteNetworks) + .where(inArray(siteNetworks.networkId, networkIds)) + .then((rows) => + Array.from(new Set(rows.map((r) => r.siteId))) + ) + : []; /////////// Process client-siteResource associations /////////// @@ -1139,13 +1271,45 @@ async function handleMessagesForClientResources( resourcesToAdd.includes(r.siteResourceId) ); + // Build (resource, siteId) pairs by looking up siteNetworks for each resource's networkId + const addedNetworkIds = Array.from( + new Set( + addedResources + .map((r) => r.networkId) + .filter((id): id is number => id !== null) + ) + ); + const addedSiteNetworkRows = + addedNetworkIds.length > 0 + ? await trx + .select({ + networkId: siteNetworks.networkId, + siteId: siteNetworks.siteId + }) + .from(siteNetworks) + .where(inArray(siteNetworks.networkId, addedNetworkIds)) + : []; + const addedNetworkToSites = new Map(); + for (const row of addedSiteNetworkRows) { + if (!addedNetworkToSites.has(row.networkId)) { + addedNetworkToSites.set(row.networkId, []); + } + addedNetworkToSites.get(row.networkId)!.push(row.siteId); + } + // Group by site for proxy updates const addedBySite = new Map(); for (const resource of addedResources) { - if (!addedBySite.has(resource.siteId)) { - addedBySite.set(resource.siteId, []); + const siteIds = + resource.networkId != null + ? (addedNetworkToSites.get(resource.networkId) ?? 
[]) + : []; + for (const siteId of siteIds) { + if (!addedBySite.has(siteId)) { + addedBySite.set(siteId, []); + } + addedBySite.get(siteId)!.push(resource); } - addedBySite.get(resource.siteId)!.push(resource); } // Add subnet proxy targets for each site @@ -1164,7 +1328,7 @@ async function handleMessagesForClientResources( } for (const resource of resources) { - const targets = generateSubnetProxyTargetV2(resource, [ + const targets = await generateSubnetProxyTargetV2(resource, [ { clientId: client.clientId, pubKey: client.pubKey, @@ -1187,7 +1351,7 @@ async function handleMessagesForClientResources( olmJobs.push( addPeerData( client.clientId, - resource.siteId, + siteId, generateRemoteSubnets([resource]), generateAliasConfig([resource]) ) @@ -1199,7 +1363,7 @@ async function handleMessagesForClientResources( error.message.includes("not found") ) { logger.debug( - `Olm data not found for client ${client.clientId} and site ${resource.siteId}, skipping removal` + `Olm data not found for client ${client.clientId} and site ${siteId}, skipping addition` ); } else { throw error; @@ -1216,13 +1380,45 @@ async function handleMessagesForClientResources( .from(siteResources) .where(inArray(siteResources.siteResourceId, resourcesToRemove)); + // Build (resource, siteId) pairs via siteNetworks + const removedNetworkIds = Array.from( + new Set( + removedResources + .map((r) => r.networkId) + .filter((id): id is number => id !== null) + ) + ); + const removedSiteNetworkRows = + removedNetworkIds.length > 0 + ? 
await trx + .select({ + networkId: siteNetworks.networkId, + siteId: siteNetworks.siteId + }) + .from(siteNetworks) + .where(inArray(siteNetworks.networkId, removedNetworkIds)) + : []; + const removedNetworkToSites = new Map(); + for (const row of removedSiteNetworkRows) { + if (!removedNetworkToSites.has(row.networkId)) { + removedNetworkToSites.set(row.networkId, []); + } + removedNetworkToSites.get(row.networkId)!.push(row.siteId); + } + // Group by site for proxy updates const removedBySite = new Map(); for (const resource of removedResources) { - if (!removedBySite.has(resource.siteId)) { - removedBySite.set(resource.siteId, []); + const siteIds = + resource.networkId != null + ? (removedNetworkToSites.get(resource.networkId) ?? []) + : []; + for (const siteId of siteIds) { + if (!removedBySite.has(siteId)) { + removedBySite.set(siteId, []); + } + removedBySite.get(siteId)!.push(resource); } - removedBySite.get(resource.siteId)!.push(resource); } // Remove subnet proxy targets for each site @@ -1241,7 +1437,7 @@ async function handleMessagesForClientResources( } for (const resource of resources) { - const targets = generateSubnetProxyTargetV2(resource, [ + const targets = await generateSubnetProxyTargetV2(resource, [ { clientId: client.clientId, pubKey: client.pubKey, @@ -1260,7 +1456,11 @@ async function handleMessagesForClientResources( } try { - // Check if this client still has access to another resource on this site with the same destination + // Check if this client still has access to another resource + // on this specific site with the same destination. We scope + // by siteId (via siteNetworks) rather than networkId because + // removePeerData operates per-site - a resource on a different + // site sharing the same network should not block removal here. 
const destinationStillInUse = await trx .select() .from(siteResources) @@ -1271,13 +1471,17 @@ async function handleMessagesForClientResources( siteResources.siteResourceId ) ) + .innerJoin( + siteNetworks, + eq(siteNetworks.networkId, siteResources.networkId) + ) .where( and( eq( clientSiteResourcesAssociationsCache.clientId, client.clientId ), - eq(siteResources.siteId, resource.siteId), + eq(siteNetworks.siteId, siteId), eq( siteResources.destination, resource.destination @@ -1299,7 +1503,7 @@ async function handleMessagesForClientResources( olmJobs.push( removePeerData( client.clientId, - resource.siteId, + siteId, remoteSubnetsToRemove, generateAliasConfig([resource]) ) @@ -1311,7 +1515,7 @@ async function handleMessagesForClientResources( error.message.includes("not found") ) { logger.debug( - `Olm data not found for client ${client.clientId} and site ${resource.siteId}, skipping removal` + `Olm data not found for client ${client.clientId} and site ${siteId}, skipping removal` ); } else { throw error; diff --git a/server/lib/statusHistory.ts b/server/lib/statusHistory.ts new file mode 100644 index 000000000..001a0b93b --- /dev/null +++ b/server/lib/statusHistory.ts @@ -0,0 +1,133 @@ +import { z } from "zod"; + +export const statusHistoryQuerySchema = z + .object({ + days: z + .string() + .optional() + .transform((v) => (v ? 
parseInt(v, 10) : 90)), + }) + .pipe( + z.object({ + days: z.number().int().min(1).max(365), + }) + ); + +export interface StatusHistoryDayBucket { + date: string; // ISO date "YYYY-MM-DD" + uptimePercent: number; // 0-100 + totalDowntimeSeconds: number; + downtimeWindows: { start: number; end: number | null; status: string }[]; + status: "good" | "degraded" | "bad" | "no_data"; +} + +export interface StatusHistoryResponse { + entityType: string; + entityId: number; + days: StatusHistoryDayBucket[]; + overallUptimePercent: number; + totalDowntimeSeconds: number; +} + +export function computeBuckets( + events: { entityType: string; entityId: number; orgId: string; status: string; timestamp: number; id: number }[], + days: number +): { buckets: StatusHistoryDayBucket[]; totalDowntime: number } { + const nowSec = Math.floor(Date.now() / 1000); + const buckets: StatusHistoryDayBucket[] = []; + let totalDowntime = 0; + + for (let d = 0; d < days; d++) { + const dayStartSec = nowSec - (days - d) * 86400; + const dayEndSec = dayStartSec + 86400; + + const dayEvents = events.filter( + (e) => e.timestamp >= dayStartSec && e.timestamp < dayEndSec + ); + + // Determine the status at the start of this day (last event before dayStart) + const lastBeforeDay = [...events] + .filter((e) => e.timestamp < dayStartSec) + .at(-1); + + const currentStatus = lastBeforeDay?.status ?? 
null; + + const windows: { start: number; end: number | null; status: string }[] = []; + let dayDowntime = 0; + + let windowStart = dayStartSec; + let windowStatus = currentStatus; + + for (const evt of dayEvents) { + if (windowStatus !== null && windowStatus !== evt.status) { + const windowEnd = evt.timestamp; + const isDown = + windowStatus === "offline" || + windowStatus === "unhealthy" || + windowStatus === "unknown"; + if (isDown) { + dayDowntime += windowEnd - windowStart; + windows.push({ + start: windowStart, + end: windowEnd, + status: windowStatus, + }); + } + } + windowStart = evt.timestamp; + windowStatus = evt.status; + } + + // Close the final window at the end of the day (or now if day hasn't ended) + if (windowStatus !== null) { + const finalEnd = Math.min(dayEndSec, nowSec); + const isDown = + windowStatus === "offline" || + windowStatus === "unhealthy" || + windowStatus === "unknown"; + if (isDown && finalEnd > windowStart) { + dayDowntime += finalEnd - windowStart; + windows.push({ + start: windowStart, + end: finalEnd, + status: windowStatus, + }); + } + } + + totalDowntime += dayDowntime; + + const effectiveDayLength = Math.max( + 0, + Math.min(dayEndSec, nowSec) - dayStartSec + ); + const uptimePct = + effectiveDayLength > 0 + ? 
Math.max( + 0, + ((effectiveDayLength - dayDowntime) / + effectiveDayLength) * + 100 + ) + : 100; + + const dateStr = new Date(dayStartSec * 1000).toISOString().slice(0, 10); + + let status: StatusHistoryDayBucket["status"] = "no_data"; + if (currentStatus !== null || dayEvents.length > 0) { + if (uptimePct >= 99) status = "good"; + else if (uptimePct >= 50) status = "degraded"; + else status = "bad"; + } + + buckets.push({ + date: dateStr, + uptimePercent: Math.round(uptimePct * 100) / 100, + totalDowntimeSeconds: dayDowntime, + downtimeWindows: windows, + status, + }); + } + + return { buckets, totalDowntime }; +} diff --git a/server/lib/traefik/TraefikConfigManager.ts b/server/lib/traefik/TraefikConfigManager.ts index 6ef3c45b5..c8fcfafdc 100644 --- a/server/lib/traefik/TraefikConfigManager.ts +++ b/server/lib/traefik/TraefikConfigManager.ts @@ -1011,7 +1011,7 @@ export class TraefikConfigManager { ); if (!isUnused) { - // Domain is still active — remove from pending deletion if it was queued + // Domain is still active - remove from pending deletion if it was queued if (this.pendingDeletion.has(dirName)) { logger.info( `Certificate ${dirName} is active again, cancelling pending deletion` @@ -1021,7 +1021,7 @@ export class TraefikConfigManager { continue; } - // Domain is unused — add to pending deletion or decrement its counter + // Domain is unused - add to pending deletion or decrement its counter if (!this.pendingDeletion.has(dirName)) { const graceCycles = 3; logger.info( @@ -1036,7 +1036,7 @@ export class TraefikConfigManager { ); this.pendingDeletion.set(dirName, remaining); } else { - // Grace period expired — actually delete now + // Grace period expired - actually delete now this.pendingDeletion.delete(dirName); const domainDir = path.join(certsPath, dirName); diff --git a/server/lib/traefik/pathEncoding.test.ts b/server/lib/traefik/pathEncoding.test.ts index 83d53a039..f0318807a 100644 --- a/server/lib/traefik/pathEncoding.test.ts +++ 
b/server/lib/traefik/pathEncoding.test.ts @@ -24,7 +24,7 @@ function encodePath(path: string | null | undefined): string { /** * Exact replica of the OLD key computation from upstream main. - * Uses sanitize() for paths — this is what had the collision bug. + * Uses sanitize() for paths - this is what had the collision bug. */ function oldKeyComputation( resourceId: number, @@ -44,7 +44,7 @@ function oldKeyComputation( /** * Replica of the NEW key computation from our fix. - * Uses encodePath() for paths — collision-free. + * Uses encodePath() for paths - collision-free. */ function newKeyComputation( resourceId: number, @@ -195,11 +195,11 @@ function runTests() { true, "/a/b and /a-b MUST have different keys" ); - console.log(" PASS: collision fix — /a/b vs /a-b have different keys"); + console.log(" PASS: collision fix - /a/b vs /a-b have different keys"); passed++; } - // Test 9: demonstrate the old bug — old code maps /a/b and /a-b to same key + // Test 9: demonstrate the old bug - old code maps /a/b and /a-b to same key { const oldKeyAB = oldKeyComputation(1, "/a/b", "prefix", null, null); const oldKeyDash = oldKeyComputation(1, "/a-b", "prefix", null, null); @@ -208,11 +208,11 @@ function runTests() { oldKeyDash, "old code MUST have this collision (confirms the bug exists)" ); - console.log(" PASS: confirmed old code bug — /a/b and /a-b collided"); + console.log(" PASS: confirmed old code bug - /a/b and /a-b collided"); passed++; } - // Test 10: /api/v1 and /api-v1 — old code collision, new code fixes it + // Test 10: /api/v1 and /api-v1 - old code collision, new code fixes it { const oldKey1 = oldKeyComputation(1, "/api/v1", "prefix", null, null); const oldKey2 = oldKeyComputation(1, "/api-v1", "prefix", null, null); @@ -229,11 +229,11 @@ function runTests() { true, "new code must separate /api/v1 and /api-v1" ); - console.log(" PASS: collision fix — /api/v1 vs /api-v1"); + console.log(" PASS: collision fix - /api/v1 vs /api-v1"); passed++; } - // Test 11: 
/app.v2 and /app/v2 and /app-v2 — three-way collision fixed + // Test 11: /app.v2 and /app/v2 and /app-v2 - three-way collision fixed { const a = newKeyComputation(1, "/app.v2", "prefix", null, null); const b = newKeyComputation(1, "/app/v2", "prefix", null, null); @@ -245,14 +245,14 @@ function runTests() { "three paths must produce three unique keys" ); console.log( - " PASS: collision fix — three-way /app.v2, /app/v2, /app-v2" + " PASS: collision fix - three-way /app.v2, /app/v2, /app-v2" ); passed++; } // ── Edge cases ─────────────────────────────────────────────────── - // Test 12: same path in different resources — always separate + // Test 12: same path in different resources - always separate { const key1 = newKeyComputation(1, "/api", "prefix", null, null); const key2 = newKeyComputation(2, "/api", "prefix", null, null); @@ -261,11 +261,11 @@ function runTests() { true, "different resources with same path must have different keys" ); - console.log(" PASS: edge case — same path, different resources"); + console.log(" PASS: edge case - same path, different resources"); passed++; } - // Test 13: same resource, different pathMatchType — separate keys + // Test 13: same resource, different pathMatchType - separate keys { const exact = newKeyComputation(1, "/api", "exact", null, null); const prefix = newKeyComputation(1, "/api", "prefix", null, null); @@ -274,11 +274,11 @@ function runTests() { true, "exact vs prefix must have different keys" ); - console.log(" PASS: edge case — same path, different match types"); + console.log(" PASS: edge case - same path, different match types"); passed++; } - // Test 14: same resource and path, different rewrite config — separate keys + // Test 14: same resource and path, different rewrite config - separate keys { const noRewrite = newKeyComputation(1, "/api", "prefix", null, null); const withRewrite = newKeyComputation( @@ -293,7 +293,7 @@ function runTests() { true, "with vs without rewrite must have different keys" ); - 
console.log(" PASS: edge case — same path, different rewrite config"); + console.log(" PASS: edge case - same path, different rewrite config"); passed++; } @@ -308,7 +308,7 @@ function runTests() { paths.length, "special URL chars must produce unique keys" ); - console.log(" PASS: edge case — special URL characters in paths"); + console.log(" PASS: edge case - special URL characters in paths"); passed++; } diff --git a/server/middlewares/verifyDomainAccess.ts b/server/middlewares/verifyDomainAccess.ts index c9ecf42e0..d37f6725d 100644 --- a/server/middlewares/verifyDomainAccess.ts +++ b/server/middlewares/verifyDomainAccess.ts @@ -15,7 +15,7 @@ export async function verifyDomainAccess( try { const userId = req.user!.userId; const domainId = - req.params.domainId || req.body.apiKeyId || req.query.apiKeyId; + req.params.domainId; const orgId = req.params.orgId; if (!userId) { diff --git a/server/private/lib/acmeCertSync.ts b/server/private/lib/acmeCertSync.ts new file mode 100644 index 000000000..faa45b08e --- /dev/null +++ b/server/private/lib/acmeCertSync.ts @@ -0,0 +1,478 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import fs from "fs"; +import crypto from "crypto"; +import { + certificates, + clients, + clientSiteResourcesAssociationsCache, + db, + domains, + newts, + siteNetworks, + SiteResource, + siteResources +} from "@server/db"; +import { and, eq } from "drizzle-orm"; +import { encrypt, decrypt } from "@server/lib/crypto"; +import logger from "@server/logger"; +import privateConfig from "#private/lib/config"; +import config from "@server/lib/config"; +import { + generateSubnetProxyTargetV2, + SubnetProxyTargetV2 +} from "@server/lib/ip"; +import { updateTargets } from "@server/routers/client/targets"; +import cache from "#private/lib/cache"; +import { build } from "@server/build"; + +interface AcmeCert { + domain: { main: string; sans?: string[] }; + certificate: string; + key: string; + Store: string; +} + +interface AcmeJson { + [resolver: string]: { + Certificates: AcmeCert[]; + }; +} + +async function pushCertUpdateToAffectedNewts( + domain: string, + domainId: string | null, + oldCertPem: string | null, + oldKeyPem: string | null +): Promise { + // Find all SSL-enabled HTTP site resources that use this cert's domain + let affectedResources: SiteResource[] = []; + + if (domainId) { + affectedResources = await db + .select() + .from(siteResources) + .where( + and( + eq(siteResources.domainId, domainId), + eq(siteResources.ssl, true) + ) + ); + } else { + // Fallback: match by exact fullDomain when no domainId is available + affectedResources = await db + .select() + .from(siteResources) + .where( + and( + eq(siteResources.fullDomain, domain), + eq(siteResources.ssl, true) + ) + ); + } + + if (affectedResources.length === 0) { + logger.debug( + `acmeCertSync: no affected site resources for cert domain "${domain}"` + ); + return; + } + + logger.info( + `acmeCertSync: pushing cert update to ${affectedResources.length} affected site resource(s) for domain "${domain}"` + ); + + for (const resource of affectedResources) { + try { + // Get all sites for this 
resource via siteNetworks + const resourceSiteRows = resource.networkId + ? await db + .select({ siteId: siteNetworks.siteId }) + .from(siteNetworks) + .where(eq(siteNetworks.networkId, resource.networkId)) + : []; + + if (resourceSiteRows.length === 0) { + logger.debug( + `acmeCertSync: no sites for resource ${resource.siteResourceId}, skipping` + ); + continue; + } + + // Get all clients with access to this resource + const resourceClients = await db + .select({ + clientId: clients.clientId, + pubKey: clients.pubKey, + subnet: clients.subnet + }) + .from(clients) + .innerJoin( + clientSiteResourcesAssociationsCache, + eq( + clients.clientId, + clientSiteResourcesAssociationsCache.clientId + ) + ) + .where( + eq( + clientSiteResourcesAssociationsCache.siteResourceId, + resource.siteResourceId + ) + ); + + if (resourceClients.length === 0) { + logger.debug( + `acmeCertSync: no clients for resource ${resource.siteResourceId}, skipping` + ); + continue; + } + + // Invalidate the cert cache so generateSubnetProxyTargetV2 fetches fresh data + if (resource.fullDomain) { + await cache.del(`cert:${resource.fullDomain}`); + } + + // Generate target once - same cert applies to all sites for this resource + const newTargets = await generateSubnetProxyTargetV2( + resource, + resourceClients + ); + + if (!newTargets) { + logger.debug( + `acmeCertSync: could not generate target for resource ${resource.siteResourceId}, skipping` + ); + continue; + } + + // Construct the old targets - same routing shape but with the previous cert/key. + // The newt only uses destPrefix/sourcePrefixes for removal, but we keep the + // semantics correct so the update message accurately reflects what changed. + const oldTargets: SubnetProxyTargetV2[] = newTargets.map((t) => ({ + ...t, + tlsCert: oldCertPem ?? undefined, + tlsKey: oldKeyPem ?? 
undefined + })); + + // Push update to each site's newt + for (const { siteId } of resourceSiteRows) { + const [newt] = await db + .select() + .from(newts) + .where(eq(newts.siteId, siteId)) + .limit(1); + + if (!newt) { + logger.debug( + `acmeCertSync: no newt found for site ${siteId}, skipping resource ${resource.siteResourceId}` + ); + continue; + } + + await updateTargets( + newt.newtId, + { oldTargets: oldTargets, newTargets: newTargets }, + newt.version + ); + + logger.info( + `acmeCertSync: pushed cert update to newt for site ${siteId}, resource ${resource.siteResourceId}` + ); + } + } catch (err) { + logger.error( + `acmeCertSync: error pushing cert update for resource ${resource?.siteResourceId}: ${err}` + ); + } + } +} + +async function findDomainId(certDomain: string): Promise { + // Strip wildcard prefix before lookup (*.example.com -> example.com) + const lookupDomain = certDomain.startsWith("*.") + ? certDomain.slice(2) + : certDomain; + + // 1. Exact baseDomain match (any domain type) + const exactMatch = await db + .select({ domainId: domains.domainId }) + .from(domains) + .where(eq(domains.baseDomain, lookupDomain)) + .limit(1); + + if (exactMatch.length > 0) { + return exactMatch[0].domainId; + } + + // 2. Walk up the domain hierarchy looking for a wildcard-type domain whose + // baseDomain is a suffix of the cert domain. e.g. cert "sub.example.com" + // matches a wildcard domain with baseDomain "example.com". 
+ const parts = lookupDomain.split("."); + for (let i = 1; i < parts.length; i++) { + const candidate = parts.slice(i).join("."); + if (!candidate) continue; + + const wildcardMatch = await db + .select({ domainId: domains.domainId }) + .from(domains) + .where( + and( + eq(domains.baseDomain, candidate), + eq(domains.type, "wildcard") + ) + ) + .limit(1); + + if (wildcardMatch.length > 0) { + return wildcardMatch[0].domainId; + } + } + + return null; +} + +function extractFirstCert(pemBundle: string): string | null { + const match = pemBundle.match( + /-----BEGIN CERTIFICATE-----[\s\S]+?-----END CERTIFICATE-----/ + ); + return match ? match[0] : null; +} + +async function syncAcmeCerts( + acmeJsonPath: string, + resolver: string +): Promise { + let raw: string; + try { + raw = fs.readFileSync(acmeJsonPath, "utf8"); + } catch (err) { + logger.debug(`acmeCertSync: could not read ${acmeJsonPath}: ${err}`); + return; + } + + let acmeJson: AcmeJson; + try { + acmeJson = JSON.parse(raw); + } catch (err) { + logger.debug(`acmeCertSync: could not parse acme.json: ${err}`); + return; + } + + const resolverData = acmeJson[resolver]; + if (!resolverData || !Array.isArray(resolverData.Certificates)) { + logger.debug( + `acmeCertSync: no certificates found for resolver "${resolver}"` + ); + return; + } + + for (const cert of resolverData.Certificates) { + const domain = cert.domain?.main; + + if (!domain) { + logger.debug(`acmeCertSync: skipping cert with missing domain`); + continue; + } + + if (!cert.certificate || !cert.key) { + logger.debug( + `acmeCertSync: skipping cert for ${domain} - empty certificate or key field` + ); + continue; + } + + const certPem = Buffer.from(cert.certificate, "base64").toString( + "utf8" + ); + const keyPem = Buffer.from(cert.key, "base64").toString("utf8"); + + if (!certPem.trim() || !keyPem.trim()) { + logger.debug( + `acmeCertSync: skipping cert for ${domain} - blank PEM after base64 decode` + ); + continue; + } + + // Check if cert already 
exists in DB + const existing = await db + .select() + .from(certificates) + .where(eq(certificates.domain, domain)) + .limit(1); + + let oldCertPem: string | null = null; + let oldKeyPem: string | null = null; + + if (existing.length > 0 && existing[0].certFile) { + try { + const storedCertPem = decrypt( + existing[0].certFile, + config.getRawConfig().server.secret! + ); + if (storedCertPem === certPem) { + logger.debug( + `acmeCertSync: cert for ${domain} is unchanged, skipping` + ); + continue; + } + // Cert has changed; capture old values so we can send a correct + // update message to the newt after the DB write. + oldCertPem = storedCertPem; + if (existing[0].keyFile) { + try { + oldKeyPem = decrypt( + existing[0].keyFile, + config.getRawConfig().server.secret! + ); + } catch (keyErr) { + logger.debug( + `acmeCertSync: could not decrypt stored key for ${domain}: ${keyErr}` + ); + } + } + } catch (err) { + // Decryption failure means we should proceed with the update + logger.debug( + `acmeCertSync: could not decrypt stored cert for ${domain}, will update: ${err}` + ); + } + } + + // Parse cert expiry from the first cert in the PEM bundle + let expiresAt: number | null = null; + const firstCertPem = extractFirstCert(certPem); + if (firstCertPem) { + try { + const x509 = new crypto.X509Certificate(firstCertPem); + expiresAt = Math.floor(new Date(x509.validTo).getTime() / 1000); + } catch (err) { + logger.debug( + `acmeCertSync: could not parse cert expiry for ${domain}: ${err}` + ); + } + } + + const wildcard = domain.startsWith("*."); + const encryptedCert = encrypt( + certPem, + config.getRawConfig().server.secret! + ); + const encryptedKey = encrypt( + keyPem, + config.getRawConfig().server.secret! 
+ ); + const now = Math.floor(Date.now() / 1000); + + const domainId = await findDomainId(domain); + if (domainId) { + logger.debug( + `acmeCertSync: resolved domainId "${domainId}" for cert domain "${domain}"` + ); + } else { + logger.debug( + `acmeCertSync: no matching domain record found for cert domain "${domain}"` + ); + } + + if (existing.length > 0) { + await db + .update(certificates) + .set({ + certFile: encryptedCert, + keyFile: encryptedKey, + status: "valid", + expiresAt, + updatedAt: now, + wildcard, + ...(domainId !== null && { domainId }) + }) + .where(eq(certificates.domain, domain)); + + logger.info( + `acmeCertSync: updated certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})` + ); + + await pushCertUpdateToAffectedNewts( + domain, + domainId, + oldCertPem, + oldKeyPem + ); + } else { + await db.insert(certificates).values({ + domain, + domainId, + certFile: encryptedCert, + keyFile: encryptedKey, + status: "valid", + expiresAt, + createdAt: now, + updatedAt: now, + wildcard + }); + + logger.info( + `acmeCertSync: inserted new certificate for ${domain} (expires ${expiresAt ? 
new Date(expiresAt * 1000).toISOString() : "unknown"})` + ); + + // For a brand-new cert, push to any SSL resources that were waiting for it + await pushCertUpdateToAffectedNewts(domain, domainId, null, null); + } + } +} + +export function initAcmeCertSync(): void { + if (build == "saas") { + logger.debug(`acmeCertSync: skipping ACME cert sync in SaaS build`); + return; + } + + const privateConfigData = privateConfig.getRawPrivateConfig(); + + if (!privateConfigData.flags?.enable_acme_cert_sync) { + logger.debug( + `acmeCertSync: ACME cert sync is disabled by config flag, skipping` + ); + return; + } + + if (privateConfigData.flags.use_pangolin_dns) { + logger.debug( + `acmeCertSync: ACME cert sync requires use_pangolin_dns flag to be disabled, skipping` + ); + return; + } + + const acmeJsonPath = + privateConfigData.acme?.acme_json_path ?? + "config/letsencrypt/acme.json"; + const resolver = privateConfigData.acme?.resolver ?? "letsencrypt"; + const intervalMs = privateConfigData.acme?.sync_interval_ms ?? 5000; + + logger.info( + `acmeCertSync: starting ACME cert sync from "${acmeJsonPath}" using resolver "${resolver}" every ${intervalMs}ms` + ); + + // Run immediately on init, then on the configured interval + syncAcmeCerts(acmeJsonPath, resolver).catch((err) => { + logger.error(`acmeCertSync: error during initial sync: ${err}`); + }); + + setInterval(() => { + syncAcmeCerts(acmeJsonPath, resolver).catch((err) => { + logger.error(`acmeCertSync: error during sync: ${err}`); + }); + }, intervalMs); +} diff --git a/server/private/lib/alerts/events/healthCheckEvents.ts b/server/private/lib/alerts/events/healthCheckEvents.ts new file mode 100644 index 000000000..594e27aec --- /dev/null +++ b/server/private/lib/alerts/events/healthCheckEvents.ts @@ -0,0 +1,91 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. 
+ * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import logger from "@server/logger"; +import { processAlerts } from "../processAlerts"; + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/** + * Fire a `health_check_healthy` alert for the given health check. + * + * Call this after a previously-failing health check has recovered so that any + * matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the health check. + * @param healthCheckId - Numeric primary key of the health check. + * @param healthCheckName - Human-readable name shown in notifications (optional). + * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireHealthCheckHealthyAlert( + orgId: string, + healthCheckId: number, + healthCheckName?: string | null, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "health_check_healthy", + orgId, + healthCheckId, + data: { + healthCheckId, + ...(healthCheckName != null ? { healthCheckName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireHealthCheckHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`, + err + ); + } +} + +/** + * Fire a `health_check_unhealthy` alert for the given health check. + * + * Call this after a health check has been detected as failing so that any + * matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the health check. + * @param healthCheckId - Numeric primary key of the health check. + * @param healthCheckName - Human-readable name shown in notifications (optional). 
+ * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireHealthCheckNotHealthyAlert( + orgId: string, + healthCheckId: number, + healthCheckName?: string | null, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "health_check_unhealthy", + orgId, + healthCheckId, + data: { + healthCheckId, + ...(healthCheckName != null ? { healthCheckName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireHealthCheckNotHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`, + err + ); + } +} diff --git a/server/private/lib/alerts/events/resourceEvents.ts b/server/private/lib/alerts/events/resourceEvents.ts new file mode 100644 index 000000000..5c9b168e8 --- /dev/null +++ b/server/private/lib/alerts/events/resourceEvents.ts @@ -0,0 +1,127 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import logger from "@server/logger"; +import { processAlerts } from "../processAlerts"; + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/** + * Fire a `resource_healthy` alert for the given resource. + * + * Call this after a previously-unhealthy resource has recovered so that any + * matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the resource. + * @param resourceId - Numeric primary key of the resource. + * @param resourceName - Human-readable name shown in notifications (optional). 
+ * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireResourceHealthyAlert( + orgId: string, + resourceId: number, + resourceName?: string | null, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "resource_healthy", + orgId, + resourceId, + data: { + resourceId, + ...(resourceName != null ? { resourceName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireResourceHealthyAlert: unexpected error for resourceId ${resourceId}`, + err + ); + } +} + +/** + * Fire a `resource_unhealthy` alert for the given resource. + * + * Call this after a resource has been detected as unhealthy so that any + * matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the resource. + * @param resourceId - Numeric primary key of the resource. + * @param resourceName - Human-readable name shown in notifications (optional). + * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireResourceUnhealthyAlert( + orgId: string, + resourceId: number, + resourceName?: string | null, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "resource_unhealthy", + orgId, + resourceId, + data: { + resourceId, + ...(resourceName != null ? { resourceName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireResourceUnhealthyAlert: unexpected error for resourceId ${resourceId}`, + err + ); + } +} + +/** + * Fire a `resource_toggle` alert for the given resource. + * + * Call this when a resource's enabled/disabled status is toggled so that any + * matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the resource. + * @param resourceId - Numeric primary key of the resource. + * @param resourceName - Human-readable name shown in notifications (optional). 
+ * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireResourceToggleAlert( + orgId: string, + resourceId: number, + resourceName?: string | null, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "resource_toggle", + orgId, + resourceId, + data: { + resourceId, + ...(resourceName != null ? { resourceName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireResourceToggleAlert: unexpected error for resourceId ${resourceId}`, + err + ); + } +} \ No newline at end of file diff --git a/server/private/lib/alerts/events/siteEvents.ts b/server/private/lib/alerts/events/siteEvents.ts new file mode 100644 index 000000000..27c4cb8bf --- /dev/null +++ b/server/private/lib/alerts/events/siteEvents.ts @@ -0,0 +1,91 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import logger from "@server/logger"; +import { processAlerts } from "../processAlerts"; + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/** + * Fire a `site_online` alert for the given site. + * + * Call this after the site has been confirmed reachable / connected so that + * any matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the site. + * @param siteId - Numeric primary key of the site. + * @param siteName - Human-readable name shown in notifications (optional). + * @param extra - Any additional key/value pairs to include in the payload. 
+ */ +export async function fireSiteOnlineAlert( + orgId: string, + siteId: number, + siteName?: string, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "site_online", + orgId, + siteId, + data: { + siteId, + ...(siteName != null ? { siteName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireSiteOnlineAlert: unexpected error for siteId ${siteId}`, + err + ); + } +} + +/** + * Fire a `site_offline` alert for the given site. + * + * Call this after the site has been detected as unreachable / disconnected so + * that any matching `alertRules` can dispatch their email and webhook actions. + * + * @param orgId - Organisation that owns the site. + * @param siteId - Numeric primary key of the site. + * @param siteName - Human-readable name shown in notifications (optional). + * @param extra - Any additional key/value pairs to include in the payload. + */ +export async function fireSiteOfflineAlert( + orgId: string, + siteId: number, + siteName?: string, + extra?: Record +): Promise { + try { + await processAlerts({ + eventType: "site_offline", + orgId, + siteId, + data: { + siteId, + ...(siteName != null ? { siteName } : {}), + ...extra + } + }); + } catch (err) { + logger.error( + `fireSiteOfflineAlert: unexpected error for siteId ${siteId}`, + err + ); + } +} \ No newline at end of file diff --git a/server/private/lib/alerts/index.ts b/server/private/lib/alerts/index.ts new file mode 100644 index 000000000..3460e965d --- /dev/null +++ b/server/private/lib/alerts/index.ts @@ -0,0 +1,19 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +export * from "./types"; +export * from "./processAlerts"; +export * from "./sendAlertWebhook"; +export * from "./sendAlertEmail"; +export * from "./events/siteEvents"; +export * from "./events/healthCheckEvents"; \ No newline at end of file diff --git a/server/private/lib/alerts/processAlerts.ts b/server/private/lib/alerts/processAlerts.ts new file mode 100644 index 000000000..5e098a1f2 --- /dev/null +++ b/server/private/lib/alerts/processAlerts.ts @@ -0,0 +1,333 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { and, eq, or } from "drizzle-orm"; +import { db } from "@server/db"; +import { + alertRules, + alertSites, + alertHealthChecks, + alertResources, + alertEmailActions, + alertEmailRecipients, + alertWebhookActions, + userOrgRoles, + users +} from "@server/db"; +import config from "@server/lib/config"; +import { decrypt } from "@server/lib/crypto"; +import logger from "@server/logger"; +import { AlertContext, WebhookAlertConfig } from "./types"; +import { sendAlertWebhook } from "./sendAlertWebhook"; +import { sendAlertEmail } from "./sendAlertEmail"; + +/** + * Core alert processing pipeline. + * + * Given an `AlertContext`, this function: + * 1. Finds all enabled `alertRules` whose `eventType` matches and whose + * `siteId` / `healthCheckId` is listed in the `alertSites` / + * `alertHealthChecks` junction tables (or has no junction entries, + * meaning "match all"). + * 2. Applies per-rule cooldown gating. + * 3. Dispatches emails and webhook POSTs for every attached action. + * 4. Updates `lastTriggeredAt` and `lastSentAt` timestamps. 
+ */ +export async function processAlerts(context: AlertContext): Promise { + const now = Date.now(); + + // ------------------------------------------------------------------ + // 1. Find matching alert rules + // ------------------------------------------------------------------ + // Rules with allSites / allHealthChecks / allResources set to true match + // ANY event of that type. Rules without these flags set match only the + // specific IDs listed in the junction tables. + const baseConditions = and( + eq(alertRules.orgId, context.orgId), + eq(alertRules.eventType, context.eventType), + eq(alertRules.enabled, true) + ); + + let rules: (typeof alertRules.$inferSelect)[]; + + if (context.siteId != null) { + const rows = await db + .select() + .from(alertRules) + .leftJoin( + alertSites, + eq(alertSites.alertRuleId, alertRules.alertRuleId) + ) + .where( + and( + baseConditions, + or( + eq(alertRules.allSites, true), + eq(alertSites.siteId, context.siteId) + ) + ) + ); + // Deduplicate in case a rule matched on multiple junction rows + const seen = new Set(); + rules = rows + .map((r) => r.alertRules) + .filter((r) => { + if (seen.has(r.alertRuleId)) return false; + seen.add(r.alertRuleId); + return true; + }); + } else if (context.healthCheckId != null) { + const rows = await db + .select() + .from(alertRules) + .leftJoin( + alertHealthChecks, + eq(alertHealthChecks.alertRuleId, alertRules.alertRuleId) + ) + .where( + and( + baseConditions, + or( + eq(alertRules.allHealthChecks, true), + eq(alertHealthChecks.healthCheckId, context.healthCheckId) + ) + ) + ); + const seen = new Set(); + rules = rows + .map((r) => r.alertRules) + .filter((r) => { + if (seen.has(r.alertRuleId)) return false; + seen.add(r.alertRuleId); + return true; + }); + } else if (context.resourceId != null) { + const rows = await db + .select() + .from(alertRules) + .leftJoin( + alertResources, + eq(alertResources.alertRuleId, alertRules.alertRuleId) + ) + .where( + and( + baseConditions, + or( 
+ eq(alertRules.allResources, true), + eq(alertResources.resourceId, context.resourceId) + ) + ) + ); + const seen = new Set(); + rules = rows + .map((r) => r.alertRules) + .filter((r) => { + if (seen.has(r.alertRuleId)) return false; + seen.add(r.alertRuleId); + return true; + }); + } else { + rules = []; + } + + if (rules.length === 0) { + logger.debug( + `processAlerts: no matching rules for event "${context.eventType}" in org "${context.orgId}"` + ); + return; + } + + for (const rule of rules) { + try { + await processRule(rule, context, now); + } catch (err) { + logger.error( + `processAlerts: error processing rule ${rule.alertRuleId} for event "${context.eventType}"`, + err + ); + } + } +} + +// --------------------------------------------------------------------------- +// Per-rule processing +// --------------------------------------------------------------------------- + +async function processRule( + rule: typeof alertRules.$inferSelect, + context: AlertContext, + now: number +): Promise { + // ------------------------------------------------------------------ + // 2. Cooldown check + // ------------------------------------------------------------------ + if ( + rule.lastTriggeredAt != null && + now - rule.lastTriggeredAt < rule.cooldownSeconds * 1000 + ) { + const remainingSeconds = Math.ceil( + (rule.cooldownSeconds * 1000 - (now - rule.lastTriggeredAt)) / 1000 + ); + logger.debug( + `processAlerts: rule ${rule.alertRuleId} is in cooldown – ${remainingSeconds}s remaining` + ); + return; + } + + // ------------------------------------------------------------------ + // 3. Mark rule as triggered (optimistic update – before sending so we + // don't re-trigger if the send is slow) + // ------------------------------------------------------------------ + await db + .update(alertRules) + .set({ lastTriggeredAt: now }) + .where(eq(alertRules.alertRuleId, rule.alertRuleId)); + + // ------------------------------------------------------------------ + // 4. 
Process email actions + // ------------------------------------------------------------------ + const emailActions = await db + .select() + .from(alertEmailActions) + .where( + and( + eq(alertEmailActions.alertRuleId, rule.alertRuleId), + eq(alertEmailActions.enabled, true) + ) + ); + + for (const action of emailActions) { + try { + const recipients = await resolveEmailRecipients(action.emailActionId); + if (recipients.length > 0) { + await sendAlertEmail(recipients, context); + await db + .update(alertEmailActions) + .set({ lastSentAt: now }) + .where( + eq(alertEmailActions.emailActionId, action.emailActionId) + ); + } + } catch (err) { + logger.error( + `processAlerts: failed to send alert email for action ${action.emailActionId}`, + err + ); + } + } + + // ------------------------------------------------------------------ + // 5. Process webhook actions + // ------------------------------------------------------------------ + const webhookActions = await db + .select() + .from(alertWebhookActions) + .where( + and( + eq(alertWebhookActions.alertRuleId, rule.alertRuleId), + eq(alertWebhookActions.enabled, true) + ) + ); + + const serverSecret = config.getRawConfig().server.secret!; + + for (const action of webhookActions) { + try { + let webhookConfig: WebhookAlertConfig = { authType: "none" }; + + if (action.config) { + try { + const decrypted = decrypt(action.config, serverSecret); + webhookConfig = JSON.parse(decrypted) as WebhookAlertConfig; + } catch (err) { + logger.error( + `processAlerts: failed to decrypt webhook config for action ${action.webhookActionId}`, + err + ); + continue; + } + } + + await sendAlertWebhook(action.webhookUrl, webhookConfig, context); + await db + .update(alertWebhookActions) + .set({ lastSentAt: now }) + .where( + eq( + alertWebhookActions.webhookActionId, + action.webhookActionId + ) + ); + } catch (err) { + logger.error( + `processAlerts: failed to send alert webhook for action ${action.webhookActionId}`, + err + ); + } + } +} 
+ +// --------------------------------------------------------------------------- +// Email recipient resolution +// --------------------------------------------------------------------------- + +/** + * Resolves all email addresses for a given `emailActionId`. + * + * Recipients may be: + * - Direct users (by `userId`) + * - All users in a role (by `roleId`, resolved via `userOrgRoles`) + * - Direct external email addresses + */ +async function resolveEmailRecipients(emailActionId: number): Promise { + const rows = await db + .select() + .from(alertEmailRecipients) + .where(eq(alertEmailRecipients.emailActionId, emailActionId)); + + const emailSet = new Set(); + + for (const row of rows) { + if (row.email) { + emailSet.add(row.email); + } + + if (row.userId) { + const [user] = await db + .select({ email: users.email }) + .from(users) + .where(eq(users.userId, row.userId)) + .limit(1); + if (user?.email) { + emailSet.add(user.email); + } + } + + if (row.roleId) { + // Find all users with this role via userOrgRoles + const roleUsers = await db + .select({ email: users.email }) + .from(userOrgRoles) + .innerJoin(users, eq(userOrgRoles.userId, users.userId)) + .where(eq(userOrgRoles.roleId, Number(row.roleId))); + + for (const u of roleUsers) { + if (u.email) { + emailSet.add(u.email); + } + } + } + } + + return Array.from(emailSet); +} \ No newline at end of file diff --git a/server/private/lib/alerts/sendAlertEmail.ts b/server/private/lib/alerts/sendAlertEmail.ts new file mode 100644 index 000000000..634598158 --- /dev/null +++ b/server/private/lib/alerts/sendAlertEmail.ts @@ -0,0 +1,97 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. 
+ * + * This file is not licensed under the AGPLv3. + */ + +import { sendEmail } from "@server/emails"; +import AlertNotification from "@server/emails/templates/AlertNotification"; +import config from "@server/lib/config"; +import logger from "@server/logger"; +import { AlertContext } from "./types"; + +/** + * Sends an alert notification email to every address in `recipients`. + * + * Each recipient receives an individual email (no BCC list) so that delivery + * failures for one address do not affect the others. Failures per recipient + * are logged and swallowed – the caller only sees an error if something goes + * wrong before the send loop. + */ +export async function sendAlertEmail( + recipients: string[], + context: AlertContext +): Promise { + if (recipients.length === 0) { + return; + } + + const from = config.getNoReplyEmail(); + const subject = buildSubject(context); + + for (const to of recipients) { + try { + await sendEmail( + AlertNotification({ + eventType: context.eventType, + orgId: context.orgId, + data: context.data + }), + { + from, + to, + subject + } + ); + logger.debug( + `Alert email sent to "${to}" for event "${context.eventType}"` + ); + } catch (err) { + logger.error( + `sendAlertEmail: failed to send alert email to "${to}" for event "${context.eventType}"`, + err + ); + } + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function buildSubject(context: AlertContext): string { + switch (context.eventType) { + case "site_online": + return "[Alert] Site Back Online"; + case "site_offline": + return "[Alert] Site Offline"; + case "site_toggle": + return "[Alert] Site Status Changed"; + case "health_check_healthy": + return "[Alert] Health Check Recovered"; + case "health_check_unhealthy": + return "[Alert] Health Check Failing"; + case "health_check_toggle": + return "[Alert] Health Check Status Changed"; + 
case "resource_healthy": + return "[Alert] Resource Healthy"; + case "resource_unhealthy": + return "[Alert] Resource Unhealthy"; + case "resource_toggle": + return "[Alert] Resource Status Changed"; + default: { + // Exhaustiveness fallback – should never be reached with a + // well-typed caller, but keeps runtime behaviour predictable. + const _exhaustive: never = context.eventType; + void _exhaustive; + return "[Alert] Event Notification"; + } + } +} diff --git a/server/private/lib/alerts/sendAlertWebhook.ts b/server/private/lib/alerts/sendAlertWebhook.ts new file mode 100644 index 000000000..52c687cbc --- /dev/null +++ b/server/private/lib/alerts/sendAlertWebhook.ts @@ -0,0 +1,140 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import logger from "@server/logger"; +import { AlertContext, WebhookAlertConfig } from "./types"; + +const REQUEST_TIMEOUT_MS = 15_000; + +/** + * Sends a single webhook POST for an alert event. + * + * The payload shape is: + * ```json + * { + * "event": "site_online", + * "timestamp": "2024-01-01T00:00:00.000Z", + * "data": { ... } + * } + * ``` + * + * Authentication headers are applied according to `config.authType`, + * mirroring the same strategies supported by HttpLogDestination: + * none | bearer | basic | custom. 
+ */ +export async function sendAlertWebhook( + url: string, + webhookConfig: WebhookAlertConfig, + context: AlertContext +): Promise { + const payload = { + event: context.eventType, + timestamp: new Date().toISOString(), + data: { + orgId: context.orgId, + ...context.data + } + }; + + const body = JSON.stringify(payload); + const headers = buildHeaders(webhookConfig); + + const controller = new AbortController(); + const timeoutHandle = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS); + + let response: Response; + try { + response = await fetch(url, { + method: webhookConfig.method ?? "POST", + headers, + body, + signal: controller.signal + }); + } catch (err: unknown) { + const isAbort = err instanceof Error && err.name === "AbortError"; + if (isAbort) { + throw new Error( + `Alert webhook: request to "${url}" timed out after ${REQUEST_TIMEOUT_MS} ms` + ); + } + const msg = err instanceof Error ? err.message : String(err); + throw new Error(`Alert webhook: request to "${url}" failed – ${msg}`); + } finally { + clearTimeout(timeoutHandle); + } + + if (!response.ok) { + let snippet = ""; + try { + const text = await response.text(); + snippet = text.slice(0, 300); + } catch { + // best-effort + } + throw new Error( + `Alert webhook: server at "${url}" returned HTTP ${response.status} ${response.statusText}` + + (snippet ? 
` – ${snippet}` : "") + ); + } + + logger.debug(`Alert webhook sent successfully to "${url}" for event "${context.eventType}"`); +} + +// --------------------------------------------------------------------------- +// Header construction (mirrors HttpLogDestination.buildHeaders) +// --------------------------------------------------------------------------- + +function buildHeaders(webhookConfig: WebhookAlertConfig): Record { + const headers: Record = { + "Content-Type": "application/json" + }; + + switch (webhookConfig.authType) { + case "bearer": { + const token = webhookConfig.bearerToken?.trim(); + if (token) { + headers["Authorization"] = `Bearer ${token}`; + } + break; + } + case "basic": { + const creds = webhookConfig.basicCredentials?.trim(); + if (creds) { + const encoded = Buffer.from(creds).toString("base64"); + headers["Authorization"] = `Basic ${encoded}`; + } + break; + } + case "custom": { + const name = webhookConfig.customHeaderName?.trim(); + const value = webhookConfig.customHeaderValue ?? ""; + if (name) { + headers[name] = value; + } + break; + } + case "none": + default: + break; + } + + if (webhookConfig.headers) { + for (const { key, value } of webhookConfig.headers) { + if (key.trim()) { + headers[key.trim()] = value; + } + } + } + + return headers; +} \ No newline at end of file diff --git a/server/private/lib/alerts/types.ts b/server/private/lib/alerts/types.ts new file mode 100644 index 000000000..0679b7ece --- /dev/null +++ b/server/private/lib/alerts/types.ts @@ -0,0 +1,70 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +// --------------------------------------------------------------------------- +// Alert event types +// --------------------------------------------------------------------------- + +export type AlertEventType = + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | "resource_unhealthy" + | "resource_toggle"; + +// --------------------------------------------------------------------------- +// Webhook authentication config (stored as encrypted JSON in the DB) +// --------------------------------------------------------------------------- + +export type WebhookAuthType = "none" | "bearer" | "basic" | "custom"; + +/** + * Stored as an encrypted JSON blob in `alertWebhookActions.config`. + */ +export interface WebhookAlertConfig { + /** Authentication strategy for the webhook endpoint */ + authType: WebhookAuthType; + /** Bearer token – used when authType === "bearer" */ + bearerToken?: string; + /** Basic credentials – "username:password" – used when authType === "basic" */ + basicCredentials?: string; + /** Custom header name – used when authType === "custom" */ + customHeaderName?: string; + /** Custom header value – used when authType === "custom" */ + customHeaderValue?: string; + /** Extra headers to send with every webhook request */ + headers?: Array<{ key: string; value: string }>; + /** HTTP method (default POST) */ + method?: string; +} + +// --------------------------------------------------------------------------- +// Internal alert event passed through the processing pipeline +// --------------------------------------------------------------------------- + +export interface AlertContext { + eventType: AlertEventType; + orgId: string; + /** Set for site_online / site_offline events */ + siteId?: number; + /** Set for health_check_* events */ + healthCheckId?: number; + /** Set for resource_* events */ + resourceId?: number; + /** 
Human-readable context data included in emails and webhook payloads */ + data: Record; +} diff --git a/server/private/lib/certificates.ts b/server/private/lib/certificates.ts index ae076c48e..af6f6fdaa 100644 --- a/server/private/lib/certificates.ts +++ b/server/private/lib/certificates.ts @@ -11,23 +11,15 @@ * This file is not licensed under the AGPLv3. */ -import config from "./config"; +import privateConfig from "./config"; +import config from "@server/lib/config"; import { certificates, db } from "@server/db"; import { and, eq, isNotNull, or, inArray, sql } from "drizzle-orm"; -import { decryptData } from "@server/lib/encryption"; +import { decrypt } from "@server/lib/crypto"; import logger from "@server/logger"; import cache from "#private/lib/cache"; -let encryptionKeyHex = ""; -let encryptionKey: Buffer; -function loadEncryptData() { - if (encryptionKey) { - return; // already loaded - } - encryptionKeyHex = config.getRawPrivateConfig().server.encryption_key; - encryptionKey = Buffer.from(encryptionKeyHex, "hex"); -} // Define the return type for clarity and type safety export type CertificateResult = { @@ -45,7 +37,7 @@ export async function getValidCertificatesForDomains( domains: Set, useCache: boolean = true ): Promise> { - loadEncryptData(); // Ensure encryption key is loaded + const finalResults: CertificateResult[] = []; const domainsToQuery = new Set(); @@ -68,7 +60,7 @@ export async function getValidCertificatesForDomains( // 2. 
If all domains were resolved from the cache, return early if (domainsToQuery.size === 0) { - const decryptedResults = decryptFinalResults(finalResults); + const decryptedResults = decryptFinalResults(finalResults, config.getRawConfig().server.secret!); return decryptedResults; } @@ -173,22 +165,23 @@ export async function getValidCertificatesForDomains( } } - const decryptedResults = decryptFinalResults(finalResults); + const decryptedResults = decryptFinalResults(finalResults, config.getRawConfig().server.secret!); return decryptedResults; } function decryptFinalResults( - finalResults: CertificateResult[] + finalResults: CertificateResult[], + secret: string ): CertificateResult[] { const validCertsDecrypted = finalResults.map((cert) => { // Decrypt and save certificate file - const decryptedCert = decryptData( + const decryptedCert = decrypt( cert.certFile!, // is not null from query - encryptionKey + secret ); // Decrypt and save key file - const decryptedKey = decryptData(cert.keyFile!, encryptionKey); + const decryptedKey = decrypt(cert.keyFile!, secret); // Return only the certificate data without org information return { diff --git a/server/private/lib/logConnectionAudit.ts b/server/private/lib/logConnectionAudit.ts index 8cc3a1e52..039b75ec9 100644 --- a/server/private/lib/logConnectionAudit.ts +++ b/server/private/lib/logConnectionAudit.ts @@ -153,7 +153,7 @@ export async function flushConnectionLogToDb(): Promise { ); } - // Stop processing further batches from this snapshot — they will + // Stop processing further batches from this snapshot - they will // be picked up via the re-queued records on the next flush. 
const remaining = snapshot.slice(i + INSERT_BATCH_SIZE); if (remaining.length > 0) { @@ -180,7 +180,7 @@ const flushTimer = setInterval(async () => { }, FLUSH_INTERVAL_MS); // Calling unref() means this timer will not keep the Node.js event loop alive -// on its own — the process can still exit normally when there is no other work +// on its own - the process can still exit normally when there is no other work // left. The graceful-shutdown path will call flushConnectionLogToDb() explicitly // before process.exit(), so no data is lost. flushTimer.unref(); @@ -223,7 +223,7 @@ export function logConnectionAudit(record: ConnectionLogRecord): void { buffer.push(record); if (buffer.length >= MAX_BUFFERED_RECORDS) { - // Fire and forget — errors are handled inside flushConnectionLogToDb + // Fire and forget - errors are handled inside flushConnectionLogToDb flushConnectionLogToDb().catch((error) => { logger.error( "Unexpected error during size-triggered connection log flush:", @@ -231,4 +231,4 @@ export function logConnectionAudit(record: ConnectionLogRecord): void { ); }); } -} \ No newline at end of file +} diff --git a/server/private/lib/logStreaming/providers/HttpLogDestination.ts b/server/private/lib/logStreaming/providers/HttpLogDestination.ts index dde7bd695..337a58f1f 100644 --- a/server/private/lib/logStreaming/providers/HttpLogDestination.ts +++ b/server/private/lib/logStreaming/providers/HttpLogDestination.ts @@ -37,7 +37,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array"; * * **Payload formats** (controlled by `config.format`): * - * - `json_array` (default) — one POST per batch, body is a JSON array: + * - `json_array` (default) - one POST per batch, body is a JSON array: * ```json * [ * { "event": "request", "timestamp": "2024-01-01T00:00:00.000Z", "data": { … } }, @@ -46,7 +46,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array"; * ``` * `Content-Type: application/json` * - * - `ndjson` — one POST per batch, body is newline-delimited JSON (one 
object + * - `ndjson` - one POST per batch, body is newline-delimited JSON (one object * per line, no outer array). Required by Splunk HEC, Elastic/OpenSearch, * and Grafana Loki: * ``` @@ -55,7 +55,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array"; * ``` * `Content-Type: application/x-ndjson` * - * - `json_single` — one POST **per event**, body is a plain JSON object. + * - `json_single` - one POST **per event**, body is a plain JSON object. * Use only for endpoints that cannot handle batches at all. * * With a body template each event is rendered through the template before @@ -319,4 +319,4 @@ function epochSecondsToIso(epochSeconds: number): string { function escapeJsonString(value: string): string { // JSON.stringify produces `""` – strip the outer quotes. return JSON.stringify(value).slice(1, -1); -} \ No newline at end of file +} diff --git a/server/private/lib/logStreaming/types.ts b/server/private/lib/logStreaming/types.ts index 5eed79520..1bcd25a66 100644 --- a/server/private/lib/logStreaming/types.ts +++ b/server/private/lib/logStreaming/types.ts @@ -60,9 +60,9 @@ export type AuthType = "none" | "bearer" | "basic" | "custom"; /** * Controls how the batch of events is serialised into the HTTP request body. * - * - `json_array` – `[{…}, {…}]` — default; one POST per batch wrapped in a + * - `json_array` – `[{…}, {…}]` - default; one POST per batch wrapped in a * JSON array. Works with most generic webhooks and Datadog. - * - `ndjson` – `{…}\n{…}` — newline-delimited JSON, one object per + * - `ndjson` – `{…}\n{…}` - newline-delimited JSON, one object per * line. Required by Splunk HEC, Elastic/OpenSearch, Loki. * - `json_single` – one HTTP POST per event, body is a plain JSON object. * Use only for endpoints that cannot handle batches at all. 
@@ -131,4 +131,4 @@ export interface DestinationFailureState { nextRetryAt: number; /** Date.now() value of the very first failure in the current streak */ firstFailedAt: number; -} \ No newline at end of file +} diff --git a/server/private/lib/readConfigFile.ts b/server/private/lib/readConfigFile.ts index f239edd85..c9cb1535a 100644 --- a/server/private/lib/readConfigFile.ts +++ b/server/private/lib/readConfigFile.ts @@ -34,10 +34,6 @@ export const privateConfigSchema = z.object({ }), server: z .object({ - encryption_key: z - .string() - .optional() - .transform(getEnvOrYaml("SERVER_ENCRYPTION_KEY")), reo_client_id: z .string() .optional() @@ -95,10 +91,21 @@ export const privateConfigSchema = z.object({ .object({ enable_redis: z.boolean().optional().default(false), use_pangolin_dns: z.boolean().optional().default(false), - use_org_only_idp: z.boolean().optional() + use_org_only_idp: z.boolean().optional(), + enable_acme_cert_sync: z.boolean().optional().default(true) }) .optional() .prefault({}), + acme: z + .object({ + acme_json_path: z + .string() + .optional() + .default("config/letsencrypt/acme.json"), + resolver: z.string().optional().default("letsencrypt"), + sync_interval_ms: z.number().optional().default(5000) + }) + .optional(), branding: z .object({ app_name: z.string().optional(), diff --git a/server/private/lib/traefik/getTraefikConfig.ts b/server/private/lib/traefik/getTraefikConfig.ts index 5ab96d6d6..fb6e176b8 100644 --- a/server/private/lib/traefik/getTraefikConfig.ts +++ b/server/private/lib/traefik/getTraefikConfig.ts @@ -33,7 +33,7 @@ import { } from "drizzle-orm"; import logger from "@server/logger"; import config from "@server/lib/config"; -import { orgs, resources, sites, Target, targets } from "@server/db"; +import { orgs, resources, sites, siteNetworks, siteResources, Target, targets } from "@server/db"; import { sanitize, encodePath, @@ -267,6 +267,35 @@ export async function getTraefikConfig( }); }); + // Query siteResources in HTTP mode 
with SSL enabled and aliases - cert generation / HTTPS edge + const siteResourcesWithFullDomain = await db + .select({ + siteResourceId: siteResources.siteResourceId, + fullDomain: siteResources.fullDomain, + mode: siteResources.mode + }) + .from(siteResources) + .innerJoin(siteNetworks, eq(siteResources.networkId, siteNetworks.networkId)) + .innerJoin(sites, eq(siteNetworks.siteId, sites.siteId)) + .where( + and( + eq(siteResources.enabled, true), + isNotNull(siteResources.fullDomain), + eq(siteResources.mode, "http"), + eq(siteResources.ssl, true), + or( + eq(sites.exitNodeId, exitNodeId), + and( + isNull(sites.exitNodeId), + sql`(${siteTypes.includes("local") ? 1 : 0} = 1)`, + eq(sites.type, "local"), + sql`(${build != "saas" ? 1 : 0} = 1)` + ) + ), + inArray(sites.type, siteTypes) + ) + ); + let validCerts: CertificateResult[] = []; if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) { // create a list of all domains to get certs for @@ -276,6 +305,12 @@ export async function getTraefikConfig( domains.add(resource.fullDomain); } } + // Include siteResource aliases so pangolin-dns also fetches certs for them + for (const sr of siteResourcesWithFullDomain) { + if (sr.fullDomain) { + domains.add(sr.fullDomain); + } + } // get the valid certs for these domains validCerts = await getValidCertificatesForDomains(domains, true); // we are caching here because this is called often // logger.debug(`Valid certs for domains: ${JSON.stringify(validCerts)}`); @@ -867,6 +902,139 @@ export async function getTraefikConfig( } } + // Add Traefik routes for siteResource aliases (HTTP mode + SSL) so that + // Traefik generates TLS certificates for those domains even when no + // matching resource exists yet. 
+ if (siteResourcesWithFullDomain.length > 0) { + // Build a set of domains already covered by normal resources + const existingFullDomains = new Set(); + for (const resource of resourcesMap.values()) { + if (resource.fullDomain) { + existingFullDomains.add(resource.fullDomain); + } + } + + for (const sr of siteResourcesWithFullDomain) { + if (!sr.fullDomain) continue; + + // Skip if this alias is already handled by a resource router + if (existingFullDomains.has(sr.fullDomain)) continue; + + const fullDomain = sr.fullDomain; + const srKey = `site-resource-cert-${sr.siteResourceId}`; + const siteResourceServiceName = `${srKey}-service`; + const siteResourceRouterName = `${srKey}-router`; + const siteResourceRewriteMiddlewareName = `${srKey}-rewrite`; + + const maintenancePort = config.getRawConfig().server.next_port; + const maintenanceHost = + config.getRawConfig().server.internal_hostname; + + if (!config_output.http.routers) { + config_output.http.routers = {}; + } + if (!config_output.http.services) { + config_output.http.services = {}; + } + if (!config_output.http.middlewares) { + config_output.http.middlewares = {}; + } + + // Service pointing at the internal maintenance/Next.js page + config_output.http.services[siteResourceServiceName] = { + loadBalancer: { + servers: [ + { + url: `http://${maintenanceHost}:${maintenancePort}` + } + ], + passHostHeader: true + } + }; + + // Middleware that rewrites any path to /maintenance-screen + config_output.http.middlewares[ + siteResourceRewriteMiddlewareName + ] = { + replacePathRegex: { + regex: "^/(.*)", + replacement: "/private-maintenance-screen" + } + }; + + // HTTP -> HTTPS redirect so the ACME challenge can be served + config_output.http.routers[ + `${siteResourceRouterName}-redirect` + ] = { + entryPoints: [ + config.getRawConfig().traefik.http_entrypoint + ], + middlewares: [redirectHttpsMiddlewareName], + service: siteResourceServiceName, + rule: `Host(\`${fullDomain}\`)`, + priority: 100 + }; + + // 
Determine TLS / cert-resolver configuration + let tls: any = {}; + if ( + !privateConfig.getRawPrivateConfig().flags.use_pangolin_dns + ) { + const domainParts = fullDomain.split("."); + const wildCard = + domainParts.length <= 2 + ? `*.${domainParts.join(".")}` + : `*.${domainParts.slice(1).join(".")}`; + + const globalDefaultResolver = + config.getRawConfig().traefik.cert_resolver; + const globalDefaultPreferWildcard = + config.getRawConfig().traefik.prefer_wildcard_cert; + + tls = { + certResolver: globalDefaultResolver, + ...(globalDefaultPreferWildcard + ? { domains: [{ main: wildCard }] } + : {}) + }; + } else { + // pangolin-dns: only add route if we already have a valid cert + const matchingCert = validCerts.find( + (cert) => cert.queriedDomain === fullDomain + ); + if (!matchingCert) { + logger.debug( + `No matching certificate found for siteResource alias: ${fullDomain}` + ); + continue; + } + } + + // HTTPS router - presence of this entry triggers cert generation + config_output.http.routers[siteResourceRouterName] = { + entryPoints: [ + config.getRawConfig().traefik.https_entrypoint + ], + service: siteResourceServiceName, + middlewares: [siteResourceRewriteMiddlewareName], + rule: `Host(\`${fullDomain}\`)`, + priority: 100, + tls + }; + + // Assets bypass router - lets Next.js static files load without rewrite + config_output.http.routers[`${siteResourceRouterName}-assets`] = { + entryPoints: [ + config.getRawConfig().traefik.https_entrypoint + ], + service: siteResourceServiceName, + rule: `Host(\`${fullDomain}\`) && (PathPrefix(\`/_next\`) || PathRegexp(\`^/__nextjs*\`))`, + priority: 101, + tls + }; + } + } + if (generateLoginPageRouters) { const exitNodeLoginPages = await db .select({ diff --git a/server/private/routers/alertEvents/index.ts b/server/private/routers/alertEvents/index.ts new file mode 100644 index 000000000..485b434eb --- /dev/null +++ b/server/private/routers/alertEvents/index.ts @@ -0,0 +1,16 @@ +/* + * This file is part of a 
proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +export * from "./triggerSiteAlert"; +export * from "./triggerResourceAlert"; +export * from "./triggerHealthCheckAlert"; \ No newline at end of file diff --git a/server/private/routers/alertEvents/triggerHealthCheckAlert.ts b/server/private/routers/alertEvents/triggerHealthCheckAlert.ts new file mode 100644 index 000000000..246de8cd0 --- /dev/null +++ b/server/private/routers/alertEvents/triggerHealthCheckAlert.ts @@ -0,0 +1,129 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { targetHealthCheck, statusHistory } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { eq, and } from "drizzle-orm"; +import { + fireHealthCheckHealthyAlert, + fireHealthCheckNotHealthyAlert +} from "#private/lib/alerts/events/healthCheckEvents"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty(), + healthCheckId: z.coerce.number().int().positive() +}); + +const bodySchema = z.strictObject({ + eventType: z.enum(["health_check_healthy", "health_check_unhealthy"]) +}); + +export type TriggerHealthCheckAlertResponse = { + success: true; +}; + +export async function triggerHealthCheckAlert( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const { orgId, healthCheckId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + const { eventType } = parsedBody.data; + + // Verify the health check exists and belongs to the org + const [healthCheck] = await db + .select() + .from(targetHealthCheck) + .where( + and( + eq( + targetHealthCheck.targetHealthCheckId, + healthCheckId + ), + eq(targetHealthCheck.orgId, orgId) + ) + ) + .limit(1); + + if (!healthCheck) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + `Health check ${healthCheckId} not found in organization ${orgId}` + ) + ); + } + + await 
db.insert(statusHistory).values({ + entityType: "healthCheck", + entityId: healthCheckId, + orgId, + status: eventType === "health_check_healthy" ? "healthy" : "unhealthy", + timestamp: Math.floor(Date.now() / 1000) + }); + + if (eventType === "health_check_healthy") { + await fireHealthCheckHealthyAlert( + orgId, + healthCheckId, + healthCheck.name ?? undefined + ); + } else { + await fireHealthCheckNotHealthyAlert( + orgId, + healthCheckId, + healthCheck.name ?? undefined + ); + } + + return response(res, { + data: { success: true }, + success: true, + error: false, + message: "Alert triggered successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} \ No newline at end of file diff --git a/server/private/routers/alertEvents/triggerResourceAlert.ts b/server/private/routers/alertEvents/triggerResourceAlert.ts new file mode 100644 index 000000000..61b81d900 --- /dev/null +++ b/server/private/routers/alertEvents/triggerResourceAlert.ts @@ -0,0 +1,135 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { resources, statusHistory } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { eq, and } from "drizzle-orm"; +import { + fireResourceHealthyAlert, + fireResourceUnhealthyAlert, + fireResourceToggleAlert +} from "#private/lib/alerts/events/resourceEvents"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty(), + resourceId: z.coerce.number().int().positive() +}); + +const bodySchema = z.strictObject({ + eventType: z.enum(["resource_healthy", "resource_unhealthy", "resource_toggle"]) +}); + +export type TriggerResourceAlertResponse = { + success: true; +}; + +export async function triggerResourceAlert( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const { orgId, resourceId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + const { eventType } = parsedBody.data; + + // Verify the resource exists and belongs to the org + const [resource] = await db + .select() + .from(resources) + .where( + and( + eq(resources.resourceId, resourceId), + eq(resources.orgId, orgId) + ) + ) + .limit(1); + + if (!resource) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + `Resource ${resourceId} not found in organization ${orgId}` + ) + ); + } + + if (eventType === "resource_healthy" || eventType === "resource_unhealthy") { + 
await db.insert(statusHistory).values({ + entityType: "resource", + entityId: resourceId, + orgId, + status: eventType === "resource_healthy" ? "healthy" : "unhealthy", + timestamp: Math.floor(Date.now() / 1000) + }); + } + + if (eventType === "resource_healthy") { + await fireResourceHealthyAlert( + orgId, + resourceId, + resource.name ?? undefined + ); + } else if (eventType === "resource_unhealthy") { + await fireResourceUnhealthyAlert( + orgId, + resourceId, + resource.name ?? undefined + ); + } else { + await fireResourceToggleAlert( + orgId, + resourceId, + resource.name ?? undefined + ); + } + + return response(res, { + data: { success: true }, + success: true, + error: false, + message: "Alert triggered successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} \ No newline at end of file diff --git a/server/private/routers/alertEvents/triggerSiteAlert.ts b/server/private/routers/alertEvents/triggerSiteAlert.ts new file mode 100644 index 000000000..084fbc758 --- /dev/null +++ b/server/private/routers/alertEvents/triggerSiteAlert.ts @@ -0,0 +1,113 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { sites, statusHistory } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { eq, and } from "drizzle-orm"; +import { + fireSiteOnlineAlert, + fireSiteOfflineAlert +} from "#private/lib/alerts/events/siteEvents"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty(), + siteId: z.coerce.number().int().positive() +}); + +const bodySchema = z.strictObject({ + eventType: z.enum(["site_online", "site_offline"]) +}); + +export type TriggerSiteAlertResponse = { + success: true; +}; + +export async function triggerSiteAlert( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const { orgId, siteId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + const { eventType } = parsedBody.data; + + // Verify the site exists and belongs to the org + const [site] = await db + .select() + .from(sites) + .where(and(eq(sites.siteId, siteId), eq(sites.orgId, orgId))) + .limit(1); + + if (!site) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + `Site ${siteId} not found in organization ${orgId}` + ) + ); + } + + await db.insert(statusHistory).values({ + entityType: "site", + entityId: siteId, + orgId, + status: eventType === "site_online" ? 
"online" : "offline", + timestamp: Math.floor(Date.now() / 1000) + }); + + if (eventType === "site_online") { + await fireSiteOnlineAlert(orgId, siteId, site.name ?? undefined); + } else { + await fireSiteOfflineAlert(orgId, siteId, site.name ?? undefined); + } + + return response(res, { + data: { success: true }, + success: true, + error: false, + message: "Alert triggered successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} \ No newline at end of file diff --git a/server/private/routers/alertRule/createAlertRule.ts b/server/private/routers/alertRule/createAlertRule.ts new file mode 100644 index 000000000..8a31327ab --- /dev/null +++ b/server/private/routers/alertRule/createAlertRule.ts @@ -0,0 +1,354 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, roles } from "@server/db"; +import { + alertRules, + alertSites, + alertHealthChecks, + alertResources, + alertEmailActions, + alertEmailRecipients, + alertWebhookActions +} from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { encrypt } from "@server/lib/crypto"; +import config from "@server/lib/config"; + +export const SITE_EVENT_TYPES = ["site_online", "site_offline", "site_toggle"] as const; +export const HC_EVENT_TYPES = [ + "health_check_healthy", + "health_check_unhealthy", + "health_check_toggle" +] as const; +export const RESOURCE_EVENT_TYPES = [ + "resource_healthy", + "resource_unhealthy", + "resource_toggle" +] as const; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty() +}); + +const webhookActionSchema = z.strictObject({ + webhookUrl: z.string().url(), + config: z.string().optional(), + enabled: z.boolean().optional().default(true) +}); + +const bodySchema = z + .strictObject({ + name: z.string().nonempty(), + eventType: z.enum([ + ...HC_EVENT_TYPES, + ...SITE_EVENT_TYPES, + ...RESOURCE_EVENT_TYPES + ]), + enabled: z.boolean().optional().default(true), + cooldownSeconds: z.number().int().nonnegative().optional().default(300), + // Source join tables - which is required depends on eventType + siteIds: z.array(z.number().int().positive()).optional().default([]), + allSites: z.boolean().optional().default(false), + healthCheckIds: z + .array(z.number().int().positive()) + .optional() + .default([]), + allHealthChecks: z.boolean().optional().default(false), + resourceIds: z + .array(z.number().int().positive()) + .optional() + .default([]), + allResources: 
z.boolean().optional().default(false), + // Email recipients (flat) + userIds: z.array(z.string().nonempty()).optional().default([]), + roleIds: z.array(z.number()).optional().default([]), + emails: z.array(z.string().email()).optional().default([]), + // Webhook actions + webhookActions: z.array(webhookActionSchema).optional().default([]) + }) + .superRefine((val, ctx) => { + const isSiteEvent = (SITE_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + const isHcEvent = (HC_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + const isResourceEvent = (RESOURCE_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + + if (isSiteEvent && !val.allSites && val.siteIds.length === 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "At least one siteId is required for site event types when allSites is false", + path: ["siteIds"] + }); + } + + if (isHcEvent && !val.allHealthChecks && val.healthCheckIds.length === 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: + "At least one healthCheckId is required for health check event types when allHealthChecks is false", + path: ["healthCheckIds"] + }); + } + + if (isSiteEvent && val.healthCheckIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "healthCheckIds must not be set for site event types", + path: ["healthCheckIds"] + }); + } + + if (isHcEvent && val.siteIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "siteIds must not be set for health check event types", + path: ["siteIds"] + }); + } + + if (isResourceEvent && !val.allResources && val.resourceIds.length === 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "At least one resourceId is required for resource event types when allResources is false", + path: ["resourceIds"] + }); + } + + if (isResourceEvent && val.siteIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "siteIds must not be set for resource event 
types", + path: ["siteIds"] + }); + } + + if (isResourceEvent && val.healthCheckIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "healthCheckIds must not be set for resource event types", + path: ["healthCheckIds"] + }); + } + + if (isSiteEvent && val.resourceIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "resourceIds must not be set for site event types", + path: ["resourceIds"] + }); + } + + if (isHcEvent && val.resourceIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "resourceIds must not be set for health check event types", + path: ["resourceIds"] + }); + } + }); + +export type CreateAlertRuleResponse = { + alertRuleId: number; +}; + +registry.registerPath({ + method: "put", + path: "/org/{orgId}/alert-rule", + description: "Create an alert rule for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema, + body: { + content: { + "application/json": { + schema: bodySchema + } + } + } + }, + responses: {} +}); + +export async function createAlertRule( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + + const { + name, + eventType, + enabled, + cooldownSeconds, + siteIds, + allSites, + healthCheckIds, + allHealthChecks, + resourceIds, + allResources, + userIds, + roleIds, + emails, + webhookActions + } = parsedBody.data; + + const now = Date.now(); + + const [rule] = await db + .insert(alertRules) + .values({ + orgId, + name, + eventType, + enabled, + cooldownSeconds, + allSites, + 
allHealthChecks, + allResources, + createdAt: now, + updatedAt: now + }) + .returning(); + + // Insert site associations (skipped when allSites=true — empty junction = match all) + if (!allSites && siteIds.length > 0) { + await db.insert(alertSites).values( + siteIds.map((siteId) => ({ + alertRuleId: rule.alertRuleId, + siteId + })) + ); + } + + // Insert health check associations (skipped when allHealthChecks=true) + if (!allHealthChecks && healthCheckIds.length > 0) { + await db.insert(alertHealthChecks).values( + healthCheckIds.map((healthCheckId) => ({ + alertRuleId: rule.alertRuleId, + healthCheckId + })) + ); + } + + // Insert resource associations (skipped when allResources=true) + if (!allResources && resourceIds.length > 0) { + await db.insert(alertResources).values( + resourceIds.map((resourceId) => ({ + alertRuleId: rule.alertRuleId, + resourceId + })) + ); + } + + // Create the email action pivot row and recipients if any recipients + // were supplied (userIds, roleIds, or raw emails). 
+ const hasRecipients = + userIds.length > 0 || + roleIds.length > 0 || + emails.length > 0; + + if (hasRecipients) { + const [emailActionRow] = await db + .insert(alertEmailActions) + .values({ alertRuleId: rule.alertRuleId }) + .returning(); + + const recipientRows = [ + ...userIds.map((userId) => ({ + emailActionId: emailActionRow.emailActionId, + userId, + roleId: null as number | null, + email: null as string | null + })), + ...roleIds.map((roleId) => ({ + emailActionId: emailActionRow.emailActionId, + userId: null as string | null, + roleId, + email: null as string | null + })), + ...emails.map((email) => ({ + emailActionId: emailActionRow.emailActionId, + userId: null as string | null, + roleId: null as number | null, + email + })) + ]; + + await db.insert(alertEmailRecipients).values(recipientRows); + } + + if (webhookActions.length > 0) { + const serverSecret = config.getRawConfig().server.secret!; + await db.insert(alertWebhookActions).values( + webhookActions.map((wa) => ({ + alertRuleId: rule.alertRuleId, + webhookUrl: wa.webhookUrl, + config: + wa.config != null + ? encrypt(wa.config, serverSecret) + : null, + enabled: wa.enabled + })) + ); + } + + return response(res, { + data: { + alertRuleId: rule.alertRuleId + }, + success: true, + error: false, + message: "Alert rule created successfully", + status: HttpCode.CREATED + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/alertRule/deleteAlertRule.ts b/server/private/routers/alertRule/deleteAlertRule.ts new file mode 100644 index 000000000..0988cd631 --- /dev/null +++ b/server/private/routers/alertRule/deleteAlertRule.ts @@ -0,0 +1,100 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. 
+ * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { alertRules } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq } from "drizzle-orm"; + +const paramsSchema = z + .object({ + orgId: z.string().nonempty(), + alertRuleId: z.coerce.number() + }) + .strict(); + +registry.registerPath({ + method: "delete", + path: "/org/{orgId}/alert-rule/{alertRuleId}", + description: "Delete an alert rule for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema + }, + responses: {} +}); + +export async function deleteAlertRule( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId, alertRuleId } = parsedParams.data; + + const [existing] = await db + .select() + .from(alertRules) + .where( + and( + eq(alertRules.alertRuleId, alertRuleId), + eq(alertRules.orgId, orgId) + ) + ); + + if (!existing) { + return next( + createHttpError(HttpCode.NOT_FOUND, "Alert rule not found") + ); + } + + await db + .delete(alertRules) + .where( + and( + eq(alertRules.alertRuleId, alertRuleId), + eq(alertRules.orgId, orgId) + ) + ); + + return response(res, { + data: null, + success: true, + error: false, + message: "Alert rule deleted successfully", + status: HttpCode.OK + 
}); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} \ No newline at end of file diff --git a/server/private/routers/alertRule/getAlertRule.ts b/server/private/routers/alertRule/getAlertRule.ts new file mode 100644 index 000000000..06a97e880 --- /dev/null +++ b/server/private/routers/alertRule/getAlertRule.ts @@ -0,0 +1,227 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { + alertRules, + alertSites, + alertHealthChecks, + alertResources, + alertEmailActions, + alertEmailRecipients, + alertWebhookActions +} from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq } from "drizzle-orm"; +import { decrypt } from "@server/lib/crypto"; +import config from "@server/lib/config"; +import { WebhookAlertConfig } from "#private/lib/alerts/types"; + +const paramsSchema = z + .object({ + orgId: z.string().nonempty(), + alertRuleId: z.coerce.number() + }) + .strict(); + +export type GetAlertRuleResponse = { + alertRuleId: number; + orgId: string; + name: string; + eventType: + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | 
"resource_unhealthy" + | "resource_toggle"; + enabled: boolean; + cooldownSeconds: number; + lastTriggeredAt: number | null; + createdAt: number; + updatedAt: number; + siteIds: number[]; + healthCheckIds: number[]; + resourceIds: number[]; + recipients: { + recipientId: number; + userId: string | null; + roleId: number | null; + email: string | null; + }[]; + webhookActions: { + webhookActionId: number; + webhookUrl: string; + enabled: boolean; + lastSentAt: number | null; + config: WebhookAlertConfig | null; + }[]; +}; + +registry.registerPath({ + method: "get", + path: "/org/{orgId}/alert-rule/{alertRuleId}", + description: "Get a specific alert rule for an organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema + }, + responses: {} +}); + +export async function getAlertRule( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId, alertRuleId } = parsedParams.data; + + const [rule] = await db + .select() + .from(alertRules) + .where( + and( + eq(alertRules.alertRuleId, alertRuleId), + eq(alertRules.orgId, orgId) + ) + ); + + if (!rule) { + return next( + createHttpError(HttpCode.NOT_FOUND, "Alert rule not found") + ); + } + + // Fetch site associations + const siteRows = await db + .select() + .from(alertSites) + .where(eq(alertSites.alertRuleId, alertRuleId)); + + // Fetch health check associations + const healthCheckRows = await db + .select() + .from(alertHealthChecks) + .where(eq(alertHealthChecks.alertRuleId, alertRuleId)); + + // Fetch resource associations + const resourceRows = await db + .select() + .from(alertResources) + .where(eq(alertResources.alertRuleId, alertRuleId)); + + // Resolve the single email action row for this rule, then collect all + // recipients into a flat list. 
The emailAction pivot row is an internal + // implementation detail and is not surfaced to callers. + const [emailAction] = await db + .select() + .from(alertEmailActions) + .where(eq(alertEmailActions.alertRuleId, alertRuleId)); + + let recipients: GetAlertRuleResponse["recipients"] = []; + if (emailAction) { + const rows = await db + .select() + .from(alertEmailRecipients) + .where( + eq( + alertEmailRecipients.emailActionId, + emailAction.emailActionId + ) + ); + + recipients = rows.map((r) => ({ + recipientId: r.recipientId, + userId: r.userId ?? null, + roleId: r.roleId ?? null, + email: r.email ?? null + })); + } + + // Fetch webhook actions + const webhooks = await db + .select() + .from(alertWebhookActions) + .where(eq(alertWebhookActions.alertRuleId, alertRuleId)); + + return response(res, { + data: { + alertRuleId: rule.alertRuleId, + orgId: rule.orgId, + name: rule.name, + eventType: rule.eventType, + enabled: rule.enabled, + cooldownSeconds: rule.cooldownSeconds, + lastTriggeredAt: rule.lastTriggeredAt ?? null, + createdAt: rule.createdAt, + updatedAt: rule.updatedAt, + siteIds: siteRows.map((r) => r.siteId), + healthCheckIds: healthCheckRows.map((r) => r.healthCheckId), + resourceIds: resourceRows.map((r) => r.resourceId), + recipients, + webhookActions: webhooks.map((w) => { + let parsedConfig: WebhookAlertConfig | null = null; + if (w.config) { + try { + const serverSecret = + config.getRawConfig().server.secret!; + const decrypted = decrypt(w.config, serverSecret); + parsedConfig = JSON.parse( + decrypted + ) as WebhookAlertConfig; + } catch { + // best-effort – return null if decryption fails + } + } + return { + webhookActionId: w.webhookActionId, + webhookUrl: w.webhookUrl, + enabled: w.enabled, + lastSentAt: w.lastSentAt ?? 
null, + config: parsedConfig + }; + }) + }, + success: true, + error: false, + message: "Alert rule retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/alertRule/index.ts b/server/private/routers/alertRule/index.ts new file mode 100644 index 000000000..19e35f7dc --- /dev/null +++ b/server/private/routers/alertRule/index.ts @@ -0,0 +1,18 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +export * from "./createAlertRule"; +export * from "./updateAlertRule"; +export * from "./deleteAlertRule"; +export * from "./listAlertRules"; +export * from "./getAlertRule"; \ No newline at end of file diff --git a/server/private/routers/alertRule/listAlertRules.ts b/server/private/routers/alertRule/listAlertRules.ts new file mode 100644 index 000000000..601ab0fa3 --- /dev/null +++ b/server/private/routers/alertRule/listAlertRules.ts @@ -0,0 +1,274 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { alertRules, alertSites, alertHealthChecks, alertResources } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq, inArray, like, sql } from "drizzle-orm"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty() +}); + +const querySchema = z.strictObject({ + limit: z + .string() + .optional() + .default("1000") + .transform(Number) + .pipe(z.number().int().nonnegative()), + offset: z + .string() + .optional() + .default("0") + .transform(Number) + .pipe(z.number().int().nonnegative()), + query: z.string().optional(), + siteId: z + .string() + .optional() + .transform((v) => (v !== undefined ? Number(v) : undefined)) + .pipe(z.number().int().positive().optional()), + resourceId: z + .string() + .optional() + .transform((v) => (v !== undefined ? 
Number(v) : undefined)) + .pipe(z.number().int().positive().optional()) +}); + +export type ListAlertRulesResponse = { + alertRules: { + alertRuleId: number; + orgId: string; + name: string; + eventType: string; + enabled: boolean; + cooldownSeconds: number; + lastTriggeredAt: number | null; + createdAt: number; + updatedAt: number; + siteIds: number[]; + healthCheckIds: number[]; + resourceIds: number[]; + }[]; + pagination: { + total: number; + limit: number; + offset: number; + }; +}; + +registry.registerPath({ + method: "get", + path: "/org/{orgId}/alert-rules", + description: "List all alert rules for a specific organization.", + tags: [OpenAPITags.Org], + request: { + query: querySchema, + params: paramsSchema + }, + responses: {} +}); + +export async function listAlertRules( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const { orgId } = parsedParams.data; + + const parsedQuery = querySchema.safeParse(req.query); + if (!parsedQuery.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedQuery.error).toString() + ) + ); + } + const { limit, offset, query, siteId, resourceId } = parsedQuery.data; + + // Resolve siteId filter → matching alertRuleIds + let siteFilterRuleIds: number[] | null = null; + if (siteId !== undefined) { + const rows = await db + .select({ alertRuleId: alertSites.alertRuleId }) + .from(alertSites) + .where(eq(alertSites.siteId, siteId)); + siteFilterRuleIds = rows.map((r) => r.alertRuleId); + if (siteFilterRuleIds.length === 0) { + return response(res, { + data: { + alertRules: [], + pagination: { total: 0, limit, offset } + }, + success: true, + error: false, + message: "Alert rules retrieved successfully", + status: HttpCode.OK + }); + } + } + + // Resolve resourceId filter 
→ matching alertRuleIds + let resourceFilterRuleIds: number[] | null = null; + if (resourceId !== undefined) { + const rows = await db + .select({ alertRuleId: alertResources.alertRuleId }) + .from(alertResources) + .where(eq(alertResources.resourceId, resourceId)); + resourceFilterRuleIds = rows.map((r) => r.alertRuleId); + if (resourceFilterRuleIds.length === 0) { + return response(res, { + data: { + alertRules: [], + pagination: { total: 0, limit, offset } + }, + success: true, + error: false, + message: "Alert rules retrieved successfully", + status: HttpCode.OK + }); + } + } + + const whereClause = and( + eq(alertRules.orgId, orgId), + query + ? like(sql`LOWER(${alertRules.name})`, `%${query.toLowerCase()}%`) + : undefined, + siteFilterRuleIds !== null + ? inArray(alertRules.alertRuleId, siteFilterRuleIds) + : undefined, + resourceFilterRuleIds !== null + ? inArray(alertRules.alertRuleId, resourceFilterRuleIds) + : undefined + ); + + const list = await db + .select() + .from(alertRules) + .where(whereClause) + .orderBy(sql`${alertRules.createdAt} DESC`) + .limit(limit) + .offset(offset); + + const [{ count }] = await db + .select({ count: sql`count(*)` }) + .from(alertRules) + .where(whereClause); + + // Batch-fetch site and health-check associations for all returned rules + // in two queries rather than N+1 individual lookups. + const ruleIds = list.map((r) => r.alertRuleId); + + const siteRows = + ruleIds.length > 0 + ? await db + .select() + .from(alertSites) + .where(inArray(alertSites.alertRuleId, ruleIds)) + : []; + + const healthCheckRows = + ruleIds.length > 0 + ? await db + .select() + .from(alertHealthChecks) + .where( + inArray(alertHealthChecks.alertRuleId, ruleIds) + ) + : []; + + const resourceRows = + ruleIds.length > 0 + ? 
await db + .select() + .from(alertResources) + .where(inArray(alertResources.alertRuleId, ruleIds)) + : []; + + // Index by alertRuleId for O(1) lookup when building the response + const sitesByRule = new Map(); + for (const row of siteRows) { + const existing = sitesByRule.get(row.alertRuleId) ?? []; + existing.push(row.siteId); + sitesByRule.set(row.alertRuleId, existing); + } + + const healthChecksByRule = new Map(); + for (const row of healthCheckRows) { + const existing = healthChecksByRule.get(row.alertRuleId) ?? []; + existing.push(row.healthCheckId); + healthChecksByRule.set(row.alertRuleId, existing); + } + + const resourcesByRule = new Map(); + for (const row of resourceRows) { + const existing = resourcesByRule.get(row.alertRuleId) ?? []; + existing.push(row.resourceId); + resourcesByRule.set(row.alertRuleId, existing); + } + + return response(res, { + data: { + alertRules: list.map((rule) => ({ + alertRuleId: rule.alertRuleId, + orgId: rule.orgId, + name: rule.name, + eventType: rule.eventType, + enabled: rule.enabled, + cooldownSeconds: rule.cooldownSeconds, + lastTriggeredAt: rule.lastTriggeredAt ?? null, + createdAt: rule.createdAt, + updatedAt: rule.updatedAt, + siteIds: sitesByRule.get(rule.alertRuleId) ?? [], + healthCheckIds: + healthChecksByRule.get(rule.alertRuleId) ?? [], + resourceIds: resourcesByRule.get(rule.alertRuleId) ?? 
[] + })), + pagination: { + total: count, + limit, + offset + } + }, + success: true, + error: false, + message: "Alert rules retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} \ No newline at end of file diff --git a/server/private/routers/alertRule/updateAlertRule.ts b/server/private/routers/alertRule/updateAlertRule.ts new file mode 100644 index 000000000..358661ac9 --- /dev/null +++ b/server/private/routers/alertRule/updateAlertRule.ts @@ -0,0 +1,403 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db } from "@server/db"; +import { + alertRules, + alertSites, + alertHealthChecks, + alertResources, + alertEmailActions, + alertEmailRecipients, + alertWebhookActions +} from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq } from "drizzle-orm"; +import { encrypt } from "@server/lib/crypto"; +import config from "@server/lib/config"; +import { HC_EVENT_TYPES, SITE_EVENT_TYPES, RESOURCE_EVENT_TYPES } from "./createAlertRule"; +import { invalidateAllRemoteExitNodeSessions } from "@server/private/auth/sessions/remoteExitNode"; + +const paramsSchema = z + .object({ + orgId: z.string().nonempty(), + alertRuleId: z.coerce.number() + }) + .strict(); + +const webhookActionSchema = z.strictObject({ + webhookUrl: z.string().url(), + config: z.string().optional(), + enabled: z.boolean().optional().default(true) +}); + +const bodySchema = z + .strictObject({ + // Alert rule fields - all optional for partial updates + name: z.string().nonempty().optional(), + eventType: z + .enum([ + ...HC_EVENT_TYPES, + ...SITE_EVENT_TYPES, + ...RESOURCE_EVENT_TYPES + ]) + .optional(), + enabled: z.boolean().optional(), + cooldownSeconds: z.number().int().nonnegative().optional(), + // Source join tables - if provided the full set is replaced + siteIds: z.array(z.number().int().positive()).optional(), + allSites: z.boolean().optional(), + healthCheckIds: z.array(z.number().int().positive()).optional(), + allHealthChecks: z.boolean().optional(), + resourceIds: z.array(z.number().int().positive()).optional(), + allResources: z.boolean().optional(), + // Recipient arrays - if any are provided the full recipient set is replaced + 
userIds: z.array(z.string().nonempty()).optional(), + roleIds: z.array(z.number()).optional(), + emails: z.array(z.string().email()).optional(), + // Webhook actions - if provided the full webhook set is replaced + webhookActions: z.array(webhookActionSchema).optional() + }) + .superRefine((val, ctx) => { + if (!val.eventType) return; + + const isSiteEvent = (SITE_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + const isHcEvent = (HC_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + const isResourceEvent = (RESOURCE_EVENT_TYPES as readonly string[]).includes( + val.eventType + ); + + if (isSiteEvent && val.siteIds !== undefined && val.siteIds.length === 0 && !val.allSites) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "At least one siteId is required for site event types when allSites is false", + path: ["siteIds"] + }); + } + + if (isHcEvent && val.healthCheckIds !== undefined && val.healthCheckIds.length === 0 && !val.allHealthChecks) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "At least one healthCheckId is required for health check event types when allHealthChecks is false", + path: ["healthCheckIds"] + }); + } + + if (isResourceEvent && val.resourceIds !== undefined && val.resourceIds.length === 0 && !val.allResources) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "At least one resourceId is required for resource event types when allResources is false", + path: ["resourceIds"] + }); + } + + if (isSiteEvent && val.healthCheckIds !== undefined && val.healthCheckIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "healthCheckIds must not be set for site event types", + path: ["healthCheckIds"] + }); + } + + if (isHcEvent && val.siteIds !== undefined && val.siteIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "siteIds must not be set for health check event types", + path: ["siteIds"] + }); + } + + if (isResourceEvent && val.siteIds 
!== undefined && val.siteIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "siteIds must not be set for resource event types", + path: ["siteIds"] + }); + } + + if (isResourceEvent && val.healthCheckIds !== undefined && val.healthCheckIds.length > 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: "healthCheckIds must not be set for resource event types", + path: ["healthCheckIds"] + }); + } + }); + +export type UpdateAlertRuleResponse = { + alertRuleId: number; +}; + +registry.registerPath({ + method: "post", + path: "/org/{orgId}/alert-rule/{alertRuleId}", + description: "Update an alert rule for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema, + body: { + content: { + "application/json": { + schema: bodySchema + } + } + } + }, + responses: {} +}); + +export async function updateAlertRule( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId, alertRuleId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + + const [existing] = await db + .select() + .from(alertRules) + .where( + and( + eq(alertRules.alertRuleId, alertRuleId), + eq(alertRules.orgId, orgId) + ) + ); + + if (!existing) { + return next( + createHttpError(HttpCode.NOT_FOUND, "Alert rule not found") + ); + } + + const { + name, + eventType, + enabled, + cooldownSeconds, + siteIds, + allSites, + healthCheckIds, + allHealthChecks, + resourceIds, + allResources, + userIds, + roleIds, + emails, + webhookActions + } = parsedBody.data; + + // --- Update rule fields --- + const updateData: Record = { + updatedAt: Date.now() 
+ }; + + if (name !== undefined) updateData.name = name; + if (eventType !== undefined) updateData.eventType = eventType; + if (enabled !== undefined) updateData.enabled = enabled; + if (cooldownSeconds !== undefined) updateData.cooldownSeconds = cooldownSeconds; + if (allSites !== undefined) updateData.allSites = allSites; + if (allHealthChecks !== undefined) updateData.allHealthChecks = allHealthChecks; + if (allResources !== undefined) updateData.allResources = allResources; + + await db + .update(alertRules) + .set(updateData) + .where( + and( + eq(alertRules.alertRuleId, alertRuleId), + eq(alertRules.orgId, orgId) + ) + ); + + // --- Full-replace site associations if siteIds was provided --- + if (siteIds !== undefined || allSites !== undefined) { + await db + .delete(alertSites) + .where(eq(alertSites.alertRuleId, alertRuleId)); + + // Only insert junction rows when allSites is not true + const effectiveAllSites = allSites ?? false; + if (!effectiveAllSites && siteIds !== undefined && siteIds.length > 0) { + await db.insert(alertSites).values( + siteIds.map((siteId) => ({ + alertRuleId, + siteId + })) + ); + } + } + + // --- Full-replace health check associations if healthCheckIds was provided --- + if (healthCheckIds !== undefined || allHealthChecks !== undefined) { + await db + .delete(alertHealthChecks) + .where(eq(alertHealthChecks.alertRuleId, alertRuleId)); + + const effectiveAllHealthChecks = allHealthChecks ?? false; + if (!effectiveAllHealthChecks && healthCheckIds !== undefined && healthCheckIds.length > 0) { + await db.insert(alertHealthChecks).values( + healthCheckIds.map((healthCheckId) => ({ + alertRuleId, + healthCheckId + })) + ); + } + } + + // --- Full-replace resource associations if resourceIds was provided --- + if (resourceIds !== undefined || allResources !== undefined) { + await db + .delete(alertResources) + .where(eq(alertResources.alertRuleId, alertRuleId)); + + const effectiveAllResources = allResources ?? 
false; + if (!effectiveAllResources && resourceIds !== undefined && resourceIds.length > 0) { + await db.insert(alertResources).values( + resourceIds.map((resourceId) => ({ + alertRuleId, + resourceId + })) + ); + } + } + + // --- Full-replace recipients if any recipient array was provided --- + const recipientsProvided = + userIds !== undefined || + roleIds !== undefined || + emails !== undefined; + + if (recipientsProvided) { + const newRecipients = [ + ...(userIds ?? []).map((userId) => ({ + userId, + roleId: null as number | null, + email: null as string | null + })), + ...(roleIds ?? []).map((roleId) => ({ + userId: null as string | null, + roleId, + email: null as string | null + })), + ...(emails ?? []).map((email) => ({ + userId: null as string | null, + roleId: null as number | null, + email + })) + ]; + + const [existingEmailAction] = await db + .select() + .from(alertEmailActions) + .where(eq(alertEmailActions.alertRuleId, alertRuleId)); + + if (existingEmailAction) { + await db + .delete(alertEmailRecipients) + .where( + eq( + alertEmailRecipients.emailActionId, + existingEmailAction.emailActionId + ) + ); + + if (newRecipients.length > 0) { + await db.insert(alertEmailRecipients).values( + newRecipients.map((r) => ({ + emailActionId: existingEmailAction.emailActionId, + ...r + })) + ); + } + } else if (newRecipients.length > 0) { + const [emailActionRow] = await db + .insert(alertEmailActions) + .values({ alertRuleId, enabled: true }) + .returning(); + + await db.insert(alertEmailRecipients).values( + newRecipients.map((r) => ({ + emailActionId: emailActionRow.emailActionId, + ...r + })) + ); + } + } + + // --- Full-replace webhook actions if the array was provided --- + if (webhookActions !== undefined) { + await db + .delete(alertWebhookActions) + .where(eq(alertWebhookActions.alertRuleId, alertRuleId)); + + if (webhookActions.length > 0) { + const serverSecret = config.getRawConfig().server.secret!; + await db.insert(alertWebhookActions).values( + 
webhookActions.map((wa) => ({ + alertRuleId, + webhookUrl: wa.webhookUrl, + config: wa.config != null ? encrypt(wa.config, serverSecret) : null, + enabled: wa.enabled + })) + ); + } + } + + return response(res, { + data: { + alertRuleId + }, + success: true, + error: false, + message: "Alert rule updated successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/external.ts b/server/private/routers/external.ts index 7872da700..159ee2449 100644 --- a/server/private/routers/external.ts +++ b/server/private/routers/external.ts @@ -29,6 +29,8 @@ import * as ssh from "#private/routers/ssh"; import * as user from "#private/routers/user"; import * as siteProvisioning from "#private/routers/siteProvisioning"; import * as eventStreamingDestination from "#private/routers/eventStreamingDestination"; +import * as alertRule from "#private/routers/alertRule"; +import * as healthChecks from "#private/routers/healthChecks"; import { verifyOrgAccess, @@ -681,7 +683,96 @@ authenticated.delete( authenticated.get( "/org/:orgId/event-streaming-destinations", + verifyValidLicense, verifyOrgAccess, verifyUserHasAction(ActionsEnum.listEventStreamingDestinations), eventStreamingDestination.listEventStreamingDestinations ); + +authenticated.put( + "/org/:orgId/alert-rule", + verifyValidLicense, + verifyOrgAccess, + verifyLimits, + verifyUserHasAction(ActionsEnum.createAlertRule), + logActionAudit(ActionsEnum.createAlertRule), + alertRule.createAlertRule +); + +authenticated.post( + "/org/:orgId/alert-rule/:alertRuleId", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.updateAlertRule), + logActionAudit(ActionsEnum.updateAlertRule), + alertRule.updateAlertRule +); + +authenticated.delete( + "/org/:orgId/alert-rule/:alertRuleId", + verifyValidLicense, + verifyOrgAccess, + 
verifyUserHasAction(ActionsEnum.deleteAlertRule), + logActionAudit(ActionsEnum.deleteAlertRule), + alertRule.deleteAlertRule +); + +authenticated.get( + "/org/:orgId/alert-rules", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.listAlertRules), + alertRule.listAlertRules +); + +authenticated.get( + "/org/:orgId/alert-rule/:alertRuleId", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.getAlertRule), + alertRule.getAlertRule +); + +authenticated.get( + "/org/:orgId/health-checks", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.listHealthChecks), + healthChecks.listHealthChecks +); + +authenticated.put( + "/org/:orgId/health-check", + verifyValidLicense, + verifyOrgAccess, + verifyLimits, + verifyUserHasAction(ActionsEnum.createHealthCheck), + logActionAudit(ActionsEnum.createHealthCheck), + healthChecks.createHealthCheck +); + +authenticated.post( + "/org/:orgId/health-check/:healthCheckId", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.updateHealthCheck), + logActionAudit(ActionsEnum.updateHealthCheck), + healthChecks.updateHealthCheck +); + +authenticated.delete( + "/org/:orgId/health-check/:healthCheckId", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.deleteHealthCheck), + logActionAudit(ActionsEnum.deleteHealthCheck), + healthChecks.deleteHealthCheck +); + +authenticated.get( + "/org/:orgId/health-check/:healthCheckId/status-history", + verifyValidLicense, + verifyOrgAccess, + verifyUserHasAction(ActionsEnum.getTarget), + healthChecks.getHealthCheckStatusHistory +); diff --git a/server/private/routers/healthChecks/createHealthCheck.ts b/server/private/routers/healthChecks/createHealthCheck.ts new file mode 100644 index 000000000..ff5495e55 --- /dev/null +++ b/server/private/routers/healthChecks/createHealthCheck.ts @@ -0,0 +1,188 @@ +/* + * This file is part of a proprietary work. 
+ * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, targetHealthCheck, newts, sites } from "@server/db"; +import { eq } from "drizzle-orm"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { addStandaloneHealthCheck } from "@server/routers/newt/targets"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty() +}); + +const bodySchema = z.strictObject({ + name: z.string().nonempty(), + siteId: z.number().int().positive(), + hcEnabled: z.boolean().default(false), + hcMode: z.string().default("http"), + hcHostname: z.string().optional(), + hcPort: z.number().int().min(1).max(65535).optional(), + hcPath: z.string().optional(), + hcScheme: z.string().optional(), + hcMethod: z.string().default("GET"), + hcInterval: z.number().int().positive().default(30), + hcUnhealthyInterval: z.number().int().positive().default(30), + hcTimeout: z.number().int().positive().default(5), + hcHeaders: z.string().optional().nullable(), + hcFollowRedirects: z.boolean().default(true), + hcStatus: z.number().int().optional().nullable(), + hcTlsServerName: z.string().optional(), + hcHealthyThreshold: z.number().int().positive().default(1), + hcUnhealthyThreshold: z.number().int().positive().default(1) +}); + +export type CreateHealthCheckResponse = { + targetHealthCheckId: number; +}; + +registry.registerPath({ + method: "put", + path: 
"/org/{orgId}/health-check", + description: "Create a health check for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema, + body: { + content: { + "application/json": { + schema: bodySchema + } + } + } + }, + responses: {} +}); + +export async function createHealthCheck( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + + const { + name, + siteId, + hcEnabled, + hcMode, + hcHostname, + hcPort, + hcPath, + hcScheme, + hcMethod, + hcInterval, + hcUnhealthyInterval, + hcTimeout, + hcHeaders, + hcFollowRedirects, + hcStatus, + hcTlsServerName, + hcHealthyThreshold, + hcUnhealthyThreshold + } = parsedBody.data; + + const [record] = await db + .insert(targetHealthCheck) + .values({ + targetId: null, + orgId, + siteId, + name, + hcEnabled, + hcMode, + hcHostname: hcHostname ?? null, + hcPort: hcPort ?? null, + hcPath: hcPath ?? null, + hcScheme: hcScheme ?? null, + hcMethod, + hcInterval, + hcUnhealthyInterval, + hcTimeout, + hcHeaders: hcHeaders ?? null, + hcFollowRedirects, + hcStatus: hcStatus ?? null, + hcTlsServerName: hcTlsServerName ?? 
null, + hcHealthyThreshold, + hcUnhealthyThreshold + }) + .returning(); + + // Push health check to newt if the site is a newt site + if (siteId) { + const [site] = await db + .select() + .from(sites) + .where(eq(sites.siteId, siteId)) + .limit(1); + + if (site && site.type === "newt") { + const [newt] = await db + .select() + .from(newts) + .where(eq(newts.siteId, site.siteId)) + .limit(1); + + if (newt) { + await addStandaloneHealthCheck( + newt.newtId, + record, + newt.version + ); + } + } + } + + return response(res, { + data: { + targetHealthCheckId: record.targetHealthCheckId + }, + success: true, + error: false, + message: "Standalone health check created successfully", + status: HttpCode.CREATED + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/healthChecks/deleteHealthCheck.ts b/server/private/routers/healthChecks/deleteHealthCheck.ts new file mode 100644 index 000000000..530653aab --- /dev/null +++ b/server/private/routers/healthChecks/deleteHealthCheck.ts @@ -0,0 +1,123 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, targetHealthCheck, newts, sites } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq, isNull } from "drizzle-orm"; +import { removeStandaloneHealthCheck } from "@server/routers/newt/targets"; + +const paramsSchema = z + .object({ + orgId: z.string().nonempty(), + healthCheckId: z + .string() + .transform(Number) + .pipe(z.number().int().positive()) + }) + .strict(); + +registry.registerPath({ + method: "delete", + path: "/org/{orgId}/health-check/{healthCheckId}", + description: "Delete a health check for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema + }, + responses: {} +}); + +export async function deleteHealthCheck( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId, healthCheckId } = parsedParams.data; + + const [existing] = await db + .select() + .from(targetHealthCheck) + .where( + and( + eq(targetHealthCheck.targetHealthCheckId, healthCheckId), + eq(targetHealthCheck.orgId, orgId), + isNull(targetHealthCheck.targetId) + ) + ); + + if (!existing) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + "Standalone health check not found" + ) + ); + } + + await db + .delete(targetHealthCheck) + .where( + and( + eq(targetHealthCheck.targetHealthCheckId, healthCheckId), + eq(targetHealthCheck.orgId, orgId), + isNull(targetHealthCheck.targetId) + ) + ); + + // Remove health check from newt if the site is 
a newt site + const [newt] = await db + .select() + .from(newts) + .where(eq(newts.siteId, existing.siteId)) + .limit(1); + + if (newt) { + await removeStandaloneHealthCheck( + newt.newtId, + healthCheckId, + newt.version + ); + } + + return response(res, { + data: null, + success: true, + error: false, + message: "Standalone health check deleted successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/healthChecks/getStatusHistory.ts b/server/private/routers/healthChecks/getStatusHistory.ts new file mode 100644 index 000000000..5b1ddcfb0 --- /dev/null +++ b/server/private/routers/healthChecks/getStatusHistory.ts @@ -0,0 +1,93 @@ +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, statusHistory } from "@server/db"; +import { and, eq, gte, asc } from "drizzle-orm"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { + computeBuckets, + statusHistoryQuerySchema, + StatusHistoryResponse +} from "@server/lib/statusHistory"; + +const healthCheckParamsSchema = z.object({ + healthCheckId: z.string().transform((v) => parseInt(v, 10)) +}); + +export async function getHealthCheckStatusHistory( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = healthCheckParamsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const parsedQuery = statusHistoryQuerySchema.safeParse(req.query); + if (!parsedQuery.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedQuery.error).toString() + ) + ); + } 
+ + const entityType = "healthCheck"; + const entityId = parsedParams.data.healthCheckId; + const { days } = parsedQuery.data; + + const nowSec = Math.floor(Date.now() / 1000); + const startSec = nowSec - days * 86400; + + const events = await db + .select() + .from(statusHistory) + .where( + and( + eq(statusHistory.entityType, entityType), + eq(statusHistory.entityId, entityId), + gte(statusHistory.timestamp, startSec) + ) + ) + .orderBy(asc(statusHistory.timestamp)); + + const { buckets, totalDowntime } = computeBuckets(events, days); + const totalWindow = days * 86400; + const overallUptime = + totalWindow > 0 + ? Math.max( + 0, + ((totalWindow - totalDowntime) / totalWindow) * 100 + ) + : 100; + + return response(res, { + data: { + entityType, + entityId, + days: buckets, + overallUptimePercent: Math.round(overallUptime * 100) / 100, + totalDowntimeSeconds: totalDowntime + }, + success: true, + error: false, + message: "Status history retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/healthChecks/index.ts b/server/private/routers/healthChecks/index.ts new file mode 100644 index 000000000..665ae5cca --- /dev/null +++ b/server/private/routers/healthChecks/index.ts @@ -0,0 +1,18 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +export * from "./listHealthChecks"; +export * from "./createHealthCheck"; +export * from "./updateHealthCheck"; +export * from "./deleteHealthCheck"; +export * from "./getStatusHistory"; diff --git a/server/private/routers/healthChecks/listHealthChecks.ts b/server/private/routers/healthChecks/listHealthChecks.ts new file mode 100644 index 000000000..e156573e4 --- /dev/null +++ b/server/private/routers/healthChecks/listHealthChecks.ts @@ -0,0 +1,187 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { db, targetHealthCheck, targets, resources, sites } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq, like, sql } from "drizzle-orm"; +import { NextFunction, Request, Response } from "express"; +import { z } from "zod"; +import { fromError } from "zod-validation-error"; +import { ListHealthChecksResponse } from "@server/routers/healthChecks/types"; + +const paramsSchema = z.strictObject({ + orgId: z.string().nonempty() +}); + +const querySchema = z.object({ + limit: z + .string() + .optional() + .default("1000") + .transform(Number) + .pipe(z.int().positive()), + offset: z + .string() + .optional() + .default("0") + .transform(Number) + .pipe(z.int().nonnegative()), + query: z.string().optional() +}); + +registry.registerPath({ + method: "get", + path: "/org/{orgId}/health-checks", + description: "List health checks for an organization.", + tags: [OpenAPITags.Org], + request: { + 
params: paramsSchema, + query: querySchema + }, + responses: {} +}); + +export async function listHealthChecks( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const { orgId } = parsedParams.data; + + const parsedQuery = querySchema.safeParse(req.query); + if (!parsedQuery.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedQuery.error).toString() + ) + ); + } + const { limit, offset, query } = parsedQuery.data; + + const whereClause = and( + eq(targetHealthCheck.orgId, orgId), + query + ? like( + sql`LOWER(${targetHealthCheck.name})`, + `%${query.toLowerCase()}%` + ) + : undefined + ); + + const list = await db + .select({ + targetHealthCheckId: targetHealthCheck.targetHealthCheckId, + name: targetHealthCheck.name, + siteId: targetHealthCheck.siteId, + siteName: sites.name, + siteNiceId: sites.niceId, + hcEnabled: targetHealthCheck.hcEnabled, + hcHealth: targetHealthCheck.hcHealth, + hcMode: targetHealthCheck.hcMode, + hcHostname: targetHealthCheck.hcHostname, + hcPort: targetHealthCheck.hcPort, + hcPath: targetHealthCheck.hcPath, + hcScheme: targetHealthCheck.hcScheme, + hcMethod: targetHealthCheck.hcMethod, + hcInterval: targetHealthCheck.hcInterval, + hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval, + hcTimeout: targetHealthCheck.hcTimeout, + hcHeaders: targetHealthCheck.hcHeaders, + hcFollowRedirects: targetHealthCheck.hcFollowRedirects, + hcStatus: targetHealthCheck.hcStatus, + hcTlsServerName: targetHealthCheck.hcTlsServerName, + hcHealthyThreshold: targetHealthCheck.hcHealthyThreshold, + hcUnhealthyThreshold: targetHealthCheck.hcUnhealthyThreshold, + resourceId: resources.resourceId, + resourceName: resources.name, + resourceNiceId: resources.niceId + }) + .from(targetHealthCheck) + 
.leftJoin(targets, eq(targetHealthCheck.targetId, targets.targetId)) + .leftJoin(resources, eq(targets.resourceId, resources.resourceId)) + .leftJoin(sites, eq(targetHealthCheck.siteId, sites.siteId)) + .where(whereClause) + .orderBy(sql`${targetHealthCheck.targetHealthCheckId} DESC`) + .limit(limit) + .offset(offset); + + const [{ count }] = await db + .select({ count: sql`count(*)` }) + .from(targetHealthCheck) + .where(whereClause); + + return response(res, { + data: { + healthChecks: list.map((row) => ({ + targetHealthCheckId: row.targetHealthCheckId, + name: row.name ?? "", + siteId: row.siteId ?? null, + siteName: row.siteName ?? null, + siteNiceId: row.siteNiceId ?? null, + hcEnabled: row.hcEnabled, + hcHealth: (row.hcHealth ?? "unknown") as + | "unknown" + | "healthy" + | "unhealthy", + hcMode: row.hcMode ?? null, + hcHostname: row.hcHostname ?? null, + hcPort: row.hcPort ?? null, + hcPath: row.hcPath ?? null, + hcScheme: row.hcScheme ?? null, + hcMethod: row.hcMethod ?? null, + hcInterval: row.hcInterval ?? null, + hcUnhealthyInterval: row.hcUnhealthyInterval ?? null, + hcTimeout: row.hcTimeout ?? null, + hcHeaders: row.hcHeaders ?? null, + hcFollowRedirects: row.hcFollowRedirects ?? null, + hcStatus: row.hcStatus ?? null, + hcTlsServerName: row.hcTlsServerName ?? null, + hcHealthyThreshold: row.hcHealthyThreshold ?? null, + hcUnhealthyThreshold: row.hcUnhealthyThreshold ?? null, + resourceId: row.resourceId ?? null, + resourceName: row.resourceName ?? null, + resourceNiceId: row.resourceNiceId ?? 
null + })), + pagination: { + total: count, + limit, + offset + } + }, + success: true, + error: false, + message: "Standalone health checks retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/healthChecks/updateHealthCheck.ts b/server/private/routers/healthChecks/updateHealthCheck.ts new file mode 100644 index 000000000..713bf1e03 --- /dev/null +++ b/server/private/routers/healthChecks/updateHealthCheck.ts @@ -0,0 +1,250 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, targetHealthCheck, newts, sites } from "@server/db"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { OpenAPITags, registry } from "@server/openApi"; +import { and, eq, isNull } from "drizzle-orm"; +import { addStandaloneHealthCheck } from "@server/routers/newt/targets"; + +const paramsSchema = z + .object({ + orgId: z.string().nonempty(), + healthCheckId: z + .string() + .transform(Number) + .pipe(z.number().int().positive()) + }) + .strict(); + +const bodySchema = z.strictObject({ + name: z.string().nonempty().optional(), + siteId: z.number().int().positive().optional(), + hcEnabled: z.boolean().optional(), + hcMode: z.string().optional(), + hcHostname: z.string().optional(), + hcPort: z.number().int().min(1).max(65535).optional(), + hcPath: z.string().optional(), + hcScheme: z.string().optional(), + hcMethod: z.string().optional(), + hcInterval: z.number().int().positive().optional(), + hcUnhealthyInterval: z.number().int().positive().optional(), + hcTimeout: z.number().int().positive().optional(), + hcHeaders: z.string().optional().nullable(), + hcFollowRedirects: z.boolean().optional(), + hcStatus: z.number().int().optional().nullable(), + hcTlsServerName: z.string().optional(), + hcHealthyThreshold: z.number().int().positive().optional(), + hcUnhealthyThreshold: z.number().int().positive().optional() +}); + +export type UpdateHealthCheckResponse = { + targetHealthCheckId: number; + name: string | null; + siteId: number | null; + hcEnabled: boolean; + hcHealth: string | null; + hcMode: string | null; + hcHostname: string | null; + hcPort: number | null; + hcPath: string | null; + hcScheme: string | null; + hcMethod: string | null; + hcInterval: number | null; + 
hcUnhealthyInterval: number | null; + hcTimeout: number | null; + hcHeaders: string | null; + hcFollowRedirects: boolean | null; + hcStatus: number | null; + hcTlsServerName: string | null; + hcHealthyThreshold: number | null; + hcUnhealthyThreshold: number | null; +}; + +registry.registerPath({ + method: "post", + path: "/org/{orgId}/health-check/{healthCheckId}", + description: "Update a health check for a specific organization.", + tags: [OpenAPITags.Org], + request: { + params: paramsSchema, + body: { + content: { + "application/json": { + schema: bodySchema + } + } + } + }, + responses: {} +}); + +export async function updateHealthCheck( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = paramsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + + const { orgId, healthCheckId } = parsedParams.data; + + const parsedBody = bodySchema.safeParse(req.body); + if (!parsedBody.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedBody.error).toString() + ) + ); + } + + const [existing] = await db + .select() + .from(targetHealthCheck) + .where( + and( + eq(targetHealthCheck.targetHealthCheckId, healthCheckId), + eq(targetHealthCheck.orgId, orgId), + isNull(targetHealthCheck.targetId) + ) + ); + + if (!existing) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + "Standalone health check not found" + ) + ); + } + + const { + name, + siteId, + hcEnabled, + hcMode, + hcHostname, + hcPort, + hcPath, + hcScheme, + hcMethod, + hcInterval, + hcUnhealthyInterval, + hcTimeout, + hcHeaders, + hcFollowRedirects, + hcStatus, + hcTlsServerName, + hcHealthyThreshold, + hcUnhealthyThreshold + } = parsedBody.data; + + const updateData: Record = {}; + + if (name !== undefined) updateData.name = name; + if (siteId !== undefined) updateData.siteId = siteId; + if (hcEnabled !== 
undefined) updateData.hcEnabled = hcEnabled; + if (hcMode !== undefined) updateData.hcMode = hcMode; + if (hcHostname !== undefined) updateData.hcHostname = hcHostname; + if (hcPort !== undefined) updateData.hcPort = hcPort; + if (hcPath !== undefined) updateData.hcPath = hcPath; + if (hcScheme !== undefined) updateData.hcScheme = hcScheme; + if (hcMethod !== undefined) updateData.hcMethod = hcMethod; + if (hcInterval !== undefined) updateData.hcInterval = hcInterval; + if (hcUnhealthyInterval !== undefined) + updateData.hcUnhealthyInterval = hcUnhealthyInterval; + if (hcTimeout !== undefined) updateData.hcTimeout = hcTimeout; + if (hcHeaders !== undefined) updateData.hcHeaders = hcHeaders; + if (hcFollowRedirects !== undefined) + updateData.hcFollowRedirects = hcFollowRedirects; + if (hcStatus !== undefined) updateData.hcStatus = hcStatus; + if (hcTlsServerName !== undefined) + updateData.hcTlsServerName = hcTlsServerName; + if (hcHealthyThreshold !== undefined) + updateData.hcHealthyThreshold = hcHealthyThreshold; + if (hcUnhealthyThreshold !== undefined) + updateData.hcUnhealthyThreshold = hcUnhealthyThreshold; + + const [updated] = await db + .update(targetHealthCheck) + .set(updateData) + .where( + and( + eq(targetHealthCheck.targetHealthCheckId, healthCheckId), + eq(targetHealthCheck.orgId, orgId), + isNull(targetHealthCheck.targetId) + ) + ) + .returning(); + + // Push updated health check to newt if the site is a newt site + const [newt] = await db + .select() + .from(newts) + .where(eq(newts.siteId, updated.siteId)) + .limit(1); + + if (newt) { + await addStandaloneHealthCheck(newt.newtId, updated, newt.version); + } + + return response(res, { + data: { + targetHealthCheckId: updated.targetHealthCheckId, + siteId: updated.siteId ?? null, + name: updated.name ?? null, + hcEnabled: updated.hcEnabled, + hcHealth: updated.hcHealth ?? null, + hcMode: updated.hcMode ?? null, + hcHostname: updated.hcHostname ?? null, + hcPort: updated.hcPort ?? 
null, + hcPath: updated.hcPath ?? null, + hcScheme: updated.hcScheme ?? null, + hcMethod: updated.hcMethod ?? null, + hcInterval: updated.hcInterval ?? null, + hcUnhealthyInterval: updated.hcUnhealthyInterval ?? null, + hcTimeout: updated.hcTimeout ?? null, + hcHeaders: updated.hcHeaders ?? null, + hcFollowRedirects: updated.hcFollowRedirects ?? null, + hcStatus: updated.hcStatus ?? null, + hcTlsServerName: updated.hcTlsServerName ?? null, + hcHealthyThreshold: updated.hcHealthyThreshold ?? null, + hcUnhealthyThreshold: updated.hcUnhealthyThreshold ?? null + }, + success: true, + error: false, + message: "Standalone health check updated successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/private/routers/hybrid.ts b/server/private/routers/hybrid.ts index f689df0a5..b3ef792d9 100644 --- a/server/private/routers/hybrid.ts +++ b/server/private/routers/hybrid.ts @@ -24,14 +24,8 @@ import { User, certificates, exitNodeOrgs, - RemoteExitNode, - olms, - newts, - clients, - sites, domains, orgDomains, - targets, loginPage, loginPageOrg, LoginPage, @@ -70,12 +64,9 @@ import { updateAndGenerateEndpointDestinations, updateSiteBandwidth } from "@server/routers/gerbil"; -import * as gerbil from "@server/routers/gerbil"; import logger from "@server/logger"; -import { decryptData } from "@server/lib/encryption"; +import { decrypt } from "@server/lib/crypto"; import config from "@server/lib/config"; -import privateConfig from "#private/lib/config"; -import * as fs from "fs"; import { exchangeSession } from "@server/routers/badger"; import { validateResourceSessionToken } from "@server/auth/sessions/resource"; import { checkExitNodeOrg, resolveExitNodes } from "#private/lib/exitNodes"; @@ -298,25 +289,11 @@ hybridRouter.get( } ); -let encryptionKeyHex = ""; -let encryptionKey: Buffer; -function loadEncryptData() { - if 
(encryptionKey) { - return; // already loaded - } - - encryptionKeyHex = - privateConfig.getRawPrivateConfig().server.encryption_key; - encryptionKey = Buffer.from(encryptionKeyHex, "hex"); -} - // Get valid certificates for given domains (supports wildcard certs) hybridRouter.get( "/certificates/domains", async (req: Request, res: Response, next: NextFunction) => { try { - loadEncryptData(); // Ensure encryption key is loaded - const parsed = getCertificatesByDomainsQuerySchema.safeParse( req.query ); @@ -447,13 +424,13 @@ hybridRouter.get( const result = filtered.map((cert) => { // Decrypt and save certificate file - const decryptedCert = decryptData( + const decryptedCert = decrypt( cert.certFile!, // is not null from query - encryptionKey + config.getRawConfig().server.secret! ); // Decrypt and save key file - const decryptedKey = decryptData(cert.keyFile!, encryptionKey); + const decryptedKey = decrypt(cert.keyFile!, config.getRawConfig().server.secret!); // Return only the certificate data without org information return { @@ -833,9 +810,12 @@ hybridRouter.get( ) ); - logger.debug(`User ${userId} has roles in org ${orgId}:`, userOrgRoleRows); + logger.debug( + `User ${userId} has roles in org ${orgId}:`, + userOrgRoleRows + ); - return response<{ roleId: number, roleName: string }[]>(res, { + return response<{ roleId: number; roleName: string }[]>(res, { data: userOrgRoleRows, success: true, error: false, diff --git a/server/private/routers/integration.ts b/server/private/routers/integration.ts index 8c1ce4d46..8bae377c0 100644 --- a/server/private/routers/integration.ts +++ b/server/private/routers/integration.ts @@ -14,6 +14,7 @@ import * as orgIdp from "#private/routers/orgIdp"; import * as org from "#private/routers/org"; import * as logs from "#private/routers/auditLogs"; +import * as alertEvents from "#private/routers/alertEvents"; import { verifyApiKeyHasAction, @@ -40,6 +41,27 @@ import { tierMatrix } from "@server/lib/billing/tierMatrix"; export const 
unauthenticated = ua; export const authenticated = a; +authenticated.post( + "/org/:orgId/site/:siteId/trigger-alert", + verifyApiKeyIsRoot, + verifyApiKeyHasAction(ActionsEnum.triggerSiteAlert), + alertEvents.triggerSiteAlert +); + +authenticated.post( + "/org/:orgId/resource/:resourceId/trigger-alert", + verifyApiKeyIsRoot, + verifyApiKeyHasAction(ActionsEnum.triggerResourceAlert), + alertEvents.triggerResourceAlert +); + +authenticated.post( + "/org/:orgId/health-check/:healthCheckId/trigger-alert", + verifyApiKeyIsRoot, + verifyApiKeyHasAction(ActionsEnum.triggerHealthCheckAlert), + alertEvents.triggerHealthCheckAlert +); + authenticated.post( `/org/:orgId/send-usage-notification`, verifyApiKeyIsRoot, // We are the only ones who can use root key so its fine diff --git a/server/private/routers/newt/handleConnectionLogMessage.ts b/server/private/routers/newt/handleConnectionLogMessage.ts index fb6ab3453..6355eb783 100644 --- a/server/private/routers/newt/handleConnectionLogMessage.ts +++ b/server/private/routers/newt/handleConnectionLogMessage.ts @@ -92,9 +92,14 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => { return; } - // Look up the org for this site + // Look up the org for this site and check retention settings const [site] = await db - .select({ orgId: sites.orgId, orgSubnet: orgs.subnet }) + .select({ + orgId: sites.orgId, + orgSubnet: orgs.subnet, + settingsLogRetentionDaysConnection: + orgs.settingsLogRetentionDaysConnection + }) .from(sites) .innerJoin(orgs, eq(sites.orgId, orgs.orgId)) .where(eq(sites.siteId, newt.siteId)); @@ -108,6 +113,13 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => { const orgId = site.orgId; + if (site.settingsLogRetentionDaysConnection === 0) { + logger.debug( + `Connection log retention is disabled for org ${orgId}, skipping` + ); + return; + } + // Extract the CIDR suffix (e.g. 
"/16") from the org subnet so we can // reconstruct the exact subnet string stored on each client record. const cidrSuffix = site.orgSubnet?.includes("/") diff --git a/server/private/routers/newt/handleRequestLogMessage.ts b/server/private/routers/newt/handleRequestLogMessage.ts new file mode 100644 index 000000000..f06c59bc6 --- /dev/null +++ b/server/private/routers/newt/handleRequestLogMessage.ts @@ -0,0 +1,238 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. + */ + +import { db } from "@server/db"; +import { MessageHandler } from "@server/routers/ws"; +import { sites, Newt, orgs, clients, clientSitesAssociationsCache } from "@server/db"; +import { and, eq, inArray } from "drizzle-orm"; +import logger from "@server/logger"; +import { inflate } from "zlib"; +import { promisify } from "util"; +import { logRequestAudit } from "@server/routers/badger/logRequestAudit"; +import { getCountryCodeForIp } from "@server/lib/geoip"; + +export async function flushRequestLogToDb(): Promise { + return; +} + +const zlibInflate = promisify(inflate); + +interface HTTPRequestLogData { + requestId: string; + resourceId: number; // siteResourceId + timestamp: string; // ISO 8601 + method: string; + scheme: string; // "http" or "https" + host: string; + path: string; + rawQuery?: string; + userAgent?: string; + sourceAddr: string; // ip:port + tls: boolean; +} + +/** + * Decompress a base64-encoded zlib-compressed string into parsed JSON. 
+ */ +async function decompressRequestLog( + compressed: string +): Promise { + const compressedBuffer = Buffer.from(compressed, "base64"); + const decompressed = await zlibInflate(compressedBuffer); + const jsonString = decompressed.toString("utf-8"); + const parsed = JSON.parse(jsonString); + + if (!Array.isArray(parsed)) { + throw new Error("Decompressed request log data is not an array"); + } + + return parsed; +} + +export const handleRequestLogMessage: MessageHandler = async (context) => { + const { message, client } = context; + const newt = client as Newt; + + if (!newt) { + logger.warn("Request log received but no newt client in context"); + return; + } + + if (!newt.siteId) { + logger.warn("Request log received but newt has no siteId"); + return; + } + + if (!message.data?.compressed) { + logger.warn("Request log message missing compressed data"); + return; + } + + // Look up the org for this site and check retention settings + const [site] = await db + .select({ + orgId: sites.orgId, + orgSubnet: orgs.subnet, + settingsLogRetentionDaysRequest: + orgs.settingsLogRetentionDaysRequest + }) + .from(sites) + .innerJoin(orgs, eq(sites.orgId, orgs.orgId)) + .where(eq(sites.siteId, newt.siteId)); + + if (!site) { + logger.warn( + `Request log received but site ${newt.siteId} not found in database` + ); + return; + } + + const orgId = site.orgId; + + if (site.settingsLogRetentionDaysRequest === 0) { + logger.debug( + `Request log retention is disabled for org ${orgId}, skipping` + ); + return; + } + + let entries: HTTPRequestLogData[]; + try { + entries = await decompressRequestLog(message.data.compressed); + } catch (error) { + logger.error("Failed to decompress request log data:", error); + return; + } + + if (entries.length === 0) { + return; + } + + logger.debug(`Request log entries: ${JSON.stringify(entries)}`); + + // Build a map from sourceIp → external endpoint string by joining clients + // with clientSitesAssociationsCache. 
The endpoint is the real-world IP:port + // of the client device and is used for GeoIP lookup. + const ipToEndpoint = new Map(); + + const cidrSuffix = site.orgSubnet?.includes("/") + ? site.orgSubnet.substring(site.orgSubnet.indexOf("/")) + : null; + + if (cidrSuffix) { + const uniqueSourceAddrs = new Set(); + for (const entry of entries) { + if (entry.sourceAddr) { + uniqueSourceAddrs.add(entry.sourceAddr); + } + } + + if (uniqueSourceAddrs.size > 0) { + const subnetQueries = Array.from(uniqueSourceAddrs).map((addr) => { + const ip = addr.includes(":") ? addr.split(":")[0] : addr; + return `${ip}${cidrSuffix}`; + }); + + const matchedClients = await db + .select({ + subnet: clients.subnet, + endpoint: clientSitesAssociationsCache.endpoint + }) + .from(clients) + .innerJoin( + clientSitesAssociationsCache, + and( + eq( + clientSitesAssociationsCache.clientId, + clients.clientId + ), + eq(clientSitesAssociationsCache.siteId, newt.siteId) + ) + ) + .where( + and( + eq(clients.orgId, orgId), + inArray(clients.subnet, subnetQueries) + ) + ); + + for (const c of matchedClients) { + if (c.endpoint) { + const ip = c.subnet.split("/")[0]; + ipToEndpoint.set(ip, c.endpoint); + } + } + } + } + + for (const entry of entries) { + if ( + !entry.requestId || + !entry.resourceId || + !entry.method || + !entry.scheme || + !entry.host || + !entry.path || + !entry.sourceAddr + ) { + logger.debug( + `Skipping request log entry with missing required fields: ${JSON.stringify(entry)}` + ); + continue; + } + + const originalRequestURL = + entry.scheme + + "://" + + entry.host + + entry.path + + (entry.rawQuery ? "?" + entry.rawQuery : ""); + + // Resolve the client's external endpoint for GeoIP lookup. + // sourceAddr is the WireGuard IP (possibly ip:port), so strip the port. + const sourceIp = entry.sourceAddr.includes(":") + ? 
entry.sourceAddr.split(":")[0] + : entry.sourceAddr; + const endpoint = ipToEndpoint.get(sourceIp); + let location: string | undefined; + if (endpoint) { + const endpointIp = endpoint.includes(":") + ? endpoint.split(":")[0] + : endpoint; + location = await getCountryCodeForIp(endpointIp); + } + + await logRequestAudit( + { + action: true, + reason: 108, + siteResourceId: entry.resourceId, + orgId, + location + }, + { + path: entry.path, + originalRequestURL, + scheme: entry.scheme, + host: entry.host, + method: entry.method, + tls: entry.tls, + requestIp: entry.sourceAddr + } + ); + } + + logger.debug( + `Buffered ${entries.length} request log entry/entries from newt ${newt.newtId} (site ${newt.siteId})` + ); +}; diff --git a/server/private/routers/newt/index.ts b/server/private/routers/newt/index.ts index 59d8e980a..94dfc8f05 100644 --- a/server/private/routers/newt/index.ts +++ b/server/private/routers/newt/index.ts @@ -12,3 +12,4 @@ */ export * from "./handleConnectionLogMessage"; +export * from "./handleRequestLogMessage"; diff --git a/server/private/routers/remoteExitNode/handleRemoteExitNodePingMessage.ts b/server/private/routers/remoteExitNode/handleRemoteExitNodePingMessage.ts index cc7578791..c2c710e11 100644 --- a/server/private/routers/remoteExitNode/handleRemoteExitNodePingMessage.ts +++ b/server/private/routers/remoteExitNode/handleRemoteExitNodePingMessage.ts @@ -11,78 +11,12 @@ * This file is not licensed under the AGPLv3. 
*/ -import { db, exitNodes, sites } from "@server/db"; +import { db, exitNodes } from "@server/db"; import { MessageHandler } from "@server/routers/ws"; -import { clients, RemoteExitNode } from "@server/db"; -import { eq, lt, isNull, and, or, inArray } from "drizzle-orm"; +import { RemoteExitNode } from "@server/db"; +import { eq } from "drizzle-orm"; import logger from "@server/logger"; -// Track if the offline checker interval is running -let offlineCheckerInterval: NodeJS.Timeout | null = null; -const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds -const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes - -/** - * Starts the background interval that checks for clients that haven't pinged recently - * and marks them as offline - */ -export const startRemoteExitNodeOfflineChecker = (): void => { - if (offlineCheckerInterval) { - return; // Already running - } - - offlineCheckerInterval = setInterval(async () => { - try { - const twoMinutesAgo = Math.floor( - (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 - ); - - // Find clients that haven't pinged in the last 2 minutes and mark them as offline - const offlineNodes = await db - .update(exitNodes) - .set({ online: false }) - .where( - and( - eq(exitNodes.online, true), - eq(exitNodes.type, "remoteExitNode"), - or( - lt(exitNodes.lastPing, twoMinutesAgo), - isNull(exitNodes.lastPing) - ) - ) - ) - .returning(); - - if (offlineNodes.length > 0) { - logger.info( - `checkRemoteExitNodeOffline: Marked ${offlineNodes.length} remoteExitNode client(s) offline due to inactivity` - ); - - for (const offlineClient of offlineNodes) { - logger.debug( - `checkRemoteExitNodeOffline: Client ${offlineClient.exitNodeId} marked offline (lastPing: ${offlineClient.lastPing})` - ); - } - } - } catch (error) { - logger.error("Error in offline checker interval", { error }); - } - }, OFFLINE_CHECK_INTERVAL); - - logger.debug("Started offline checker interval"); -}; - -/** - * Stops the background interval that checks for 
offline clients - */ -export const stopRemoteExitNodeOfflineChecker = (): void => { - if (offlineCheckerInterval) { - clearInterval(offlineCheckerInterval); - offlineCheckerInterval = null; - logger.info("Stopped offline checker interval"); - } -}; - /** * Handles ping messages from clients and responds with pong */ diff --git a/server/private/routers/remoteExitNode/index.ts b/server/private/routers/remoteExitNode/index.ts index bfbf98fed..730f6b693 100644 --- a/server/private/routers/remoteExitNode/index.ts +++ b/server/private/routers/remoteExitNode/index.ts @@ -21,3 +21,4 @@ export * from "./deleteRemoteExitNode"; export * from "./listRemoteExitNodes"; export * from "./pickRemoteExitNodeDefaults"; export * from "./quickStartRemoteExitNode"; +export * from "./offlineChecker"; diff --git a/server/private/routers/remoteExitNode/offlineChecker.ts b/server/private/routers/remoteExitNode/offlineChecker.ts new file mode 100644 index 000000000..7f5e906f8 --- /dev/null +++ b/server/private/routers/remoteExitNode/offlineChecker.ts @@ -0,0 +1,82 @@ +/* + * This file is part of a proprietary work. + * + * Copyright (c) 2025-2026 Fossorial, Inc. + * All rights reserved. + * + * This file is licensed under the Fossorial Commercial License. + * You may not use this file except in compliance with the License. + * Unauthorized use, copying, modification, or distribution is strictly prohibited. + * + * This file is not licensed under the AGPLv3. 
+ */ + +import { db, exitNodes } from "@server/db"; +import { eq, lt, isNull, and, or } from "drizzle-orm"; +import logger from "@server/logger"; + +// Track if the offline checker interval is running +let offlineCheckerInterval: NodeJS.Timeout | null = null; +const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds +const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes + +/** + * Starts the background interval that checks for clients that haven't pinged recently + * and marks them as offline + */ +export const startRemoteExitNodeOfflineChecker = (): void => { + if (offlineCheckerInterval) { + return; // Already running + } + + offlineCheckerInterval = setInterval(async () => { + try { + const twoMinutesAgo = Math.floor( + (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 + ); + + // Find clients that haven't pinged in the last 2 minutes and mark them as offline + const offlineNodes = await db + .update(exitNodes) + .set({ online: false }) + .where( + and( + eq(exitNodes.online, true), + eq(exitNodes.type, "remoteExitNode"), + or( + lt(exitNodes.lastPing, twoMinutesAgo), + isNull(exitNodes.lastPing) + ) + ) + ) + .returning(); + + if (offlineNodes.length > 0) { + logger.info( + `checkRemoteExitNodeOffline: Marked ${offlineNodes.length} remoteExitNode client(s) offline due to inactivity` + ); + + for (const offlineClient of offlineNodes) { + logger.debug( + `checkRemoteExitNodeOffline: Client ${offlineClient.exitNodeId} marked offline (lastPing: ${offlineClient.lastPing})` + ); + } + } + } catch (error) { + logger.error("Error in offline checker interval", { error }); + } + }, OFFLINE_CHECK_INTERVAL); + + logger.debug("Started offline checker interval"); +}; + +/** + * Stops the background interval that checks for offline clients + */ +export const stopRemoteExitNodeOfflineChecker = (): void => { + if (offlineCheckerInterval) { + clearInterval(offlineCheckerInterval); + offlineCheckerInterval = null; + logger.info("Stopped offline checker interval"); + } +}; 
diff --git a/server/private/routers/ssh/signSshKey.ts b/server/private/routers/ssh/signSshKey.ts index f929aeca5..82044c0ad 100644 --- a/server/private/routers/ssh/signSshKey.ts +++ b/server/private/routers/ssh/signSshKey.ts @@ -21,7 +21,7 @@ import { roles, roundTripMessageTracker, siteResources, - sites, + siteNetworks, userOrgs } from "@server/db"; import { logAccessAudit } from "#private/lib/logAccessAudit"; @@ -63,10 +63,12 @@ const bodySchema = z export type SignSshKeyResponse = { certificate: string; + messageIds: number[]; messageId: number; sshUsername: string; sshHost: string; resourceId: number; + siteIds: number[]; siteId: number; keyId: string; validPrincipals: string[]; @@ -260,10 +262,7 @@ export async function signSshKey( .update(userOrgs) .set({ pamUsername: usernameToUse }) .where( - and( - eq(userOrgs.orgId, orgId), - eq(userOrgs.userId, userId) - ) + and(eq(userOrgs.orgId, orgId), eq(userOrgs.userId, userId)) ); } else { usernameToUse = userOrg.pamUsername; @@ -395,21 +394,12 @@ export async function signSshKey( homedir = roleRows[0].sshCreateHomeDir ?? 
null; } - // get the site - const [newt] = await db - .select() - .from(newts) - .where(eq(newts.siteId, resource.siteId)) - .limit(1); + const sites = await db + .select({ siteId: siteNetworks.siteId }) + .from(siteNetworks) + .where(eq(siteNetworks.networkId, resource.networkId!)); - if (!newt) { - return next( - createHttpError( - HttpCode.INTERNAL_SERVER_ERROR, - "Site associated with resource not found" - ) - ); - } + const siteIds = sites.map((site) => site.siteId); // Sign the public key const now = BigInt(Math.floor(Date.now() / 1000)); @@ -423,43 +413,64 @@ export async function signSshKey( validBefore: now + validFor }); - const [message] = await db - .insert(roundTripMessageTracker) - .values({ - wsClientId: newt.newtId, - messageType: `newt/pam/connection`, - sentAt: Math.floor(Date.now() / 1000) - }) - .returning(); + const messageIds: number[] = []; + for (const siteId of siteIds) { + // get the site + const [newt] = await db + .select() + .from(newts) + .where(eq(newts.siteId, siteId)) + .limit(1); - if (!message) { - return next( - createHttpError( - HttpCode.INTERNAL_SERVER_ERROR, - "Failed to create message tracker entry" - ) - ); - } - - await sendToClient(newt.newtId, { - type: `newt/pam/connection`, - data: { - messageId: message.messageId, - orgId: orgId, - agentPort: resource.authDaemonPort ?? 
22123, - externalAuthDaemon: resource.authDaemonMode === "remote", - agentHost: resource.destination, - caCert: caKeys.publicKeyOpenSSH, - username: usernameToUse, - niceId: resource.niceId, - metadata: { - sudoMode: sudoMode, - sudoCommands: parsedSudoCommands, - homedir: homedir, - groups: parsedGroups - } + if (!newt) { + return next( + createHttpError( + HttpCode.INTERNAL_SERVER_ERROR, + "Site associated with resource not found" + ) + ); } - }); + + const [message] = await db + .insert(roundTripMessageTracker) + .values({ + wsClientId: newt.newtId, + messageType: `newt/pam/connection`, + sentAt: Math.floor(Date.now() / 1000) + }) + .returning(); + + if (!message) { + return next( + createHttpError( + HttpCode.INTERNAL_SERVER_ERROR, + "Failed to create message tracker entry" + ) + ); + } + + messageIds.push(message.messageId); + + await sendToClient(newt.newtId, { + type: `newt/pam/connection`, + data: { + messageId: message.messageId, + orgId: orgId, + agentPort: resource.authDaemonPort ?? 
22123, + externalAuthDaemon: resource.authDaemonMode === "remote", + agentHost: resource.destination, + caCert: caKeys.publicKeyOpenSSH, + username: usernameToUse, + niceId: resource.niceId, + metadata: { + sudoMode: sudoMode, + sudoCommands: parsedSudoCommands, + homedir: homedir, + groups: parsedGroups + } + } + }); + } const expiresIn = Number(validFor); // seconds @@ -480,7 +491,7 @@ export async function signSshKey( metadata: JSON.stringify({ resourceId: resource.siteResourceId, resource: resource.name, - siteId: resource.siteId, + siteIds: siteIds }) }); @@ -494,7 +505,7 @@ export async function signSshKey( : undefined, metadata: { resourceName: resource.name, - siteId: resource.siteId, + siteId: siteIds[0], sshUsername: usernameToUse, sshHost: sshHost }, @@ -505,11 +516,13 @@ export async function signSshKey( return response(res, { data: { certificate: cert.certificate, - messageId: message.messageId, + messageIds: messageIds, + messageId: messageIds[0], // just pick the first one for backward compatibility sshUsername: usernameToUse, sshHost: sshHost, resourceId: resource.siteResourceId, - siteId: resource.siteId, + siteIds: siteIds, + siteId: siteIds[0], // just pick the first one for backward compatibility keyId: cert.keyId, validPrincipals: cert.validPrincipals, validAfter: cert.validAfter.toISOString(), diff --git a/server/private/routers/ws/messageHandlers.ts b/server/private/routers/ws/messageHandlers.ts index 00a9a0ad6..b2553871e 100644 --- a/server/private/routers/ws/messageHandlers.ts +++ b/server/private/routers/ws/messageHandlers.ts @@ -18,12 +18,13 @@ import { } from "#private/routers/remoteExitNode"; import { MessageHandler } from "@server/routers/ws"; import { build } from "@server/build"; -import { handleConnectionLogMessage } from "#private/routers/newt"; +import { handleConnectionLogMessage, handleRequestLogMessage } from "#private/routers/newt"; export const messageHandlers: Record = { "remoteExitNode/register": 
handleRemoteExitNodeRegisterMessage, "remoteExitNode/ping": handleRemoteExitNodePingMessage, "newt/access-log": handleConnectionLogMessage, + "newt/request-log": handleRequestLogMessage, }; if (build != "saas") { diff --git a/server/routers/auditLogs/queryRequestAuditLog.ts b/server/routers/auditLogs/queryRequestAuditLog.ts index 176a9e5d3..000ec9815 100644 --- a/server/routers/auditLogs/queryRequestAuditLog.ts +++ b/server/routers/auditLogs/queryRequestAuditLog.ts @@ -1,8 +1,8 @@ -import { logsDb, primaryLogsDb, requestAuditLog, resources, db, primaryDb } from "@server/db"; +import { logsDb, primaryLogsDb, requestAuditLog, resources, siteResources, db, primaryDb } from "@server/db"; import { registry } from "@server/openApi"; import { NextFunction } from "express"; import { Request, Response } from "express"; -import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm"; +import { eq, gt, lt, and, count, desc, inArray, isNull, or } from "drizzle-orm"; import { OpenAPITags } from "@server/openApi"; import { z } from "zod"; import createHttpError from "http-errors"; @@ -92,7 +92,10 @@ function getWhere(data: Q) { lt(requestAuditLog.timestamp, data.timeEnd), eq(requestAuditLog.orgId, data.orgId), data.resourceId - ? eq(requestAuditLog.resourceId, data.resourceId) + ? or( + eq(requestAuditLog.resourceId, data.resourceId), + eq(requestAuditLog.siteResourceId, data.resourceId) + ) : undefined, data.actor ? eq(requestAuditLog.actor, data.actor) : undefined, data.method ? 
eq(requestAuditLog.method, data.method) : undefined, @@ -110,15 +113,16 @@ export function queryRequest(data: Q) { return primaryLogsDb .select({ id: requestAuditLog.id, - timestamp: requestAuditLog.timestamp, - orgId: requestAuditLog.orgId, - action: requestAuditLog.action, - reason: requestAuditLog.reason, - actorType: requestAuditLog.actorType, - actor: requestAuditLog.actor, - actorId: requestAuditLog.actorId, - resourceId: requestAuditLog.resourceId, - ip: requestAuditLog.ip, + timestamp: requestAuditLog.timestamp, + orgId: requestAuditLog.orgId, + action: requestAuditLog.action, + reason: requestAuditLog.reason, + actorType: requestAuditLog.actorType, + actor: requestAuditLog.actor, + actorId: requestAuditLog.actorId, + resourceId: requestAuditLog.resourceId, + siteResourceId: requestAuditLog.siteResourceId, + ip: requestAuditLog.ip, location: requestAuditLog.location, userAgent: requestAuditLog.userAgent, metadata: requestAuditLog.metadata, @@ -137,37 +141,73 @@ export function queryRequest(data: Q) { } async function enrichWithResourceDetails(logs: Awaited>) { - // If logs database is the same as main database, we can do a join - // Otherwise, we need to fetch resource details separately const resourceIds = logs .map(log => log.resourceId) .filter((id): id is number => id !== null && id !== undefined); - if (resourceIds.length === 0) { + const siteResourceIds = logs + .filter(log => log.resourceId == null && log.siteResourceId != null) + .map(log => log.siteResourceId) + .filter((id): id is number => id !== null && id !== undefined); + + if (resourceIds.length === 0 && siteResourceIds.length === 0) { return logs.map(log => ({ ...log, resourceName: null, resourceNiceId: null })); } - // Fetch resource details from main database - const resourceDetails = await primaryDb - .select({ - resourceId: resources.resourceId, - name: resources.name, - niceId: resources.niceId - }) - .from(resources) - .where(inArray(resources.resourceId, resourceIds)); + const 
resourceMap = new Map(); - // Create a map for quick lookup - const resourceMap = new Map( - resourceDetails.map(r => [r.resourceId, { name: r.name, niceId: r.niceId }]) - ); + if (resourceIds.length > 0) { + const resourceDetails = await primaryDb + .select({ + resourceId: resources.resourceId, + name: resources.name, + niceId: resources.niceId + }) + .from(resources) + .where(inArray(resources.resourceId, resourceIds)); + + for (const r of resourceDetails) { + resourceMap.set(r.resourceId, { name: r.name, niceId: r.niceId }); + } + } + + const siteResourceMap = new Map(); + + if (siteResourceIds.length > 0) { + const siteResourceDetails = await primaryDb + .select({ + siteResourceId: siteResources.siteResourceId, + name: siteResources.name, + niceId: siteResources.niceId + }) + .from(siteResources) + .where(inArray(siteResources.siteResourceId, siteResourceIds)); + + for (const r of siteResourceDetails) { + siteResourceMap.set(r.siteResourceId, { name: r.name, niceId: r.niceId }); + } + } // Enrich logs with resource details - return logs.map(log => ({ - ...log, - resourceName: log.resourceId ? resourceMap.get(log.resourceId)?.name ?? null : null, - resourceNiceId: log.resourceId ? resourceMap.get(log.resourceId)?.niceId ?? null : null - })); + return logs.map(log => { + if (log.resourceId != null) { + const details = resourceMap.get(log.resourceId); + return { + ...log, + resourceName: details?.name ?? null, + resourceNiceId: details?.niceId ?? null + }; + } else if (log.siteResourceId != null) { + const details = siteResourceMap.get(log.siteResourceId); + return { + ...log, + resourceId: log.siteResourceId, + resourceName: details?.name ?? null, + resourceNiceId: details?.niceId ?? 
null + }; + } + return { ...log, resourceName: null, resourceNiceId: null }; + }); } export function countRequestQuery(data: Q) { @@ -211,7 +251,8 @@ async function queryUniqueFilterAttributes( uniqueLocations, uniqueHosts, uniquePaths, - uniqueResources + uniqueResources, + uniqueSiteResources ] = await Promise.all([ primaryLogsDb .selectDistinct({ actor: requestAuditLog.actor }) @@ -239,6 +280,13 @@ async function queryUniqueFilterAttributes( }) .from(requestAuditLog) .where(baseConditions) + .limit(DISTINCT_LIMIT + 1), + primaryLogsDb + .selectDistinct({ + id: requestAuditLog.siteResourceId + }) + .from(requestAuditLog) + .where(and(baseConditions, isNull(requestAuditLog.resourceId))) .limit(DISTINCT_LIMIT + 1) ]); @@ -259,6 +307,10 @@ async function queryUniqueFilterAttributes( .map(row => row.id) .filter((id): id is number => id !== null); + const siteResourceIds = uniqueSiteResources + .map(row => row.id) + .filter((id): id is number => id !== null); + let resourcesWithNames: Array<{ id: number; name: string | null }> = []; if (resourceIds.length > 0) { @@ -270,10 +322,31 @@ async function queryUniqueFilterAttributes( .from(resources) .where(inArray(resources.resourceId, resourceIds)); - resourcesWithNames = resourceDetails.map(r => ({ - id: r.resourceId, - name: r.name - })); + resourcesWithNames = [ + ...resourcesWithNames, + ...resourceDetails.map(r => ({ + id: r.resourceId, + name: r.name + })) + ]; + } + + if (siteResourceIds.length > 0) { + const siteResourceDetails = await primaryDb + .select({ + siteResourceId: siteResources.siteResourceId, + name: siteResources.name + }) + .from(siteResources) + .where(inArray(siteResources.siteResourceId, siteResourceIds)); + + resourcesWithNames = [ + ...resourcesWithNames, + ...siteResourceDetails.map(r => ({ + id: r.siteResourceId, + name: r.name + })) + ]; } return { diff --git a/server/routers/auditLogs/types.ts b/server/routers/auditLogs/types.ts index 4c278cba5..972eebfe3 100644 --- 
a/server/routers/auditLogs/types.ts +++ b/server/routers/auditLogs/types.ts @@ -28,6 +28,7 @@ export type QueryRequestAuditLogResponse = { actor: string | null; actorId: string | null; resourceId: number | null; + siteResourceId: number | null; resourceNiceId: string | null; resourceName: string | null; ip: string | null; diff --git a/server/routers/badger/logRequestAudit.ts b/server/routers/badger/logRequestAudit.ts index 92d01332e..884fb7ae4 100644 --- a/server/routers/badger/logRequestAudit.ts +++ b/server/routers/badger/logRequestAudit.ts @@ -18,6 +18,7 @@ Reasons: 105 - Valid Password 106 - Valid email 107 - Valid SSO +108 - Connected Client 201 - Resource Not Found 202 - Resource Blocked @@ -38,6 +39,7 @@ const auditLogBuffer: Array<{ metadata: any; action: boolean; resourceId?: number; + siteResourceId?: number; reason: number; location?: string; originalRequestURL: string; @@ -186,6 +188,7 @@ export async function logRequestAudit( action: boolean; reason: number; resourceId?: number; + siteResourceId?: number; orgId?: string; location?: string; user?: { username: string; userId: string }; @@ -262,6 +265,7 @@ export async function logRequestAudit( metadata: sanitizeString(metadata), action: data.action, resourceId: data.resourceId, + siteResourceId: data.siteResourceId, reason: data.reason, location: sanitizeString(data.location), originalRequestURL: sanitizeString(body.originalRequestURL) ?? 
"", diff --git a/server/routers/external.ts b/server/routers/external.ts index d7729bca5..a17c88fb1 100644 --- a/server/routers/external.ts +++ b/server/routers/external.ts @@ -285,6 +285,13 @@ authenticated.get( site.listContainers ); +authenticated.get( + "/site/:siteId/status-history", + verifySiteAccess, + verifyUserHasAction(ActionsEnum.getSite), + site.getSiteStatusHistory +); + // Site Resource endpoints authenticated.put( "/org/:orgId/site-resource", @@ -420,6 +427,13 @@ authenticated.get( resource.listResources ); +authenticated.get( + "/resource/:resourceId/status-history", + verifyResourceAccess, + verifyUserHasAction(ActionsEnum.getResource), + resource.getResourceStatusHistory +); + authenticated.get( "/org/:orgId/resources", verifyOrgAccess, diff --git a/server/routers/gerbil/receiveBandwidth.ts b/server/routers/gerbil/receiveBandwidth.ts index dcd897471..eacf3dad4 100644 --- a/server/routers/gerbil/receiveBandwidth.ts +++ b/server/routers/gerbil/receiveBandwidth.ts @@ -88,11 +88,11 @@ async function dbQueryRows>( ): Promise { const anyDb = db as any; if (typeof anyDb.execute === "function") { - // PostgreSQL (node-postgres via Drizzle) — returns { rows: [...] } or an array + // PostgreSQL (node-postgres via Drizzle) - returns { rows: [...] } or an array const result = await anyDb.execute(query); return (Array.isArray(result) ? result : (result.rows ?? [])) as T[]; } - // SQLite (better-sqlite3 via Drizzle) — returns an array directly + // SQLite (better-sqlite3 via Drizzle) - returns an array directly return (await anyDb.all(query)) as T[]; } @@ -106,7 +106,7 @@ function isSQLite(): boolean { * Swaps out the accumulator before writing so that any bandwidth messages * received during the flush are captured in the new accumulator rather than * being lost or causing contention. Sites are updated in chunks via a single - * batch UPDATE per chunk. Failed chunks are discarded — exact per-flush + * batch UPDATE per chunk. 
Failed chunks are discarded - exact per-flush * accuracy is not critical and re-queuing is not worth the added complexity. * * This function is exported so that the application's graceful-shutdown @@ -125,7 +125,7 @@ export async function flushSiteBandwidthToDb(): Promise { const currentTime = new Date().toISOString(); // Sort by publicKey for consistent lock ordering across concurrent - // writers — deadlock-prevention strategy. + // writers - deadlock-prevention strategy. const sortedEntries = [...snapshot.entries()].sort(([a], [b]) => a.localeCompare(b) ); @@ -150,7 +150,7 @@ export async function flushSiteBandwidthToDb(): Promise { try { rows = await withDeadlockRetry(async () => { if (isSQLite()) { - // SQLite: one UPDATE per row — no need for batch efficiency here. + // SQLite: one UPDATE per row - no need for batch efficiency here. const results: { orgId: string; pubKey: string }[] = []; for (const [publicKey, { bytesIn, bytesOut }] of chunk) { const result = await dbQueryRows<{ @@ -170,7 +170,7 @@ export async function flushSiteBandwidthToDb(): Promise { return results; } - // PostgreSQL: batch UPDATE … FROM (VALUES …) — single round-trip per chunk. + // PostgreSQL: batch UPDATE … FROM (VALUES …) - single round-trip per chunk. const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) => sql`(${publicKey}::text, ${bytesIn}::real, ${bytesOut}::real)` ); @@ -191,7 +191,7 @@ export async function flushSiteBandwidthToDb(): Promise { `Failed to flush bandwidth chunk [${i}–${chunkEnd}], discarding ${chunk.length} site(s):`, error ); - // Discard the chunk — exact per-flush accuracy is not critical. + // Discard the chunk - exact per-flush accuracy is not critical. continue; } @@ -232,7 +232,7 @@ export async function flushSiteBandwidthToDb(): Promise { totalBandwidth ); if (bandwidthUsage) { - // Fire-and-forget — don't block the flush on limit checking. + // Fire-and-forget - don't block the flush on limit checking. 
usageService .checkLimitSet( orgId, @@ -298,7 +298,7 @@ export async function updateSiteBandwidth( exitNodeId?: number ): Promise { for (const { publicKey, bytesIn, bytesOut } of bandwidthData) { - // Skip peers that haven't transferred any data — writing zeros to the + // Skip peers that haven't transferred any data - writing zeros to the // database would be a no-op anyway. if (bytesIn <= 0 && bytesOut <= 0) { continue; diff --git a/server/routers/healthChecks/types.ts b/server/routers/healthChecks/types.ts new file mode 100644 index 000000000..0def60833 --- /dev/null +++ b/server/routers/healthChecks/types.ts @@ -0,0 +1,34 @@ +export type ListHealthChecksResponse = { + healthChecks: { + targetHealthCheckId: number; + name: string; + siteId: number | null; + siteName: string | null; + siteNiceId: string | null; + hcEnabled: boolean; + hcHealth: "unknown" | "healthy" | "unhealthy"; + hcMode: string | null; + hcHostname: string | null; + hcPort: number | null; + hcPath: string | null; + hcScheme: string | null; + hcMethod: string | null; + hcInterval: number | null; + hcUnhealthyInterval: number | null; + hcTimeout: number | null; + hcHeaders: string | null; + hcFollowRedirects: boolean | null; + hcStatus: number | null; + hcTlsServerName: string | null; + hcHealthyThreshold: number | null; + hcUnhealthyThreshold: number | null; + resourceId: number | null; + resourceName: string | null; + resourceNiceId: string | null; + }[]; + pagination: { + total: number; + limit: number; + offset: number; + }; +}; diff --git a/server/routers/newt/buildConfiguration.ts b/server/routers/newt/buildConfiguration.ts index afb196152..f87d38450 100644 --- a/server/routers/newt/buildConfiguration.ts +++ b/server/routers/newt/buildConfiguration.ts @@ -4,8 +4,10 @@ import { clientSitesAssociationsCache, db, ExitNode, + networks, resources, Site, + siteNetworks, siteResources, targetHealthCheck, targets @@ -84,7 +86,8 @@ export async function buildClientConfigurationForNewtClient( // ) 
// ); - if (!client.clientSitesAssociationsCache.isJitMode) { // if we are adding sites through jit then dont add the site to the olm + if (!client.clientSitesAssociationsCache.isJitMode) { + // if we are adding sites through jit then dont add the site to the olm // update the peer info on the olm // if the peer has not been added yet this will be a no-op await updatePeer(client.clients.clientId, { @@ -137,11 +140,14 @@ export async function buildClientConfigurationForNewtClient( // Filter out any null values from peers that didn't have an olm const validPeers = peers.filter((peer) => peer !== null); - // Get all enabled site resources for this site + // Get all enabled site resources for this site by joining through siteNetworks and networks const allSiteResources = await db .select() .from(siteResources) - .where(eq(siteResources.siteId, siteId)); + .innerJoin(networks, eq(siteResources.networkId, networks.networkId)) + .innerJoin(siteNetworks, eq(networks.networkId, siteNetworks.networkId)) + .where(eq(siteNetworks.siteId, siteId)) + .then((rows) => rows.map((r) => r.siteResources)); const targetsToSend: SubnetProxyTargetV2[] = []; @@ -168,7 +174,7 @@ export async function buildClientConfigurationForNewtClient( ) ); - const resourceTargets = generateSubnetProxyTargetV2( + const resourceTargets = await generateSubnetProxyTargetV2( resource, resourceClients ); @@ -184,7 +190,10 @@ export async function buildClientConfigurationForNewtClient( }; } -export async function buildTargetConfigurationForNewtClient(siteId: number) { +export async function buildTargetConfigurationForNewtClient( + siteId: number, + version?: string | null +) { // Get all enabled targets with their resource protocol information const allTargets = await db .select({ @@ -195,7 +204,15 @@ export async function buildTargetConfigurationForNewtClient(siteId: number) { port: targets.port, internalPort: targets.internalPort, enabled: targets.enabled, - protocol: resources.protocol, + protocol: 
resources.protocol + }) + .from(targets) + .innerJoin(resources, eq(targets.resourceId, resources.resourceId)) + .where(and(eq(targets.siteId, siteId), eq(targets.enabled, true))); + + const allHealthChecks = await db + .select({ + targetHealthCheckId: targetHealthCheck.targetHealthCheckId, hcEnabled: targetHealthCheck.hcEnabled, hcPath: targetHealthCheck.hcPath, hcScheme: targetHealthCheck.hcScheme, @@ -206,17 +223,15 @@ export async function buildTargetConfigurationForNewtClient(siteId: number) { hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval, hcTimeout: targetHealthCheck.hcTimeout, hcHeaders: targetHealthCheck.hcHeaders, + hcFollowRedirects: targetHealthCheck.hcFollowRedirects, hcMethod: targetHealthCheck.hcMethod, hcTlsServerName: targetHealthCheck.hcTlsServerName, - hcStatus: targetHealthCheck.hcStatus + hcStatus: targetHealthCheck.hcStatus, + hcHealthyThreshold: targetHealthCheck.hcHealthyThreshold, + hcUnhealthyThreshold: targetHealthCheck.hcUnhealthyThreshold }) - .from(targets) - .innerJoin(resources, eq(targets.resourceId, resources.resourceId)) - .leftJoin( - targetHealthCheck, - eq(targets.targetId, targetHealthCheck.targetId) - ) - .where(and(eq(targets.siteId, siteId), eq(targets.enabled, true))); + .from(targetHealthCheck) + .where(eq(targetHealthCheck.siteId, siteId)); const { tcpTargets, udpTargets } = allTargets.reduce( (acc, target) => { @@ -240,19 +255,14 @@ export async function buildTargetConfigurationForNewtClient(siteId: number) { { tcpTargets: [] as string[], udpTargets: [] as string[] } ); - const healthCheckTargets = allTargets.map((target) => { + const healthCheckTargets = allHealthChecks.map((target) => { // make sure the stuff is defined - if ( - !target.hcPath || - !target.hcHostname || - !target.hcPort || - !target.hcInterval || - !target.hcMethod - ) { - // logger.debug( - // `Skipping adding target health check ${target.targetId} due to missing health check fields` - // ); - return null; // Skip targets with missing 
health check fields + const isTCP = target.hcMode?.toLowerCase() === "tcp"; + if (!target.hcHostname || !target.hcPort || !target.hcInterval) { + return null; + } + if (!isTCP && (!target.hcPath || !target.hcMethod)) { + return null; } // parse headers @@ -269,7 +279,7 @@ export async function buildTargetConfigurationForNewtClient(siteId: number) { } return { - id: target.targetId, + id: target.targetHealthCheckId, hcEnabled: target.hcEnabled, hcPath: target.hcPath, hcScheme: target.hcScheme, @@ -280,9 +290,12 @@ export async function buildTargetConfigurationForNewtClient(siteId: number) { hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds hcTimeout: target.hcTimeout, // in seconds hcHeaders: hcHeadersSend, + hcFollowRedirects: target.hcFollowRedirects, hcMethod: target.hcMethod, hcTlsServerName: target.hcTlsServerName, - hcStatus: target.hcStatus + hcStatus: target.hcStatus, + hcHealthyThreshold: target.hcHealthyThreshold, + hcUnhealthyThreshold: target.hcUnhealthyThreshold }; }); diff --git a/server/routers/newt/handleGetConfigMessage.ts b/server/routers/newt/handleNewtGetConfigMessage.ts similarity index 92% rename from server/routers/newt/handleGetConfigMessage.ts rename to server/routers/newt/handleNewtGetConfigMessage.ts index 9c67f53ee..787151a5a 100644 --- a/server/routers/newt/handleGetConfigMessage.ts +++ b/server/routers/newt/handleNewtGetConfigMessage.ts @@ -10,7 +10,7 @@ import { convertTargetsIfNessicary } from "../client/targets"; import { canCompress } from "@server/lib/clientVersionChecks"; import config from "@server/lib/config"; -export const handleGetConfigMessage: MessageHandler = async (context) => { +export const handleNewtGetConfigMessage: MessageHandler = async (context) => { const { message, client, sendToClient } = context; const newt = client as Newt; @@ -56,7 +56,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => { if (existingSite.lastHolePunch && now - existingSite.lastHolePunch > 5) { 
logger.warn( - `Site last hole punch is too old; skipping this register. The site is failing to hole punch and identify its network address with the server. Can the client reach the server on UDP port ${config.getRawConfig().gerbil.clients_start_port}?` + `Site last hole punch is too old; skipping this register. The site is failing to hole punch and identify its network address with the server. Can the site reach the server on UDP port ${config.getRawConfig().gerbil.clients_start_port}?` ); return; } @@ -113,7 +113,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => { exitNode ); - const targetsToSend = await convertTargetsIfNessicary(newt.newtId, targets); + const targetsToSend = await convertTargetsIfNessicary(newt.newtId, targets); // for backward compatibility with old newt versions that don't support the new target format return { message: { diff --git a/server/routers/newt/handleNewtPingMessage.ts b/server/routers/newt/handleNewtPingMessage.ts index 32f665758..56b8a2a24 100644 --- a/server/routers/newt/handleNewtPingMessage.ts +++ b/server/routers/newt/handleNewtPingMessage.ts @@ -1,180 +1,12 @@ -import { db, newts, sites, targetHealthCheck, targets } from "@server/db"; -import { - hasActiveConnections, - getClientConfigVersion -} from "#dynamic/routers/ws"; +import { db, sites } from "@server/db"; +import { getClientConfigVersion } from "#dynamic/routers/ws"; import { MessageHandler } from "@server/routers/ws"; import { Newt } from "@server/db"; -import { eq, lt, isNull, and, or, ne, not } from "drizzle-orm"; +import { eq } from "drizzle-orm"; import logger from "@server/logger"; import { sendNewtSyncMessage } from "./sync"; import { recordPing } from "./pingAccumulator"; -// Track if the offline checker interval is running -let offlineCheckerInterval: NodeJS.Timeout | null = null; -const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds -const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes -const 
OFFLINE_THRESHOLD_BANDWIDTH_MS = 8 * 60 * 1000; // 8 minutes - -/** - * Starts the background interval that checks for newt sites that haven't - * pinged recently and marks them as offline. For backward compatibility, - * a site is only marked offline when there is no active WebSocket connection - * either — so older newt versions that don't send pings but remain connected - * continue to be treated as online. - */ -export const startNewtOfflineChecker = (): void => { - if (offlineCheckerInterval) { - return; // Already running - } - - offlineCheckerInterval = setInterval(async () => { - try { - const twoMinutesAgo = Math.floor( - (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 - ); - - // Find all online newt-type sites that haven't pinged recently - // (or have never pinged at all). Join newts to obtain the newtId - // needed for the WebSocket connection check. - const staleSites = await db - .select({ - siteId: sites.siteId, - newtId: newts.newtId, - lastPing: sites.lastPing - }) - .from(sites) - .innerJoin(newts, eq(newts.siteId, sites.siteId)) - .where( - and( - eq(sites.online, true), - eq(sites.type, "newt"), - or( - lt(sites.lastPing, twoMinutesAgo), - isNull(sites.lastPing) - ) - ) - ); - - for (const staleSite of staleSites) { - // Backward-compatibility check: if the newt still has an - // active WebSocket connection (older clients that don't send - // pings), keep the site online. 
- const isConnected = await hasActiveConnections( - staleSite.newtId - ); - if (isConnected) { - logger.debug( - `Newt ${staleSite.newtId} has not pinged recently but is still connected via WebSocket — keeping site ${staleSite.siteId} online` - ); - continue; - } - - logger.info( - `Marking site ${staleSite.siteId} offline: newt ${staleSite.newtId} has no recent ping and no active WebSocket connection` - ); - - await db - .update(sites) - .set({ online: false }) - .where(eq(sites.siteId, staleSite.siteId)); - - const healthChecksOnSite = await db - .select() - .from(targetHealthCheck) - .innerJoin( - targets, - eq(targets.targetId, targetHealthCheck.targetId) - ) - .innerJoin(sites, eq(sites.siteId, targets.siteId)) - .where(eq(sites.siteId, staleSite.siteId)); - - for (const healthCheck of healthChecksOnSite) { - logger.info( - `Marking health check ${healthCheck.targetHealthCheck.targetHealthCheckId} offline due to site ${staleSite.siteId} being marked offline` - ); - await db - .update(targetHealthCheck) - .set({ hcHealth: "unknown" }) - .where( - eq( - targetHealthCheck.targetHealthCheckId, - healthCheck.targetHealthCheck - .targetHealthCheckId - ) - ); - } - } - - // this part only effects self hosted. Its not efficient but we dont expect people to have very many wireguard sites - // select all of the wireguard sites to evaluate if they need to be offline due to the last bandwidth update - const allWireguardSites = await db - .select({ - siteId: sites.siteId, - online: sites.online, - lastBandwidthUpdate: sites.lastBandwidthUpdate - }) - .from(sites) - .where( - and( - eq(sites.type, "wireguard"), - not(isNull(sites.lastBandwidthUpdate)) - ) - ); - - const wireguardOfflineThreshold = Math.floor( - (Date.now() - OFFLINE_THRESHOLD_BANDWIDTH_MS) / 1000 - ); - - // loop over each one. If its offline and there is a new update then mark it online. 
If its online and there is no update then mark it offline - for (const site of allWireguardSites) { - const lastBandwidthUpdate = - new Date(site.lastBandwidthUpdate!).getTime() / 1000; - if ( - lastBandwidthUpdate < wireguardOfflineThreshold && - site.online - ) { - logger.info( - `Marking wireguard site ${site.siteId} offline: no bandwidth update in over ${OFFLINE_THRESHOLD_BANDWIDTH_MS / 60000} minutes` - ); - - await db - .update(sites) - .set({ online: false }) - .where(eq(sites.siteId, site.siteId)); - } else if ( - lastBandwidthUpdate >= wireguardOfflineThreshold && - !site.online - ) { - logger.info( - `Marking wireguard site ${site.siteId} online: recent bandwidth update` - ); - - await db - .update(sites) - .set({ online: true }) - .where(eq(sites.siteId, site.siteId)); - } - } - } catch (error) { - logger.error("Error in newt offline checker interval", { error }); - } - }, OFFLINE_CHECK_INTERVAL); - - logger.debug("Started newt offline checker interval"); -}; - -/** - * Stops the background interval that checks for offline newt sites. - */ -export const stopNewtOfflineChecker = (): void => { - if (offlineCheckerInterval) { - clearInterval(offlineCheckerInterval); - offlineCheckerInterval = null; - logger.info("Stopped newt offline checker interval"); - } -}; - /** * Handles ping messages from newt clients. 
* diff --git a/server/routers/newt/handleNewtRegisterMessage.ts b/server/routers/newt/handleNewtRegisterMessage.ts index fce42caa3..f3902a35d 100644 --- a/server/routers/newt/handleNewtRegisterMessage.ts +++ b/server/routers/newt/handleNewtRegisterMessage.ts @@ -192,7 +192,7 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => { } const { tcpTargets, udpTargets, validHealthCheckTargets } = - await buildTargetConfigurationForNewtClient(siteId); + await buildTargetConfigurationForNewtClient(siteId, newtVersion); logger.debug( `Sending health check targets to newt ${newt.newtId}: ${JSON.stringify(validHealthCheckTargets)}` diff --git a/server/routers/newt/handleReceiveBandwidthMessage.ts b/server/routers/newt/handleReceiveBandwidthMessage.ts index f086333e7..2d5d99b09 100644 --- a/server/routers/newt/handleReceiveBandwidthMessage.ts +++ b/server/routers/newt/handleReceiveBandwidthMessage.ts @@ -88,7 +88,7 @@ export async function flushBandwidthToDb(): Promise { const currentTime = new Date().toISOString(); // Sort by publicKey for consistent lock ordering across concurrent - // writers — this is the same deadlock-prevention strategy used in the + // writers - this is the same deadlock-prevention strategy used in the // original per-message implementation. const sortedEntries = [...snapshot.entries()].sort(([a], [b]) => a.localeCompare(b) @@ -143,7 +143,7 @@ const flushTimer = setInterval(async () => { }, FLUSH_INTERVAL_MS); // Calling unref() means this timer will not keep the Node.js event loop alive -// on its own — the process can still exit normally when there is no other work +// on its own - the process can still exit normally when there is no other work // left. The graceful-shutdown path (see server/cleanup.ts) will call // flushBandwidthToDb() explicitly before process.exit(), so no data is lost. 
flushTimer.unref(); @@ -167,7 +167,7 @@ export const handleReceiveBandwidthMessage: MessageHandler = async ( // Accumulate the incoming data in memory; the periodic timer (and the // shutdown hook) will take care of writing it to the database. for (const { publicKey, bytesIn, bytesOut } of bandwidthData) { - // Skip peers that haven't transferred any data — writing zeros to the + // Skip peers that haven't transferred any data - writing zeros to the // database would be a no-op anyway. if (bytesIn <= 0 && bytesOut <= 0) { continue; diff --git a/server/routers/newt/handleRequestLogMessage.ts b/server/routers/newt/handleRequestLogMessage.ts new file mode 100644 index 000000000..190020ad1 --- /dev/null +++ b/server/routers/newt/handleRequestLogMessage.ts @@ -0,0 +1,9 @@ +import { MessageHandler } from "@server/routers/ws"; + +export async function flushRequestLogToDb(): Promise { + return; +} + +export const handleRequestLogMessage: MessageHandler = async (context) => { + return; +}; \ No newline at end of file diff --git a/server/routers/newt/index.ts b/server/routers/newt/index.ts index 33b5caf7c..368cdf636 100644 --- a/server/routers/newt/index.ts +++ b/server/routers/newt/index.ts @@ -2,11 +2,13 @@ export * from "./createNewt"; export * from "./getNewtToken"; export * from "./handleNewtRegisterMessage"; export * from "./handleReceiveBandwidthMessage"; -export * from "./handleGetConfigMessage"; +export * from "./handleNewtGetConfigMessage"; export * from "./handleSocketMessages"; export * from "./handleNewtPingRequestMessage"; export * from "./handleApplyBlueprintMessage"; export * from "./handleNewtPingMessage"; export * from "./handleNewtDisconnectingMessage"; export * from "./handleConnectionLogMessage"; +export * from "./handleRequestLogMessage"; export * from "./registerNewt"; +export * from "./offlineChecker"; diff --git a/server/routers/newt/offlineChecker.ts b/server/routers/newt/offlineChecker.ts new file mode 100644 index 000000000..426d80323 --- 
/dev/null +++ b/server/routers/newt/offlineChecker.ts @@ -0,0 +1,208 @@ +import { db, newts, sites, targetHealthCheck, targets, statusHistory } from "@server/db"; +import { + hasActiveConnections, +} from "#dynamic/routers/ws"; +import { eq, lt, isNull, and, or, ne, not } from "drizzle-orm"; +import logger from "@server/logger"; +import { fireSiteOfflineAlert, fireSiteOnlineAlert } from "#dynamic/lib/alerts"; + +// Track if the offline checker interval is running +let offlineCheckerInterval: NodeJS.Timeout | null = null; +const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds +const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes +const OFFLINE_THRESHOLD_BANDWIDTH_MS = 8 * 60 * 1000; // 8 minutes + +/** + * Starts the background interval that checks for newt sites that haven't + * pinged recently and marks them as offline. For backward compatibility, + * a site is only marked offline when there is no active WebSocket connection + * either - so older newt versions that don't send pings but remain connected + * continue to be treated as online. + */ +export const startNewtOfflineChecker = (): void => { + if (offlineCheckerInterval) { + return; // Already running + } + + offlineCheckerInterval = setInterval(async () => { + try { + const twoMinutesAgo = Math.floor( + (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 + ); + + // Find all online newt-type sites that haven't pinged recently + // (or have never pinged at all). Join newts to obtain the newtId + // needed for the WebSocket connection check. 
+ const staleSites = await db + .select({ + siteId: sites.siteId, + orgId: sites.orgId, + name: sites.name, + newtId: newts.newtId, + lastPing: sites.lastPing + }) + .from(sites) + .innerJoin(newts, eq(newts.siteId, sites.siteId)) + .where( + and( + eq(sites.online, true), + eq(sites.type, "newt"), + or( + lt(sites.lastPing, twoMinutesAgo), + isNull(sites.lastPing) + ) + ) + ); + + for (const staleSite of staleSites) { + // Backward-compatibility check: if the newt still has an + // active WebSocket connection (older clients that don't send + // pings), keep the site online. + const isConnected = await hasActiveConnections( + staleSite.newtId + ); + if (isConnected) { + logger.debug( + `Newt ${staleSite.newtId} has not pinged recently but is still connected via WebSocket - keeping site ${staleSite.siteId} online` + ); + continue; + } + + logger.info( + `Marking site ${staleSite.siteId} offline: newt ${staleSite.newtId} has no recent ping and no active WebSocket connection` + ); + + await db + .update(sites) + .set({ online: false }) + .where(eq(sites.siteId, staleSite.siteId)); + + await db.insert(statusHistory).values({ + entityType: "site", + entityId: staleSite.siteId, + orgId: staleSite.orgId, + status: "offline", + timestamp: Math.floor(Date.now() / 1000), + }).execute(); + + const healthChecksOnSite = await db + .select() + .from(targetHealthCheck) + .innerJoin( + targets, + eq(targets.targetId, targetHealthCheck.targetId) + ) + .innerJoin(sites, eq(sites.siteId, targets.siteId)) + .where(eq(sites.siteId, staleSite.siteId)); + + for (const healthCheck of healthChecksOnSite) { + logger.info( + `Marking health check ${healthCheck.targetHealthCheck.targetHealthCheckId} offline due to site ${staleSite.siteId} being marked offline` + ); + await db + .update(targetHealthCheck) + .set({ hcHealth: "unknown" }) + .where( + eq( + targetHealthCheck.targetHealthCheckId, + healthCheck.targetHealthCheck + .targetHealthCheckId + ) + ); + + // TODO: should we be firing an 
alert here when the health check goes to unknown? + } + + await fireSiteOfflineAlert(staleSite.orgId, staleSite.siteId, staleSite.name); + } + + // this part only effects self hosted. Its not efficient but we dont expect people to have very many wireguard sites + // select all of the wireguard sites to evaluate if they need to be offline due to the last bandwidth update + const allWireguardSites = await db + .select({ + siteId: sites.siteId, + orgId: sites.orgId, + name: sites.name, + online: sites.online, + lastBandwidthUpdate: sites.lastBandwidthUpdate + }) + .from(sites) + .where( + and( + eq(sites.type, "wireguard"), + not(isNull(sites.lastBandwidthUpdate)) + ) + ); + + const wireguardOfflineThreshold = Math.floor( + (Date.now() - OFFLINE_THRESHOLD_BANDWIDTH_MS) / 1000 + ); + + // loop over each one. If its offline and there is a new update then mark it online. If its online and there is no update then mark it offline + for (const site of allWireguardSites) { + const lastBandwidthUpdate = + new Date(site.lastBandwidthUpdate!).getTime() / 1000; + if ( + lastBandwidthUpdate < wireguardOfflineThreshold && + site.online + ) { + logger.info( + `Marking wireguard site ${site.siteId} offline: no bandwidth update in over ${OFFLINE_THRESHOLD_BANDWIDTH_MS / 60000} minutes` + ); + + await db + .update(sites) + .set({ online: false }) + .where(eq(sites.siteId, site.siteId)); + + await db.insert(statusHistory).values({ + entityType: "site", + entityId: site.siteId, + orgId: site.orgId, + status: "offline", + timestamp: Math.floor(Date.now() / 1000), + }).execute(); + + await fireSiteOfflineAlert(site.orgId, site.siteId, site.name); + } else if ( + lastBandwidthUpdate >= wireguardOfflineThreshold && + !site.online + ) { + logger.info( + `Marking wireguard site ${site.siteId} online: recent bandwidth update` + ); + + await db + .update(sites) + .set({ online: true }) + .where(eq(sites.siteId, site.siteId)); + + await db.insert(statusHistory).values({ + entityType: "site", + 
entityId: site.siteId, + orgId: site.orgId, + status: "online", + timestamp: Math.floor(Date.now() / 1000), + }).execute(); + + await fireSiteOnlineAlert(site.orgId, site.siteId, site.name); + } + } + } catch (error) { + logger.error("Error in newt offline checker interval", { error }); + } + }, OFFLINE_CHECK_INTERVAL); + + logger.debug("Started newt offline checker interval"); +}; + +/** + * Stops the background interval that checks for offline newt sites. + */ +export const stopNewtOfflineChecker = (): void => { + if (offlineCheckerInterval) { + clearInterval(offlineCheckerInterval); + offlineCheckerInterval = null; + logger.info("Stopped newt offline checker interval"); + } +}; diff --git a/server/routers/newt/pingAccumulator.ts b/server/routers/newt/pingAccumulator.ts index fe2cde216..b63bf97d3 100644 --- a/server/routers/newt/pingAccumulator.ts +++ b/server/routers/newt/pingAccumulator.ts @@ -1,7 +1,8 @@ import { db } from "@server/db"; -import { sites, clients, olms } from "@server/db"; -import { inArray } from "drizzle-orm"; +import { sites, clients, olms, statusHistory } from "@server/db"; +import { and, eq, inArray } from "drizzle-orm"; import logger from "@server/logger"; +import { fireSiteOnlineAlert } from "#dynamic/lib/alerts"; /** * Ping Accumulator @@ -110,15 +111,51 @@ async function flushSitePingsToDb(): Promise { const siteIds = batch.map(([id]) => id); try { - await withRetry(async () => { - await db + const newlyOnlineSites = await withRetry(async () => { + // Only update sites that were offline - these are the + // offline→online transitions. .returning() gives us exactly + // the site IDs that changed state. 
+ const transitioned = await db .update(sites) .set({ online: true, lastPing: maxTimestamp }) - .where(inArray(sites.siteId, siteIds)); + .where( + and( + inArray(sites.siteId, siteIds), + eq(sites.online, false) + ) + ) + .returning({ siteId: sites.siteId, orgId: sites.orgId, name: sites.name }); + + // Update lastPing for sites that were already online. + // After the update above, the newly-online sites now have + // online = true, so this catches all remaining sites in the + // batch and keeps lastPing current for them too. + await db + .update(sites) + .set({ lastPing: maxTimestamp }) + .where( + and( + inArray(sites.siteId, siteIds), + eq(sites.online, true) + ) + ); + + return transitioned; }, "flushSitePingsToDb"); + + for (const site of newlyOnlineSites) { + await db.insert(statusHistory).values({ + entityType: "site", + entityId: site.siteId, + orgId: site.orgId, + status: "online", + timestamp: Math.floor(Date.now() / 1000), + }).execute(); + await fireSiteOnlineAlert(site.orgId, site.siteId, site.name); + } } catch (error) { logger.error( `Failed to flush site ping batch (${batch.length} sites), re-queuing for next cycle`, @@ -219,7 +256,7 @@ async function flushClientPingsToDb(): Promise { } /** - * Flush everything — called by the interval timer and during shutdown. + * Flush everything - called by the interval timer and during shutdown. 
*/ export async function flushPingsToDb(): Promise { await flushSitePingsToDb(); @@ -284,7 +321,7 @@ function isTransientError(error: any): boolean { return true; } - // PostgreSQL deadlock detected — always safe to retry (one winner guaranteed) + // PostgreSQL deadlock detected - always safe to retry (one winner guaranteed) if (code === "40P01" || message.includes("deadlock")) { return true; } diff --git a/server/routers/newt/registerNewt.ts b/server/routers/newt/registerNewt.ts index de68ab2de..cc53e48df 100644 --- a/server/routers/newt/registerNewt.ts +++ b/server/routers/newt/registerNewt.ts @@ -249,7 +249,7 @@ export async function registerNewt( dateCreated: moment().toISOString() }); - // Consume the provisioning key — cascade removes siteProvisioningKeyOrg + // Consume the provisioning key - cascade removes siteProvisioningKeyOrg await trx .update(siteProvisioningKeys) .set({ diff --git a/server/routers/newt/targets.ts b/server/routers/newt/targets.ts index 6a523ebe9..25b520854 100644 --- a/server/routers/newt/targets.ts +++ b/server/routers/newt/targets.ts @@ -1,7 +1,6 @@ -import { Target, TargetHealthCheck, db, targetHealthCheck } from "@server/db"; +import { Target, TargetHealthCheck } from "@server/db"; import { sendToClient } from "#dynamic/routers/ws"; import logger from "@server/logger"; -import { eq, inArray } from "drizzle-orm"; import { canCompress } from "@server/lib/clientVersionChecks"; export async function addTargets( @@ -18,17 +17,23 @@ export async function addTargets( }:${target.port}`; }); - await sendToClient(newtId, { - type: `newt/${protocol}/add`, - data: { - targets: payloadTargets - } - }, { incrementConfigVersion: true, compress: canCompress(version, "newt") }); + await sendToClient( + newtId, + { + type: `newt/${protocol}/add`, + data: { + targets: payloadTargets + } + }, + { incrementConfigVersion: true, compress: canCompress(version, "newt") } + ); // Create a map for quick lookup const healthCheckMap = new Map(); 
healthCheckData.forEach((hc) => { - healthCheckMap.set(hc.targetId, hc); + if (hc.targetId !== null) { + healthCheckMap.set(hc.targetId, hc); + } }); const healthCheckTargets = targets.map((target) => { @@ -43,17 +48,18 @@ export async function addTargets( } // Ensure all necessary fields are present - if ( - !hc.hcPath || - !hc.hcHostname || - !hc.hcPort || - !hc.hcInterval || - !hc.hcMethod - ) { + const isTCP = hc.hcMode?.toLowerCase() === "tcp"; + if (!hc.hcHostname || !hc.hcPort || !hc.hcInterval) { logger.debug( `Skipping target ${target.targetId} due to missing health check fields` ); - return null; // Skip targets with missing health check fields + return null; + } + if (!isTCP && (!hc.hcPath || !hc.hcMethod)) { + logger.debug( + `Skipping target ${target.targetId} due to missing HTTP health check fields` + ); + return null; } const hcHeadersParse = hc.hcHeaders ? JSON.parse(hc.hcHeaders) : null; @@ -77,7 +83,7 @@ export async function addTargets( } return { - id: target.targetId, + id: hc.targetHealthCheckId, hcEnabled: hc.hcEnabled, hcPath: hc.hcPath, hcScheme: hc.hcScheme, @@ -88,9 +94,12 @@ export async function addTargets( hcUnhealthyInterval: hc.hcUnhealthyInterval, // in seconds hcTimeout: hc.hcTimeout, // in seconds hcHeaders: hcHeadersSend, + hcFollowRedirects: hc.hcFollowRedirects, hcMethod: hc.hcMethod, hcStatus: hcStatus, - hcTlsServerName: hc.hcTlsServerName + hcTlsServerName: hc.hcTlsServerName, + hcHealthyThreshold: hc.hcHealthyThreshold, + hcUnhealthyThreshold: hc.hcUnhealthyThreshold }; }); @@ -99,12 +108,106 @@ export async function addTargets( (target) => target !== null ); - await sendToClient(newtId, { - type: `newt/healthcheck/add`, - data: { - targets: validHealthCheckTargets + await sendToClient( + newtId, + { + type: `newt/healthcheck/add`, + data: { + targets: validHealthCheckTargets + } + }, + { incrementConfigVersion: true, compress: canCompress(version, "newt") } + ); +} + +export async function addStandaloneHealthCheck( + 
newtId: string, + healthCheck: TargetHealthCheck, + version?: string | null +) { + const isTCP = healthCheck.hcMode?.toLowerCase() === "tcp"; + if ( + !healthCheck.hcHostname || + !healthCheck.hcPort || + !healthCheck.hcInterval + ) { + logger.debug( + `Skipping standalone health check ${healthCheck.targetHealthCheckId} due to missing fields` + ); + return; + } + if (!isTCP && (!healthCheck.hcPath || !healthCheck.hcMethod)) { + logger.debug( + `Skipping standalone health check ${healthCheck.targetHealthCheckId} due to missing HTTP health check fields` + ); + return; + } + + const hcHeadersParse = healthCheck.hcHeaders + ? JSON.parse(healthCheck.hcHeaders) + : null; + const hcHeadersSend: { [key: string]: string } = {}; + if (hcHeadersParse) { + hcHeadersParse.forEach((header: { name: string; value: string }) => { + hcHeadersSend[header.name] = header.value; + }); + } + + let hcStatus: number | undefined = undefined; + if (healthCheck.hcStatus) { + const parsedStatus = parseInt(healthCheck.hcStatus.toString()); + if (!isNaN(parsedStatus)) { + hcStatus = parsedStatus; } - }, { incrementConfigVersion: true, compress: canCompress(version, "newt") }); + } + + await sendToClient( + newtId, + { + type: `newt/healthcheck/add`, + data: { + targets: [ + { + id: healthCheck.targetHealthCheckId, + hcEnabled: healthCheck.hcEnabled, + hcPath: healthCheck.hcPath, + hcScheme: healthCheck.hcScheme, + hcMode: healthCheck.hcMode, + hcHostname: healthCheck.hcHostname, + hcPort: healthCheck.hcPort, + hcInterval: healthCheck.hcInterval, + hcUnhealthyInterval: healthCheck.hcUnhealthyInterval, + hcTimeout: healthCheck.hcTimeout, + hcHeaders: hcHeadersSend, + hcFollowRedirects: healthCheck.hcFollowRedirects, + hcMethod: healthCheck.hcMethod, + hcStatus: hcStatus, + hcTlsServerName: healthCheck.hcTlsServerName, + hcHealthyThreshold: healthCheck.hcHealthyThreshold, + hcUnhealthyThreshold: healthCheck.hcUnhealthyThreshold + } + ] + } + }, + { incrementConfigVersion: true, compress: 
canCompress(version, "newt") } + ); +} + +export async function removeStandaloneHealthCheck( + newtId: string, + healthCheckId: number, + version?: string | null +) { + await sendToClient( + newtId, + { + type: `newt/healthcheck/remove`, + data: { + ids: [healthCheckId] + } + }, + { incrementConfigVersion: true, compress: canCompress(version, "newt") } + ); } export async function removeTargets( @@ -120,21 +223,29 @@ export async function removeTargets( }:${target.port}`; }); - await sendToClient(newtId, { - type: `newt/${protocol}/remove`, - data: { - targets: payloadTargets - } - }, { incrementConfigVersion: true }); + await sendToClient( + newtId, + { + type: `newt/${protocol}/remove`, + data: { + targets: payloadTargets + } + }, + { incrementConfigVersion: true } + ); const healthCheckTargets = targets.map((target) => { return target.targetId; }); - await sendToClient(newtId, { - type: `newt/healthcheck/remove`, - data: { - ids: healthCheckTargets - } - }, { incrementConfigVersion: true, compress: canCompress(version, "newt") }); + await sendToClient( + newtId, + { + type: `newt/healthcheck/remove`, + data: { + ids: healthCheckTargets + } + }, + { incrementConfigVersion: true, compress: canCompress(version, "newt") } + ); } diff --git a/server/routers/olm/buildConfiguration.ts b/server/routers/olm/buildConfiguration.ts index bc2611b1c..4182725d3 100644 --- a/server/routers/olm/buildConfiguration.ts +++ b/server/routers/olm/buildConfiguration.ts @@ -4,6 +4,8 @@ import { clientSitesAssociationsCache, db, exitNodes, + networks, + siteNetworks, siteResources, sites } from "@server/db"; @@ -59,9 +61,17 @@ export async function buildSiteConfigurationForOlmClient( clientSiteResourcesAssociationsCache.siteResourceId ) ) + .innerJoin( + networks, + eq(siteResources.networkId, networks.networkId) + ) + .innerJoin( + siteNetworks, + eq(networks.networkId, siteNetworks.networkId) + ) .where( and( - eq(siteResources.siteId, site.siteId), + eq(siteNetworks.siteId, 
site.siteId), eq( clientSiteResourcesAssociationsCache.clientId, client.clientId @@ -69,6 +79,7 @@ export async function buildSiteConfigurationForOlmClient( ) ); + if (jitMode) { // Add site configuration to the array siteConfigurations.push({ diff --git a/server/routers/olm/handleOlmPingMessage.ts b/server/routers/olm/handleOlmPingMessage.ts index 0f520b234..0e18c7f5b 100644 --- a/server/routers/olm/handleOlmPingMessage.ts +++ b/server/routers/olm/handleOlmPingMessage.ts @@ -1,104 +1,17 @@ -import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws"; +import { getClientConfigVersion } from "#dynamic/routers/ws"; import { db } from "@server/db"; import { MessageHandler } from "@server/routers/ws"; -import { clients, olms, Olm } from "@server/db"; -import { eq, lt, isNull, and, or } from "drizzle-orm"; +import { clients, Olm } from "@server/db"; +import { eq } from "drizzle-orm"; import { recordClientPing } from "@server/routers/newt/pingAccumulator"; import logger from "@server/logger"; import { validateSessionToken } from "@server/auth/sessions/app"; import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy"; -import { sendTerminateClient } from "../client/terminate"; import { encodeHexLowerCase } from "@oslojs/encoding"; import { sha256 } from "@oslojs/crypto/sha2"; import { sendOlmSyncMessage } from "./sync"; -import { OlmErrorCodes } from "./error"; import { handleFingerprintInsertion } from "./fingerprintingUtils"; -// Track if the offline checker interval is running -let offlineCheckerInterval: NodeJS.Timeout | null = null; -const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds -const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes - -/** - * Starts the background interval that checks for clients that haven't pinged recently - * and marks them as offline - */ -export const startOlmOfflineChecker = (): void => { - if (offlineCheckerInterval) { - return; // Already running - } - - offlineCheckerInterval = 
setInterval(async () => { - try { - const twoMinutesAgo = Math.floor( - (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 - ); - - // TODO: WE NEED TO MAKE SURE THIS WORKS WITH DISTRIBUTED NODES ALL DOING THE SAME THING - - // Find clients that haven't pinged in the last 2 minutes and mark them as offline - const offlineClients = await db - .update(clients) - .set({ online: false }) - .where( - and( - eq(clients.online, true), - or( - lt(clients.lastPing, twoMinutesAgo), - isNull(clients.lastPing) - ) - ) - ) - .returning(); - - for (const offlineClient of offlineClients) { - logger.info( - `Kicking offline olm client ${offlineClient.clientId} due to inactivity` - ); - - if (!offlineClient.olmId) { - logger.warn( - `Offline client ${offlineClient.clientId} has no olmId, cannot disconnect` - ); - continue; - } - - // Send a disconnect message to the client if connected - try { - await sendTerminateClient( - offlineClient.clientId, - OlmErrorCodes.TERMINATED_INACTIVITY, - offlineClient.olmId - ); // terminate first - // wait a moment to ensure the message is sent - await new Promise((resolve) => setTimeout(resolve, 1000)); - await disconnectClient(offlineClient.olmId); - } catch (error) { - logger.error( - `Error sending disconnect to offline olm ${offlineClient.clientId}`, - { error } - ); - } - } - } catch (error) { - logger.error("Error in offline checker interval", { error }); - } - }, OFFLINE_CHECK_INTERVAL); - - logger.debug("Started offline checker interval"); -}; - -/** - * Stops the background interval that checks for offline clients - */ -export const stopOlmOfflineChecker = (): void => { - if (offlineCheckerInterval) { - clearInterval(offlineCheckerInterval); - offlineCheckerInterval = null; - logger.info("Stopped offline checker interval"); - } -}; - /** * Handles ping messages from clients and responds with pong */ diff --git a/server/routers/olm/handleOlmRegisterMessage.ts b/server/routers/olm/handleOlmRegisterMessage.ts index 01495de3b..a4a62973d 100644 --- 
a/server/routers/olm/handleOlmRegisterMessage.ts +++ b/server/routers/olm/handleOlmRegisterMessage.ts @@ -17,7 +17,6 @@ import { getUserDeviceName } from "@server/db/names"; import { buildSiteConfigurationForOlmClient } from "./buildConfiguration"; import { OlmErrorCodes, sendOlmError } from "./error"; import { handleFingerprintInsertion } from "./fingerprintingUtils"; -import { Alias } from "@server/lib/ip"; import { build } from "@server/build"; import { canCompress } from "@server/lib/clientVersionChecks"; import config from "@server/lib/config"; diff --git a/server/routers/olm/handleOlmServerInitAddPeerHandshake.ts b/server/routers/olm/handleOlmServerInitAddPeerHandshake.ts index 54badb2dc..05a83a146 100644 --- a/server/routers/olm/handleOlmServerInitAddPeerHandshake.ts +++ b/server/routers/olm/handleOlmServerInitAddPeerHandshake.ts @@ -4,10 +4,12 @@ import { db, exitNodes, Site, - siteResources + siteNetworks, + siteResources, + sites } from "@server/db"; import { MessageHandler } from "@server/routers/ws"; -import { clients, Olm, sites } from "@server/db"; +import { clients, Olm } from "@server/db"; import { and, eq, or } from "drizzle-orm"; import logger from "@server/logger"; import { initPeerAddHandshake } from "./peers"; @@ -44,20 +46,31 @@ export const handleOlmServerInitAddPeerHandshake: MessageHandler = async ( const { siteId, resourceId, chainId } = message.data; - let site: Site | null = null; + const sendCancel = async () => { + await sendToClient( + olm.olmId, + { + type: "olm/wg/peer/chain/cancel", + data: { chainId } + }, + { incrementConfigVersion: false } + ).catch((error) => { + logger.warn(`Error sending message:`, error); + }); + }; + + let sitesToProcess: Site[] = []; + if (siteId) { - // get the site const [siteRes] = await db .select() .from(sites) .where(eq(sites.siteId, siteId)) .limit(1); if (siteRes) { - site = siteRes; + sitesToProcess = [siteRes]; } - } - - if (resourceId && !site) { + } else if (resourceId) { const resources = 
await db .select() .from(siteResources) @@ -72,27 +85,17 @@ export const handleOlmServerInitAddPeerHandshake: MessageHandler = async ( ); if (!resources || resources.length === 0) { - logger.error(`handleOlmServerPeerAddMessage: Resource not found`); - // cancel the request from the olm side to not keep doing this - await sendToClient( - olm.olmId, - { - type: "olm/wg/peer/chain/cancel", - data: { - chainId - } - }, - { incrementConfigVersion: false } - ).catch((error) => { - logger.warn(`Error sending message:`, error); - }); + logger.error( + `handleOlmServerInitAddPeerHandshake: Resource not found` + ); + await sendCancel(); return; } if (resources.length > 1) { // error but this should not happen because the nice id cant contain a dot and the alias has to have a dot and both have to be unique within the org so there should never be multiple matches logger.error( - `handleOlmServerPeerAddMessage: Multiple resources found matching the criteria` + `handleOlmServerInitAddPeerHandshake: Multiple resources found matching the criteria` ); return; } @@ -117,125 +120,120 @@ export const handleOlmServerInitAddPeerHandshake: MessageHandler = async ( if (currentResourceAssociationCaches.length === 0) { logger.error( - `handleOlmServerPeerAddMessage: Client ${client.clientId} does not have access to resource ${resource.siteResourceId}` + `handleOlmServerInitAddPeerHandshake: Client ${client.clientId} does not have access to resource ${resource.siteResourceId}` ); - // cancel the request from the olm side to not keep doing this - await sendToClient( - olm.olmId, - { - type: "olm/wg/peer/chain/cancel", - data: { - chainId - } - }, - { incrementConfigVersion: false } - ).catch((error) => { - logger.warn(`Error sending message:`, error); - }); + await sendCancel(); return; } - const siteIdFromResource = resource.siteId; - - // get the site - const [siteRes] = await db - .select() - .from(sites) - .where(eq(sites.siteId, siteIdFromResource)); - if (!siteRes) { + if 
(!resource.networkId) { logger.error( - `handleOlmServerPeerAddMessage: Site with ID ${site} not found` + `handleOlmServerInitAddPeerHandshake: Resource ${resource.siteResourceId} has no network` ); + await sendCancel(); return; } - site = siteRes; + // Get all sites associated with this resource's network via siteNetworks + const siteRows = await db + .select({ siteId: siteNetworks.siteId }) + .from(siteNetworks) + .where(eq(siteNetworks.networkId, resource.networkId)); + + if (!siteRows || siteRows.length === 0) { + logger.error( + `handleOlmServerInitAddPeerHandshake: No sites found for resource ${resource.siteResourceId}` + ); + await sendCancel(); + return; + } + + // Fetch full site objects for all network members + const foundSites = await Promise.all( + siteRows.map(async ({ siteId: sid }) => { + const [s] = await db + .select() + .from(sites) + .where(eq(sites.siteId, sid)) + .limit(1); + return s ?? null; + }) + ); + + sitesToProcess = foundSites.filter((s): s is Site => s !== null); } - if (!site) { - logger.error(`handleOlmServerPeerAddMessage: Site not found`); + if (sitesToProcess.length === 0) { + logger.error( + `handleOlmServerInitAddPeerHandshake: No sites to process` + ); + await sendCancel(); return; } - // check if the client can access this site using the cache - const currentSiteAssociationCaches = await db - .select() - .from(clientSitesAssociationsCache) - .where( - and( - eq(clientSitesAssociationsCache.clientId, client.clientId), - eq(clientSitesAssociationsCache.siteId, site.siteId) - ) - ); + let handshakeInitiated = false; - if (currentSiteAssociationCaches.length === 0) { - logger.error( - `handleOlmServerPeerAddMessage: Client ${client.clientId} does not have access to site ${site.siteId}` - ); - // cancel the request from the olm side to not keep doing this - await sendToClient( - olm.olmId, + for (const site of sitesToProcess) { + // Check if the client can access this site using the cache + const currentSiteAssociationCaches = 
await db + .select() + .from(clientSitesAssociationsCache) + .where( + and( + eq(clientSitesAssociationsCache.clientId, client.clientId), + eq(clientSitesAssociationsCache.siteId, site.siteId) + ) + ); + + if (currentSiteAssociationCaches.length === 0) { + logger.warn( + `handleOlmServerInitAddPeerHandshake: Client ${client.clientId} does not have access to site ${site.siteId}, skipping` + ); + continue; + } + + if (!site.exitNodeId) { + logger.error( + `handleOlmServerInitAddPeerHandshake: Site ${site.siteId} has no exit node, skipping` + ); + continue; + } + + const [exitNode] = await db + .select() + .from(exitNodes) + .where(eq(exitNodes.exitNodeId, site.exitNodeId)); + + if (!exitNode) { + logger.error( + `handleOlmServerInitAddPeerHandshake: Exit node not found for site ${site.siteId}, skipping` + ); + continue; + } + + // Trigger the peer add handshake - if the peer was already added this will be a no-op + await initPeerAddHandshake( + client.clientId, { - type: "olm/wg/peer/chain/cancel", - data: { - chainId + siteId: site.siteId, + exitNode: { + publicKey: exitNode.publicKey, + endpoint: exitNode.endpoint } }, - { incrementConfigVersion: false } - ).catch((error) => { - logger.warn(`Error sending message:`, error); - }); - return; - } - - if (!site.exitNodeId) { - logger.error( - `handleOlmServerPeerAddMessage: Site with ID ${site.siteId} has no exit node` - ); - // cancel the request from the olm side to not keep doing this - await sendToClient( olm.olmId, - { - type: "olm/wg/peer/chain/cancel", - data: { - chainId - } - }, - { incrementConfigVersion: false } - ).catch((error) => { - logger.warn(`Error sending message:`, error); - }); - return; - } - - // get the exit node from the side - const [exitNode] = await db - .select() - .from(exitNodes) - .where(eq(exitNodes.exitNodeId, site.exitNodeId)); - - if (!exitNode) { - logger.error( - `handleOlmServerPeerAddMessage: Site with ID ${site.siteId} has no exit node` + chainId ); - return; + + 
handshakeInitiated = true; } - // also trigger the peer add handshake in case the peer was not already added to the olm and we need to hole punch - // if it has already been added this will be a no-op - await initPeerAddHandshake( - // this will kick off the add peer process for the client - client.clientId, - { - siteId: site.siteId, - exitNode: { - publicKey: exitNode.publicKey, - endpoint: exitNode.endpoint - } - }, - olm.olmId, - chainId - ); + if (!handshakeInitiated) { + logger.error( + `handleOlmServerInitAddPeerHandshake: No accessible sites with valid exit nodes found, cancelling chain` + ); + await sendCancel(); + } return; }; diff --git a/server/routers/olm/handleOlmServerPeerAddMessage.ts b/server/routers/olm/handleOlmServerPeerAddMessage.ts index 64284f493..5f46ea84c 100644 --- a/server/routers/olm/handleOlmServerPeerAddMessage.ts +++ b/server/routers/olm/handleOlmServerPeerAddMessage.ts @@ -1,43 +1,25 @@ import { - Client, clientSiteResourcesAssociationsCache, db, - ExitNode, - Org, - orgs, - roleClients, - roles, + networks, + siteNetworks, siteResources, - Transaction, - userClients, - userOrgs, - users } from "@server/db"; import { MessageHandler } from "@server/routers/ws"; import { clients, clientSitesAssociationsCache, - exitNodes, Olm, - olms, sites } from "@server/db"; import { and, eq, inArray, isNotNull, isNull } from "drizzle-orm"; -import { addPeer, deletePeer } from "../newt/peers"; import logger from "@server/logger"; -import { listExitNodes } from "#dynamic/lib/exitNodes"; import { generateAliasConfig, - getNextAvailableClientSubnet } from "@server/lib/ip"; import { generateRemoteSubnets } from "@server/lib/ip"; -import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations"; -import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy"; -import { validateSessionToken } from "@server/auth/sessions/app"; -import config from "@server/lib/config"; import { addPeer as newtAddPeer, - deletePeer as 
newtDeletePeer } from "@server/routers/newt/peers"; export const handleOlmServerPeerAddMessage: MessageHandler = async ( @@ -153,13 +135,21 @@ export const handleOlmServerPeerAddMessage: MessageHandler = async ( clientSiteResourcesAssociationsCache.siteResourceId ) ) - .where( + .innerJoin( + networks, + eq(siteResources.networkId, networks.networkId) + ) + .innerJoin( + siteNetworks, and( - eq(siteResources.siteId, site.siteId), - eq( - clientSiteResourcesAssociationsCache.clientId, - client.clientId - ) + eq(networks.networkId, siteNetworks.networkId), + eq(siteNetworks.siteId, site.siteId) + ) + ) + .where( + eq( + clientSiteResourcesAssociationsCache.clientId, + client.clientId ) ); diff --git a/server/routers/olm/index.ts b/server/routers/olm/index.ts index 322428572..5c151a8cf 100644 --- a/server/routers/olm/index.ts +++ b/server/routers/olm/index.ts @@ -12,3 +12,4 @@ export * from "./handleOlmUnRelayMessage"; export * from "./recoverOlmWithFingerprint"; export * from "./handleOlmDisconnectingMessage"; export * from "./handleOlmServerInitAddPeerHandshake"; +export * from "./offlineChecker"; diff --git a/server/routers/olm/offlineChecker.ts b/server/routers/olm/offlineChecker.ts new file mode 100644 index 000000000..7dd06a29c --- /dev/null +++ b/server/routers/olm/offlineChecker.ts @@ -0,0 +1,92 @@ +import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws"; +import { db } from "@server/db"; +import { clients } from "@server/db"; +import { eq, lt, isNull, and, or } from "drizzle-orm"; +import logger from "@server/logger"; +import { sendTerminateClient } from "../client/terminate"; +import { OlmErrorCodes } from "./error"; + +// Track if the offline checker interval is running +let offlineCheckerInterval: NodeJS.Timeout | null = null; +const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds +const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes + +/** + * Starts the background interval that checks for clients that haven't 
pinged recently + * and marks them as offline + */ +export const startOlmOfflineChecker = (): void => { + if (offlineCheckerInterval) { + return; // Already running + } + + offlineCheckerInterval = setInterval(async () => { + try { + const twoMinutesAgo = Math.floor( + (Date.now() - OFFLINE_THRESHOLD_MS) / 1000 + ); + + // TODO: WE NEED TO MAKE SURE THIS WORKS WITH DISTRIBUTED NODES ALL DOING THE SAME THING + + // Find clients that haven't pinged in the last 2 minutes and mark them as offline + const offlineClients = await db + .update(clients) + .set({ online: false }) + .where( + and( + eq(clients.online, true), + or( + lt(clients.lastPing, twoMinutesAgo), + isNull(clients.lastPing) + ) + ) + ) + .returning(); + + for (const offlineClient of offlineClients) { + logger.info( + `Kicking offline olm client ${offlineClient.clientId} due to inactivity` + ); + + if (!offlineClient.olmId) { + logger.warn( + `Offline client ${offlineClient.clientId} has no olmId, cannot disconnect` + ); + continue; + } + + // Send a disconnect message to the client if connected + try { + await sendTerminateClient( + offlineClient.clientId, + OlmErrorCodes.TERMINATED_INACTIVITY, + offlineClient.olmId + ); // terminate first + // wait a moment to ensure the message is sent + await new Promise((resolve) => setTimeout(resolve, 1000)); + await disconnectClient(offlineClient.olmId); + } catch (error) { + logger.error( + `Error sending disconnect to offline olm ${offlineClient.clientId}`, + { error } + ); + } + } + } catch (error) { + logger.error("Error in offline checker interval", { error }); + } + }, OFFLINE_CHECK_INTERVAL); + + logger.debug("Started offline checker interval"); +}; + +/** + * Stops the background interval that checks for offline clients + */ +export const stopOlmOfflineChecker = (): void => { + if (offlineCheckerInterval) { + clearInterval(offlineCheckerInterval); + offlineCheckerInterval = null; + logger.info("Stopped offline checker interval"); + } +}; diff --git 
a/server/routers/resource/getStatusHistory.ts b/server/routers/resource/getStatusHistory.ts new file mode 100644 index 000000000..9aa548624 --- /dev/null +++ b/server/routers/resource/getStatusHistory.ts @@ -0,0 +1,93 @@ +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, statusHistory } from "@server/db"; +import { and, eq, gte, asc } from "drizzle-orm"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { + computeBuckets, + statusHistoryQuerySchema, + StatusHistoryResponse +} from "@server/lib/statusHistory"; + +const resourceParamsSchema = z.object({ + resourceId: z.string().transform((v) => parseInt(v, 10)) +}); + +export async function getResourceStatusHistory( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = resourceParamsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const parsedQuery = statusHistoryQuerySchema.safeParse(req.query); + if (!parsedQuery.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedQuery.error).toString() + ) + ); + } + + const entityType = "resource"; + const entityId = parsedParams.data.resourceId; + const { days } = parsedQuery.data; + + const nowSec = Math.floor(Date.now() / 1000); + const startSec = nowSec - days * 86400; + + const events = await db + .select() + .from(statusHistory) + .where( + and( + eq(statusHistory.entityType, entityType), + eq(statusHistory.entityId, entityId), + gte(statusHistory.timestamp, startSec) + ) + ) + .orderBy(asc(statusHistory.timestamp)); + + const { buckets, totalDowntime } = computeBuckets(events, days); + const totalWindow = days * 86400; + const 
overallUptime = + totalWindow > 0 + ? Math.max( + 0, + ((totalWindow - totalDowntime) / totalWindow) * 100 + ) + : 100; + + return response(res, { + data: { + entityType, + entityId, + days: buckets, + overallUptimePercent: Math.round(overallUptime * 100) / 100, + totalDowntimeSeconds: totalDowntime + }, + success: true, + error: false, + message: "Status history retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/routers/resource/getUserResources.ts b/server/routers/resource/getUserResources.ts index 802fffb1b..1722a7993 100644 --- a/server/routers/resource/getUserResources.ts +++ b/server/routers/resource/getUserResources.ts @@ -145,7 +145,7 @@ export async function getUserResources( niceId: string; destination: string; mode: string; - protocol: string | null; + scheme: string | null; enabled: boolean; alias: string | null; aliasAddress: string | null; @@ -158,7 +158,7 @@ export async function getUserResources( niceId: siteResources.niceId, destination: siteResources.destination, mode: siteResources.mode, - protocol: siteResources.protocol, + scheme: siteResources.scheme, enabled: siteResources.enabled, alias: siteResources.alias, aliasAddress: siteResources.aliasAddress @@ -242,7 +242,7 @@ export async function getUserResources( name: siteResource.name, destination: siteResource.destination, mode: siteResource.mode, - protocol: siteResource.protocol, + protocol: siteResource.scheme, enabled: siteResource.enabled, alias: siteResource.alias, aliasAddress: siteResource.aliasAddress, @@ -291,7 +291,7 @@ export type GetUserResourcesResponse = { enabled: boolean; alias: string | null; aliasAddress: string | null; - type: 'site'; + type: "site"; }>; }; }; diff --git a/server/routers/resource/index.ts b/server/routers/resource/index.ts index 12e98a70d..6a259d7fe 100644 --- a/server/routers/resource/index.ts +++ 
b/server/routers/resource/index.ts @@ -32,3 +32,4 @@ export * from "./addUserToResource"; export * from "./removeUserFromResource"; export * from "./listAllResourceNames"; export * from "./removeEmailFromResourceWhitelist"; +export * from "./getStatusHistory"; diff --git a/server/routers/site/deleteSite.ts b/server/routers/site/deleteSite.ts index 587572535..344f6b4e3 100644 --- a/server/routers/site/deleteSite.ts +++ b/server/routers/site/deleteSite.ts @@ -1,6 +1,6 @@ import { Request, Response, NextFunction } from "express"; import { z } from "zod"; -import { db, Site, siteResources } from "@server/db"; +import { db, Site, siteNetworks, siteResources } from "@server/db"; import { newts, newtSessions, sites } from "@server/db"; import { eq } from "drizzle-orm"; import response from "@server/lib/response"; @@ -71,18 +71,23 @@ export async function deleteSite( await deletePeer(site.exitNodeId!, site.pubKey); } } else if (site.type == "newt") { - // delete all of the site resources on this site - const siteResourcesOnSite = trx - .delete(siteResources) - .where(eq(siteResources.siteId, siteId)) - .returning(); + const networks = await trx + .select({ networkId: siteNetworks.networkId }) + .from(siteNetworks) + .where(eq(siteNetworks.siteId, siteId)); // loop through them - for (const removedSiteResource of await siteResourcesOnSite) { - await rebuildClientAssociationsFromSiteResource( - removedSiteResource, - trx - ); + for (const network of await networks) { + const [siteResource] = await trx + .select() + .from(siteResources) + .where(eq(siteResources.networkId, network.networkId)); + if (siteResource) { + await rebuildClientAssociationsFromSiteResource( + siteResource, + trx + ); + } } // get the newt on the site by querying the newt table for siteId diff --git a/server/routers/site/getStatusHistory.ts b/server/routers/site/getStatusHistory.ts new file mode 100644 index 000000000..f1717c8a9 --- /dev/null +++ b/server/routers/site/getStatusHistory.ts @@ -0,0 +1,93 
@@ +import { Request, Response, NextFunction } from "express"; +import { z } from "zod"; +import { db, statusHistory } from "@server/db"; +import { and, eq, gte, asc } from "drizzle-orm"; +import response from "@server/lib/response"; +import HttpCode from "@server/types/HttpCode"; +import createHttpError from "http-errors"; +import logger from "@server/logger"; +import { fromError } from "zod-validation-error"; +import { + computeBuckets, + statusHistoryQuerySchema, + StatusHistoryResponse +} from "@server/lib/statusHistory"; + +const siteParamsSchema = z.object({ + siteId: z.string().transform((v) => parseInt(v, 10)) +}); + +export async function getSiteStatusHistory( + req: Request, + res: Response, + next: NextFunction +): Promise { + try { + const parsedParams = siteParamsSchema.safeParse(req.params); + if (!parsedParams.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedParams.error).toString() + ) + ); + } + const parsedQuery = statusHistoryQuerySchema.safeParse(req.query); + if (!parsedQuery.success) { + return next( + createHttpError( + HttpCode.BAD_REQUEST, + fromError(parsedQuery.error).toString() + ) + ); + } + + const entityType = "site"; + const entityId = parsedParams.data.siteId; + const { days } = parsedQuery.data; + + const nowSec = Math.floor(Date.now() / 1000); + const startSec = nowSec - days * 86400; + + const events = await db + .select() + .from(statusHistory) + .where( + and( + eq(statusHistory.entityType, entityType), + eq(statusHistory.entityId, entityId), + gte(statusHistory.timestamp, startSec) + ) + ) + .orderBy(asc(statusHistory.timestamp)); + + const { buckets, totalDowntime } = computeBuckets(events, days); + const totalWindow = days * 86400; + const overallUptime = + totalWindow > 0 + ? 
Math.max( + 0, + ((totalWindow - totalDowntime) / totalWindow) * 100 + ) + : 100; + + return response(res, { + data: { + entityType, + entityId, + days: buckets, + overallUptimePercent: Math.round(overallUptime * 100) / 100, + totalDowntimeSeconds: totalDowntime + }, + success: true, + error: false, + message: "Status history retrieved successfully", + status: HttpCode.OK + }); + } catch (error) { + logger.error(error); + return next( + createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred") + ); + } +} diff --git a/server/routers/site/index.ts b/server/routers/site/index.ts index 3edf67c14..00fdeda91 100644 --- a/server/routers/site/index.ts +++ b/server/routers/site/index.ts @@ -1,4 +1,5 @@ export * from "./getSite"; +export * from "./getStatusHistory"; export * from "./createSite"; export * from "./deleteSite"; export * from "./updateSite"; diff --git a/server/routers/siteResource/createSiteResource.ts b/server/routers/siteResource/createSiteResource.ts index 3737ffbea..29fc8c213 100644 --- a/server/routers/siteResource/createSiteResource.ts +++ b/server/routers/siteResource/createSiteResource.ts @@ -5,6 +5,8 @@ import { orgs, roles, roleSiteResources, + siteNetworks, + networks, SiteResource, siteResources, sites, @@ -17,17 +19,18 @@ import { portRangeStringSchema } from "@server/lib/ip"; import { isLicensedOrSubscribed } from "#dynamic/lib/isLicencedOrSubscribed"; -import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import { TierFeature, tierMatrix } from "@server/lib/billing/tierMatrix"; import { rebuildClientAssociationsFromSiteResource } from "@server/lib/rebuildClientAssociations"; import response from "@server/lib/response"; import logger from "@server/logger"; import { OpenAPITags, registry } from "@server/openApi"; import HttpCode from "@server/types/HttpCode"; -import { and, eq } from "drizzle-orm"; +import { and, eq, inArray } from "drizzle-orm"; import { NextFunction, Request, Response } from "express"; import createHttpError 
from "http-errors"; import { z } from "zod"; import { fromError } from "zod-validation-error"; +import { validateAndConstructDomain } from "@server/lib/domainUtils"; const createSiteResourceParamsSchema = z.strictObject({ orgId: z.string() @@ -36,12 +39,14 @@ const createSiteResourceParamsSchema = z.strictObject({ const createSiteResourceSchema = z .strictObject({ name: z.string().min(1).max(255), - mode: z.enum(["host", "cidr", "port"]), - siteId: z.int(), niceId: z.string().optional(), // protocol: z.enum(["tcp", "udp"]).optional(), + mode: z.enum(["host", "cidr", "http"]), + ssl: z.boolean().optional(), // only used for http mode + scheme: z.enum(["http", "https"]).optional(), + siteIds: z.array(z.int()), // proxyPort: z.int().positive().optional(), - // destinationPort: z.int().positive().optional(), + destinationPort: z.int().positive().optional(), destination: z.string().min(1), enabled: z.boolean().default(true), alias: z @@ -58,20 +63,24 @@ const createSiteResourceSchema = z udpPortRangeString: portRangeStringSchema, disableIcmp: z.boolean().optional(), authDaemonPort: z.int().positive().optional(), - authDaemonMode: z.enum(["site", "remote"]).optional() + authDaemonMode: z.enum(["site", "remote"]).optional(), + domainId: z.string().optional(), // only used for http mode, we need this to verify the alias is unique within the org + subdomain: z.string().optional() // only used for http mode, we need this to verify the alias is unique within the org }) .strict() .refine( (data) => { if (data.mode === "host") { - // Check if it's a valid IP address using zod (v4 or v6) - const isValidIP = z - // .union([z.ipv4(), z.ipv6()]) - .union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere - .safeParse(data.destination).success; + if (data.mode == "host") { + // Check if it's a valid IP address using zod (v4 or v6) + const isValidIP = z + // .union([z.ipv4(), z.ipv6()]) + .union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 
works everywhere + .safeParse(data.destination).success; - if (isValidIP) { - return true; + if (isValidIP) { + return true; + } } // Check if it's a valid domain (hostname pattern, TLD not required) @@ -106,6 +115,21 @@ const createSiteResourceSchema = z { message: "Destination must be a valid CIDR notation for cidr mode" } + ) + .refine( + (data) => { + if (data.mode !== "http") return true; + return ( + data.scheme !== undefined && + data.destinationPort !== undefined && + data.destinationPort >= 1 && + data.destinationPort <= 65535 + ); + }, + { + message: + "HTTP mode requires scheme (http or https) and a valid destination port" + } ); export type CreateSiteResourceBody = z.infer; @@ -160,14 +184,15 @@ export async function createSiteResource( const { orgId } = parsedParams.data; const { name, - siteId, niceId, + siteIds, mode, - // protocol, + scheme, // proxyPort, - // destinationPort, + destinationPort, destination, enabled, + ssl, alias, userIds, roleIds, @@ -176,18 +201,36 @@ export async function createSiteResource( udpPortRangeString, disableIcmp, authDaemonPort, - authDaemonMode + authDaemonMode, + domainId, + subdomain } = parsedBody.data; + if (mode == "http") { + const hasHttpFeature = await isLicensedOrSubscribed( + orgId, + tierMatrix[TierFeature.HTTPPrivateResources] + ); + if (!hasHttpFeature) { + return next( + createHttpError( + HttpCode.FORBIDDEN, + "HTTP private resources are not included in your current plan. Please upgrade." 
+ ) + ); + } + } + // Verify the site exists and belongs to the org - const [site] = await db + const sitesToAssign = await db .select() .from(sites) - .where(and(eq(sites.siteId, siteId), eq(sites.orgId, orgId))) - .limit(1); + .where(and(inArray(sites.siteId, siteIds), eq(sites.orgId, orgId))); - if (!site) { - return next(createHttpError(HttpCode.NOT_FOUND, "Site not found")); + if (sitesToAssign.length !== siteIds.length) { + return next( + createHttpError(HttpCode.NOT_FOUND, "Some site not found") + ); } const [org] = await db @@ -228,29 +271,50 @@ export async function createSiteResource( ); } - // // check if resource with same protocol and proxy port already exists (only for port mode) - // if (mode === "port" && protocol && proxyPort) { - // const [existingResource] = await db - // .select() - // .from(siteResources) - // .where( - // and( - // eq(siteResources.siteId, siteId), - // eq(siteResources.orgId, orgId), - // eq(siteResources.protocol, protocol), - // eq(siteResources.proxyPort, proxyPort) - // ) - // ) - // .limit(1); - // if (existingResource && existingResource.siteResourceId) { - // return next( - // createHttpError( - // HttpCode.CONFLICT, - // "A resource with the same protocol and proxy port already exists" - // ) - // ); - // } - // } + if (domainId && alias) { + // throw an error because we can only have one or the other + return next( + createHttpError( + HttpCode.BAD_REQUEST, + "Alias and domain cannot both be set. Please choose one or the other." 
+ ) + ); + } + + let fullDomain: string | null = null; + let finalSubdomain: string | null = null; + if (domainId) { + // Validate domain and construct full domain + const domainResult = await validateAndConstructDomain( + domainId, + orgId, + subdomain + ); + + if (!domainResult.success) { + return next( + createHttpError(HttpCode.BAD_REQUEST, domainResult.error) + ); + } + + fullDomain = domainResult.fullDomain; + finalSubdomain = domainResult.subdomain; + + // make sure the full domain is unique + const existingResource = await db + .select() + .from(siteResources) + .where(eq(siteResources.fullDomain, fullDomain)); + + if (existingResource.length > 0) { + return next( + createHttpError( + HttpCode.CONFLICT, + "Resource with that domain already exists" + ) + ); + } + } // make sure the alias is unique within the org if provided if (alias) { @@ -286,27 +350,49 @@ export async function createSiteResource( } let aliasAddress: string | null = null; - if (mode == "host") { - // we can only have an alias on a host + if (mode === "host" || mode === "http") { aliasAddress = await getNextAvailableAliasAddress(orgId); } let newSiteResource: SiteResource | undefined; await db.transaction(async (trx) => { + const [network] = await trx + .insert(networks) + .values({ + scope: "resource", + orgId: orgId + }) + .returning(); + + if (!network) { + return next( + createHttpError( + HttpCode.INTERNAL_SERVER_ERROR, + `Failed to create network` + ) + ); + } + // Create the site resource const insertValues: typeof siteResources.$inferInsert = { - siteId, niceId: updatedNiceId!, orgId, name, - mode: mode as "host" | "cidr", + mode, + ssl, + networkId: network.networkId, destination, + scheme, + destinationPort, enabled, - alias, + alias: alias ? 
alias.trim() : null, aliasAddress, tcpPortRangeString, udpPortRangeString, - disableIcmp + disableIcmp, + domainId, + subdomain: finalSubdomain, + fullDomain }; if (isLicensedSshPam) { if (authDaemonPort !== undefined) @@ -323,6 +409,13 @@ export async function createSiteResource( //////////////////// update the associations //////////////////// + for (const siteId of siteIds) { + await trx.insert(siteNetworks).values({ + siteId: siteId, + networkId: network.networkId + }); + } + const [adminRole] = await trx .select() .from(roles) @@ -365,16 +458,21 @@ export async function createSiteResource( ); } - const [newt] = await trx - .select() - .from(newts) - .where(eq(newts.siteId, site.siteId)) - .limit(1); + for (const siteToAssign of sitesToAssign) { + const [newt] = await trx + .select() + .from(newts) + .where(eq(newts.siteId, siteToAssign.siteId)) + .limit(1); - if (!newt) { - return next( - createHttpError(HttpCode.NOT_FOUND, "Newt not found") - ); + if (!newt) { + return next( + createHttpError( + HttpCode.NOT_FOUND, + `Newt not found for site ${siteToAssign.siteId}` + ) + ); + } } await rebuildClientAssociationsFromSiteResource( @@ -393,7 +491,7 @@ export async function createSiteResource( } logger.info( - `Created site resource ${newSiteResource.siteResourceId} for site ${siteId}` + `Created site resource ${newSiteResource.siteResourceId} for org ${orgId}` ); return response(res, { diff --git a/server/routers/siteResource/deleteSiteResource.ts b/server/routers/siteResource/deleteSiteResource.ts index 5b50b0ea3..8d08d545d 100644 --- a/server/routers/siteResource/deleteSiteResource.ts +++ b/server/routers/siteResource/deleteSiteResource.ts @@ -70,17 +70,18 @@ export async function deleteSiteResource( .where(and(eq(siteResources.siteResourceId, siteResourceId))) .returning(); - const [newt] = await trx - .select() - .from(newts) - .where(eq(newts.siteId, removedSiteResource.siteId)) - .limit(1); + // not sure why this is here... 
+ // const [newt] = await trx + // .select() + // .from(newts) + // .where(eq(newts.siteId, removedSiteResource.siteId)) + // .limit(1); - if (!newt) { - return next( - createHttpError(HttpCode.NOT_FOUND, "Newt not found") - ); - } + // if (!newt) { + // return next( + // createHttpError(HttpCode.NOT_FOUND, "Newt not found") + // ); + // } await rebuildClientAssociationsFromSiteResource( removedSiteResource, diff --git a/server/routers/siteResource/getSiteResource.ts b/server/routers/siteResource/getSiteResource.ts index be28d36e4..2e3dfe87b 100644 --- a/server/routers/siteResource/getSiteResource.ts +++ b/server/routers/siteResource/getSiteResource.ts @@ -17,38 +17,34 @@ const getSiteResourceParamsSchema = z.strictObject({ .transform((val) => (val ? Number(val) : undefined)) .pipe(z.int().positive().optional()) .optional(), - siteId: z.string().transform(Number).pipe(z.int().positive()), niceId: z.string().optional(), orgId: z.string() }); async function query( siteResourceId?: number, - siteId?: number, niceId?: string, orgId?: string ) { - if (siteResourceId && siteId && orgId) { + if (siteResourceId && orgId) { const [siteResource] = await db .select() .from(siteResources) .where( and( eq(siteResources.siteResourceId, siteResourceId), - eq(siteResources.siteId, siteId), eq(siteResources.orgId, orgId) ) ) .limit(1); return siteResource; - } else if (niceId && siteId && orgId) { + } else if (niceId && orgId) { const [siteResource] = await db .select() .from(siteResources) .where( and( eq(siteResources.niceId, niceId), - eq(siteResources.siteId, siteId), eq(siteResources.orgId, orgId) ) ) @@ -84,7 +80,6 @@ registry.registerPath({ request: { params: z.object({ niceId: z.string(), - siteId: z.number(), orgId: z.string() }) }, @@ -107,10 +102,10 @@ export async function getSiteResource( ); } - const { siteResourceId, siteId, niceId, orgId } = parsedParams.data; + const { siteResourceId, niceId, orgId } = parsedParams.data; // Get the site resource - const 
siteResource = await query(siteResourceId, siteId, niceId, orgId); + const siteResource = await query(siteResourceId, niceId, orgId); if (!siteResource) { return next( diff --git a/server/routers/siteResource/listAllSiteResourcesByOrg.ts b/server/routers/siteResource/listAllSiteResourcesByOrg.ts index 3320aa3b7..8750e7516 100644 --- a/server/routers/siteResource/listAllSiteResourcesByOrg.ts +++ b/server/routers/siteResource/listAllSiteResourcesByOrg.ts @@ -1,4 +1,4 @@ -import { db, SiteResource, siteResources, sites } from "@server/db"; +import { db, DB_TYPE, SiteResource, siteNetworks, siteResources, sites } from "@server/db"; import response from "@server/lib/response"; import logger from "@server/logger"; import { OpenAPITags, registry } from "@server/openApi"; @@ -41,12 +41,12 @@ const listAllSiteResourcesByOrgQuerySchema = z.object({ }), query: z.string().optional(), mode: z - .enum(["host", "cidr"]) + .enum(["host", "cidr", "http"]) .optional() .catch(undefined) .openapi({ type: "string", - enum: ["host", "cidr"], + enum: ["host", "cidr", "http"], description: "Filter site resources by mode" }), sort_by: z @@ -73,22 +73,58 @@ const listAllSiteResourcesByOrgQuerySchema = z.object({ export type ListAllSiteResourcesByOrgResponse = PaginatedResponse<{ siteResources: (SiteResource & { - siteName: string; - siteNiceId: string; - siteAddress: string | null; + siteOnlines: boolean[]; + siteIds: number[]; + siteNames: string[]; + siteNiceIds: string[]; + siteAddresses: (string | null)[]; })[]; }>; +/** + * Returns an aggregation expression compatible with both SQLite and PostgreSQL. 
+ * - SQLite: json_group_array(col) → returns a JSON array string, parsed after fetch + * - PostgreSQL: array_agg(col) → returns a native array + */ +function aggCol(column: any) { + if (DB_TYPE === "sqlite") { + return sql`json_group_array(${column})`; + } + return sql`array_agg(${column})`; +} + +/** + * For SQLite the aggregated columns come back as JSON strings; parse them into + * proper arrays. For PostgreSQL the driver already returns native arrays, so + * the row is returned unchanged. + */ +function transformSiteResourceRow(row: any) { + if (DB_TYPE !== "sqlite") { + return row; + } + return { + ...row, + siteNames: JSON.parse(row.siteNames) as string[], + siteNiceIds: JSON.parse(row.siteNiceIds) as string[], + siteIds: JSON.parse(row.siteIds) as number[], + siteAddresses: JSON.parse(row.siteAddresses) as (string | null)[], + // SQLite stores booleans as 0/1 integers + siteOnlines: (JSON.parse(row.siteOnlines) as (0 | 1)[]).map( + (v) => v === 1 + ) as boolean[] + }; +} + function querySiteResourcesBase() { return db .select({ siteResourceId: siteResources.siteResourceId, - siteId: siteResources.siteId, orgId: siteResources.orgId, niceId: siteResources.niceId, name: siteResources.name, mode: siteResources.mode, - protocol: siteResources.protocol, + ssl: siteResources.ssl, + scheme: siteResources.scheme, proxyPort: siteResources.proxyPort, destinationPort: siteResources.destinationPort, destination: siteResources.destination, @@ -100,12 +136,24 @@ function querySiteResourcesBase() { disableIcmp: siteResources.disableIcmp, authDaemonMode: siteResources.authDaemonMode, authDaemonPort: siteResources.authDaemonPort, - siteName: sites.name, - siteNiceId: sites.niceId, - siteAddress: sites.address + subdomain: siteResources.subdomain, + domainId: siteResources.domainId, + fullDomain: siteResources.fullDomain, + networkId: siteResources.networkId, + defaultNetworkId: siteResources.defaultNetworkId, + siteNames: aggCol(sites.name), + siteNiceIds: 
aggCol(sites.niceId), + siteIds: aggCol(sites.siteId), + siteAddresses: aggCol<(string | null)[]>(sites.address), + siteOnlines: aggCol(sites.online) }) .from(siteResources) - .innerJoin(sites, eq(siteResources.siteId, sites.siteId)); + .innerJoin( + siteNetworks, + eq(siteResources.networkId, siteNetworks.networkId) + ) + .innerJoin(sites, eq(siteNetworks.siteId, sites.siteId)) + .groupBy(siteResources.siteResourceId); } registry.registerPath({ @@ -193,10 +241,12 @@ export async function listAllSiteResourcesByOrg( const baseQuery = querySiteResourcesBase().where(and(...conditions)); const countQuery = db.$count( - querySiteResourcesBase().where(and(...conditions)).as("filtered_site_resources") + querySiteResourcesBase() + .where(and(...conditions)) + .as("filtered_site_resources") ); - const [siteResourcesList, totalCount] = await Promise.all([ + const [siteResourcesRaw, totalCount] = await Promise.all([ baseQuery .limit(pageSize) .offset(pageSize * (page - 1)) @@ -210,6 +260,8 @@ export async function listAllSiteResourcesByOrg( countQuery ]); + const siteResourcesList = siteResourcesRaw.map(transformSiteResourceRow); + return response(res, { data: { siteResources: siteResourcesList, @@ -233,4 +285,4 @@ export async function listAllSiteResourcesByOrg( ) ); } -} +} \ No newline at end of file diff --git a/server/routers/siteResource/listSiteResources.ts b/server/routers/siteResource/listSiteResources.ts index 358aa0497..8a1469f76 100644 --- a/server/routers/siteResource/listSiteResources.ts +++ b/server/routers/siteResource/listSiteResources.ts @@ -1,6 +1,6 @@ import { Request, Response, NextFunction } from "express"; import { z } from "zod"; -import { db } from "@server/db"; +import { db, networks, siteNetworks } from "@server/db"; import { siteResources, sites, SiteResource } from "@server/db"; import response from "@server/lib/response"; import HttpCode from "@server/types/HttpCode"; @@ -108,13 +108,21 @@ export async function listSiteResources( return 
next(createHttpError(HttpCode.NOT_FOUND, "Site not found")); } - // Get site resources + // Get site resources by joining networks to siteResources via siteNetworks const siteResourcesList = await db .select() - .from(siteResources) + .from(siteNetworks) + .innerJoin( + networks, + eq(siteNetworks.networkId, networks.networkId) + ) + .innerJoin( + siteResources, + eq(siteResources.networkId, networks.networkId) + ) .where( and( - eq(siteResources.siteId, siteId), + eq(siteNetworks.siteId, siteId), eq(siteResources.orgId, orgId) ) ) @@ -128,6 +136,7 @@ export async function listSiteResources( .limit(limit) .offset(offset); + return response(res, { data: { siteResources: siteResourcesList }, success: true, diff --git a/server/routers/siteResource/updateSiteResource.ts b/server/routers/siteResource/updateSiteResource.ts index ab70d0fce..4335b55d3 100644 --- a/server/routers/siteResource/updateSiteResource.ts +++ b/server/routers/siteResource/updateSiteResource.ts @@ -1,4 +1,3 @@ -import { isLicensedOrSubscribed } from "#dynamic/lib/isLicencedOrSubscribed"; import { clientSiteResources, clientSiteResourcesAssociationsCache, @@ -7,13 +6,21 @@ import { orgs, roles, roleSiteResources, + siteNetworks, SiteResource, siteResources, sites, + networks, Transaction, userSiteResources } from "@server/db"; -import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import { isLicensedOrSubscribed } from "#dynamic/lib/isLicencedOrSubscribed"; +import { TierFeature, tierMatrix } from "@server/lib/billing/tierMatrix"; +import { validateAndConstructDomain } from "@server/lib/domainUtils"; +import response from "@server/lib/response"; +import { eq, and, ne, inArray } from "drizzle-orm"; +import { OpenAPITags, registry } from "@server/openApi"; +import { updatePeerData, updateTargets } from "@server/routers/client/targets"; import { generateAliasConfig, generateRemoteSubnets, @@ -22,12 +29,8 @@ import { portRangeStringSchema } from "@server/lib/ip"; import { 
rebuildClientAssociationsFromSiteResource } from "@server/lib/rebuildClientAssociations"; -import response from "@server/lib/response"; import logger from "@server/logger"; -import { OpenAPITags, registry } from "@server/openApi"; -import { updatePeerData, updateTargets } from "@server/routers/client/targets"; import HttpCode from "@server/types/HttpCode"; -import { and, eq, ne } from "drizzle-orm"; import { NextFunction, Request, Response } from "express"; import createHttpError from "http-errors"; import { z } from "zod"; @@ -40,7 +43,8 @@ const updateSiteResourceParamsSchema = z.strictObject({ const updateSiteResourceSchema = z .strictObject({ name: z.string().min(1).max(255).optional(), - siteId: z.int(), + siteIds: z.array(z.int()), + // niceId: z.string().min(1).max(255).regex(/^[a-zA-Z0-9-]+$/, "niceId can only contain letters, numbers, and dashes").optional(), niceId: z .string() .min(1) @@ -51,10 +55,11 @@ const updateSiteResourceSchema = z ) .optional(), // mode: z.enum(["host", "cidr", "port"]).optional(), - mode: z.enum(["host", "cidr"]).optional(), - // protocol: z.enum(["tcp", "udp"]).nullish(), + mode: z.enum(["host", "cidr", "http"]).optional(), + ssl: z.boolean().optional(), + scheme: z.enum(["http", "https"]).nullish(), // proxyPort: z.int().positive().nullish(), - // destinationPort: z.int().positive().nullish(), + destinationPort: z.int().positive().nullish(), destination: z.string().min(1).optional(), enabled: z.boolean().optional(), alias: z @@ -71,7 +76,9 @@ const updateSiteResourceSchema = z udpPortRangeString: portRangeStringSchema, disableIcmp: z.boolean().optional(), authDaemonPort: z.int().positive().nullish(), - authDaemonMode: z.enum(["site", "remote"]).optional() + authDaemonMode: z.enum(["site", "remote"]).optional(), + domainId: z.string().optional(), + subdomain: z.string().optional() }) .strict() .refine( @@ -118,6 +125,23 @@ const updateSiteResourceSchema = z { message: "Destination must be a valid CIDR notation for cidr mode" } 
+ ) + .refine( + (data) => { + if (data.mode !== "http") return true; + return ( + data.scheme !== undefined && + data.scheme !== null && + data.destinationPort !== undefined && + data.destinationPort !== null && + data.destinationPort >= 1 && + data.destinationPort <= 65535 + ); + }, + { + message: + "HTTP mode requires scheme (http or https) and a valid destination port" + } ); export type UpdateSiteResourceBody = z.infer; @@ -172,11 +196,14 @@ export async function updateSiteResource( const { siteResourceId } = parsedParams.data; const { name, - siteId, // because it can change + siteIds, // because it can change niceId, mode, + scheme, destination, + destinationPort, alias, + ssl, enabled, userIds, roleIds, @@ -185,19 +212,11 @@ export async function updateSiteResource( udpPortRangeString, disableIcmp, authDaemonPort, - authDaemonMode + authDaemonMode, + domainId, + subdomain } = parsedBody.data; - const [site] = await db - .select() - .from(sites) - .where(eq(sites.siteId, siteId)) - .limit(1); - - if (!site) { - return next(createHttpError(HttpCode.NOT_FOUND, "Site not found")); - } - // Check if site resource exists const [existingSiteResource] = await db .select() @@ -211,6 +230,21 @@ export async function updateSiteResource( ); } + if (mode == "http") { + const hasHttpFeature = await isLicensedOrSubscribed( + existingSiteResource.orgId, + tierMatrix[TierFeature.HTTPPrivateResources] + ); + if (!hasHttpFeature) { + return next( + createHttpError( + HttpCode.FORBIDDEN, + "HTTP private resources are not included in your current plan. Please upgrade." 
+ ) + ); + } + } + const isLicensedSshPam = await isLicensedOrSubscribed( existingSiteResource.orgId, tierMatrix.sshPam @@ -237,6 +271,23 @@ export async function updateSiteResource( ); } + // Verify the site exists and belongs to the org + const sitesToAssign = await db + .select() + .from(sites) + .where( + and( + inArray(sites.siteId, siteIds), + eq(sites.orgId, existingSiteResource.orgId) + ) + ); + + if (sitesToAssign.length !== siteIds.length) { + return next( + createHttpError(HttpCode.NOT_FOUND, "Some site not found") + ); + } + // Only check if destination is an IP address const isIp = z .union([z.ipv4(), z.ipv6()]) @@ -254,22 +305,60 @@ export async function updateSiteResource( ); } - let existingSite = site; - let siteChanged = false; - if (existingSiteResource.siteId !== siteId) { - siteChanged = true; - // get the existing site - [existingSite] = await db - .select() - .from(sites) - .where(eq(sites.siteId, existingSiteResource.siteId)) - .limit(1); + let sitesChanged = false; + const existingSiteIds = existingSiteResource.networkId + ? 
await db + .select() + .from(siteNetworks) + .where( + eq(siteNetworks.networkId, existingSiteResource.networkId) + ) + : []; - if (!existingSite) { + const existingSiteIdSet = new Set(existingSiteIds.map((s) => s.siteId)); + const newSiteIdSet = new Set(siteIds); + + if ( + existingSiteIdSet.size !== newSiteIdSet.size || + ![...existingSiteIdSet].every((id) => newSiteIdSet.has(id)) + ) { + sitesChanged = true; + } + + let fullDomain: string | null = null; + let finalSubdomain: string | null = null; + if (domainId) { + // Validate domain and construct full domain + const domainResult = await validateAndConstructDomain( + domainId, + org.orgId, + subdomain + ); + + if (!domainResult.success) { + return next( + createHttpError(HttpCode.BAD_REQUEST, domainResult.error) + ); + } + + fullDomain = domainResult.fullDomain; + finalSubdomain = domainResult.subdomain; + + // make sure the full domain is unique + const [existingDomain] = await db + .select() + .from(siteResources) + .where(eq(siteResources.fullDomain, fullDomain)); + + if ( + existingDomain && + existingDomain.siteResourceId !== + existingSiteResource.siteResourceId + ) { return next( createHttpError( - HttpCode.NOT_FOUND, - "Existing site not found" + HttpCode.CONFLICT, + "Resource with that domain already exists" ) ); } @@ -302,7 +391,7 @@ export async function updateSiteResource( let updatedSiteResource: SiteResource | undefined; await db.transaction(async (trx) => { // if the site is changed we need to delete and recreate the resource to avoid complications with the rebuild function otherwise we can just update in place - if (siteChanged) { + if (sitesChanged) { // delete the existing site resource await trx .delete(siteResources) @@ -343,15 +432,20 @@ export async function updateSiteResource( .update(siteResources) .set({ name, - siteId, niceId, mode, + scheme, + ssl, destination, + destinationPort, enabled, - alias: alias && alias.trim() ? alias : null, + alias: alias ? 
alias.trim() : null, tcpPortRangeString, udpPortRangeString, disableIcmp, + domainId, + subdomain: finalSubdomain, + fullDomain, ...sshPamSet }) .where( @@ -372,6 +466,23 @@ export async function updateSiteResource( //////////////////// update the associations //////////////////// + // delete the site - site resources associations + await trx + .delete(siteNetworks) + .where( + eq( + siteNetworks.networkId, + updatedSiteResource.networkId! + ) + ); + + for (const siteId of siteIds) { + await trx.insert(siteNetworks).values({ + siteId: siteId, + networkId: updatedSiteResource.networkId! + }); + } + const [adminRole] = await trx .select() .from(roles) @@ -447,14 +558,20 @@ export async function updateSiteResource( .update(siteResources) .set({ name: name, - siteId: siteId, + niceId: niceId, mode: mode, + scheme, + ssl, destination: destination, + destinationPort: destinationPort, enabled: enabled, - alias: alias && alias.trim() ? alias : null, + alias: alias ? alias.trim() : null, tcpPortRangeString: tcpPortRangeString, udpPortRangeString: udpPortRangeString, disableIcmp: disableIcmp, + domainId, + subdomain: finalSubdomain, + fullDomain, ...sshPamSet }) .where( @@ -464,6 +581,23 @@ export async function updateSiteResource( //////////////////// update the associations //////////////////// + // delete the site - site resources associations + await trx + .delete(siteNetworks) + .where( + eq( + siteNetworks.networkId, + updatedSiteResource.networkId! + ) + ); + + for (const siteId of siteIds) { + await trx.insert(siteNetworks).values({ + siteId: siteId, + networkId: updatedSiteResource.networkId! 
+ }); + } + await trx .delete(clientSiteResources) .where( @@ -533,14 +667,15 @@ export async function updateSiteResource( ); } - logger.info( - `Updated site resource ${siteResourceId} for site ${siteId}` - ); + logger.info(`Updated site resource ${siteResourceId}`); await handleMessagingForUpdatedSiteResource( existingSiteResource, updatedSiteResource, - { siteId: site.siteId, orgId: site.orgId }, + siteIds.map((siteId) => ({ + siteId, + orgId: existingSiteResource.orgId + })), trx ); } @@ -567,7 +702,7 @@ export async function updateSiteResource( export async function handleMessagingForUpdatedSiteResource( existingSiteResource: SiteResource | undefined, updatedSiteResource: SiteResource, - site: { siteId: number; orgId: string }, + sites: { siteId: number; orgId: string }[], trx: Transaction ) { logger.debug( @@ -589,9 +724,14 @@ export async function handleMessagingForUpdatedSiteResource( const destinationChanged = existingSiteResource && existingSiteResource.destination !== updatedSiteResource.destination; + const destinationPortChanged = + existingSiteResource && + existingSiteResource.destinationPort !== + updatedSiteResource.destinationPort; const aliasChanged = existingSiteResource && - existingSiteResource.alias !== updatedSiteResource.alias; + (existingSiteResource.alias !== updatedSiteResource.alias || + existingSiteResource.fullDomain !== updatedSiteResource.fullDomain); // because the full domain gets sent down to the stuff as an alias const portRangesChanged = existingSiteResource && (existingSiteResource.tcpPortRangeString !== @@ -603,106 +743,122 @@ export async function handleMessagingForUpdatedSiteResource( // if the existingSiteResource is undefined (new resource) we don't need to do anything here, the rebuild above handled it all - if (destinationChanged || aliasChanged || portRangesChanged) { - const [newt] = await trx - .select() - .from(newts) - .where(eq(newts.siteId, site.siteId)) - .limit(1); - - if (!newt) { - throw new Error( - "Newt 
not found for site during site resource update" - ); - } - - // Only update targets on newt if destination changed - if (destinationChanged || portRangesChanged) { - const oldTargets = generateSubnetProxyTargetV2( - existingSiteResource, - mergedAllClients - ); - const newTargets = generateSubnetProxyTargetV2( - updatedSiteResource, - mergedAllClients - ); - - await updateTargets( - newt.newtId, - { - oldTargets: oldTargets ? oldTargets : [], - newTargets: newTargets ? newTargets : [] - }, - newt.version - ); - } - - const olmJobs: Promise[] = []; - for (const client of mergedAllClients) { - // does this client have access to another resource on this site that has the same destination still? if so we dont want to remove it from their olm yet - // todo: optimize this query if needed - const oldDestinationStillInUseSites = await trx + if ( + destinationChanged || + aliasChanged || + portRangesChanged || + destinationPortChanged + ) { + for (const site of sites) { + const [newt] = await trx .select() - .from(siteResources) - .innerJoin( - clientSiteResourcesAssociationsCache, - eq( - clientSiteResourcesAssociationsCache.siteResourceId, - siteResources.siteResourceId - ) - ) - .where( - and( - eq( - clientSiteResourcesAssociationsCache.clientId, - client.clientId - ), - eq(siteResources.siteId, site.siteId), - eq( - siteResources.destination, - existingSiteResource.destination - ), - ne( - siteResources.siteResourceId, - existingSiteResource.siteResourceId - ) - ) + .from(newts) + .where(eq(newts.siteId, site.siteId)) + .limit(1); + + if (!newt) { + throw new Error( + "Newt not found for site during site resource update" + ); + } + + // Only update targets on newt if destination changed + if ( + destinationChanged || + portRangesChanged || + destinationPortChanged + ) { + const oldTargets = await generateSubnetProxyTargetV2( + existingSiteResource, + mergedAllClients + ); + const newTargets = await generateSubnetProxyTargetV2( + updatedSiteResource, + mergedAllClients 
); - const oldDestinationStillInUseByASite = - oldDestinationStillInUseSites.length > 0; + await updateTargets( + newt.newtId, + { + oldTargets: oldTargets ? oldTargets : [], + newTargets: newTargets ? newTargets : [] + }, + newt.version + ); + } - // we also need to update the remote subnets on the olms for each client that has access to this site - olmJobs.push( - updatePeerData( - client.clientId, - updatedSiteResource.siteId, - destinationChanged - ? { - oldRemoteSubnets: !oldDestinationStillInUseByASite - ? generateRemoteSubnets([ - existingSiteResource - ]) - : [], - newRemoteSubnets: generateRemoteSubnets([ - updatedSiteResource - ]) - } - : undefined, - aliasChanged - ? { - oldAliases: generateAliasConfig([ - existingSiteResource - ]), - newAliases: generateAliasConfig([ - updatedSiteResource - ]) - } - : undefined - ) - ); + const olmJobs: Promise[] = []; + for (const client of mergedAllClients) { + // does this client have access to another resource on this site that has the same destination still? 
if so we dont want to remove it from their olm yet + // todo: optimize this query if needed + const oldDestinationStillInUseSites = await trx + .select() + .from(siteResources) + .innerJoin( + clientSiteResourcesAssociationsCache, + eq( + clientSiteResourcesAssociationsCache.siteResourceId, + siteResources.siteResourceId + ) + ) + .innerJoin( + siteNetworks, + eq(siteNetworks.networkId, siteResources.networkId) + ) + .where( + and( + eq( + clientSiteResourcesAssociationsCache.clientId, + client.clientId + ), + eq(siteNetworks.siteId, site.siteId), + eq( + siteResources.destination, + existingSiteResource.destination + ), + ne( + siteResources.siteResourceId, + existingSiteResource.siteResourceId + ) + ) + ); + + const oldDestinationStillInUseByASite = + oldDestinationStillInUseSites.length > 0; + + // we also need to update the remote subnets on the olms for each client that has access to this site + olmJobs.push( + updatePeerData( + client.clientId, + site.siteId, + destinationChanged + ? { + oldRemoteSubnets: + !oldDestinationStillInUseByASite + ? generateRemoteSubnets([ + existingSiteResource + ]) + : [], + newRemoteSubnets: generateRemoteSubnets([ + updatedSiteResource + ]) + } + : undefined, + aliasChanged + ? 
{ + oldAliases: generateAliasConfig([ + existingSiteResource + ]), + newAliases: generateAliasConfig([ + updatedSiteResource + ]) + } + : undefined + ) + ); + } + + await Promise.all(olmJobs); } - - await Promise.all(olmJobs); } } diff --git a/server/routers/target/createTarget.ts b/server/routers/target/createTarget.ts index ba52d85a1..ea7512b9c 100644 --- a/server/routers/target/createTarget.ts +++ b/server/routers/target/createTarget.ts @@ -42,6 +42,8 @@ const createTargetSchema = z.strictObject({ hcMethod: z.string().min(1).optional().nullable(), hcStatus: z.int().optional().nullable(), hcTlsServerName: z.string().optional().nullable(), + hcHealthyThreshold: z.int().positive().min(1).optional().nullable(), + hcUnhealthyThreshold: z.int().positive().min(1).optional().nullable(), path: z.string().optional().nullable(), pathMatchType: z.enum(["exact", "prefix", "regex"]).optional().nullable(), rewritePath: z.string().optional().nullable(), @@ -226,7 +228,10 @@ export async function createTarget( healthCheck = await db .insert(targetHealthCheck) .values({ + orgId: resource.orgId, targetId: newTarget[0].targetId, + siteId: targetData.siteId, + name: `Resource ${resource.name} - ${targetData.ip}:${targetData.port}`, hcEnabled: targetData.hcEnabled ?? false, hcPath: targetData.hcPath ?? null, hcScheme: targetData.hcScheme ?? null, @@ -241,7 +246,9 @@ export async function createTarget( hcMethod: targetData.hcMethod ?? null, hcStatus: targetData.hcStatus ?? null, hcHealth: "unknown", - hcTlsServerName: targetData.hcTlsServerName ?? null + hcTlsServerName: targetData.hcTlsServerName ?? null, + hcHealthyThreshold: targetData.hcHealthyThreshold ?? null, + hcUnhealthyThreshold: targetData.hcUnhealthyThreshold ?? 
null }) .returning(); @@ -271,8 +278,8 @@ export async function createTarget( return response(res, { data: { - ...newTarget[0], - ...healthCheck[0] + ...healthCheck[0], + ...newTarget[0] }, success: true, error: false, diff --git a/server/routers/target/getTarget.ts b/server/routers/target/getTarget.ts index 749e1399b..281c39906 100644 --- a/server/routers/target/getTarget.ts +++ b/server/routers/target/getTarget.ts @@ -15,8 +15,8 @@ const getTargetSchema = z.strictObject({ }); type GetTargetResponse = Target & - Omit & { - hcHeaders: { name: string; value: string }[] | null; + Partial> & { + hcHeaders: { name: string; value: string }[] | null | undefined; }; registry.registerPath({ @@ -70,20 +70,19 @@ export async function getTarget( .limit(1); // Parse hcHeaders from JSON string back to array - let parsedHcHeaders = null; + let parsedHcHeaders: { name: string; value: string }[] | null = null; if (targetHc?.hcHeaders) { try { parsedHcHeaders = JSON.parse(targetHc.hcHeaders); } catch (error) { - // If parsing fails, keep as string for backward compatibility - parsedHcHeaders = targetHc.hcHeaders; + // If parsing fails, keep as null for safety } } return response(res, { data: { - ...target[0], ...targetHc, + ...target[0], hcHeaders: parsedHcHeaders }, success: true, diff --git a/server/routers/target/handleHealthcheckStatusMessage.ts b/server/routers/target/handleHealthcheckStatusMessage.ts index 7ea1730ce..55834d926 100644 --- a/server/routers/target/handleHealthcheckStatusMessage.ts +++ b/server/routers/target/handleHealthcheckStatusMessage.ts @@ -1,9 +1,19 @@ -import { db, targets, resources, sites, targetHealthCheck } from "@server/db"; +import { + db, + targets, + resources, + sites, + targetHealthCheck, + statusHistory +} from "@server/db"; import { MessageHandler } from "@server/routers/ws"; import { Newt } from "@server/db"; import { eq, and } from "drizzle-orm"; import logger from "@server/logger"; -import { unknown } from "zod"; +import { + 
fireHealthCheckHealthyAlert, + fireHealthCheckNotHealthyAlert +} from "#dynamic/lib/alerts"; interface TargetHealthStatus { status: string; @@ -11,7 +21,7 @@ interface TargetHealthStatus { checkCount: number; lastError?: string; config: { - id: string; + id: string; // this could be the hc id or the target id, depending on the version of newt hcEnabled: boolean; hcPath?: string; hcScheme?: string; @@ -22,7 +32,11 @@ interface TargetHealthStatus { hcUnhealthyInterval?: number; hcTimeout?: number; hcHeaders?: any; + hcFollowRedirects?: boolean; hcMethod?: string; + hcTlsServerName?: string; + hcHealthyThreshold?: number; + hcUnhealthyThreshold?: number; }; } @@ -78,18 +92,26 @@ export const handleHealthcheckStatusMessage: MessageHandler = async ( .select({ targetId: targets.targetId, siteId: targets.siteId, + orgId: targetHealthCheck.orgId, + targetHealthCheckId: targetHealthCheck.targetHealthCheckId, + resourceOrgId: resources.orgId, + resourceId: resources.resourceId, + name: targetHealthCheck.name, hcStatus: targetHealthCheck.hcHealth }) - .from(targets) + .from(targetHealthCheck) + .innerJoin( + targets, + eq(targetHealthCheck.targetId, targets.targetId) + ) .innerJoin( resources, eq(targets.resourceId, resources.resourceId) ) .innerJoin(sites, eq(targets.siteId, sites.siteId)) - .innerJoin(targetHealthCheck, eq(targets.targetId, targetHealthCheck.targetId)) .where( and( - eq(targets.targetId, targetIdNum), + eq(targetHealthCheck.targetHealthCheckId, targetIdNum), eq(sites.siteId, newt.siteId) ) ) @@ -120,8 +142,80 @@ export const handleHealthcheckStatusMessage: MessageHandler = async ( | "healthy" | "unhealthy" }) - .where(eq(targetHealthCheck.targetId, targetIdNum)) - .execute(); + .where(eq(targetHealthCheck.targetId, targetCheck.targetId)); + + const orgId = targetCheck.orgId || targetCheck.resourceOrgId; // for backwards compatibility, check both orgId fields because the target health checks dont have the orgId + if (!orgId) { + logger.warn( + `No org ID 
found for target ${targetId}, skipping status history logging` + ); + continue; + } + + // Log the state change to status history + await db.insert(statusHistory).values({ + entityType: "healthCheck", + entityId: targetCheck.targetHealthCheckId, + orgId: orgId, + status: healthStatus.status, + timestamp: Math.floor(Date.now() / 1000) + }); + + if (targetCheck.resourceId) { + // Log the state change to status history for the resource as well + // so we can show the resource status along with the site + + // if the status is healthy we should check if ALL of the targets on the resource are currently healthy and if not then dont mark the resource as healthy yet, we want to wait until all targets are healthy to mark the resource as healthy + let status = healthStatus.status; + if (healthStatus.status === "healthy") { + const otherTargets = await db + .select({ hcHealth: targetHealthCheck.hcHealth }) + .from(targets) + .innerJoin( + targetHealthCheck, + eq(targets.targetId, targetHealthCheck.targetId) + ) + .where( + and( + eq(targets.resourceId, targetCheck.resourceId), + eq(targets.targetId, targetCheck.targetId) // only check the other targets, not the one we just updated + ) + ); + + const allHealthy = otherTargets.every( + (t) => t.hcHealth === "healthy" + ); + if (!allHealthy) { + logger.debug( + `Not marking resource ${targetCheck.resourceId} as healthy because not all targets are healthy` + ); + status = "unhealthy"; + } + } + + await db.insert(statusHistory).values({ + entityType: "resource", + entityId: targetCheck.resourceId, + orgId: orgId, + status: status, + timestamp: Math.floor(Date.now() / 1000) + }); + } + + // because we are checking above if there was a change we can fire the alert here because it changed + if (healthStatus.status === "unhealthy") { + await fireHealthCheckHealthyAlert( + orgId, + targetCheck.targetHealthCheckId, + targetCheck.name + ); + } else if (healthStatus.status === "healthy") { + await fireHealthCheckNotHealthyAlert( + orgId, 
+ targetCheck.targetHealthCheckId, + targetCheck.name + ); + } logger.debug( `Updated health status for target ${targetId} to ${healthStatus.status}` diff --git a/server/routers/target/updateTarget.ts b/server/routers/target/updateTarget.ts index 1f9eff716..52759bfc8 100644 --- a/server/routers/target/updateTarget.ts +++ b/server/routers/target/updateTarget.ts @@ -43,6 +43,8 @@ const updateTargetBodySchema = z hcMethod: z.string().min(1).optional().nullable(), hcStatus: z.int().optional().nullable(), hcTlsServerName: z.string().optional().nullable(), + hcHealthyThreshold: z.int().positive().min(1).optional().nullable(), + hcUnhealthyThreshold: z.int().positive().min(1).optional().nullable(), path: z.string().optional().nullable(), pathMatchType: z .enum(["exact", "prefix", "regex"]) @@ -226,6 +228,7 @@ export async function updateTarget( const [updatedHc] = await db .update(targetHealthCheck) .set({ + siteId: parsedBody.data.siteId, hcEnabled: parsedBody.data.hcEnabled || false, hcPath: parsedBody.data.hcPath, hcScheme: parsedBody.data.hcScheme, @@ -240,6 +243,8 @@ export async function updateTarget( hcMethod: parsedBody.data.hcMethod, hcStatus: parsedBody.data.hcStatus, hcTlsServerName: parsedBody.data.hcTlsServerName, + hcHealthyThreshold: parsedBody.data.hcHealthyThreshold, + hcUnhealthyThreshold: parsedBody.data.hcUnhealthyThreshold, ...(hcHealthValue !== undefined && { hcHealth: hcHealthValue }) }) .where(eq(targetHealthCheck.targetId, targetId)) diff --git a/server/routers/ws/messageHandlers.ts b/server/routers/ws/messageHandlers.ts index 143e4d516..f89284389 100644 --- a/server/routers/ws/messageHandlers.ts +++ b/server/routers/ws/messageHandlers.ts @@ -2,7 +2,7 @@ import { build } from "@server/build"; import { handleNewtRegisterMessage, handleReceiveBandwidthMessage, - handleGetConfigMessage, + handleNewtGetConfigMessage, handleDockerStatusMessage, handleDockerContainersMessage, handleNewtPingRequestMessage, @@ -37,7 +37,7 @@ export const messageHandlers: 
Record = { "newt/disconnecting": handleNewtDisconnectingMessage, "newt/ping": handleNewtPingMessage, "newt/wg/register": handleNewtRegisterMessage, - "newt/wg/get-config": handleGetConfigMessage, + "newt/wg/get-config": handleNewtGetConfigMessage, "newt/receive-bandwidth": handleReceiveBandwidthMessage, "newt/socket/status": handleDockerStatusMessage, "newt/socket/containers": handleDockerContainersMessage, @@ -47,7 +47,7 @@ export const messageHandlers: Record = { "ws/round-trip/complete": handleRoundTripMessage }; -// Start the ping accumulator for all builds — it batches per-site online/lastPing +// Start the ping accumulator for all builds - it batches per-site online/lastPing // updates into periodic bulk writes, preventing connection pool exhaustion. startPingAccumulator(); diff --git a/server/setup/ensureRootApiKey.ts b/server/setup/ensureRootApiKey.ts index 4cf9c032b..55f5186b3 100644 --- a/server/setup/ensureRootApiKey.ts +++ b/server/setup/ensureRootApiKey.ts @@ -34,9 +34,9 @@ export async function ensureRootApiKey() { const envApiKey = process.env.PANGOLIN_ROOT_API_KEY; if (!envApiKey) { - logger.debug( - "PANGOLIN_ROOT_API_KEY not set. Root API key from environment skipped." - ); + // logger.debug( + // "PANGOLIN_ROOT_API_KEY not set. Root API key from environment skipped." 
+ // ); return; } @@ -103,4 +103,4 @@ export async function ensureRootApiKey() { console.error("Failed to ensure root API key:", error); throw error; } -} \ No newline at end of file +} diff --git a/src/app/[orgId]/settings/alerting/[ruleId]/page.tsx b/src/app/[orgId]/settings/alerting/[ruleId]/page.tsx new file mode 100644 index 000000000..86d455db7 --- /dev/null +++ b/src/app/[orgId]/settings/alerting/[ruleId]/page.tsx @@ -0,0 +1,95 @@ +"use client"; + +import AlertRuleGraphEditor from "@app/components/alert-rule-editor/AlertRuleGraphEditor"; +import HeaderTitle from "@app/components/SettingsSectionTitle"; +import { apiResponseToFormValues } from "@app/lib/alertRuleForm"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { usePaidStatus } from "@app/hooks/usePaidStatus"; +import { toast } from "@app/hooks/useToast"; +import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import { useParams, useRouter } from "next/navigation"; +import { useTranslations } from "next-intl"; +import { useEffect, useState } from "react"; +import type { AxiosResponse } from "axios"; +import type { GetAlertRuleResponse } from "@server/private/routers/alertRule"; +import type { AlertRuleFormValues } from "@app/lib/alertRuleForm"; + +export default function EditAlertRulePage() { + const t = useTranslations(); + const params = useParams(); + const router = useRouter(); + const orgId = params.orgId as string; + const ruleIdParam = params.ruleId as string; + const alertRuleId = parseInt(ruleIdParam, 10); + + const api = createApiClient(useEnvContext()); + const { isPaidUser } = usePaidStatus(); + const isPaid = isPaidUser(tierMatrix.alertingRules); + + const [formValues, setFormValues] = useState(undefined); + + useEffect(() => { + if (isNaN(alertRuleId)) { + router.replace(`/${orgId}/settings/alerting`); + return; + } + + api.get>( + `/org/${orgId}/alert-rule/${alertRuleId}` + ) + .then((res) => { + 
const rule = res.data.data; + setFormValues(apiResponseToFormValues(rule)); + }) + .catch((e) => { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + setFormValues(null); + }); + }, [orgId, alertRuleId]); + + useEffect(() => { + if (formValues === null) { + router.replace(`/${orgId}/settings/alerting`); + } + }, [formValues, orgId, router]); + + if (formValues === undefined) { + return ( + <> + +
+ {t("loading")} +
+ + ); + } + + if (formValues === null) { + return null; + } + + return ( + <> + + + + ); +} diff --git a/src/app/[orgId]/settings/alerting/create/page.tsx b/src/app/[orgId]/settings/alerting/create/page.tsx new file mode 100644 index 000000000..9f3f20611 --- /dev/null +++ b/src/app/[orgId]/settings/alerting/create/page.tsx @@ -0,0 +1,32 @@ +"use client"; + +import AlertRuleGraphEditor from "@app/components/alert-rule-editor/AlertRuleGraphEditor"; +import HeaderTitle from "@app/components/SettingsSectionTitle"; +import { defaultFormValues } from "@app/lib/alertRuleForm"; +import { usePaidStatus } from "@app/hooks/usePaidStatus"; +import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import { useParams } from "next/navigation"; +import { useTranslations } from "next-intl"; + +export default function NewAlertRulePage() { + const params = useParams(); + const orgId = params.orgId as string; + const t = useTranslations(); + const { isPaidUser } = usePaidStatus(); + const isPaid = isPaidUser(tierMatrix.alertingRules); + + return ( + <> + + + + ); +} diff --git a/src/app/[orgId]/settings/alerting/page.tsx b/src/app/[orgId]/settings/alerting/page.tsx new file mode 100644 index 000000000..cadc83516 --- /dev/null +++ b/src/app/[orgId]/settings/alerting/page.tsx @@ -0,0 +1,34 @@ +import SettingsSectionTitle from "@app/components/SettingsSectionTitle"; +import AlertingRulesTable from "@app/components/AlertingRulesTable"; +import HealthChecksTable from "@app/components/HealthChecksTable"; +import { HorizontalTabs, TabItem } from "@app/components/HorizontalTabs"; +import { getTranslations } from "next-intl/server"; + +type AlertingPageProps = { + params: Promise<{ orgId: string }>; +}; + +export const dynamic = "force-dynamic"; + +export default async function AlertingPage(props: AlertingPageProps) { + const params = await props.params; + const t = await getTranslations(); + + const tabs: TabItem[] = [ + { title: t("alertingTabRules"), href: "" }, + { title: 
t("alertingTabHealthChecks"), href: "" } + ]; + + return ( + <> + + + + + + + ); +} diff --git a/src/app/[orgId]/settings/logs/access/page.tsx b/src/app/[orgId]/settings/logs/access/page.tsx index a0f1b5386..826e11c17 100644 --- a/src/app/[orgId]/settings/logs/access/page.tsx +++ b/src/app/[orgId]/settings/logs/access/page.tsx @@ -471,11 +471,7 @@ export default function GeneralPage() { : `/${row.original.orgId}/settings/resources/proxy/${row.original.resourceNiceId}` } > - diff --git a/src/app/[orgId]/settings/logs/connection/page.tsx b/src/app/[orgId]/settings/logs/connection/page.tsx index e15708f8e..0fc8f95b7 100644 --- a/src/app/[orgId]/settings/logs/connection/page.tsx +++ b/src/app/[orgId]/settings/logs/connection/page.tsx @@ -22,7 +22,7 @@ import { useParams, useRouter, useSearchParams } from "next/navigation"; import { useEffect, useState, useTransition } from "react"; function formatBytes(bytes: number | null): string { - if (bytes === null || bytes === undefined) return "—"; + if (bytes === null || bytes === undefined) return "-"; if (bytes === 0) return "0 B"; const units = ["B", "KB", "MB", "GB", "TB"]; const i = Math.floor(Math.log(bytes) / Math.log(1024)); @@ -33,7 +33,7 @@ function formatBytes(bytes: number | null): string { function formatDuration(startedAt: number, endedAt: number | null): string { if (endedAt === null || endedAt === undefined) return "Active"; const durationSec = endedAt - startedAt; - if (durationSec < 0) return "—"; + if (durationSec < 0) return "-"; if (durationSec < 60) return `${durationSec}s`; if (durationSec < 3600) { const m = Math.floor(durationSec / 60); @@ -451,11 +451,7 @@ export default function ConnectionLogsPage() { - @@ -464,7 +460,7 @@ export default function ConnectionLogsPage() { } return ( - {row.original.resourceName ?? "—"} + {row.original.resourceName ?? 
"-"} ); } @@ -497,11 +493,7 @@ export default function ConnectionLogsPage() { - @@ -634,6 +636,7 @@ export default function GeneralPage() { { value: "105", label: t("validPassword") }, { value: "106", label: t("validEmail") }, { value: "107", label: t("validSSO") }, + { value: "108", label: t("connectedClient") }, { value: "201", label: t("resourceNotFound") }, { value: "202", label: t("resourceBlocked") }, { value: "203", label: t("droppedByRule") }, diff --git a/src/app/[orgId]/settings/logs/streaming/page.tsx b/src/app/[orgId]/settings/logs/streaming/page.tsx index 7e48d7566..022a8eb2e 100644 --- a/src/app/[orgId]/settings/logs/streaming/page.tsx +++ b/src/app/[orgId]/settings/logs/streaming/page.tsx @@ -38,6 +38,8 @@ import { HttpDestinationCredenza, parseHttpConfig } from "@app/components/HttpDestinationCredenza"; +import { S3DestinationCredenza } from "@app/components/S3DestinationCredenza"; +import { DatadogDestinationCredenza } from "@app/components/DatadogDestinationCredenza"; import { useTranslations } from "next-intl"; // ── Re-export Destination so the rest of the file can use it ────────────────── @@ -203,7 +205,6 @@ function DestinationTypePicker({ id: "s3", title: t("streamingS3Title"), description: t("streamingS3Description"), - disabled: true, icon: ( setSelected(type)} cols={1} /> @@ -291,6 +291,7 @@ export default function StreamingDestinationsPage() { const [typePickerOpen, setTypePickerOpen] = useState(false); const [editingDestination, setEditingDestination] = useState(null); + const [pickedType, setPickedType] = useState("http"); const [togglingIds, setTogglingIds] = useState>(new Set()); // Delete state @@ -392,7 +393,8 @@ export default function StreamingDestinationsPage() { setTypePickerOpen(true); }; - const handleTypePicked = (_type: DestinationType) => { + const handleTypePicked = (type: DestinationType) => { + setPickedType(type); setTypePickerOpen(false); setEditingDestination(null); setModalOpen(true); @@ -400,6 +402,7 @@ export 
default function StreamingDestinationsPage() { const openEdit = (destination: Destination) => { setEditingDestination(destination); + setPickedType((destination.type as DestinationType) ?? "http"); setModalOpen(true); }; @@ -434,7 +437,7 @@ export default function StreamingDestinationsPage() { disabled={!isEnterprise} /> ))} - {/* Add card is always clickable — paywall is enforced inside the picker */} + {/* Add card is always clickable - paywall is enforced inside the picker */} )} @@ -446,13 +449,33 @@ export default function StreamingDestinationsPage() { isPaywalled={!isEnterprise} /> - + {pickedType === "http" && ( + + )} + {pickedType === "s3" && ( + + )} + {pickedType === "datadog" && ( + + )} {deleteTarget && ( ({ + siteId, + siteName: siteResource.siteNames[idx], + siteNiceId: siteResource.siteNiceIds[idx], + online: siteResource.siteOnlines[idx] + })), + mode: siteResource.mode, + scheme: siteResource.scheme, + ssl: siteResource.ssl, + siteNames: siteResource.siteNames, + siteAddresses: siteResource.siteAddresses || null, // protocol: siteResource.protocol, // proxyPort: siteResource.proxyPort, - siteId: siteResource.siteId, + siteIds: siteResource.siteIds, destination: siteResource.destination, - // destinationPort: siteResource.destinationPort, + httpHttpsPort: siteResource.destinationPort ?? null, alias: siteResource.alias || null, aliasAddress: siteResource.aliasAddress || null, - siteNiceId: siteResource.siteNiceId, + siteNiceIds: siteResource.siteNiceIds, niceId: siteResource.niceId, tcpPortRangeString: siteResource.tcpPortRangeString || null, udpPortRangeString: siteResource.udpPortRangeString || null, disableIcmp: siteResource.disableIcmp || false, authDaemonMode: siteResource.authDaemonMode ?? null, - authDaemonPort: siteResource.authDaemonPort ?? null + authDaemonPort: siteResource.authDaemonPort ?? null, + subdomain: siteResource.subdomain ?? null, + domainId: siteResource.domainId ?? null, + fullDomain: siteResource.fullDomain ?? 
null }; } ); diff --git a/src/app/[orgId]/settings/resources/proxy/[niceId]/general/page.tsx b/src/app/[orgId]/settings/resources/proxy/[niceId]/general/page.tsx index 9589f6a2e..f7de28c56 100644 --- a/src/app/[orgId]/settings/resources/proxy/[niceId]/general/page.tsx +++ b/src/app/[orgId]/settings/resources/proxy/[niceId]/general/page.tsx @@ -62,6 +62,7 @@ import { GetResourceResponse } from "@server/routers/resource/getResource"; import type { ResourceContextType } from "@app/contexts/resourceContext"; import { usePaidStatus } from "@app/hooks/usePaidStatus"; import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import UptimeAlertSection from "@app/components/UptimeAlertSection"; type MaintenanceSectionFormProps = { resource: GetResourceResponse; @@ -578,6 +579,13 @@ export default function GeneralForm() { return ( <> + {resource?.resourceId && resource?.orgId && ( + + )} diff --git a/src/app/[orgId]/settings/resources/proxy/[niceId]/proxy/page.tsx b/src/app/[orgId]/settings/resources/proxy/[niceId]/proxy/page.tsx index a9128b9d3..03426ef1f 100644 --- a/src/app/[orgId]/settings/resources/proxy/[niceId]/proxy/page.tsx +++ b/src/app/[orgId]/settings/resources/proxy/[niceId]/proxy/page.tsx @@ -1,6 +1,6 @@ "use client"; -import HealthCheckDialog from "@/components/HealthCheckDialog"; +import HealthCheckCredenza from "@/components/HealthCheckCredenza"; import { Button } from "@/components/ui/button"; import { Input } from "@/components/ui/input"; import { @@ -168,6 +168,30 @@ function ProxyResourceTargetsForm({ const [targets, setTargets] = useState(initialTargets); const [targetsToRemove, setTargetsToRemove] = useState([]); + + const { data: polledTargets } = useQuery({ + ...resourceQueries.resourceTargets({ + resourceId: resource.resourceId + }), + refetchInterval: 10_000 + }); + + useEffect(() => { + if (!polledTargets) return; + setTargets((prev) => + prev.map((t) => { + const fresh = polledTargets.find( + (p) => p.targetId === t.targetId + ); + if 
(!fresh) return t; + return { + ...t, + hcHealth: fresh.hcHealth, + hcEnabled: t.updated ? t.hcEnabled : fresh.hcEnabled + }; + }) + ); + }, [polledTargets]); const [dockerStates, setDockerStates] = useState>( new Map() ); @@ -317,19 +341,6 @@ function ProxyResourceTargetsForm({ header: () => {t("healthCheck")}, cell: ({ row }) => { const status = row.original.hcHealth || "unknown"; - const isEnabled = row.original.hcEnabled; - - const getStatusColor = (status: string) => { - switch (status) { - case "healthy": - return "green"; - case "unhealthy": - return "red"; - case "unknown": - default: - return "secondary"; - } - }; const getStatusText = (status: string) => { switch (status) { @@ -343,19 +354,7 @@ function ProxyResourceTargetsForm({ } }; - const getStatusIcon = (status: string) => { - switch (status) { - case "healthy": - return ; - case "unhealthy": - return ; - case "unknown": - default: - return null; - } - }; - - return ( + return (
{row.original.siteType === "newt" ? ( + ) : ( - )} @@ -640,10 +642,10 @@ function ProxyResourceTargetsForm({ hcInterval: null, hcTimeout: null, hcHeaders: null, + hcFollowRedirects: null, hcScheme: null, hcHostname: null, hcPort: null, - hcFollowRedirects: null, hcHealth: "unknown", hcStatus: null, hcMode: null, @@ -965,10 +967,10 @@ function ProxyResourceTargetsForm({ {selectedTargetForHealthCheck && ( -
{selectedTargetForHealthCheck && ( - + {site?.siteId && site?.orgId && ( + + )} diff --git a/src/app/[orgId]/settings/sites/create/page.tsx b/src/app/[orgId]/settings/sites/create/page.tsx index b7cff202a..ab97197a3 100644 --- a/src/app/[orgId]/settings/sites/create/page.tsx +++ b/src/app/[orgId]/settings/sites/create/page.tsx @@ -425,7 +425,7 @@ export default function Page() { setRemoteExitNodeOptions(exitNodeOptions); if (exitNodeOptions.length === 0) { - // No remote exit nodes available — remove local option and default to newt + // No remote exit nodes available - remove local option and default to newt setTunnelTypes((prev: any) => prev.filter((item: any) => item.id !== "local") ); @@ -434,7 +434,7 @@ export default function Page() { } } catch (error) { console.error("Failed to fetch remote exit nodes:", error); - // If fetch fails, no remote exit nodes available — remove local option and default to newt + // If fetch fails, no remote exit nodes available - remove local option and default to newt setTunnelTypes((prev: any) => prev.filter((item: any) => item.id !== "local") ); diff --git a/src/app/navigation.tsx b/src/app/navigation.tsx index ac7a4a10f..24dc02a19 100644 --- a/src/app/navigation.tsx +++ b/src/app/navigation.tsx @@ -2,6 +2,7 @@ import { SidebarNavItem } from "@app/components/SidebarNav"; import { Env } from "@app/lib/types/env"; import { build } from "@server/build"; import { + BellRing, Boxes, Building2, Cable, @@ -212,9 +213,9 @@ export const orgNavSections = ( icon: , items: [ { - title: "sidebarApiKeys", - href: "/{orgId}/settings/api-keys", - icon: + title: "sidebarAlerting", + href: "/{orgId}/settings/alerting", + icon: }, { title: "sidebarProvisioning", @@ -225,6 +226,11 @@ export const orgNavSections = ( title: "sidebarBluePrints", href: "/{orgId}/settings/blueprints", icon: + }, + { + title: "sidebarApiKeys", + href: "/{orgId}/settings/api-keys", + icon: } ] }, diff --git a/src/app/private-maintenance-screen/page.tsx 
b/src/app/private-maintenance-screen/page.tsx new file mode 100644 index 000000000..21417b6f4 --- /dev/null +++ b/src/app/private-maintenance-screen/page.tsx @@ -0,0 +1,32 @@ +import { Metadata } from "next"; +import { getTranslations } from "next-intl/server"; +import { + Card, + CardContent, + CardHeader, + CardTitle +} from "@app/components/ui/card"; + +export const dynamic = "force-dynamic"; + +export const metadata: Metadata = { + title: "Private Placeholder" +}; + +export default async function MaintenanceScreen() { + const t = await getTranslations(); + + let title = t("privateMaintenanceScreenTitle"); + let message = t("privateMaintenanceScreenMessage"); + + return ( +
+ + + {title} + + {message} + +
+ ); +} diff --git a/src/components/AlertingRulesTable.tsx b/src/components/AlertingRulesTable.tsx new file mode 100644 index 000000000..ea67b6b73 --- /dev/null +++ b/src/components/AlertingRulesTable.tsx @@ -0,0 +1,344 @@ +"use client"; + +import ConfirmDeleteDialog from "@app/components/ConfirmDeleteDialog"; +import { PaidFeaturesAlert } from "@app/components/PaidFeaturesAlert"; +import { Button } from "@app/components/ui/button"; +import { DataTable, ExtendedColumnDef } from "@app/components/ui/data-table"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger +} from "@app/components/ui/dropdown-menu"; +import { Switch } from "@app/components/ui/switch"; +import { toast } from "@app/hooks/useToast"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { useNavigationContext } from "@app/hooks/useNavigationContext"; +import { usePaidStatus } from "@app/hooks/usePaidStatus"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { orgQueries } from "@app/lib/queries"; +import { tierMatrix } from "@server/lib/billing/tierMatrix"; +import { ArrowUpDown, MoreHorizontal } from "lucide-react"; +import moment from "moment"; +import Link from "next/link"; +import { useRouter } from "next/navigation"; +import { useTranslations } from "next-intl"; +import { useState } from "react"; +import { useQuery, useQueryClient } from "@tanstack/react-query"; +import type { PaginationState } from "@tanstack/react-table"; +import type { DataTablePaginationState } from "@app/components/ui/data-table"; +import { useDebouncedCallback } from "use-debounce"; + +type AlertingRulesTableProps = { + orgId: string; + siteId?: number; + resourceId?: number; +}; + +type AlertRuleRow = { + alertRuleId: number; + orgId: string; + name: string; + eventType: string; + enabled: boolean; + cooldownSeconds: number; + lastTriggeredAt: number | null; + createdAt: number; + updatedAt: number; + siteIds: number[]; + healthCheckIds: 
number[]; + resourceIds: number[]; +}; + +function ruleHref(orgId: string, ruleId: number) { + return `/${orgId}/settings/alerting/${ruleId}`; +} + +function sourceSummary( + rule: AlertRuleRow, + t: (k: string, o?: Record) => string +) { + if ( + rule.eventType === "site_online" || + rule.eventType === "site_offline" || + rule.eventType === "site_toggle" + ) { + return t("alertingSummarySites", { count: rule.siteIds.length }); + } + if (rule.eventType.startsWith("resource_")) { + return t("alertingSummaryResources", { count: rule.resourceIds.length }); + } + return t("alertingSummaryHealthChecks", { + count: rule.healthCheckIds.length + }); +} + +function triggerLabel( + rule: AlertRuleRow, + t: (k: string) => string +) { + switch (rule.eventType) { + case "site_online": + return t("alertingTriggerSiteOnline"); + case "site_offline": + return t("alertingTriggerSiteOffline"); + case "site_toggle": + return t("alertingTriggerSiteToggle"); + case "health_check_healthy": + return t("alertingTriggerHcHealthy"); + case "health_check_unhealthy": + return t("alertingTriggerHcUnhealthy"); + case "health_check_toggle": + return t("alertingTriggerHcToggle"); + case "resource_healthy": + return t("alertingTriggerResourceHealthy"); + case "resource_unhealthy": + return t("alertingTriggerResourceUnhealthy"); + case "resource_toggle": + return t("alertingTriggerResourceToggle"); + default: + return rule.eventType; + } +} + +export default function AlertingRulesTable({ orgId, siteId, resourceId }: AlertingRulesTableProps) { + const router = useRouter(); + const t = useTranslations(); + const api = createApiClient(useEnvContext()); + const queryClient = useQueryClient(); + const { isPaidUser } = usePaidStatus(); + const isPaid = isPaidUser(tierMatrix.alertingRules); + + const { + navigate: filter, + isNavigating: isFiltering, + searchParams + } = useNavigationContext(); + + const [deleteOpen, setDeleteOpen] = useState(false); + const [selected, setSelected] = useState(null); + 
const [togglingId, setTogglingId] = useState(null); + + const page = Math.max(1, Number(searchParams.get("page") ?? 1)); + const pageSize = Math.max(1, Number(searchParams.get("pageSize") ?? 20)); + const pageIndex = page - 1; + const query = searchParams.get("query") ?? undefined; + + const { + data, + isLoading, + refetch, + isRefetching + } = useQuery(orgQueries.alertRules({ orgId, limit: pageSize, offset: pageIndex * pageSize, query, siteId, resourceId })); + + const rows = data?.alertRules ?? []; + const total = data?.pagination.total ?? 0; + const pageCount = Math.max(1, Math.ceil(total / pageSize)); + + const paginationState: DataTablePaginationState = { pageIndex, pageSize, pageCount }; + + const handlePaginationChange = (newState: PaginationState) => { + searchParams.set("page", (newState.pageIndex + 1).toString()); + searchParams.set("pageSize", newState.pageSize.toString()); + filter({ searchParams }); + }; + + const handleSearchChange = useDebouncedCallback((value: string) => { + if (value) { + searchParams.set("query", value); + } else { + searchParams.delete("query"); + } + searchParams.delete("page"); + filter({ searchParams }); + }, 300); + + const invalidate = () => + queryClient.invalidateQueries({ queryKey: ["ORG", orgId, "ALERT_RULES"] }); + + const setEnabled = async (rule: AlertRuleRow, enabled: boolean) => { + setTogglingId(rule.alertRuleId); + try { + await api.post(`/org/${orgId}/alert-rule/${rule.alertRuleId}`, { + enabled + }); + await invalidate(); + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } finally { + setTogglingId(null); + } + }; + + const confirmDelete = async () => { + if (!selected) return; + try { + await api.delete( + `/org/${orgId}/alert-rule/${selected.alertRuleId}` + ); + await invalidate(); + toast({ title: t("alertingRuleDeleted") }); + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } 
finally { + setDeleteOpen(false); + setSelected(null); + } + }; + + const columns: ExtendedColumnDef[] = [ + { + accessorKey: "name", + enableHiding: false, + friendlyName: t("name"), + header: ({ column }) => ( + + ), + cell: ({ row }) => ( + {row.original.name} + ) + }, + { + id: "source", + friendlyName: t("alertingColumnSource"), + header: () => ( + {t("alertingColumnSource")} + ), + cell: ({ row }) => {sourceSummary(row.original, t)} + }, + { + id: "trigger", + friendlyName: t("alertingColumnTrigger"), + header: () => ( + {t("alertingColumnTrigger")} + ), + cell: ({ row }) => {triggerLabel(row.original, t)} + }, + { + accessorKey: "enabled", + friendlyName: t("alertingColumnEnabled"), + header: () => ( + {t("alertingColumnEnabled")} + ), + cell: ({ row }) => { + const r = row.original; + return ( + setEnabled(r, v)} + /> + ); + } + }, + { + accessorKey: "createdAt", + friendlyName: t("createdAt"), + header: () => {t("createdAt")}, + cell: ({ row }) => ( + {moment(row.original.createdAt).format("lll")} + ) + }, + { + id: "rowActions", + enableHiding: false, + header: () => , + cell: ({ row }) => { + const r = row.original; + return ( +
+ + + + + + { + setSelected(r); + setDeleteOpen(true); + }} + > + + {t("delete")} + + + + + +
+ ); + } + } + ]; + + return ( + <> + {selected && ( + { + setDeleteOpen(val); + if (!val) setSelected(null); + }} + dialog={ +
+

{t("alertingDeleteQuestion")}

+
+ } + buttonText={t("delete")} + onConfirm={confirmDelete} + string={selected.name} + title={t("alertingDeleteRule")} + /> + )} + + + { + router.push(`/${orgId}/settings/alerting/create`); + }} + onRefresh={() => refetch()} + isRefreshing={isRefetching || isLoading || isFiltering} + addButtonText={t("alertingAddRule")} + enableColumnVisibility + stickyLeftColumn="name" + stickyRightColumn="rowActions" + pagination={paginationState} + onPaginationChange={handlePaginationChange} + /> + + ); +} diff --git a/src/components/ClientInfoCard.tsx b/src/components/ClientInfoCard.tsx index ece2309e2..7f55a46cd 100644 --- a/src/components/ClientInfoCard.tsx +++ b/src/components/ClientInfoCard.tsx @@ -49,7 +49,7 @@ export default function SiteInfoCard({}: ClientInfoCardProps) { ) : (
-
+
{t("offline")}
)} diff --git a/src/components/ClientResourcesTable.tsx b/src/components/ClientResourcesTable.tsx index 5066f273d..36f8caa78 100644 --- a/src/components/ClientResourcesTable.tsx +++ b/src/components/ClientResourcesTable.tsx @@ -21,6 +21,7 @@ import { ArrowUp10Icon, ArrowUpDown, ArrowUpRight, + ChevronDown, ChevronsUpDownIcon, MoreHorizontal } from "lucide-react"; @@ -38,21 +39,32 @@ import { ControlledDataTable } from "./ui/controlled-data-table"; import { useNavigationContext } from "@app/hooks/useNavigationContext"; import { useDebouncedCallback } from "use-debounce"; import { ColumnFilterButton } from "./ColumnFilterButton"; +import { cn } from "@app/lib/cn"; + +export type InternalResourceSiteRow = { + siteId: number; + siteName: string; + siteNiceId: string; + online: boolean; +}; export type InternalResourceRow = { id: number; name: string; orgId: string; - siteName: string; - siteAddress: string | null; + sites: InternalResourceSiteRow[]; + siteNames: string[]; + siteAddresses: (string | null)[]; + siteIds: number[]; + siteNiceIds: string[]; // mode: "host" | "cidr" | "port"; - mode: "host" | "cidr"; + mode: "host" | "cidr" | "http"; + scheme: "http" | "https" | null; + ssl: boolean; // protocol: string | null; // proxyPort: number | null; - siteId: number; - siteNiceId: string; destination: string; - // destinationPort: number | null; + httpHttpsPort: number | null; alias: string | null; aliasAddress: string | null; niceId: string; @@ -61,8 +73,147 @@ export type InternalResourceRow = { disableIcmp: boolean; authDaemonMode?: "site" | "remote" | null; authDaemonPort?: number | null; + subdomain?: string | null; + domainId?: string | null; + fullDomain?: string | null; }; +function resolveHttpHttpsDisplayPort( + mode: "http", + httpHttpsPort: number | null +): number { + if (httpHttpsPort != null) { + return httpHttpsPort; + } + return 80; +} + +function formatDestinationDisplay(row: InternalResourceRow): string { + const { mode, destination, httpHttpsPort, 
scheme } = row; + if (mode !== "http") { + return destination; + } + const port = resolveHttpHttpsDisplayPort(mode, httpHttpsPort); + const downstreamScheme = scheme ?? "http"; + const hostPart = + destination.includes(":") && !destination.startsWith("[") + ? `[${destination}]` + : destination; + return `${downstreamScheme}://${hostPart}:${port}`; +} + +function isSafeUrlForLink(href: string): boolean { + try { + void new URL(href); + return true; + } catch { + return false; + } +} + +type AggregateSitesStatus = "allOnline" | "partial" | "allOffline"; + +function aggregateSitesStatus( + resourceSites: InternalResourceSiteRow[] +): AggregateSitesStatus { + if (resourceSites.length === 0) { + return "allOffline"; + } + const onlineCount = resourceSites.filter((rs) => rs.online).length; + if (onlineCount === resourceSites.length) return "allOnline"; + if (onlineCount > 0) return "partial"; + return "allOffline"; +} + +function aggregateStatusDotClass(status: AggregateSitesStatus): string { + switch (status) { + case "allOnline": + return "bg-green-500"; + case "partial": + return "bg-yellow-500"; + case "allOffline": + default: + return "bg-neutral-500"; + } +} + +function ClientResourceSitesStatusCell({ + orgId, + resourceSites +}: { + orgId: string; + resourceSites: InternalResourceSiteRow[]; +}) { + const t = useTranslations(); + + if (resourceSites.length === 0) { + return -; + } + + const aggregate = aggregateSitesStatus(resourceSites); + const countLabel = t("multiSitesSelectorSitesCount", { + count: resourceSites.length + }); + + return ( + + + + + + {resourceSites.map((site) => { + const isOnline = site.online; + return ( + + +
+
+ + {site.siteName} + +
+ + {isOnline ? t("online") : t("offline")} + + + + ); + })} + + + ); +} + type ClientResourcesTableProps = { internalResources: InternalResourceRow[]; orgId: string; @@ -97,8 +248,6 @@ export default function ClientResourcesTable({ useState(); const [isCreateDialogOpen, setIsCreateDialogOpen] = useState(false); - const { data: sites = [] } = useQuery(orgQueries.sites({ orgId })); - const [isRefreshing, startTransition] = useTransition(); const refreshData = () => { @@ -136,6 +285,60 @@ export default function ClientResourcesTable({ } }; + function SiteCell({ resourceRow }: { resourceRow: InternalResourceRow }) { + const { siteNames, siteNiceIds, orgId } = resourceRow; + + if (!siteNames || siteNames.length === 0) { + return -; + } + + if (siteNames.length === 1) { + return ( + + + + ); + } + + return ( + + + + + + {siteNames.map((siteName, idx) => ( + + + {siteName} + + + + ))} + + + ); + } + const internalColumns: ExtendedColumnDef[] = [ { accessorKey: "name", @@ -185,20 +388,17 @@ export default function ClientResourcesTable({ } }, { - accessorKey: "siteName", - friendlyName: t("site"), - header: () => {t("site")}, + id: "sites", + accessorFn: (row) => row.sites.map((s) => s.siteName).join(", "), + friendlyName: t("sites"), + header: () => {t("sites")}, cell: ({ row }) => { const resourceRow = row.original; return ( - - - + ); } }, @@ -215,6 +415,10 @@ export default function ClientResourcesTable({ { value: "cidr", label: t("editInternalResourceDialogModeCidr") + }, + { + value: "http", + label: t("editInternalResourceDialogModeHttp") } ]} selectedValue={searchParams.get("mode") ?? 
undefined} @@ -227,10 +431,14 @@ export default function ClientResourcesTable({ ), cell: ({ row }) => { const resourceRow = row.original; - const modeLabels: Record<"host" | "cidr" | "port", string> = { + const modeLabels: Record< + "host" | "cidr" | "port" | "http", + string + > = { host: t("editInternalResourceDialogModeHost"), cidr: t("editInternalResourceDialogModeCidr"), - port: t("editInternalResourceDialogModePort") + port: t("editInternalResourceDialogModePort"), + http: t("editInternalResourceDialogModeHttp") }; return {modeLabels[resourceRow.mode]}; } @@ -243,11 +451,12 @@ export default function ClientResourcesTable({ ), cell: ({ row }) => { const resourceRow = row.original; + const display = formatDestinationDisplay(resourceRow); return ( ); } @@ -260,15 +469,26 @@ export default function ClientResourcesTable({ ), cell: ({ row }) => { const resourceRow = row.original; - return resourceRow.mode === "host" && resourceRow.alias ? ( - - ) : ( - - - ); + if (resourceRow.mode === "host" && resourceRow.alias) { + return ( + + ); + } + if (resourceRow.mode === "http") { + const url = `${resourceRow.ssl ? 
"https" : "http"}://${resourceRow.fullDomain}`; + return ( + + ); + } + return -; } }, { @@ -399,7 +619,7 @@ export default function ClientResourcesTable({ onConfirm={async () => deleteInternalResource( selectedInternalResource!.id, - selectedInternalResource!.siteId + selectedInternalResource!.siteIds[0] ) } string={selectedInternalResource.name} @@ -435,7 +655,6 @@ export default function ClientResourcesTable({ setOpen={setIsEditDialogOpen} resource={editingResource} orgId={orgId} - sites={sites} onSuccess={() => { // Delay refresh to allow modal to close smoothly setTimeout(() => { @@ -450,7 +669,6 @@ export default function ClientResourcesTable({ open={isCreateDialogOpen} setOpen={setIsCreateDialogOpen} orgId={orgId} - sites={sites} onSuccess={() => { // Delay refresh to allow modal to close smoothly setTimeout(() => { diff --git a/src/components/ContactSalesBanner.tsx b/src/components/ContactSalesBanner.tsx new file mode 100644 index 000000000..e5cb87d83 --- /dev/null +++ b/src/components/ContactSalesBanner.tsx @@ -0,0 +1,42 @@ +"use client"; + +import { KeyRound, ExternalLink } from "lucide-react"; +import Link from "next/link"; +import { useTranslations } from "next-intl"; + +export function ContactSalesBanner() { + const t = useTranslations(); + + return ( +
+
+
+ + + {t("contactSalesEnable")}{" "} + + {t("contactSalesBookDemo")} + + + {" " + t("contactSalesOr") + " "} + + {t("contactSalesContactUs")} + + + . + +
+
+
+ ); +} \ No newline at end of file diff --git a/src/components/CreateInternalResourceDialog.tsx b/src/components/CreateInternalResourceDialog.tsx index d5ca61acc..4d2bc0916 100644 --- a/src/components/CreateInternalResourceDialog.tsx +++ b/src/components/CreateInternalResourceDialog.tsx @@ -14,7 +14,6 @@ import { Button } from "@app/components/ui/button"; import { useEnvContext } from "@app/hooks/useEnvContext"; import { toast } from "@app/hooks/useToast"; import { createApiClient, formatAxiosError } from "@app/lib/api"; -import { ListSitesResponse } from "@server/routers/site"; import { AxiosResponse } from "axios"; import { useTranslations } from "next-intl"; import { useState } from "react"; @@ -25,13 +24,10 @@ import { type InternalResourceFormValues } from "./InternalResourceForm"; -type Site = ListSitesResponse["sites"][0]; - type CreateInternalResourceDialogProps = { open: boolean; setOpen: (val: boolean) => void; orgId: string; - sites: Site[]; onSuccess?: () => void; }; @@ -39,18 +35,21 @@ export default function CreateInternalResourceDialog({ open, setOpen, orgId, - sites, onSuccess }: CreateInternalResourceDialogProps) { const t = useTranslations(); const api = createApiClient(useEnvContext()); const [isSubmitting, setIsSubmitting] = useState(false); + const [isHttpModeDisabled, setIsHttpModeDisabled] = useState(false); async function handleSubmit(values: InternalResourceFormValues) { setIsSubmitting(true); try { let data = { ...values }; - if (data.mode === "host" && isHostname(data.destination)) { + if ( + (data.mode === "host" || data.mode === "http") && + isHostname(data.destination) + ) { const currentAlias = data.alias?.trim() || ""; if (!currentAlias) { let aliasValue = data.destination; @@ -65,25 +64,56 @@ export default function CreateInternalResourceDialog({ `/org/${orgId}/site-resource`, { name: data.name, - siteId: data.siteId, + siteIds: data.siteIds, mode: data.mode, destination: data.destination, enabled: true, - alias: data.alias && 
typeof data.alias === "string" && data.alias.trim() ? data.alias : undefined, - tcpPortRangeString: data.tcpPortRangeString, - udpPortRangeString: data.udpPortRangeString, - disableIcmp: data.disableIcmp ?? false, - ...(data.authDaemonMode != null && { authDaemonMode: data.authDaemonMode }), - ...(data.authDaemonMode === "remote" && data.authDaemonPort != null && { authDaemonPort: data.authDaemonPort }), - roleIds: data.roles ? data.roles.map((r) => parseInt(r.id)) : [], + ...(data.mode === "http" && { + scheme: data.scheme, + ssl: data.ssl ?? false, + destinationPort: data.httpHttpsPort ?? undefined, + domainId: data.httpConfigDomainId + ? data.httpConfigDomainId + : undefined, + subdomain: data.httpConfigSubdomain + ? data.httpConfigSubdomain + : undefined + }), + ...(data.mode === "host" && { + alias: + data.alias && + typeof data.alias === "string" && + data.alias.trim() + ? data.alias + : undefined, + ...(data.authDaemonMode != null && { + authDaemonMode: data.authDaemonMode + }), + ...(data.authDaemonMode === "remote" && + data.authDaemonPort != null && { + authDaemonPort: data.authDaemonPort + }) + }), + ...((data.mode === "host" || data.mode == "cidr") && { + tcpPortRangeString: data.tcpPortRangeString, + udpPortRangeString: data.udpPortRangeString, + disableIcmp: data.disableIcmp ?? false + }), + roleIds: data.roles + ? data.roles.map((r) => parseInt(r.id)) + : [], userIds: data.users ? data.users.map((u) => u.id) : [], - clientIds: data.clients ? data.clients.map((c) => parseInt(c.id)) : [] + clientIds: data.clients + ? 
data.clients.map((c) => parseInt(c.id)) + : [] } ); toast({ title: t("createInternalResourceDialogSuccess"), - description: t("createInternalResourceDialogInternalResourceCreatedSuccessfully"), + description: t( + "createInternalResourceDialogInternalResourceCreatedSuccessfully" + ), variant: "default" }); setOpen(false); @@ -93,7 +123,9 @@ export default function CreateInternalResourceDialog({ title: t("createInternalResourceDialogError"), description: formatAxiosError( error, - t("createInternalResourceDialogFailedToCreateInternalResource") + t( + "createInternalResourceDialogFailedToCreateInternalResource" + ) ), variant: "destructive" }); @@ -106,31 +138,39 @@ export default function CreateInternalResourceDialog({ - {t("createInternalResourceDialogCreateClientResource")} + + {t("createInternalResourceDialogCreateClientResource")} + - {t("createInternalResourceDialogCreateClientResourceDescription")} + {t( + "createInternalResourceDialogCreateClientResourceDescription" + )} - + + + + + ); +} diff --git a/src/components/DomainPicker.tsx b/src/components/DomainPicker.tsx index ac8493d6e..7a90dfa67 100644 --- a/src/components/DomainPicker.tsx +++ b/src/components/DomainPicker.tsx @@ -175,15 +175,18 @@ export default function DomainPicker({ domainId: firstOrExistingDomain.domainId }; + const base = firstOrExistingDomain.baseDomain; + const sub = + firstOrExistingDomain.type !== "cname" + ? defaultSubdomain?.trim() || undefined + : undefined; + onDomainChange?.({ domainId: firstOrExistingDomain.domainId, type: "organization", - subdomain: - firstOrExistingDomain.type !== "cname" - ? defaultSubdomain || undefined - : undefined, - fullDomain: firstOrExistingDomain.baseDomain, - baseDomain: firstOrExistingDomain.baseDomain + subdomain: sub, + fullDomain: sub ? 
`${sub}.${base}` : base, + baseDomain: base }); } } diff --git a/src/components/EditInternalResourceDialog.tsx b/src/components/EditInternalResourceDialog.tsx index 690ad405d..859981f7d 100644 --- a/src/components/EditInternalResourceDialog.tsx +++ b/src/components/EditInternalResourceDialog.tsx @@ -15,7 +15,6 @@ import { useEnvContext } from "@app/hooks/useEnvContext"; import { toast } from "@app/hooks/useToast"; import { createApiClient, formatAxiosError } from "@app/lib/api"; import { resourceQueries } from "@app/lib/queries"; -import { ListSitesResponse } from "@server/routers/site"; import { useQueryClient } from "@tanstack/react-query"; import { useTranslations } from "next-intl"; import { useState, useTransition } from "react"; @@ -27,14 +26,11 @@ import { isHostname } from "./InternalResourceForm"; -type Site = ListSitesResponse["sites"][0]; - type EditInternalResourceDialogProps = { open: boolean; setOpen: (val: boolean) => void; resource: InternalResourceData; orgId: string; - sites: Site[]; onSuccess?: () => void; }; @@ -43,18 +39,21 @@ export default function EditInternalResourceDialog({ setOpen, resource, orgId, - sites, onSuccess }: EditInternalResourceDialogProps) { const t = useTranslations(); const api = createApiClient(useEnvContext()); const queryClient = useQueryClient(); const [isSubmitting, startTransition] = useTransition(); + const [isHttpModeDisabled, setIsHttpModeDisabled] = useState(false); async function handleSubmit(values: InternalResourceFormValues) { try { let data = { ...values }; - if (data.mode === "host" && isHostname(data.destination)) { + if ( + (data.mode === "host" || data.mode === "http") && + isHostname(data.destination) + ) { const currentAlias = data.alias?.trim() || ""; if (!currentAlias) { let aliasValue = data.destination; @@ -67,24 +66,39 @@ export default function EditInternalResourceDialog({ await api.post(`/site-resource/${resource.id}`, { name: data.name, - siteId: data.siteId, + siteIds: data.siteIds, mode: 
data.mode, niceId: data.niceId, destination: data.destination, - alias: - data.alias && - typeof data.alias === "string" && - data.alias.trim() - ? data.alias - : null, - tcpPortRangeString: data.tcpPortRangeString, - udpPortRangeString: data.udpPortRangeString, - disableIcmp: data.disableIcmp ?? false, - ...(data.authDaemonMode != null && { - authDaemonMode: data.authDaemonMode + ...(data.mode === "http" && { + scheme: data.scheme, + ssl: data.ssl ?? false, + destinationPort: data.httpHttpsPort ?? null, + domainId: data.httpConfigDomainId + ? data.httpConfigDomainId + : undefined, + subdomain: data.httpConfigSubdomain + ? data.httpConfigSubdomain + : undefined }), - ...(data.authDaemonMode === "remote" && { - authDaemonPort: data.authDaemonPort || null + ...(data.mode === "host" && { + alias: + data.alias && + typeof data.alias === "string" && + data.alias.trim() + ? data.alias + : null, + ...(data.authDaemonMode != null && { + authDaemonMode: data.authDaemonMode + }), + ...(data.authDaemonMode === "remote" && { + authDaemonPort: data.authDaemonPort || null + }) + }), + ...((data.mode === "host" || data.mode === "cidr") && { + tcpPortRangeString: data.tcpPortRangeString, + udpPortRangeString: data.udpPortRangeString, + disableIcmp: data.disableIcmp ?? false }), roleIds: (data.roles || []).map((r) => parseInt(r.id)), userIds: (data.users || []).map((u) => u.id), @@ -156,13 +170,13 @@ export default function EditInternalResourceDialog({ variant="edit" open={open} resource={resource} - sites={sites} orgId={orgId} siteResourceId={resource.id} formId="edit-internal-resource-form" onSubmit={(values) => startTransition(() => handleSubmit(values)) } + onSubmitDisabledChange={setIsHttpModeDisabled} /> @@ -178,7 +192,7 @@ export default function EditInternalResourceDialog({
) : (
-
+
{t("offline")}
)} diff --git a/src/components/ExitNodesTable.tsx b/src/components/ExitNodesTable.tsx index 5c39f409e..67d819a47 100644 --- a/src/components/ExitNodesTable.tsx +++ b/src/components/ExitNodesTable.tsx @@ -146,7 +146,7 @@ export default function ExitNodesTable({ } else { return ( -
+
{t("offline")}
); diff --git a/src/components/HealthCheckCredenza.tsx b/src/components/HealthCheckCredenza.tsx new file mode 100644 index 000000000..671a16e7d --- /dev/null +++ b/src/components/HealthCheckCredenza.tsx @@ -0,0 +1,1386 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { Button } from "@/components/ui/button"; +import { z } from "zod"; +import { useForm } from "react-hook-form"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Switch } from "@/components/ui/switch"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue +} from "@/components/ui/select"; +import { StrategySelect } from "@app/components/StrategySelect"; +import { HeadersInput } from "@app/components/HeadersInput"; +import { HorizontalTabs } from "@app/components/HorizontalTabs"; +import { + Credenza, + CredenzaBody, + CredenzaClose, + CredenzaContent, + CredenzaDescription, + CredenzaFooter, + CredenzaHeader, + CredenzaTitle +} from "@/components/Credenza"; +import { toast } from "@app/hooks/useToast"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { useTranslations } from "next-intl"; +import { ContactSalesBanner } from "@app/components/ContactSalesBanner"; +import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover"; +import { SitesSelector } from "@app/components/site-selector"; +import type { Selectedsite } from "@app/components/site-selector"; +import { CaretSortIcon } from "@radix-ui/react-icons"; +import { cn } from "@app/lib/cn"; + +export type HealthCheckConfig = { + hcEnabled: boolean; + hcPath: string; + hcMethod: string; + hcInterval: number; + hcTimeout: number; + hcStatus: number | null; + hcHeaders?: { name: string; value: string 
}[] | null; + hcScheme?: string; + hcHostname: string; + hcPort: number; + hcFollowRedirects: boolean; + hcMode: string; + hcUnhealthyInterval: number; + hcTlsServerName: string; + hcHealthyThreshold: number; + hcUnhealthyThreshold: number; +}; + +export type HealthCheckRow = { + targetHealthCheckId: number; + name: string; + hcEnabled: boolean; + hcHealth: "unknown" | "healthy" | "unhealthy"; + hcMode: string | null; + hcHostname: string | null; + hcPort: number | null; + hcPath: string | null; + hcScheme: string | null; + hcMethod: string | null; + hcInterval: number | null; + hcUnhealthyInterval: number | null; + hcTimeout: number | null; + hcHeaders: string | null; + hcFollowRedirects: boolean | null; + hcStatus: number | null; + hcTlsServerName: string | null; + hcHealthyThreshold: number | null; + hcUnhealthyThreshold: number | null; + resourceId: number | null; + resourceName: string | null; + resourceNiceId: string | null; + siteId: number | null; + siteName: string | null; + siteNiceId: string | null; +}; + +export type HealthCheckCredenzaProps = + | { + mode: "autoSave"; + open: boolean; + setOpen: (v: boolean) => void; + orgId?: string; + targetAddress: string; + targetMethod?: string; + initialConfig?: Partial; + onChanges: (config: HealthCheckConfig) => Promise; + } + | { + mode: "submit"; + open: boolean; + setOpen: (v: boolean) => void; + orgId: string; + initialValues?: HealthCheckRow | null; + onSaved: () => void; + }; + +const DEFAULT_VALUES = { + name: "", + hcEnabled: true, + hcMode: "http", + hcScheme: "https", + hcMethod: "GET", + hcHostname: "", + hcPort: "", + hcPath: "/", + hcInterval: 30, + hcUnhealthyInterval: 30, + hcTimeout: 5, + hcHealthyThreshold: 1, + hcUnhealthyThreshold: 1, + hcFollowRedirects: true, + hcTlsServerName: "", + hcStatus: null as number | null, + hcHeaders: [] as { name: string; value: string }[] +}; + +export function HealthCheckCredenza(props: HealthCheckCredenzaProps) { + const { mode, open, setOpen, orgId } = 
props; + + const t = useTranslations(); + const api = createApiClient(useEnvContext()); + const [loading, setLoading] = useState(false); + const [selectedSite, setSelectedSite] = useState(null); + + const healthCheckSchema = z + .object({ + ...(mode === "submit" + ? { + name: z + .string() + .min(1, { message: t("standaloneHcNameLabel") }) + } + : {}), + hcEnabled: z.boolean(), + hcPath: z.string().optional(), + hcMethod: z.string().optional(), + hcInterval: z + .int() + .positive() + .min(5, { message: t("healthCheckIntervalMin") }), + hcTimeout: z + .int() + .positive() + .min(1, { message: t("healthCheckTimeoutMin") }), + hcStatus: z.int().positive().min(100).optional().nullable(), + hcHeaders: z + .array(z.object({ name: z.string(), value: z.string() })) + .nullable() + .optional(), + hcScheme: z.string().optional(), + hcHostname: z.string(), + hcPort: z + .string() + .min(1, { message: t("healthCheckPortInvalid") }) + .refine( + (val) => { + const port = parseInt(val); + return port > 0 && port <= 65535; + }, + { message: t("healthCheckPortInvalid") } + ), + hcFollowRedirects: z.boolean(), + hcMode: z.string(), + hcUnhealthyInterval: z.int().positive().min(5), + hcTlsServerName: z.string(), + hcHealthyThreshold: z + .int() + .positive() + .min(1, { message: t("healthCheckHealthyThresholdMin") }), + hcUnhealthyThreshold: z + .int() + .positive() + .min(1, { message: t("healthCheckUnhealthyThresholdMin") }) + }) + .superRefine((data, ctx) => { + if (data.hcMode !== "tcp") { + if (!data.hcPath || data.hcPath.length < 1) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("healthCheckPathRequired"), + path: ["hcPath"] + }); + } + if (!data.hcMethod || data.hcMethod.length < 1) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("healthCheckMethodRequired"), + path: ["hcMethod"] + }); + } + } + }); + + type FormValues = z.infer; + + const form = useForm({ + resolver: zodResolver(healthCheckSchema), + defaultValues: mode === "submit" ? 
DEFAULT_VALUES : {} + }); + + const watchedEnabled = form.watch("hcEnabled"); + const watchedMode = form.watch("hcMode"); + + useEffect(() => { + if (!open) return; + + if (mode === "autoSave") { + const { initialConfig, targetMethod } = props; + + const getDefaultScheme = () => { + if (initialConfig?.hcScheme) return initialConfig.hcScheme; + if (targetMethod === "https") return "https"; + return "http"; + }; + + form.reset({ + hcEnabled: initialConfig?.hcEnabled, + hcPath: initialConfig?.hcPath, + hcMethod: initialConfig?.hcMethod, + hcInterval: initialConfig?.hcInterval, + hcTimeout: initialConfig?.hcTimeout, + hcStatus: initialConfig?.hcStatus, + hcHeaders: initialConfig?.hcHeaders, + hcScheme: getDefaultScheme(), + hcHostname: initialConfig?.hcHostname, + hcPort: initialConfig?.hcPort + ? initialConfig.hcPort.toString() + : "", + hcFollowRedirects: initialConfig?.hcFollowRedirects, + hcMode: initialConfig?.hcMode ?? "http", + hcUnhealthyInterval: initialConfig?.hcUnhealthyInterval, + hcTlsServerName: initialConfig?.hcTlsServerName ?? "", + hcHealthyThreshold: initialConfig?.hcHealthyThreshold ?? 1, + hcUnhealthyThreshold: initialConfig?.hcUnhealthyThreshold ?? 1 + }); + } else { + const { initialValues } = props; + + if (initialValues) { + let parsedHeaders: { name: string; value: string }[] = []; + if (initialValues.hcHeaders) { + try { + parsedHeaders = JSON.parse(initialValues.hcHeaders); + } catch { + parsedHeaders = []; + } + } + + form.reset({ + name: initialValues.name, + hcEnabled: initialValues.hcEnabled, + hcMode: initialValues.hcMode ?? "http", + hcScheme: initialValues.hcScheme ?? "https", + hcMethod: initialValues.hcMethod ?? "GET", + hcHostname: initialValues.hcHostname ?? "", + hcPort: initialValues.hcPort + ? initialValues.hcPort.toString() + : "", + hcPath: initialValues.hcPath ?? "/", + hcInterval: initialValues.hcInterval ?? 30, + hcUnhealthyInterval: + initialValues.hcUnhealthyInterval ?? 30, + hcTimeout: initialValues.hcTimeout ?? 
5, + hcHealthyThreshold: initialValues.hcHealthyThreshold ?? 1, + hcUnhealthyThreshold: + initialValues.hcUnhealthyThreshold ?? 1, + hcFollowRedirects: initialValues.hcFollowRedirects ?? true, + hcTlsServerName: initialValues.hcTlsServerName ?? "", + hcStatus: initialValues.hcStatus ?? null, + hcHeaders: parsedHeaders + }); + if (initialValues.siteId && initialValues.siteName) { + setSelectedSite({ siteId: initialValues.siteId, name: initialValues.siteName, type: "" }); + } else { + setSelectedSite(null); + } + } else { + form.reset(DEFAULT_VALUES); + setSelectedSite(null); + } + } + }, [open]); + + const handleFieldChange = async (fieldName: string, value: any) => { + if (mode !== "autoSave") return; + try { + const currentValues = form.getValues(); + const updatedValues = { ...currentValues, [fieldName]: value }; + + const configToSend: HealthCheckConfig = { + ...updatedValues, + hcPath: updatedValues.hcPath ?? "", + hcMethod: updatedValues.hcMethod ?? "", + hcPort: parseInt(updatedValues.hcPort), + hcStatus: updatedValues.hcStatus || null, + hcHealthyThreshold: updatedValues.hcHealthyThreshold, + hcUnhealthyThreshold: updatedValues.hcUnhealthyThreshold + }; + + await props.onChanges(configToSend); + } catch (error) { + toast({ + title: t("healthCheckError"), + description: t("healthCheckErrorDescription"), + variant: "destructive" + }); + } + }; + + const handleChange = ( + fieldName: string, + value: any, + fieldOnChange: (v: any) => void + ) => { + fieldOnChange(value); + if (mode === "autoSave") { + handleFieldChange(fieldName, value); + } + }; + + const onSubmit = async (values: FormValues) => { + if (mode !== "submit") return; + const { initialValues, onSaved } = props; + + setLoading(true); + try { + const payload = { + name: (values as any).name, + siteId: selectedSite?.siteId, + hcEnabled: values.hcEnabled, + hcMode: values.hcMode, + hcScheme: values.hcScheme, + hcMethod: values.hcMethod, + hcHostname: values.hcHostname, + hcPort: 
parseInt(values.hcPort), + hcPath: values.hcPath ?? "", + hcInterval: values.hcInterval, + hcUnhealthyInterval: values.hcUnhealthyInterval, + hcTimeout: values.hcTimeout, + hcHealthyThreshold: values.hcHealthyThreshold, + hcUnhealthyThreshold: values.hcUnhealthyThreshold, + hcFollowRedirects: values.hcFollowRedirects, + hcTlsServerName: values.hcTlsServerName, + hcStatus: values.hcStatus || null, + hcHeaders: + values.hcHeaders && values.hcHeaders.length > 0 + ? JSON.stringify(values.hcHeaders) + : null + }; + + if (initialValues) { + await api.post( + `/org/${orgId}/health-check/${initialValues.targetHealthCheckId}`, + payload + ); + } else { + await api.put(`/org/${orgId}/health-check`, payload); + } + + toast({ title: t("standaloneHcSaved") }); + onSaved(); + setOpen(false); + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } finally { + setLoading(false); + } + }; + + const isEditing = mode === "submit" && !!(props as any).initialValues; + + const title = + mode === "autoSave" + ? t("configureHealthCheck") + : isEditing + ? t("standaloneHcEditTitle") + : t("standaloneHcCreateTitle"); + + const description = + mode === "autoSave" + ? t("configureHealthCheckDescription", { + target: (props as any).targetAddress + }) + : t("standaloneHcDescription"); + + const showFields = mode === "submit" || watchedEnabled; + const isSnmpOrIcmp = watchedMode === "snmp" || watchedMode === "icmp"; + const isTcp = watchedMode === "tcp"; + + return ( + + + + {title} + {description} + + +
+ + {/* Name (submit mode only) */} + {mode === "submit" && ( + ( + + + {t("standaloneHcNameLabel")} + + + + + + + )} + /> + )} + + {/* Site picker (submit mode only) */} + {mode === "submit" && ( +
+ + {t("site")} + + + + + + { + setSelectedSite(site); + }} + /> + + + +
+ )} + +
+ + {/* ── Strategy tab ──────────────────────── */} +
+ {/* Enable toggle (autoSave mode only) */} + {mode === "autoSave" && ( + ( + +
+ + {t( + "enableHealthChecks" + )} + +
+ + + handleChange( + "hcEnabled", + value, + field.onChange + ) + } + /> + +
+ )} + /> + )} + + {/* Strategy picker */} + {showFields && ( + ( + + + + handleChange( + "hcMode", + value, + field.onChange + ) + } + /> + + + + )} + /> + )} +
+ + {/* ── Connection tab ────────────────────── */} +
+ {!showFields && ( +

+ {t("enableHealthChecks")} +

+ )} + + {/* Contact-sales banner for SNMP / ICMP */} + {showFields && isSnmpOrIcmp && ( + + )} + + {showFields && !isSnmpOrIcmp && ( + <> + {/* Scheme / Hostname / Port */} + {isTcp ? ( +
+ ( + + + {t( + "healthHostname" + )} + + + + handleChange( + "hcHostname", + e + .target + .value, + () => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t( + "healthPort" + )} + + + + handleChange( + "hcPort", + e + .target + .value, + field.onChange + ) + } + /> + + + + )} + /> +
+ ) : ( +
+ ( + + + {t( + "healthScheme" + )} + + + + + )} + /> + ( + + + {t( + "healthHostname" + )} + + + + handleChange( + "hcHostname", + e + .target + .value, + () => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t( + "healthPort" + )} + + + + handleChange( + "hcPort", + e + .target + .value, + field.onChange + ) + } + /> + + + + )} + /> +
+ )} + + {/* Method / Path / Timeout (HTTP) */} + {!isTcp && ( +
+ ( + + + {t( + "httpMethod" + )} + + + + + )} + /> + ( + + + {t( + "healthCheckPath" + )} + + + + handleChange( + "hcPath", + e + .target + .value, + () => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t( + "timeoutSeconds" + )} + + + + handleChange( + "hcTimeout", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> +
+ )} + + {/* Timeout for TCP */} + {isTcp && ( + ( + + + {t( + "timeoutSeconds" + )} + + + + handleChange( + "hcTimeout", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> + )} + + )} +
+ + {/* ── Advanced tab ──────────────────────── */} +
+ {!showFields && ( +

+ {t("enableHealthChecks")} +

+ )} + + {/* Contact-sales banner for SNMP / ICMP */} + {showFields && isSnmpOrIcmp && ( + + )} + + {showFields && !isSnmpOrIcmp && ( + <> + {/* Healthy interval + threshold */} +
+ ( + + + {t( + "healthyIntervalSeconds" + )} + + + + handleChange( + "hcInterval", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> + ( + + + {t( + "healthyThreshold" + )} + + + + handleChange( + "hcHealthyThreshold", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> +
+ + {/* Unhealthy interval + threshold */} +
+ ( + + + {t( + "unhealthyIntervalSeconds" + )} + + + + handleChange( + "hcUnhealthyInterval", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> + ( + + + {t( + "unhealthyThreshold" + )} + + + + handleChange( + "hcUnhealthyThreshold", + parseInt( + e + .target + .value + ), + field.onChange + ) + } + /> + + + + )} + /> +
+ + {/* HTTP-only advanced fields */} + {!isTcp && ( + <> + {/* Expected status + TLS server name */} +
+ ( + + + {t( + "expectedResponseCodes" + )} + + + { + const val = + e + .target + .value; + const value = + val + ? parseInt( + val + ) + : null; + handleChange( + "hcStatus", + value, + field.onChange + ); + }} + /> + + + + )} + /> + ( + + + {t( + "tlsServerName" + )} + + + + handleChange( + "hcTlsServerName", + e + .target + .value, + () => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> +
+ + {/* Follow redirects */} + ( + + + {t( + "followRedirects" + )} + + + + handleChange( + "hcFollowRedirects", + value, + field.onChange + ) + } + /> + + + )} + /> + + {/* Custom headers */} + ( + + + {t( + "customHeaders" + )} + + + + handleChange( + "hcHeaders", + value, + field.onChange + ) + } + rows={ + 4 + } + /> + + + {t( + "customHeadersDescription" + )} + + + + )} + /> + + )} + + )} +
+
+
+ + +
+ + {mode === "autoSave" ? ( + + ) : ( + <> + + + + + + )} + +
+
+ ); +} + +export default HealthCheckCredenza; diff --git a/src/components/HealthCheckDialog.tsx b/src/components/HealthCheckDialog.tsx deleted file mode 100644 index c95908025..000000000 --- a/src/components/HealthCheckDialog.tsx +++ /dev/null @@ -1,635 +0,0 @@ -"use client"; - -import { useEffect } from "react"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue -} from "@/components/ui/select"; -import { Switch } from "@/components/ui/switch"; -import { HeadersInput } from "@app/components/HeadersInput"; -import { z } from "zod"; -import { useForm } from "react-hook-form"; -import { zodResolver } from "@hookform/resolvers/zod"; -import { - Form, - FormControl, - FormDescription, - FormField, - FormItem, - FormLabel, - FormMessage -} from "@/components/ui/form"; -import { - Credenza, - CredenzaBody, - CredenzaClose, - CredenzaContent, - CredenzaDescription, - CredenzaFooter, - CredenzaHeader, - CredenzaTitle -} from "@/components/Credenza"; -import { toast } from "@/hooks/useToast"; -import { useTranslations } from "next-intl"; - -type HealthCheckConfig = { - hcEnabled: boolean; - hcPath: string; - hcMethod: string; - hcInterval: number; - hcTimeout: number; - hcStatus: number | null; - hcHeaders?: { name: string; value: string }[] | null; - hcScheme?: string; - hcHostname: string; - hcPort: number; - hcFollowRedirects: boolean; - hcMode: string; - hcUnhealthyInterval: number; - hcTlsServerName: string; -}; - -type HealthCheckDialogProps = { - open: boolean; - setOpen: (val: boolean) => void; - targetId: number; - targetAddress: string; - targetMethod?: string; - initialConfig?: Partial; - onChanges: (config: HealthCheckConfig) => Promise; -}; - -export default function HealthCheckDialog({ - open, - setOpen, - targetId, - targetAddress, - targetMethod, - initialConfig, - onChanges -}: HealthCheckDialogProps) { - const t = 
useTranslations(); - - const healthCheckSchema = z.object({ - hcEnabled: z.boolean(), - hcPath: z.string().min(1, { message: t("healthCheckPathRequired") }), - hcMethod: z - .string() - .min(1, { message: t("healthCheckMethodRequired") }), - hcInterval: z - .int() - .positive() - .min(5, { message: t("healthCheckIntervalMin") }), - hcTimeout: z - .int() - .positive() - .min(1, { message: t("healthCheckTimeoutMin") }), - hcStatus: z.int().positive().min(100).optional().nullable(), - hcHeaders: z - .array(z.object({ name: z.string(), value: z.string() })) - .nullable() - .optional(), - hcScheme: z.string().optional(), - hcHostname: z.string(), - hcPort: z - .string() - .min(1, { message: t("healthCheckPortInvalid") }) - .refine( - (val) => { - const port = parseInt(val); - return port > 0 && port <= 65535; - }, - { - message: t("healthCheckPortInvalid") - } - ), - hcFollowRedirects: z.boolean(), - hcMode: z.string(), - hcUnhealthyInterval: z.int().positive().min(5), - hcTlsServerName: z.string() - }); - - const form = useForm>({ - resolver: zodResolver(healthCheckSchema), - defaultValues: {} - }); - - useEffect(() => { - if (!open) return; - - // Determine default scheme from target method - const getDefaultScheme = () => { - if (initialConfig?.hcScheme) { - return initialConfig.hcScheme; - } - // Default to target method if it's http or https, otherwise default to http - if (targetMethod === "https") { - return "https"; - } - return "http"; - }; - - form.reset({ - hcEnabled: initialConfig?.hcEnabled, - hcPath: initialConfig?.hcPath, - hcMethod: initialConfig?.hcMethod, - hcInterval: initialConfig?.hcInterval, - hcTimeout: initialConfig?.hcTimeout, - hcStatus: initialConfig?.hcStatus, - hcHeaders: initialConfig?.hcHeaders, - hcScheme: getDefaultScheme(), - hcHostname: initialConfig?.hcHostname, - hcPort: initialConfig?.hcPort - ? 
initialConfig.hcPort.toString() - : "", - hcFollowRedirects: initialConfig?.hcFollowRedirects, - hcMode: initialConfig?.hcMode, - hcUnhealthyInterval: initialConfig?.hcUnhealthyInterval, - hcTlsServerName: initialConfig?.hcTlsServerName ?? "" - }); - }, [open]); - - const watchedEnabled = form.watch("hcEnabled"); - - const handleFieldChange = async (fieldName: string, value: any) => { - try { - const currentValues = form.getValues(); - const updatedValues = { ...currentValues, [fieldName]: value }; - - // Convert hcPort from string to number before passing to parent - const configToSend: HealthCheckConfig = { - ...updatedValues, - hcPort: parseInt(updatedValues.hcPort), - hcStatus: updatedValues.hcStatus || null - }; - - await onChanges(configToSend); - } catch (error) { - toast({ - title: t("healthCheckError"), - description: t("healthCheckErrorDescription"), - variant: "destructive" - }); - } - }; - - return ( - - - - {t("configureHealthCheck")} - - {t("configureHealthCheckDescription", { - target: targetAddress - })} - - - -
- - {/* Enable Health Checks */} - ( - -
- - {t("enableHealthChecks")} - - - {t( - "enableHealthChecksDescription" - )} - -
- - { - field.onChange(value); - handleFieldChange( - "hcEnabled", - value - ); - }} - /> - -
- )} - /> - - {watchedEnabled && ( -
-
- ( - - - {t("healthScheme")} - - - - - )} - /> - ( - - - {t("healthHostname")} - - - { - field.onChange( - e - ); - handleFieldChange( - "hcHostname", - e.target - .value - ); - }} - /> - - - - )} - /> - ( - - - {t("healthPort")} - - - { - const value = - e.target - .value; - field.onChange( - value - ); - handleFieldChange( - "hcPort", - value - ); - }} - /> - - - - )} - /> - ( - - - {t("healthCheckPath")} - - - { - field.onChange( - e - ); - handleFieldChange( - "hcPath", - e.target - .value - ); - }} - /> - - - - )} - /> -
- - {/* HTTP Method */} - ( - - - {t("httpMethod")} - - - - - )} - /> - - {/* Check Interval, Timeout, and Retry Attempts */} -
- ( - - - {t( - "healthyIntervalSeconds" - )} - - - { - const value = - parseInt( - e.target - .value - ); - field.onChange( - value - ); - handleFieldChange( - "hcInterval", - value - ); - }} - /> - - - - )} - /> - - ( - - - {t( - "unhealthyIntervalSeconds" - )} - - - { - const value = - parseInt( - e.target - .value - ); - field.onChange( - value - ); - handleFieldChange( - "hcUnhealthyInterval", - value - ); - }} - /> - - - - )} - /> - - ( - - - {t("timeoutSeconds")} - - - { - const value = - parseInt( - e.target - .value - ); - field.onChange( - value - ); - handleFieldChange( - "hcTimeout", - value - ); - }} - /> - - - - )} - /> -
- - {/* Expected Response Codes */} - ( - - - {t("expectedResponseCodes")} - - - { - const value = - parseInt( - e.target - .value - ); - field.onChange( - value - ); - handleFieldChange( - "hcStatus", - value - ); - }} - /> - - - {t( - "expectedResponseCodesDescription" - )} - - - - )} - /> - - {/*TLS Server Name (SNI)*/} - ( - - - {t("tlsServerName")} - - - { - field.onChange(e); - handleFieldChange( - "hcTlsServerName", - e.target.value - ); - }} - /> - - - {t( - "tlsServerNameDescription" - )} - - - - )} - /> - - {/* Custom Headers */} - ( - - - {t("customHeaders")} - - - { - field.onChange( - value - ); - handleFieldChange( - "hcHeaders", - value - ); - }} - rows={4} - /> - - - {t( - "customHeadersDescription" - )} - - - - )} - /> -
- )} - - -
- - - -
-
- ); -} diff --git a/src/components/HealthCheckFormFields.tsx b/src/components/HealthCheckFormFields.tsx new file mode 100644 index 000000000..6f5d528db --- /dev/null +++ b/src/components/HealthCheckFormFields.tsx @@ -0,0 +1,768 @@ +"use client"; + +import { UseFormReturn } from "react-hook-form"; +import { useTranslations } from "next-intl"; +import { Input } from "@/components/ui/input"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue +} from "@/components/ui/select"; +import { StrategySelect } from "@app/components/StrategySelect"; +import { Switch } from "@/components/ui/switch"; +import { HeadersInput } from "@app/components/HeadersInput"; +import { + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage +} from "@/components/ui/form"; +import { ExternalLink, KeyRound } from "lucide-react"; +import Link from "next/link"; + +type HealthCheckFormFieldsProps = { + form: UseFormReturn; + onFieldChange?: (fieldName: string, value: any) => void; + showNameField?: boolean; + hideEnabledField?: boolean; + watchedEnabled?: boolean; + watchedMode?: string; +}; + +export function HealthCheckFormFields({ + form, + onFieldChange, + showNameField, + hideEnabledField, + watchedEnabled, + watchedMode +}: HealthCheckFormFieldsProps) { + const t = useTranslations(); + + const showFields = hideEnabledField || watchedEnabled; + + const handleChange = ( + fieldName: string, + value: any, + fieldOnChange: (v: any) => void + ) => { + fieldOnChange(value); + if (onFieldChange) { + onFieldChange(fieldName, value); + } + }; + + return ( + <> + {/* Name */} + {showNameField && ( + ( + + {t("standaloneHcNameLabel")} + + + + + + )} + /> + )} + + {/* Enable Health Checks */} + {!hideEnabledField && ( + ( + +
+ {t("enableHealthChecks")} +
+ + + handleChange( + "hcEnabled", + value, + field.onChange + ) + } + /> + +
+ )} + /> + )} + + {showFields && ( +
+ {/* Strategy */} + ( + + + + handleChange( + "hcMode", + value, + field.onChange + ) + } + /> + + + + )} + /> + + {/* Inline contact-sales banner for SNMP / ICMP */} + {(watchedMode === "snmp" || watchedMode === "icmp") && ( +
+
+
+ + + Contact sales to enable this feature.{" "} + + Book a demo + + + {" or "} + + contact us + + + . + +
+
+
+ )} + + {/* Connection fields + all remaining config — hidden for SNMP / ICMP */} + {watchedMode !== "snmp" && watchedMode !== "icmp" && ( + <> + {/* Connection fields */} + {watchedMode === "tcp" ? ( +
+ ( + + + {t("healthHostname")} + + + + handleChange( + "hcHostname", + e.target.value, + (v) => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t("healthPort")} + + + { + const value = + e.target.value; + handleChange( + "hcPort", + value, + field.onChange + ); + }} + /> + + + + )} + /> +
+ ) : ( +
+ ( + + + {t("healthScheme")} + + + + + )} + /> + ( + + + {t("healthHostname")} + + + + handleChange( + "hcHostname", + e.target.value, + (v) => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t("healthPort")} + + + { + const value = + e.target.value; + handleChange( + "hcPort", + value, + field.onChange + ); + }} + /> + + + + )} + /> +
+ )} + + {/* HTTP Method + Path + Timeout (shown when not TCP) */} + {watchedMode !== "tcp" && ( +
+ ( + + + {t("httpMethod")} + + + + + )} + /> + ( + + + {t("healthCheckPath")} + + + + handleChange( + "hcPath", + e.target.value, + (v) => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> + ( + + + {t("timeoutSeconds")} + + + { + const value = + parseInt( + e.target + .value + ); + handleChange( + "hcTimeout", + value, + field.onChange + ); + }} + /> + + + + )} + /> +
+ )} + + {/* TCP timeout (shown only for TCP) */} + {watchedMode === "tcp" && ( + ( + + + {t("timeoutSeconds")} + + + { + const value = parseInt( + e.target.value + ); + handleChange( + "hcTimeout", + value, + field.onChange + ); + }} + /> + + + + )} + /> + )} + + {/* Healthy interval + healthy threshold */} +
+ ( + + + {t("healthyIntervalSeconds")} + + + { + const value = parseInt( + e.target.value + ); + handleChange( + "hcInterval", + value, + field.onChange + ); + }} + /> + + + + )} + /> + ( + + + {t("healthyThreshold")} + + + { + const value = parseInt( + e.target.value + ); + handleChange( + "hcHealthyThreshold", + value, + field.onChange + ); + }} + /> + + + + )} + /> +
+ + {/* Unhealthy interval + unhealthy threshold */} +
+ ( + + + {t("unhealthyIntervalSeconds")} + + + { + const value = parseInt( + e.target.value + ); + handleChange( + "hcUnhealthyInterval", + value, + field.onChange + ); + }} + /> + + + + )} + /> + ( + + + {t("unhealthyThreshold")} + + + { + const value = parseInt( + e.target.value + ); + handleChange( + "hcUnhealthyThreshold", + value, + field.onChange + ); + }} + /> + + + + )} + /> +
+ + {/* HTTP-only fields */} + {watchedMode !== "tcp" && ( + <> + {/* Expected Response Codes + TLS Server Name */} +
+ ( + + + {t( + "expectedResponseCodes" + )} + + + { + const val = + e.target + .value; + const value = + val + ? parseInt( + val + ) + : null; + handleChange( + "hcStatus", + value, + field.onChange + ); + }} + /> + + + + )} + /> + ( + + + {t("tlsServerName")} + + + + handleChange( + "hcTlsServerName", + e.target + .value, + (v) => + field.onChange( + e + ) + ) + } + /> + + + + )} + /> +
+ + {/* Follow Redirects inline toggle */} + ( + + + {t("followRedirects")} + + + + handleChange( + "hcFollowRedirects", + value, + field.onChange + ) + } + /> + + + )} + /> + + {/* Custom Headers */} + ( + + + {t("customHeaders")} + + + + handleChange( + "hcHeaders", + value, + field.onChange + ) + } + rows={4} + /> + + + {t( + "customHeadersDescription" + )} + + + + )} + /> + + )} + + )} +
+ )} + + ); +} diff --git a/src/components/HealthChecksTable.tsx b/src/components/HealthChecksTable.tsx new file mode 100644 index 000000000..9e5d9f2c6 --- /dev/null +++ b/src/components/HealthChecksTable.tsx @@ -0,0 +1,432 @@ +"use client"; + +import UptimeMiniBar from "@app/components/UptimeMiniBar"; + +import ConfirmDeleteDialog from "@app/components/ConfirmDeleteDialog"; +import HealthCheckCredenza, { + HealthCheckRow +} from "@app/components/HealthCheckCredenza"; +import { Badge } from "@app/components/ui/badge"; +import { Button } from "@app/components/ui/button"; +import { DataTable, ExtendedColumnDef } from "@app/components/ui/data-table"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger +} from "@app/components/ui/dropdown-menu"; +import { Switch } from "@app/components/ui/switch"; +import { toast } from "@app/hooks/useToast"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { orgQueries } from "@app/lib/queries"; +import { useQuery, useQueryClient } from "@tanstack/react-query"; +import { ArrowUpDown, ArrowUpRight, MoreHorizontal } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { useState } from "react"; +import type { PaginationState } from "@tanstack/react-table"; +import type { DataTablePaginationState } from "@app/components/ui/data-table"; +import { useNavigationContext } from "@app/hooks/useNavigationContext"; +import { useDebouncedCallback } from "use-debounce"; +import Link from "next/link"; +import { PaidFeaturesAlert } from "@app/components/PaidFeaturesAlert"; +import { usePaidStatus } from "@app/hooks/usePaidStatus"; +import { tierMatrix } from "@server/lib/billing/tierMatrix"; + +type StandaloneHealthChecksTableProps = { + orgId: string; +}; + +function formatTarget(row: HealthCheckRow): string { + if (!row.hcHostname) return "-"; + if (row.hcMode === "tcp") { + if (!row.hcPort) return 
row.hcHostname; + return `${row.hcHostname}:${row.hcPort}`; + } + // HTTP / default + const scheme = row.hcScheme ?? "http"; + const host = row.hcHostname; + const port = row.hcPort ? `:${row.hcPort}` : ""; + const path = row.hcPath ?? "/"; + return `${scheme}://${host}${port}${path}`; +} + +const healthLabel: Record = { + healthy: "Healthy", + unhealthy: "Unhealthy", + unknown: "Unknown" +}; + +const healthVariant: Record< + HealthCheckRow["hcHealth"], + "green" | "red" | "secondary" +> = { + healthy: "green", + unhealthy: "red", + unknown: "secondary" +}; + +export default function HealthChecksTable({ + orgId +}: StandaloneHealthChecksTableProps) { + const t = useTranslations(); + const api = createApiClient(useEnvContext()); + const queryClient = useQueryClient(); + const { isPaidUser } = usePaidStatus(); + const isPaid = isPaidUser(tierMatrix.standaloneHealthChecks); + + const [credenzaOpen, setCredenzaOpen] = useState(false); + const { + navigate: filter, + isNavigating: isFiltering, + searchParams + } = useNavigationContext(); + + const [deleteOpen, setDeleteOpen] = useState(false); + const [selected, setSelected] = useState(null); + const [togglingId, setTogglingId] = useState(null); + + const page = Math.max(1, Number(searchParams.get("page") ?? 1)); + const pageSize = Math.max(1, Number(searchParams.get("pageSize") ?? 20)); + const pageIndex = page - 1; + const query = searchParams.get("query") ?? undefined; + + const { + data, + isLoading, + refetch, + isRefetching + } = useQuery({ + ...orgQueries.standaloneHealthChecks({ + orgId, + limit: pageSize, + offset: pageIndex * pageSize, + query + }), + refetchInterval: 10_000 + }); + + const rows = data?.healthChecks ?? []; + const total = data?.pagination.total ?? 
0; + const pageCount = Math.max(1, Math.ceil(total / pageSize)); + + const paginationState: DataTablePaginationState = { + pageIndex, + pageSize, + pageCount + }; + + const handlePaginationChange = (newState: PaginationState) => { + searchParams.set("page", (newState.pageIndex + 1).toString()); + searchParams.set("pageSize", newState.pageSize.toString()); + filter({ searchParams }); + }; + + const handleSearchChange = useDebouncedCallback((value: string) => { + if (value) { + searchParams.set("query", value); + } else { + searchParams.delete("query"); + } + searchParams.delete("page"); + filter({ searchParams }); + }, 300); + + const invalidate = () => + queryClient.invalidateQueries({ + queryKey: ["ORG", orgId, "STANDALONE_HEALTH_CHECKS"] + }); + + const handleToggleEnabled = async ( + row: HealthCheckRow, + enabled: boolean + ) => { + setTogglingId(row.targetHealthCheckId); + try { + await api.post( + `/org/${orgId}/health-check/${row.targetHealthCheckId}`, + { hcEnabled: enabled } + ); + await invalidate(); + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } finally { + setTogglingId(null); + } + }; + + const handleDelete = async () => { + if (!selected) return; + try { + await api.delete( + `/org/${orgId}/health-check/${selected.targetHealthCheckId}` + ); + await invalidate(); + toast({ title: t("standaloneHcDeleted") }); + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } finally { + setDeleteOpen(false); + setSelected(null); + } + }; + + const columns: ExtendedColumnDef[] = [ + { + accessorKey: "name", + enableHiding: false, + friendlyName: t("name"), + header: ({ column }) => ( + + ), + cell: ({ row }) => ( + {row.original.name ? 
row.original.name : "-"} + ) + }, + { + id: "mode", + friendlyName: t("standaloneHcColumnMode"), + header: () => ( + {t("standaloneHcColumnMode")} + ), + cell: ({ row }) => ( + + {row.original.hcMode?.toUpperCase() ?? "-"} + + ) + }, + { + id: "target", + friendlyName: t("standaloneHcColumnTarget"), + header: () => ( + {t("standaloneHcColumnTarget")} + ), + cell: ({ row }) => {formatTarget(row.original)} + }, + { + id: "resource", + friendlyName: "Resource", + header: () => ( + Resource + ), + cell: ({ row }) => { + const r = row.original; + if (!r.resourceId || !r.resourceName || !r.resourceNiceId) { + return -; + } + return ( + + + + ); + } + }, + { + id: "site", + friendlyName: "Site", + header: () => ( + Site + ), + cell: ({ row }) => { + const r = row.original; + if (!r.siteId || !r.siteName || !r.siteNiceId) { + return -; + } + return ( + + + + ); + } + }, + { + id: "health", + friendlyName: t("standaloneHcColumnHealth"), + header: () => ( + {t("standaloneHcColumnHealth")} + ), + cell: ({ row }) => { + const health = row.original.hcHealth; + if (health === "healthy") { + return ( + +
+ {healthLabel.healthy} +
+ ); + } else if (health === "unhealthy") { + return ( + +
+ {healthLabel.unhealthy} +
+ ); + } else { + return ( + +
+ {healthLabel.unknown} +
+ ); + } + } + }, + { + id: "uptime", + friendlyName: "Uptime", + header: () => {t("uptime30d")}, + cell: ({ row }) => { + return ( + + ); + } + }, + { + accessorKey: "hcEnabled", + friendlyName: t("alertingColumnEnabled"), + header: () => ( + {t("alertingColumnEnabled")} + ), + cell: ({ row }) => { + const r = row.original; + return ( + handleToggleEnabled(r, v)} + /> + ); + } + }, + { + id: "rowActions", + enableHiding: false, + header: () => , + cell: ({ row }) => { + const r = row.original; + return ( +
+ + + + + + { + setSelected(r); + setDeleteOpen(true); + }} + > + + {t("delete")} + + + + + +
+ ); + } + } + ]; + + return ( + <> + {selected && deleteOpen && ( + { + setDeleteOpen(val); + if (!val) setSelected(null); + }} + dialog={ +
+

{t("standaloneHcDeleteQuestion")}

+
+ } + buttonText={t("delete")} + onConfirm={handleDelete} + string={selected.name} + title={t("standaloneHcDeleteTitle")} + /> + )} + + { + setCredenzaOpen(val); + if (!val) setSelected(null); + }} + orgId={orgId} + initialValues={selected} + onSaved={invalidate} + /> + + + + { + setSelected(null); + setCredenzaOpen(true); + }} + addButtonDisabled={!isPaid} + onRefresh={() => refetch()} + isRefreshing={isRefetching || isLoading || isFiltering} + addButtonText={t("standaloneHcAddButton")} + enableColumnVisibility + stickyLeftColumn="name" + stickyRightColumn="rowActions" + pagination={paginationState} + onPaginationChange={handlePaginationChange} + /> + + ); +} diff --git a/src/components/InternalResourceForm.tsx b/src/components/InternalResourceForm.tsx index 12dfbafee..3a693d82b 100644 --- a/src/components/InternalResourceForm.tsx +++ b/src/components/InternalResourceForm.tsx @@ -1,6 +1,10 @@ "use client"; import { HorizontalTabs } from "@app/components/HorizontalTabs"; +import { + OptionSelect, + type OptionSelectOption +} from "@app/components/OptionSelect"; import { PaidFeaturesAlert } from "@app/components/PaidFeaturesAlert"; import { StrategySelect } from "@app/components/StrategySelect"; import { Tag, TagInput } from "@app/components/tags/tag-input"; @@ -34,7 +38,6 @@ import { getUserDisplayName } from "@app/lib/getUserDisplayName"; import { orgQueries, resourceQueries } from "@app/lib/queries"; import { zodResolver } from "@hookform/resolvers/zod"; import { tierMatrix } from "@server/lib/billing/tierMatrix"; -import { ListSitesResponse } from "@server/routers/site"; import { UserType } from "@server/types/UserTypes"; import { useQuery } from "@tanstack/react-query"; import { ChevronsUpDown, ExternalLink } from "lucide-react"; @@ -42,9 +45,15 @@ import { useTranslations } from "next-intl"; import { useEffect, useRef, useState } from "react"; import { useForm } from "react-hook-form"; import { z } from "zod"; -import { SitesSelector, type Selectedsite } from 
"./site-selector"; +import { + MultiSitesSelector, + formatMultiSitesSelectorLabel +} from "./multi-site-selector"; +import type { Selectedsite } from "./site-selector"; import { CaretSortIcon } from "@radix-ui/react-icons"; import { MachinesSelector } from "./machines-selector"; +import DomainPicker from "@app/components/DomainPicker"; +import { SwitchInput } from "@app/components/SwitchInput"; // --- Helpers (shared) --- @@ -118,15 +127,15 @@ export const cleanForFQDN = (name: string): string => // --- Types --- -type Site = ListSitesResponse["sites"][0]; +export type InternalResourceMode = "host" | "cidr" | "http"; export type InternalResourceData = { id: number; name: string; orgId: string; - siteName: string; - mode: "host" | "cidr"; - siteId: number; + siteNames: string[]; + mode: InternalResourceMode; + siteIds: number[]; niceId: string; destination: string; alias?: string | null; @@ -135,14 +144,30 @@ export type InternalResourceData = { disableIcmp?: boolean; authDaemonMode?: "site" | "remote" | null; authDaemonPort?: number | null; + httpHttpsPort?: number | null; + scheme?: "http" | "https" | null; + ssl?: boolean; + subdomain?: string | null; + domainId?: string | null; + fullDomain?: string | null; }; const tagSchema = z.object({ id: z.string(), text: z.string() }); +function buildSelectedSitesForResource( + resource: InternalResourceData, +): Selectedsite[] { + return resource.siteIds.map((siteId, idx) => ({ + name: resource.siteNames[idx] ?? 
"", + siteId, + type: "newt" as const + })); +} + export type InternalResourceFormValues = { name: string; - siteId: number; - mode: "host" | "cidr"; + siteIds: number[]; + mode: InternalResourceMode; destination: string; alias?: string | null; niceId?: string; @@ -151,6 +176,12 @@ export type InternalResourceFormValues = { disableIcmp?: boolean; authDaemonMode?: "site" | "remote" | null; authDaemonPort?: number | null; + httpHttpsPort?: number | null; + scheme?: "http" | "https"; + ssl?: boolean; + httpConfigSubdomain?: string | null; + httpConfigDomainId?: string | null; + httpConfigFullDomain?: string | null; roles?: z.infer[]; users?: z.infer[]; clients?: z.infer[]; @@ -160,28 +191,29 @@ type InternalResourceFormProps = { variant: "create" | "edit"; resource?: InternalResourceData; open?: boolean; - sites: Site[]; orgId: string; siteResourceId?: number; formId: string; onSubmit: (values: InternalResourceFormValues) => void | Promise; + onSubmitDisabledChange?: (disabled: boolean) => void; }; export function InternalResourceForm({ variant, resource, open, - sites, orgId, siteResourceId, formId, - onSubmit + onSubmit, + onSubmitDisabledChange }: InternalResourceFormProps) { const t = useTranslations(); const { env } = useEnvContext(); const { isPaidUser } = usePaidStatus(); const disableEnterpriseFeatures = env.flags.disableEnterpriseFeatures; const sshSectionDisabled = !isPaidUser(tierMatrix.sshPam); + const httpSectionDisabled = !isPaidUser(tierMatrix.httpPrivateResources); const nameRequiredKey = variant === "create" @@ -211,6 +243,22 @@ export function InternalResourceForm({ variant === "create" ? "createInternalResourceDialogModeCidr" : "editInternalResourceDialogModeCidr"; + const modeHttpKey = + variant === "create" + ? "createInternalResourceDialogModeHttp" + : "editInternalResourceDialogModeHttp"; + const schemeLabelKey = + variant === "create" + ? 
"createInternalResourceDialogScheme" + : "editInternalResourceDialogScheme"; + const enableSslLabelKey = + variant === "create" + ? "createInternalResourceDialogEnableSsl" + : "editInternalResourceDialogEnableSsl"; + const enableSslDescriptionKey = + variant === "create" + ? "createInternalResourceDialogEnableSslDescription" + : "editInternalResourceDialogEnableSslDescription"; const destinationLabelKey = variant === "create" ? "createInternalResourceDialogDestination" @@ -223,50 +271,95 @@ export function InternalResourceForm({ variant === "create" ? "createInternalResourceDialogAlias" : "editInternalResourceDialogAlias"; + const httpHttpsPortLabelKey = + variant === "create" + ? "createInternalResourceDialogModePort" + : "editInternalResourceDialogModePort"; + const httpConfigurationTitleKey = + variant === "create" + ? "createInternalResourceDialogHttpConfiguration" + : "editInternalResourceDialogHttpConfiguration"; + const httpConfigurationDescriptionKey = + variant === "create" + ? "createInternalResourceDialogHttpConfigurationDescription" + : "editInternalResourceDialogHttpConfigurationDescription"; - const formSchema = z.object({ - name: z.string().min(1, t(nameRequiredKey)).max(255, t(nameMaxKey)), - siteId: z - .number() - .int() - .positive(siteRequiredKey ? t(siteRequiredKey) : undefined), - mode: z.enum(["host", "cidr"]), - destination: z - .string() - .min( - 1, - destinationRequiredKey - ? 
{ message: t(destinationRequiredKey) } - : undefined - ), - alias: z.string().nullish(), - niceId: z - .string() - .min(1) - .max(255) - .regex(/^[a-zA-Z0-9-]+$/) - .optional(), - tcpPortRangeString: createPortRangeStringSchema(t), - udpPortRangeString: createPortRangeStringSchema(t), - disableIcmp: z.boolean().optional(), - authDaemonMode: z.enum(["site", "remote"]).optional().nullable(), - authDaemonPort: z.number().int().positive().optional().nullable(), - roles: z.array(tagSchema).optional(), - users: z.array(tagSchema).optional(), - clients: z - .array( - z.object({ - clientId: z.number(), - name: z.string() - }) - ) - .optional() - }); + const siteIdsSchema = siteRequiredKey + ? z.array(z.number().int().positive()).min(1, t(siteRequiredKey)) + : z.array(z.number().int().positive()).min(1); + + const formSchema = z + .object({ + name: z.string().min(1, t(nameRequiredKey)).max(255, t(nameMaxKey)), + siteIds: siteIdsSchema, + mode: z.enum(["host", "cidr", "http"]), + destination: z + .string() + .min( + 1, + destinationRequiredKey + ? 
{ message: t(destinationRequiredKey) } + : undefined + ), + alias: z.string().nullish(), + httpHttpsPort: z + .number() + .int() + .min(1) + .max(65535) + .optional() + .nullable(), + scheme: z.enum(["http", "https"]).optional(), + ssl: z.boolean().optional(), + httpConfigSubdomain: z.string().nullish(), + httpConfigDomainId: z.string().nullish(), + httpConfigFullDomain: z.string().nullish(), + niceId: z + .string() + .min(1) + .max(255) + .regex(/^[a-zA-Z0-9-]+$/) + .optional(), + tcpPortRangeString: createPortRangeStringSchema(t), + udpPortRangeString: createPortRangeStringSchema(t), + disableIcmp: z.boolean().optional(), + authDaemonMode: z.enum(["site", "remote"]).optional().nullable(), + authDaemonPort: z.number().int().positive().optional().nullable(), + roles: z.array(tagSchema).optional(), + users: z.array(tagSchema).optional(), + clients: z + .array( + z.object({ + clientId: z.number(), + name: z.string() + }) + ) + .optional() + }) + .superRefine((data, ctx) => { + if (data.mode !== "http") return; + if (!data.scheme) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("internalResourceDownstreamSchemeRequired"), + path: ["scheme"] + }); + } + if ( + data.httpHttpsPort == null || + !Number.isFinite(data.httpHttpsPort) || + data.httpHttpsPort < 1 + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("internalResourceHttpPortRequired"), + path: ["httpHttpsPort"] + }); + } + }); type FormData = z.infer; - const availableSites = sites.filter((s) => s.type === "newt"); - const rolesQuery = useQuery(orgQueries.roles({ orgId })); const usersQuery = useQuery(orgQueries.users({ orgId })); const clientsQuery = useQuery(orgQueries.machineClients({ orgId })); @@ -385,7 +478,7 @@ export function InternalResourceForm({ variant === "edit" && resource ? { name: resource.name, - siteId: resource.siteId, + siteIds: resource.siteIds, mode: resource.mode ?? "host", destination: resource.destination ?? "", alias: resource.alias ?? 
null, @@ -394,6 +487,12 @@ export function InternalResourceForm({ disableIcmp: resource.disableIcmp ?? false, authDaemonMode: resource.authDaemonMode ?? "site", authDaemonPort: resource.authDaemonPort ?? null, + httpHttpsPort: resource.httpHttpsPort ?? null, + scheme: resource.scheme ?? "http", + ssl: resource.ssl ?? false, + httpConfigSubdomain: resource.subdomain ?? null, + httpConfigDomainId: resource.domainId ?? null, + httpConfigFullDomain: resource.fullDomain ?? null, niceId: resource.niceId, roles: [], users: [], @@ -401,10 +500,16 @@ export function InternalResourceForm({ } : { name: "", - siteId: availableSites[0]?.siteId ?? 0, + siteIds: [], mode: "host", destination: "", alias: null, + httpHttpsPort: null, + scheme: "http", + ssl: true, + httpConfigSubdomain: null, + httpConfigDomainId: null, + httpConfigFullDomain: null, tcpPortRangeString: "*", udpPortRangeString: "*", disableIcmp: false, @@ -415,8 +520,10 @@ export function InternalResourceForm({ clients: [] }; - const [selectedSite, setSelectedSite] = useState( - availableSites[0] + const [selectedSites, setSelectedSites] = useState(() => + variant === "edit" && resource + ? buildSelectedSitesForResource(resource) + : [] ); const form = useForm({ @@ -425,6 +532,10 @@ export function InternalResourceForm({ }); const mode = form.watch("mode"); + const httpConfigSubdomain = form.watch("httpConfigSubdomain"); + const httpConfigDomainId = form.watch("httpConfigDomainId"); + const httpConfigFullDomain = form.watch("httpConfigFullDomain"); + const isHttpMode = mode === "http"; const authDaemonMode = form.watch("authDaemonMode") ?? "site"; const hasInitialized = useRef(false); const previousResourceId = useRef(null); @@ -444,10 +555,16 @@ export function InternalResourceForm({ if (variant === "create" && open) { form.reset({ name: "", - siteId: availableSites[0]?.siteId ?? 
0, + siteIds: [], mode: "host", destination: "", alias: null, + httpHttpsPort: null, + scheme: "http", + ssl: true, + httpConfigSubdomain: null, + httpConfigDomainId: null, + httpConfigFullDomain: null, tcpPortRangeString: "*", udpPortRangeString: "*", disableIcmp: false, @@ -457,12 +574,13 @@ export function InternalResourceForm({ users: [], clients: [] }); + setSelectedSites([]); setTcpPortMode("all"); setUdpPortMode("all"); setTcpCustomPorts(""); setUdpCustomPorts(""); } - }, [variant, open]); + }, [variant, open, form]); // Reset when edit dialog opens / resource changes useEffect(() => { @@ -471,10 +589,16 @@ export function InternalResourceForm({ if (resourceChanged) { form.reset({ name: resource.name, - siteId: resource.siteId, + siteIds: resource.siteIds, mode: resource.mode ?? "host", destination: resource.destination ?? "", alias: resource.alias ?? null, + httpHttpsPort: resource.httpHttpsPort ?? null, + scheme: resource.scheme ?? "http", + ssl: resource.ssl ?? false, + httpConfigSubdomain: resource.subdomain ?? null, + httpConfigDomainId: resource.domainId ?? null, + httpConfigFullDomain: resource.fullDomain ?? null, tcpPortRangeString: resource.tcpPortRangeString ?? "*", udpPortRangeString: resource.udpPortRangeString ?? "*", disableIcmp: resource.disableIcmp ?? false, @@ -484,6 +608,9 @@ export function InternalResourceForm({ users: [], clients: [] }); + setSelectedSites( + buildSelectedSitesForResource(resource) + ); setTcpPortMode( getPortModeFromString(resource.tcpPortRangeString) ); @@ -537,12 +664,18 @@ export function InternalResourceForm({ form ]); + useEffect(() => { + onSubmitDisabledChange?.(isHttpMode && httpSectionDisabled); + }, [isHttpMode, httpSectionDisabled, onSubmitDisabledChange]); + return (
{ + const siteIds = values.siteIds; onSubmit({ ...values, + siteIds, clients: (values.clients ?? []).map((c) => ({ id: c.clientId.toString(), text: c.name @@ -581,51 +714,6 @@ export function InternalResourceForm({ )} /> )} - ( - - {t("site")} - - - - - - - - { - setSelectedSite(site); - field.onChange(site.siteId); - }} - /> - - - - - )} - /> -
-
+
+
( - + - {t(modeLabelKey)} + {t("sites")} - + + + + + + + + { + setSelectedSites( + sites + ); + field.onChange( + sites.map( + (s) => + s.siteId + ) + ); + }} + /> + + )} />
+
+ { + const modeOptions: OptionSelectOption[] = + [ + { + value: "host", + label: t(modeHostKey) + }, + { + value: "cidr", + label: t(modeCidrKey) + }, + { + value: "http", + label: t(modeHttpKey) + } + ]; + return ( + + + {t(modeLabelKey)} + + + options={modeOptions} + value={field.value} + onChange={ + field.onChange + } + cols={3} + /> + + + ); + }} + /> +
+
+
+ {mode === "http" && ( +
+ ( + + + {t(schemeLabelKey)} + + + + + )} + /> +
+ )}
- + )} />
- {mode !== "cidr" && ( -
+ {mode === "host" && ( +
)} + {mode === "http" && ( +
+ ( + + + {t( + httpHttpsPortLabelKey + )} + + + { + const raw = + e.target + .value; + if ( + raw === "" + ) { + field.onChange( + null + ); + return; + } + const n = + Number(raw); + field.onChange( + Number.isFinite( + n + ) + ? n + : null + ); + }} + /> + + + + )} + /> +
+ )}
-
-
- -
- {t( - "editInternalResourceDialogPortRestrictionsDescription" + {isHttpMode && ( + + )} + + {isHttpMode ? ( +
+
+ +
+ {t(httpConfigurationDescriptionKey)} +
+
+
+ { + if (res === null) { + form.setValue( + "httpConfigSubdomain", + null + ); + form.setValue( + "httpConfigDomainId", + null + ); + form.setValue( + "httpConfigFullDomain", + null + ); + return; + } + form.setValue( + "httpConfigSubdomain", + res.subdomain ?? null + ); + form.setValue( + "httpConfigDomainId", + res.domainId + ); + form.setValue( + "httpConfigFullDomain", + res.fullDomain + ); + }} + /> +
+ ( + + + + + )} -
+ />
-
-
- - {t("editInternalResourceDialogTcp")} - -
-
- ( - -
- - {tcpPortMode === - "custom" ? ( - - - setTcpCustomPorts( - e.target - .value - ) - } - /> - - ) : ( - - )} -
- -
+ ) : ( +
+
+ +
+ {t( + "editInternalResourceDialogPortRestrictionsDescription" )} - /> -
-
-
-
- - {t("editInternalResourceDialogUdp")} - +
- ( - -
- - {udpPortMode === - "custom" ? ( - - - setUdpCustomPorts( - e.target - .value - ) - } - /> - - ) : ( - - )} -
- -
- )} - /> -
-
-
-
- - {t("editInternalResourceDialogIcmp")} - -
-
- ( - -
- - + + {t("editInternalResourceDialogTcp")} + +
+
+ ( + +
+ + {tcpPortMode === + "custom" ? ( + + + setTcpCustomPorts( + e + .target + .value + ) + } + /> + + ) : ( + + )} +
+ +
+ )} + /> +
+
+
+
+ + {t("editInternalResourceDialogUdp")} + +
+
+ ( + +
+ + {udpPortMode === + "custom" ? ( + + + setUdpCustomPorts( + e + .target + .value + ) + } + /> + + ) : ( + + )} +
+ +
+ )} + /> +
+
+
+
+ + {t( + "editInternalResourceDialogIcmp" + )} + +
+
+ ( + +
+ + + field.onChange( + !checked + ) + } + /> + + + {field.value + ? t("blocked") + : t("allowed")} + +
+ +
+ )} + /> +
-
+ )}
@@ -1213,8 +1579,8 @@ export function InternalResourceForm({ )}
- {/* SSH Access tab */} - {!disableEnterpriseFeatures && mode !== "cidr" && ( + {/* SSH Access tab (host mode only) */} + {!disableEnterpriseFeatures && mode === "host" && (
diff --git a/src/components/LicenseKeysDataTable.tsx b/src/components/LicenseKeysDataTable.tsx index 1e39c9225..a3e6f3ce5 100644 --- a/src/components/LicenseKeysDataTable.tsx +++ b/src/components/LicenseKeysDataTable.tsx @@ -1,6 +1,5 @@ "use client"; -import { ColumnDef } from "@tanstack/react-table"; import { ExtendedColumnDef } from "@app/components/ui/data-table"; import { DataTable } from "@app/components/ui/data-table"; import { Button } from "@app/components/ui/button"; diff --git a/src/components/LocaleSwitcherSelect.tsx b/src/components/LocaleSwitcherSelect.tsx index 157b87472..b6f65aa7c 100644 --- a/src/components/LocaleSwitcherSelect.tsx +++ b/src/components/LocaleSwitcherSelect.tsx @@ -36,7 +36,7 @@ export default function LocaleSwitcherSelect({ }); // Persist locale to the database (fire-and-forget) api.post("/user/locale", { locale }).catch(() => { - // Silently ignore errors — cookie is already set as fallback + // Silently ignore errors - cookie is already set as fallback }); } diff --git a/src/components/LogDataTable.tsx b/src/components/LogDataTable.tsx index 3a53a859f..14e87ff75 100644 --- a/src/components/LogDataTable.tsx +++ b/src/components/LogDataTable.tsx @@ -405,7 +405,11 @@ export function LogDataTable({ onClick={() => !disabled && onExport() } - disabled={isExporting || disabled || isExportDisabled} + disabled={ + isExporting || + disabled || + isExportDisabled + } > {isExporting ? ( diff --git a/src/components/MachineClientsTable.tsx b/src/components/MachineClientsTable.tsx index 9c1da5b4d..4ef22c83d 100644 --- a/src/components/MachineClientsTable.tsx +++ b/src/components/MachineClientsTable.tsx @@ -293,7 +293,7 @@ export default function MachineClientsTable({ } else { return ( -
+
{t("disconnected")}
); diff --git a/src/components/PendingSitesTable.tsx b/src/components/PendingSitesTable.tsx index c65cb218e..a6625037d 100644 --- a/src/components/PendingSitesTable.tsx +++ b/src/components/PendingSitesTable.tsx @@ -204,7 +204,7 @@ export default function PendingSitesTable({ } else { return ( -
+
{t("offline")}
); @@ -353,9 +353,9 @@ export default function PendingSitesTable({ - ); diff --git a/src/components/ProxyResourcesTable.tsx b/src/components/ProxyResourcesTable.tsx index fbb544ddf..dddf1312c 100644 --- a/src/components/ProxyResourcesTable.tsx +++ b/src/components/ProxyResourcesTable.tsx @@ -19,6 +19,7 @@ import { toast } from "@app/hooks/useToast"; import { createApiClient, formatAxiosError } from "@app/lib/api"; import { UpdateResourceResponse } from "@server/routers/resource"; import type { PaginationState } from "@tanstack/react-table"; +import { useQuery } from "@tanstack/react-query"; import { AxiosResponse } from "axios"; import { ArrowDown01Icon, @@ -37,6 +38,7 @@ import { useTranslations } from "next-intl"; import Link from "next/link"; import { useRouter } from "next/navigation"; import { + useEffect, useOptimistic, useRef, useState, @@ -47,6 +49,14 @@ import { useDebouncedCallback } from "use-debounce"; import z from "zod"; import { ColumnFilterButton } from "./ColumnFilterButton"; import { ControlledDataTable } from "./ui/controlled-data-table"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger +} from "@app/components/ui/tooltip"; +import type { StatusHistoryResponse } from "@server/lib/statusHistory"; +import UptimeMiniBar from "./UptimeMiniBar"; export type TargetHealth = { targetId: number; @@ -161,6 +171,13 @@ export default function ProxyResourcesTable({ const [isRefreshing, startTransition] = useTransition(); const [isNavigatingToAddPage, startNavigation] = useTransition(); + useEffect(() => { + const interval = setInterval(() => { + router.refresh(); + }, 10_000); + return () => clearInterval(interval); + }, []); + const refreshData = () => { startTransition(() => { try { @@ -361,6 +378,7 @@ export default function ProxyResourcesTable({ { accessorKey: "protocol", friendlyName: t("protocol"), + enableHiding: true, header: () => {t("protocol")}, cell: ({ row }) => { const resourceRow = row.original; @@ -422,6 +440,17 @@ 
export default function ProxyResourcesTable({ return statusOrder[statusA] - statusOrder[statusB]; } }, + { + id: "statusHistory", + friendlyName: t("uptime30d"), + header: () => {t("uptime30d")}, + cell: ({ row }) => { + const resourceRow = row.original; + return ( + + ); + } + }, { accessorKey: "domain", friendlyName: t("access"), @@ -656,7 +685,7 @@ export default function ProxyResourcesTable({ isRefreshing={isRefreshing || isFiltering} isNavigatingToAddPage={isNavigatingToAddPage} enableColumnVisibility - columnVisibility={{ niceId: false }} + columnVisibility={{ niceId: false, protocol: false }} stickyLeftColumn="name" stickyRightColumn="actions" /> diff --git a/src/components/ResourceInfoBox.tsx b/src/components/ResourceInfoBox.tsx index 36a507ea9..8006612a9 100644 --- a/src/components/ResourceInfoBox.tsx +++ b/src/components/ResourceInfoBox.tsx @@ -79,7 +79,7 @@ export default function ResourceInfoBox({}: ResourceInfoBoxType) { ) : ( -
+
Offline
)} diff --git a/src/components/S3DestinationCredenza.tsx b/src/components/S3DestinationCredenza.tsx new file mode 100644 index 000000000..7702e7932 --- /dev/null +++ b/src/components/S3DestinationCredenza.tsx @@ -0,0 +1,63 @@ +"use client"; + + +import { + Credenza, + CredenzaBody, + CredenzaClose, + CredenzaContent, + CredenzaDescription, + CredenzaFooter, + CredenzaHeader, + CredenzaTitle +} from "@app/components/Credenza"; +import { Button } from "@app/components/ui/button"; +import { ContactSalesBanner } from "@app/components/ContactSalesBanner"; +import { useTranslations } from "next-intl"; + +export interface S3DestinationCredenzaProps { + open: boolean; + onOpenChange: (open: boolean) => void; + editing: any; + orgId: string; + onSaved: () => void; +} + +export function S3DestinationCredenza({ + open, + onOpenChange, + editing, + orgId, + onSaved, +}: S3DestinationCredenzaProps) { + const t = useTranslations(); + + return ( + + + + + {editing + ? t("S3DestEditTitle") + : t("S3DestAddTitle")} + + + {editing + ? t("S3DestEditDescription") + : t("S3DestAddDescription")} + + + + + + + + + + + + + + + ); +} diff --git a/src/components/ShareLinksTable.tsx b/src/components/ShareLinksTable.tsx index efac77df3..333cee03f 100644 --- a/src/components/ShareLinksTable.tsx +++ b/src/components/ShareLinksTable.tsx @@ -144,9 +144,9 @@ export default function ShareLinksTable({ - ); diff --git a/src/components/SiteInfoCard.tsx b/src/components/SiteInfoCard.tsx index b075e453d..56492ff54 100644 --- a/src/components/SiteInfoCard.tsx +++ b/src/components/SiteInfoCard.tsx @@ -52,7 +52,7 @@ export default function SiteInfoCard({}: SiteInfoCardProps) {
) : (
-
+
{t("offline")}
)} diff --git a/src/components/SitesTable.tsx b/src/components/SitesTable.tsx index 6cca706a6..ffec95283 100644 --- a/src/components/SitesTable.tsx +++ b/src/components/SitesTable.tsx @@ -1,6 +1,7 @@ "use client"; import ConfirmDeleteDialog from "@app/components/ConfirmDeleteDialog"; +import UptimeMiniBar from "@app/components/UptimeMiniBar"; import { Badge } from "@app/components/ui/badge"; import { Button } from "@app/components/ui/button"; @@ -29,7 +30,7 @@ import { import { useTranslations } from "next-intl"; import Link from "next/link"; import { usePathname, useRouter } from "next/navigation"; -import { useState, useTransition } from "react"; +import { useState, useTransition, useEffect } from "react"; import { useDebouncedCallback } from "use-debounce"; import z from "zod"; import { ColumnFilterButton } from "./ColumnFilterButton"; @@ -84,6 +85,13 @@ export default function SitesTable({ const api = createApiClient(useEnvContext()); const t = useTranslations(); + useEffect(() => { + const interval = setInterval(() => { + router.refresh(); + }, 10_000); + return () => clearInterval(interval); + }, []); + const booleanSearchFilterSchema = z .enum(["true", "false"]) .optional() @@ -212,7 +220,7 @@ export default function SitesTable({ } else { return ( -
+
{t("offline")}
); @@ -222,6 +230,17 @@ export default function SitesTable({ } } }, + { + id: "uptime", + friendlyName: "Uptime", + header: () => {t("uptime30d")}, + cell: ({ row }) => { + const originalRow = row.original; + return ( + + ); + } + }, { accessorKey: "mbIn", friendlyName: t("dataIn"), @@ -363,9 +382,9 @@ export default function SitesTable({ - ); diff --git a/src/components/UptimeAlertSection.tsx b/src/components/UptimeAlertSection.tsx new file mode 100644 index 000000000..ce1c174f1 --- /dev/null +++ b/src/components/UptimeAlertSection.tsx @@ -0,0 +1,302 @@ +"use client"; + +import { useState, useMemo } from "react"; +import { useQuery, useQueryClient } from "@tanstack/react-query"; +import Link from "next/link"; +import { BellPlus, BellRing } from "lucide-react"; +import { + SettingsSection, + SettingsSectionHeader, + SettingsSectionTitle, + SettingsSectionDescription, + SettingsSectionBody +} from "@app/components/Settings"; +import UptimeBar from "@app/components/UptimeBar"; +import { Button } from "@app/components/ui/button"; +import { + Credenza, + CredenzaBody, + CredenzaClose, + CredenzaContent, + CredenzaDescription, + CredenzaFooter, + CredenzaHeader, + CredenzaTitle +} from "@app/components/Credenza"; +import { Input } from "@app/components/ui/input"; +import { Label } from "@app/components/ui/label"; +import { TagInput, type Tag } from "@app/components/tags/tag-input"; +import { getUserDisplayName } from "@app/lib/getUserDisplayName"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { toast } from "@app/hooks/useToast"; +import { orgQueries } from "@app/lib/queries"; + +interface UptimeAlertSectionProps { + orgId: string; + siteId?: number; + startingName?: string; + resourceId?: number; + days?: number; +} + +export default function UptimeAlertSection({ + orgId, + siteId, + startingName, + resourceId, + days = 90 +}: UptimeAlertSectionProps) { + const api = 
createApiClient(useEnvContext()); + const queryClient = useQueryClient(); + + const [open, setOpen] = useState(false); + const [name, setName] = useState(`${siteId ? "Site" : "Resource"} ${startingName} Alert`); + const [userTags, setUserTags] = useState([]); + const [roleTags, setRoleTags] = useState([]); + const [emailTags, setEmailTags] = useState([]); + const [activeUserTagIndex, setActiveUserTagIndex] = useState( + null + ); + const [activeRoleTagIndex, setActiveRoleTagIndex] = useState( + null + ); + const [activeEmailTagIndex, setActiveEmailTagIndex] = useState< + number | null + >(null); + const [loading, setLoading] = useState(false); + + const { data: alertRules, isLoading: alertRulesLoading } = useQuery( + orgQueries.alertRulesForSource({ orgId, siteId, resourceId }) + ); + + const { data: orgUsers = [] } = useQuery(orgQueries.users({ orgId })); + const { data: orgRoles = [] } = useQuery(orgQueries.roles({ orgId })); + + const allUsers = useMemo( + () => + orgUsers.map((u) => ({ + id: String(u.id), + text: getUserDisplayName({ + email: u.email, + name: u.name, + username: u.username + }) + })), + [orgUsers] + ); + + const allRoles = useMemo( + () => + orgRoles + .map((r) => ({ id: String(r.roleId), text: r.name })) + .filter((r) => r.text !== "Admin"), + [orgRoles] + ); + + const hasRules = (alertRules?.length ?? 0) > 0; + + async function handleSubmit() { + if ( + userTags.length === 0 && + roleTags.length === 0 && + emailTags.length === 0 + ) { + toast({ + variant: "destructive", + title: "No recipients", + description: + "Please add at least one user, role, or email to notify." + }); + return; + } + + setLoading(true); + try { + await api.put(`/org/${orgId}/alert-rule`, { + name, + eventType: siteId ? "site_toggle" : "resource_toggle", + enabled: true, + cooldownSeconds: 300, + siteIds: siteId ? [siteId] : [], + healthCheckIds: [], + resourceIds: resourceId ? 
[resourceId] : [], + userIds: userTags.map((tag) => tag.id), + roleIds: roleTags.map((tag) => Number(tag.id)), + emails: emailTags.map((tag) => tag.text), + webhookActions: [] + }); + + toast({ + title: "Alert created", + description: + "You will be notified when this changes status." + }); + + setOpen(false); + setName("Uptime Alert"); + setUserTags([]); + setRoleTags([]); + setEmailTags([]); + + queryClient.invalidateQueries({ + queryKey: orgQueries.alertRulesForSource({ + orgId, + siteId, + resourceId + }).queryKey + }); + } catch (e) { + toast({ + variant: "destructive", + title: "Failed to create alert", + description: formatAxiosError(e, "An error occurred.") + }); + } + setLoading(false); + } + + const alertButton = alertRulesLoading ? null : hasRules ? ( + + ) : ( + + ); + + return ( + <> + + +
+
+ Uptime + + Site availability over the last {days} days. + +
+ {alertButton} +
+
+ + + +
+ + + + + Create Email Alert + + Get notified by email when this{" "} + {siteId ? "site" : "resource"} goes offline or + comes back online. + + + +
+
+ + setName(e.target.value)} + placeholder="Alert name" + /> +
+
+ + { + const next = + typeof newTags === "function" + ? newTags(userTags) + : newTags; + setUserTags(next as Tag[]); + }} + enableAutocomplete + autocompleteOptions={allUsers} + restrictTagsToAutocompleteOptions + allowDuplicates={false} + sortTags + /> +
+
+ + { + const next = + typeof newTags === "function" + ? newTags(roleTags) + : newTags; + setRoleTags(next as Tag[]); + }} + enableAutocomplete + autocompleteOptions={allRoles} + restrictTagsToAutocompleteOptions + allowDuplicates={false} + sortTags + /> +
+
+ + { + const next = + typeof newTags === "function" + ? newTags(emailTags) + : newTags; + setEmailTags(next as Tag[]); + }} + allowDuplicates={false} + sortTags + validateTag={(tag) => + /^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(tag) + } + delimiterList={[",", "Enter"]} + /> +
+
+
+ + + + + + +
+
+ + ); +} diff --git a/src/components/UptimeBar.tsx b/src/components/UptimeBar.tsx new file mode 100644 index 000000000..636d536e9 --- /dev/null +++ b/src/components/UptimeBar.tsx @@ -0,0 +1,221 @@ +"use client"; + +import { useQuery } from "@tanstack/react-query"; +import { orgQueries } from "@app/lib/queries"; +import { + Tooltip, + TooltipContent, + TooltipTrigger +} from "@app/components/ui/tooltip"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { createApiClient } from "@app/lib/api"; +import { cn } from "@app/lib/cn"; + +function formatDuration(seconds: number): string { + if (seconds === 0) return "0s"; + if (seconds < 60) return `${Math.round(seconds)}s`; + const h = Math.floor(seconds / 3600); + const m = Math.floor((seconds % 3600) / 60); + const s = Math.round(seconds % 60); + if (h > 0) return s > 0 ? `${h}h ${m}m ${s}s` : `${h}h ${m}m`; + if (m > 0 && s > 0) return `${m}m ${s}s`; + return `${m}m`; +} + +function formatDate(dateStr: string): string { + return new Date(dateStr + "T00:00:00").toLocaleDateString([], { + month: "short", + day: "numeric", + year: "numeric" + }); +} + +function formatTime(ts: number): string { + return new Date(ts * 1000).toLocaleTimeString([], { + hour: "2-digit", + minute: "2-digit" + }); +} + +const barColorClass: Record = { + good: "bg-green-500", + degraded: "bg-yellow-500", + bad: "bg-red-500", + no_data: "bg-zinc-700" +}; + +type UptimeBarProps = { + orgId?: string; + siteId?: number; + resourceId?: number; + healthCheckId?: number; + days?: number; + title?: string; + className?: string; +}; + +export default function UptimeBar({ + orgId, + siteId, + resourceId, + healthCheckId, + days = 90, + title, + className +}: UptimeBarProps) { + const api = createApiClient(useEnvContext()); + + const siteQuery = useQuery({ + ...orgQueries.siteStatusHistory({ siteId: siteId ?? 
0, days }), + enabled: siteId != null, + meta: { api } + }); + + const hcQuery = useQuery({ + ...orgQueries.healthCheckStatusHistory({ orgId: orgId ?? "", healthCheckId: healthCheckId ?? 0, days }), + enabled: healthCheckId != null && siteId == null && resourceId == null, + meta: { api } + }); + + const resourceQuery = useQuery({ + ...orgQueries.resourceStatusHistory({ resourceId, days }), + enabled: resourceId != null && siteId == null && healthCheckId == null, + meta: { api } + }); + + const { data, isLoading } = + siteId != null ? siteQuery : + resourceId != null ? resourceQuery : + hcQuery; + + if (isLoading) { + return ( +
+ {title && ( +
{title}
+ )} +
+ {Array.from({ length: days }).map((_, i) => ( +
+ ))} +
+
+ ); + } + + if (!data) return null; + + const allNoData = data.days.every((d) => d.status === "no_data"); + + return ( +
+ {/* Header row */} +
+ {title && ( + {title} + )} +
+ {!allNoData && ( + <> + + + {data.overallUptimePercent.toFixed(2)}% + {" "} + uptime + + {data.totalDowntimeSeconds > 0 && ( + + + {formatDuration( + data.totalDowntimeSeconds + )} + {" "} + downtime + + )} + + )} + {allNoData && ( + + No data available + + )} +
+
+ + {/* Bar row */} +
+ {data.days.map((day, i) => ( + + +
+ + +
+ {formatDate(day.date)} +
+ {day.status !== "no_data" && ( +
+ Uptime:{" "} + + {day.uptimePercent.toFixed(1)}% + +
+ )} + {day.totalDowntimeSeconds > 0 && ( +
+ Downtime:{" "} + + {formatDuration( + day.totalDowntimeSeconds + )} + +
+ )} + {day.downtimeWindows.length > 0 && ( +
+ {day.downtimeWindows.map((w, wi) => ( +
+ {formatTime(w.start)} + {w.end + ? ` – ${formatTime(w.end)}` + : " – ongoing"}{" "} + + ({w.status}) + +
+ ))} +
+ )} + {day.status === "no_data" && ( +
+ No monitoring data +
+ )} +
+ + ))} +
+ + {/* Date labels */} +
+ {days} days ago + Today +
+
+ ); +} diff --git a/src/components/UptimeMiniBar.tsx b/src/components/UptimeMiniBar.tsx new file mode 100644 index 000000000..b9574054a --- /dev/null +++ b/src/components/UptimeMiniBar.tsx @@ -0,0 +1,144 @@ +"use client"; + +import { useQuery } from "@tanstack/react-query"; +import { orgQueries } from "@app/lib/queries"; +import { + Tooltip, + TooltipContent, + TooltipTrigger +} from "@app/components/ui/tooltip"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import { createApiClient } from "@app/lib/api"; +import { cn } from "@app/lib/cn"; + +function formatDuration(seconds: number): string { + if (seconds === 0) return "0s"; + if (seconds < 60) return `${Math.round(seconds)}s`; + const h = Math.floor(seconds / 3600); + const m = Math.floor((seconds % 3600) / 60); + const s = Math.round(seconds % 60); + if (h > 0) return `${h}h ${m}m`; + if (m > 0 && s > 0) return `${m}m ${s}s`; + return `${m}m`; +} + +function formatDate(dateStr: string): string { + return new Date(dateStr + "T00:00:00").toLocaleDateString([], { + month: "short", + day: "numeric" + }); +} + +const barColorClass: Record = { + good: "bg-green-500", + degraded: "bg-yellow-500", + bad: "bg-red-500", + no_data: "bg-zinc-700" +}; + +type UptimeMiniBarProps = { + orgId?: string; + siteId?: number; + resourceId?: number; + healthCheckId?: number; + days?: number; +}; + +export default function UptimeMiniBar({ + orgId, + siteId, + resourceId, + healthCheckId, + days = 30 +}: UptimeMiniBarProps) { + const api = createApiClient(useEnvContext()); + + const siteQuery = useQuery({ + ...orgQueries.siteStatusHistory({ siteId: siteId ?? 0, days }), + enabled: siteId != null, + meta: { api }, + staleTime: 5 * 60 * 1000 + }); + + const hcQuery = useQuery({ + ...orgQueries.healthCheckStatusHistory({ orgId: orgId ?? "", healthCheckId: healthCheckId ?? 
0, days }), + enabled: healthCheckId != null && siteId == null && resourceId == null, + meta: { api }, + staleTime: 5 * 60 * 1000 + }); + + const resourceQuery = useQuery({ + ...orgQueries.resourceStatusHistory({ resourceId, days }), + enabled: resourceId != null && siteId == null && healthCheckId == null, + meta: { api }, + staleTime: 5 * 60 * 1000 + }); + + const { data, isLoading } = + siteId != null ? siteQuery : + resourceId != null ? resourceQuery : + hcQuery; + + if (isLoading) { + return ( +
+
+ {Array.from({ length: days }).map((_, i) => ( +
+ ))} +
+ +
+ ); + } + + if (!data) return null; + + const allNoData = data.days.every((d) => d.status === "no_data"); + + return ( +
+
+ {data.days.map((day, i) => ( + + +
+ + +
+ {formatDate(day.date)} +
+
+ {day.status === "no_data" + ? "No data" + : `${day.uptimePercent.toFixed(1)}% uptime`} +
+ {day.totalDowntimeSeconds > 0 && ( +
+ Down:{" "} + {formatDuration(day.totalDowntimeSeconds)} +
+ )} +
+ + ))} +
+ + {allNoData + ? "No data" + : `${data.overallUptimePercent.toFixed(1)}%`} + +
+ ); +} diff --git a/src/components/UserDevicesTable.tsx b/src/components/UserDevicesTable.tsx index 4c5331015..88e495406 100644 --- a/src/components/UserDevicesTable.tsx +++ b/src/components/UserDevicesTable.tsx @@ -373,12 +373,12 @@ export default function UserDevicesTable({ - ) : ( @@ -427,7 +427,7 @@ export default function UserDevicesTable({ } else { return ( -
+
{t("disconnected")}
); diff --git a/src/components/WorldMap.tsx b/src/components/WorldMap.tsx index ac227c553..09548400b 100644 --- a/src/components/WorldMap.tsx +++ b/src/components/WorldMap.tsx @@ -218,7 +218,7 @@ function drawInteractiveCountries( }); hoverPath .datum(country) - .attr("d", path(country) as string) + .attr("d", path(country as any) as string) .style("display", null); }) diff --git a/src/components/alert-rule-editor/AlertRuleFields.tsx b/src/components/alert-rule-editor/AlertRuleFields.tsx new file mode 100644 index 000000000..8ec323261 --- /dev/null +++ b/src/components/alert-rule-editor/AlertRuleFields.tsx @@ -0,0 +1,1355 @@ +"use client"; + +import { Button } from "@app/components/ui/button"; +import { Checkbox } from "@app/components/ui/checkbox"; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList +} from "@app/components/ui/command"; +import { + FormControl, + FormField, + FormItem, + FormLabel, + FormMessage +} from "@app/components/ui/form"; +import { Input } from "@app/components/ui/input"; +import { + Popover, + PopoverContent, + PopoverTrigger +} from "@app/components/ui/popover"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue +} from "@app/components/ui/select"; +import { + RadioGroup, + RadioGroupItem +} from "@app/components/ui/radio-group"; +import { Label } from "@app/components/ui/label"; +import { StrategySelect } from "@app/components/StrategySelect"; +import { TagInput, type Tag } from "@app/components/tags/tag-input"; +import { getUserDisplayName } from "@app/lib/getUserDisplayName"; +import { + type AlertRuleFormAction, + type AlertRuleFormValues +} from "@app/lib/alertRuleForm"; +import { orgQueries } from "@app/lib/queries"; +import { useQuery } from "@tanstack/react-query"; +import { ContactSalesBanner } from "@app/components/ContactSalesBanner"; +import { Bell, Globe, ChevronsUpDown, Plus, Trash2 } from "lucide-react"; +import { useTranslations } from 
"next-intl"; +import { useEffect, useMemo, useRef, useState } from "react"; +import type { Control, UseFormReturn } from "react-hook-form"; +import { useFormContext, useWatch } from "react-hook-form"; +import { useDebounce } from "use-debounce"; + +export function AddActionPanel({ + onAdd +}: { + onAdd: (type: AlertRuleFormAction["type"]) => void; +}) { + const t = useTranslations(); + + + const EXTERNAL_INTEGRATIONS = [ + { + id: "pagerduty", + name: "PagerDuty", + logo: "/third-party/pgd.png", + description: "Send alerts to PagerDuty for incident management", + descriptionKey: t("alertingExternalPagerDutyDescription") + }, + { + id: "opsgenie", + name: "Opsgenie", + logo: "/third-party/opsgenie.png", + description: "Route alerts to Opsgenie for on-call management", + descriptionKey: t("alertingExternalOpsgenieDescription") + }, + { + id: "servicenow", + name: "ServiceNow", + logo: "/third-party/servicenow.png", + description: "Create ServiceNow incidents from alert events", + descriptionKey: t("alertingExternalServiceNowDescription") + }, + { + id: "incidentio", + name: "Incident.io", + logo: "/third-party/incidentio.png", + description: "Trigger Incident.io workflows from alert events", + descriptionKey: t("alertingExternalIncidentIoDescription") + } + ] as const; + + const EXTERNAL_IDS = EXTERNAL_INTEGRATIONS.map((i) => i.id); + + const [selected, setSelected] = useState("notify"); + + const isPremiumSelected = + selected !== null && EXTERNAL_IDS.includes(selected as any); + const isBuiltInSelected = selected !== null && !isPremiumSelected; + + const actionTypeOptions = [ + { + id: "notify", + title: t("alertingActionNotify"), + description: t("alertingActionNotifyDescription"), + icon: + }, + { + id: "webhook", + title: t("alertingActionWebhook"), + description: t("alertingActionWebhookDescription"), + icon: + }, + ...EXTERNAL_INTEGRATIONS.map((integration) => ({ + id: integration.id, + title: integration.name, + description: integration.description, + icon: ( 
+ {integration.name} + ) + })) + ]; + + const handleAdd = () => { + if (!isBuiltInSelected) return; + onAdd(selected as AlertRuleFormAction["type"]); + setSelected(null); + }; + + return ( +
+ setSelected(v)} + /> + {isPremiumSelected && } + {!isPremiumSelected && ( + + )} +
+ ); +} + +function SiteMultiSelect({ + orgId, + value, + onChange +}: { + orgId: string; + value: number[]; + onChange: (v: number[]) => void; +}) { + const t = useTranslations(); + const [open, setOpen] = useState(false); + const [q, setQ] = useState(""); + const [debounced] = useDebounce(q, 150); + const { data: sites = [] } = useQuery( + orgQueries.sites({ orgId, query: debounced, perPage: 500 }) + ); + const toggle = (id: number) => { + if (value.includes(id)) { + onChange(value.filter((x) => x !== id)); + } else { + onChange([...value, id]); + } + }; + const summary = + value.length === 0 + ? t("alertingSelectSites") + : t("alertingSitesSelected", { count: value.length }); + return ( + + + + + + + + + {t("siteNotFound")} + + {sites.map((s) => ( + toggle(s.siteId)} + className="cursor-pointer" + > + + {s.name} + + ))} + + + + + + ); +} + +function HealthCheckMultiSelect({ + orgId, + value, + onChange +}: { + orgId: string; + value: number[]; + onChange: (v: number[]) => void; +}) { + const t = useTranslations(); + const [open, setOpen] = useState(false); + const [q, setQ] = useState(""); + const [debounced] = useDebounce(q, 150); + + const { data: healthChecks = [] } = useQuery( + orgQueries.healthChecks({ orgId }) + ); + + const shown = useMemo(() => { + const query = debounced.trim().toLowerCase(); + const base = query + ? 
healthChecks.filter((hc) => + hc.name.toLowerCase().includes(query) + ) + : healthChecks; + // Always keep already-selected items visible even if they fall outside the search + if (query && value.length > 0) { + const selectedNotInBase = healthChecks.filter( + (hc) => + value.includes(hc.targetHealthCheckId) && + !base.some( + (b) => b.targetHealthCheckId === hc.targetHealthCheckId + ) + ); + return [...selectedNotInBase, ...base]; + } + return base; + }, [healthChecks, debounced, value]); + + const toggle = (id: number) => { + if (value.includes(id)) { + onChange(value.filter((x) => x !== id)); + } else { + onChange([...value, id]); + } + }; + + const summary = + value.length === 0 + ? t("alertingSelectHealthChecks") + : t("alertingHealthChecksSelected", { count: value.length }); + + return ( + + + + + + + + + + {t("alertingHealthChecksEmpty")} + + + {shown.map((hc) => ( + + toggle(hc.targetHealthCheckId) + } + className="cursor-pointer" + > + + + {hc.name} + + + ))} + + + + + + ); +} + +function ResourceMultiSelect({ + orgId, + value, + onChange +}: { + orgId: string; + value: number[]; + onChange: (v: number[]) => void; +}) { + const t = useTranslations(); + const [open, setOpen] = useState(false); + const [q, setQ] = useState(""); + const [debounced] = useDebounce(q, 150); + + const { data: resources = [] } = useQuery( + orgQueries.resources({ orgId, query: debounced, perPage: 10 }) + ); + + const shown = useMemo(() => { + return resources; + }, [resources]); + + const toggle = (id: number) => { + if (value.includes(id)) { + onChange(value.filter((x) => x !== id)); + } else { + onChange([...value, id]); + } + }; + + const summary = + value.length === 0 + ? 
t("alertingSelectResources") + : t("alertingResourcesSelected", { count: value.length }); + + return ( + + + + + + + + + + {t("alertingResourcesEmpty")} + + + {shown.map((r) => ( + toggle(r.resourceId)} + className="cursor-pointer" + > + + {r.name} + + ))} + + + + + + ); +} + +export function ActionBlock({ + orgId, + index, + control, + form, + onRemove, + onUpdate, + canRemove +}: { + orgId: string; + index: number; + control: Control; + form: UseFormReturn; + onRemove: () => void; + onUpdate: (val: AlertRuleFormAction) => void; + canRemove: boolean; +}) { + const t = useTranslations(); + const type = useWatch({ control, name: `actions.${index}.type` }); + + const typeHeader = + type === "notify" ? ( +
+ + {t("alertingActionNotify")} +
+ ) : ( +
+ + {t("alertingActionWebhook")} +
+ ); + + return ( +
+ {canRemove && ( + + )} + {typeHeader} + {type === "notify" && ( + + )} + {type === "webhook" && ( + + )} +
+ ); +} + +function NotifyActionFields({ + orgId, + index, + control, + form +}: { + orgId: string; + index: number; + control: Control; + form: UseFormReturn; +}) { + const t = useTranslations(); + + const [emailActiveIdx, setEmailActiveIdx] = useState(null); + const [activeUsersTagIndex, setActiveUsersTagIndex] = useState< + number | null + >(null); + const [activeRolesTagIndex, setActiveRolesTagIndex] = useState< + number | null + >(null); + + const { data: orgUsers = [], isLoading: isLoadingUsers } = useQuery(orgQueries.users({ orgId })); + const { data: orgRoles = [], isLoading: isLoadingRoles } = useQuery(orgQueries.roles({ orgId })); + + const allUsers = useMemo( + () => + orgUsers.map((u) => ({ + id: String(u.id), + text: getUserDisplayName({ + email: u.email, + name: u.name, + username: u.username + }) + })), + [orgUsers] + ); + + const allRoles = useMemo( + () => + orgRoles + .map((r) => ({ id: String(r.roleId), text: r.name })) + .filter((r) => r.text !== "Admin"), + [orgRoles] + ); + + const hasResolvedTagsRef = useRef(false); + + useEffect(() => { + if (isLoadingUsers || isLoadingRoles) return; + if (hasResolvedTagsRef.current) return; + + const currentUserTags = form.getValues( + `actions.${index}.userTags` + ) as Tag[]; + const currentRoleTags = form.getValues( + `actions.${index}.roleTags` + ) as Tag[]; + + const resolvedUserTags = currentUserTags.map((tag) => { + const match = allUsers.find((u) => u.id === tag.id); + return match ? { id: tag.id, text: match.text } : tag; + }); + + const resolvedRoleTags = currentRoleTags.map((tag) => { + const match = allRoles.find((r) => r.id === tag.id); + return match ? 
{ id: tag.id, text: match.text } : tag; + }); + + const userTagsNeedUpdate = resolvedUserTags.some( + (t, i) => t.text !== currentUserTags[i]?.text + ); + const roleTagsNeedUpdate = resolvedRoleTags.some( + (t, i) => t.text !== currentRoleTags[i]?.text + ); + + if (userTagsNeedUpdate) { + form.setValue(`actions.${index}.userTags`, resolvedUserTags, { + shouldDirty: false + }); + } + if (roleTagsNeedUpdate) { + form.setValue(`actions.${index}.roleTags`, resolvedRoleTags, { + shouldDirty: false + }); + } + + hasResolvedTagsRef.current = true; + }, [isLoadingUsers, isLoadingRoles, allUsers, allRoles]); + + const userTags = (useWatch({ control, name: `actions.${index}.userTags` }) ?? []) as Tag[]; + const roleTags = (useWatch({ control, name: `actions.${index}.roleTags` }) ?? []) as Tag[]; + const emailTags = (useWatch({ control, name: `actions.${index}.emailTags` }) ?? []) as Tag[]; + + return ( +
+ ( + + {t("alertingNotifyUsers")} + + { + const next = + typeof newTags === "function" + ? newTags(userTags) + : newTags; + form.setValue( + `actions.${index}.userTags`, + next as Tag[], + { shouldDirty: true } + ); + }} + enableAutocomplete={true} + autocompleteOptions={allUsers} + allowDuplicates={false} + restrictTagsToAutocompleteOptions={true} + sortTags={true} + /> + + + + )} + /> + ( + + {t("alertingNotifyRoles")} + + { + const next = + typeof newTags === "function" + ? newTags(roleTags) + : newTags; + form.setValue( + `actions.${index}.roleTags`, + next as Tag[], + { shouldDirty: true } + ); + }} + enableAutocomplete={true} + autocompleteOptions={allRoles} + allowDuplicates={false} + restrictTagsToAutocompleteOptions={true} + sortTags={true} + /> + + + + )} + /> + ( + + {t("alertingNotifyEmails")} + + { + const next = + typeof updater === "function" + ? updater(emailTags) + : updater; + form.setValue( + `actions.${index}.emailTags`, + next as Tag[], + { shouldDirty: true } + ); + }} + activeTagIndex={emailActiveIdx} + setActiveTagIndex={setEmailActiveIdx} + placeholder={t("alertingEmailPlaceholder")} + size="sm" + allowDuplicates={false} + sortTags={true} + validateTag={(tag) => + /^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(tag) + } + delimiterList={[",", "Enter"]} + /> + + + + )} + /> +
+ ); +} + +function WebhookActionFields({ + index, + control, + form +}: { + index: number; + control: Control; + form: UseFormReturn; +}) { + const t = useTranslations(); + return ( +
+ ( + + URL + + + + + + )} + /> + ( + + {t("alertingWebhookMethod")} + + + + )} + /> + {/* Authentication */} +
+
+ +

+ {t("httpDestAuthDescription")} +

+
+ ( + + + + {/* None */} +
+ +
+ +

+ {t("httpDestAuthNoneDescription")} +

+
+
+ + {/* Bearer */} +
+ +
+
+ +

+ {t("httpDestAuthBearerDescription")} +

+
+ {field.value === "bearer" && ( + ( + + + + + + + )} + /> + )} +
+
+ + {/* Basic */} +
+ +
+
+ +

+ {t("httpDestAuthBasicDescription")} +

+
+ {field.value === "basic" && ( + ( + + + + + + + )} + /> + )} +
+
+ + {/* Custom */} +
+ +
+
+ +

+ {t("httpDestAuthCustomDescription")} +

+
+ {field.value === "custom" && ( +
+ ( + + + + + + + )} + /> + ( + + + + + + + )} + /> +
+ )} +
+
+
+
+ +
+ )} + /> +
+ +
+ ); +} + +function WebhookHeadersField({ + index, + control, + form +}: { + index: number; + control: Control; + form: UseFormReturn; +}) { + const t = useTranslations(); + const headers = + (useWatch({ control, name: `actions.${index}.headers` as const }) ?? []); + return ( +
+ {t("alertingWebhookHeaders")} + {headers.map((_, hi) => ( +
+ ( + + + + + + )} + /> + ( + + + + + + )} + /> + +
+ ))} + +
+ ); +} + +export function AlertRuleSourceFields({ + orgId, + control +}: { + orgId: string; + control: Control; +}) { + const t = useTranslations(); + const { setValue, getValues } = useFormContext(); + const sourceType = useWatch({ control, name: "sourceType" }); + const allSites = useWatch({ control, name: "allSites" }); + const allHealthChecks = useWatch({ control, name: "allHealthChecks" }); + const allResources = useWatch({ control, name: "allResources" }); + + const siteStrategyOptions = useMemo( + () => [ + { + id: "all" as const, + title: t("alertingAllSites"), + description: t("alertingAllSitesDescription") + }, + { + id: "specific" as const, + title: t("alertingSpecificSites"), + description: t("alertingSpecificSitesDescription") + } + ], + [t] + ); + + const healthCheckStrategyOptions = useMemo( + () => [ + { + id: "all" as const, + title: t("alertingAllHealthChecks"), + description: t("alertingAllHealthChecksDescription") + }, + { + id: "specific" as const, + title: t("alertingSpecificHealthChecks"), + description: t("alertingSpecificHealthChecksDescription") + } + ], + [t] + ); + + const resourceStrategyOptions = useMemo( + () => [ + { + id: "all" as const, + title: t("alertingAllResources"), + description: t("alertingAllResourcesDescription") + }, + { + id: "specific" as const, + title: t("alertingSpecificResources"), + description: t("alertingSpecificResourcesDescription") + } + ], + [t] + ); + + return ( +
+ ( + + {t("alertingSourceType")} + + + + )} + /> + {sourceType === "site" ? ( + <> + ( + + { + field.onChange(v === "all"); + if (v === "all") { + setValue("siteIds", []); + } + }} + cols={2} + /> + + + )} + /> + {!allSites && ( + ( + + + {t("alertingPickSites")} + + + + + )} + /> + )} + + ) : sourceType === "resource" ? ( + <> + ( + + { + field.onChange(v === "all"); + if (v === "all") { + setValue("resourceIds", []); + } + }} + cols={2} + /> + + + )} + /> + {!allResources && ( + ( + + + {t("alertingPickResources")} + + + + + )} + /> + )} + + ) : ( + <> + ( + + { + field.onChange(v === "all"); + if (v === "all") { + setValue("healthCheckIds", []); + } + }} + cols={2} + /> + + + )} + /> + {!allHealthChecks && ( + ( + + + {t("alertingPickHealthChecks")} + + + + + )} + /> + )} + + )} +
+ ); +} + +export function AlertRuleTriggerFields({ + control +}: { + control: Control; +}) { + const t = useTranslations(); + const sourceType = useWatch({ control, name: "sourceType" }); + return ( + ( + + {t("alertingTrigger")} + + + + )} + /> + ); +} diff --git a/src/components/alert-rule-editor/AlertRuleGraphEditor.tsx b/src/components/alert-rule-editor/AlertRuleGraphEditor.tsx new file mode 100644 index 000000000..70667cc2e --- /dev/null +++ b/src/components/alert-rule-editor/AlertRuleGraphEditor.tsx @@ -0,0 +1,366 @@ +"use client"; + +import { + ActionBlock, + AddActionPanel, + AlertRuleSourceFields, + AlertRuleTriggerFields +} from "@app/components/alert-rule-editor/AlertRuleFields"; +import { SettingsContainer } from "@app/components/Settings"; +import { Button } from "@app/components/ui/button"; +import { Card, CardContent } from "@app/components/ui/card"; +import { + Form, + FormControl, + FormField, + FormItem, + FormLabel, + FormMessage +} from "@app/components/ui/form"; +import { Input } from "@app/components/ui/input"; +import { toast } from "@app/hooks/useToast"; +import { + buildFormSchema, + defaultFormValues, + formValuesToApiPayload, + type AlertRuleFormValues +} from "@app/lib/alertRuleForm"; +import { createApiClient, formatAxiosError } from "@app/lib/api"; +import { useEnvContext } from "@app/hooks/useEnvContext"; +import type { CreateAlertRuleResponse } from "@server/private/routers/alertRule"; +import type { AxiosResponse } from "axios"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { ChevronLeft, Cog, Flag, Zap } from "lucide-react"; +import Link from "next/link"; +import { useRouter } from "next/navigation"; +import { useMemo, useState, type ReactNode } from "react"; +import { useFieldArray, useForm } from "react-hook-form"; +import { useTranslations } from "next-intl"; +import { PaidFeaturesAlert } from "@app/components/PaidFeaturesAlert"; +import { SwitchInput } from "@app/components/SwitchInput"; +import { tierMatrix 
} from "@server/lib/billing/tierMatrix"; + +const FORM_ID = "alert-rule-form"; + +type StepAccent = { + labelClass: string; + icon: typeof Flag; +}; + +type AlertRuleGraphEditorProps = { + orgId: string; + alertRuleId?: number; + initialValues: AlertRuleFormValues; + isNew: boolean; + disabled?: boolean; +}; + +function VerticalRuleStep({ + stepNumber, + isLast, + title, + accent, + children +}: { + stepNumber: number; + isLast: boolean; + title: string; + accent: StepAccent; + children: ReactNode; +}) { + const Icon = accent.icon; + return ( +
  • +
    +
    + {stepNumber} +
    + {!isLast && ( +
    + )} +
    +
    +
    + + {title} +
    +
    + {children} +
    +
    +
  • + ); +} + +export default function AlertRuleGraphEditor({ + orgId, + alertRuleId, + initialValues, + isNew, + disabled = false +}: AlertRuleGraphEditorProps) { + const t = useTranslations(); + const router = useRouter(); + const api = createApiClient(useEnvContext()); + const [isSaving, setIsSaving] = useState(false); + const schema = useMemo(() => buildFormSchema(t), [t]); + const form = useForm({ + resolver: zodResolver(schema), + defaultValues: initialValues ?? defaultFormValues() + }); + + const { fields, append, remove, update } = useFieldArray({ + control: form.control, + name: "actions" + }); + + const onSubmit = form.handleSubmit(async (values) => { + setIsSaving(true); + try { + const payload = formValuesToApiPayload(values); + if (isNew) { + const res = await api.put< + AxiosResponse + >(`/org/${orgId}/alert-rule`, payload); + toast({ title: t("alertingRuleSaved") }); + router.replace( + `/${orgId}/settings/alerting/${res.data.data.alertRuleId}` + ); + } else { + await api.post( + `/org/${orgId}/alert-rule/${alertRuleId}`, + payload + ); + toast({ title: t("alertingRuleSaved") }); + } + } catch (e) { + toast({ + title: t("error"), + description: formatAxiosError(e), + variant: "destructive" + }); + } finally { + setIsSaving(false); + } + }); + + return ( + + + + +
    + + +
    +
      + +
      + +
      +
      + +
      + +
      +
      + +
      +
      + { + if (type === "notify") { + append({ + type: "notify", + userTags: [], + roleTags: [], + emailTags: [] + }); + } else { + append({ + type: "webhook", + url: "", + method: "POST", + headers: [ + { + key: "", + value: "" + } + ], + authType: "none", + bearerToken: "", + basicCredentials: + "", + customHeaderName: + "", + customHeaderValue: + "" + }); + } + }} + /> + {fields.map((f, index) => ( + + remove(index) + } + onUpdate={(val) => + update(index, val) + } + canRemove + /> + ))} +
      +
      +
      +
    +
    +
    +
    + + + ); +} diff --git a/src/components/multi-site-selector.tsx b/src/components/multi-site-selector.tsx new file mode 100644 index 000000000..407e3b3e1 --- /dev/null +++ b/src/components/multi-site-selector.tsx @@ -0,0 +1,117 @@ +import { orgQueries } from "@app/lib/queries"; +import { useQuery } from "@tanstack/react-query"; +import { useMemo, useState } from "react"; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList +} from "./ui/command"; +import { Checkbox } from "./ui/checkbox"; +import { useTranslations } from "next-intl"; +import { useDebounce } from "use-debounce"; +import type { Selectedsite } from "./site-selector"; + +export type MultiSitesSelectorProps = { + orgId: string; + selectedSites: Selectedsite[]; + onSelectionChange: (sites: Selectedsite[]) => void; + filterTypes?: string[]; +}; + +export function formatMultiSitesSelectorLabel( + selectedSites: Selectedsite[], + t: (key: string, values?: { count: number }) => string +): string { + if (selectedSites.length === 0) { + return t("selectSites"); + } + if (selectedSites.length === 1) { + return selectedSites[0]!.name; + } + return t("multiSitesSelectorSitesCount", { + count: selectedSites.length + }); +} + +export function MultiSitesSelector({ + orgId, + selectedSites, + onSelectionChange, + filterTypes +}: MultiSitesSelectorProps) { + const t = useTranslations(); + const [siteSearchQuery, setSiteSearchQuery] = useState(""); + const [debouncedQuery] = useDebounce(siteSearchQuery, 150); + + const { data: sites = [] } = useQuery( + orgQueries.sites({ + orgId, + query: debouncedQuery, + perPage: 10 + }) + ); + + const sitesShown = useMemo(() => { + const base = filterTypes + ? 
sites.filter((s) => filterTypes.includes(s.type)) + : [...sites]; + if (debouncedQuery.trim().length === 0 && selectedSites.length > 0) { + const selectedNotInBase = selectedSites.filter( + (sel) => !base.some((s) => s.siteId === sel.siteId) + ); + return [...selectedNotInBase, ...base]; + } + return base; + }, [debouncedQuery, sites, selectedSites, filterTypes]); + + const selectedIds = useMemo( + () => new Set(selectedSites.map((s) => s.siteId)), + [selectedSites] + ); + + const toggleSite = (site: Selectedsite) => { + if (selectedIds.has(site.siteId)) { + onSelectionChange( + selectedSites.filter((s) => s.siteId !== site.siteId) + ); + } else { + onSelectionChange([...selectedSites, site]); + } + }; + + return ( + + setSiteSearchQuery(v)} + /> + + {t("siteNotFound")} + + {sitesShown.map((site) => ( + { + toggleSite(site); + }} + > + {}} + aria-hidden + tabIndex={-1} + /> + {site.name} + + ))} + + + + ); +} diff --git a/src/components/resource-target-address-item.tsx b/src/components/resource-target-address-item.tsx index 851b64b54..c801844ce 100644 --- a/src/components/resource-target-address-item.tsx +++ b/src/components/resource-target-address-item.tsx @@ -12,14 +12,6 @@ import { useTranslations } from "next-intl"; import { useMemo, useState } from "react"; import { ContainersSelector } from "./ContainersSelector"; import { Button } from "./ui/button"; -import { - Command, - CommandEmpty, - CommandGroup, - CommandInput, - CommandItem, - CommandList -} from "./ui/command"; import { Input } from "./ui/input"; import { Popover, PopoverContent, PopoverTrigger } from "./ui/popover"; import { Select, SelectContent, SelectItem, SelectTrigger } from "./ui/select"; @@ -212,6 +204,12 @@ export function ResourceTargetAddressItem({ proxyTarget.port === 0 ? 
"" : proxyTarget.port } className="w-18.75 px-2 border-none placeholder-gray-400 rounded-l-xs" + type="number" + onKeyDown={(e) => { + if (["e", "E", "+", "-", "."].includes(e.key)) { + e.preventDefault(); + } + }} onBlur={(e) => { const value = parseInt(e.target.value, 10); if (!isNaN(value) && value > 0) { @@ -227,6 +225,7 @@ export function ResourceTargetAddressItem({ } }} /> +
    ); diff --git a/src/components/ui/checkbox.tsx b/src/components/ui/checkbox.tsx index 261655bb0..5cffd8978 100644 --- a/src/components/ui/checkbox.tsx +++ b/src/components/ui/checkbox.tsx @@ -43,8 +43,8 @@ const Checkbox = React.forwardRef< className={cn(checkboxVariants({ variant }), className)} {...props} > - - + + )); diff --git a/src/lib/alertRuleForm.ts b/src/lib/alertRuleForm.ts new file mode 100644 index 000000000..115c9fcf5 --- /dev/null +++ b/src/lib/alertRuleForm.ts @@ -0,0 +1,456 @@ +import type { Tag } from "@app/components/tags/tag-input"; +import { z } from "zod"; + +// --------------------------------------------------------------------------- +// Shared primitive schemas +// --------------------------------------------------------------------------- + +export const tagSchema = z.object({ + id: z.string(), + text: z.string() +}); + +// --------------------------------------------------------------------------- +// Form-layer types +// NOTE: the form uses "health_check_unhealthy" internally; it maps to the +// backend's "health_check_unhealthy" at the API boundary. 
+// --------------------------------------------------------------------------- + +export type AlertTrigger = + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | "resource_unhealthy" + | "resource_toggle"; + +export type AlertRuleFormAction = + | { + type: "notify"; + userTags: Tag[]; + roleTags: Tag[]; + emailTags: Tag[]; + } + | { + type: "webhook"; + url: string; + method: string; + headers: { key: string; value: string }[]; + authType: "none" | "bearer" | "basic" | "custom"; + bearerToken: string; + basicCredentials: string; + customHeaderName: string; + customHeaderValue: string; + }; + +export type AlertRuleFormValues = { + name: string; + enabled: boolean; + sourceType: "site" | "health_check" | "resource"; + allSites: boolean; + siteIds: number[]; + allHealthChecks: boolean; + healthCheckIds: number[]; + allResources: boolean; + resourceIds: number[]; + trigger: AlertTrigger; + actions: AlertRuleFormAction[]; +}; + +// --------------------------------------------------------------------------- +// API boundary types +// --------------------------------------------------------------------------- + +export type AlertRuleApiPayload = { + name: string; + eventType: + | "site_online" + | "site_offline" + | "site_toggle" + | "health_check_healthy" + | "health_check_unhealthy" + | "health_check_toggle" + | "resource_healthy" + | "resource_unhealthy" + | "resource_toggle"; + enabled: boolean; + allSites: boolean; + siteIds: number[]; + allHealthChecks: boolean; + healthCheckIds: number[]; + allResources: boolean; + resourceIds: number[]; + userIds: string[]; + roleIds: number[]; + emails: string[]; + webhookActions: { + webhookUrl: string; + enabled: boolean; + config?: string; + }[]; +}; + +// Shape of what GET /org/:orgId/alert-rule/:alertRuleId returns +export type AlertRuleApiResponse = { + alertRuleId: number; + orgId: string; + name: string; + 
eventType: string; + enabled: boolean; + cooldownSeconds: number; + lastTriggeredAt: number | null; + createdAt: number; + updatedAt: number; + siteIds: number[]; + healthCheckIds: number[]; + resourceIds: number[]; + recipients: { + recipientId: number; + userId: string | null; + roleId: number | null; + email: string | null; + }[]; + webhookActions: { + webhookActionId: number; + webhookUrl: string; + enabled: boolean; + lastSentAt: number | null; + config: { + authType: string; + bearerToken?: string; + basicCredentials?: string; + customHeaderName?: string; + customHeaderValue?: string; + headers?: { key: string; value: string }[]; + method?: string; + } | null; + }[]; +}; + +// --------------------------------------------------------------------------- +// Zod form schema (for react-hook-form validation) +// --------------------------------------------------------------------------- + +export function buildFormSchema(t: (k: string) => string) { + return z + .object({ + name: z + .string() + .min(1, { message: t("alertingErrorNameRequired") }), + enabled: z.boolean(), + sourceType: z.enum(["site", "health_check", "resource"]), + allSites: z.boolean(), + siteIds: z.array(z.number()), + allHealthChecks: z.boolean(), + healthCheckIds: z.array(z.number()), + allResources: z.boolean(), + resourceIds: z.array(z.number()), + trigger: z.enum([ + "site_online", + "site_offline", + "site_toggle", + "health_check_healthy", + "health_check_unhealthy", + "health_check_toggle", + "resource_healthy", + "resource_unhealthy", + "resource_toggle" + ]), + actions: z.array( + z.discriminatedUnion("type", [ + z.object({ + type: z.literal("notify"), + userTags: z.array(tagSchema), + roleTags: z.array(tagSchema), + emailTags: z.array(tagSchema) + }), + z.object({ + type: z.literal("webhook"), + url: z.string(), + method: z.string(), + headers: z.array( + z.object({ + key: z.string(), + value: z.string() + }) + ), + authType: z.enum(["none", "bearer", "basic", "custom"]), + 
bearerToken: z.string(), + basicCredentials: z.string(), + customHeaderName: z.string(), + customHeaderValue: z.string() + }) + ]) + ) + }) + .superRefine((val, ctx) => { + if (val.actions.length === 0) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorActionsMin"), + path: ["actions"] + }); + } + if ( + val.sourceType === "site" && + !val.allSites && + val.siteIds.length === 0 + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorPickSites"), + path: ["siteIds"] + }); + } + if ( + val.sourceType === "health_check" && + !val.allHealthChecks && + val.healthCheckIds.length === 0 + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorPickHealthChecks"), + path: ["healthCheckIds"] + }); + } + if ( + val.sourceType === "resource" && + !val.allResources && + val.resourceIds.length === 0 + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorPickResources"), + path: ["resourceIds"] + }); + } + const siteTriggers: AlertTrigger[] = [ + "site_online", + "site_offline", + "site_toggle" + ]; + const hcTriggers: AlertTrigger[] = [ + "health_check_healthy", + "health_check_unhealthy", + "health_check_toggle" + ]; + const resourceTriggers: AlertTrigger[] = [ + "resource_healthy", + "resource_unhealthy", + "resource_toggle" + ]; + if ( + val.sourceType === "site" && + !siteTriggers.includes(val.trigger) + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorTriggerSite"), + path: ["trigger"] + }); + } + if ( + val.sourceType === "health_check" && + !hcTriggers.includes(val.trigger) + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorTriggerHealth"), + path: ["trigger"] + }); + } + if ( + val.sourceType === "resource" && + !resourceTriggers.includes(val.trigger) + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorTriggerResource"), + path: ["trigger"] + }); + } + val.actions.forEach((a, i) => { 
+ if (a.type === "notify") { + if ( + a.userTags.length === 0 && + a.roleTags.length === 0 && + a.emailTags.length === 0 + ) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorNotifyRecipients"), + path: ["actions", i, "userTags"] + }); + } + } + if (a.type === "webhook") { + try { + new URL(a.url.trim()); + } catch { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + message: t("alertingErrorWebhookUrl"), + path: ["actions", i, "url"] + }); + } + } + }); + }); +} + +// --------------------------------------------------------------------------- +// defaultFormValues +// --------------------------------------------------------------------------- + +export function defaultFormValues(): AlertRuleFormValues { + return { + name: "", + enabled: true, + sourceType: "site", + allSites: true, + siteIds: [], + allHealthChecks: true, + healthCheckIds: [], + allResources: true, + resourceIds: [], + trigger: "site_toggle", + actions: [] + }; +} + +// --------------------------------------------------------------------------- +// API response → form values +// --------------------------------------------------------------------------- + +export function apiResponseToFormValues( + rule: AlertRuleApiResponse +): AlertRuleFormValues { + const trigger = rule.eventType; + const sourceType = rule.eventType.startsWith("site_") + ? "site" + : rule.eventType.startsWith("resource_") + ? "resource" + : "health_check"; + + // Collect notify recipients into a single notify action (if any) + const userTags = rule.recipients + .filter((r) => r.userId != null) + .map((r) => ({ id: r.userId!, text: r.userId! })); + const roleTags = rule.recipients + .filter((r) => r.roleId != null) + .map((r) => ({ id: String(r.roleId!), text: String(r.roleId!) })); + const emailTags = rule.recipients + .filter((r) => r.email != null) + .map((r) => ({ id: r.email!, text: r.email! 
})); + + const actions: AlertRuleFormAction[] = []; + + if (userTags.length > 0 || roleTags.length > 0 || emailTags.length > 0) { + actions.push({ type: "notify", userTags, roleTags, emailTags }); + } + + // Each webhook action becomes its own form webhook action + for (const w of rule.webhookActions) { + const cfg = w.config; + actions.push({ + type: "webhook", + url: w.webhookUrl, + method: cfg?.method ?? "POST", + headers: cfg?.headers?.length + ? cfg.headers + : [{ key: "", value: "" }], + authType: + (cfg?.authType as "none" | "bearer" | "basic" | "custom") ?? + "none", + bearerToken: cfg?.bearerToken ?? "", + basicCredentials: cfg?.basicCredentials ?? "", + customHeaderName: cfg?.customHeaderName ?? "", + customHeaderValue: cfg?.customHeaderValue ?? "" + }); + } + + const allSites = sourceType === "site" && rule.siteIds.length === 0; + const allHealthChecks = + sourceType === "health_check" && rule.healthCheckIds.length === 0; + const allResources = + sourceType === "resource" && (rule.resourceIds?.length ?? 0) === 0; + + return { + name: rule.name, + enabled: rule.enabled, + sourceType, + allSites, + siteIds: rule.siteIds, + allHealthChecks, + healthCheckIds: rule.healthCheckIds, + allResources, + resourceIds: rule.resourceIds ?? 
[], + trigger: trigger as AlertTrigger, + actions + }; +} + +// --------------------------------------------------------------------------- +// Form values → API payload +// --------------------------------------------------------------------------- + +export function formValuesToApiPayload( + values: AlertRuleFormValues +): AlertRuleApiPayload { + const eventType = values.trigger; + + // Collect all notify-type actions and merge their recipient lists + const allUserIds: string[] = []; + const allRoleIds: number[] = []; + const allEmails: string[] = []; + + const webhookActions: AlertRuleApiPayload["webhookActions"] = []; + + for (const action of values.actions) { + if (action.type === "notify") { + allUserIds.push(...action.userTags.map((t) => t.id)); + allRoleIds.push(...action.roleTags.map((t) => Number(t.id))); + allEmails.push( + ...action.emailTags.map((t) => t.text.trim()).filter(Boolean) + ); + } else if (action.type === "webhook") { + webhookActions.push({ + webhookUrl: action.url.trim(), + enabled: true, + config: JSON.stringify({ + authType: action.authType, + bearerToken: action.bearerToken || undefined, + basicCredentials: action.basicCredentials || undefined, + customHeaderName: action.customHeaderName || undefined, + customHeaderValue: action.customHeaderValue || undefined, + headers: action.headers.filter((h) => h.key.trim()), + method: action.method + }) + }); + } + } + + // Deduplicate + const uniqueUserIds = [...new Set(allUserIds)]; + const uniqueRoleIds: number[] = [...new Set(allRoleIds)]; + const uniqueEmails = [...new Set(allEmails)]; + + return { + name: values.name.trim(), + eventType, + enabled: values.enabled, + allSites: values.allSites, + siteIds: values.allSites ? [] : values.siteIds, + allHealthChecks: values.allHealthChecks, + healthCheckIds: values.allHealthChecks ? [] : values.healthCheckIds, + allResources: values.allResources, + resourceIds: values.allResources ? 
[] : values.resourceIds, + userIds: uniqueUserIds, + roleIds: uniqueRoleIds, + emails: uniqueEmails, + webhookActions + }; +} diff --git a/src/lib/queries.ts b/src/lib/queries.ts index 7a22639fe..1e7074e3a 100644 --- a/src/lib/queries.ts +++ b/src/lib/queries.ts @@ -8,6 +8,7 @@ import type { ListResourceNamesResponse, ListResourcesResponse } from "@server/routers/resource"; +import type { ListAlertRulesResponse } from "@server/private/routers/alertRule"; import type { ListRolesResponse } from "@server/routers/role"; import type { ListSitesResponse } from "@server/routers/site"; import type { @@ -27,7 +28,8 @@ import type { AxiosResponse } from "axios"; import z from "zod"; import { remote } from "./api"; import { durationToMs } from "./durationToMs"; -import { wait } from "./wait"; +import { ListHealthChecksResponse } from "@server/routers/healthChecks/types"; +import { StatusHistoryResponse } from "@server/lib/statusHistory"; export type ProductUpdate = { link: string | null; @@ -156,7 +158,8 @@ export const orgQueries = { queryKey: ["ORG", orgId, "SITES", { query, perPage }] as const, queryFn: async ({ signal, meta }) => { const sp = new URLSearchParams({ - pageSize: perPage.toString() + pageSize: perPage.toString(), + status: "approved" }); if (query?.trim()) { @@ -230,6 +233,204 @@ export const orgQueries = { return res.data.data.resources; } + }), + + healthChecks: ({ + orgId, + perPage = 10_000 + }: { + orgId: string; + perPage?: number; + }) => + queryOptions({ + queryKey: ["ORG", orgId, "HEALTH_CHECKS", { perPage }] as const, + queryFn: async ({ signal, meta }) => { + const sp = new URLSearchParams({ + limit: perPage.toString(), + offset: "0" + }); + const res = await meta!.api.get< + AxiosResponse + >(`/org/${orgId}/health-checks?${sp.toString()}`, { signal }); + return res.data.data.healthChecks; + } + }), + + alertRules: ({ + orgId, + limit = 20, + offset = 0, + query, + siteId, + resourceId + }: { + orgId: string; + limit?: number; + offset?: number; + 
query?: string; + siteId?: number; + resourceId?: number; + }) => + queryOptions({ + queryKey: ["ORG", orgId, "ALERT_RULES", { limit, offset, query, siteId, resourceId }] as const, + queryFn: async ({ signal, meta }) => { + const sp = new URLSearchParams(); + sp.set("limit", String(limit)); + sp.set("offset", String(offset)); + if (query) sp.set("query", query); + if (siteId != null) sp.set("siteId", String(siteId)); + if (resourceId != null) sp.set("resourceId", String(resourceId)); + const res = await meta!.api.get< + AxiosResponse + >(`/org/${orgId}/alert-rules?${sp.toString()}`, { signal }); + return { + alertRules: res.data.data.alertRules, + pagination: res.data.data.pagination + }; + } + }), + + alertRulesForSource: ({ + orgId, + siteId, + resourceId + }: { + orgId: string; + siteId?: number; + resourceId?: number; + }) => + queryOptions({ + queryKey: ["ORG", orgId, "ALERT_RULES", { siteId, resourceId }] as const, + queryFn: async ({ signal, meta }) => { + const sp = new URLSearchParams(); + if (siteId != null) sp.set("siteId", String(siteId)); + if (resourceId != null) sp.set("resourceId", String(resourceId)); + const res = await meta!.api.get< + AxiosResponse + >(`/org/${orgId}/alert-rules?${sp.toString()}`, { signal }); + return res.data.data.alertRules; + } + }), + + standaloneHealthChecks: ({ + orgId, + limit = 20, + offset = 0, + query + }: { + orgId: string; + limit?: number; + offset?: number; + query?: string; + }) => + queryOptions({ + queryKey: ["ORG", orgId, "STANDALONE_HEALTH_CHECKS", { limit, offset, query }] as const, + queryFn: async ({ signal, meta }) => { + const sp = new URLSearchParams(); + sp.set("limit", String(limit)); + sp.set("offset", String(offset)); + if (query) sp.set("query", query); + const res = await meta!.api.get< + AxiosResponse<{ + healthChecks: { + targetHealthCheckId: number; + name: string; + siteId: number | null; + siteName: string | null; + siteNiceId: string | null; + hcEnabled: boolean; + hcHealth: "unknown" | 
"healthy" | "unhealthy"; + hcMode: string | null; + hcHostname: string | null; + hcPort: number | null; + hcPath: string | null; + hcScheme: string | null; + hcMethod: string | null; + hcInterval: number | null; + hcUnhealthyInterval: number | null; + hcTimeout: number | null; + hcHeaders: string | null; + hcFollowRedirects: boolean | null; + hcStatus: number | null; + hcTlsServerName: string | null; + hcHealthyThreshold: number | null; + hcUnhealthyThreshold: number | null; + resourceId: number | null; + resourceName: string | null; + resourceNiceId: string | null; + }[]; + pagination: { + total: number; + limit: number; + offset: number; + }; + }> + >(`/org/${orgId}/health-checks?${sp.toString()}`, { signal }); + return { + healthChecks: res.data.data.healthChecks, + pagination: res.data.data.pagination + }; + } + }), + siteStatusHistory: ({ + siteId, + days = 90 + }: { + siteId: number; + days?: number; + }) => + queryOptions({ + queryKey: ["SITE_STATUS_HISTORY", siteId, days] as const, + queryFn: async ({ signal, meta }) => { + const res = await meta!.api.get< + AxiosResponse + >(`/site/${siteId}/status-history?days=${days}`, { signal }); + return res.data.data; + } + }), + + resourceStatusHistory: ({ + resourceId, + days = 90 + }: { + resourceId?: number; + days?: number; + }) => + queryOptions({ + queryKey: ["RESOURCE_STATUS_HISTORY", resourceId, days] as const, + queryFn: async ({ signal, meta }) => { + const res = await meta!.api.get< + AxiosResponse + >(`/resource/${resourceId}/status-history?days=${days}`, { signal }); + return res.data.data; + } + }), + + healthCheckStatusHistory: ({ + orgId, + healthCheckId, + days = 90 + }: { + orgId: string; + healthCheckId: number; + days?: number; + }) => + queryOptions({ + queryKey: [ + "HC_STATUS_HISTORY", + orgId, + healthCheckId, + days + ] as const, + queryFn: async ({ signal, meta }) => { + const res = await meta!.api.get< + AxiosResponse + >( + 
`/org/${orgId}/health-check/${healthCheckId}/status-history?days=${days}`, + { signal } + ); + return res.data.data; + } }) }; diff --git a/src/services/locale.ts b/src/services/locale.ts index 81be42bc1..2d3e1d21f 100644 --- a/src/services/locale.ts +++ b/src/services/locale.ts @@ -17,14 +17,14 @@ export async function getUserLocale(): Promise { return cookieLocale as Locale; } - // No cookie found — try to restore from user's saved locale in DB + // No cookie found - try to restore from user's saved locale in DB try { const res = await internal.get("/user", await authCookieHeader()); const userLocale = res.data?.data?.locale; if (userLocale && locales.includes(userLocale as Locale)) { // Try to cache in a cookie so subsequent requests skip the API // call. cookies().set() is only permitted in Server Actions and - // Route Handlers — not during rendering — so we isolate it so + // Route Handlers - not during rendering - so we isolate it so // that a write failure doesn't prevent the locale from being // returned for the current request. try { @@ -40,7 +40,7 @@ export async function getUserLocale(): Promise { return userLocale as Locale; } } catch { - // User not logged in or API unavailable — fall through + // User not logged in or API unavailable - fall through } const headerList = await headers();