Compare commits


10 Commits

Author         SHA1        Message                                                Date
Owen Schwartz  bb5853827b  Merge pull request #2948 from fosrl/dev (1.18.1-s.3)  2026-04-30 14:11:16 -07:00
Owen           68f5512732  Handle messaging in the background; dont time out     2026-04-30 14:00:32 -07:00
Owen           416e124c02  Rotate the secret on the new things using it          2026-04-30 11:53:55 -07:00
Owen           d3e4d8cda8  Fix pr blueprints not picking up site                 2026-04-30 11:39:37 -07:00
Owen           81972dbb73  Add name to migration (Fixes #2943)                   2026-04-30 10:56:12 -07:00
Owen Schwartz  b715786a1e  Merge pull request #2939 from fosrl/dev (1.18.1-s.2)  2026-04-29 21:33:03 -07:00
Owen           ae24eb2d2c  Disable the alerts and hc when downgrading            2026-04-29 21:31:02 -07:00
Owen           20fc59dcda  Delete trial when upgrading                           2026-04-29 21:25:58 -07:00
Owen           93b09de425  Adjust cloud api endpoints                            2026-04-29 21:04:11 -07:00
Owen           bacc130453  Clean up sign and verify                              2026-04-29 17:14:22 -07:00
13 changed files with 357 additions and 161 deletions

View File

@@ -414,28 +414,18 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
# cosign is used to sign and verify container images (key and keyless)
# cosign is used to sign container images using keyless (OIDC) signing
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
- name: Sign (GHCR, keyless)
# Sign each GHCR image by digest using keyless (OIDC) signing via Sigstore/Rekor.
# Signatures are stored in the registry alongside the image.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
# Track failures
FAILED_TAGS=()
SUCCESSFUL_TAGS=()
# Determine if this is an RC release
IS_RC="false"
if [[ "$TAG" == *"-rc."* ]]; then
@@ -463,95 +453,47 @@ jobs:
)
fi
# Sign each image variant for both registries
for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
TAG_FAILED=false
FAILED_TAGS=()
SUCCESSFUL_TAGS=()
# Wrap the entire tag processing in error handling
(
set -e
DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
REF="${BASE_IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
echo "Processing ${GHCR_IMAGE}:${IMAGE_TAG}"
TAG_FAILED=false
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
(
set -e
DIGEST="$(skopeo inspect --retry-times 3 docker://${GHCR_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
REF="${GHCR_IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
) || TAG_FAILED=true
# Retry wrapper for verification to handle registry propagation delays
retry_verify() {
local cmd="$1"
local attempts=6
local delay=5
local i=1
until eval "$cmd"; do
if [ $i -ge $attempts ]; then
echo "Verification failed after $attempts attempts"
return 1
fi
echo "Verification not yet available. Retry $i/$attempts after ${delay}s..."
sleep $delay
i=$((i+1))
delay=$((delay*2))
# Cap the delay to avoid very long waits
if [ $delay -gt 60 ]; then delay=60; fi
done
return 0
}
echo "==> cosign verify (public key) ${REF}"
if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${REF}' -o text"; then
VERIFIED_INDEX=true
else
VERIFIED_INDEX=false
fi
echo "==> cosign verify (keyless policy) ${REF}"
if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${REF}' -o text"; then
VERIFIED_INDEX_KEYLESS=true
else
VERIFIED_INDEX_KEYLESS=false
fi
# Check if verification succeeded
if [ "${VERIFIED_INDEX}" != "true" ] && [ "${VERIFIED_INDEX_KEYLESS}" != "true" ]; then
echo "⚠️ WARNING: Verification not available for ${BASE_IMAGE}:${IMAGE_TAG}"
echo "This may be due to registry propagation delays. Continuing anyway."
fi
) || TAG_FAILED=true
if [ "$TAG_FAILED" = "true" ]; then
echo "⚠️ WARNING: Failed to sign/verify ${BASE_IMAGE}:${IMAGE_TAG}"
FAILED_TAGS+=("${BASE_IMAGE}:${IMAGE_TAG}")
else
echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
SUCCESSFUL_TAGS+=("${BASE_IMAGE}:${IMAGE_TAG}")
fi
done
if [ "$TAG_FAILED" = "true" ]; then
echo "⚠️ WARNING: Failed to sign ${GHCR_IMAGE}:${IMAGE_TAG}"
FAILED_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
else
echo "✓ Successfully signed ${GHCR_IMAGE}:${IMAGE_TAG}"
SUCCESSFUL_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
fi
done
# Report summary
echo ""
echo "=========================================="
echo "Sign and Verify Summary"
echo "Sign Summary"
echo "=========================================="
echo "Successful: ${#SUCCESSFUL_TAGS[@]}"
echo "Failed: ${#FAILED_TAGS[@]}"
echo ""
if [ ${#FAILED_TAGS[@]} -gt 0 ]; then
echo "Failed tags:"
for tag in "${FAILED_TAGS[@]}"; do
echo " - $tag"
done
echo ""
echo "⚠️ WARNING: Some tags failed to sign/verify, but continuing anyway"
echo "⚠️ WARNING: Some tags failed to sign, but continuing anyway"
else
echo "✓ All images signed and verified successfully!"
echo "✓ All images signed successfully!"
fi
shell: bash
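
Since the in-workflow verification step was removed, signatures can still be checked out of band. A minimal sketch of consumer-side verification, reusing the issuer and identity pattern from the removed verify step (OWNER/REPO and TAG are placeholders, not real references):

# Verify the keyless (OIDC) signature that the workflow pushed to the registry.
cosign verify \
  --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
  --certificate-identity-regexp "^https://github.com/OWNER/REPO/.+" \
  "ghcr.io/OWNER/REPO:TAG" -o text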

View File

@@ -1,5 +1,5 @@
import { CommandModule } from "yargs";
import { db, idpOidcConfig, licenseKey } from "@server/db";
import { db, idpOidcConfig, licenseKey, certificates, eventStreamingDestinations, alertWebhookActions } from "@server/db";
import { encrypt, decrypt } from "@server/lib/crypto";
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
import { eq } from "drizzle-orm";
@@ -129,9 +129,15 @@ export const rotateServerSecret: CommandModule<
console.log("\nReading encrypted data from database...");
const idpConfigs = await db.select().from(idpOidcConfig);
const licenseKeys = await db.select().from(licenseKey);
const certs = await db.select().from(certificates);
const streamingDestinations = await db.select().from(eventStreamingDestinations);
const webhookActions = await db.select().from(alertWebhookActions);
console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`);
console.log(`Found ${licenseKeys.length} license key(s)`);
console.log(`Found ${certs.length} certificate(s)`);
console.log(`Found ${streamingDestinations.length} event streaming destination(s)`);
console.log(`Found ${webhookActions.length} alert webhook action(s)`);
// Prepare all decrypted and re-encrypted values
console.log("\nDecrypting and re-encrypting values...");
@@ -149,8 +155,27 @@ export const rotateServerSecret: CommandModule<
encryptedInstanceId: string;
};
type CertUpdate = {
certId: number;
encryptedCertFile: string | null;
encryptedKeyFile: string | null;
};
type StreamingDestinationUpdate = {
destinationId: number;
encryptedConfig: string;
};
type WebhookActionUpdate = {
webhookActionId: number;
encryptedConfig: string;
};
const idpUpdates: IdpUpdate[] = [];
const licenseKeyUpdates: LicenseKeyUpdate[] = [];
const certUpdates: CertUpdate[] = [];
const streamingDestinationUpdates: StreamingDestinationUpdate[] = [];
const webhookActionUpdates: WebhookActionUpdate[] = [];
// Process idpOidcConfig entries
for (const idpConfig of idpConfigs) {
@@ -217,6 +242,70 @@ export const rotateServerSecret: CommandModule<
}
}
// Process certificate entries
for (const cert of certs) {
try {
const encryptedCertFile = cert.certFile
? encrypt(decrypt(cert.certFile, oldSecret), newSecret)
: null;
const encryptedKeyFile = cert.keyFile
? encrypt(decrypt(cert.keyFile, oldSecret), newSecret)
: null;
certUpdates.push({
certId: cert.certId,
encryptedCertFile,
encryptedKeyFile
});
} catch (error) {
console.error(
`Error processing certificate ${cert.certId} (${cert.domain}):`,
error
);
throw error;
}
}
// Process eventStreamingDestinations entries
for (const dest of streamingDestinations) {
try {
const decryptedConfig = decrypt(dest.config, oldSecret);
const encryptedConfig = encrypt(decryptedConfig, newSecret);
streamingDestinationUpdates.push({
destinationId: dest.destinationId,
encryptedConfig
});
} catch (error) {
console.error(
`Error processing event streaming destination ${dest.destinationId}:`,
error
);
throw error;
}
}
// Process alertWebhookActions entries
for (const webhook of webhookActions) {
try {
if (webhook.config == null) continue;
const decryptedConfig = decrypt(webhook.config, oldSecret);
const encryptedConfig = encrypt(decryptedConfig, newSecret);
webhookActionUpdates.push({
webhookActionId: webhook.webhookActionId,
encryptedConfig
});
} catch (error) {
console.error(
`Error processing alert webhook action ${webhook.webhookActionId}:`,
error
);
throw error;
}
}
// Perform all database updates in a single transaction
console.log("\nUpdating database in transaction...");
await db.transaction(async (trx) => {
@@ -250,10 +339,50 @@ export const rotateServerSecret: CommandModule<
instanceId: update.encryptedInstanceId
});
}
// Update certificate entries
for (const update of certUpdates) {
await trx
.update(certificates)
.set({
certFile: update.encryptedCertFile,
keyFile: update.encryptedKeyFile
})
.where(eq(certificates.certId, update.certId));
}
// Update event streaming destination entries
for (const update of streamingDestinationUpdates) {
await trx
.update(eventStreamingDestinations)
.set({ config: update.encryptedConfig })
.where(
eq(
eventStreamingDestinations.destinationId,
update.destinationId
)
);
}
// Update alert webhook action entries
for (const update of webhookActionUpdates) {
await trx
.update(alertWebhookActions)
.set({ config: update.encryptedConfig })
.where(
eq(
alertWebhookActions.webhookActionId,
update.webhookActionId
)
);
}
});
console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`);
console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`);
console.log(`Rotated ${certUpdates.length} certificate(s)`);
console.log(`Rotated ${streamingDestinationUpdates.length} event streaming destination(s)`);
console.log(`Rotated ${webhookActionUpdates.length} alert webhook action(s)`);
// Update config file with new secret
console.log("\nUpdating config file...");
@@ -270,6 +399,9 @@ export const rotateServerSecret: CommandModule<
console.log(`\nSummary:`);
console.log(` - OIDC IdP configurations: ${idpUpdates.length}`);
console.log(` - License keys: ${licenseKeyUpdates.length}`);
console.log(` - Certificates: ${certUpdates.length}`);
console.log(` - Event streaming destinations: ${streamingDestinationUpdates.length}`);
console.log(` - Alert webhook actions: ${webhookActionUpdates.length}`);
console.log(
`\n IMPORTANT: Restart the server for the new secret to take effect.`
);
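
Every table handled above follows the same round-trip: decrypt with the old secret, re-encrypt with the new one, and defer the write to the single transaction. A minimal sketch of that pattern, assuming encrypt and decrypt from @server/lib/crypto take (value, secret) as they do in the diff (the helper name is illustrative):

import { encrypt, decrypt } from "@server/lib/crypto";

// Re-encrypt one value under the new secret; null-safe for optional
// columns such as certFile/keyFile.
function rotateValue(
    value: string | null,
    oldSecret: string,
    newSecret: string
): string | null {
    return value == null
        ? null
        : encrypt(decrypt(value, oldSecret), newSecret);
}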

View File

@@ -122,8 +122,6 @@ export enum ActionsEnum {
createOrgDomain = "createOrgDomain",
deleteOrgDomain = "deleteOrgDomain",
restartOrgDomain = "restartOrgDomain",
sendUsageNotification = "sendUsageNotification",
sendTrialNotification = "sendTrialNotification",
createRemoteExitNode = "createRemoteExitNode",
updateRemoteExitNode = "updateRemoteExitNode",
getRemoteExitNode = "getRemoteExitNode",

View File

@@ -566,6 +566,17 @@ export const alertWebhookActions = pgTable("alertWebhookActions", {
lastSentAt: bigint("lastSentAt", { mode: "number" }) // nullable
});
export const trialNotifications = pgTable("trialNotifications", {
notificationId: serial("notificationId").primaryKey(),
subscriptionId: varchar("subscriptionId", { length: 255 })
.notNull()
.references(() => subscriptions.subscriptionId, {
onDelete: "cascade"
}),
notificationType: varchar("notificationType", { length: 50 }).notNull(), // trial_ending_5d, trial_ending_24h, trial_ended
sentAt: bigint("sentAt", { mode: "number" }).notNull()
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
@@ -604,3 +615,12 @@ export type EventStreamingCursor = InferSelectModel<
typeof eventStreamingCursors
>;
export type AlertResources = InferSelectModel<typeof alertResources>;
export type AlertHealthChecks = InferSelectModel<typeof alertHealthChecks>;
export type AlertSites = InferSelectModel<typeof alertSites>;
export type AlertRules = InferSelectModel<typeof alertRules>;
export type AlertEmailActions = InferSelectModel<typeof alertEmailActions>;
export type AlertEmailRecipients = InferSelectModel<
typeof alertEmailRecipients
>;
export type AlertWebhookActions = InferSelectModel<typeof alertWebhookActions>;
export type TrialNotification = InferSelectModel<typeof trialNotifications>;
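
The (subscriptionId, notificationType) pair is what makes the trial reminders idempotent: a sender can check for an existing row before emitting a duplicate. A hedged sketch with drizzle (the query shape and the trialNotifications export are assumptions, not the shipped sender):

import { and, eq } from "drizzle-orm";
import { db, trialNotifications } from "@server/db"; // export assumed

// True if this reminder type was already recorded for the subscription.
async function alreadySent(
    subscriptionId: string,
    notificationType: "trial_ending_5d" | "trial_ending_24h" | "trial_ended"
): Promise<boolean> {
    const [row] = await db
        .select()
        .from(trialNotifications)
        .where(
            and(
                eq(trialNotifications.subscriptionId, subscriptionId),
                eq(trialNotifications.notificationType, notificationType)
            )
        )
        .limit(1);
    return row !== undefined;
}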

View File

@@ -21,6 +21,9 @@ import {
targetHealthCheck,
users
} from "./schema";
export const certificates = sqliteTable("certificates", {
certId: integer("certId").primaryKey({ autoIncrement: true }),
@@ -569,6 +572,19 @@ export const alertWebhookActions = sqliteTable("alertWebhookActions", {
lastSentAt: integer("lastSentAt")
});
export const trialNotifications = sqliteTable("trialNotifications", {
notificationId: integer("notificationId").primaryKey({
autoIncrement: true
}),
subscriptionId: text("subscriptionId")
.notNull()
.references(() => subscriptions.subscriptionId, {
onDelete: "cascade"
}),
notificationType: text("notificationType").notNull(), // trial_ending_5d, trial_ending_24h, trial_ended
sentAt: integer("sentAt").notNull()
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
@@ -601,3 +617,10 @@ export type EventStreamingCursor = InferSelectModel<
typeof eventStreamingCursors
>;
export type AlertResources = InferSelectModel<typeof alertResources>;
export type AlertHealthChecks = InferSelectModel<typeof alertHealthChecks>;
export type AlertSites = InferSelectModel<typeof alertSites>;
export type AlertRule = InferSelectModel<typeof alertRules>;
export type AlertEmailAction = InferSelectModel<typeof alertEmailActions>;
export type AlertEmailRecipient = InferSelectModel<typeof alertEmailRecipients>;
export type AlertWebhookAction = InferSelectModel<typeof alertWebhookActions>;
export type TrialNotification = InferSelectModel<typeof trialNotifications>;

View File

@@ -131,41 +131,22 @@ export async function updateClientResources(
: [];
const allSites: { siteId: number }[] = [];
if (resourceData.site) {
let siteSingle;
const resourceSiteId = resourceData.site;
if (resourceSiteId) {
// Look up site by niceId
[siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(
eq(sites.niceId, resourceSiteId),
eq(sites.orgId, orgId)
)
// Look up site by niceId
const [siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(
eq(sites.niceId, resourceData.site),
eq(sites.orgId, orgId)
)
.limit(1);
} else if (siteId) {
// Use the provided siteId directly, but verify it belongs to the org
[siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(eq(sites.siteId, siteId), eq(sites.orgId, orgId))
)
.limit(1);
} else {
throw new Error(`Target site is required`);
)
.limit(1);
if (siteSingle) {
allSites.push(siteSingle);
}
if (!siteSingle) {
throw new Error(
`Site not found: ${resourceSiteId} in org ${orgId}`
);
}
allSites.push(siteSingle);
}
if (resourceData.sites) {
@@ -180,15 +161,31 @@ export async function updateClientResources(
)
)
.limit(1);
if (!site) {
throw new Error(
`Site not found: ${siteId} in org ${orgId}`
);
if (site) {
allSites.push(site);
}
allSites.push(site);
}
}
if (siteId && allSites.length === 0) {
// No sites resolved above: fall back to the provided siteId,
// verifying it belongs to the org
const [siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(and(eq(sites.siteId, siteId), eq(sites.orgId, orgId)))
.limit(1);
if (siteSingle) {
allSites.push(siteSingle);
}
}
if (allSites.length === 0) {
throw new Error(
`No valid sites found for private resource ${resourceNiceId} in org ${orgId}`
);
}
if (existingResource) {
let domainInfo:
| { subdomain: string | null; domainId: string }

View File

@@ -30,8 +30,10 @@ import {
userOrgRoles,
siteProvisioningKeyOrg,
siteProvisioningKeys,
alertRules,
targetHealthCheck
} from "@server/db";
import { and, eq } from "drizzle-orm";
import { and, eq, isNull } from "drizzle-orm";
/**
* Get the maximum allowed retention days for a given tier
@@ -318,6 +320,14 @@ async function disableFeature(
await disableSiteProvisioningKeys(orgId);
break;
case TierFeature.AlertingRules:
await disableAlertingRules(orgId);
break;
case TierFeature.StandaloneHealthChecks:
await disableStandaloneHealthChecks(orgId);
break;
default:
logger.warn(
`Unknown feature ${feature} for org ${orgId}, skipping`
@@ -360,8 +370,7 @@ async function disableFullRbac(orgId: string): Promise<void> {
async function disableSiteProvisioningKeys(orgId: string): Promise<void> {
const rows = await db
.select({
siteProvisioningKeyId:
siteProvisioningKeyOrg.siteProvisioningKeyId
siteProvisioningKeyId: siteProvisioningKeyOrg.siteProvisioningKeyId
})
.from(siteProvisioningKeyOrg)
.where(eq(siteProvisioningKeyOrg.orgId, orgId));
@@ -525,6 +534,29 @@ async function disablePasswordExpirationPolicies(orgId: string): Promise<void> {
logger.info(`Disabled password expiration policies for org ${orgId}`);
}
async function disableAlertingRules(orgId: string): Promise<void> {
await db
.update(alertRules)
.set({ enabled: false })
.where(eq(alertRules.orgId, orgId));
logger.info(`Disabled all alert rules for org ${orgId}`);
}
async function disableStandaloneHealthChecks(orgId: string): Promise<void> {
await db
.update(targetHealthCheck)
.set({ hcEnabled: false })
.where(
and(
eq(targetHealthCheck.orgId, orgId),
isNull(targetHealthCheck.targetId)
)
);
logger.info(`Disabled standalone health checks for org ${orgId}`);
}
async function disableAutoProvisioning(orgId: string): Promise<void> {
// Get all IDP IDs for this org through the idpOrg join table
const orgIdps = await db

View File

@@ -174,6 +174,19 @@ export async function handleSubscriptionCreated(
// TODO: update user in Sendy
}
}
// Delete the trial subscription if we have one
await db
.delete(subscriptions)
.where(
and(
eq(
subscriptions.customerId,
subscription.customer as string
),
eq(subscriptions.trial, true)
)
);
} else if (type === "license") {
logger.debug(
`License subscription created for org ${customer.orgId}, no lifecycle handling needed.`

View File

@@ -67,24 +67,20 @@ if (build == "saas") {
verifyApiKeyIsRoot,
certificates.syncCertToNewts
);
authenticated.post(
`/org/:orgId/send-usage-notification`,
verifyApiKeyIsRoot, // We are the only ones who can use the root key, so it's fine
org.sendUsageNotification
);
authenticated.post(
`/org/:orgId/send-trial-notification`,
verifyApiKeyIsRoot,
org.sendTrialNotification
);
}
authenticated.post(
`/org/:orgId/send-usage-notification`,
verifyApiKeyIsRoot, // We are the only ones who can use the root key, so it's fine
verifyApiKeyHasAction(ActionsEnum.sendUsageNotification),
logActionAudit(ActionsEnum.sendUsageNotification),
org.sendUsageNotification
);
authenticated.post(
`/org/:orgId/send-trial-notification`,
verifyApiKeyIsRoot,
verifyApiKeyHasAction(ActionsEnum.sendTrialNotification),
logActionAudit(ActionsEnum.sendTrialNotification),
org.sendTrialNotification
);
authenticated.delete(
"/idp/:idpId",
verifyApiKeyIsRoot,

View File

@@ -496,11 +496,6 @@ export async function createSiteResource(
);
}
}
await rebuildClientAssociationsFromSiteResource(
newSiteResource,
trx
); // we need to call this because we added to the admin role
});
if (!newSiteResource) {
@@ -526,6 +521,22 @@ export async function createSiteResource(
await createCertificate(domainId, fullDomain, db);
}
// Run in the background after the response is sent. Wrapped in its
// own transaction so it always executes on the primary — avoiding any
// replica-lag issues while still allowing the HTTP response to return
// early.
db.transaction(async (trx) => {
await rebuildClientAssociationsFromSiteResource(
newSiteResource!,
trx
);
}).catch((err) => {
logger.error(
`Error rebuilding client associations for site resource ${newSiteResource!.siteResourceId}:`,
err
);
});
return response(res, {
data: newSiteResource,
success: true,
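
The same shape recurs in updateSiteResource below: an un-awaited transaction plus an explicit .catch. The general pattern, as a sketch (the logger import path is assumed):

import { db } from "@server/db";
import logger from "@server/logger"; // assumed import path

// Respond first, then run follow-up work in its own transaction so it
// executes on the primary; .catch turns a failure into a log line
// instead of an unhandled promise rejection.
db.transaction(async (trx) => {
    // ... follow-up work using trx ...
}).catch((err) => {
    logger.error("Background follow-up failed:", err);
});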

View File

@@ -431,9 +431,6 @@ export async function updateSiteResource(
})
.returning();
// wait some time to allow for messages to be handled
await new Promise((resolve) => setTimeout(resolve, 750));
const sshPamSet =
isLicensedSshPam &&
(authDaemonPort !== undefined ||
@@ -556,11 +553,6 @@ export async function updateSiteResource(
}))
);
}
await rebuildClientAssociationsFromSiteResource(
updatedSiteResource,
trx
);
} else {
// Update the site resource
const sshPamSet =
@@ -690,7 +682,24 @@ export async function updateSiteResource(
}
logger.info(`Updated site resource ${siteResourceId}`);
}
});
// Background: wait for removal messages to propagate, then rebuild
// associations for the re-created resource. Own transaction ensures
// execution on the primary against fully committed state.
(async () => {
await db.transaction(async (trx) => {
if (!updatedSiteResource) {
throw new Error("No updated resource found after update");
}
if (sitesChanged) {
await new Promise((resolve) => setTimeout(resolve, 750));
await rebuildClientAssociationsFromSiteResource(
updatedSiteResource,
trx
);
}
await handleMessagingForUpdatedSiteResource(
existingSiteResource,
updatedSiteResource,
@@ -700,7 +709,12 @@ export async function updateSiteResource(
})),
trx
);
}
});
})().catch((err) => {
logger.error(
`Error rebuilding client associations for site resource ${updatedSiteResource?.siteResourceId}:`,
err
);
});
return response(res, {

View File

@@ -16,6 +16,9 @@ export default async function migration() {
thc."targetId",
t."siteId",
s."orgId",
r."name" AS "resourceName",
t."ip",
t."port",
thc."hcEnabled",
thc."hcPath",
thc."hcScheme",
@@ -33,13 +36,17 @@ export default async function migration() {
thc."hcTlsServerName"
FROM "targetHealthCheck" thc
JOIN "targets" t ON thc."targetId" = t."targetId"
JOIN "sites" s ON t."siteId" = s."siteId"`
JOIN "sites" s ON t."siteId" = s."siteId"
JOIN "resources" r ON t."resourceId" = r."resourceId"`
);
const existingHealthChecks = healthChecksQuery.rows as {
targetHealthCheckId: number;
targetId: number;
siteId: number;
orgId: string;
resourceName: string;
ip: string;
port: number;
hcEnabled: boolean;
hcPath: string | null;
hcScheme: string | null;
@@ -385,6 +392,7 @@ export default async function migration() {
"targetId",
"orgId",
"siteId",
"name",
"hcEnabled",
"hcPath",
"hcScheme",
@@ -405,6 +413,7 @@ export default async function migration() {
${hc.targetId},
${hc.orgId},
${hc.siteId},
${`Resource ${hc.resourceName} - ${hc.ip}:${hc.port}`},
${hc.hcEnabled},
${hc.hcPath},
${hc.hcScheme},

View File

@@ -22,6 +22,9 @@ export default async function migration() {
thc."targetId",
t."siteId",
s."orgId",
r."name" AS "resourceName",
t."ip",
t."port",
thc."hcEnabled",
thc."hcPath",
thc."hcScheme",
@@ -39,13 +42,17 @@ export default async function migration() {
thc."hcTlsServerName"
FROM 'targetHealthCheck' thc
JOIN 'targets' t ON thc."targetId" = t."targetId"
JOIN 'sites' s ON t."siteId" = s."siteId"`
JOIN 'sites' s ON t."siteId" = s."siteId"
JOIN 'resources' r ON t."resourceId" = r."resourceId"`
)
.all() as {
targetHealthCheckId: number;
targetId: number;
siteId: number;
orgId: string;
resourceName: string;
ip: string;
port: number;
hcEnabled: number;
hcPath: string | null;
hcScheme: string | null;
@@ -392,6 +399,7 @@ export default async function migration() {
"targetId",
"orgId",
"siteId",
"name",
"hcEnabled",
"hcPath",
"hcScheme",
@@ -407,7 +415,7 @@ export default async function migration() {
"hcStatus",
"hcHealth",
"hcTlsServerName"
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
);
const insertAll = db.transaction(() => {
@@ -417,6 +425,7 @@ export default async function migration() {
hc.targetId,
hc.orgId,
hc.siteId,
`Resource ${hc.resourceName} - ${hc.ip}:${hc.port}`,
hc.hcEnabled,
hc.hcPath,
hc.hcScheme,