Compare commits

..

5 Commits

Author SHA1 Message Date
Owen Schwartz
87e6c7ba36 Merge pull request #3003 from fosrl/dev
1.18.2-s.3
2026-05-05 10:54:48 -07:00
Owen Schwartz
9410a18404 Merge pull request #2997 from fosrl/dev
Translations
2026-05-04 11:49:26 -07:00
Owen Schwartz
23f4302186 Merge pull request #2995 from fosrl/dev
1.18.2-s.2
2026-05-04 11:43:24 -07:00
Owen Schwartz
fb4bda077b Merge pull request #2983 from fosrl/dev
1.18.2-s.1
2026-05-03 14:59:12 -07:00
Owen Schwartz
cf596d980f Merge pull request #2971 from fosrl/dev
1.18.2
2026-05-02 20:59:51 -07:00
7 changed files with 29 additions and 381 deletions

View File

@@ -1,7 +1,7 @@
import { z } from "zod"; import { z } from "zod";
import { db, logsDb, statusHistory } from "@server/db"; import { db, logsDb, statusHistory } from "@server/db";
import { and, eq, gte, asc } from "drizzle-orm"; import { and, eq, gte, asc } from "drizzle-orm";
import { regionalCache as cache } from "@server/private/lib/cache"; import cache from "@server/lib/cache";
const STATUS_HISTORY_CACHE_TTL = 60; // seconds const STATUS_HISTORY_CACHE_TTL = 60; // seconds
@@ -63,7 +63,7 @@ export async function invalidateStatusHistoryCache(
entityId: number entityId: number
): Promise<void> { ): Promise<void> {
const prefix = `statusHistory:${entityType}:${entityId}:`; const prefix = `statusHistory:${entityType}:${entityId}:`;
const keys = await cache.keysWithPrefix(prefix); const keys = cache.keys().filter((k) => k.startsWith(prefix));
if (keys.length > 0) { if (keys.length > 0) {
await cache.del(keys); await cache.del(keys);
} }

View File

@@ -500,30 +500,7 @@ function findAcmeJsonFiles(dirPath: string): string[] {
const fullPath = path.join(dirPath, entry.name); const fullPath = path.join(dirPath, entry.name);
if (entry.isDirectory()) { if (entry.isDirectory()) {
results.push(...findAcmeJsonFiles(fullPath)); results.push(...findAcmeJsonFiles(fullPath));
} else if (entry.isFile()) { } else if (entry.isFile() && entry.name === "acme.json") {
// check if it is a json file
if (entry.name.endsWith(".json")) {
let raw: string;
try {
raw = fs.readFileSync(fullPath, "utf8");
} catch (err) {
logger.warn(
`acmeCertSync: could not read file "${fullPath}": ${err}`
);
continue;
}
let parsed: any;
try {
parsed = JSON.parse(raw);
} catch (err) {
logger.warn(
`acmeCertSync: could not parse "${fullPath}" as JSON: ${err}`
);
continue;
}
}
results.push(fullPath); results.push(fullPath);
} }
} }

View File

@@ -13,7 +13,7 @@
import NodeCache from "node-cache"; import NodeCache from "node-cache";
import logger from "@server/logger"; import logger from "@server/logger";
import { redisManager, regionalRedisManager } from "@server/private/lib/redis"; import { redisManager } from "@server/private/lib/redis";
// Create local cache with maxKeys limit to prevent memory leaks // Create local cache with maxKeys limit to prevent memory leaks
// With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient // With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient
@@ -298,147 +298,3 @@ class AdaptiveCache {
// Export singleton instance // Export singleton instance
export const cache = new AdaptiveCache(); export const cache = new AdaptiveCache();
export default cache; export default cache;
/**
 * Regional adaptive cache backed by the in-cluster Redis instance.
 * Falls back to a local NodeCache when the regional Redis is unavailable.
 * Use this for data that is regional in nature (e.g. status history) so
 * reads are served from the same cluster the user is hitting.
 */
const regionalLocalCache = new NodeCache({
    stdTTL: 3600,
    checkperiod: 120,
    maxKeys: 10000
});

class RegionalAdaptiveCache {
    /** True when the regional Redis is configured and currently healthy. */
    private useRedis(): boolean {
        if (!regionalRedisManager.isRedisEnabled()) return false;
        return regionalRedisManager.getHealthStatus().isHealthy;
    }

    /**
     * Store a value. A ttl of 0 means "no explicit expiry" (Redis gets no
     * TTL; the local cache uses 0, deferring to its own semantics). Redis
     * stores the JSON-serialized value; the local cache stores it as-is.
     * Falls back to the local cache when the Redis write fails or throws.
     */
    async set(key: string, value: any, ttl?: number): Promise<boolean> {
        const localTtl = ttl === 0 ? undefined : ttl;
        const redisTtl = ttl === 0 ? undefined : (ttl ?? 3600);

        if (this.useRedis()) {
            try {
                const payload = JSON.stringify(value);
                const stored = await regionalRedisManager.set(
                    key,
                    payload,
                    redisTtl
                );
                if (stored) {
                    logger.debug(`[regional] Set key in Redis: ${key}`);
                    return true;
                }
            } catch (error) {
                logger.error(
                    `[regional] Redis set error for key ${key}:`,
                    error
                );
            }
        }

        const stored = regionalLocalCache.set(key, value, localTtl || 0);
        if (stored) logger.debug(`[regional] Set key in local cache: ${key}`);
        return stored;
    }

    /**
     * Fetch a value. When Redis is active, a miss there is final — it does
     * NOT fall through to the local cache; only a Redis error does.
     */
    async get<T = any>(key: string): Promise<T | undefined> {
        if (this.useRedis()) {
            try {
                const raw = await regionalRedisManager.get(key);
                if (raw === null) {
                    logger.debug(`[regional] Cache miss in Redis: ${key}`);
                    return undefined;
                }
                logger.debug(`[regional] Cache hit in Redis: ${key}`);
                return JSON.parse(raw) as T;
            } catch (error) {
                logger.error(
                    `[regional] Redis get error for key ${key}:`,
                    error
                );
            }
        }

        const cached = regionalLocalCache.get<T>(key);
        if (cached !== undefined) {
            logger.debug(`[regional] Cache hit in local cache: ${key}`);
        } else {
            logger.debug(`[regional] Cache miss in local cache: ${key}`);
        }
        return cached;
    }

    /**
     * Delete one or more keys. If every key is removed from Redis, that
     * count is returned directly. On a partial Redis result or an error,
     * the count is discarded and all keys are deleted from the local
     * cache instead, returning the local deletion count.
     */
    async del(key: string | string[]): Promise<number> {
        const keyList = Array.isArray(key) ? key : [key];
        let removed = 0;

        if (this.useRedis()) {
            try {
                for (const k of keyList) {
                    if (await regionalRedisManager.del(k)) {
                        removed++;
                        logger.debug(`[regional] Deleted key from Redis: ${k}`);
                    }
                }
                if (removed === keyList.length) return removed;
                removed = 0;
            } catch (error) {
                logger.error(`[regional] Redis del error:`, error);
                removed = 0;
            }
        }

        for (const k of keyList) {
            if (regionalLocalCache.del(k) > 0) {
                removed++;
                logger.debug(`[regional] Deleted key from local cache: ${k}`);
            }
        }
        return removed;
    }

    /**
     * Existence check. Redis answers authoritatively (implemented as a GET);
     * the local cache is consulted only when Redis is inactive or errors.
     */
    async has(key: string): Promise<boolean> {
        if (this.useRedis()) {
            try {
                return (await regionalRedisManager.get(key)) !== null;
            } catch (error) {
                logger.error(
                    `[regional] Redis has error for key ${key}:`,
                    error
                );
            }
        }
        return regionalLocalCache.has(key);
    }

    /**
     * Returns keys matching the given prefix from whichever backend is active.
     * Redis uses a KEYS scan; local cache filters in-memory keys.
     */
    async keysWithPrefix(prefix: string): Promise<string[]> {
        if (this.useRedis()) {
            try {
                return await regionalRedisManager.keys(`${prefix}*`);
            } catch (error) {
                logger.error(`[regional] Redis keys error:`, error);
            }
        }
        return regionalLocalCache.keys().filter((k) => k.startsWith(prefix));
    }

    /** Which backend a call made right now would hit. */
    getCurrentBackend(): "redis" | "local" {
        return this.useRedis() ? "redis" : "local";
    }
}

export const regionalCache = new RegionalAdaptiveCache();

View File

@@ -73,25 +73,6 @@ export const privateConfigSchema = z
.object({ .object({
rejectUnauthorized: z.boolean().optional().default(true) rejectUnauthorized: z.boolean().optional().default(true)
}) })
.optional(),
regional_redis: z
.object({
host: z.string(),
port: portSchema,
password: z
.string()
.optional()
.transform(getEnvOrYaml("REGIONAL_REDIS_PASSWORD")),
db: z.int().nonnegative().optional().default(0),
tls: z
.object({
rejectUnauthorized: z
.boolean()
.optional()
.default(true)
})
.optional()
})
.optional() .optional()
}) })
.optional(), .optional(),

View File

@@ -855,163 +855,3 @@ class RedisManager {
export const redisManager = new RedisManager(); export const redisManager = new RedisManager();
export const redis = redisManager.getClient(); export const redis = redisManager.getClient();
export default redisManager; export default redisManager;
/**
 * Lightweight Redis manager for the regional (in-cluster) Redis instance.
 * Connects only when `redis.regional_redis` is present in the private config
 * and `flags.enable_redis` is true. No pub/sub — designed for low-latency
 * caching of regionally-scoped data.
 */
class RegionalRedisManager {
    private writeClient: Redis | null = null;
    // Currently aliases writeClient; kept separate so replica routing can
    // be reintroduced without touching callers of get()/keys().
    private readClient: Redis | null = null;
    private isEnabled: boolean = false;
    private isHealthy: boolean = false;
    // Timeouts in milliseconds, applied to both connect and commands.
    private connectionTimeout: number = 5000;
    private commandTimeout: number = 5000;

    constructor() {
        // The regional Redis is not part of the OSS build.
        if (build === "oss") return;
        const cfg = privateConfig.getRawPrivateConfig();
        if (!cfg.flags.enable_redis || !cfg.redis?.regional_redis) return;
        this.isEnabled = true;
        this.initializeClients();
    }

    /**
     * Build ioredis connection options from the private config.
     * Non-null assertions are safe: the constructor only reaches
     * initializeClients() when `redis.regional_redis` is present.
     */
    private getConfig(): RedisOptions {
        const r = privateConfig.getRawPrivateConfig().redis!.regional_redis!;
        const opts: RedisOptions = {
            host: r.host,
            port: r.port,
            password: r.password,
            db: r.db
        };
        if (r.tls) {
            opts.tls = { rejectUnauthorized: r.tls.rejectUnauthorized ?? true };
        }
        return opts;
    }

    /**
     * Create the single shared client and wire up health tracking.
     * On any initialization error the manager disables itself so callers
     * fall back to their local caches.
     */
    private initializeClients(): void {
        const cfg = this.getConfig();
        const baseOpts = {
            ...cfg,
            enableReadyCheck: false,
            maxRetriesPerRequest: 3,
            keepAlive: 10000,
            connectTimeout: this.connectionTimeout,
            commandTimeout: this.commandTimeout
        };
        try {
            this.writeClient = new Redis(baseOpts);
            // Reads go through the primary connection as well. Replica
            // routing, where configured, is handled by the StatefulSet
            // headless service — no separate read connection is opened.
            // (Previously a second Redis client was constructed here with a
            // no-op hostname rewrite and immediately discarded, leaking an
            // open connection; that dead code has been removed.)
            this.readClient = this.writeClient;
            this.writeClient.on("ready", () => {
                logger.info("Regional Redis client ready");
                this.isHealthy = true;
            });
            this.writeClient.on("error", (err) => {
                logger.error("Regional Redis client error:", err);
                this.isHealthy = false;
            });
            this.writeClient.on("reconnecting", () => {
                logger.info("Regional Redis client reconnecting...");
                this.isHealthy = false;
            });
            logger.info("Regional Redis client initialized");
        } catch (error) {
            logger.error("Failed to initialize regional Redis client:", error);
            this.isEnabled = false;
        }
    }

    /** Enabled, connected, and currently healthy. */
    public isRedisEnabled(): boolean {
        return this.isEnabled && this.writeClient !== null && this.isHealthy;
    }

    /** Snapshot of the enabled/healthy flags for callers that branch on them. */
    public getHealthStatus() {
        return { isEnabled: this.isEnabled, isHealthy: this.isHealthy };
    }

    /**
     * SET (or SETEX when a positive ttl in seconds is given).
     * Returns false — never throws — when disabled or on error.
     */
    public async set(
        key: string,
        value: string,
        ttl?: number
    ): Promise<boolean> {
        if (!this.isRedisEnabled() || !this.writeClient) return false;
        try {
            if (ttl) {
                await this.writeClient.setex(key, ttl, value);
            } else {
                await this.writeClient.set(key, value);
            }
            return true;
        } catch (error) {
            logger.error("Regional Redis SET error:", error);
            return false;
        }
    }

    /** GET; returns null when disabled, missing, or on error. */
    public async get(key: string): Promise<string | null> {
        if (!this.isRedisEnabled() || !this.readClient) return null;
        try {
            return await this.readClient.get(key);
        } catch (error) {
            logger.error("Regional Redis GET error:", error);
            return null;
        }
    }

    /** DEL a single key; returns false when disabled or on error. */
    public async del(key: string): Promise<boolean> {
        if (!this.isRedisEnabled() || !this.writeClient) return false;
        try {
            await this.writeClient.del(key);
            return true;
        } catch (error) {
            logger.error("Regional Redis DEL error:", error);
            return false;
        }
    }

    /** KEYS scan for a glob pattern; returns [] when disabled or on error. */
    public async keys(pattern: string): Promise<string[]> {
        if (!this.isRedisEnabled() || !this.readClient) return [];
        try {
            return await this.readClient.keys(pattern);
        } catch (error) {
            logger.error("Regional Redis KEYS error:", error);
            return [];
        }
    }

    /** Gracefully close the connection; safe to call when never connected. */
    public async disconnect(): Promise<void> {
        try {
            if (this.writeClient) {
                await this.writeClient.quit();
                this.writeClient = null;
            }
            // readClient aliases writeClient, so quitting once closes both.
            this.readClient = null;
            logger.info("Regional Redis client disconnected");
        } catch (error) {
            logger.error("Error disconnecting regional Redis client:", error);
        }
    }
}

export const regionalRedisManager = new RegionalRedisManager();

View File

@@ -333,16 +333,23 @@ export async function validateOidcCallback(
.innerJoin(orgs, eq(orgs.orgId, idpOrg.orgId)); .innerJoin(orgs, eq(orgs.orgId, idpOrg.orgId));
allOrgs = idpOrgs.map((o) => o.orgs); allOrgs = idpOrgs.map((o) => o.orgs);
for (const org of allOrgs) { // for (const org of allOrgs) {
const subscribed = await isSubscribed( // const subscribed = await isSubscribed(
org.orgId, // org.orgId,
tierMatrix.autoProvisioning // tierMatrix.autoProvisioning
); // );
if (!subscribed) { // if (!subscribed) {
// filter out the org // // filter out the org
allOrgs = allOrgs.filter((o) => o.orgId !== org.orgId); // allOrgs = allOrgs.filter((o) => o.orgId !== org.orgId);
}
} // // return next(
// // createHttpError(
// // HttpCode.FORBIDDEN,
// // "This organization's current plan does not support this feature."
// // )
// // );
// }
// }
} else { } else {
allOrgs = await db.select().from(orgs); allOrgs = await db.select().from(orgs);
} }
@@ -483,14 +490,7 @@ export async function validateOidcCallback(
} }
} }
calculateUserClientsForOrgs(existingUser.userId).catch( await calculateUserClientsForOrgs(existingUser.userId);
(err) => {
logger.error(
"Error calculating user clients after removing all orgs for user with no valid IdP mappings",
{ error: err }
);
}
);
return next( return next(
createHttpError( createHttpError(
@@ -512,9 +512,10 @@ export async function validateOidcCallback(
const orgUserCounts: { orgId: string; userCount: number }[] = []; const orgUserCounts: { orgId: string; userCount: number }[] = [];
let userId = existingUser?.userId;
// sync the user with the orgs and roles // sync the user with the orgs and roles
await db.transaction(async (trx) => { await db.transaction(async (trx) => {
let userId = existingUser?.userId;
// create user if not exists // create user if not exists
if (!existingUser) { if (!existingUser) {
userId = generateId(15); userId = generateId(15);
@@ -644,15 +645,8 @@ export async function validateOidcCallback(
userCount: userCount.length userCount: userCount.length
}); });
} }
});
db.transaction(async (trx) => {
await calculateUserClientsForOrgs(userId!, trx); await calculateUserClientsForOrgs(userId!, trx);
}).catch((err) => {
logger.error(
"Error calculating user clients after syncing orgs and roles for OIDC user",
{ error: err }
);
}); });
for (const orgCount of orgUserCounts) { for (const orgCount of orgUserCounts) {

View File

@@ -147,7 +147,7 @@ export default function SmartLoginOrgSelector({
const response = await generateOidcUrlProxy( const response = await generateOidcUrlProxy(
idpId, idpId,
safeRedirect, safeRedirect,
undefined, orgId,
forceLogin forceLogin
); );