Merge branch 'main' into copilot/fix-1112

This commit is contained in:
Owen
2025-08-10 10:10:10 -07:00
77 changed files with 4113 additions and 1256 deletions

View File

@@ -5,7 +5,8 @@ import {
boolean,
integer,
bigint,
real
real,
text
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
@@ -58,7 +59,8 @@ export const sites = pgTable("sites", {
publicKey: varchar("publicKey"),
lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
listenPort: integer("listenPort"),
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true)
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
});
export const resources = pgTable("resources", {
@@ -92,7 +94,8 @@ export const resources = pgTable("resources", {
enabled: boolean("enabled").notNull().default(true),
stickySession: boolean("stickySession").notNull().default(false),
tlsServerName: varchar("tlsServerName"),
setHostHeader: varchar("setHostHeader")
setHostHeader: varchar("setHostHeader"),
enableProxy: boolean("enableProxy").default(true),
});
export const targets = pgTable("targets", {
@@ -135,6 +138,8 @@ export const users = pgTable("user", {
twoFactorSecret: varchar("twoFactorSecret"),
emailVerified: boolean("emailVerified").notNull().default(false),
dateCreated: varchar("dateCreated").notNull(),
termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
termsVersion: varchar("termsVersion"),
serverAdmin: boolean("serverAdmin").notNull().default(false)
});
@@ -504,8 +509,8 @@ export const clients = pgTable("clients", {
name: varchar("name").notNull(),
pubKey: varchar("pubKey"),
subnet: varchar("subnet").notNull(),
megabytesIn: integer("bytesIn"),
megabytesOut: integer("bytesOut"),
megabytesIn: real("bytesIn"),
megabytesOut: real("bytesOut"),
lastBandwidthUpdate: varchar("lastBandwidthUpdate"),
lastPing: varchar("lastPing"),
type: varchar("type").notNull(), // "olm"
@@ -539,7 +544,7 @@ export const olmSessions = pgTable("clientSession", {
olmId: varchar("olmId")
.notNull()
.references(() => olms.olmId, { onDelete: "cascade" }),
expiresAt: integer("expiresAt").notNull()
expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});
export const userClients = pgTable("userClients", {
@@ -562,9 +567,11 @@ export const roleClients = pgTable("roleClients", {
export const securityKeys = pgTable("webauthnCredentials", {
credentialId: varchar("credentialId").primaryKey(),
userId: varchar("userId").notNull().references(() => users.userId, {
onDelete: "cascade"
}),
userId: varchar("userId")
.notNull()
.references(() => users.userId, {
onDelete: "cascade"
}),
publicKey: varchar("publicKey").notNull(),
signCount: integer("signCount").notNull(),
transports: varchar("transports"),
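
The new remoteSubnets column stores the reachable CIDRs as a single comma-separated text value rather than a join table. A minimal sketch of serializing and parsing such a value, under the assumption that consumers split on commas and trim whitespace (the helper names are hypothetical, not part of this commit):

// Hypothetical helpers for the comma-separated remoteSubnets text column.
function serializeRemoteSubnets(subnets: string[]): string {
    return subnets.map((s) => s.trim()).join(",");
}

function parseRemoteSubnets(value: string | null): string[] {
    if (!value) return [];
    return value
        .split(",")
        .map((s) => s.trim())
        .filter((s) => s.length > 0);
}

// parseRemoteSubnets("10.0.0.0/24, 192.168.1.0/24")
//   -> ["10.0.0.0/24", "192.168.1.0/24"]
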

View File

@@ -65,7 +65,8 @@ export const sites = sqliteTable("sites", {
listenPort: integer("listenPort"),
dockerSocketEnabled: integer("dockerSocketEnabled", { mode: "boolean" })
.notNull()
.default(true)
.default(true),
remoteSubnets: text("remoteSubnets"), // comma-separated list of subnets that this site can access
});
export const resources = sqliteTable("resources", {
@@ -105,7 +106,8 @@ export const resources = sqliteTable("resources", {
.notNull()
.default(false),
tlsServerName: text("tlsServerName"),
setHostHeader: text("setHostHeader")
setHostHeader: text("setHostHeader"),
enableProxy: integer("enableProxy", { mode: "boolean" }).default(true),
});
export const targets = sqliteTable("targets", {
@@ -154,6 +156,8 @@ export const users = sqliteTable("user", {
.notNull()
.default(false),
dateCreated: text("dateCreated").notNull(),
termsAcceptedTimestamp: text("termsAcceptedTimestamp"),
termsVersion: text("termsVersion"),
serverAdmin: integer("serverAdmin", { mode: "boolean" })
.notNull()
.default(false)

View File

@@ -1,3 +1,4 @@
#! /usr/bin/env node
import "./extendZod.ts";
import { runSetupFunctions } from "./setup";

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.7.3";
export const APP_VERSION = "1.8.0";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -213,7 +213,7 @@ export const configSchema = z
smtp_host: z.string().optional(),
smtp_port: portSchema.optional(),
smtp_user: z.string().optional(),
smtp_pass: z.string().optional(),
smtp_pass: z.string().optional().transform(getEnvOrYaml("EMAIL_SMTP_PASS")),
smtp_secure: z.boolean().optional(),
smtp_tls_reject_unauthorized: z.boolean().optional(),
no_reply: z.string().email().optional()
@@ -229,9 +229,22 @@ export const configSchema = z
disable_local_sites: z.boolean().optional(),
disable_basic_wireguard_sites: z.boolean().optional(),
disable_config_managed_domains: z.boolean().optional(),
enable_clients: z.boolean().optional()
enable_clients: z.boolean().optional().default(true),
})
.optional(),
dns: z
.object({
nameservers: z
.array(z.string())
.optional()
.default(["ns1.fossorial.io", "ns2.fossorial.io"]),
cname_extension: z.string().optional().default("fossorial.io")
})
.optional()
.default({
nameservers: ["ns1.fossorial.io", "ns2.fossorial.io"],
cname_extension: "fossorial.io"
})
})
.refine(
(data) => {
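
With the new dns block defaulted at both the field and the object level, an installation that omits it entirely still resolves to the Fossorial nameservers. A sketch of what the parsed config yields, assuming a standard YAML config file (the override values are illustrative):

// Omitted from the YAML entirely -> object-level default applies:
// config.getRawConfig().dns
//   -> { nameservers: ["ns1.fossorial.io", "ns2.fossorial.io"], cname_extension: "fossorial.io" }
//
// Overridden in the YAML (illustrative values):
// dns:
//   nameservers: ["ns1.example.com", "ns2.example.com"]
//   cname_extension: "example.com"
const dns = config.getRawConfig().dns;
const nsRecords: string[] = dns.nameservers; // always defined thanks to the defaults
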

View File

@@ -106,21 +106,21 @@ export async function login(
);
}
// Check if user has security keys registered
const userSecurityKeys = await db
.select()
.from(securityKeys)
.where(eq(securityKeys.userId, existingUser.userId));
if (userSecurityKeys.length > 0) {
return response<LoginResponse>(res, {
data: { useSecurityKey: true },
success: true,
error: false,
message: "Security key authentication required",
status: HttpCode.OK
});
}
// // Check if user has security keys registered
// const userSecurityKeys = await db
// .select()
// .from(securityKeys)
// .where(eq(securityKeys.userId, existingUser.userId));
//
// if (userSecurityKeys.length > 0) {
// return response<LoginResponse>(res, {
// data: { useSecurityKey: true },
// success: true,
// error: false,
// message: "Security key authentication required",
// status: HttpCode.OK
// });
// }
if (
existingUser.twoFactorSetupRequested &&

View File

@@ -21,15 +21,14 @@ import { hashPassword } from "@server/auth/password";
import { checkValidInvite } from "@server/auth/checkValidInvite";
import { passwordSchema } from "@server/auth/passwordSchema";
import { UserType } from "@server/types/UserTypes";
import { build } from "@server/build";
export const signupBodySchema = z.object({
email: z
.string()
.toLowerCase()
.email(),
email: z.string().toLowerCase().email(),
password: passwordSchema,
inviteToken: z.string().optional(),
inviteId: z.string().optional()
inviteId: z.string().optional(),
termsAcceptedTimestamp: z.string().nullable().optional()
});
export type SignUpBody = z.infer<typeof signupBodySchema>;
@@ -54,7 +53,8 @@ export async function signup(
);
}
const { email, password, inviteToken, inviteId } = parsedBody.data;
const { email, password, inviteToken, inviteId, termsAcceptedTimestamp } =
parsedBody.data;
const passwordHash = await hashPassword(password);
const userId = generateId(15);
@@ -161,13 +161,24 @@ export async function signup(
}
}
if (build === "saas" && !termsAcceptedTimestamp) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"You must accept the terms of service and privacy policy"
)
);
}
await db.insert(users).values({
userId: userId,
type: UserType.Internal,
username: email,
email: email,
passwordHash,
dateCreated: moment().toISOString()
dateCreated: moment().toISOString(),
termsAcceptedTimestamp: termsAcceptedTimestamp || null,
termsVersion: "1"
});
// give the user their default permissions:
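
On a saas build the request body must now carry the acceptance timestamp or the handler rejects it with 400. A hedged example of a valid body per signupBodySchema (email and password values are placeholders):

// Illustrative only; field names follow signupBodySchema above.
const body: SignUpBody = {
    email: "user@example.com",
    password: "a-Strong-Passw0rd!", // must satisfy passwordSchema
    termsAcceptedTimestamp: new Date().toISOString()
};
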

View File

@@ -14,7 +14,7 @@ import { OpenAPITags, registry } from "@server/openApi";
const getClientSchema = z
.object({
clientId: z.string().transform(stoi).pipe(z.number().int().positive()),
orgId: z.string().optional()
orgId: z.string()
})
.strict();

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { db, exitNodes, sites } from "@server/db";
import { clients, clientSites } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -17,6 +17,7 @@ import {
addPeer as olmAddPeer,
deletePeer as olmDeletePeer
} from "../olm/peers";
import axios from "axios";
const updateClientParamsSchema = z
.object({
@@ -53,6 +54,11 @@ registry.registerPath({
responses: {}
});
interface PeerDestination {
destinationIP: string;
destinationPort: number;
}
export async function updateClient(
req: Request,
res: Response,
@@ -124,15 +130,22 @@ export async function updateClient(
);
for (const siteId of sitesAdded) {
if (!client.subnet || !client.pubKey || !client.endpoint) {
logger.debug("Client subnet, pubKey or endpoint is not set");
logger.debug(
"Client subnet, pubKey or endpoint is not set"
);
continue;
}
// TODO: WE NEED TO HANDLE THIS BETTER. RIGHT NOW WE ARE JUST GUESSING BASED ON THE OTHER SITES
// BUT REALLY WE NEED TO TRACK THE USER'S PREFERENCE THAT THEY CHOSE IN THE CLIENTS
const isRelayed = true;
const site = await newtAddPeer(siteId, {
publicKey: client.pubKey,
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: client.endpoint
endpoint: isRelayed ? "" : client.endpoint
});
if (!site) {
logger.debug("Failed to add peer to newt - missing site");
continue;
@@ -142,12 +155,49 @@ export async function updateClient(
logger.debug("Site endpoint or publicKey is not set");
continue;
}
let endpoint;
if (isRelayed) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} has no exit node, skipping`
);
continue;
}
// get the exit node for the site
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn(
`Exit node not found for site ${site.siteId}`
);
continue;
}
endpoint = `${exitNode.endpoint}:21820`;
} else {
if (!site.endpoint) {
logger.warn(
`Site ${site.siteId} has no endpoint, skipping`
);
continue;
}
endpoint = site.endpoint;
}
await olmAddPeer(client.clientId, {
siteId: siteId,
endpoint: site.endpoint,
siteId: site.siteId,
endpoint: endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort
serverPort: site.listenPort,
remoteSubnets: site.remoteSubnets
});
}
@@ -170,7 +220,11 @@ export async function updateClient(
logger.debug("Site endpoint or publicKey is not set");
continue;
}
await olmDeletePeer(client.clientId, site.siteId, site.publicKey);
await olmDeletePeer(
client.clientId,
site.siteId,
site.publicKey
);
}
}
@@ -201,6 +255,101 @@ export async function updateClient(
}
}
if (client.endpoint) {
// get all sites for this client and join with exit nodes with site.exitNodeId
const sitesData = await db
.select()
.from(sites)
.innerJoin(
clientSites,
eq(sites.siteId, clientSites.siteId)
)
.leftJoin(
exitNodes,
eq(sites.exitNodeId, exitNodes.exitNodeId)
)
.where(eq(clientSites.clientId, client.clientId));
let exitNodeDestinations: {
reachableAt: string;
destinations: PeerDestination[];
}[] = [];
for (const site of sitesData) {
if (!site.sites.subnet) {
logger.warn(
`Site ${site.sites.siteId} has no subnet, skipping`
);
continue;
}
// find the destinations in the array
let destinations = exitNodeDestinations.find(
(d) => d.reachableAt === site.exitNodes?.reachableAt
);
if (!destinations) {
destinations = {
reachableAt: site.exitNodes?.reachableAt || "",
destinations: [
{
destinationIP:
site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
}
]
};
} else {
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
});
}
// update it in the array
exitNodeDestinations = exitNodeDestinations.filter(
(d) => d.reachableAt !== site.exitNodes?.reachableAt
);
exitNodeDestinations.push(destinations);
}
for (const destination of exitNodeDestinations) {
try {
logger.info(
`Updating destinations for exit node at ${destination.reachableAt}`
);
const payload = {
sourceIp: client.endpoint?.split(":")[0] || "",
sourcePort: parseInt(client.endpoint?.split(":")[1] ?? "", 10) || 0,
destinations: destination.destinations
};
logger.info(
`Payload for update-destinations: ${JSON.stringify(payload, null, 2)}`
);
const response = await axios.post(
`${destination.reachableAt}/update-destinations`,
payload,
{
headers: {
"Content-Type": "application/json"
}
}
);
logger.info("Destinations updated:", {
peer: response.data.status
});
} catch (error) {
if (axios.isAxiosError(error)) {
throw new Error(
`Error communicating with Gerbil. Make sure Pangolin can reach the Gerbil HTTP API: ${error.response?.status}`
);
}
throw error;
}
}
}
// Fetch the updated client
const [updatedClient] = await trx
.select()
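
The loop above buckets each site's destination under the reachableAt address of its exit node via find/filter/push. An equivalent sketch using a Map, shown only to clarify the shape of the data posted to each Gerbil instance; the flattened row type is a simplification of the joined sites/exitNodes rows, and PeerDestination is the interface declared earlier in this file:

// Sketch: group per-site destinations by exit node.
function groupByExitNode(
    rows: {
        reachableAt?: string | null;
        subnet?: string | null;
        listenPort?: number | null;
    }[]
): { reachableAt: string; destinations: PeerDestination[] }[] {
    const buckets = new Map<string, PeerDestination[]>();
    for (const row of rows) {
        if (!row.subnet) continue; // mirrors the "no subnet, skipping" guard
        const key = row.reachableAt || "";
        const list = buckets.get(key) ?? [];
        list.push({
            destinationIP: row.subnet.split("/")[0],
            destinationPort: row.listenPort || 0
        });
        buckets.set(key, list);
    }
    return [...buckets.entries()].map(([reachableAt, destinations]) => ({
        reachableAt,
        destinations
    }));
}
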

View File

@@ -11,6 +11,7 @@ import { generateId } from "@server/auth/sessions/app";
import { eq, and } from "drizzle-orm";
import { isValidDomain } from "@server/lib/validators";
import { build } from "@server/build";
import config from "@server/lib/config";
const paramsSchema = z
.object({
@@ -228,15 +229,15 @@ export async function createOrgDomain(
// TODO: This needs to be cross region and not hardcoded
if (type === "ns") {
nsRecords = ["ns-east.fossorial.io", "ns-west.fossorial.io"];
nsRecords = config.getRawConfig().dns.nameservers as string[];
} else if (type === "cname") {
cnameRecords = [
{
value: `${domainId}.cname.fossorial.io`,
value: `${domainId}.${config.getRawConfig().dns.cname_extension}`,
baseDomain: baseDomain
},
{
value: `_acme-challenge.${domainId}.cname.fossorial.io`,
value: `_acme-challenge.${domainId}.${config.getRawConfig().dns.cname_extension}`,
baseDomain: `_acme-challenge.${baseDomain}`
}
];

View File

@@ -233,6 +233,12 @@ authenticated.get(
resource.listResources
);
authenticated.get(
"/org/:orgId/user-resources",
verifyOrgAccess,
resource.getUserResources
);
authenticated.get(
"/org/:orgId/domains",
verifyOrgAccess,

View File

@@ -8,7 +8,7 @@ export async function addPeer(exitNodeId: number, peer: {
publicKey: string;
allowedIps: string[];
}) {
logger.info(`Adding peer with public key ${peer.publicKey} to exit node ${exitNodeId}`);
const [exitNode] = await db.select().from(exitNodes).where(eq(exitNodes.exitNodeId, exitNodeId)).limit(1);
if (!exitNode) {
throw new Error(`Exit node with ID ${exitNodeId} not found`);
@@ -35,6 +35,7 @@ export async function addPeer(exitNodeId: number, peer: {
}
export async function deletePeer(exitNodeId: number, publicKey: string) {
logger.info(`Deleting peer with public key ${publicKey} from exit node ${exitNodeId}`);
const [exitNode] = await db.select().from(exitNodes).where(eq(exitNodes.exitNodeId, exitNodeId)).limit(1);
if (!exitNode) {
throw new Error(`Exit node with ID ${exitNodeId} not found`);

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { clients, newts, olms, Site, sites, clientSites } from "@server/db";
import { clients, newts, olms, Site, sites, clientSites, exitNodes } from "@server/db";
import { db } from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
@@ -9,6 +9,7 @@ import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { validateNewtSessionToken } from "@server/auth/sessions/newt";
import { validateOlmSessionToken } from "@server/auth/sessions/olm";
import axios from "axios";
// Define Zod schema for request validation
const updateHolePunchSchema = z.object({
@@ -17,7 +18,8 @@ const updateHolePunchSchema = z.object({
token: z.string(),
ip: z.string(),
port: z.number(),
timestamp: z.number()
timestamp: z.number(),
reachableAt: z.string().optional()
});
// New response type with multi-peer destination support
@@ -43,9 +45,8 @@ export async function updateHolePunch(
);
}
const { olmId, newtId, ip, port, timestamp, token } = parsedParams.data;
const { olmId, newtId, ip, port, timestamp, token, reachableAt } = parsedParams.data;
let currentSiteId: number | undefined;
let destinations: PeerDestination[] = [];
@@ -95,37 +96,129 @@ export async function updateHolePunch(
);
}
// Get all sites that this client is connected to
const clientSitePairs = await db
.select()
.from(clientSites)
.where(eq(clientSites.clientId, client.clientId));
// // Get all sites that this client is connected to
// const clientSitePairs = await db
// .select()
// .from(clientSites)
// .where(eq(clientSites.clientId, client.clientId));
if (clientSitePairs.length === 0) {
logger.warn(`No sites found for client: ${client.clientId}`);
return next(
createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
);
}
// if (clientSitePairs.length === 0) {
// logger.warn(`No sites found for client: ${client.clientId}`);
// return next(
// createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
// );
// }
// Get all sites details
const siteIds = clientSitePairs.map(pair => pair.siteId);
// // Get all sites details
// const siteIds = clientSitePairs.map(pair => pair.siteId);
for (const siteId of siteIds) {
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId));
// for (const siteId of siteIds) {
// const [site] = await db
// .select()
// .from(sites)
// .where(eq(sites.siteId, siteId));
if (site && site.subnet && site.listenPort) {
destinations.push({
destinationIP: site.subnet.split("/")[0],
destinationPort: site.listenPort
// if (site && site.subnet && site.listenPort) {
// destinations.push({
// destinationIP: site.subnet.split("/")[0],
// destinationPort: site.listenPort
// });
// }
// }
// get all sites for this client and join with exit nodes with site.exitNodeId
const sitesData = await db
.select()
.from(sites)
.innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
.leftJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
.where(eq(clientSites.clientId, client.clientId));
let exitNodeDestinations: {
reachableAt: string;
destinations: PeerDestination[];
}[] = [];
for (const site of sitesData) {
if (!site.sites.subnet) {
logger.warn(`Site ${site.sites.siteId} has no subnet, skipping`);
continue;
}
// find the destinations in the array
let destinations = exitNodeDestinations.find(
(d) => d.reachableAt === site.exitNodes?.reachableAt
);
if (!destinations) {
destinations = {
reachableAt: site.exitNodes?.reachableAt || "",
destinations: [
{
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
}
]
};
} else {
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
});
}
// update it in the array
exitNodeDestinations = exitNodeDestinations.filter(
(d) => d.reachableAt !== site.exitNodes?.reachableAt
);
exitNodeDestinations.push(destinations);
}
logger.debug(JSON.stringify(exitNodeDestinations, null, 2));
for (const destination of exitNodeDestinations) {
// if it's the current exit node, skip it because it is replying with the same data
if (reachableAt && destination.reachableAt == reachableAt) {
logger.debug(`Skipping update for reachableAt: ${reachableAt}`);
continue;
}
try {
const response = await axios.post(
`${destination.reachableAt}/update-destinations`,
{
sourceIp: client.endpoint?.split(":")[0] || "",
sourcePort: parseInt(client.endpoint?.split(":")[1] ?? "", 10) || 0,
destinations: destination.destinations
},
{
headers: {
"Content-Type": "application/json"
}
}
);
logger.info("Destinations updated:", {
peer: response.data.status
});
} catch (error) {
if (axios.isAxiosError(error)) {
throw new Error(
`Error communicating with Gerbil. Make sure Pangolin can reach the Gerbil HTTP API: ${error.response?.status}`
);
}
throw error;
}
}
// Send the destinations back to the origin
destinations = exitNodeDestinations.find(
(d) => d.reachableAt === reachableAt
)?.destinations || [];
} else if (newtId) {
logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`);
const { session, newt: newtSession } =
await validateNewtSessionToken(token);
@@ -174,28 +267,29 @@ export async function updateHolePunch(
}
// Find all clients that connect to this site
const sitesClientPairs = await db
.select()
.from(clientSites)
.where(eq(clientSites.siteId, newt.siteId));
// const sitesClientPairs = await db
// .select()
// .from(clientSites)
// .where(eq(clientSites.siteId, newt.siteId));
// THE NEWT IS NOT SENDING RAW WG TO THE GERBIL SO IDK IF WE REALLY NEED THIS - REMOVING
// Get client details for each client
for (const pair of sitesClientPairs) {
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, pair.clientId));
// for (const pair of sitesClientPairs) {
// const [client] = await db
// .select()
// .from(clients)
// .where(eq(clients.clientId, pair.clientId));
if (client && client.endpoint) {
const [host, portStr] = client.endpoint.split(':');
if (host && portStr) {
destinations.push({
destinationIP: host,
destinationPort: parseInt(portStr, 10)
});
}
}
}
// if (client && client.endpoint) {
// const [host, portStr] = client.endpoint.split(':');
// if (host && portStr) {
// destinations.push({
// destinationIP: host,
// destinationPort: parseInt(portStr, 10)
// });
// }
// }
// }
// If this is a newt/site, also add other sites in the same org
// if (updatedSite.orgId) {

View File

@@ -2,10 +2,18 @@ import { z } from "zod";
import { MessageHandler } from "../ws";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { db } from "@server/db";
import {
db,
ExitNode,
exitNodes,
resources,
Target,
targets
} from "@server/db";
import { clients, clientSites, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { eq, and, inArray } from "drizzle-orm";
import { updatePeer } from "../olm/peers";
import axios from "axios";
const inputSchema = z.object({
publicKey: z.string(),
@@ -54,7 +62,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
logger.warn("handleGetConfigMessage: Site not found");
return;
}
// we need to wait for hole punch success
if (!existingSite.endpoint) {
logger.warn(`Site ${existingSite.siteId} has no endpoint, skipping`);
@@ -87,6 +95,48 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
return;
}
let exitNode: ExitNode | undefined;
if (site.exitNodeId) {
[exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (exitNode?.reachableAt) {
try {
const response = await axios.post(
`${exitNode.reachableAt}/update-proxy-mapping`,
{
oldDestination: {
destinationIP: existingSite.subnet?.split("/")[0],
destinationPort: existingSite.listenPort
},
newDestination: {
destinationIP: site.subnet?.split("/")[0],
destinationPort: site.listenPort
}
},
{
headers: {
"Content-Type": "application/json"
}
}
);
logger.info("Destinations updated:", {
peer: response.data.status
});
} catch (error) {
if (axios.isAxiosError(error)) {
throw new Error(
`Error communicating with Gerbil. Make sure Pangolin can reach the Gerbil HTTP API: ${error.response?.status}`
);
}
throw error;
}
}
}
// Get all clients connected to this site
const clientsRes = await db
.select()
@@ -107,33 +157,59 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
if (!client.clients.endpoint) {
return false;
}
if (!client.clients.online) {
return false;
}
return true;
})
.map(async (client) => {
// Add or update this peer on the olm if it is connected
try {
if (site.endpoint && site.publicKey) {
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: site.endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort
});
if (!site.publicKey) {
logger.warn(
`Site ${site.siteId} has no public key, skipping`
);
return null;
}
let endpoint = site.endpoint;
if (client.clientSites.isRelayed) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} has no exit node, skipping`
);
return null;
}
if (!exitNode) {
logger.warn(
`Exit node not found for site ${site.siteId}`
);
return null;
}
endpoint = `${exitNode.endpoint}:21820`;
}
if (!endpoint) {
logger.warn(
`Site ${site.siteId} has no endpoint, skipping`
);
return null;
}
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort,
remoteSubnets: site.remoteSubnets
});
} catch (error) {
logger.error(
`Failed to add/update peer ${client.clients.pubKey} to newt ${newt.newtId}: ${error}`
`Failed to add/update peer ${client.clients.pubKey} to olm ${newt.newtId}: ${error}`
);
}
return {
publicKey: client.clients.pubKey!,
allowedIps: [`${client.clients.subnet.split('/')[0]}/32`], // we want to only allow from that client
allowedIps: [`${client.clients.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: client.clientSites.isRelayed
? ""
: client.clients.endpoint! // if its relayed it should be localhost
@@ -144,14 +220,96 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
// Filter out any null values from peers that didn't have an olm
const validPeers = peers.filter((peer) => peer !== null);
// Improved version
const allResources = await db.transaction(async (tx) => {
// First get all resources for the site
const resourcesList = await tx
.select({
resourceId: resources.resourceId,
subdomain: resources.subdomain,
fullDomain: resources.fullDomain,
ssl: resources.ssl,
blockAccess: resources.blockAccess,
sso: resources.sso,
emailWhitelistEnabled: resources.emailWhitelistEnabled,
http: resources.http,
proxyPort: resources.proxyPort,
protocol: resources.protocol
})
.from(resources)
.where(and(eq(resources.siteId, siteId), eq(resources.http, false)));
// Get all enabled targets for these resources in a single query
const resourceIds = resourcesList.map((r) => r.resourceId);
const allTargets =
resourceIds.length > 0
? await tx
.select({
resourceId: targets.resourceId,
targetId: targets.targetId,
ip: targets.ip,
method: targets.method,
port: targets.port,
internalPort: targets.internalPort,
enabled: targets.enabled,
})
.from(targets)
.where(
and(
inArray(targets.resourceId, resourceIds),
eq(targets.enabled, true)
)
)
: [];
// Combine the data in JS instead of using SQL for the JSON
return resourcesList.map((resource) => ({
...resource,
targets: allTargets.filter(
(target) => target.resourceId === resource.resourceId
)
}));
});
const { tcpTargets, udpTargets } = allResources.reduce(
(acc, resource) => {
// Skip resources with no targets
if (!resource.targets?.length) return acc;
// Format valid targets into strings
const formattedTargets = resource.targets
.filter(
(target: Target) =>
resource.proxyPort && target?.ip && target?.port
)
.map(
(target: Target) =>
`${resource.proxyPort}:${target.ip}:${target.port}`
);
// Add to the appropriate protocol array
if (resource.protocol === "tcp") {
acc.tcpTargets.push(...formattedTargets);
} else {
acc.udpTargets.push(...formattedTargets);
}
return acc;
},
{ tcpTargets: [] as string[], udpTargets: [] as string[] }
);
// Build the configuration response
const configResponse = {
ipAddress: site.address,
peers: validPeers
peers: validPeers,
targets: {
udp: udpTargets,
tcp: tcpTargets
}
};
logger.debug("Sending config: ", configResponse);
return {
message: {
type: "newt/wg/receive-config",

View File

@@ -4,7 +4,8 @@ import { sendToClient } from "../ws";
export function addTargets(
newtId: string,
targets: Target[],
protocol: string
protocol: string,
port: number | null = null
) {
//create a list of udp and tcp targets
const payloadTargets = targets.map((target) => {
@@ -13,19 +14,32 @@ export function addTargets(
}:${target.port}`;
});
const payload = {
sendToClient(newtId, {
type: `newt/${protocol}/add`,
data: {
targets: payloadTargets
}
};
sendToClient(newtId, payload);
});
const payloadTargetsResources = targets.map((target) => {
return `${port ? port + ":" : ""}${
target.ip
}:${target.port}`;
});
sendToClient(newtId, {
type: `newt/wg/${protocol}/add`,
data: {
targets: [payloadTargetsResources[0]] // We can only use one target for WireGuard right now
}
});
}
export function removeTargets(
newtId: string,
targets: Target[],
protocol: string
protocol: string,
port: number | null = null
) {
//create a list of udp and tcp targets
const payloadTargets = targets.map((target) => {
@@ -34,11 +48,23 @@ export function removeTargets(
}:${target.port}`;
});
const payload = {
sendToClient(newtId, {
type: `newt/${protocol}/remove`,
data: {
targets: payloadTargets
}
};
sendToClient(newtId, payload);
});
const payloadTargetsResources = targets.map((target) => {
return `${port ? port + ":" : ""}${
target.ip
}:${target.port}`;
});
sendToClient(newtId, {
type: `newt/wg/${protocol}/remove`,
data: {
targets: [payloadTargetsResources[0]] // We can only use one target for WireGuard right now
}
});
}
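
Both helpers now emit a second, WireGuard-specific message in which each target is encoded with an optional resource-port prefix. A quick illustration of the encoding (values invented):

const target = { ip: "10.0.0.5", port: 80 };
const proxyPort = 8080;
// newt/wg/tcp/add entry: "proxyPort:ip:port" when a port is supplied,
// otherwise just "ip:port".
const wgTarget = `${proxyPort ? proxyPort + ":" : ""}${target.ip}:${target.port}`;
// -> "8080:10.0.0.5:80"
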

View File

@@ -1,4 +1,4 @@
import { db } from "@server/db";
import { db, ExitNode } from "@server/db";
import { MessageHandler } from "../ws";
import {
clients,
@@ -28,7 +28,10 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
return;
}
const clientId = olm.clientId;
const { publicKey } = message.data;
const { publicKey, relay } = message.data;
logger.debug(`Olm client ID: ${clientId}, Public Key: ${publicKey}, Relay: ${relay}`);
if (!publicKey) {
logger.warn("Public key not provided");
return;
@@ -58,9 +61,11 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
sendToClient(olm.olmId, {
type: "olm/wg/holepunch",
data: {
serverPubKey: exitNode.publicKey
serverPubKey: exitNode.publicKey,
endpoint: exitNode.endpoint,
}
});
}
if (now - (client.lastHolePunch || 0) > 6) {
@@ -84,7 +89,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
await db
.update(clientSites)
.set({
isRelayed: false
isRelayed: relay == true
})
.where(eq(clientSites.clientId, olm.clientId));
}
@@ -97,7 +102,15 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
.where(eq(clientSites.clientId, client.clientId));
// Prepare an array to store site configurations
const siteConfigurations = [];
let siteConfigurations = [];
logger.debug(`Found ${sitesData.length} sites for client ${client.clientId}`);
if (sitesData.length === 0) {
sendToClient(olm.olmId, {
type: "olm/register/no-sites",
data: {}
});
}
// Process each site
for (const { sites: site } of sitesData) {
@@ -114,12 +127,12 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
continue;
}
if (site.lastHolePunch && now - site.lastHolePunch > 6) {
logger.warn(
`Site ${site.siteId} last hole punch is too old, skipping`
);
continue;
}
// if (site.lastHolePunch && now - site.lastHolePunch > 6 && relay) {
// logger.warn(
// `Site ${site.siteId} last hole punch is too old, skipping`
// );
// continue;
// }
// If public key changed, delete old peer from this site
if (client.pubKey && client.pubKey != publicKey) {
@@ -142,7 +155,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
await addPeer(site.siteId, {
publicKey: publicKey,
allowedIps: [`${client.subnet.split('/')[0]}/32`], // we want to only allow from that client
endpoint: client.endpoint
endpoint: relay ? "" : client.endpoint
});
} else {
logger.warn(
@@ -150,21 +163,36 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
);
}
let endpoint = site.endpoint;
if (relay) {
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn(`Exit node not found for site ${site.siteId}`);
continue;
}
endpoint = `${exitNode.endpoint}:21820`;
}
// Add site configuration to the array
siteConfigurations.push({
siteId: site.siteId,
endpoint: site.endpoint,
endpoint: endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort
serverPort: site.listenPort,
remoteSubnets: site.remoteSubnets
});
}
// If we have no valid site configurations, don't send a connect message
if (siteConfigurations.length === 0) {
logger.warn("No valid site configurations found");
return;
}
// REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
// if (siteConfigurations.length === 0) {
// logger.warn("No valid site configurations found");
// return;
// }
// Return connect message with all site configurations
return {
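
When the olm registers in relay mode, every site endpoint handed back is rewritten to point at the exit node's relay listener on port 21820 instead of the site itself. A compact sketch of that selection (a hypothetical helper mirroring the branch above):

// Hypothetical helper mirroring the relay endpoint selection above.
function siteEndpointFor(
    relay: boolean,
    site: { endpoint: string | null },
    exitNode: { endpoint: string }
): string | null {
    return relay ? `${exitNode.endpoint}:21820` : site.endpoint;
}
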

View File

@@ -1,7 +1,7 @@
import { db } from "@server/db";
import { db, exitNodes, sites } from "@server/db";
import { MessageHandler } from "../ws";
import { clients, clientSites, Olm } from "@server/db";
import { eq } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import { updatePeer } from "../newt/peers";
import logger from "@server/logger";
@@ -30,29 +30,67 @@ export const handleOlmRelayMessage: MessageHandler = async (context) => {
.limit(1);
if (!client) {
logger.warn("Site not found or does not have exit node");
logger.warn("Client not found");
return;
}
// make sure we have endpoints for both the site and the client and that the lastHolePunch is not too old
if (!client.pubKey) {
logger.warn("Site or client has no endpoint or listen port");
logger.warn("Client has no endpoint or listen port");
return;
}
const { siteId } = message.data;
// Get the site
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site || !site.exitNodeId) {
logger.warn("Site not found or has no exit node");
return;
}
// get the site's exit node
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn("Exit node not found for site");
return;
}
await db
.update(clientSites)
.set({
isRelayed: true
})
.where(eq(clientSites.clientId, olm.clientId));
.where(
and(
eq(clientSites.clientId, olm.clientId),
eq(clientSites.siteId, siteId)
)
);
// update the peer on the exit node
await updatePeer(siteId, client.pubKey, {
endpoint: "" // this removes the endpoint
});
sendToClient(olm.olmId, {
type: "olm/wg/peer/relay",
data: {
siteId: siteId,
endpoint: exitNode.endpoint,
publicKey: exitNode.publicKey
}
});
return;
};

View File

@@ -12,6 +12,7 @@ export async function addPeer(
endpoint: string;
serverIP: string | null;
serverPort: number | null;
remoteSubnets: string | null; // optional, comma-separated list of subnets that this site can access
}
) {
const [olm] = await db
@@ -30,7 +31,8 @@ export async function addPeer(
publicKey: peer.publicKey,
endpoint: peer.endpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets // optional, comma-separated list of subnets that this site can access
}
});
@@ -66,6 +68,7 @@ export async function updatePeer(
endpoint: string;
serverIP: string | null;
serverPort: number | null;
remoteSubnets?: string | null; // optional, comma-separated list of subnets that this site can access
}
) {
const [olm] = await db
@@ -84,7 +87,8 @@ export async function updatePeer(
publicKey: peer.publicKey,
endpoint: peer.endpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets
}
});

View File

@@ -1,14 +1,8 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import {
newts,
newtSessions,
orgs,
sites,
userActions
} from "@server/db";
import { eq } from "drizzle-orm";
import { db, domains, orgDomains, resources } from "@server/db";
import { newts, newtSessions, orgs, sites, userActions } from "@server/db";
import { eq, and, inArray, sql } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -126,6 +120,44 @@ export async function deleteOrg(
}
}
const allOrgDomains = await trx
.select()
.from(orgDomains)
.innerJoin(domains, eq(domains.domainId, orgDomains.domainId))
.where(
and(
eq(orgDomains.orgId, orgId),
eq(domains.configManaged, false)
)
);
// For each domain, check if it belongs to multiple organizations
const domainIdsToDelete: string[] = [];
for (const orgDomain of allOrgDomains) {
const domainId = orgDomain.domains.domainId;
// Count how many organizations this domain belongs to
const orgCount = await trx
.select({ count: sql<number>`count(*)` })
.from(orgDomains)
.where(eq(orgDomains.domainId, domainId));
// Only delete the domain if it belongs to exactly 1 organization (the one being deleted)
if (orgCount[0].count === 1) {
domainIdsToDelete.push(domainId);
}
}
// Delete domains that belong exclusively to this organization
if (domainIdsToDelete.length > 0) {
await trx
.delete(domains)
.where(inArray(domains.domainId, domainIdsToDelete));
}
// Delete resources
await trx.delete(resources).where(eq(resources.orgId, orgId));
await trx.delete(orgs).where(eq(orgs.orgId, orgId));
});
@@ -136,8 +168,11 @@ export async function deleteOrg(
data: {}
};
// Don't await this to prevent blocking the response
sendToClient(newtId, payload).catch(error => {
logger.error("Failed to send termination message to newt:", error);
sendToClient(newtId, payload).catch((error) => {
logger.error(
"Failed to send termination message to newt:",
error
);
});
}

View File

@@ -33,10 +33,7 @@ const createResourceParamsSchema = z
const createHttpResourceSchema = z
.object({
name: z.string().min(1).max(255),
subdomain: z
.string()
.nullable()
.optional(),
subdomain: z.string().nullable().optional(),
siteId: z.number(),
http: z.boolean(),
protocol: z.enum(["tcp", "udp"]),
@@ -59,7 +56,8 @@ const createRawResourceSchema = z
siteId: z.number(),
http: z.boolean(),
protocol: z.enum(["tcp", "udp"]),
proxyPort: z.number().int().min(1).max(65535)
proxyPort: z.number().int().min(1).max(65535),
enableProxy: z.boolean().default(true)
})
.strict()
.refine(
@@ -88,12 +86,7 @@ registry.registerPath({
body: {
content: {
"application/json": {
schema:
build == "oss"
? createHttpResourceSchema.or(
createRawResourceSchema
)
: createHttpResourceSchema
schema: createHttpResourceSchema.or(createRawResourceSchema)
}
}
}
@@ -156,7 +149,10 @@ export async function createResource(
{ siteId, orgId }
);
} else {
if (!config.getRawConfig().flags?.allow_raw_resources && build == "oss") {
if (
!config.getRawConfig().flags?.allow_raw_resources &&
build == "oss"
) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
@@ -378,7 +374,7 @@ async function createRawResource(
);
}
const { name, http, protocol, proxyPort } = parsedBody.data;
const { name, http, protocol, proxyPort, enableProxy } = parsedBody.data;
// if http is false check to see if there is already a resource with the same port and protocol
const existingResource = await db
@@ -411,7 +407,8 @@ async function createRawResource(
name,
http,
protocol,
proxyPort
proxyPort,
enableProxy
})
.returning();

View File

@@ -103,7 +103,8 @@ export async function deleteResource(
removeTargets(
newt.newtId,
targetsToBeRemoved,
deletedResource.protocol
deletedResource.protocol,
deletedResource.proxyPort
);
}
}

View File

@@ -0,0 +1,168 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { and, eq, or, inArray } from "drizzle-orm";
import {
resources,
userResources,
roleResources,
userOrgs,
roles,
resourcePassword,
resourcePincode,
resourceWhitelist,
sites
} from "@server/db";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { response } from "@server/lib/response";
export async function getUserResources(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const { orgId } = req.params;
const userId = req.user?.userId;
if (!userId) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "User not authenticated")
);
}
// First get the user's role in the organization
const userOrgResult = await db
.select({
roleId: userOrgs.roleId
})
.from(userOrgs)
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, orgId)
)
)
.limit(1);
if (userOrgResult.length === 0) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User not in organization")
);
}
const userRoleId = userOrgResult[0].roleId;
// Get resources accessible through direct assignment or role assignment
const directResourcesQuery = db
.select({ resourceId: userResources.resourceId })
.from(userResources)
.where(eq(userResources.userId, userId));
const roleResourcesQuery = db
.select({ resourceId: roleResources.resourceId })
.from(roleResources)
.where(eq(roleResources.roleId, userRoleId));
const [directResources, roleResourceResults] = await Promise.all([
directResourcesQuery,
roleResourcesQuery
]);
// Combine all accessible resource IDs
const accessibleResourceIds = [
...directResources.map(r => r.resourceId),
...roleResourceResults.map(r => r.resourceId)
];
if (accessibleResourceIds.length === 0) {
return response(res, {
data: { resources: [] },
success: true,
error: false,
message: "No resources found",
status: HttpCode.OK
});
}
// Get resource details for accessible resources
const resourcesData = await db
.select({
resourceId: resources.resourceId,
name: resources.name,
fullDomain: resources.fullDomain,
ssl: resources.ssl,
enabled: resources.enabled,
sso: resources.sso,
protocol: resources.protocol,
emailWhitelistEnabled: resources.emailWhitelistEnabled,
siteName: sites.name
})
.from(resources)
.leftJoin(sites, eq(sites.siteId, resources.siteId))
.where(
and(
inArray(resources.resourceId, accessibleResourceIds),
eq(resources.orgId, orgId),
eq(resources.enabled, true)
)
);
// Check for password, pincode, and whitelist protection for each resource
const resourcesWithAuth = await Promise.all(
resourcesData.map(async (resource) => {
const [passwordCheck, pincodeCheck, whitelistCheck] = await Promise.all([
db.select().from(resourcePassword).where(eq(resourcePassword.resourceId, resource.resourceId)).limit(1),
db.select().from(resourcePincode).where(eq(resourcePincode.resourceId, resource.resourceId)).limit(1),
db.select().from(resourceWhitelist).where(eq(resourceWhitelist.resourceId, resource.resourceId)).limit(1)
]);
const hasPassword = passwordCheck.length > 0;
const hasPincode = pincodeCheck.length > 0;
const hasWhitelist = whitelistCheck.length > 0 || resource.emailWhitelistEnabled;
return {
resourceId: resource.resourceId,
name: resource.name,
domain: `${resource.ssl ? "https://" : "http://"}${resource.fullDomain}`,
enabled: resource.enabled,
protected: !!(resource.sso || hasPassword || hasPincode || hasWhitelist),
protocol: resource.protocol,
sso: resource.sso,
password: hasPassword,
pincode: hasPincode,
whitelist: hasWhitelist,
siteName: resource.siteName
};
})
);
return response(res, {
data: { resources: resourcesWithAuth },
success: true,
error: false,
message: "User resources retrieved successfully",
status: HttpCode.OK
});
} catch (error) {
console.error("Error fetching user resources:", error);
return next(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "Internal server error")
);
}
}
export type GetUserResourcesResponse = {
success: boolean;
data: {
resources: Array<{
resourceId: number;
name: string;
domain: string;
enabled: boolean;
protected: boolean;
protocol: string;
}>;
};
};
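
This handler backs the new GET /org/:orgId/user-resources route registered earlier in the commit. A hedged client-side usage sketch; the /api/v1 prefix and cookie-based auth are assumptions, not confirmed by this diff:

// Illustrative client call against the new endpoint.
async function fetchUserResources(
    orgId: string
): Promise<GetUserResourcesResponse> {
    const res = await fetch(`/api/v1/org/${orgId}/user-resources`, {
        credentials: "include" // session cookie auth assumed
    });
    if (!res.ok) throw new Error(`Request failed: ${res.status}`);
    return (await res.json()) as GetUserResourcesResponse;
}
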

View File

@@ -21,4 +21,5 @@ export * from "./getExchangeToken";
export * from "./createResourceRule";
export * from "./deleteResourceRule";
export * from "./listResourceRules";
export * from "./updateResourceRule";
export * from "./updateResourceRule";
export * from "./getUserResources";

View File

@@ -168,7 +168,8 @@ export async function transferResource(
removeTargets(
newt.newtId,
resourceTargets,
updatedResource.protocol
updatedResource.protocol,
updatedResource.proxyPort
);
}
}
@@ -190,7 +191,8 @@ export async function transferResource(
addTargets(
newt.newtId,
resourceTargets,
updatedResource.protocol
updatedResource.protocol,
updatedResource.proxyPort
);
}
}

View File

@@ -34,9 +34,7 @@ const updateResourceParamsSchema = z
const updateHttpResourceBodySchema = z
.object({
name: z.string().min(1).max(255).optional(),
subdomain: subdomainSchema
.nullable()
.optional(),
subdomain: subdomainSchema.nullable().optional(),
ssl: z.boolean().optional(),
sso: z.boolean().optional(),
blockAccess: z.boolean().optional(),
@@ -93,7 +91,8 @@ const updateRawResourceBodySchema = z
name: z.string().min(1).max(255).optional(),
proxyPort: z.number().int().min(1).max(65535).optional(),
stickySession: z.boolean().optional(),
enabled: z.boolean().optional()
enabled: z.boolean().optional(),
enableProxy: z.boolean().optional()
})
.strict()
.refine((data) => Object.keys(data).length > 0, {
@@ -121,12 +120,9 @@ registry.registerPath({
body: {
content: {
"application/json": {
schema:
build == "oss"
? updateHttpResourceBodySchema.and(
updateRawResourceBodySchema
)
: updateHttpResourceBodySchema
schema: updateHttpResourceBodySchema.and(
updateRawResourceBodySchema
)
}
}
}
@@ -288,7 +284,9 @@ async function updateHttpResource(
} else if (domainRes.domains.type == "wildcard") {
if (updateData.subdomain !== undefined) {
// the subdomain cant have a dot in it
const parsedSubdomain = subdomainSchema.safeParse(updateData.subdomain);
const parsedSubdomain = subdomainSchema.safeParse(
updateData.subdomain
);
if (!parsedSubdomain.success) {
return next(
createHttpError(
@@ -341,7 +339,7 @@ async function updateHttpResource(
const updatedResource = await db
.update(resources)
.set({...updateData, })
.set({ ...updateData })
.where(eq(resources.resourceId, resource.resourceId))
.returning();

View File

@@ -9,6 +9,7 @@ import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { isValidCIDR } from "@server/lib/validators";
const updateSiteParamsSchema = z
.object({
@@ -20,6 +21,9 @@ const updateSiteBodySchema = z
.object({
name: z.string().min(1).max(255).optional(),
dockerSocketEnabled: z.boolean().optional(),
remoteSubnets: z
.string()
.optional()
// subdomain: z
// .string()
// .min(1)
@@ -85,6 +89,21 @@ export async function updateSite(
const { siteId } = parsedParams.data;
const updateData = parsedBody.data;
// if remoteSubnets is provided, ensure it's a valid comma-separated list of cidrs
if (updateData.remoteSubnets) {
const subnets = updateData.remoteSubnets.split(",").map((s) => s.trim());
for (const subnet of subnets) {
if (!isValidCIDR(subnet)) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Invalid CIDR format: ${subnet}`
)
);
}
}
}
const updatedSite = await db
.update(sites)
.set(updateData)

View File

@@ -173,7 +173,7 @@ export async function createTarget(
.where(eq(newts.siteId, site.siteId))
.limit(1);
addTargets(newt.newtId, newTarget, resource.protocol);
addTargets(newt.newtId, newTarget, resource.protocol, resource.proxyPort);
}
}
}

View File

@@ -105,7 +105,7 @@ export async function deleteTarget(
.where(eq(newts.siteId, site.siteId))
.limit(1);
removeTargets(newt.newtId, [deletedTarget], resource.protocol);
removeTargets(newt.newtId, [deletedTarget], resource.protocol, resource.proxyPort);
}
}

View File

@@ -157,7 +157,7 @@ export async function updateTarget(
.where(eq(newts.siteId, site.siteId))
.limit(1);
addTargets(newt.newtId, [updatedTarget], resource.protocol);
addTargets(newt.newtId, [updatedTarget], resource.protocol, resource.proxyPort);
}
}
return response(res, {

View File

@@ -66,7 +66,8 @@ export async function traefikConfigProvider(
enabled: resources.enabled,
stickySession: resources.stickySession,
tlsServerName: resources.tlsServerName,
setHostHeader: resources.setHostHeader
setHostHeader: resources.setHostHeader,
enableProxy: resources.enableProxy
})
.from(resources)
.innerJoin(sites, eq(sites.siteId, resources.siteId))
@@ -365,6 +366,10 @@ export async function traefikConfigProvider(
}
} else {
// Non-HTTP (TCP/UDP) configuration
if (!resource.enableProxy) {
continue;
}
const protocol = resource.protocol.toLowerCase();
const port = resource.proxyPort;

View File

@@ -1,3 +1,4 @@
#! /usr/bin/env node
import { migrate } from "drizzle-orm/node-postgres/migrator";
import { db } from "../db/pg";
import semver from "semver";
@@ -6,6 +7,7 @@ import { __DIRNAME, APP_VERSION } from "@server/lib/consts";
import path from "path";
import m1 from "./scriptsPg/1.6.0";
import m2 from "./scriptsPg/1.7.0";
import m3 from "./scriptsPg/1.8.0";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -13,7 +15,8 @@ import m2 from "./scriptsPg/1.7.0";
// Define the migration list with versions and their corresponding functions
const migrations = [
{ version: "1.6.0", run: m1 },
{ version: "1.7.0", run: m2 }
{ version: "1.7.0", run: m2 },
{ version: "1.8.0", run: m3 }
// Add new migrations here as they are created
] as {
version: string;

View File

@@ -1,3 +1,4 @@
#! /usr/bin/env node
import { migrate } from "drizzle-orm/better-sqlite3/migrator";
import { db, exists } from "../db/sqlite";
import path from "path";
@@ -23,6 +24,7 @@ import m19 from "./scriptsSqlite/1.3.0";
import m20 from "./scriptsSqlite/1.5.0";
import m21 from "./scriptsSqlite/1.6.0";
import m22 from "./scriptsSqlite/1.7.0";
import m23 from "./scriptsSqlite/1.8.0";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -46,6 +48,7 @@ const migrations = [
{ version: "1.5.0", run: m20 },
{ version: "1.6.0", run: m21 },
{ version: "1.7.0", run: m22 },
{ version: "1.8.0", run: m23 },
// Add new migrations here as they are created
] as const;

View File

@@ -0,0 +1,32 @@
import { db } from "@server/db/pg/driver";
import { sql } from "drizzle-orm";
const version = "1.8.0";
export default async function migration() {
console.log(`Running setup script ${version}...`);
try {
await db.execute(sql`
BEGIN;
ALTER TABLE "clients" ALTER COLUMN "bytesIn" SET DATA TYPE real;
ALTER TABLE "clients" ALTER COLUMN "bytesOut" SET DATA TYPE real;
ALTER TABLE "clientSession" ALTER COLUMN "expiresAt" SET DATA TYPE bigint;
ALTER TABLE "resources" ADD COLUMN "enableProxy" boolean DEFAULT true;
ALTER TABLE "sites" ADD COLUMN "remoteSubnets" text;
ALTER TABLE "user" ADD COLUMN "termsAcceptedTimestamp" varchar;
ALTER TABLE "user" ADD COLUMN "termsVersion" varchar;
COMMIT;
`);
console.log(`Migrated database schema`);
} catch (e) {
console.log("Unable to migrate database schema");
console.log(e);
throw e;
}
console.log(`${version} migration complete`);
}

View File

@@ -0,0 +1,30 @@
import { APP_PATH } from "@server/lib/consts";
import Database from "better-sqlite3";
import path from "path";
const version = "1.8.0";
export default async function migration() {
console.log(`Running setup script ${version}...`);
const location = path.join(APP_PATH, "db", "db.sqlite");
const db = new Database(location);
try {
db.transaction(() => {
db.exec(`
ALTER TABLE 'resources' ADD 'enableProxy' integer DEFAULT 1;
ALTER TABLE 'sites' ADD 'remoteSubnets' text;
ALTER TABLE 'user' ADD 'termsAcceptedTimestamp' text;
ALTER TABLE 'user' ADD 'termsVersion' text;
`);
})();
console.log("Migrated database schema");
} catch (e) {
console.log("Unable to migrate database schema");
throw e;
}
console.log(`${version} migration complete`);
}