Mirror of https://github.com/fosrl/pangolin.git, synced 2025-12-30 15:50:37 -06:00
Send site add in case the client does not have the site
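The olm get-config handler now sends the peer-add handshake alongside the regular peer update when pushing site configuration, so a client that never received the site still gets it added. Per the in-diff comments, the update is a no-op when the peer has not been added yet, and the handshake is a no-op when it already has.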
@@ -24,7 +24,7 @@ import {
     deletePeer as newtDeletePeer
 } from "@server/routers/newt/peers";
 import {
-    initPeerAddHandshake as holepunchSiteAdd,
+    initPeerAddHandshake,
     deletePeer as olmDeletePeer
 } from "@server/routers/olm/peers";
 import { sendToExitNode } from "#dynamic/lib/exitNodes";
@@ -477,7 +477,7 @@ async function handleMessagesForSiteClients(
         }

         if (isAdd) {
-            await holepunchSiteAdd(
+            await initPeerAddHandshake(
                 // this will kick off the add peer process for the client
                 client.clientId,
                 {
@@ -545,9 +545,13 @@ export async function updateClientSiteDestinations(
         }

         // Parse the endpoint properly for both IPv4 and IPv6
-        const parsedEndpoint = parseEndpoint(site.clientSitesAssociationsCache.endpoint);
+        const parsedEndpoint = parseEndpoint(
+            site.clientSitesAssociationsCache.endpoint
+        );
         if (!parsedEndpoint) {
-            logger.warn(`Failed to parse endpoint ${site.clientSitesAssociationsCache.endpoint}, skipping`);
+            logger.warn(
+                `Failed to parse endpoint ${site.clientSitesAssociationsCache.endpoint}, skipping`
+            );
             continue;
         }

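Endpoint parsing has to treat bracketed IPv6 addresses differently from IPv4, since a naive split on the last ":" would cut an IPv6 address in half. The diff only shows the call site, so the helper below is a hypothetical sketch of that kind of parsing, not the repo's actual parseEndpoint:

// Hypothetical sketch; the real parseEndpoint in this codebase may differ
// in name, signature, and return shape.
function parseEndpointSketch(
    endpoint: string
): { host: string; port: number } | null {
    // Bracketed IPv6 form, e.g. "[2001:db8::1]:51820"
    const v6 = endpoint.match(/^\[(.+)\]:(\d+)$/);
    if (v6) {
        return { host: v6[1], port: Number(v6[2]) };
    }
    // IPv4 or hostname form, e.g. "203.0.113.7:51820"
    const idx = endpoint.lastIndexOf(":");
    if (idx === -1) {
        return null; // no port present
    }
    const port = Number(endpoint.slice(idx + 1));
    return Number.isFinite(port)
        ? { host: endpoint.slice(0, idx), port }
        : null;
}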
@@ -11,7 +11,7 @@ import {
 } from "@server/db";
 import { clients, clientSitesAssociationsCache, Newt, sites } from "@server/db";
 import { eq } from "drizzle-orm";
-import { updatePeer } from "../olm/peers";
+import { initPeerAddHandshake, updatePeer } from "../olm/peers";
 import { sendToExitNode } from "#dynamic/lib/exitNodes";
 import { generateSubnetProxyTargets, SubnetProxyTarget } from "@server/lib/ip";
 import config from "@server/lib/config";
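Importing initPeerAddHandshake here is what lets the get-config handler below trigger the hole-punch handshake for each client in addition to updatePeer.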
@@ -140,92 +140,101 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
        )
        .where(eq(clientSitesAssociationsCache.siteId, siteId));

    // Prepare peers data for the response
    const peers = await Promise.all(
        clientsRes
            .filter((client) => {
                if (!client.clients.pubKey) {
                    logger.warn(
                        `Client ${client.clients.clientId} has no public key, skipping`
                    );
                    return false;
                }
                if (!client.clients.subnet) {
                    logger.warn(
                        `Client ${client.clients.clientId} has no subnet, skipping`
                    );
                    return false;
                }
                return true;
            })
            .map(async (client) => {
                // Add or update this peer on the olm if it is connected
                if (!site.publicKey) {
                    logger.warn(
                        `Site ${site.siteId} has no public key, skipping`
                    );
                    return null;
                }
    let peers: Array<{
        publicKey: string;
        allowedIps: string[];
        endpoint?: string;
    }> = [];

    if (!exitNode) {
        logger.warn(`Exit node not found for site ${site.siteId}`);
        return null;
    }
    if (site.publicKey && site.endpoint && exitNode) {
        // Prepare peers data for the response
        peers = await Promise.all(
            clientsRes
                .filter((client) => {
                    if (!client.clients.pubKey) {
                        logger.warn(
                            `Client ${client.clients.clientId} has no public key, skipping`
                        );
                        return false;
                    }
                    if (!client.clients.subnet) {
                        logger.warn(
                            `Client ${client.clients.clientId} has no subnet, skipping`
                        );
                        return false;
                    }
                    return true;
                })
                .map(async (client) => {
                    // Add or update this peer on the olm if it is connected

                    if (!site.endpoint) {
                        logger.warn(
                            `Site ${site.siteId} has no endpoint, skipping`
                        );
                        return null;
                    }

                    // const allSiteResources = await db // only get the site resources that this client has access to
                    //     .select()
                    //     .from(siteResources)
                    //     .innerJoin(
                    //         clientSiteResourcesAssociationsCache,
                    //         eq(
                    //             siteResources.siteResourceId,
                    //             clientSiteResourcesAssociationsCache.siteResourceId
                    //         )
                    //     )
                    //     .where(
                    //         and(
                    //             eq(siteResources.siteId, site.siteId),
                    //             eq(
                    //                 clientSiteResourcesAssociationsCache.clientId,
                    //                 client.clients.clientId
                    //             )
                    //         )
                    //     );
                    await updatePeer(client.clients.clientId, {
                        siteId: site.siteId,
                        endpoint: site.endpoint,
                        relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
                        publicKey: site.publicKey,
                        serverIP: site.address,
                        serverPort: site.listenPort
                        // remoteSubnets: generateRemoteSubnets(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        // const allSiteResources = await db // only get the site resources that this client has access to
                        //     .select()
                        //     .from(siteResources)
                        //     .innerJoin(
                        //         clientSiteResourcesAssociationsCache,
                        //         eq(
                        //             siteResources.siteResourceId,
                        //             clientSiteResourcesAssociationsCache.siteResourceId
                        //         )
                        //     )
                        // ),
                        // aliases: generateAliasConfig(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        //     )
                        // )
                    });
                    //     .where(
                    //         and(
                    //             eq(siteResources.siteId, site.siteId),
                    //             eq(
                    //                 clientSiteResourcesAssociationsCache.clientId,
                    //                 client.clients.clientId
                    //             )
                    //         )
                    //     );

                    return {
                        publicKey: client.clients.pubKey!,
                        allowedIps: [`${client.clients.subnet.split("/")[0]}/32`], // we want to only allow from that client
                        endpoint: client.clientSitesAssociationsCache.isRelayed
                            ? ""
                            : client.clientSitesAssociationsCache.endpoint! // if its relayed it should be localhost
                    };
                })
    );
                    // update the peer info on the olm
                    // if the peer has not been added yet this will be a no-op
                    await updatePeer(client.clients.clientId, {
                        siteId: site.siteId,
                        endpoint: site.endpoint!,
                        relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
                        publicKey: site.publicKey!,
                        serverIP: site.address,
                        serverPort: site.listenPort
                        // remoteSubnets: generateRemoteSubnets(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        //     )
                        // ),
                        // aliases: generateAliasConfig(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        //     )
                        // )
                    });

                    // also trigger the peer add handshake in case the peer was not already added to the olm and we need to hole punch
                    // if it has already been added this will be a no-op
                    await initPeerAddHandshake(
                        // this will kick off the add peer process for the client
                        client.clients.clientId,
                        {
                            siteId,
                            exitNode: {
                                publicKey: exitNode.publicKey,
                                endpoint: exitNode.endpoint
                            }
                        }
                    );

                    return {
                        publicKey: client.clients.pubKey!,
                        allowedIps: [
                            `${client.clients.subnet.split("/")[0]}/32`
                        ], // we want to only allow from that client
                        endpoint: client.clientSitesAssociationsCache.isRelayed
                            ? ""
                            : client.clientSitesAssociationsCache.endpoint! // if its relayed it should be localhost
                    };
                })
        );
    }

    // Filter out any null values from peers that didn't have an olm
    const validPeers = peers.filter((peer) => peer !== null);
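Read end to end, the new get-config path does two things for every connected client: it updates the olm's peer record for the site (a no-op if the peer has not been added yet, per the comment above) and then always fires the peer-add handshake (a no-op if the peer is already there), which is what gets the site onto a client that never received it. Below is a condensed, self-contained sketch of that flow, with simplified types and placeholder callbacks standing in for the real updatePeer and initPeerAddHandshake:

// Condensed sketch of the per-client flow this commit introduces in
// handleGetConfigMessage. The interfaces and the two send* callbacks are
// simplified stand-ins, not the project's real signatures.
interface SiteInfo {
    siteId: number;
    publicKey?: string;
    endpoint?: string;
    address?: string;
    listenPort?: number;
}

interface ExitNodeInfo {
    publicKey: string;
    endpoint: string;
}

interface ClientInfo {
    clientId: number;
    pubKey: string;
    subnet: string; // e.g. "100.64.0.5/24" (hypothetical value)
}

async function syncClientWithSite(
    client: ClientInfo,
    site: SiteInfo,
    exitNode: ExitNodeInfo,
    sendUpdate: (clientId: number, peer: unknown) => Promise<void>,
    sendHandshake: (clientId: number, msg: unknown) => Promise<void>
) {
    // 1. Push the latest peer parameters to the client's olm; if the peer
    //    has not been added yet this is a no-op.
    await sendUpdate(client.clientId, {
        siteId: site.siteId,
        endpoint: site.endpoint,
        publicKey: site.publicKey,
        serverIP: site.address,
        serverPort: site.listenPort
    });

    // 2. Always trigger the add-peer handshake as well, so a client that
    //    never received this site gets it now and hole punching can start;
    //    if the peer was already added this is also a no-op.
    await sendHandshake(client.clientId, {
        siteId: site.siteId,
        exitNode: {
            publicKey: exitNode.publicKey,
            endpoint: exitNode.endpoint
        }
    });

    // 3. Return the peer entry for this client, limited to its own /32.
    return {
        publicKey: client.pubKey,
        allowedIps: [`${client.subnet.split("/")[0]}/32`]
    };
}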