diff --git a/packages/k8s-operator/Dockerfile b/packages/k8s-operator/Dockerfile new file mode 100644 index 0000000..9800b92 --- /dev/null +++ b/packages/k8s-operator/Dockerfile @@ -0,0 +1,32 @@ +FROM node:18-alpine AS build +WORKDIR /app + +# Copy package files +COPY package.json ./ +COPY tsconfig.json ./ + +# Copy source files +COPY src/ ./src/ + +# Install dependencies +RUN npm install + +# Build +RUN npm run build + +# Create production image +FROM node:18-alpine +WORKDIR /app + +# Copy package.json and built files +COPY --from=build /app/package.json ./ +COPY --from=build /app/dist ./dist + +# Install production dependencies +RUN npm install --production + +# Set environment variables +ENV NODE_ENV=production + +# Run +CMD ["node", "dist/index.js"] \ No newline at end of file diff --git a/packages/k8s-operator/package.json b/packages/k8s-operator/package.json new file mode 100644 index 0000000..785f264 --- /dev/null +++ b/packages/k8s-operator/package.json @@ -0,0 +1,26 @@ +{ + "name": "@minikura/k8s-operator", + "version": "1.0.0", + "description": "Kubernetes operator for Minikura that syncs database to Kubernetes resources", + "main": "dist/index.js", + "type": "module", + "scripts": { + "build": "tsc", + "start": "node dist/index.js", + "dev": "bun run src/index.ts", + "watch": "bun --watch run src/index.ts", + "apply-crds": "bun --elide-lines=0 run src/scripts/apply-crds.ts" + }, + "dependencies": { + "@kubernetes/client-node": "^0.18.0", + "@minikura/db": "workspace:*", + "dotenv-mono": "^1.3.11", + "node-fetch": "^3.3.2" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "ts-node": "^10.9.1", + "ts-node-dev": "^2.0.0", + "typescript": "^5.0.0" + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/config/constants.ts b/packages/k8s-operator/src/config/constants.ts new file mode 100644 index 0000000..c2b977e --- /dev/null +++ b/packages/k8s-operator/src/config/constants.ts @@ -0,0 +1,49 @@ +import { dotenvLoad } from 
"dotenv-mono"; +const dotenv = dotenvLoad(); + +export const API_GROUP = 'minikura.kirameki.cafe'; +export const API_VERSION = 'v1alpha1'; + +export const KUBERNETES_NAMESPACE_ENV = process.env.KUBERNETES_NAMESPACE; +export const NAMESPACE = process.env.KUBERNETES_NAMESPACE || 'minikura'; + +export const ENABLE_CRD_REFLECTION = process.env.ENABLE_CRD_REFLECTION === 'true'; +export const SKIP_TLS_VERIFY = process.env.KUBERNETES_SKIP_TLS_VERIFY === 'true'; + +if (SKIP_TLS_VERIFY) { + process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; +} + +// Resource types +export const RESOURCE_TYPES = { + MINECRAFT_SERVER: { + kind: 'MinecraftServer', + plural: 'minecraftservers', + singular: 'minecraftserver', + shortNames: ['mcs'], + }, + REVERSE_PROXY_SERVER: { + kind: 'ReverseProxyServer', + plural: 'reverseproxyservers', + singular: 'reverseproxyserver', + shortNames: ['rps'], + }, +}; + +// Kubernetes resource label prefixes +export const LABEL_PREFIX = 'minikura.kirameki.cafe'; + +// Polling intervals (in milliseconds) +export const SYNC_INTERVAL = 30 * 1000; // 30 seconds + +export const IMAGES = { + MINECRAFT: 'itzg/minecraft-server', + REVERSE_PROXY: 'itzg/minecraft-server', +}; + +export const DEFAULTS = { + MEMORY: '1G', + CPU_REQUEST: '250m', + CPU_LIMIT: '1000m', + STORAGE_SIZE: '1Gi', +}; diff --git a/packages/k8s-operator/src/controllers/base-controller.ts b/packages/k8s-operator/src/controllers/base-controller.ts new file mode 100644 index 0000000..9bb6267 --- /dev/null +++ b/packages/k8s-operator/src/controllers/base-controller.ts @@ -0,0 +1,57 @@ +import { PrismaClient } from '@minikura/db'; +import { KubernetesClient } from '../utils/k8s-client'; +import { SYNC_INTERVAL } from '../config/constants'; + +export abstract class BaseController { + protected prisma: PrismaClient; + protected k8sClient: KubernetesClient; + protected namespace: string; + private intervalId: ReturnType | null = null; + + constructor(prisma: PrismaClient, namespace: string) { + this.prisma 
= prisma; + this.k8sClient = KubernetesClient.getInstance(); + this.namespace = namespace; + } + + /** + * Start watching for changes in the database and syncing to Kubernetes + */ + public startWatching(): void { + console.log(`Starting to watch for changes in ${this.getControllerName()}...`); + + // Initial sync + this.syncResources().catch(err => { + console.error(`Error during initial sync of ${this.getControllerName()}:`, err); + }); + + // Polling interval for changes + // TODO: Maybe there's a better way to do this + this.intervalId = setInterval(() => { + this.syncResources().catch(err => { + console.error(`Error syncing ${this.getControllerName()}:`, err); + }); + }, SYNC_INTERVAL); + } + + /** + * Stop watching for changes + */ + public stopWatching(): void { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = null; + console.log(`Stopped watching for changes in ${this.getControllerName()}`); + } + } + + /** + * Get a name for this controller for logging purposes + */ + protected abstract getControllerName(): string; + + /** + * Sync resources from database to Kubernetes + */ + protected abstract syncResources(): Promise; +} \ No newline at end of file diff --git a/packages/k8s-operator/src/controllers/reverse-proxy-controller.ts b/packages/k8s-operator/src/controllers/reverse-proxy-controller.ts new file mode 100644 index 0000000..292e7b8 --- /dev/null +++ b/packages/k8s-operator/src/controllers/reverse-proxy-controller.ts @@ -0,0 +1,113 @@ +import { PrismaClient } from '@minikura/db'; +import type { ReverseProxyServer, CustomEnvironmentVariable } from '@minikura/db'; +import { BaseController } from './base-controller'; +import type { ReverseProxyConfig } from '../types'; +import { createReverseProxyServer, deleteReverseProxyServer } from '../resources/reverseProxyServer'; + +type ReverseProxyWithEnvVars = ReverseProxyServer & { + env_variables: CustomEnvironmentVariable[]; +}; + +export class ReverseProxyController extends 
BaseController { + private deployedProxies = new Map(); + + constructor(prisma: PrismaClient, namespace: string) { + super(prisma, namespace); + } + + protected getControllerName(): string { + return 'ReverseProxyController'; + } + + protected async syncResources(): Promise { + try { + const appsApi = this.k8sClient.getAppsApi(); + const coreApi = this.k8sClient.getCoreApi(); + const networkingApi = this.k8sClient.getNetworkingApi(); + + const proxies = await this.prisma.reverseProxyServer.findMany({ + include: { + env_variables: true, + } + }) as ReverseProxyWithEnvVars[]; + + const currentProxyIds = new Set(proxies.map(proxy => proxy.id)); + + // Delete reverse proxy servers that are no longer in the database + for (const [proxyId, proxy] of this.deployedProxies.entries()) { + if (!currentProxyIds.has(proxyId)) { + console.log(`Reverse proxy server ${proxy.id} (${proxyId}) has been removed from the database, deleting from Kubernetes...`); + await deleteReverseProxyServer(proxy.id, proxy.type, appsApi, coreApi, this.namespace); + this.deployedProxies.delete(proxyId); + } + } + + // Create or update reverse proxy servers that are in the database + for (const proxy of proxies) { + const deployedProxy = this.deployedProxies.get(proxy.id); + + // If proxy doesn't exist yet or has been updated + if (!deployedProxy || this.hasProxyChanged(deployedProxy, proxy)) { + console.log(`${!deployedProxy ? 
'Creating' : 'Updating'} reverse proxy server ${proxy.id} (${proxy.id}) in Kubernetes...`); + + const proxyConfig: ReverseProxyConfig = { + id: proxy.id, + external_address: proxy.external_address, + external_port: proxy.external_port, + listen_port: proxy.listen_port, + description: proxy.description, + apiKey: proxy.api_key, + type: proxy.type, + memory: proxy.memory, + env_variables: proxy.env_variables?.map(ev => ({ + key: ev.key, + value: ev.value + })) + }; + + await createReverseProxyServer( + proxyConfig, + appsApi, + coreApi, + networkingApi, + this.namespace + ); + + // Update cache + this.deployedProxies.set(proxy.id, { ...proxy }); + } + } + } catch (error) { + console.error('Error syncing reverse proxy servers:', error); + throw error; + } + } + + private hasProxyChanged( + oldProxy: ReverseProxyWithEnvVars, + newProxy: ReverseProxyWithEnvVars + ): boolean { + // Check basic properties + const basicPropsChanged = + oldProxy.external_address !== newProxy.external_address || + oldProxy.external_port !== newProxy.external_port || + oldProxy.listen_port !== newProxy.listen_port || + oldProxy.description !== newProxy.description; + + if (basicPropsChanged) return true; + + // Check if environment variables have changed + const oldEnvVars = oldProxy.env_variables || []; + const newEnvVars = newProxy.env_variables || []; + + if (oldEnvVars.length !== newEnvVars.length) return true; + for (const newEnv of newEnvVars) { + const oldEnv = oldEnvVars.find(e => e.key === newEnv.key); + if (!oldEnv || oldEnv.value !== newEnv.value) { + return true; + } + } + + return false; + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/controllers/server-controller.ts b/packages/k8s-operator/src/controllers/server-controller.ts new file mode 100644 index 0000000..f3ca6f2 --- /dev/null +++ b/packages/k8s-operator/src/controllers/server-controller.ts @@ -0,0 +1,113 @@ +import { PrismaClient, ServerType } from '@minikura/db'; +import type { Server, 
CustomEnvironmentVariable } from '@minikura/db'; +import { BaseController } from './base-controller'; +import type { ServerConfig } from '../types'; +import { createServer, deleteServer } from '../resources/server'; + +type ServerWithEnvVars = Server & { + env_variables: CustomEnvironmentVariable[]; +}; + +export class ServerController extends BaseController { + private deployedServers = new Map(); + + constructor(prisma: PrismaClient, namespace: string) { + super(prisma, namespace); + } + + protected getControllerName(): string { + return 'ServerController'; + } + + protected async syncResources(): Promise { + try { + const appsApi = this.k8sClient.getAppsApi(); + const coreApi = this.k8sClient.getCoreApi(); + const networkingApi = this.k8sClient.getNetworkingApi(); + + const servers = await this.prisma.server.findMany({ + include: { + env_variables: true, + } + }) as ServerWithEnvVars[]; + + const currentServerIds = new Set(servers.map(server => server.id)); + + // Delete servers that are no longer in the database + for (const [serverId, server] of this.deployedServers.entries()) { + if (!currentServerIds.has(serverId)) { + console.log(`Server ${server.id} (${serverId}) has been removed from the database, deleting from Kubernetes...`); + await deleteServer(serverId, server.id, appsApi, coreApi, this.namespace); + this.deployedServers.delete(serverId); + } + } + + // Create or update servers that are in the database + for (const server of servers) { + const deployedServer = this.deployedServers.get(server.id); + + // If server doesn't exist yet or has been updated + if (!deployedServer || this.hasServerChanged(deployedServer, server)) { + console.log(`${!deployedServer ? 
'Creating' : 'Updating'} server ${server.id} (${server.id}) in Kubernetes...`); + + const serverConfig: ServerConfig = { + id: server.id, + type: server.type, + apiKey: server.api_key, + description: server.description, + listen_port: server.listen_port, + memory: server.memory, + env_variables: server.env_variables?.map(ev => ({ + key: ev.key, + value: ev.value + })) + }; + + await createServer( + serverConfig, + appsApi, + coreApi, + networkingApi, + this.namespace + ); + + // Update cache + this.deployedServers.set(server.id, { ...server }); + } + } + } catch (error) { + console.error('Error syncing servers:', error); + throw error; + } + } + + private hasServerChanged( + oldServer: ServerWithEnvVars, + newServer: ServerWithEnvVars + ): boolean { + // Check basic properties + const basicPropsChanged = + oldServer.type !== newServer.type || + oldServer.listen_port !== newServer.listen_port || + oldServer.description !== newServer.description; + + if (basicPropsChanged) return true; + + // Check if environment variables have changed + const oldEnvVars = oldServer.env_variables || []; + const newEnvVars = newServer.env_variables || []; + + // Check if the number of env vars has changed + if (oldEnvVars.length !== newEnvVars.length) return true; + + // Check if any of the existing env vars have changed + for (const newEnv of newEnvVars) { + const oldEnv = oldEnvVars.find(e => e.key === newEnv.key); + if (!oldEnv || oldEnv.value !== newEnv.value) { + return true; + } + } + + return false; + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/crds/rbac.ts b/packages/k8s-operator/src/crds/rbac.ts new file mode 100644 index 0000000..0b27d58 --- /dev/null +++ b/packages/k8s-operator/src/crds/rbac.ts @@ -0,0 +1,169 @@ +import { NAMESPACE } from '../config/constants'; + +/** + * Namespace definition + */ +export const minikuraNamespace = { + apiVersion: 'v1', + kind: 'Namespace', + metadata: { + name: NAMESPACE, + }, +}; + +/** + * Service account + */ 
+export const minikuraServiceAccount = { + apiVersion: 'v1', + kind: 'ServiceAccount', + metadata: { + name: 'minikura-operator', + namespace: NAMESPACE, + }, +}; + +/** + * Cluster role + */ +export const minikuraClusterRole = { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRole', + metadata: { + name: 'minikura-operator-role', + }, + rules: [ + { + apiGroups: [''], + resources: ['configmaps', 'services', 'secrets'], + verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'], + }, + { + apiGroups: ['apps'], + resources: ['deployments', 'statefulsets'], + verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'], + }, + { + apiGroups: ['networking.k8s.io'], + resources: ['ingresses'], + verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'], + }, + { + apiGroups: ['apiextensions.k8s.io'], + resources: ['customresourcedefinitions'], + verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'], + }, + { + apiGroups: ['minikura.kirameki.cafe'], + resources: ['minecraftservers', 'velocityproxies'], + verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'], + }, + { + apiGroups: ['minikura.kirameki.cafe'], + resources: ['minecraftservers/status', 'velocityproxies/status'], + verbs: ['get', 'update', 'patch'], + }, + ], +}; + +/** + * Cluster role binding + */ +export const minikuraClusterRoleBinding = { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRoleBinding', + metadata: { + name: 'minikura-operator-role-binding', + }, + subjects: [ + { + kind: 'ServiceAccount', + name: 'minikura-operator', + namespace: NAMESPACE, + }, + ], + roleRef: { + kind: 'ClusterRole', + name: 'minikura-operator-role', + apiGroup: 'rbac.authorization.k8s.io', + }, +}; + +/** + * Deployment for the Minikura operator + */ +export const minikuraOperatorDeployment = { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'minikura-operator', + namespace: NAMESPACE, + }, + spec: { + 
replicas: 1, + selector: { + matchLabels: { + app: 'minikura-operator', + }, + }, + template: { + metadata: { + labels: { + app: 'minikura-operator', + }, + }, + spec: { + serviceAccountName: 'minikura-operator', + containers: [ + { + name: 'operator', + image: '${REGISTRY_URL}/minikura-operator:latest', + env: [ + { + name: 'DATABASE_URL', + valueFrom: { + secretKeyRef: { + name: 'minikura-operator-secrets', + key: 'DATABASE_URL', + }, + }, + }, + { + name: 'KUBERNETES_NAMESPACE', + value: NAMESPACE, + }, + { + name: 'USE_CRDS', + value: 'true', + }, + ], + resources: { + requests: { + memory: '256Mi', + cpu: '200m', + }, + limits: { + memory: '512Mi', + cpu: '500m', + }, + }, + livenessProbe: { + exec: { + command: ['bun', '-e', "console.log('Health check')"], + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + readinessProbe: { + exec: { + command: ['bun', '-e', "console.log('Ready check')"], + }, + initialDelaySeconds: 5, + periodSeconds: 10, + }, + }, + ], + }, + }, + }, +}; \ No newline at end of file diff --git a/packages/k8s-operator/src/crds/reverseProxy.ts b/packages/k8s-operator/src/crds/reverseProxy.ts new file mode 100644 index 0000000..247c4c7 --- /dev/null +++ b/packages/k8s-operator/src/crds/reverseProxy.ts @@ -0,0 +1,156 @@ +import { API_GROUP, API_VERSION, RESOURCE_TYPES } from '../config/constants'; + +export const REVERSE_PROXY_SERVER_CRD = { + apiVersion: 'apiextensions.k8s.io/v1', + kind: 'CustomResourceDefinition', + metadata: { + name: `${RESOURCE_TYPES.REVERSE_PROXY_SERVER.plural}.${API_GROUP}`, + }, + spec: { + group: API_GROUP, + versions: [ + { + name: API_VERSION, + served: true, + storage: true, + schema: { + openAPIV3Schema: { + type: 'object', + properties: { + spec: { + type: 'object', + required: ['id', 'external_address', 'external_port'], + properties: { + id: { + type: 'string', + pattern: '^[a-zA-Z0-9-_]+$', + description: 'ID of the reverse proxy server', + }, + description: { + type: 'string', + nullable: true, + 
description: 'Optional description of the server', + }, + external_address: { + type: 'string', + description: 'External address of the proxy server', + }, + external_port: { + type: 'integer', + minimum: 1, + maximum: 65535, + description: 'External port of the proxy server', + }, + listen_port: { + type: 'integer', + minimum: 1, + maximum: 65535, + default: 25565, + nullable: true, + description: 'Port the proxy server listens on internally', + }, + type: { + type: 'string', + enum: ['VELOCITY', 'BUNGEECORD'], + default: 'VELOCITY', + nullable: true, + description: 'Type of the reverse proxy server', + }, + memory: { + type: 'string', + default: '512M', + nullable: true, + description: 'Memory allocation for the server', + }, + environmentVariables: { + type: 'array', + nullable: true, + items: { + type: 'object', + required: ['key', 'value'], + properties: { + key: { + type: 'string', + description: 'Environment variable key', + }, + value: { + type: 'string', + description: 'Environment variable value', + }, + }, + }, + }, + }, + }, + status: { + type: 'object', + nullable: true, + properties: { + phase: { + type: 'string', + enum: ['Pending', 'Running', 'Failed'], + description: 'Current phase of the server', + }, + message: { + type: 'string', + nullable: true, + description: 'Detailed message about the current status', + }, + apiKey: { + type: 'string', + nullable: true, + description: 'API key for server communication', + }, + internalId: { + type: 'string', + nullable: true, + description: 'Internal ID assigned by Minikura', + }, + lastSyncedAt: { + type: 'string', + nullable: true, + description: 'Last time the server was synced with Kubernetes', + }, + }, + }, + }, + }, + }, + additionalPrinterColumns: [ + { + name: 'Type', + type: 'string', + jsonPath: '.spec.type', + }, + { + name: 'External Address', + type: 'string', + jsonPath: '.spec.external_address', + }, + { + name: 'External Port', + type: 'integer', + jsonPath: '.spec.external_port', + }, + { 
+ name: 'Status', + type: 'string', + jsonPath: '.status.phase', + }, + { + name: 'Age', + type: 'date', + jsonPath: '.metadata.creationTimestamp', + }, + ], + }, + ], + scope: 'Namespaced', + names: { + singular: RESOURCE_TYPES.REVERSE_PROXY_SERVER.singular, + plural: RESOURCE_TYPES.REVERSE_PROXY_SERVER.plural, + kind: RESOURCE_TYPES.REVERSE_PROXY_SERVER.kind, + shortNames: RESOURCE_TYPES.REVERSE_PROXY_SERVER.shortNames, + }, + }, +}; \ No newline at end of file diff --git a/packages/k8s-operator/src/crds/server.ts b/packages/k8s-operator/src/crds/server.ts new file mode 100644 index 0000000..9474c8f --- /dev/null +++ b/packages/k8s-operator/src/crds/server.ts @@ -0,0 +1,132 @@ +import { API_GROUP, API_VERSION, RESOURCE_TYPES } from '../config/constants'; + +export const MINECRAFT_SERVER_CRD = { + apiVersion: 'apiextensions.k8s.io/v1', + kind: 'CustomResourceDefinition', + metadata: { + name: `${RESOURCE_TYPES.MINECRAFT_SERVER.plural}.${API_GROUP}`, + }, + spec: { + group: API_GROUP, + versions: [ + { + name: API_VERSION, + served: true, + storage: true, + schema: { + openAPIV3Schema: { + type: 'object', + properties: { + spec: { + type: 'object', + required: ['id', 'type', 'listen_port'], + properties: { + id: { + type: 'string', + pattern: '^[a-zA-Z0-9-_]+$', + description: 'ID of the Minecraft server', + }, + description: { + type: 'string', + nullable: true, + description: 'Optional description of the server', + }, + listen_port: { + type: 'integer', + minimum: 1, + maximum: 65535, + description: 'Port the server listens on', + }, + type: { + type: 'string', + enum: ['STATEFUL', 'STATELESS'], + description: 'Type of the server', + }, + memory: { + type: 'string', + nullable: true, + default: '1G', + description: 'Memory allocation for the server', + }, + environmentVariables: { + type: 'array', + nullable: true, + items: { + type: 'object', + required: ['key', 'value'], + properties: { + key: { + type: 'string', + description: 'Environment variable key', + }, 
+ value: { + type: 'string', + description: 'Environment variable value', + }, + }, + }, + }, + }, + }, + status: { + type: 'object', + nullable: true, + properties: { + phase: { + type: 'string', + enum: ['Pending', 'Running', 'Failed'], + description: 'Current phase of the server', + }, + message: { + type: 'string', + nullable: true, + description: 'Detailed message about the current status', + }, + apiKey: { + type: 'string', + nullable: true, + description: 'API key for server communication', + }, + internalId: { + type: 'string', + nullable: true, + description: 'Internal ID assigned by Minikura', + }, + lastSyncedAt: { + type: 'string', + nullable: true, + description: 'Last time the server was synced with Kubernetes', + }, + }, + }, + }, + }, + }, + additionalPrinterColumns: [ + { + name: 'Type', + type: 'string', + jsonPath: '.spec.type', + }, + { + name: 'Status', + type: 'string', + jsonPath: '.status.phase', + }, + { + name: 'Age', + type: 'date', + jsonPath: '.metadata.creationTimestamp', + }, + ], + }, + ], + scope: 'Namespaced', + names: { + singular: RESOURCE_TYPES.MINECRAFT_SERVER.singular, + plural: RESOURCE_TYPES.MINECRAFT_SERVER.plural, + kind: RESOURCE_TYPES.MINECRAFT_SERVER.kind, + shortNames: RESOURCE_TYPES.MINECRAFT_SERVER.shortNames, + }, + }, +}; \ No newline at end of file diff --git a/packages/k8s-operator/src/index.ts b/packages/k8s-operator/src/index.ts new file mode 100644 index 0000000..7da6edd --- /dev/null +++ b/packages/k8s-operator/src/index.ts @@ -0,0 +1,70 @@ +import { dotenvLoad } from "dotenv-mono"; +const dotenv = dotenvLoad(); + +import { NAMESPACE, KUBERNETES_NAMESPACE_ENV, ENABLE_CRD_REFLECTION } from './config/constants'; +import { prisma } from "@minikura/db"; +import { KubernetesClient } from './utils/k8s-client'; +import { ServerController } from './controllers/server-controller'; +import { ReverseProxyController } from './controllers/reverse-proxy-controller'; +import { setupCRDRegistration } from 
'./utils/crd-registrar'; + +async function main() { + console.log('Starting Minikura Kubernetes Operator...'); + console.log(`Using namespace: ${NAMESPACE}`); + + try { + const k8sClient = KubernetesClient.getInstance(); + console.log('Connected to Kubernetes cluster'); + + const serverController = new ServerController(prisma, NAMESPACE); + const reverseProxyController = new ReverseProxyController(prisma, NAMESPACE); + + serverController.startWatching(); + reverseProxyController.startWatching(); + + if (ENABLE_CRD_REFLECTION) { + console.log('CRD reflection enabled - will create CRDs to reflect database state'); + try { + await setupCRDRegistration(prisma, k8sClient, NAMESPACE); + } catch (error: any) { + console.error(`Failed to set up CRD registration: ${error.message}`); + if (error.response) { + console.error(`Response status: ${error.response.statusCode}`); + console.error(`Response body: ${JSON.stringify(error.response.body)}`); + } + console.error('Continuing operation without CRD reflection'); + console.log('Kubernetes resources will still be created/updated, but CRD reflection is disabled'); + } + } + + console.log('Minikura Kubernetes Operator is running'); + + process.on('SIGINT', gracefulShutdown); + process.on('SIGTERM', gracefulShutdown); + + function gracefulShutdown() { + console.log('Shutting down operator gracefully...'); + serverController.stopWatching(); + reverseProxyController.stopWatching(); + prisma.$disconnect(); + console.log('Resources released, exiting...'); + process.exit(0); + } + + } catch (error: any) { + console.error(`Failed to start Minikura Kubernetes Operator: ${error.message}`); + if (error.response) { + console.error(`Response status: ${error.response.statusCode}`); + console.error(`Response body: ${JSON.stringify(error.response.body)}`); + } + if (error.stack) { + console.error(`Stack trace: ${error.stack}`); + } + process.exit(1); + } +} + +main().catch(error => { + console.error('Unhandled error:', error); + 
process.exit(1); +}); \ No newline at end of file diff --git a/packages/k8s-operator/src/resources/reverseProxyServer.ts b/packages/k8s-operator/src/resources/reverseProxyServer.ts new file mode 100644 index 0000000..73a5816 --- /dev/null +++ b/packages/k8s-operator/src/resources/reverseProxyServer.ts @@ -0,0 +1,222 @@ +import * as k8s from '@kubernetes/client-node'; +import { ReverseProxyServerType } from '@minikura/db'; +import { LABEL_PREFIX } from '../config/constants'; +import { calculateJavaMemory, convertToK8sFormat } from '../utils/memory'; +import type { ReverseProxyConfig } from '../types'; + +export async function createReverseProxyServer( + server: ReverseProxyConfig, + appsApi: k8s.AppsV1Api, + coreApi: k8s.CoreV1Api, + networkingApi: k8s.NetworkingV1Api, + namespace: string +): Promise { + console.log(`Creating reverse proxy server ${server.id} in namespace '${namespace}'`); + + const serverType = server.type.toLowerCase(); + const serverName = `${serverType}-${server.id}`; + + const configMap = { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { + name: `${serverName}-config`, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: serverType, + [`${LABEL_PREFIX}/proxy-id`]: server.id, + } + }, + data: { + 'minikura-api-key': server.apiKey, + } + }; + + try { + await coreApi.createNamespacedConfigMap(namespace, configMap); + console.log(`Created ConfigMap for reverse proxy server ${server.id}`); + } catch (error: any) { + // Conflict, update it + if (error.response?.statusCode === 409) { + await coreApi.replaceNamespacedConfigMap(`${serverName}-config`, namespace, configMap); + console.log(`Updated ConfigMap for reverse proxy server ${server.id}`); + } else { + throw error; + } + } + + // Create Service for the reverse proxy - Always LoadBalancer for now + const service = { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: serverName, + namespace: namespace, + labels: { + app: serverName, + 
[`${LABEL_PREFIX}/server-type`]: serverType, + [`${LABEL_PREFIX}/proxy-id`]: server.id, + } + }, + spec: { + selector: { + app: serverName, + }, + ports: [ + { + port: server.external_port, + targetPort: server.listen_port, + protocol: 'TCP', + name: 'minecraft', + } + ], + type: 'LoadBalancer', + } + }; + + try { + await coreApi.createNamespacedService(namespace, service); + console.log(`Created Service for reverse proxy server ${server.id}`); + } catch (error: any) { + // Conflict, update it + if (error.response?.statusCode === 409) { + await coreApi.replaceNamespacedService(serverName, namespace, service); + console.log(`Updated Service for reverse proxy server ${server.id}`); + } else { + throw error; + } + } + + // Create Deployment + const deployment = { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: serverName, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: serverType, + [`${LABEL_PREFIX}/proxy-id`]: server.id, + } + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: serverName, + } + }, + template: { + metadata: { + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: serverType, + [`${LABEL_PREFIX}/proxy-id`]: server.id, + } + }, + spec: { + containers: [ + { + name: serverType, + image: 'itzg/mc-proxy:latest', + ports: [ + { + containerPort: server.listen_port, + name: 'minecraft', + } + ], + env: [ + { + name: 'TYPE', + value: server.type, + }, + { + name: 'NETWORKADDRESS_CACHE_TTL', + value: '30', + }, + { + name: 'MEMORY', + value: calculateJavaMemory(server.memory || '512M', 0.8), + }, + ...(server.env_variables || []).map(ev => ({ + name: ev.key, + value: ev.value, + })), + ], + readinessProbe: { + tcpSocket: { + port: server.listen_port, + }, + initialDelaySeconds: 30, + periodSeconds: 10, + }, + resources: { + requests: { + memory: convertToK8sFormat(server.memory || "512M"), + cpu: "250m", + }, + limits: { + memory: convertToK8sFormat(server.memory || 
"512M"), + cpu: "500m", + } + } + } + ] + } + } + } + }; + + try { + await appsApi.createNamespacedDeployment(namespace, deployment); + console.log(`Created Deployment for reverse proxy server ${server.id}`); + } catch (error: any) { + // Conflict, update it + if (error.response?.statusCode === 409) { + await appsApi.replaceNamespacedDeployment(serverName, namespace, deployment); + console.log(`Updated Deployment for reverse proxy server ${server.id}`); + } else { + throw error; + } + } +} + +export async function deleteReverseProxyServer( + proxyId: string, + proxyType: ReverseProxyServerType, + appsApi: k8s.AppsV1Api, + coreApi: k8s.CoreV1Api, + namespace: string +): Promise { + const serverType = proxyType.toLowerCase(); + const name = `${serverType}-${proxyId}`; + + try { + await appsApi.deleteNamespacedDeployment(name, namespace); + console.log(`Deleted Deployment for reverse proxy server ${proxyId}`); + } catch (error: any) { + if (error.response?.statusCode !== 404) { + console.error(`Error deleting Deployment for reverse proxy server ${proxyId}:`, error); + } + } + + try { + await coreApi.deleteNamespacedService(name, namespace); + console.log(`Deleted Service for reverse proxy server ${proxyId}`); + } catch (error: any) { + if (error.response?.statusCode !== 404) { + console.error(`Error deleting Service for reverse proxy server ${proxyId}:`, error); + } + } + + try { + await coreApi.deleteNamespacedConfigMap(`${name}-config`, namespace); + console.log(`Deleted ConfigMap for reverse proxy server ${proxyId}`); + } catch (error: any) { + if (error.response?.statusCode !== 404) { + console.error(`Error deleting ConfigMap for reverse proxy server ${proxyId}:`, error); + } + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/resources/server.ts b/packages/k8s-operator/src/resources/server.ts new file mode 100644 index 0000000..5a91664 --- /dev/null +++ b/packages/k8s-operator/src/resources/server.ts @@ -0,0 +1,411 @@ +import * as k8s from 
'@kubernetes/client-node'; +import { ServerType } from '@minikura/db'; +import { LABEL_PREFIX } from '../config/constants'; +import { calculateJavaMemory, convertToK8sFormat } from '../utils/memory'; +import type { ServerConfig } from '../types'; + +export async function createServer( + server: ServerConfig, + appsApi: k8s.AppsV1Api, + coreApi: k8s.CoreV1Api, + networkingApi: k8s.NetworkingV1Api, + namespace: string +): Promise { + const serverName = `minecraft-${server.id}`; + + const configMap = { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { + name: `${serverName}-config`, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: server.type.toLowerCase(), + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + data: { + 'server-type': server.type, + 'minikura-api-key': server.apiKey, + } + }; + + try { + await coreApi.createNamespacedConfigMap(namespace, configMap); + console.log(`Created ConfigMap for server ${server.id}`); + } catch (err: any) { + // Conflict, update it + if (err.response?.statusCode === 409) { + await coreApi.replaceNamespacedConfigMap(`${serverName}-config`, namespace, configMap); + console.log(`Updated ConfigMap for server ${server.id}`); + } else { + throw err; + } + } + + const service = { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: serverName, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: server.type.toLowerCase(), + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + spec: { + selector: { + app: serverName, + }, + ports: [ + { + port: server.listen_port, + targetPort: 25565, + protocol: 'TCP', + name: 'minecraft', + } + ], + type: 'ClusterIP', // Always ClusterIP for regular servers + } + }; + + try { + await coreApi.createNamespacedService(namespace, service); + console.log(`Created Service for server ${server.id}`); + } catch (err: any) { + // Conflict, update it + if (err.response?.statusCode === 409) { + await 
coreApi.replaceNamespacedService(serverName, namespace, service); + console.log(`Updated Service for server ${server.id}`); + } else { + throw err; + } + } + + if (server.type === ServerType.STATELESS) { + await createDeployment(serverName, server, appsApi, namespace); + } else { + await createStatefulSet(serverName, server, appsApi, namespace); + } +} + +async function createDeployment( + serverName: string, + server: ServerConfig, + appsApi: k8s.AppsV1Api, + namespace: string +): Promise { + const deployment = { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: serverName, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: 'stateless', + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: serverName, + } + }, + template: { + metadata: { + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: 'stateless', + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + spec: { + containers: [ + { + name: 'minecraft', + image: 'itzg/minecraft-server', + ports: [ + { + containerPort: 25565, + name: 'minecraft', + } + ], + env: [ + { + name: 'EULA', + value: 'TRUE', + }, + { + name: 'TYPE', + value: 'VANILLA', + }, + { + name: 'MEMORY', + value: calculateJavaMemory(server.memory || '1G', 0.8), + }, + { + name: 'OPS', + value: '', + }, + { + name: 'OVERRIDE_SERVER_PROPERTIES', + value: 'true', + }, + { + name: 'ENABLE_RCON', + value: 'false', + }, + ...(server.env_variables || []).map(ev => ({ + name: ev.key, + value: ev.value, + })), + ], + volumeMounts: [ + { + name: 'config', + mountPath: '/config', + } + ], + readinessProbe: { + tcpSocket: { + port: 25565, + }, + initialDelaySeconds: 30, + periodSeconds: 10, + }, + resources: { + requests: { + memory: convertToK8sFormat(server.memory || "1G"), + cpu: "250m", + }, + limits: { + memory: convertToK8sFormat(server.memory || "1G"), + cpu: "500m", + } + } + } + ], + volumes: [ + { + name: 'config', + 
configMap: { + name: `${serverName}-config`, + } + } + ] + } + } + } + }; + + try { + await appsApi.createNamespacedDeployment(namespace, deployment); + console.log(`Created Deployment for server ${server.id}`); + } catch (err: any) { + if (err.response?.statusCode === 409) { + // Deployment already exists, update it + await appsApi.replaceNamespacedDeployment(serverName, namespace, deployment); + console.log(`Updated Deployment for server ${server.id}`); + } else { + throw err; + } + } +} + +async function createStatefulSet( + serverName: string, + server: ServerConfig, + appsApi: k8s.AppsV1Api, + namespace: string +): Promise { + const statefulSet = { + apiVersion: 'apps/v1', + kind: 'StatefulSet', + metadata: { + name: serverName, + namespace: namespace, + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: 'stateful', + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + spec: { + serviceName: serverName, + replicas: 1, + selector: { + matchLabels: { + app: serverName, + } + }, + template: { + metadata: { + labels: { + app: serverName, + [`${LABEL_PREFIX}/server-type`]: 'stateful', + [`${LABEL_PREFIX}/server-id`]: server.id, + } + }, + spec: { + containers: [ + { + name: 'minecraft', + image: 'itzg/minecraft-server', + ports: [ + { + containerPort: 25565, + name: 'minecraft', + } + ], + env: [ + { + name: 'EULA', + value: 'TRUE', + }, + { + name: 'TYPE', + value: 'VANILLA', + }, + { + name: 'MEMORY', + value: calculateJavaMemory(server.memory || '1G', 0.8), + }, + { + name: 'OPS', + value: '', + }, + { + name: 'OVERRIDE_SERVER_PROPERTIES', + value: 'true', + }, + { + name: 'ENABLE_RCON', + value: 'false', + }, + ...(server.env_variables || []).map(ev => ({ + name: ev.key, + value: ev.value, + })), + ], + volumeMounts: [ + { + name: 'data', + mountPath: '/data', + }, + { + name: 'config', + mountPath: '/config', + } + ], + readinessProbe: { + tcpSocket: { + port: 25565, + }, + initialDelaySeconds: 60, + periodSeconds: 10, + }, + resources: { + 
requests: { + memory: convertToK8sFormat(server.memory), + cpu: "250m", + }, + limits: { + memory: convertToK8sFormat(server.memory), + cpu: "500m", + } + } + } + ], + volumes: [ + { + name: 'config', + configMap: { + name: `${serverName}-config`, + } + } + ] + } + }, + volumeClaimTemplates: [ + { + metadata: { + name: 'data', + }, + spec: { + accessModes: ['ReadWriteOnce'], + resources: { + requests: { + storage: '1Gi', + } + } + } + } + ] + } + }; + + try { + await appsApi.createNamespacedStatefulSet(namespace, statefulSet); + console.log(`Created StatefulSet for server ${server.id}`); + } catch (err: any) { + if (err.response?.statusCode === 409) { + // StatefulSet already exists, update it + await appsApi.replaceNamespacedStatefulSet(serverName, namespace, statefulSet); + console.log(`Updated StatefulSet for server ${server.id}`); + } else { + throw err; + } + } +} + +export async function deleteServer( + serverId: string, + serverId2: string, + appsApi: k8s.AppsV1Api, + coreApi: k8s.CoreV1Api, + namespace: string +): Promise { + const serverName = `minecraft-${serverId2}`; + + try { + await appsApi.deleteNamespacedDeployment(serverName, namespace); + console.log(`Deleted Deployment for server ${serverName}`); + } catch (err: any) { + if (err.response?.statusCode !== 404) { + console.error(`Error deleting Deployment for server ${serverName}:`, err); + } + } + + try { + await appsApi.deleteNamespacedStatefulSet(serverName, namespace); + console.log(`Deleted StatefulSet for server ${serverName}`); + } catch (err: any) { + if (err.response?.statusCode !== 404) { + console.error(`Error deleting StatefulSet for server ${serverName}:`, err); + } + } + + try { + await coreApi.deleteNamespacedService(serverName, namespace); + console.log(`Deleted Service for server ${serverName}`); + } catch (err: any) { + if (err.response?.statusCode !== 404) { + console.error(`Error deleting Service for server ${serverName}:`, err); + } + } + + try { + await 
coreApi.deleteNamespacedConfigMap(`${serverName}-config`, namespace); + console.log(`Deleted ConfigMap for server ${serverName}`); + } catch (err: any) { + if (err.response?.statusCode !== 404) { + console.error(`Error deleting ConfigMap for server ${serverName}:`, err); + } + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/scripts/apply-crds.ts b/packages/k8s-operator/src/scripts/apply-crds.ts new file mode 100644 index 0000000..f33002d --- /dev/null +++ b/packages/k8s-operator/src/scripts/apply-crds.ts @@ -0,0 +1,37 @@ +import { KubernetesClient } from '../utils/k8s-client'; +import { registerRBACResources } from '../utils/rbac-registrar'; +import { setupCRDRegistration } from '../utils/crd-registrar'; +import { NAMESPACE } from '../config/constants'; +import { PrismaClient } from '@minikura/db'; +import { dotenvLoad } from 'dotenv-mono'; + +dotenvLoad(); + +async function main() { + console.log('Starting to apply TypeScript-defined CRDs to Kubernetes cluster...'); + + try { + const k8sClient = KubernetesClient.getInstance(); + console.log(`Connected to Kubernetes cluster, using namespace: ${NAMESPACE}`); + + await registerRBACResources(k8sClient); + + console.log('Registering Custom Resource Definitions...'); + const prisma = new PrismaClient(); + await setupCRDRegistration(prisma, k8sClient, NAMESPACE); + + console.log('Successfully applied all resources to Kubernetes cluster'); + process.exit(0); + } catch (error: any) { + console.error('Failed to apply resources:', error.message); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +} + +main().catch(error => { + console.error('Unhandled error:', error); + process.exit(1); +}); \ No newline at end of file diff --git a/packages/k8s-operator/src/types/index.ts b/packages/k8s-operator/src/types/index.ts new file mode 100644 index 0000000..605f659 --- /dev/null +++ b/packages/k8s-operator/src/types/index.ts @@ -0,0 +1,76 @@ +import type { + ServerType, + 
// NOTE(review): generic type arguments in this file were stripped by the text
// capture (e.g. "Record", "Pick &", "Array>"). They are reconstructed below
// from usage elsewhere in the package; spots where the key list is inferred
// rather than visible carry a TODO — confirm against the original source.

/** Minimal shape shared by every custom resource the operator emits. */
export interface CustomResource {
  apiVersion: string;
  kind: string;
  metadata: {
    name: string;
    namespace?: string;
    labels?: Record<string, string>;
    annotations?: Record<string, string>;
    [key: string]: any;
  };
}

/**
 * Server row projection handed to the resource builders, plus the API key
 * (which is deliberately kept out of the CR-facing spec).
 */
// TODO confirm: Pick key list inferred from server.ts / crd-registrar.ts usage.
export type ServerConfig = Pick<PrismaServer, 'id' | 'description' | 'listen_port' | 'type' | 'memory'> & {
  apiKey: string;
  env_variables?: Array<Pick<CustomEnvironmentVariable, 'key' | 'value'>>;
};

/** Spec section of a MinecraftServer custom resource. */
// TODO confirm: Pick key list inferred — appears to mirror ServerConfig minus apiKey.
export type MinecraftServerSpec = Pick<PrismaServer, 'id' | 'description' | 'listen_port' | 'type' | 'memory'> & {
  environmentVariables?: Array<Pick<CustomEnvironmentVariable, 'key' | 'value'>>;
};

/** Status section written by the reflector; apiKey is always '[REDACTED]'. */
export interface MinecraftServerStatus {
  phase: 'Pending' | 'Running' | 'Failed';
  message?: string;
  apiKey?: string;
  // Database ID backing this CR (used to map CRs back to rows during sync).
  internalId?: string;
  // ISO timestamp of the last reflector pass.
  lastSyncedAt?: string;
}

export interface MinecraftServerCRD extends CustomResource {
  spec: MinecraftServerSpec;
  status?: MinecraftServerStatus;
}

// Reverse Proxy Types

/** Reverse proxy row projection plus API key (this key list is visible in the capture). */
export type ReverseProxyConfig = Pick<
  PrismaReverseProxyServer,
  'id' | 'description' | 'external_address' | 'external_port' | 'listen_port' | 'type' | 'memory'
> & {
  apiKey: string;
  env_variables?: Array<Pick<CustomEnvironmentVariable, 'key' | 'value'>>;
};

/** Spec section of a ReverseProxyServer CR; only id/address/port are required. */
// TODO confirm: inner Pick key list inferred from crd-registrar.ts usage.
export type ReverseProxyServerSpec = Partial<
  Pick<PrismaReverseProxyServer, 'description' | 'listen_port' | 'type' | 'memory'>
> & {
  id: string;
  external_address: string;
  external_port: number;
  environmentVariables?: Array<Pick<CustomEnvironmentVariable, 'key' | 'value'>>;
};

/** Status section written by the reflector; apiKey is always '[REDACTED]'. */
export interface ReverseProxyServerStatus {
  phase: 'Pending' | 'Running' | 'Failed';
  message?: string;
  apiKey?: string;
  internalId?: string;
  lastSyncedAt?: string;
}

export interface ReverseProxyServerCRD extends CustomResource {
  spec: ReverseProxyServerSpec;
  status?: ReverseProxyServerStatus;
}

/** Environment variable key/value pair as stored in the database. */
// TODO confirm: key list inferred.
export type EnvironmentVariable = Pick<CustomEnvironmentVariable, 'key' | 'value'>;
CustomEnvironmentVariable } from '@minikura/db'; +import { KubernetesClient } from './k8s-client'; +import { API_GROUP, API_VERSION, LABEL_PREFIX } from '../config/constants'; +import { MINECRAFT_SERVER_CRD } from '../crds/server'; +import { REVERSE_PROXY_SERVER_CRD } from '../crds/reverseProxy'; + +/** + * Sets up the CRD registration and starts a reflector to sync database state to CRDs + */ +export async function setupCRDRegistration( + prisma: PrismaClient, + k8sClient: KubernetesClient, + namespace: string +): Promise { + await registerCRDs(k8sClient); + await startCRDReflector(prisma, k8sClient, namespace); +} + +/** + * Registers the CRDs with the Kubernetes API + */ +async function registerCRDs(k8sClient: KubernetesClient): Promise { + try { + const apiExtensionsClient = k8sClient.getApiExtensionsApi(); + + console.log('Registering CRDs...'); + + try { + await apiExtensionsClient.createCustomResourceDefinition(MINECRAFT_SERVER_CRD); + console.log(`MinecraftServer CRD created successfully (${API_GROUP}/${API_VERSION})`); + } catch (error: any) { + if (error.response?.statusCode === 409) { + // TODO: Handle conflict + console.log('MinecraftServer CRD already exists'); + } else { + console.error('Error creating MinecraftServer CRD:', error); + } + } + + try { + await apiExtensionsClient.createCustomResourceDefinition(REVERSE_PROXY_SERVER_CRD); + console.log(`ReverseProxyServer CRD created successfully (${API_GROUP}/${API_VERSION})`); + } catch (error: any) { + if (error.response?.statusCode === 409) { + // TODO: Handle conflict + console.log('ReverseProxyServer CRD already exists'); + } else { + console.error('Error creating ReverseProxyServer CRD:', error); + } + } + } catch (error) { + console.error('Error registering CRDs:', error); + throw error; + } +} + +/** + * Starts a reflector to sync database state to CRDs + */ +async function startCRDReflector( + prisma: PrismaClient, + k8sClient: KubernetesClient, + namespace: string +): Promise { + const 
customObjectsApi = k8sClient.getCustomObjectsApi(); + + // Keep track which server IDs have corresponding CRs + const reflectedMinecraftServers = new Map(); // DB ID -> CR name + const reflectedReverseProxyServers = new Map(); // DB ID -> CR name + + console.log('Starting CRD reflector...'); + + // Initial sync to create CRs that reflect the DB state + await syncDBtoCRDs(prisma, customObjectsApi, namespace, + reflectedMinecraftServers, reflectedReverseProxyServers); + + // Polling interval to check for changes in the DB + // TODO: Make this listener instead + setInterval(async () => { + await syncDBtoCRDs(prisma, customObjectsApi, namespace, + reflectedMinecraftServers, reflectedReverseProxyServers); + }, 30 * 1000); +} + +/** + * Synchronizes database state to CRDs + */ +async function syncDBtoCRDs( + prisma: PrismaClient, + customObjectsApi: any, + namespace: string, + reflectedMinecraftServers: Map, + reflectedReverseProxyServers: Map +): Promise { + try { + console.log(`[${new Date().toISOString()}] Starting CRD sync operation...`); + await syncMinecraftServers(prisma, customObjectsApi, namespace, reflectedMinecraftServers); + await syncReverseProxyServers(prisma, customObjectsApi, namespace, reflectedReverseProxyServers); + console.log(`[${new Date().toISOString()}] CRD sync operation completed`); + } catch (error) { + console.error(`[${new Date().toISOString()}] Error syncing database to CRDs:`, error); + } +} + +/** + * Synchronizes Minecraft server objects from the database to CRDs + */ +async function syncMinecraftServers( + prisma: PrismaClient, + customObjectsApi: any, + namespace: string, + reflectedMinecraftServers: Map +): Promise { + try { + const servers = await prisma.server.findMany(); + + let existingCRs: any[] = []; + try { + const response = await customObjectsApi.listNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'minecraftservers' + ); + existingCRs = (response.body as any).items || []; + } catch (error) { + 
console.error('Error listing MinecraftServer CRs:', error); + // TODO: Potentially better error handling here + // For now, continue anyway - it might just be that none exist yet + } + + // Map CR names to their corresponding DB IDs + const existingCRMap = new Map(); + // Map of CR names to their resourceVersions for updates + const crResourceVersions = new Map(); + + for (const cr of existingCRs) { + const internalId = cr.status?.internalId; + if (internalId) { + existingCRMap.set(internalId, cr.metadata.name); + // Store the resourceVersion for later + if (cr.metadata?.resourceVersion) { + crResourceVersions.set(cr.metadata.name, cr.metadata.resourceVersion); + } + } + } + + // Refresh tracking map + reflectedMinecraftServers.clear(); + + // Create or update CRs for each server + for (const server of servers) { + const crName = existingCRMap.get(server.id) || `${server.id.toLowerCase()}`; + + // Build the CR object + const serverCR: { + apiVersion: string, + kind: string, + metadata: { + name: string, + namespace: string, + annotations: Record, + resourceVersion?: string + }, + spec: any, + status: any + } = { + apiVersion: `${API_GROUP}/${API_VERSION}`, + kind: 'MinecraftServer', + metadata: { + name: crName, + namespace: namespace, + annotations: { + [`${LABEL_PREFIX}/database-managed`]: 'true', + [`${LABEL_PREFIX}/last-synced`]: new Date().toISOString() + } + }, + spec: { + id: server.id, + description: server.description, + listen_port: server.listen_port, + type: server.type, + memory: server.memory + }, + status: { + phase: 'Running', + message: 'Managed by database', + internalId: server.id, + apiKey: '[REDACTED]', // Don't expose actual API key + lastSyncedAt: new Date().toISOString() + } + }; + + try { + if (existingCRMap.has(server.id)) { + // Update existing CR + + // Get the current resource first + const crName = existingCRMap.get(server.id)!; + + try { + // Get the existing resource to get the current resourceVersion + const existingResource = await 
customObjectsApi.getNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'minecraftservers', + crName + ); + + // Extract the resourceVersion from the existing resource + if (existingResource?.body?.metadata?.resourceVersion) { + // Add the resourceVersion to our custom resource + serverCR.metadata.resourceVersion = existingResource.body.metadata.resourceVersion; + } + + // Now update with the correct resourceVersion + await customObjectsApi.replaceNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'minecraftservers', + crName, + serverCR + ); + console.log(`Updated MinecraftServer CR ${crName} for server ${server.id}`); + } catch (error) { + console.error(`Error getting/updating MinecraftServer CR for ${server.id}:`, error); + } + } else { + // Create new CR + await customObjectsApi.createNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'minecraftservers', + serverCR + ); + console.log(`Created MinecraftServer CR ${crName} for server ${server.id}`); + } + + // Remember this mapping + reflectedMinecraftServers.set(server.id, crName); + } catch (error) { + console.error(`Error creating/updating MinecraftServer CR for ${server.id}:`, error); + } + } + + // Delete CRs for servers that no longer exist + for (const [dbId, crName] of existingCRMap.entries()) { + if (!servers.some(s => s.id === dbId)) { + try { + await customObjectsApi.deleteNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'minecraftservers', + crName + ); + console.log(`Deleted MinecraftServer CR ${crName} for removed server ID ${dbId}`); + } catch (error) { + console.error(`Error deleting MinecraftServer CR ${crName}:`, error); + } + } + } + } catch (error) { + console.error('Error syncing Minecraft servers to CRDs:', error); + } +} + +/** + * Synchronizes Reverse Proxy server objects from the database to CRDs + */ +async function syncReverseProxyServers( + prisma: PrismaClient, + customObjectsApi: any, + namespace: string, + 
reflectedReverseProxyServers: Map +): Promise { + try { + const proxies = await prisma.reverseProxyServer.findMany({ + include: { + env_variables: true + } + }); + + let existingCRs: any[] = []; + try { + const response = await customObjectsApi.listNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'reverseproxyservers' + ); + existingCRs = (response.body as any).items || []; + } catch (error) { + console.error('Error listing ReverseProxyServer CRs:', error); + // TODO: Potentially better error handling here + // For now, continue anyway - it might just be that none exist yet + } + + // Map CR names to their corresponding DB IDs + const existingCRMap = new Map(); + // Map of CR names to their resourceVersions for updates + const crResourceVersions = new Map(); + + for (const cr of existingCRs) { + const internalId = cr.status?.internalId; + if (internalId) { + existingCRMap.set(internalId, cr.metadata.name); + // Store the resourceVersion for later + if (cr.metadata?.resourceVersion) { + crResourceVersions.set(cr.metadata.name, cr.metadata.resourceVersion); + } + } + } + + // Refresh tracking map + reflectedReverseProxyServers.clear(); + + // Create or update CRs for each proxy + for (const proxy of proxies) { + const crName = existingCRMap.get(proxy.id) || `${proxy.id.toLowerCase()}`; + + // Build the CR object + const proxyCR: { + apiVersion: string, + kind: string, + metadata: { + name: string, + namespace: string, + annotations: Record, + resourceVersion?: string + }, + spec: any, + status: any + } = { + apiVersion: `${API_GROUP}/${API_VERSION}`, + kind: 'ReverseProxyServer', + metadata: { + name: crName, + namespace: namespace, + annotations: { + [`${LABEL_PREFIX}/database-managed`]: 'true', + [`${LABEL_PREFIX}/last-synced`]: new Date().toISOString() + } + }, + spec: { + id: proxy.id, + description: proxy.description, + external_address: proxy.external_address, + external_port: proxy.external_port, + listen_port: proxy.listen_port, + type: 
proxy.type, + memory: proxy.memory, + environmentVariables: proxy.env_variables?.map(ev => ({ + key: ev.key, + value: ev.value + })) + }, + status: { + phase: 'Running', + message: 'Managed by database', + internalId: proxy.id, + apiKey: '[REDACTED]', // Don't expose actual API key + lastSyncedAt: new Date().toISOString() + } + }; + + try { + if (existingCRMap.has(proxy.id)) { + // Update existing CR + + // Get the current resource first + const crName = existingCRMap.get(proxy.id)!; + + try { + // Get the existing resource to get the current resourceVersion + const existingResource = await customObjectsApi.getNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'reverseproxyservers', + crName + ); + + // Extract the resourceVersion from the existing resource + if (existingResource?.body?.metadata?.resourceVersion) { + // Add the resourceVersion to our custom resource + proxyCR.metadata.resourceVersion = existingResource.body.metadata.resourceVersion; + } + + // Now update with the correct resourceVersion + await customObjectsApi.replaceNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'reverseproxyservers', + crName, + proxyCR + ); + console.log(`Updated ReverseProxyServer CR ${crName} for proxy ${proxy.id}`); + } catch (error) { + console.error(`Error getting/updating ReverseProxyServer CR for ${proxy.id}:`, error); + } + } else { + // Create new CR + await customObjectsApi.createNamespacedCustomObject( + API_GROUP, + API_VERSION, + namespace, + 'reverseproxyservers', + proxyCR + ); + console.log(`Created ReverseProxyServer CR ${crName} for proxy ${proxy.id}`); + } + + // Remember this mapping + reflectedReverseProxyServers.set(proxy.id, crName); + } catch (error) { + console.error(`Error creating/updating ReverseProxyServer CR for ${proxy.id}:`, error); + } + } + + // Delete CRs for proxies that no longer exist + for (const [dbId, crName] of existingCRMap.entries()) { + if (!proxies.some(p => p.id === dbId)) { + try { + await 
/**
 * Singleton wrapper around the @kubernetes/client-node configuration.
 *
 * Loads kubeconfig from the default location first (developer machines),
 * falls back to the in-cluster service-account config (pods), and exposes
 * the typed API clients the operator uses.
 */
export class KubernetesClient {
  private static instance: KubernetesClient;
  private kc: k8s.KubeConfig;
  // Definite assignment (!): all clients are populated by initializeClients(),
  // which the constructor always calls.
  private appsApi!: k8s.AppsV1Api;
  private coreApi!: k8s.CoreV1Api;
  private networkingApi!: k8s.NetworkingV1Api;
  private customObjectsApi!: k8s.CustomObjectsApi;
  private apiExtensionsApi!: k8s.ApiextensionsV1Api;

  private constructor() {
    if (SKIP_TLS_VERIFY) {
      // NOTE(review): config/constants.ts also sets this env var when
      // SKIP_TLS_VERIFY is true — presumably duplicated defensively; confirm.
      console.log('Disabling TLS certificate validation');
      process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
    }

    this.kc = new k8s.KubeConfig();
    this.setupConfig();
    this.initializeClients();
  }

  /** Returns the process-wide singleton, constructing it on first use. */
  static getInstance(): KubernetesClient {
    if (!KubernetesClient.instance) {
      KubernetesClient.instance = new KubernetesClient();
    }
    return KubernetesClient.instance;
  }

  /**
   * Loads kubeconfig: default location first, then in-cluster config.
   * @throws when neither source yields a current context.
   */
  private setupConfig(): void {
    try {
      this.kc.loadFromDefault();
      console.log('Loaded Kubernetes config from default location');
    } catch (err) {
      console.warn('Failed to load Kubernetes config from default location:', err);
    }

    // Running in a cluster, try to load in-cluster config
    if (!this.kc.getCurrentContext()) {
      try {
        this.kc.loadFromCluster();
        console.log('Loaded Kubernetes config from cluster');
      } catch (err) {
        console.warn('Failed to load Kubernetes config from cluster:', err);
      }
    }

    if (!this.kc.getCurrentContext()) {
      throw new Error('Failed to setup Kubernetes client - no valid configuration found');
    }

    const currentCluster = this.kc.getCurrentCluster();
    if (currentCluster) {
      console.log(`Connecting to Kubernetes server: ${currentCluster.server}`);
    }
  }

  /** Instantiates every typed API client from the loaded kubeconfig. */
  private initializeClients(): void {
    this.appsApi = this.kc.makeApiClient(k8s.AppsV1Api);
    this.coreApi = this.kc.makeApiClient(k8s.CoreV1Api);
    this.networkingApi = this.kc.makeApiClient(k8s.NetworkingV1Api);
    this.customObjectsApi = this.kc.makeApiClient(k8s.CustomObjectsApi);
    this.apiExtensionsApi = this.kc.makeApiClient(k8s.ApiextensionsV1Api);
  }

  /** Raw kubeconfig, for callers that need to sign requests manually. */
  getKubeConfig(): k8s.KubeConfig {
    return this.kc;
  }

  getAppsApi(): k8s.AppsV1Api {
    return this.appsApi;
  }

  getCoreApi(): k8s.CoreV1Api {
    return this.coreApi;
  }

  getNetworkingApi(): k8s.NetworkingV1Api {
    return this.networkingApi;
  }

  getCustomObjectsApi(): k8s.CustomObjectsApi {
    return this.customObjectsApi;
  }

  getApiExtensionsApi(): k8s.ApiextensionsV1Api {
    return this.apiExtensionsApi;
  }

  /**
   * Logs a Kubernetes API error (message plus response status/body when
   * present) and rethrows it. Always throws — never resolves.
   */
  async handleApiError(error: any, context: string): Promise<never> {
    console.error(`Kubernetes API error (${context}):`, error?.message || error);

    if (error?.response) {
      console.error(`Response status: ${error.response.statusCode}`);
      console.error(`Response body: ${JSON.stringify(error.response.body)}`);
    }

    throw error;
  }
}
+ * @param factor Multiplicative factor to apply (e.g., 0.8 for 80%) + * @returns Calculated memory string in same format + */ +export function calculateJavaMemory(memoryString: string, factor: number): string { + const match = memoryString.match(/^(\d+)([MG])$/i); + if (!match) return "512M"; // Default if format is not recognized + + const [, valueStr, unit] = match; + const value = parseInt(valueStr, 10); + + const calculatedValue = Math.round(value * factor); + return `${calculatedValue}${unit.toUpperCase()}`; +} + +/** + * Convert memory string to Kubernetes format (e.g., "1G" -> "1Gi") + * @param memoryString Memory string in format like "512M" or "1G" + * @returns Memory string in Kubernetes format + */ +export function convertToK8sFormat(memoryString: string): string { + const match = memoryString.match(/^(\d+)([MG])$/i); + if (!match) return "1Gi"; // Default if format is not recognized + + const [, valueStr, unit] = match; + + if (unit.toUpperCase() === 'G') { + return `${valueStr}Gi`; + } else { + return `${valueStr}Mi`; + } +} \ No newline at end of file diff --git a/packages/k8s-operator/src/utils/rbac-registrar.ts b/packages/k8s-operator/src/utils/rbac-registrar.ts new file mode 100644 index 0000000..8f2387c --- /dev/null +++ b/packages/k8s-operator/src/utils/rbac-registrar.ts @@ -0,0 +1,212 @@ +import { KubernetesClient } from './k8s-client'; +import { + minikuraNamespace, + minikuraServiceAccount, + minikuraClusterRole, + minikuraClusterRoleBinding, + minikuraOperatorDeployment +} from '../crds/rbac'; +import fetch from 'node-fetch'; + +/** + * Registers all RBAC resources required + * @param k8sClient The Kubernetes client instance + */ +export async function registerRBACResources(k8sClient: KubernetesClient): Promise { + try { + console.log('Starting RBAC resources registration...'); + + await registerNamespace(k8sClient); + await registerServiceAccount(k8sClient); + await registerClusterRole(k8sClient); + await 
registerClusterRoleBinding(k8sClient); + + console.log('RBAC resources registration completed successfully'); + } catch (error: any) { + console.error('Error registering RBAC resources:', error.message); + if (error.response) { + console.error(`Response status: ${error.response.statusCode}`); + console.error(`Response body: ${JSON.stringify(error.response.body)}`); + } + throw error; + } +} + +/** + * Registers the namespace + */ +async function registerNamespace(k8sClient: KubernetesClient): Promise { + try { + const coreApi = k8sClient.getCoreApi(); + await coreApi.createNamespace(minikuraNamespace); + console.log(`Created namespace ${minikuraNamespace.metadata.name}`); + } catch (error: any) { + if (error.response?.statusCode === 409) { + console.log(`Namespace ${minikuraNamespace.metadata.name} already exists`); + } else { + throw error; + } + } +} + +/** + * Registers the service account + */ +async function registerServiceAccount(k8sClient: KubernetesClient): Promise { + try { + const coreApi = k8sClient.getCoreApi(); + await coreApi.createNamespacedServiceAccount( + minikuraServiceAccount.metadata.namespace, + minikuraServiceAccount + ); + console.log(`Created service account ${minikuraServiceAccount.metadata.name}`); + } catch (error: any) { + if (error.response?.statusCode === 409) { + console.log(`Service account ${minikuraServiceAccount.metadata.name} already exists`); + } else { + throw error; + } + } +} + +/** + * Registers the cluster role + */ +async function registerClusterRole(k8sClient: KubernetesClient): Promise { + try { + // TODO: I can't get this working with the k8s client, so I'm using fetch directly, fix later + const kc = k8sClient.getKubeConfig(); + const opts = {}; + kc.applyToRequest(opts as any); + + // Get cluster URL + const cluster = kc.getCurrentCluster(); + if (!cluster) { + throw new Error('No active cluster found in KubeConfig'); + } + + try { + const response = await 
fetch(`${cluster.server}/apis/rbac.authorization.k8s.io/v1/clusterroles`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(opts as any).headers + }, + body: JSON.stringify(minikuraClusterRole), + agent: (opts as any).agent + }); + + if (response.ok) { + console.log(`Created cluster role ${minikuraClusterRole.metadata.name}`); + } else if (response.status === 409) { + console.log(`Cluster role ${minikuraClusterRole.metadata.name} already exists`); + } else { + const text = await response.text(); + throw new Error(`Failed to create cluster role: ${response.status} ${response.statusText} - ${text}`); + } + } catch (error: any) { + // If the error message contains "already exists", that's OK + if (error.message?.includes('already exists') || error.message?.includes('409')) { + console.log(`Cluster role ${minikuraClusterRole.metadata.name} already exists`); + } else { + throw error; + } + } + } catch (error: any) { + console.error(`Error registering cluster role:`, error.message); + throw error; + } +} + +/** + * Registers the Minikura cluster role binding + */ +async function registerClusterRoleBinding(k8sClient: KubernetesClient): Promise { + try { + // We need to use the raw client for cluster roles + const kc = k8sClient.getKubeConfig(); + const opts = {}; + kc.applyToRequest(opts as any); + + // Get cluster URL + const cluster = kc.getCurrentCluster(); + if (!cluster) { + throw new Error('No active cluster found in KubeConfig'); + } + + // Create the cluster role binding + const { default: fetch } = await import('node-fetch'); + + try { + const response = await fetch(`${cluster.server}/apis/rbac.authorization.k8s.io/v1/clusterrolebindings`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(opts as any).headers + }, + body: JSON.stringify(minikuraClusterRoleBinding), + agent: (opts as any).agent + }); + + if (response.ok) { + console.log(`Created cluster role binding 
${minikuraClusterRoleBinding.metadata.name}`); + } else if (response.status === 409) { + console.log(`Cluster role binding ${minikuraClusterRoleBinding.metadata.name} already exists`); + } else { + const text = await response.text(); + throw new Error(`Failed to create cluster role binding: ${response.status} ${response.statusText} - ${text}`); + } + } catch (error: any) { + // If the error message contains "already exists" + // TODO: Potentially better error handling here + if (error.message?.includes('already exists') || error.message?.includes('409')) { + console.log(`Cluster role binding ${minikuraClusterRoleBinding.metadata.name} already exists`); + } else { + throw error; + } + } + } catch (error: any) { + console.error(`Error registering cluster role binding:`, error.message); + throw error; + } +} + +/** + * Registers the Minikura operator deployment + * Note: This requires the secret to be created first + */ +export async function registerOperatorDeployment( + k8sClient: KubernetesClient, + registryUrl: string +): Promise { + try { + // Replace the registry URL placeholder, for future use + const deployment = JSON.parse( + JSON.stringify(minikuraOperatorDeployment).replace('${REGISTRY_URL}', registryUrl) + ); + + const appsApi = k8sClient.getAppsApi(); + await appsApi.createNamespacedDeployment( + deployment.metadata.namespace, + deployment + ); + console.log(`Created deployment ${deployment.metadata.name}`); + } catch (error: any) { + if (error.response?.statusCode === 409) { + console.log(`Deployment ${minikuraOperatorDeployment.metadata.name} already exists`); + // Update the deployment if it already exists + const deployment = JSON.parse( + JSON.stringify(minikuraOperatorDeployment).replace('${REGISTRY_URL}', registryUrl) + ); + + await k8sClient.getAppsApi().replaceNamespacedDeployment( + deployment.metadata.name, + deployment.metadata.namespace, + deployment + ); + console.log(`Updated deployment ${deployment.metadata.name}`); + } else { + throw 
error; + } + } +} \ No newline at end of file diff --git a/packages/k8s-operator/tsconfig.json b/packages/k8s-operator/tsconfig.json new file mode 100644 index 0000000..fd521c7 --- /dev/null +++ b/packages/k8s-operator/tsconfig.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + // Enable latest features + "lib": ["ESNext"], + "target": "ESNext", + "module": "ESNext", + "moduleDetection": "force", + "allowJs": true, + + // Bundler mode + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + + // Best practices + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + + // Some stricter flags (disabled by default) + "noUnusedLocals": false, + "noUnusedParameters": false, + "noPropertyAccessFromIndexSignature": false + } +}