Path: blob/master/src/packages/file-server/zfs/pools.ts
/*
This code sets things up for each pool and namespace, e.g., defining datasets, creating directories,
etc. as defined in config and names.

WARNING: For efficiency and sanity, it assumes that once something is set up, it stays set up.
If there is a chaos monkey running around breaking things (e.g., screwing up
file permissions, deleting datasets, etc.), then this code won't help at all.

OPERATIONS:

- To add a new pool, just create it using zpool with a name that starts with context.PREFIX.
  It should automatically start getting used within POOLS_CACHE_MS by newly created filesystems.

*/

import { reuseInFlight } from "@cocalc/util/reuse-in-flight";
import { context, POOLS_CACHE_MS } from "./config";
import { exec } from "./util";
import {
  archivesDataset,
  archivesMountpoint,
  namespaceDataset,
  filesystemsDataset,
  filesystemsPath,
  bupDataset,
  bupMountpoint,
  tempDataset,
} from "./names";
import { exists } from "@cocalc/backend/misc/async-utils-node";
import { getNamespacesAndPools } from "./db";

// Make sure all pools and namespaces are initialized for all existing filesystems.
// This should only be needed after booting up the server and importing the pools.
export async function initializeAllPools() {
  // TODO: maybe import all here?

  for (const { namespace, pool } of getNamespacesAndPools()) {
    await initializePool({ namespace, pool });
  }
}

interface Pool {
  name: string;
  state: "ONLINE" | "OFFLINE";
  size: number;
  allocated: number;
  free: number;
}

type Pools = { [name: string]: Pool };
let poolsCache: { [prefix: string]: Pools } = {};

// Return info about all pools whose names start with context.PREFIX.
// Results are cached for POOLS_CACHE_MS (except in test mode).
export const getPools = reuseInFlight(
  async ({ noCache }: { noCache?: boolean } = {}): Promise<Pools> => {
    if (!noCache && poolsCache[context.PREFIX]) {
      return poolsCache[context.PREFIX];
    }
    const { stdout } = await exec({
      verbose: true,
      command: "zpool",
      args: ["list", "-j", "--json-int", "-o", "size,allocated,free"],
    });
    const { pools } = JSON.parse(stdout);
    const v: { [name: string]: Pool } = {};
    for (const name in pools) {
      if (!name.startsWith(context.PREFIX)) {
        continue;
      }
      const pool = pools[name];
      // flatten {value, ...} records down to just the values
      for (const key in pool.properties) {
        pool.properties[key] = pool.properties[key].value;
      }
      v[name] = { name, state: pool.state, ...pool.properties };
    }
    poolsCache[context.PREFIX] = v;
    if (!process.env.COCALC_TEST_MODE) {
      // only clear cache in non-test mode
      setTimeout(() => {
        delete poolsCache[context.PREFIX];
      }, POOLS_CACHE_MS);
    }
    return v;
  },
);

// OK to call this again even if initialized already.
export const initializePool = reuseInFlight(
  async ({
    namespace = context.namespace,
    pool,
  }: {
    namespace?: string;
    pool: string;
  }) => {
    if (!pool.startsWith(context.PREFIX)) {
      throw Error(
        `pool (="${pool}") must start with the prefix '${context.PREFIX}'`,
      );
    }
    // archives and filesystems for each namespace are in this dataset
    await ensureDatasetExists({
      name: namespaceDataset({ namespace, pool }),
    });

    // Initialize archives dataset, used for archiving filesystems.
    await ensureDatasetExists({
      name: archivesDataset({ pool, namespace }),
      mountpoint: archivesMountpoint({ pool, namespace }),
    });
    // This sets up the parent filesystem for all filesystems
    // and enables compression and dedup.
    await ensureDatasetExists({
      name: filesystemsDataset({ namespace, pool }),
    });
    await ensureDatasetExists({
      name: tempDataset({ namespace, pool }),
      dedup: "off",
    });
    // Initialize bup dataset, used for backups.
    await ensureDatasetExists({
      name: bupDataset({ pool, namespace }),
      mountpoint: bupMountpoint({ pool, namespace }),
      compression: "off",
      dedup: "off",
    });

    const filesystems = filesystemsPath({ namespace });
    if (!(await exists(filesystems))) {
      await exec({
        verbose: true,
        command: "sudo",
        args: ["mkdir", "-p", filesystems],
      });
      await exec({
        verbose: true,
        command: "sudo",
        args: ["chmod", "a+rx", context.FILESYSTEMS],
      });
      await exec({
        verbose: true,
        command: "sudo",
        args: ["chmod", "a+rx", filesystems],
      });
    }
  },
);

// If a dataset exists, it is assumed to exist henceforth for the life of this process.
// That's fine for *this* application here of initializing pools, since we never delete
// anything here.
const datasetExistsCache = new Set<string>();
async function datasetExists(name: string): Promise<boolean> {
  if (datasetExistsCache.has(name)) {
    return true;
  }
  try {
    await exec({
      verbose: true,
      command: "zfs",
      args: ["list", name],
    });
    datasetExistsCache.add(name);
    return true;
  } catch {
    return false;
  }
}

// Returns true if the given dataset is currently mounted.
async function isMounted(dataset: string): Promise<boolean> {
  const { stdout } = await exec({
    command: "zfs",
    args: ["get", "mounted", dataset, "-j"],
  });
  const x = JSON.parse(stdout);
  return x.datasets[dataset].properties.mounted.value == "yes";
}

// Create the dataset with the given options if it doesn't exist already,
// and ensure it is mounted when a mountpoint is specified.
async function ensureDatasetExists({
  name,
  mountpoint,
  compression = "lz4",
  dedup = "on",
}: {
  name: string;
  mountpoint?: string;
  compression?: "lz4" | "off";
  dedup?: "on" | "off";
}) {
  if (await datasetExists(name)) {
    if (mountpoint && !(await isMounted(name))) {
      // ensure mounted
      await exec({
        verbose: true,
        command: "sudo",
        args: ["zfs", "mount", name],
      });
    }
    return;
  }
  await exec({
    verbose: true,
    command: "sudo",
    args: [
      "zfs",
      "create",
      "-o",
      `mountpoint=${mountpoint ? mountpoint : "none"}`,
      "-o",
      `compression=${compression}`,
      "-o",
      `dedup=${dedup}`,
      name,
    ],
  });
  // make sure it is very hard to accidentally delete the entire dataset
  // see https://github.com/openzfs/zfs/issues/4134#issuecomment-2565724994
  const safety = `${name}@safety`;
  await exec({
    verbose: true,
    command: "sudo",
    args: ["zfs", "snapshot", safety],
  });
  await exec({
    verbose: true,
    command: "sudo",
    args: ["zfs", "hold", "safety", safety],
  });
}
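/*
Example (a hypothetical sketch, not part of this module): per the OPERATIONS
note above, bringing a new pool into service might look as follows. The pool
name "cocalc-pool2" and the device path are made up for illustration; the only
requirement this file imposes is that the name starts with context.PREFIX
(here assumed to be "cocalc-pool").

    # in a shell, create the pool:
    #   sudo zpool create cocalc-pool2 /dev/sdb
    # it is then picked up automatically within POOLS_CACHE_MS, or right away:

    import { getPools, initializePool } from "./pools";

    const pools = await getPools({ noCache: true }); // bypass the cache
    await initializePool({ pool: "cocalc-pool2" }); // idempotent, per above
*/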