// Path: blob/master/src/packages/file-server/zfs/config.ts
// (1447 views)
import { join } from "path";1import { databaseFilename } from "./names";23// we ONLY put filesystems on pools whose name have this prefix.4// all other pools are ignored. We also mount everything in /{PREFIX} on the filesystem.5const PREFIX = process.env.COCALC_TEST_MODE ? "cocalcfs-test" : "cocalcfs";67const DATA = `/${PREFIX}`;89const SQLITE3_DATABASE_FILE = databaseFilename(DATA);1011// Directory on server where filesystems get mounted (so NFS can serve them)12const FILESYSTEMS = join(DATA, "filesystems");1314// Directory on server where zfs send streams (and tar?) are stored15const ARCHIVES = join(DATA, "archives");1617// Directory to store data used in pulling as part of sync.18// E.g., this keeps around copies of the sqlite state database of each remote.19const PULL = join(DATA, "pull");2021// Directory for bup22const BUP = join(DATA, "bup");2324export const context = {25namespace: process.env.NAMESPACE ?? "default",26PREFIX,27DATA,28SQLITE3_DATABASE_FILE,29FILESYSTEMS,30ARCHIVES,31PULL,32BUP,33};3435// WARNING: this "setContext" is global. It's very useful for **UNIT TESTING**, but36// for any other use, you want to set this at most once and never again!!! The reason37// is because with nodejs you could have async code running all over the place, and38// changing the context out from under it would lead to nonsense and corruption.39export function setContext({40namespace,41prefix,42}: {43namespace?: string;44prefix?: string;45}) {46context.namespace = namespace ?? process.env.NAMESPACE ?? "default";47context.PREFIX = prefix ?? 
PREFIX;48context.DATA = `/${context.PREFIX}`;49context.SQLITE3_DATABASE_FILE = databaseFilename(context.DATA);50context.FILESYSTEMS = join(context.DATA, "filesystems");51context.ARCHIVES = join(context.DATA, "archives");52context.PULL = join(context.DATA, "pull");53context.BUP = join(context.DATA, "bup");54}5556// Every filesystem has at least this much quota (?)57export const MIN_QUOTA = 1024 * 1024 * 1; // 1MB5859// We periodically do "zpool list" to find out what pools are available60// and how much space they have left. This info is cached for this long61// to avoid excessive calls:62export const POOLS_CACHE_MS = 15000;6364// two hour default for running any commands (e.g., zfs send/recv)65export const DEFAULT_EXEC_TIMEOUT_MS = 2 * 1000 * 60 * 60;6667// **all** user files for filesystems have this owner and group.68export const UID = 2001;69export const GID = 2001;7071// We make/update snapshots periodically, with this being the minimum interval.72export const SNAPSHOT_INTERVAL_MS = 60 * 30 * 1000;73//export const SNAPSHOT_INTERVAL_MS = 10 * 1000;7475// Lengths of time in minutes to keep these snapshots76export const SNAPSHOT_INTERVALS_MS = {77halfhourly: 30 * 1000 * 60,78daily: 60 * 24 * 1000 * 60,79weekly: 60 * 24 * 7 * 1000 * 60,80monthly: 60 * 24 * 7 * 4 * 1000 * 60,81};8283// How many of each type of snapshot to retain84export const SNAPSHOT_COUNTS = {85halfhourly: 24,86daily: 14,87weekly: 7,88monthly: 4,89};9091// Minimal interval for bup backups92export const BUP_INTERVAL_MS = 24 * 1000 * 60 * 60;9394// minimal interval for zfs streams95export const STREAM_INTERVAL_MS = 24 * 1000 * 60 * 60;96// when more than this many streams, we recompact down97export const MAX_STREAMS = 30;9899100