Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
sagemathinc
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/file-server/zfs/pools.ts
1447 views
1
/*
This code sets things up for each pool and namespace, e.g., defining datasets,
creating directories, etc., as defined in config and names.

WARNING: For efficiency and sanity, it assumes that once something is set up,
it stays set up.  If there is a chaos monkey running around breaking things
(e.g., screwing up file permissions, deleting datasets, etc.), then this code
won't help at all.

OPERATIONS:

- To add a new pool, just create it using zfs with a name that starts with
  context.PREFIX.  It should automatically start getting used within
  POOLS_CACHE_MS by newly created filesystems.

*/
15
16
import { reuseInFlight } from "@cocalc/util/reuse-in-flight";
17
import { context, POOLS_CACHE_MS } from "./config";
18
import { exec } from "./util";
19
import {
20
archivesDataset,
21
archivesMountpoint,
22
namespaceDataset,
23
filesystemsDataset,
24
filesystemsPath,
25
bupDataset,
26
bupMountpoint,
27
tempDataset,
28
} from "./names";
29
import { exists } from "@cocalc/backend/misc/async-utils-node";
30
import { getNamespacesAndPools } from "./db";
31
32
// Ensure every pool/namespace pair that backs an existing filesystem is
// fully initialized (datasets defined, directories created, etc.).
// Typically called once after the server boots and the zpools have been
// imported; initializePool is idempotent, so re-running this is safe.
export async function initializeAllPools() {
  // TODO: maybe import all here?
  const pairs = getNamespacesAndPools();
  for (const pair of pairs) {
    await initializePool({ namespace: pair.namespace, pool: pair.pool });
  }
}
41
42
// One zpool as reported by `zpool list -j`, with the properties we care
// about flattened out of zpool's JSON wrapping (see getPools below).
interface Pool {
  // name of the pool (starts with context.PREFIX for pools we manage)
  name: string;
  // pool health as reported by zpool; NOTE(review): zpool can also report
  // other states (e.g. DEGRADED) -- this type may be narrower than reality.
  state: "ONLINE" | "OFFLINE";
  // total size in bytes (--json-int makes these integers, not strings)
  size: number;
  // bytes currently allocated
  allocated: number;
  // bytes currently free
  free: number;
}

// All pools we manage, keyed by pool name.
type Pools = { [name: string]: Pool };
// Cache of getPools results, keyed by context.PREFIX; entries expire after
// POOLS_CACHE_MS (except in test mode -- see getPools).
let poolsCache: { [prefix: string]: Pools } = {};
52
53
export const getPools = reuseInFlight(
54
async ({ noCache }: { noCache?: boolean } = {}): Promise<Pools> => {
55
if (!noCache && poolsCache[context.PREFIX]) {
56
return poolsCache[context.PREFIX];
57
}
58
const { stdout } = await exec({
59
verbose: true,
60
command: "zpool",
61
args: ["list", "-j", "--json-int", "-o", "size,allocated,free"],
62
});
63
const { pools } = JSON.parse(stdout);
64
const v: { [name: string]: Pool } = {};
65
for (const name in pools) {
66
if (!name.startsWith(context.PREFIX)) {
67
continue;
68
}
69
const pool = pools[name];
70
for (const key in pool.properties) {
71
pool.properties[key] = pool.properties[key].value;
72
}
73
v[name] = { name, state: pool.state, ...pool.properties };
74
}
75
poolsCache[context.PREFIX] = v;
76
if (!process.env.COCALC_TEST_MODE) {
77
// only clear cache in non-test mode
78
setTimeout(() => {
79
delete poolsCache[context.PREFIX];
80
}, POOLS_CACHE_MS);
81
}
82
return v;
83
},
84
);
85
86
// Initialize one (pool, namespace) pair: create the namespace, archives,
// filesystems, temp, and bup datasets if missing, and make sure the
// filesystems path exists with world-readable permissions.
// Idempotent -- OK to call this again even if initialized already.
export const initializePool = reuseInFlight(
  async ({
    namespace = context.namespace,
    pool,
  }: {
    namespace?: string;
    pool: string;
  }) => {
    // We only ever manage pools created with our prefix; refuse anything else
    // so a typo can't touch an unrelated pool.
    if (!pool.startsWith(context.PREFIX)) {
      throw Error(
        `pool (="${pool}") must start with the prefix '${context.PREFIX}'`,
      );
    }
    // archives and filesystems for each namespace are in this dataset, so it
    // must be created first (it is the parent of the datasets below)
    await ensureDatasetExists({
      name: namespaceDataset({ namespace, pool }),
    });

    // Initialize archives dataset, used for archiving filesystems.
    await ensureDatasetExists({
      name: archivesDataset({ pool, namespace }),
      mountpoint: archivesMountpoint({ pool, namespace }),
    });
    // This sets up the parent filesystem for all filesystems
    // and enables compression and dedup (the defaults of ensureDatasetExists).
    await ensureDatasetExists({
      name: filesystemsDataset({ namespace, pool }),
    });
    // Temp dataset: scratch space, so no dedup.
    await ensureDatasetExists({
      name: tempDataset({ namespace, pool }),
      dedup: "off",
    });
    // Initialize bup dataset, used for backups.  bup already compresses and
    // dedups its own data, so both are off here.
    await ensureDatasetExists({
      name: bupDataset({ pool, namespace }),
      mountpoint: bupMountpoint({ pool, namespace }),
      compression: "off",
      dedup: "off",
    });

    // Make sure the per-namespace filesystems directory exists and is
    // world-readable/traversable (parent dir first, then the dir itself).
    const filesystems = filesystemsPath({ namespace });
    if (!(await exists(filesystems))) {
      await exec({
        verbose: true,
        command: "sudo",
        args: ["mkdir", "-p", filesystems],
      });
      await exec({
        verbose: true,
        command: "sudo",
        args: ["chmod", "a+rx", context.FILESYSTEMS],
      });
      await exec({
        verbose: true,
        command: "sudo",
        args: ["chmod", "a+rx", filesystems],
      });
    }
  },
);
147
148
// If a dataset exists, it is assumed to exist henceforth for the life of this process.
149
// That's fine for *this* application here of initializing pools, since we never delete
150
// anything here.
151
const datasetExistsCache = new Set<string>();
152
async function datasetExists(name: string): Promise<boolean> {
153
if (datasetExistsCache.has(name)) {
154
return true;
155
}
156
try {
157
await exec({
158
verbose: true,
159
command: "zfs",
160
args: ["list", name],
161
});
162
datasetExistsCache.add(name);
163
return true;
164
} catch {
165
return false;
166
}
167
}
168
169
async function isMounted(dataset): Promise<boolean> {
170
const { stdout } = await exec({
171
command: "zfs",
172
args: ["get", "mounted", dataset, "-j"],
173
});
174
const x = JSON.parse(stdout);
175
return x.datasets[dataset].properties.mounted.value == "yes";
176
}
177
178
async function ensureDatasetExists({
179
name,
180
mountpoint,
181
compression = "lz4",
182
dedup = "on",
183
}: {
184
name: string;
185
mountpoint?: string;
186
compression?: "lz4" | "off";
187
dedup?: "on" | "off";
188
}) {
189
if (await datasetExists(name)) {
190
if (mountpoint && !(await isMounted(name))) {
191
// ensure mounted
192
await exec({
193
verbose: true,
194
command: "sudo",
195
args: ["zfs", "mount", name],
196
});
197
}
198
return;
199
}
200
await exec({
201
verbose: true,
202
command: "sudo",
203
args: [
204
"zfs",
205
"create",
206
"-o",
207
`mountpoint=${mountpoint ? mountpoint : "none"}`,
208
"-o",
209
`compression=${compression}`,
210
"-o",
211
`dedup=${dedup}`,
212
name,
213
],
214
});
215
// make sure it is very hard to accidentally delete the entire dataset
216
// see https://github.com/openzfs/zfs/issues/4134#issuecomment-2565724994
217
const safety = `${name}@safety`;
218
await exec({
219
verbose: true,
220
command: "sudo",
221
args: ["zfs", "snapshot", safety],
222
});
223
await exec({
224
verbose: true,
225
command: "sudo",
226
args: ["zfs", "hold", "safety", safety],
227
});
228
}
229
230