Book a Demo!
CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Policies · Sign Up · Sign In
sagemathinc
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/backend/conat/test/files/write.test.ts
1451 views
1
/*
Test async streaming writing of files to compute servers using Conat
(this file imports from @cocalc/conat; older comments referred to NATS).

DEVELOPMENT:

pnpm test ./write.test.ts
*/
10
11
import { before, after } from "@cocalc/backend/conat/test/setup";
12
13
// Spin up the shared Conat test environment before any test runs.
// (Imports are hoisted, so it is fine that this call appears among them.)
beforeAll(before);
14
15
import { close, createServer, writeFile } from "@cocalc/conat/files/write";
16
import { createWriteStream, createReadStream } from "fs";
17
import { file as tempFile } from "tmp-promise";
18
import { writeFile as fsWriteFile, readFile } from "fs/promises";
19
import { sha1 } from "@cocalc/backend/sha1";
20
import { delay } from "awaiting";
21
22
describe("do a basic test that the file writing service works", () => {
23
const project_id = "00000000-0000-4000-8000-000000000000";
24
const compute_server_id = 0;
25
it("create the write server", async () => {
26
await createServer({
27
project_id,
28
compute_server_id,
29
createWriteStream,
30
});
31
});
32
33
let cleanups: any[] = [];
34
const CONTENT = "cocalc";
35
let source;
36
it("creates the file we will read", async () => {
37
const { path, cleanup } = await tempFile();
38
source = path;
39
await fsWriteFile(path, CONTENT);
40
cleanups.push(cleanup);
41
});
42
43
let dest;
44
it("write to a new file", async () => {
45
const { path, cleanup } = await tempFile();
46
dest = path;
47
cleanups.push(cleanup);
48
49
const stream = createReadStream(source);
50
const { bytes, chunks } = await writeFile({
51
stream,
52
project_id,
53
compute_server_id,
54
path,
55
});
56
expect(chunks).toBe(1);
57
expect(bytes).toBe(CONTENT.length);
58
});
59
60
it("confirm that the dest file is correct", async () => {
61
await delay(50);
62
const d = (await readFile(dest)).toString();
63
expect(d).toEqual(CONTENT);
64
});
65
66
it("closes the write server", async () => {
67
close({ project_id, compute_server_id });
68
for (const f of cleanups) {
69
f();
70
}
71
});
72
});
73
74
describe("do a more challenging test that involves a larger file that has to be broken into many chunks", () => {
75
const project_id = "00000000-0000-4000-8000-000000000000";
76
const compute_server_id = 1;
77
78
it("create the write server", async () => {
79
await createServer({
80
project_id,
81
compute_server_id,
82
createWriteStream,
83
});
84
});
85
86
let cleanups: any[] = [];
87
let CONTENT = "";
88
for (let i = 0; i < 1000000; i++) {
89
CONTENT += `${i}`;
90
}
91
let source;
92
it("creates the file we will read", async () => {
93
const { path, cleanup } = await tempFile();
94
source = path;
95
await fsWriteFile(path, CONTENT);
96
cleanups.push(cleanup);
97
});
98
99
let dest;
100
it("write to a new file", async () => {
101
const { path, cleanup } = await tempFile();
102
dest = path;
103
cleanups.push(cleanup);
104
105
const stream = createReadStream(source);
106
const { bytes, chunks } = await writeFile({
107
stream,
108
project_id,
109
compute_server_id,
110
path,
111
});
112
expect(chunks).toBeGreaterThan(1);
113
expect(bytes).toBe(CONTENT.length);
114
});
115
116
it("confirm that the dest file is correct", async () => {
117
let d = (await readFile(dest)).toString();
118
if (d.length != CONTENT.length) {
119
// under heavy load file might not have been flushed **to disk** (even though it was fully and
120
// correctly received), so we wait to give it a chance, then try again.
121
await delay(1000);
122
d = (await readFile(dest)).toString();
123
}
124
expect(d.length).toEqual(CONTENT.length);
125
// not directly comparing, since huge and if something goes wrong the output
126
// saying the test failed is huge.
127
expect(sha1(d)).toEqual(sha1(CONTENT));
128
});
129
130
it("closes the write server", async () => {
131
close({ project_id, compute_server_id });
132
for (const f of cleanups) {
133
f();
134
}
135
});
136
});
137
138
// Tear down the shared Conat test environment once every test has run.
afterAll(after);
139
140