Commit | Line | Data |
---|---|---|
36457566 LC |
1 | #include "config.h" |
2 | #include "local-store.hh" | |
3 | #include "globals.hh" | |
4 | #include "archive.hh" | |
5 | #include "pathlocks.hh" | |
6 | #include "worker-protocol.hh" | |
7 | #include "derivations.hh" | |
8 | #include "affinity.hh" | |
9 | ||
10 | #include <iostream> | |
11 | #include <algorithm> | |
12 | #include <cstring> | |
13 | ||
14 | #include <sys/types.h> | |
15 | #include <sys/stat.h> | |
16 | #include <sys/time.h> | |
17 | #include <unistd.h> | |
18 | #include <utime.h> | |
19 | #include <fcntl.h> | |
20 | #include <errno.h> | |
21 | #include <stdio.h> | |
22 | #include <time.h> | |
15ddeff5 | 23 | #include <grp.h> |
64cf660f | 24 | #include <ctype.h> |
36457566 LC |
25 | |
26 | #if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H | |
27 | #include <sched.h> | |
28 | #include <sys/statvfs.h> | |
29 | #include <sys/mount.h> | |
30 | #endif | |
31 | ||
36457566 LC |
32 | #include <sys/ioctl.h> |
33 | #include <errno.h> | |
36457566 LC |
34 | |
35 | #include <sqlite3.h> | |
36 | ||
37 | ||
38 | namespace nix { | |
39 | ||
40 | ||
36457566 LC |
41 | void checkStoreNotSymlink() |
42 | { | |
43 | if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return; | |
44 | Path path = settings.nixStore; | |
45 | struct stat st; | |
46 | while (path != "/") { | |
47 | if (lstat(path.c_str(), &st)) | |
48 | throw SysError(format("getting status of `%1%'") % path); | |
49 | if (S_ISLNK(st.st_mode)) | |
50 | throw Error(format( | |
51 | "the path `%1%' is a symlink; " | |
8327e733 | 52 | "this is not allowed for the store and its parent directories") |
36457566 LC |
53 | % path); |
54 | path = dirOf(path); | |
55 | } | |
56 | } | |
57 | ||
58 | ||
/* Open the local store rooted at the locations in `settings'.  Creates
   any missing state directories, sets up multi-user permissions when
   running as root, reserves disk space for the garbage collector when
   `reserveSpace' is true, takes the global big-lock in shared mode,
   and finally opens (or initialises) the SQLite database.  Falls back
   to read-only mode when the lock file cannot be opened for writing. */
LocalStore::LocalStore(bool reserveSpace)
{
    schemaPath = settings.nixDBPath + "/schema";

    /* In read-only mode, skip all directory/permission setup and just
       open the existing database. */
    if (settings.readOnlyMode) {
        openDB(false);
        return;
    }

    /* Create missing state directories if they don't already exist. */
    createDirs(settings.nixStore);
    makeStoreWritable();
    createDirs(linksDir = settings.nixStore + "/.links");
    Path profilesDir = settings.nixStateDir + "/profiles";
    createDirs(profilesDir);
    createDirs(settings.nixStateDir + "/temproots");
    createDirs(settings.nixDBPath);
    Path gcRootsDir = settings.nixStateDir + "/gcroots";
    if (!pathExists(gcRootsDir)) {
        createDirs(gcRootsDir);
        createSymlink(profilesDir, gcRootsDir + "/profiles");
    }

    /* Optionally, create directories and set permissions for a
       multi-user install. */
    if (getuid() == 0 && settings.buildUsersGroup != "") {

        Path perUserDir = profilesDir + "/per-user";
        createDirs(perUserDir);
        if (chmod(perUserDir.c_str(), 0755) == -1)
            throw SysError(format("could not set permissions on '%1%' to 755")
                % perUserDir);

        /* Sticky + group-writable: build users may create store paths
           but not delete each other's. */
        mode_t perm = 01775;

        struct group * gr = getgrnam(settings.buildUsersGroup.c_str());
        if (!gr)
            throw Error(format("the group `%1%' specified in `build-users-group' does not exist")
                % settings.buildUsersGroup);
        else {
            struct stat st;
            if (stat(settings.nixStore.c_str(), &st))
                throw SysError(format("getting attributes of path '%1%'") % settings.nixStore);

            /* Only touch ownership/mode when they differ from the
               desired root:build-group 1775 configuration. */
            if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
                if (chown(settings.nixStore.c_str(), 0, gr->gr_gid) == -1)
                    throw SysError(format("changing ownership of path '%1%'") % settings.nixStore);
                if (chmod(settings.nixStore.c_str(), perm) == -1)
                    throw SysError(format("changing permissions on path '%1%'") % settings.nixStore);
            }
        }
    }

    checkStoreNotSymlink();

    /* We can't open a SQLite database if the disk is full.  Since
       this prevents the garbage collector from running when it's most
       needed, we reserve some dummy space that we can free just
       before doing a garbage collection. */
    try {
        Path reservedPath = settings.nixDBPath + "/reserved";
        if (reserveSpace) {
            struct stat st;
            if (stat(reservedPath.c_str(), &st) == -1 ||
                st.st_size != settings.reservedSize)
            {
                AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT, 0600);
                int res = -1;
#if HAVE_POSIX_FALLOCATE
                res = posix_fallocate(fd, 0, settings.reservedSize);
#endif
                /* Fall back to writing filler bytes when
                   posix_fallocate() is unavailable or failed. */
                if (res == -1) {
                    writeFull(fd, string(settings.reservedSize, 'X'));
                    ftruncate(fd, settings.reservedSize);
                }
            }
        }
        else
            deletePath(reservedPath);
    } catch (SysError & e) { /* don't care about errors */
    }

    /* Acquire the big fat lock in shared mode to make sure that no
       schema upgrade is in progress. */
    try {
        Path globalLockPath = settings.nixDBPath + "/big-lock";
        globalLock = openLockFile(globalLockPath.c_str(), true);
    } catch (SysError & e) {
        /* EACCES means we are an unprivileged user: degrade to
           read-only access instead of failing. */
        if (e.errNo != EACCES) throw;
        settings.readOnlyMode = true;
        openDB(false);
        return;
    }

    if (!lockFile(globalLock, ltRead, false)) {
        printMsg(lvlError, "waiting for the big store lock...");
        lockFile(globalLock, ltRead, true);
    }

    /* Check the current database schema and if necessary do an
       upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);

    else if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        openDB(true);
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }

    else if (curSchema < nixSchemaVersion) {
        /* Guix always used version 7 of the schema. */
        throw Error(
            format("Your store database uses an implausibly old schema, version %1%.")
            % curSchema);
    }

    else openDB(false);
}
180 | ||
181 | ||
/* Close the store.  Removes this process's temporary-roots file, if
   one was created.  All exceptions are swallowed because destructors
   must not throw. */
LocalStore::~LocalStore()
{
    try {
        if (fdTempRoots != -1) {
            fdTempRoots.close();
            unlink(fnTempRoots.c_str());
        }
    } catch (...) {
        ignoreException();
    }
}
193 | ||
194 | ||
195 | int LocalStore::getSchema() | |
196 | { | |
197 | int curSchema = 0; | |
198 | if (pathExists(schemaPath)) { | |
199 | string s = readFile(schemaPath); | |
200 | if (!string2Int(s, curSchema)) | |
201 | throw Error(format("`%1%' is corrupt") % schemaPath); | |
202 | } | |
203 | return curSchema; | |
204 | } | |
205 | ||
206 | ||
/* Open the SQLite store database, creating it and initialising the
   schema when `create' is true, then configure connection pragmas and
   prepare every SQL statement used by this class. */
void LocalStore::openDB(bool create)
{
    if (access(settings.nixDBPath.c_str(), R_OK | W_OK))
        throw SysError(format("store database directory `%1%' is not writable") % settings.nixDBPath);

    /* Open the store database. */
    string dbPath = settings.nixDBPath + "/db.sqlite";
    if (sqlite3_open_v2(dbPath.c_str(), &db.db,
            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
        throw Error(format("cannot open store database `%1%'") % dbPath);

    /* Retry for up to an hour when the database is locked by another
       process instead of failing immediately. */
    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
        throwSQLiteError(db, "setting timeout");

    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "enabling foreign keys");

    /* !!! check whether sqlite has been built with foreign key
       support */

    /* Whether SQLite should fsync().  "Normal" synchronous mode
       should be safe enough.  If the user asks for it, don't sync at
       all.  This can cause database corruption if the system
       crashes. */
    string syncMode = settings.fsyncMetadata ? "normal" : "off";
    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting synchronous mode");

    /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
       default. */
    string mode = settings.useSQLiteWAL ? "wal" : "truncate";
    string prevMode;
    {
        SQLiteStmt stmt;
        stmt.create(db, "pragma main.journal_mode;");
        if (sqlite3_step(stmt) != SQLITE_ROW)
            throwSQLiteError(db, "querying journal mode");
        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
    }
    /* Only switch the journal mode when it differs from the current
       one; setting it is not a no-op. */
    if (prevMode != mode &&
        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting journal mode");

    /* Increase the auto-checkpoint interval to 40000 pages.  This
       seems enough to ensure that instantiating the NixOS system
       derivation is done in a single fsync(). */
    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting autocheckpoint interval");

    /* Initialise the database schema, if necessary. */
    if (create) {
        const char * schema =
#include "schema.sql.hh"
            ;
        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
            throwSQLiteError(db, "initialising database schema");
    }

    /* Prepare SQL statements. */
    stmtRegisterValidPath.create(db,
        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
    stmtUpdatePathInfo.create(db,
        "update ValidPaths set narSize = ?, hash = ? where path = ?;");
    stmtAddReference.create(db,
        "insert or replace into Refs (referrer, reference) values (?, ?);");
    stmtQueryPathInfo.create(db,
        "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;");
    stmtQueryReferences.create(db,
        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
    stmtQueryReferrers.create(db,
        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
    stmtInvalidatePath.create(db,
        "delete from ValidPaths where path = ?;");
    stmtRegisterFailedPath.create(db,
        "insert or ignore into FailedPaths (path, time) values (?, ?);");
    stmtHasPathFailed.create(db,
        "select time from FailedPaths where path = ?;");
    stmtQueryFailedPaths.create(db,
        "select path from FailedPaths;");
    // If the path is a derivation, then clear its outputs.
    stmtClearFailedPath.create(db,
        "delete from FailedPaths where ?1 = '*' or path = ?1 "
        "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
    stmtAddDerivationOutput.create(db,
        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
    stmtQueryValidDerivers.create(db,
        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
    stmtQueryDerivationOutputs.create(db,
        "select id, path from DerivationOutputs where drv = ?;");
    // Use "path >= ?" with limit 1 rather than "path like '?%'" to
    // ensure efficient lookup.
    stmtQueryPathFromHashPart.create(db,
        "select path from ValidPaths where path >= ? limit 1;");
    stmtQueryValidPaths.create(db, "select path from ValidPaths");
}
302 | ||
303 | ||
8327e733 LC |
/* To improve purity, users may want to make the store a read-only
   bind mount.  So make the store writable for this process. */
void LocalStore::makeStoreWritable()
{
#if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H && defined(MS_BIND) && defined(MS_REMOUNT)
    /* Only root can unshare a mount namespace and remount. */
    if (getuid() != 0) return;
    /* Check if /nix/store is on a read-only mount. */
    struct statvfs stat;
    if (statvfs(settings.nixStore.c_str(), &stat) != 0)
        throw SysError("getting info about the store mount point");

    if (stat.f_flag & ST_RDONLY) {
        /* Enter a private mount namespace so the remount below only
           affects this process, not the rest of the system. */
        if (unshare(CLONE_NEWNS) == -1)
            throw SysError("setting up a private mount namespace");

        if (mount(0, settings.nixStore.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
            throw SysError(format("remounting %1% writable") % settings.nixStore);
    }
#endif
}
324 | ||
325 | ||
326 | const time_t mtimeStore = 1; /* 1 second into the epoch */ | |
327 | ||
328 | ||
/* Make the permissions and timestamp of `path' canonical: mode 0444
   or 0555 (read-only, preserving the owner-executable bit) and mtime
   fixed to `mtimeStore'.  `st' must be the result of lstat() on
   `path'; symlinks keep their mode but still get their time fixed. */
static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
{
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of `%1%' to %2$o") % path % mode);
        }

    }

    if (st.st_mtime != mtimeStore) {
        struct timeval times[2];
        times[0].tv_sec = st.st_atime;
        times[0].tv_usec = 0;
        times[1].tv_sec = mtimeStore;
        times[1].tv_usec = 0;
        /* Prefer lutimes(), which also handles symlinks; fall back to
           utimes() for non-symlinks when lutimes() is unsupported
           (ENOSYS) or not compiled in. */
#if HAVE_LUTIMES
        if (lutimes(path.c_str(), times) == -1)
            if (errno != ENOSYS ||
                (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
#else
        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
            throw SysError(format("changing modification time of `%1%'") % path);
    }
}
362 | ||
363 | ||
364 | void canonicaliseTimestampAndPermissions(const Path & path) | |
365 | { | |
366 | struct stat st; | |
367 | if (lstat(path.c_str(), &st)) | |
368 | throw SysError(format("getting attributes of path `%1%'") % path); | |
369 | canonicaliseTimestampAndPermissions(path, st); | |
370 | } | |
371 | ||
372 | ||
/* Recursively canonicalise the metadata of `path': verify ownership,
   fix permissions/timestamps, and chown everything to the current
   user.  When `fromUid' is not -1, files must be owned by that uid
   (the build user) unless we already processed the same inode earlier
   in this walk (recorded in `inodesSeen'). */
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path `%1%'") % path);

    /* Really make sure that the path is of a supported type. */
    if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
        throw Error(format("file ‘%1%’ has an unsupported type") % path);

    /* Fail if the file is not owned by the build user.  This prevents
       us from messing up the ownership/permissions of files
       hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
       However, ignore files that we chown'ed ourselves previously to
       ensure that we don't fail on hard links within the same build
       (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
    if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
        assert(!S_ISDIR(st.st_mode));
        if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
            throw BuildError(format("invalid ownership on file `%1%'") % path);
        mode_t mode = st.st_mode & ~S_IFMT;
        /* A previously-seen inode must already be fully canonicalised. */
        assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
        return;
    }

    inodesSeen.insert(Inode(st.st_dev, st.st_ino));

    canonicaliseTimestampAndPermissions(path, st);

    /* Change ownership to the current uid.  If it's a symlink, use
       lchown if available, otherwise don't bother.  Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable.  The only exception is top-level paths in the
       store (since that directory is group-writable for the build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), getegid()) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), getegid()) == -1)
#endif
            throw SysError(format("changing owner of `%1%' to %2%")
                % path % geteuid());
    }

    /* Recurse into directory contents. */
    if (S_ISDIR(st.st_mode)) {
        DirEntries entries = readDirectory(path);
        for (auto & i : entries)
            canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
    }
}
428 | ||
429 | ||
430 | void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen) | |
431 | { | |
432 | canonicalisePathMetaData_(path, fromUid, inodesSeen); | |
433 | ||
434 | /* On platforms that don't have lchown(), the top-level path can't | |
435 | be a symlink, since we can't change its ownership. */ | |
436 | struct stat st; | |
437 | if (lstat(path.c_str(), &st)) | |
438 | throw SysError(format("getting attributes of path `%1%'") % path); | |
439 | ||
440 | if (st.st_uid != geteuid()) { | |
441 | assert(S_ISLNK(st.st_mode)); | |
442 | throw Error(format("wrong ownership of top-level store path `%1%'") % path); | |
443 | } | |
444 | } | |
445 | ||
446 | ||
447 | void canonicalisePathMetaData(const Path & path, uid_t fromUid) | |
448 | { | |
449 | InodesSeen inodesSeen; | |
450 | canonicalisePathMetaData(path, fromUid, inodesSeen); | |
451 | } | |
452 | ||
453 | ||
/* Verify that the output paths recorded in derivation `drv' (both in
   its `outputs' field and in the corresponding environment variables)
   match the paths recomputed from the derivation itself; throw an
   Error on any mismatch. */
void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
{
    string drvName = storePathToName(drvPath);
    assert(isDerivation(drvName));
    /* Strip the ".drv" extension to get the package name. */
    drvName = string(drvName, 0, drvName.size() - drvExtension.size());

    if (isFixedOutputDrv(drv)) {
        /* Fixed-output derivations have a single `out' output whose
           path is derived from the declared content hash. */
        DerivationOutputs::const_iterator out = drv.outputs.find("out");
        if (out == drv.outputs.end())
            throw Error(format("derivation `%1%' does not have an output named `out'") % drvPath);

        bool recursive; HashType ht; Hash h;
        out->second.parseHashInfo(recursive, ht, h);
        Path outPath = makeFixedOutputPath(recursive, ht, h, drvName);

        StringPairs::const_iterator j = drv.env.find("out");
        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
            throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                % drvPath % out->second.path % outPath);
    }

    else {
        /* For regular derivations, recompute the hash with the output
           paths and environment entries blanked out (they depend on
           the hash being computed here). */
        Derivation drvCopy(drv);
        foreach (DerivationOutputs::iterator, i, drvCopy.outputs) {
            i->second.path = "";
            drvCopy.env[i->first] = "";
        }

        Hash h = hashDerivationModulo(*this, drvCopy);

        foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
            Path outPath = makeOutputPath(i->first, h, drvName);
            StringPairs::const_iterator j = drv.env.find(i->first);
            if (i->second.path != outPath || j == drv.env.end() || j->second != outPath)
                throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                    % drvPath % i->second.path % outPath);
        }
    }
}
493 | ||
494 | ||
b1fd0ab7 | 495 | uint64_t LocalStore::addValidPath(const ValidPathInfo & info, bool checkOutputs) |
36457566 | 496 | { |
b1fd0ab7 ED |
497 | stmtRegisterValidPath.use() |
498 | (info.path) | |
499 | ("sha256:" + printHash(info.hash)) | |
500 | (info.registrationTime == 0 ? time(0) : info.registrationTime) | |
501 | (info.deriver, info.deriver != "") | |
502 | (info.narSize, info.narSize != 0) | |
503 | .exec(); | |
504 | uint64_t id = sqlite3_last_insert_rowid(db); | |
36457566 LC |
505 | |
506 | /* If this is a derivation, then store the derivation outputs in | |
507 | the database. This is useful for the garbage collector: it can | |
508 | efficiently query whether a path is an output of some | |
509 | derivation. */ | |
510 | if (isDerivation(info.path)) { | |
15ddeff5 | 511 | Derivation drv = readDerivation(info.path); |
36457566 LC |
512 | |
513 | /* Verify that the output paths in the derivation are correct | |
514 | (i.e., follow the scheme for computing output paths from | |
515 | derivations). Note that if this throws an error, then the | |
516 | DB transaction is rolled back, so the path validity | |
517 | registration above is undone. */ | |
518 | if (checkOutputs) checkDerivationOutputs(info.path, drv); | |
519 | ||
b1fd0ab7 ED |
520 | for (auto & i : drv.outputs) { |
521 | stmtAddDerivationOutput.use() | |
522 | (id) | |
523 | (i.first) | |
524 | (i.second.path) | |
525 | .exec(); | |
36457566 LC |
526 | } |
527 | } | |
528 | ||
529 | return id; | |
530 | } | |
531 | ||
532 | ||
b1fd0ab7 | 533 | void LocalStore::addReference(uint64_t referrer, uint64_t reference) |
36457566 | 534 | { |
b1fd0ab7 | 535 | stmtAddReference.use()(referrer)(reference).exec(); |
36457566 LC |
536 | } |
537 | ||
538 | ||
539 | void LocalStore::registerFailedPath(const Path & path) | |
540 | { | |
517ce0c1 | 541 | retrySQLite<void>([&]() { |
b1fd0ab7 | 542 | stmtRegisterFailedPath.use()(path)(time(0)).step(); |
517ce0c1 | 543 | }); |
36457566 LC |
544 | } |
545 | ||
546 | ||
547 | bool LocalStore::hasPathFailed(const Path & path) | |
548 | { | |
517ce0c1 | 549 | return retrySQLite<bool>([&]() { |
b1fd0ab7 | 550 | return stmtHasPathFailed.use()(path).next(); |
517ce0c1 | 551 | }); |
36457566 LC |
552 | } |
553 | ||
554 | ||
555 | PathSet LocalStore::queryFailedPaths() | |
556 | { | |
517ce0c1 | 557 | return retrySQLite<PathSet>([&]() { |
b1fd0ab7 | 558 | auto useQueryFailedPaths(stmtQueryFailedPaths.use()); |
36457566 LC |
559 | |
560 | PathSet res; | |
b1fd0ab7 ED |
561 | while (useQueryFailedPaths.next()) |
562 | res.insert(useQueryFailedPaths.getStr(0)); | |
36457566 LC |
563 | |
564 | return res; | |
517ce0c1 | 565 | }); |
36457566 LC |
566 | } |
567 | ||
568 | ||
569 | void LocalStore::clearFailedPaths(const PathSet & paths) | |
570 | { | |
517ce0c1 | 571 | retrySQLite<void>([&]() { |
36457566 LC |
572 | SQLiteTxn txn(db); |
573 | ||
b1fd0ab7 ED |
574 | for (auto & path : paths) |
575 | stmtClearFailedPath.use()(path).exec(); | |
36457566 LC |
576 | |
577 | txn.commit(); | |
517ce0c1 | 578 | }); |
36457566 LC |
579 | } |
580 | ||
581 | ||
582 | Hash parseHashField(const Path & path, const string & s) | |
583 | { | |
584 | string::size_type colon = s.find(':'); | |
585 | if (colon == string::npos) | |
586 | throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'") | |
587 | % s % path); | |
588 | HashType ht = parseHashType(string(s, 0, colon)); | |
589 | if (ht == htUnknown) | |
590 | throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'") | |
591 | % string(s, 0, colon) % path); | |
592 | return parseHash(ht, string(s, colon + 1)); | |
593 | } | |
594 | ||
595 | ||
/* Return the full metadata (row id, hash, registration time, deriver,
   NAR size, references) of store path `path'.  Throws if the path is
   not a store path or is not valid. */
ValidPathInfo LocalStore::queryPathInfo(const Path & path)
{
    ValidPathInfo info;
    info.path = path;

    assertStorePath(path);

    return retrySQLite<ValidPathInfo>([&]() {

        /* Get the path info. */
        auto useQueryPathInfo(stmtQueryPathInfo.use()(path));

        if (!useQueryPathInfo.next())
            throw Error(format("path `%1%' is not valid") % path);

        info.id = useQueryPathInfo.getInt(0);

        info.hash = parseHashField(path, useQueryPathInfo.getStr(1));

        info.registrationTime = useQueryPathInfo.getInt(2);

        /* Read the deriver column through the raw SQLite API so that a
           NULL deriver leaves info.deriver empty. */
        auto s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
        if (s) info.deriver = s;

        /* Note that narSize = NULL yields 0. */
        info.narSize = useQueryPathInfo.getInt(4);

        /* Get the references. */
        auto useQueryReferences(stmtQueryReferences.use()(info.id));

        while (useQueryReferences.next())
            info.references.insert(useQueryReferences.getStr(0));

        return info;
    });
}
632 | ||
633 | ||
/* Update path info in the database.  Currently only updates the
   narSize field. */
void LocalStore::updatePathInfo(const ValidPathInfo & info)
{
    /* Bind order follows the update statement: narSize (NULL when 0),
       hash, then the path used as the key. */
    stmtUpdatePathInfo.use()
        (info.narSize, info.narSize != 0)
        ("sha256:" + printHash(info.hash))
        (info.path)
        .exec();
}
644 | ||
645 | ||
b1fd0ab7 | 646 | uint64_t LocalStore::queryValidPathId(const Path & path) |
36457566 | 647 | { |
b1fd0ab7 ED |
648 | auto use(stmtQueryPathInfo.use()(path)); |
649 | if (!use.next()) | |
650 | throw Error(format("path ‘%1%’ is not valid") % path); | |
651 | return use.getInt(0); | |
36457566 LC |
652 | } |
653 | ||
654 | ||
655 | bool LocalStore::isValidPath_(const Path & path) | |
656 | { | |
b1fd0ab7 | 657 | return stmtQueryPathInfo.use()(path).next(); |
36457566 LC |
658 | } |
659 | ||
660 | ||
661 | bool LocalStore::isValidPath(const Path & path) | |
662 | { | |
517ce0c1 | 663 | return retrySQLite<bool>([&]() { |
36457566 | 664 | return isValidPath_(path); |
517ce0c1 | 665 | }); |
36457566 LC |
666 | } |
667 | ||
668 | ||
669 | PathSet LocalStore::queryValidPaths(const PathSet & paths) | |
670 | { | |
517ce0c1 | 671 | return retrySQLite<PathSet>([&]() { |
36457566 LC |
672 | PathSet res; |
673 | foreach (PathSet::const_iterator, i, paths) | |
674 | if (isValidPath_(*i)) res.insert(*i); | |
675 | return res; | |
517ce0c1 | 676 | }); |
36457566 LC |
677 | } |
678 | ||
679 | ||
680 | PathSet LocalStore::queryAllValidPaths() | |
681 | { | |
517ce0c1 | 682 | return retrySQLite<PathSet>([&]() { |
b1fd0ab7 | 683 | auto use(stmtQueryValidPaths.use()); |
36457566 | 684 | PathSet res; |
b1fd0ab7 | 685 | while (use.next()) res.insert(use.getStr(0)); |
36457566 | 686 | return res; |
517ce0c1 | 687 | }); |
36457566 LC |
688 | } |
689 | ||
690 | ||
691 | void LocalStore::queryReferences(const Path & path, | |
692 | PathSet & references) | |
693 | { | |
694 | ValidPathInfo info = queryPathInfo(path); | |
695 | references.insert(info.references.begin(), info.references.end()); | |
696 | } | |
697 | ||
698 | ||
699 | void LocalStore::queryReferrers_(const Path & path, PathSet & referrers) | |
700 | { | |
b1fd0ab7 | 701 | auto useQueryReferrers(stmtQueryReferrers.use()(path)); |
36457566 | 702 | |
b1fd0ab7 ED |
703 | while (useQueryReferrers.next()) |
704 | referrers.insert(useQueryReferrers.getStr(0)); | |
36457566 LC |
705 | } |
706 | ||
707 | ||
708 | void LocalStore::queryReferrers(const Path & path, PathSet & referrers) | |
709 | { | |
710 | assertStorePath(path); | |
517ce0c1 | 711 | return retrySQLite<void>([&]() { |
36457566 | 712 | queryReferrers_(path, referrers); |
517ce0c1 | 713 | }); |
36457566 LC |
714 | } |
715 | ||
716 | ||
717 | Path LocalStore::queryDeriver(const Path & path) | |
718 | { | |
719 | return queryPathInfo(path).deriver; | |
720 | } | |
721 | ||
722 | ||
723 | PathSet LocalStore::queryValidDerivers(const Path & path) | |
724 | { | |
725 | assertStorePath(path); | |
726 | ||
517ce0c1 | 727 | return retrySQLite<PathSet>([&]() { |
b1fd0ab7 | 728 | auto useQueryValidDerivers(stmtQueryValidDerivers.use()(path)); |
36457566 LC |
729 | |
730 | PathSet derivers; | |
b1fd0ab7 ED |
731 | while (useQueryValidDerivers.next()) |
732 | derivers.insert(useQueryValidDerivers.getStr(1)); | |
36457566 LC |
733 | |
734 | return derivers; | |
517ce0c1 | 735 | }); |
36457566 LC |
736 | } |
737 | ||
738 | ||
739 | PathSet LocalStore::queryDerivationOutputs(const Path & path) | |
740 | { | |
517ce0c1 | 741 | return retrySQLite<PathSet>([&]() { |
b1fd0ab7 | 742 | auto useQueryDerivationOutputs(stmtQueryDerivationOutputs.use()(queryValidPathId(path))); |
36457566 LC |
743 | |
744 | PathSet outputs; | |
b1fd0ab7 ED |
745 | while (useQueryDerivationOutputs.next()) |
746 | outputs.insert(useQueryDerivationOutputs.getStr(1)); | |
36457566 LC |
747 | |
748 | return outputs; | |
517ce0c1 | 749 | }); |
36457566 LC |
750 | } |
751 | ||
752 | ||
753 | StringSet LocalStore::queryDerivationOutputNames(const Path & path) | |
754 | { | |
517ce0c1 | 755 | return retrySQLite<StringSet>([&]() { |
b1fd0ab7 | 756 | auto useQueryDerivationOutputs(stmtQueryDerivationOutputs.use()(queryValidPathId(path))); |
36457566 LC |
757 | |
758 | StringSet outputNames; | |
b1fd0ab7 ED |
759 | while (useQueryDerivationOutputs.next()) |
760 | outputNames.insert(useQueryDerivationOutputs.getStr(0)); | |
36457566 LC |
761 | |
762 | return outputNames; | |
517ce0c1 | 763 | }); |
36457566 LC |
764 | } |
765 | ||
766 | ||
/* Return the valid store path whose 32-character hash part is
   `hashPart', or the empty string if there is none. */
Path LocalStore::queryPathFromHashPart(const string & hashPart)
{
    if (hashPart.size() != 32) throw Error("invalid hash part");

    Path prefix = settings.nixStore + "/" + hashPart;

    return retrySQLite<Path>([&]() -> Path {
        auto useQueryPathFromHashPart(stmtQueryPathFromHashPart.use()(prefix));

        if (!useQueryPathFromHashPart.next()) return "";

        /* The statement returns the first path >= prefix, so we must
           still check that it actually starts with the prefix. */
        const char * s = (const char *) sqlite3_column_text(stmtQueryPathFromHashPart, 0);
        return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
    });
}
782 | ||
2d730862 LC |
/* Read a line from the substituter's reply file descriptor, while also
   processing its stderr.  Complete stderr lines are forwarded to our
   own stderr with a "substitute: " prefix; the function returns once a
   newline is seen on the reply descriptor. */
string LocalStore::getLineFromSubstituter(Agent & run)
{
    string res, err;

    while (1) {
        checkInterrupt();

        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(run.fromAgent.readSide, &fds);
        FD_SET(run.builderOut.readSide, &fds);

        /* Wait for data to appear on the substituter's stdout or
           stderr. */
        if (select(std::max(run.fromAgent.readSide, run.builderOut.readSide) + 1, &fds, 0, 0, 0) == -1) {
            if (errno == EINTR) continue;
            throw SysError("waiting for input from the substituter");
        }

        /* Completely drain stderr before dealing with stdout. */
        if (FD_ISSET(run.fromAgent.readSide, &fds)) {
            char buf[4096];
            ssize_t n = read(run.fromAgent.readSide, (unsigned char *) buf, sizeof(buf));
            if (n == -1) {
                if (errno == EINTR) continue;
                throw SysError("reading from substituter's stderr");
            }
            /* EOF on stderr means the substituter process is gone. */
            if (n == 0) throw EndOfFile(format("`%1% substitute' died unexpectedly")
                % settings.guixProgram);
            err.append(buf, n);
            string::size_type p;
            /* Forward each complete line (terminated by \n or \r) and
               keep any trailing partial line buffered in `err'. */
            while (((p = err.find('\n')) != string::npos)
                   || ((p = err.find('\r')) != string::npos)) {
                string thing(err, 0, p + 1);
                writeToStderr("substitute: " + thing);
                err = string(err, p + 1);
            }
        }

        /* Read from stdout until we get a newline or the buffer is empty. */
        else if (FD_ISSET(run.builderOut.readSide, &fds)) {
            unsigned char c;
            readFull(run.builderOut.readSide, (unsigned char *) &c, 1);
            if (c == '\n') {
                /* Flush any buffered partial stderr line before
                   returning the reply. */
                if (!err.empty()) printMsg(lvlError, "substitute: " + err);
                return res;
            }
            res += c;
        }
    }
}
836 | ||
837 | ||
79c6614f | 838 | template<class T> T LocalStore::getIntLineFromSubstituter(Agent & run) |
36457566 LC |
839 | { |
840 | string s = getLineFromSubstituter(run); | |
841 | T res; | |
842 | if (!string2Int(s, res)) throw Error("integer expected from stream"); | |
843 | return res; | |
844 | } | |
845 | ||
846 | ||
847 | PathSet LocalStore::querySubstitutablePaths(const PathSet & paths) | |
848 | { | |
849 | PathSet res; | |
151afd84 | 850 | |
f6919ebd LC |
851 | if (!settings.useSubstitutes || paths.empty()) return res; |
852 | ||
a618a8c6 | 853 | Agent & run = *substituter(); |
79c6614f LC |
854 | |
855 | string s = "have "; | |
856 | foreach (PathSet::const_iterator, j, paths) | |
857 | if (res.find(*j) == res.end()) { s += *j; s += " "; } | |
858 | writeLine(run.toAgent.writeSide, s); | |
859 | while (true) { | |
860 | /* FIXME: we only read stderr when an error occurs, so | |
861 | substituters should only write (short) messages to | |
862 | stderr when they fail. I.e. they shouldn't write debug | |
863 | output. */ | |
864 | Path path = getLineFromSubstituter(run); | |
865 | if (path == "") break; | |
866 | res.insert(path); | |
36457566 | 867 | } |
f6919ebd | 868 | |
36457566 LC |
869 | return res; |
870 | } | |
871 | ||
872 | ||
a618a8c6 | 873 | std::shared_ptr<Agent> LocalStore::substituter() |
36457566 | 874 | { |
f6919ebd | 875 | if (!runningSubstituter) { |
79c6614f LC |
876 | const Strings args = { "substitute", "--query" }; |
877 | const std::map<string, string> env = { { "_NIX_OPTIONS", settings.pack() } }; | |
a618a8c6 | 878 | runningSubstituter = std::make_shared<Agent>(settings.guixProgram, args, env); |
f6919ebd LC |
879 | } |
880 | ||
a618a8c6 LC |
881 | return runningSubstituter; |
882 | } | |
883 | ||
/* Query substitute information for PATHS, filling INFOS.  Paths for
   which the agent returned information are erased from PATHS, so on
   return PATHS contains only the paths that are not substitutable. */
void LocalStore::querySubstitutablePathInfos(PathSet & paths, SubstitutablePathInfos & infos)
{
    if (!settings.useSubstitutes) return;

    Agent & run = *substituter();

    /* Send one "info" request listing only the paths we don't already
       have information for. */
    string s = "info ";
    foreach (PathSet::const_iterator, i, paths)
        if (infos.find(*i) == infos.end()) { s += *i; s += " "; }
    writeLine(run.toAgent.writeSide, s);

    /* Reply format, per path: the path, its deriver (possibly empty),
       the reference count followed by that many references, then the
       download size and NAR size.  An empty line terminates. */
    while (true) {
        Path path = getLineFromSubstituter(run);
        if (path == "") break;
        if (paths.find(path) == paths.end())
            throw Error(format("got unexpected path `%1%' from substituter") % path);
        paths.erase(path);
        SubstitutablePathInfo & info(infos[path]);
        info.deriver = getLineFromSubstituter(run);
        if (info.deriver != "") assertStorePath(info.deriver);
        int nrRefs = getIntLineFromSubstituter<int>(run);
        while (nrRefs--) {
            Path p = getLineFromSubstituter(run);
            assertStorePath(p);
            info.references.insert(p);
        }
        info.downloadSize = getIntLineFromSubstituter<long long>(run);
        info.narSize = getIntLineFromSubstituter<long long>(run);
    }
}
914 | ||
915 | ||
916 | void LocalStore::querySubstitutablePathInfos(const PathSet & paths, | |
917 | SubstitutablePathInfos & infos) | |
918 | { | |
f6919ebd LC |
919 | if (!paths.empty()) { |
920 | PathSet todo = paths; | |
921 | querySubstitutablePathInfos(todo, infos); | |
36457566 LC |
922 | } |
923 | } | |
924 | ||
925 | ||
926 | Hash LocalStore::queryPathHash(const Path & path) | |
927 | { | |
928 | return queryPathInfo(path).hash; | |
929 | } | |
930 | ||
931 | ||
932 | void LocalStore::registerValidPath(const ValidPathInfo & info) | |
933 | { | |
934 | ValidPathInfos infos; | |
935 | infos.push_back(info); | |
936 | registerValidPaths(infos); | |
937 | } | |
938 | ||
939 | ||
/* Register INFOS as valid paths in the database, inside a single SQLite
   transaction: upsert each path record, then record all references,
   verify derivation outputs, and reject reference cycles. */
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
{
    /* SQLite will fsync by default, but the new valid paths may not be fsync-ed.
     * So some may want to fsync them before registering the validity, at the
     * expense of some speed of the path registering operation. */
    if (settings.syncBeforeRegistering) sync();

    return retrySQLite<void>([&]() {
        SQLiteTxn txn(db);
        PathSet paths;

        /* First pass: insert or update each path's own record.  All
           registered hashes are NAR SHA-256 hashes. */
        foreach (ValidPathInfos::const_iterator, i, infos) {
            assert(i->hash.type == htSHA256);
            if (isValidPath_(i->path))
                updatePathInfo(*i);
            else
                addValidPath(*i, false);
            paths.insert(i->path);
        }

        /* Second pass: record references, now that every path in INFOS
           has a row id to refer to. */
        for (auto & i : infos) {
            auto referrer = queryValidPathId(i.path);
            for (auto & j : i.references)
                addReference(referrer, queryValidPathId(j));
        }

        /* Check that the derivation outputs are correct. We can't do
           this in addValidPath() above, because the references might
           not be valid yet. */
        foreach (ValidPathInfos::const_iterator, i, infos)
            if (isDerivation(i->path)) {
                // FIXME: inefficient; we already loaded the
                // derivation in addValidPath().
                Derivation drv = readDerivation(i->path);
                checkDerivationOutputs(i->path, drv);
            }

        /* Do a topological sort of the paths. This will throw an
           error if a cycle is detected and roll back the
           transaction. Cycles can only occur when a derivation
           has multiple outputs. */
        topoSortPaths(*this, paths);

        txn.commit();
    });
}
986 | ||
987 | ||
/* Invalidate a path.  The caller is responsible for checking that
   there are no referrers. */
void LocalStore::invalidatePath(const Path & path)
{
    debug(format("invalidating path `%1%'") % path);

    /* Drop any cached derivation hash for this path. */
    drvHashes.erase(path);

    /* Delete the path's row from the ValidPaths table. */
    stmtInvalidatePath.use()(path).exec();

    /* Note that the foreign key constraints on the Refs table take
       care of deleting the references entries for `path'. */
}
1001 | ||
1002 | ||
/* Add DUMP — a NAR serialisation if RECURSIVE, flat file contents
   otherwise — to the store under NAME at the fixed-output path derived
   from HASHALGO, and register it as valid.  Returns the store path.
   With REPAIR, rebuild the path even if it is already valid. */
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
    bool recursive, HashType hashAlgo, bool repair)
{
    Hash h = hashString(hashAlgo, dump);

    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);

    /* Protect the destination from the garbage collector while we
       work. */
    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        /* The first check above is an optimisation to prevent
           unnecessary lock acquisition. */

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: another process may have built the
           path while we were waiting. */
        if (repair || !isValidPath(dstPath)) {

            if (pathExists(dstPath)) deletePath(dstPath);

            if (recursive) {
                /* DUMP is a NAR archive: unpack it in place. */
                StringSource source(dump);
                restorePath(dstPath, source);
            } else
                writeFile(dstPath, dump);

            canonicalisePathMetaData(dstPath, -1);

            /* Register the SHA-256 hash of the NAR serialisation of
               the path in the database. We may just have computed it
               above (if called with recursive == true and hashAlgo ==
               sha256); otherwise, compute it here. */
            HashResult hash;
            if (recursive) {
                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
                hash.second = dump.size();
            } else
                hash = hashPath(htSHA256, dstPath);

            optimisePath(dstPath); // FIXME: combine with hashPath()

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
1056 | ||
1057 | ||
/* Copy SRCPATH into the store under NAME — as a NAR serialisation when
   RECURSIVE (files matching FILTER only), as flat file contents
   otherwise — and return the resulting store path. */
Path LocalStore::addToStore(const string & name, const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
{
    Path srcPath(absPath(_srcPath));
    debug(format("adding `%1%' to the store") % srcPath);

    /* Read the whole path into memory. This is not a very scalable
       method for very large paths, but `copyPath' is mainly used for
       small files. */
    StringSink sink;
    if (recursive)
        dumpPath(srcPath, sink, filter);
    else
        sink.s = readFile(srcPath);

    return addToStoreFromDump(sink.s, name, recursive, hashAlgo, repair);
}
1075 | ||
1076 | ||
/* Store the string S under NAME at its text-output store path, register
   REFERENCES for it, and return the store path.  With REPAIR, rewrite
   the path even if it is already valid. */
Path LocalStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, bool repair)
{
    Path dstPath = computeStorePathForText(name, s, references);

    /* Protect the destination from the garbage collector while we
       work. */
    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: another process may have created the
           path while we were waiting. */
        if (repair || !isValidPath(dstPath)) {

            if (pathExists(dstPath)) deletePath(dstPath);

            writeFile(dstPath, s);

            canonicalisePathMetaData(dstPath, -1);

            /* Register under the SHA-256 hash of the path's NAR
               serialisation. */
            HashResult hash = hashPath(htSHA256, dstPath);

            optimisePath(dstPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            info.references = references;
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
1113 | ||
1114 | ||
/* A sink that forwards every write to WRITESINK while also feeding the
   bytes into a SHA-256 hasher, so the hash of the stream written so far
   can be obtained at any point. */
struct HashAndWriteSink : Sink
{
    Sink & writeSink;      // underlying destination
    HashSink hashSink;     // accumulates SHA-256 of everything written
    HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
    {
    }
    virtual void operator () (const unsigned char * data, size_t len)
    {
        writeSink(data, len);
        hashSink(data, len);
    }
    /* Return the hash of all data written so far. */
    Hash currentHash()
    {
        return hashSink.currentHash().first;
    }
};
1132 | ||
1133 | ||
1134 | #define EXPORT_MAGIC 0x4558494e | |
1135 | ||
1136 | ||
1137 | static void checkSecrecy(const Path & path) | |
1138 | { | |
1139 | struct stat st; | |
1140 | if (stat(path.c_str(), &st)) | |
1141 | throw SysError(format("getting status of `%1%'") % path); | |
1142 | if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0) | |
1143 | throw Error(format("file `%1%' should be secret (inaccessible to everybody else)!") % path); | |
1144 | } | |
1145 | ||
1146 | ||
64cf660f LC |
1147 | /* Return the authentication agent, a "guix authenticate" process started |
1148 | lazily. */ | |
1149 | static std::shared_ptr<Agent> authenticationAgent() | |
0fe1fba4 | 1150 | { |
64cf660f LC |
1151 | static std::shared_ptr<Agent> agent; |
1152 | ||
1153 | if (!agent) { | |
1154 | Strings args = { "authenticate" }; | |
1155 | agent = std::make_shared<Agent>(settings.guixProgram, args); | |
1156 | } | |
1157 | ||
1158 | return agent; | |
1159 | } | |
1160 | ||
1161 | /* Read an integer and the byte that immediately follows it from FD. Return | |
1162 | the integer. */ | |
1163 | static int readInteger(int fd) | |
1164 | { | |
1165 | string str; | |
1166 | ||
1167 | while (1) { | |
1168 | char ch; | |
1169 | ssize_t rd = read(fd, &ch, 1); | |
1170 | if (rd == -1) { | |
1171 | if (errno != EINTR) | |
1172 | throw SysError("reading an integer"); | |
1173 | } else if (rd == 0) | |
1174 | throw EndOfFile("unexpected EOF reading an integer"); | |
1175 | else { | |
1176 | if (isdigit(ch)) { | |
1177 | str += ch; | |
1178 | } else { | |
1179 | break; | |
1180 | } | |
1181 | } | |
1182 | } | |
1183 | ||
1184 | return stoi(str); | |
1185 | } | |
1186 | ||
1187 | /* Read from FD a reply coming from 'guix authenticate'. The reply has the | |
1188 | form "CODE LEN:STR". CODE is an integer, where zero indicates success. | |
1189 | LEN specifies the length in bytes of the string that immediately | |
1190 | follows. */ | |
1191 | static std::string readAuthenticateReply(int fd) | |
1192 | { | |
1193 | int code = readInteger(fd); | |
1194 | int len = readInteger(fd); | |
1195 | ||
1196 | string str; | |
1197 | str.resize(len); | |
1198 | readFull(fd, (unsigned char *) &str[0], len); | |
1199 | ||
1200 | if (code == 0) | |
1201 | return str; | |
1202 | else | |
1203 | throw Error(str); | |
0fe1fba4 LC |
1204 | } |
1205 | ||
27cc51c2 LC |
1206 | /* Sign HASH with the key stored in file SECRETKEY. Return the signature as a |
1207 | string, or raise an exception upon error. */ | |
1208 | static std::string signHash(const string &secretKey, const Hash &hash) | |
1209 | { | |
64cf660f LC |
1210 | auto agent = authenticationAgent(); |
1211 | auto hexHash = printHash(hash); | |
1212 | ||
1213 | writeLine(agent->toAgent.writeSide, | |
1214 | (format("sign %1%:%2% %3%:%4%") | |
1215 | % secretKey.size() % secretKey | |
1216 | % hexHash.size() % hexHash).str()); | |
27cc51c2 | 1217 | |
64cf660f | 1218 | return readAuthenticateReply(agent->fromAgent.readSide); |
27cc51c2 LC |
1219 | } |
1220 | ||
1221 | /* Verify SIGNATURE and return the base16-encoded hash over which it was | |
1222 | computed. */ | |
1223 | static std::string verifySignature(const string &signature) | |
1224 | { | |
64cf660f | 1225 | auto agent = authenticationAgent(); |
27cc51c2 | 1226 | |
64cf660f LC |
1227 | writeLine(agent->toAgent.writeSide, |
1228 | (format("verify %1%:%2%") | |
1229 | % signature.size() % signature).str()); | |
27cc51c2 | 1230 | |
64cf660f | 1231 | return readAuthenticateReply(agent->fromAgent.readSide); |
27cc51c2 LC |
1232 | } |
1233 | ||
36457566 LC |
/* Serialise PATH and its metadata (references, deriver) to SINK in the
   export format, optionally appending a signature over the whole
   stream. */
void LocalStore::exportPath(const Path & path, bool sign,
    Sink & sink)
{
    assertStorePath(path);

    printMsg(lvlInfo, format("exporting path `%1%'") % path);

    if (!isValidPath(path))
        throw Error(format("path `%1%' is not valid") % path);

    /* Everything written to SINK also flows through a SHA-256 hasher so
       we can verify the dump below and sign the stream. */
    HashAndWriteSink hashAndWriteSink(sink);

    dumpPath(path, hashAndWriteSink);

    /* Refuse to export paths that have changed.  This prevents
       filesystem corruption from spreading to other machines.
       Don't complain if the stored hash is zero (unknown). */
    Hash hash = hashAndWriteSink.currentHash();
    Hash storedHash = queryPathHash(path);
    if (hash != storedHash && storedHash != Hash(storedHash.type))
        throw Error(format("hash of path `%1%' has changed from `%2%' to `%3%'!") % path
            % printHash(storedHash) % printHash(hash));

    writeInt(EXPORT_MAGIC, hashAndWriteSink);

    writeString(path, hashAndWriteSink);

    PathSet references;
    queryReferences(path, references);
    writeStrings(references, hashAndWriteSink);

    Path deriver = queryDeriver(path);
    writeString(deriver, hashAndWriteSink);

    if (sign) {
        /* Sign the hash of everything emitted so far (archive plus
           metadata); a leading 1 marks the presence of a signature. */
        Hash hash = hashAndWriteSink.currentHash();

        writeInt(1, hashAndWriteSink);

        Path secretKey = settings.nixConfDir + "/signing-key.sec";
        checkSecrecy(secretKey);

        string signature = signHash(secretKey, hash);

        writeString(signature, hashAndWriteSink);

    } else
        writeInt(0, hashAndWriteSink);
}
1283 | ||
1284 | ||
/* A source that reads from READSOURCE while feeding the bytes into a
   SHA-256 hasher, until `hashing' is cleared.  Used to check the
   integrity of imported archives. */
struct HashAndReadSource : Source
{
    Source & readSource;   // underlying data source
    HashSink hashSink;     // accumulates SHA-256 of the bytes read
    bool hashing;          // once false, further reads are not hashed
    HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256)
    {
        hashing = true;
    }
    size_t read(unsigned char * data, size_t len)
    {
        size_t n = readSource.read(data, len);
        if (hashing) hashSink(data, n);
        return n;
    }
};
1301 | ||
1302 | ||
1303 | /* Create a temporary directory in the store that won't be | |
1304 | garbage-collected. */ | |
1305 | Path LocalStore::createTempDirInStore() | |
1306 | { | |
1307 | Path tmpDir; | |
1308 | do { | |
1309 | /* There is a slight possibility that `tmpDir' gets deleted by | |
1310 | the GC between createTempDir() and addTempRoot(), so repeat | |
1311 | until `tmpDir' exists. */ | |
1312 | tmpDir = createTempDir(settings.nixStore); | |
1313 | addTempRoot(tmpDir); | |
1314 | } while (!pathExists(tmpDir)); | |
1315 | return tmpDir; | |
1316 | } | |
1317 | ||
1318 | ||
/* Read one exported archive from SOURCE, verify its signature if
   REQUIRESIGNATURE, and install it in the store.  Returns the imported
   store path. */
Path LocalStore::importPath(bool requireSignature, Source & source)
{
    HashAndReadSource hashAndReadSource(source);

    /* We don't yet know what store path this archive contains (the
       store path follows the archive data proper), and besides, we
       don't know yet whether the signature is valid. */
    Path tmpDir = createTempDirInStore();
    AutoDelete delTmp(tmpDir);
    Path unpacked = tmpDir + "/unpacked";

    restorePath(unpacked, hashAndReadSource);

    unsigned int magic = readInt(hashAndReadSource);
    if (magic != EXPORT_MAGIC)
        throw Error("normalized archive cannot be imported; wrong format");

    Path dstPath = readStorePath(hashAndReadSource);

    PathSet references = readStorePaths<PathSet>(hashAndReadSource);

    Path deriver = readString(hashAndReadSource);
    if (deriver != "") assertStorePath(deriver);

    /* The signature (if any) covers everything up to this point; stop
       hashing before reading it. */
    Hash hash = hashAndReadSource.hashSink.finish().first;
    hashAndReadSource.hashing = false;

    bool haveSignature = readInt(hashAndReadSource) == 1;

    if (requireSignature && !haveSignature)
        throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);

    if (haveSignature) {
        string signature = readString(hashAndReadSource);

        if (requireSignature) {
            /* Verify that the signature matches the hash we computed
               over the archive we actually received. */
            string hash2 = verifySignature(signature);

            /* Note: runProgram() throws an exception if the signature
               is invalid. */

            if (printHash(hash) != hash2)
                throw Error(
                    "signed hash doesn't match actual contents of imported "
                    "archive; archive could be corrupt, or someone is trying "
                    "to import a Trojan horse");
        }
    }

    /* Do the actual import. */

    /* !!! way too much code duplication with addTextToStore() etc. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        PathLocks outputLock;

        /* Lock the output path. But don't lock if we're being called
           from a build hook (whose parent process already acquired a
           lock on this path). */
        Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
        if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end())
            outputLock.lockPaths(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock. */
        if (!isValidPath(dstPath)) {

            if (pathExists(dstPath)) deletePath(dstPath);

            if (rename(unpacked.c_str(), dstPath.c_str()) == -1)
                throw SysError(format("cannot move `%1%' to `%2%'")
                    % unpacked % dstPath);

            canonicalisePathMetaData(dstPath, -1);

            /* !!! if we were clever, we could prevent the hashPath()
               here. */
            HashResult hash = hashPath(htSHA256, dstPath);

            optimisePath(dstPath); // FIXME: combine with hashPath()

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            info.references = references;
            /* Only record the deriver if it is itself valid. */
            info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
1414 | ||
1415 | ||
1416 | Paths LocalStore::importPaths(bool requireSignature, Source & source) | |
1417 | { | |
1418 | Paths res; | |
1419 | while (true) { | |
1420 | unsigned long long n = readLongLong(source); | |
1421 | if (n == 0) break; | |
1422 | if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'"); | |
1423 | res.push_back(importPath(requireSignature, source)); | |
1424 | } | |
1425 | return res; | |
1426 | } | |
1427 | ||
1428 | ||
/* Invalidate PATH after checking that it is a store path with no
   remaining referrers (other than itself); throws PathInUse
   otherwise. */
void LocalStore::invalidatePathChecked(const Path & path)
{
    assertStorePath(path);

    retrySQLite<void>([&]() {
        SQLiteTxn txn(db);

        if (isValidPath_(path)) {
            PathSet referrers; queryReferrers_(path, referrers);
            referrers.erase(path); /* ignore self-references */
            if (!referrers.empty())
                throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
                    % path % showPaths(referrers));
            invalidatePath(path);
        }

        txn.commit();
    });
}
1448 | ||
1449 | ||
/* Check the database against the actual store contents: verify that
   every valid path exists, and optionally (CHECKCONTENTS) that its
   contents still match its recorded hash.  With REPAIR, attempt to fix
   problems.  Returns true if unrepaired errors remain. */
bool LocalStore::verifyStore(bool checkContents, bool repair)
{
    printMsg(lvlError, format("reading the store..."));

    bool errors = false;

    /* Acquire the global GC lock to prevent a garbage collection. */
    AutoCloseFD fdGCLock = openGCLock(ltWrite);

    /* Snapshot the basenames currently present in the store
       directory. */
    PathSet store;
    for (auto & i : readDirectory(settings.nixStore)) store.insert(i.name);

    /* Check whether all valid paths actually exist. */
    printMsg(lvlInfo, "checking path existence...");

    PathSet validPaths2 = queryAllValidPaths(), validPaths, done;

    foreach (PathSet::iterator, i, validPaths2)
        verifyPath(*i, store, done, validPaths, repair, errors);

    /* Release the GC lock so that checking content hashes (which can
       take ages) doesn't block the GC or builds. */
    fdGCLock.close();

    /* Optionally, check the content hashes (slow). */
    if (checkContents) {
        printMsg(lvlInfo, "checking hashes...");

        /* An all-zero hash means "unknown". */
        Hash nullHash(htSHA256);

        foreach (PathSet::iterator, i, validPaths) {
            try {
                ValidPathInfo info = queryPathInfo(*i);

                /* Check the content hash (optionally - slow). */
                printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
                HashResult current = hashPath(info.hash.type, *i);

                if (info.hash != nullHash && info.hash != current.first) {
                    printMsg(lvlError, format("path `%1%' was modified! "
                            "expected hash `%2%', got `%3%'")
                        % *i % printHash(info.hash) % printHash(current.first));
                    if (repair) repairPath(*i); else errors = true;
                } else {

                    bool update = false;

                    /* Fill in missing hashes. */
                    if (info.hash == nullHash) {
                        printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
                        info.hash = current.first;
                        update = true;
                    }

                    /* Fill in missing narSize fields (from old stores). */
                    if (info.narSize == 0) {
                        printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
                        info.narSize = current.second;
                        update = true;
                    }

                    if (update) updatePathInfo(info);

                }

            } catch (Error & e) {
                /* It's possible that the path got GC'ed, so ignore
                   errors on invalid paths. */
                if (isValidPath(*i))
                    printMsg(lvlError, format("error: %1%") % e.msg());
                else
                    printMsg(lvlError, format("warning: %1%") % e.msg());
                errors = true;
            }
        }
    }

    return errors;
}
1529 | ||
1530 | ||
/* Verify that PATH (a registered valid path) actually exists in STORE
   (the set of basenames present on disk).  Paths that have disappeared
   are invalidated if none of their referrers survive; otherwise they
   are repaired or flagged as errors.  DONE guards against revisiting;
   surviving paths are added to VALIDPATHS. */
void LocalStore::verifyPath(const Path & path, const PathSet & store,
    PathSet & done, PathSet & validPaths, bool repair, bool & errors)
{
    checkInterrupt();

    if (done.find(path) != done.end()) return;
    done.insert(path);

    if (!isStorePath(path)) {
        printMsg(lvlError, format("path `%1%' is not in the store") % path);
        invalidatePath(path);
        return;
    }

    if (store.find(baseNameOf(path)) == store.end()) {
        /* Check any referrers first. If we can invalidate them
           first, then we can invalidate this path as well. */
        bool canInvalidate = true;
        PathSet referrers; queryReferrers(path, referrers);
        foreach (PathSet::iterator, i, referrers)
            if (*i != path) {
                /* Recurse: a referrer that itself survives prevents us
                   from invalidating this path. */
                verifyPath(*i, store, done, validPaths, repair, errors);
                if (validPaths.find(*i) != validPaths.end())
                    canInvalidate = false;
            }

        if (canInvalidate) {
            printMsg(lvlError, format("path `%1%' disappeared, removing from database...") % path);
            invalidatePath(path);
        } else {
            printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path);
            if (repair)
                try {
                    repairPath(path);
                } catch (Error & e) {
                    printMsg(lvlError, format("warning: %1%") % e.msg());
                    errors = true;
                }
            else errors = true;
        }

        return;
    }

    validPaths.insert(path);
}
1577 | ||
1578 | ||
1579 | bool LocalStore::pathContentsGood(const Path & path) | |
1580 | { | |
1581 | std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path); | |
1582 | if (i != pathContentsGoodCache.end()) return i->second; | |
1583 | printMsg(lvlInfo, format("checking path `%1%'...") % path); | |
1584 | ValidPathInfo info = queryPathInfo(path); | |
1585 | bool res; | |
1586 | if (!pathExists(path)) | |
1587 | res = false; | |
1588 | else { | |
1589 | HashResult current = hashPath(info.hash.type, path); | |
1590 | Hash nullHash(htSHA256); | |
1591 | res = info.hash == nullHash || info.hash == current.first; | |
1592 | } | |
1593 | pathContentsGoodCache[path] = res; | |
1594 | if (!res) printMsg(lvlError, format("path `%1%' is corrupted or missing!") % path); | |
1595 | return res; | |
1596 | } | |
1597 | ||
1598 | ||
/* Record in the in-memory cache that PATH's contents are known good,
   so pathContentsGood() won't rehash it. */
void LocalStore::markContentsGood(const Path & path)
{
    pathContentsGoodCache[path] = true;
}
1603 | ||
1604 | ||
36457566 LC |
1605 | void LocalStore::vacuumDB() |
1606 | { | |
1607 | if (sqlite3_exec(db, "vacuum;", 0, 0, 0) != SQLITE_OK) | |
1608 | throwSQLiteError(db, "vacuuming SQLite database"); | |
1609 | } | |
1610 | ||
1611 | ||
81c580c8 LC |
1612 | void LocalStore::createUser(const std::string & userName, uid_t userId) |
1613 | { | |
1614 | auto dir = settings.nixStateDir + "/profiles/per-user/" + userName; | |
1615 | ||
1616 | createDirs(dir); | |
1617 | if (chmod(dir.c_str(), 0755) == -1) | |
1618 | throw SysError(format("changing permissions of directory '%s'") % dir); | |
1619 | if (chown(dir.c_str(), userId, -1) == -1) | |
1620 | throw SysError(format("changing owner of directory '%s'") % dir); | |
1621 | } | |
1622 | ||
1623 | ||
36457566 | 1624 | } |