apt-pkg/pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39 template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
40
41 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
42 // ---------------------------------------------------------------------
43 /* We set the dirty flag and make sure that it is written to the disk */
44 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
45 Map(*pMap), Cache(pMap,false), Progress(Prog),
46 FoundFileDeps(0)
47 {
48 CurrentFile = 0;
49 memset(UniqHash,0,sizeof(UniqHash));
50
51 if (_error->PendingError() == true)
52 return;
53
54 if (Map.Size() == 0)
55 {
56 // Set up the map interface
57 Cache.HeaderP = (pkgCache::Header *)Map.Data();
58 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
59 return;
60
61 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
62
63 // Starting header
64 *Cache.HeaderP = pkgCache::Header();
65 map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
66 Cache.HeaderP->VerSysName = idxVerSysName;
67 map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
68 Cache.HeaderP->Architecture = idxArchitecture;
69 if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
70 return;
71 Cache.ReMap();
72 }
73 else
74 {
75 // Map directly from the existing file
76 Cache.ReMap();
77 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
78 if (Cache.VS != _system->VS)
79 {
80 _error->Error(_("Cache has an incompatible versioning system"));
81 return;
82 }
83 }
84
85 Cache.HeaderP->Dirty = true;
86 Map.Sync(0,sizeof(pkgCache::Header));
87 }
88 /*}}}*/
89 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
90 // ---------------------------------------------------------------------
91 /* We sync the data and then unset the dirty flag in two steps so as to
92 avoid a problem during a crash */
93 pkgCacheGenerator::~pkgCacheGenerator()
94 {
95 if (_error->PendingError() == true)
96 return;
97 if (Map.Sync() == false)
98 return;
99
100 Cache.HeaderP->Dirty = false;
101 Cache.HeaderP->CacheFileSize = Map.Size();
102 Map.Sync(0,sizeof(pkgCache::Header));
103 }
104 /*}}}*/
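// CacheGenerator::ReMap - Adjust cached pointers after the map moved
// ---------------------------------------------------------------------
/* The DynamicMMap may be relocated whenever it has to grow, so every raw
   pointer computed against the old base address must be shifted by the
   distance between the two mappings, e.g.
      newPtr = (T *) newMap + ((T *) oldPtr - (T *) oldMap);
   Iterators wrapped in Dynamic<> register themselves in toReMap and are
   fixed up here as well. */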
105 void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
106 if (oldMap == newMap)
107 return;
108
109 Cache.ReMap(false);
110
111 CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
112
113 for (size_t i = 0; i < _count(UniqHash); ++i)
114 if (UniqHash[i] != 0)
115 UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
116
117 for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
118 i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
119 (*i)->ReMap(oldMap, newMap);
120 for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
121 i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
122 (*i)->ReMap(oldMap, newMap);
123 for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
124 i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
125 (*i)->ReMap(oldMap, newMap);
126 for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
127 i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
128 (*i)->ReMap(oldMap, newMap);
129 for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
130 i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
131 (*i)->ReMap(oldMap, newMap);
132 for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
133 i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
134 (*i)->ReMap(oldMap, newMap);
135 for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
136 i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
137 (*i)->ReMap(oldMap, newMap);
138 } /*}}}*/
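// The three small helpers below follow the same pattern: remember the old
// base address, let the DynamicMMap allocate (which may move the mapping)
// and, if the allocation succeeded, call ReMap() to fix up cached pointers.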
139 // CacheGenerator::WriteStringInMap /*{{{*/
140 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
141 const unsigned long &Len) {
142 void const * const oldMap = Map.Data();
143 map_ptrloc const index = Map.WriteString(String, Len);
144 if (index != 0)
145 ReMap(oldMap, Map.Data());
146 return index;
147 }
148 /*}}}*/
149 // CacheGenerator::WriteStringInMap /*{{{*/
150 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
151 void const * const oldMap = Map.Data();
152 map_ptrloc const index = Map.WriteString(String);
153 if (index != 0)
154 ReMap(oldMap, Map.Data());
155 return index;
156 }
157 /*}}}*/
158 map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
159 void const * const oldMap = Map.Data();
160 map_ptrloc const index = Map.Allocate(size);
161 if (index != 0)
162 ReMap(oldMap, Map.Data());
163 return index;
164 }
165 /*}}}*/
166 // CacheGenerator::MergeList - Merge the package list /*{{{*/
167 // ---------------------------------------------------------------------
168 /* This drives the generation of the entries in the cache. Each loop
169 iteration processes a single package record from the underlying parse engine. */
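/* In outline, the merge does the following for every record:
    - create (or find) the Group/Package structures via NewPackage
    - records without a version just update the package and attach
      missing descriptions
    - otherwise the version list (kept sorted, highest version first) is
      searched; a matching version+hash is only linked to the current
      package file, while a new version is inserted together with its
      description
    - finally the counters are checked against their ID field widths. */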
170 bool pkgCacheGenerator::MergeList(ListParser &List,
171 pkgCache::VerIterator *OutVer)
172 {
173 List.Owner = this;
174
175 unsigned int Counter = 0;
176 while (List.Step() == true)
177 {
178 string const PackageName = List.Package();
179 if (PackageName.empty() == true)
180 return false;
181
182 string const Arch = List.Architecture();
183
184 // Get a pointer to the package structure
185 pkgCache::PkgIterator Pkg;
186 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
187 if (NewPackage(Pkg, PackageName, Arch) == false)
188 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
189 Counter++;
190 if (Counter % 100 == 0 && Progress != 0)
191 Progress->Progress(List.Offset());
192
193 /* Get a pointer to the version structure. We know the list is sorted
194 so we use that fact in the search. Insertion of new versions is
195 done with correct sorting */
196 string Version = List.Version();
197 if (Version.empty() == true)
198 {
199 // we first process the package, then the descriptions
200 // (this has the bonus that we get an MMap error when we run out
201 // of MMap space)
202 pkgCache::VerIterator Ver(Cache);
203 Dynamic<pkgCache::VerIterator> DynVer(Ver);
204 if (List.UsePackage(Pkg, Ver) == false)
205 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
206 PackageName.c_str());
207
208 // Find the right version to write the description
209 MD5SumValue CurMd5 = List.Description_md5();
210 Ver = Pkg.VersionList();
211
212 for (; Ver.end() == false; ++Ver)
213 {
214 pkgCache::DescIterator Desc = Ver.DescriptionList();
215 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
216 map_ptrloc *LastDesc = &Ver->DescriptionList;
217 bool duplicate=false;
218
219 // don't add a new description if we have one for the given
220 // md5 && language
221 for ( ; Desc.end() == false; Desc++)
222 if (MD5SumValue(Desc.md5()) == CurMd5 &&
223 Desc.LanguageCode() == List.DescriptionLanguage())
224 duplicate=true;
225 if(duplicate)
226 continue;
227
228 for (Desc = Ver.DescriptionList();
229 Desc.end() == false;
230 LastDesc = &Desc->NextDesc, Desc++)
231 {
232 if (MD5SumValue(Desc.md5()) == CurMd5)
233 {
234 // Add new description
235 void const * const oldMap = Map.Data();
236 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
237 if (oldMap != Map.Data())
238 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
239 *LastDesc = descindex;
240 Desc->ParentPkg = Pkg.Index();
241
242 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
243 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
244 break;
245 }
246 }
247 }
248
249 continue;
250 }
251
252 pkgCache::VerIterator Ver = Pkg.VersionList();
253 Dynamic<pkgCache::VerIterator> DynVer(Ver);
254 map_ptrloc *LastVer = &Pkg->VersionList;
255 void const * oldMap = Map.Data();
256 int Res = 1;
257 unsigned long const Hash = List.VersionHash();
258 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
259 {
260 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
261 // Version is higher than the current version - insert here
262 if (Res > 0)
263 break;
264 // Version strings are equal - is the hash also equal?
265 if (Res == 0 && Ver->Hash == Hash)
266 break;
267 // proceed with the next one until we either find the right version
268 // or hit a version that sorts lower
269 }
270
271 /* We already have a version for this item, record that we saw it */
272 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
273 {
274 if (List.UsePackage(Pkg,Ver) == false)
275 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
276 PackageName.c_str());
277
278 if (NewFileVer(Ver,List) == false)
279 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
280 PackageName.c_str());
281
282 // Read only a single record and return
283 if (OutVer != 0)
284 {
285 *OutVer = Ver;
286 FoundFileDeps |= List.HasFileDeps();
287 return true;
288 }
289
290 continue;
291 }
292
293 // Add a new version
294 map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
295 if (verindex == 0 && _error->PendingError())
296 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
297 PackageName.c_str(), 1);
298
299 if (oldMap != Map.Data())
300 LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
301 *LastVer = verindex;
302 Ver->ParentPkg = Pkg.Index();
303 Ver->Hash = Hash;
304
305 if (List.NewVersion(Ver) == false)
306 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
307 PackageName.c_str(), 2);
308
309 if (List.UsePackage(Pkg,Ver) == false)
310 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
311 PackageName.c_str());
312
313 if (NewFileVer(Ver,List) == false)
314 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
315 PackageName.c_str(), 3);
316
317 // Read only a single record and return
318 if (OutVer != 0)
319 {
320 *OutVer = Ver;
321 FoundFileDeps |= List.HasFileDeps();
322 return true;
323 }
324
325 /* Record the Description data. Description data always exists in
326 Packages and Translation-* files. */
327 pkgCache::DescIterator Desc = Ver.DescriptionList();
328 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
329 map_ptrloc *LastDesc = &Ver->DescriptionList;
330
331 // Skip to the end of description set
332 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
333
334 // Add new description
335 oldMap = Map.Data();
336 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
337 if (oldMap != Map.Data())
338 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
339 *LastDesc = descindex;
340 Desc->ParentPkg = Pkg.Index();
341
342 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
343 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
344 }
345
346 FoundFileDeps |= List.HasFileDeps();
347
348 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
349 return _error->Error(_("Wow, you exceeded the number of package "
350 "names this APT is capable of."));
351 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
352 return _error->Error(_("Wow, you exceeded the number of versions "
353 "this APT is capable of."));
354 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
355 return _error->Error(_("Wow, you exceeded the number of descriptions "
356 "this APT is capable of."));
357 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
358 return _error->Error(_("Wow, you exceeded the number of dependencies "
359 "this APT is capable of."));
360 return true;
361 }
362 /*}}}*/
363 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
364 // ---------------------------------------------------------------------
365 /* If we found any file depends while parsing the main list we need to
366 resolve them. Since it is undesired to load the entire list of files
367 into the cache as virtual packages, we do a two-stage effort. MergeList
368 identifies the file depends and this creates Provides for them by
369 re-parsing all the indexes. */
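/* BuildCache() below triggers this second pass only when HasFileDeps()
   reports that file dependencies were actually seen in the first pass. */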
370 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
371 {
372 List.Owner = this;
373
374 unsigned int Counter = 0;
375 while (List.Step() == true)
376 {
377 string PackageName = List.Package();
378 if (PackageName.empty() == true)
379 return false;
380 string Version = List.Version();
381 if (Version.empty() == true)
382 continue;
383
384 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
385 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
386 if (Pkg.end() == true)
387 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
388 PackageName.c_str());
389 Counter++;
390 if (Counter % 100 == 0 && Progress != 0)
391 Progress->Progress(List.Offset());
392
393 unsigned long Hash = List.VersionHash();
394 pkgCache::VerIterator Ver = Pkg.VersionList();
395 Dynamic<pkgCache::VerIterator> DynVer(Ver);
396 for (; Ver.end() == false; Ver++)
397 {
398 if (Ver->Hash == Hash && strcmp(Version.c_str(), Ver.VerStr()) == 0) // compare contents, not pointers
399 {
400 if (List.CollectFileProvides(Cache,Ver) == false)
401 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
402 break;
403 }
404 }
405
406 if (Ver.end() == true)
407 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
408 }
409
410 return true;
411 }
412 /*}}}*/
413 // CacheGenerator::NewGroup - Add a new group /*{{{*/
414 // ---------------------------------------------------------------------
415 /* This creates a new group structure and adds it to the hash table */
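/* A Group bundles all Package structures that share a name but differ in
   architecture; NewPackage() below chains them via FirstPackage/LastPackage. */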
416 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
417 {
418 Grp = Cache.FindGrp(Name);
419 if (Grp.end() == false)
420 return true;
421
422 // Get a structure
423 map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
424 if (unlikely(Group == 0))
425 return false;
426
427 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
428 map_ptrloc const idxName = WriteStringInMap(Name);
429 if (unlikely(idxName == 0))
430 return false;
431 Grp->Name = idxName;
432
433 // Insert it into the hash table
434 unsigned long const Hash = Cache.Hash(Name);
435 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
436 Cache.HeaderP->GrpHashTable[Hash] = Group;
437
438 Grp->ID = Cache.HeaderP->GroupCount++;
439 return true;
440 }
441 /*}}}*/
442 // CacheGenerator::NewPackage - Add a new package /*{{{*/
443 // ---------------------------------------------------------------------
444 /* This creates a new package structure and adds it to the hash table */
445 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
446 const string &Arch) {
447 pkgCache::GrpIterator Grp;
448 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
449 if (unlikely(NewGroup(Grp, Name) == false))
450 return false;
451
452 Pkg = Grp.FindPkg(Arch);
453 if (Pkg.end() == false)
454 return true;
455
456 // Get a structure
457 map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
458 if (unlikely(Package == 0))
459 return false;
460 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
461
462 // Insert the package into our package list
463 if (Grp->FirstPackage == 0) // the group is new
464 {
465 // Insert it into the hash table
466 unsigned long const Hash = Cache.Hash(Name);
467 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
468 Cache.HeaderP->PkgHashTable[Hash] = Package;
469 Grp->FirstPackage = Package;
470 }
471 else // Group the Packages together
472 {
473 // this package is the new last package
474 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
475 Pkg->NextPackage = LastPkg->NextPackage;
476 LastPkg->NextPackage = Package;
477 }
478 Grp->LastPackage = Package;
479
480 // Set the name, arch and the ID
481 Pkg->Name = Grp->Name;
482 Pkg->Group = Grp.Index();
483 // "all" is mapped to the native architecture
484 map_ptrloc const idxArch = (Arch == "all") ? Cache.HeaderP->Architecture : WriteUniqString(Arch.c_str());
485 if (unlikely(idxArch == 0))
486 return false;
487 Pkg->Arch = idxArch;
488 Pkg->ID = Cache.HeaderP->PackageCount++;
489
490 return true;
491 }
492 /*}}}*/
493 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
494 // ---------------------------------------------------------------------
495 /* This links the version to the package file currently being parsed and records the record's offset and size within that file. */
496 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
497 ListParser &List)
498 {
499 if (CurrentFile == 0)
500 return true;
501
502 // Get a structure
503 map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
504 if (VerFile == 0)
505 return false;
506
507 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
508 VF->File = CurrentFile - Cache.PkgFileP;
509
510 // Link it to the end of the list
511 map_ptrloc *Last = &Ver->FileList;
512 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
513 Last = &V->NextFile;
514 VF->NextFile = *Last;
515 *Last = VF.Index();
516
517 VF->Offset = List.Offset();
518 VF->Size = List.Size();
519 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
520 Cache.HeaderP->MaxVerFileSize = VF->Size;
521 Cache.HeaderP->VerFileCount++;
522
523 return true;
524 }
525 /*}}}*/
526 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
527 // ---------------------------------------------------------------------
528 /* This puts a version structure in the linked list */
529 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
530 const string &VerStr,
531 unsigned long Next)
532 {
533 // Get a structure
534 map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
535 if (Version == 0)
536 return 0;
537
538 // Fill it in
539 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
540 Ver->NextVer = Next;
541 Ver->ID = Cache.HeaderP->VersionCount++;
542 map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
543 if (unlikely(idxVerStr == 0))
544 return 0;
545 Ver->VerStr = idxVerStr;
546
547 return Version;
548 }
549 /*}}}*/
550 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
551 // ---------------------------------------------------------------------
552 /* This links the description to the package file currently being parsed, analogous to NewFileVer above. */
553 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
554 ListParser &List)
555 {
556 if (CurrentFile == 0)
557 return true;
558
559 // Get a structure
560 map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
561 if (DescFile == 0)
562 return false;
563
564 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
565 DF->File = CurrentFile - Cache.PkgFileP;
566
567 // Link it to the end of the list
568 map_ptrloc *Last = &Desc->FileList;
569 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
570 Last = &D->NextFile;
571
572 DF->NextFile = *Last;
573 *Last = DF.Index();
574
575 DF->Offset = List.Offset();
576 DF->Size = List.Size();
577 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
578 Cache.HeaderP->MaxDescFileSize = DF->Size;
579 Cache.HeaderP->DescFileCount++;
580
581 return true;
582 }
583 /*}}}*/
584 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
585 // ---------------------------------------------------------------------
586 /* This puts a description structure in the linked list */
587 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
588 const string &Lang,
589 const MD5SumValue &md5sum,
590 map_ptrloc Next)
591 {
592 // Get a structure
593 map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
594 if (Description == 0)
595 return 0;
596
597 // Fill it in
598 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
599 Desc->NextDesc = Next;
600 Desc->ID = Cache.HeaderP->DescriptionCount++;
601 map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
602 map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
603 if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
604 return 0;
605 Desc->language_code = idxlanguage_code;
606 Desc->md5sum = idxmd5sum;
607
608 return Description;
609 }
610 /*}}}*/
611 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
612 // ---------------------------------------------------------------------
613 /* This prepares the Cache for delivery */
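/* On multi-arch setups this injects the implicit inter-architecture
   relations described below (Conflicts, or Breaks/Replaces for
   "MultiArch: same" versions) between the members of each group. */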
614 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
615 {
616 // FIXME: add progress reporting for this operation
617 // Do we have different architectures in our groups?
618 vector<string> archs = APT::Configuration::getArchitectures();
619 if (archs.size() > 1)
620 {
621 // Create Conflicts in between the group
622 pkgCache::GrpIterator G = GetCache().GrpBegin();
623 Dynamic<pkgCache::GrpIterator> DynG(G);
624 for (; G.end() != true; G++)
625 {
626 string const PkgName = G.Name();
627 pkgCache::PkgIterator P = G.PackageList();
628 Dynamic<pkgCache::PkgIterator> DynP(P);
629 for (; P.end() != true; P = G.NextPkg(P))
630 {
631 pkgCache::PkgIterator allPkg;
632 Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
633 pkgCache::VerIterator V = P.VersionList();
634 Dynamic<pkgCache::VerIterator> DynV(V);
635 for (; V.end() != true; V++)
636 {
637 char const * const Arch = P.Arch();
638 map_ptrloc *OldDepLast = NULL;
639 /* MultiArch handling introduces a lot of implicit Dependencies:
640 - MultiArch: same → Co-Installable if they have the same version
641 - Architecture: all → Need to be Co-Installable for internal reasons
642 - All others conflict with all other group members */
643 bool const coInstall = ((V->MultiArch & pkgCache::Version::Same) == pkgCache::Version::Same);
644 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
645 {
646 if (Arch == 0 || *A == Arch)
647 continue;
648 /* We allow only one installed arch at a time
649 per group, therefore each group member conflicts
650 with all other group members */
651 pkgCache::PkgIterator D = G.FindPkg(*A);
652 Dynamic<pkgCache::PkgIterator> DynD(D);
653 if (D.end() == true)
654 continue;
655 if (coInstall == true)
656 {
657 // Replaces: ${self}:other ( << ${binary:Version})
658 NewDepends(D, V, V.VerStr(),
659 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
660 OldDepLast);
661 // Breaks: ${self}:other (!= ${binary:Version})
662 NewDepends(D, V, V.VerStr(),
663 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
664 OldDepLast);
665 } else {
666 // Conflicts: ${self}:other
667 NewDepends(D, V, "",
668 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
669 OldDepLast);
670 }
671 }
672 }
673 }
674 }
675 }
676 return true;
677 }
678 /*}}}*/
679 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
680 // ---------------------------------------------------------------------
681 /* This creates a dependency element in the tree. It is linked to the
682 version and to the package that it is pointing to. */
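/* OldDepLast caches the tail of the version's dependency list so that
   consecutive calls can append without rescanning the whole list; it is
   re-adjusted here whenever an allocation moved the map. */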
683 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
684 pkgCache::VerIterator &Ver,
685 string const &Version,
686 unsigned int const &Op,
687 unsigned int const &Type,
688 map_ptrloc *OldDepLast)
689 {
690 void const * const oldMap = Map.Data();
691 // Get a structure
692 map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
693 if (unlikely(Dependency == 0))
694 return false;
695
696 // Fill it in
697 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
698 Dynamic<pkgCache::DepIterator> DynDep(Dep);
699 Dep->ParentVer = Ver.Index();
700 Dep->Type = Type;
701 Dep->CompareOp = Op;
702 Dep->ID = Cache.HeaderP->DependsCount++;
703
704 // Probe the reverse dependency list for a version string that matches
705 if (Version.empty() == false)
706 {
707 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
708 if (I->Version != 0 && I.TargetVer() == Version)
709 Dep->Version = I->Version;*/
710 if (Dep->Version == 0) {
711 map_ptrloc const index = WriteStringInMap(Version);
712 if (unlikely(index == 0))
713 return false;
714 Dep->Version = index;
715 }
716 }
717
718 // Link it to the package
719 Dep->Package = Pkg.Index();
720 Dep->NextRevDepends = Pkg->RevDepends;
721 Pkg->RevDepends = Dep.Index();
722
723 // Do we know where to link the Dependency to?
724 if (OldDepLast == NULL)
725 {
726 OldDepLast = &Ver->DependsList;
727 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
728 OldDepLast = &D->NextDepends;
729 } else if (oldMap != Map.Data())
730 OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
731
732 Dep->NextDepends = *OldDepLast;
733 *OldDepLast = Dep.Index();
734 OldDepLast = &Dep->NextDepends;
735
736 return true;
737 }
738 /*}}}*/
739 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
740 // ---------------------------------------------------------------------
741 /* This creates a Group and the Package to link this dependency to if
742 needed, and also handles caching of the old endpoint */
743 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
744 const string &PackageName,
745 const string &Arch,
746 const string &Version,
747 unsigned int Op,
748 unsigned int Type)
749 {
750 pkgCache::GrpIterator Grp;
751 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
752 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
753 return false;
754
755 // Locate the target package
756 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
757 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
758 if (Pkg.end() == true) {
759 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
760 return false;
761 }
762
763 // Is it a file dependency?
764 if (unlikely(PackageName[0] == '/'))
765 FoundFileDeps = true;
766
767 /* Caching the old end point speeds up generation substantially */
768 if (OldDepVer != Ver) {
769 OldDepLast = NULL;
770 OldDepVer = Ver;
771 }
772
773 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
774 }
775 /*}}}*/
776 // ListParser::NewProvides - Create a Provides element /*{{{*/
777 // ---------------------------------------------------------------------
778 /* This registers a Provides entry and links it both to the providing version and to the provided (virtual) package. */
779 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
780 const string &PkgName,
781 const string &PkgArch,
782 const string &Version)
783 {
784 pkgCache &Cache = Owner->Cache;
785
786 // We do not add self referencing provides
787 if (Ver.ParentPkg().Name() == PkgName && (PkgArch == Ver.ParentPkg().Arch() ||
788 (PkgArch == "all" && strcmp((Cache.StrP + Cache.HeaderP->Architecture), Ver.ParentPkg().Arch()) == 0)))
789 return true;
790
791 // Get a structure
792 map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
793 if (unlikely(Provides == 0))
794 return false;
795 Cache.HeaderP->ProvidesCount++;
796
797 // Fill it in
798 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
799 Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
800 Prv->Version = Ver.Index();
801 Prv->NextPkgProv = Ver->ProvidesList;
802 Ver->ProvidesList = Prv.Index();
803 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
804 return false;
805
806 // Locate the target package
807 pkgCache::PkgIterator Pkg;
808 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
809 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
810 return false;
811
812 // Link it to the package
813 Prv->ParentPkg = Pkg.Index();
814 Prv->NextProvides = Pkg->ProvidesList;
815 Pkg->ProvidesList = Prv.Index();
816
817 return true;
818 }
819 /*}}}*/
820 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
821 // ---------------------------------------------------------------------
822 /* This is used to select which file is to be associated with all newly
823 added versions. The caller is responsible for setting the IMS fields. */
824 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
825 const pkgIndexFile &Index,
826 unsigned long Flags)
827 {
828 // Get some space for the structure
829 map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
830 if (unlikely(idxFile == 0))
831 return false;
832 CurrentFile = Cache.PkgFileP + idxFile;
833
834 // Fill it in
835 map_ptrloc const idxFileName = WriteStringInMap(File);
836 map_ptrloc const idxSite = WriteUniqString(Site);
837 if (unlikely(idxFileName == 0 || idxSite == 0))
838 return false;
839 CurrentFile->FileName = idxFileName;
840 CurrentFile->Site = idxSite;
841 CurrentFile->NextFile = Cache.HeaderP->FileList;
842 CurrentFile->Flags = Flags;
843 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
844 map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
845 if (unlikely(idxIndexType == 0))
846 return false;
847 CurrentFile->IndexType = idxIndexType;
848 PkgFileName = File;
849 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
850 Cache.HeaderP->PackageFileCount++;
851
852 if (Progress != 0)
853 Progress->SubProgress(Index.Size());
854 return true;
855 }
856 /*}}}*/
857 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
858 // ---------------------------------------------------------------------
859 /* This is used to create handles to strings. Given the same text it
860 always returns the same number */
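/* Unique strings are kept in a single linked list of StringItems ordered
   by stringcmp(); a tiny two-character hash bucket in UniqHash
   short-circuits the common case of repeated lookups of the same string. */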
861 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
862 unsigned int Size)
863 {
864 /* We use a very small transient hash table here; this speeds up generation
865 by a fair amount on slower machines */
866 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
867 if (Bucket != 0 &&
868 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
869 return Bucket->String;
870
871 // Search for an insertion point
872 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
873 int Res = 1;
874 map_ptrloc *Last = &Cache.HeaderP->StringList;
875 for (; I != Cache.StringItemP; Last = &I->NextItem,
876 I = Cache.StringItemP + I->NextItem)
877 {
878 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
879 if (Res >= 0)
880 break;
881 }
882
883 // Match
884 if (Res == 0)
885 {
886 Bucket = I;
887 return I->String;
888 }
889
890 // Get a structure
891 void const * const oldMap = Map.Data();
892 map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
893 if (Item == 0)
894 return 0;
895
896 map_ptrloc const idxString = WriteStringInMap(S,Size);
897 if (unlikely(idxString == 0))
898 return 0;
899 if (oldMap != Map.Data()) {
900 Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
901 I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
902 }
903 *Last = Item;
904
905 // Fill in the structure
906 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
907 ItemP->NextItem = I - Cache.StringItemP;
908 ItemP->String = idxString;
909
910 Bucket = ItemP;
911 return ItemP->String;
912 }
913 /*}}}*/
914 // CheckValidity - Check that a cache is up-to-date /*{{{*/
915 // ---------------------------------------------------------------------
916 /* This just verifies that each file in the list of index files exists,
917 has attributes matching the cache, and that the cache does not have
918 any extra files. */
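/* A cache is considered valid only if every index that has packages is
   found via FindInCache() (which also does the IMS check) and, conversely,
   every package file recorded in the cache was visited by one of the indexes. */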
919 static bool CheckValidity(const string &CacheFile, FileIterator Start,
920 FileIterator End,MMap **OutMap = 0)
921 {
922 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
923 // No file, certainly invalid
924 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
925 {
926 if (Debug == true)
927 std::clog << "CacheFile doesn't exist" << std::endl;
928 return false;
929 }
930
931 // Map it
932 FileFd CacheF(CacheFile,FileFd::ReadOnly);
933 SPtr<MMap> Map = new MMap(CacheF,0);
934 pkgCache Cache(Map);
935 if (_error->PendingError() == true || Map->Size() == 0)
936 {
937 if (Debug == true)
938 std::clog << "Errors are pending or Map is empty()" << std::endl;
939 _error->Discard();
940 return false;
941 }
942
943 /* Now we check every index file, see if it is in the cache,
944 verify the IMS data and check that it is on the disk too. */
945 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
946 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
947 for (; Start != End; Start++)
948 {
949 if (Debug == true)
950 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
951 if ((*Start)->HasPackages() == false)
952 {
953 if (Debug == true)
954 std::clog << "Has NO packages" << std::endl;
955 continue;
956 }
957
958 if ((*Start)->Exists() == false)
959 {
960 #if 0 // mvo: we no longer give a message here (Default Sources spec)
961 _error->WarningE("stat",_("Couldn't stat source package list %s"),
962 (*Start)->Describe().c_str());
963 #endif
964 if (Debug == true)
965 std::clog << "file doesn't exist" << std::endl;
966 continue;
967 }
968
969 // FindInCache is also expected to do an IMS check.
970 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
971 if (File.end() == true)
972 {
973 if (Debug == true)
974 std::clog << "FindInCache returned end-Pointer" << std::endl;
975 return false;
976 }
977
978 Visited[File->ID] = true;
979 if (Debug == true)
980 std::clog << "with ID " << File->ID << " is valid" << std::endl;
981 }
982
983 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
984 if (Visited[I] == false)
985 {
986 if (Debug == true)
987 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
988 return false;
989 }
990
991 if (_error->PendingError() == true)
992 {
993 if (Debug == true)
994 {
995 std::clog << "Validity failed because of pending errors:" << std::endl;
996 _error->DumpErrors();
997 }
998 _error->Discard();
999 return false;
1000 }
1001
1002 if (OutMap != 0)
1003 *OutMap = Map.UnGuard();
1004 return true;
1005 }
1006 /*}}}*/
1007 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
1008 // ---------------------------------------------------------------------
1009 /* Size is kind of an abstract notion that is only used for the progress
1010 meter */
1011 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
1012 {
1013 unsigned long TotalSize = 0;
1014 for (; Start != End; Start++)
1015 {
1016 if ((*Start)->HasPackages() == false)
1017 continue;
1018 TotalSize += (*Start)->Size();
1019 }
1020 return TotalSize;
1021 }
1022 /*}}}*/
1023 // BuildCache - Merge the list of index files into the cache /*{{{*/
1024 // ---------------------------------------------------------------------
1025 /* This merges every index file that exists and is not already in the cache; if file dependencies were found it does a second pass to collect the file provides. */
1026 static bool BuildCache(pkgCacheGenerator &Gen,
1027 OpProgress *Progress,
1028 unsigned long &CurrentSize,unsigned long TotalSize,
1029 FileIterator Start, FileIterator End)
1030 {
1031 FileIterator I;
1032 for (I = Start; I != End; I++)
1033 {
1034 if ((*I)->HasPackages() == false)
1035 continue;
1036
1037 if ((*I)->Exists() == false)
1038 continue;
1039
1040 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
1041 {
1042 _error->Warning("Duplicate sources.list entry %s",
1043 (*I)->Describe().c_str());
1044 continue;
1045 }
1046
1047 unsigned long Size = (*I)->Size();
1048 if (Progress != NULL)
1049 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
1050 CurrentSize += Size;
1051
1052 if ((*I)->Merge(Gen,Progress) == false)
1053 return false;
1054 }
1055
1056 if (Gen.HasFileDeps() == true)
1057 {
1058 if (Progress != NULL)
1059 Progress->Done();
1060 TotalSize = ComputeSize(Start, End);
1061 CurrentSize = 0;
1062 for (I = Start; I != End; I++)
1063 {
1064 unsigned long Size = (*I)->Size();
1065 if (Progress != NULL)
1066 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
1067 CurrentSize += Size;
1068 if ((*I)->MergeFileProvides(Gen,Progress) == false)
1069 return false;
1070 }
1071 }
1072
1073 return true;
1074 }
1075 /*}}}*/
1076 // CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
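// ---------------------------------------------------------------------
/* The mapping starts at APT::Cache-Start (24 MiB by default), grows in
   APT::Cache-Grow steps (default 1 MiB) up to APT::Cache-Limit
   (default 0, i.e. unlimited); APT::Cache-Fallback sets the MMap::Fallback
   flag in addition to MMap::Moveable. */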
1077 DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
1078 unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
1079 unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
1080 unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
1081 Flags |= MMap::Moveable;
1082 if (_config->FindB("APT::Cache-Fallback", false) == true)
1083 Flags |= MMap::Fallback;
1084 if (CacheF != NULL)
1085 return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
1086 else
1087 return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
1088 }
1089 /*}}}*/
1090 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
1091 // ---------------------------------------------------------------------
1092 /* This makes sure that the status cache (the cache that has all
1093 index files from the sources list and all local ones) is ready
1094 to be mmapped. If OutMap is not zero then an MMap object representing
1095 the cache will be stored there. This is pretty much mandatory if you
1096 are using AllowMem. AllowMem lets the function be run as non-root,
1097 where it builds the cache 'fast' into a memory buffer. */
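/* Roughly: collect the index files from the sources list plus the system
   status files, reuse pkgcache.bin if CheckValidity() accepts it, otherwise
   rebuild - either on top of a still valid srcpkgcache.bin or from scratch,
   writing srcpkgcache.bin back out - and finally hand back the mapping. */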
1098 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
1099 MMap **OutMap, bool AllowMem)
1100 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
1101 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1102 MMap **OutMap,bool AllowMem)
1103 {
1104 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1105
1106 vector<pkgIndexFile *> Files;
1107 for (vector<metaIndex *>::const_iterator i = List.begin();
1108 i != List.end();
1109 i++)
1110 {
1111 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1112 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1113 j != Indexes->end();
1114 j++)
1115 Files.push_back (*j);
1116 }
1117
1118 unsigned long const EndOfSource = Files.size();
1119 if (_system->AddStatusFiles(Files) == false)
1120 return false;
1121
1122 // Decide if we can write to the files..
1123 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1124 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1125
1126 // ensure the cache directory exists
1127 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1128 {
1129 string dir = _config->FindDir("Dir::Cache");
1130 size_t const len = dir.size();
1131 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1132 dir = dir.substr(0, len - 5);
1133 if (CacheFile.empty() == false)
1134 CreateDirectory(dir, flNotFile(CacheFile));
1135 if (SrcCacheFile.empty() == false)
1136 CreateDirectory(dir, flNotFile(SrcCacheFile));
1137 }
1138
1139 // Decide if we can write to the cache
1140 bool Writeable = false;
1141 if (CacheFile.empty() == false)
1142 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1143 else
1144 if (SrcCacheFile.empty() == false)
1145 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1146 if (Debug == true)
1147 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1148
1149 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1150 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1151
1152 if (Progress != NULL)
1153 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1154
1155 // Cache is OK, Fin.
1156 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1157 {
1158 if (Progress != NULL)
1159 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1160 if (Debug == true)
1161 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1162 return true;
1163 }
1164 else if (Debug == true)
1165 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1166
1167 /* At this point we know we need to reconstruct the package cache,
1168 begin. */
1169 SPtr<FileFd> CacheF;
1170 SPtr<DynamicMMap> Map;
1171 if (Writeable == true && CacheFile.empty() == false)
1172 {
1173 _error->PushToStack();
1174 unlink(CacheFile.c_str());
1175 CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
1176 fchmod(CacheF->Fd(),0644);
1177 Map = CreateDynamicMMap(CacheF, MMap::Public);
1178 if (_error->PendingError() == true)
1179 {
1180 delete CacheF.UnGuard();
1181 delete Map.UnGuard();
1182 if (Debug == true)
1183 std::clog << "Open filebased MMap FAILED" << std::endl;
1184 Writeable = false;
1185 if (AllowMem == false)
1186 {
1187 _error->MergeWithStack();
1188 return false;
1189 }
1190 _error->RevertToStack();
1191 }
1192 else if (Debug == true)
1193 {
1194 _error->MergeWithStack();
1195 std::clog << "Open filebased MMap" << std::endl;
1196 }
1197 }
1198 if (Writeable == false || CacheFile.empty() == true)
1199 {
1200 // Just build it in memory..
1201 Map = CreateDynamicMMap(NULL);
1202 if (Debug == true)
1203 std::clog << "Open memory Map (not filebased)" << std::endl;
1204 }
1205
1206 // Let's try the source cache.
1207 unsigned long CurrentSize = 0;
1208 unsigned long TotalSize = 0;
1209 if (CheckValidity(SrcCacheFile,Files.begin(),
1210 Files.begin()+EndOfSource) == true)
1211 {
1212 if (Debug == true)
1213 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1214 // Preload the map with the source cache
1215 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1216 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1217 if ((alloc == 0 && _error->PendingError())
1218 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1219 SCacheF.Size()) == false)
1220 return false;
1221
1222 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1223
1224 // Build the status cache
1225 pkgCacheGenerator Gen(Map.Get(),Progress);
1226 if (_error->PendingError() == true)
1227 return false;
1228 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1229 Files.begin()+EndOfSource,Files.end()) == false)
1230 return false;
1231
1232 // FIXME: move me to a better place
1233 Gen.FinishCache(Progress);
1234 }
1235 else
1236 {
1237 if (Debug == true)
1238 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1239 TotalSize = ComputeSize(Files.begin(),Files.end());
1240
1241 // Build the source cache
1242 pkgCacheGenerator Gen(Map.Get(),Progress);
1243 if (_error->PendingError() == true)
1244 return false;
1245 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1246 Files.begin(),Files.begin()+EndOfSource) == false)
1247 return false;
1248
1249 // Write it back
1250 if (Writeable == true && SrcCacheFile.empty() == false)
1251 {
1252 FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
1253 if (_error->PendingError() == true)
1254 return false;
1255
1256 fchmod(SCacheF.Fd(),0644);
1257
1258 // Write out the main data
1259 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1260 return _error->Error(_("IO Error saving source cache"));
1261 SCacheF.Sync();
1262
1263 // Write out the proper header
1264 Gen.GetCache().HeaderP->Dirty = false;
1265 if (SCacheF.Seek(0) == false ||
1266 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1267 return _error->Error(_("IO Error saving source cache"));
1268 Gen.GetCache().HeaderP->Dirty = true;
1269 SCacheF.Sync();
1270 }
1271
1272 // Build the status cache
1273 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1274 Files.begin()+EndOfSource,Files.end()) == false)
1275 return false;
1276
1277 // FIXME: move me to a better place
1278 Gen.FinishCache(Progress);
1279 }
1280 if (Debug == true)
1281 std::clog << "Caches are ready for shipping" << std::endl;
1282
1283 if (_error->PendingError() == true)
1284 return false;
1285 if (OutMap != 0)
1286 {
1287 if (CacheF != 0)
1288 {
1289 delete Map.UnGuard();
1290 *OutMap = new MMap(*CacheF,0);
1291 }
1292 else
1293 {
1294 *OutMap = Map.UnGuard();
1295 }
1296 }
1297
1298 return true;
1299 }
1300 /*}}}*/
1301 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1302 // ---------------------------------------------------------------------
1303 /* This builds an in-memory cache containing only the status files, without any sources.list indexes. */
1304 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1305 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1306 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1307 {
1308 vector<pkgIndexFile *> Files;
1309 unsigned long EndOfSource = Files.size();
1310 if (_system->AddStatusFiles(Files) == false)
1311 return false;
1312
1313 SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
1314 unsigned long CurrentSize = 0;
1315 unsigned long TotalSize = 0;
1316
1317 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1318
1319 // Build the status cache
1320 if (Progress != NULL)
1321 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1322 pkgCacheGenerator Gen(Map.Get(),Progress);
1323 if (_error->PendingError() == true)
1324 return false;
1325 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1326 Files.begin()+EndOfSource,Files.end()) == false)
1327 return false;
1328
1329 // FIXME: move me to a better place
1330 Gen.FinishCache(Progress);
1331
1332 if (_error->PendingError() == true)
1333 return false;
1334 *OutMap = Map.UnGuard();
1335
1336 return true;
1337 }
1338 /*}}}*/