1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
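/* Informal overview (restating what the code in this file does): a
   front-end normally goes through pkgCacheGenerator::MakeStatusCache(),
   which sets up a DynamicMMap, constructs a generator on top of it and
   then lets every index file merge itself via SelectFile() followed by
   MergeList() with that file's ListParser (see BuildCache() below). */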
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39 template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
40
41 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
42 // ---------------------------------------------------------------------
43 /* We set the dirty flag and make sure that it is written to the disk */
44 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
45 Map(*pMap), Cache(pMap,false), Progress(Prog),
46 FoundFileDeps(0)
47 {
48 CurrentFile = 0;
49 memset(UniqHash,0,sizeof(UniqHash));
50
51 if (_error->PendingError() == true)
52 return;
53
54 if (Map.Size() == 0)
55 {
56 // Set up the map interface.
57 Cache.HeaderP = (pkgCache::Header *)Map.Data();
58 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
59 return;
60
61 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
62
63 // Starting header
64 *Cache.HeaderP = pkgCache::Header();
65 map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
66 Cache.HeaderP->VerSysName = idxVerSysName;
67 map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
68 Cache.HeaderP->Architecture = idxArchitecture;
69 if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
70 return;
71 Cache.ReMap();
72 }
73 else
74 {
75 // Map directly from the existing file
76 Cache.ReMap();
77 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
78 if (Cache.VS != _system->VS)
79 {
80 _error->Error(_("Cache has an incompatible versioning system"));
81 return;
82 }
83 }
84
85 Cache.HeaderP->Dirty = true;
86 Map.Sync(0,sizeof(pkgCache::Header));
87 }
88 /*}}}*/
89 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
90 // ---------------------------------------------------------------------
91 /* We sync the data, then unset the dirty flag in two steps so as to
92 avoid a problem during a crash */
93 pkgCacheGenerator::~pkgCacheGenerator()
94 {
95 if (_error->PendingError() == true)
96 return;
97 if (Map.Sync() == false)
98 return;
99
100 Cache.HeaderP->Dirty = false;
101 Map.Sync(0,sizeof(pkgCache::Header));
102 }
103 /*}}}*/
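/* Note: a DynamicMMap may move in memory when it grows. ReMap() is called
   after every allocation that could have moved it and rebases CurrentFile,
   the UniqHash buckets and every iterator registered through the Dynamic<>
   wrapper by the difference between the old and the new base address. */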
104 void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
105 if (oldMap == newMap)
106 return;
107
108 Cache.ReMap(false);
109
110 CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
111
112 for (size_t i = 0; i < _count(UniqHash); ++i)
113 if (UniqHash[i] != 0)
114 UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
115
116 for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
117 i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
118 (*i)->ReMap(oldMap, newMap);
119 for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
120 i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
121 (*i)->ReMap(oldMap, newMap);
122 for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
123 i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
124 (*i)->ReMap(oldMap, newMap);
125 for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
126 i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
127 (*i)->ReMap(oldMap, newMap);
128 for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
129 i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
130 (*i)->ReMap(oldMap, newMap);
131 for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
132 i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
133 (*i)->ReMap(oldMap, newMap);
134 for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
135 i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
136 (*i)->ReMap(oldMap, newMap);
137 } /*}}}*/
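/* The wrappers below funnel all writes/allocations through ReMap(). A
   caller that keeps a raw map_ptrloc pointer across such a call has to
   rebase it itself, as MergeList() does:

     void const * const oldMap = Map.Data();
     map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
     if (oldMap != Map.Data())
        LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
*/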
138 // CacheGenerator::WriteStringInMap /*{{{*/
139 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
140 const unsigned long &Len) {
141 void const * const oldMap = Map.Data();
142 map_ptrloc const index = Map.WriteString(String, Len);
143 if (index != 0)
144 ReMap(oldMap, Map.Data());
145 return index;
146 }
147 /*}}}*/
148 // CacheGenerator::WriteStringInMap /*{{{*/
149 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
150 void const * const oldMap = Map.Data();
151 map_ptrloc const index = Map.WriteString(String);
152 if (index != 0)
153 ReMap(oldMap, Map.Data());
154 return index;
155 }
156 /*}}}*/
157 map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
158 void const * const oldMap = Map.Data();
159 map_ptrloc const index = Map.Allocate(size);
160 if (index != 0)
161 ReMap(oldMap, Map.Data());
162 return index;
163 }
164 /*}}}*/
165 // CacheGenerator::MergeList - Merge the package list /*{{{*/
166 // ---------------------------------------------------------------------
167 /* This provides the generation of the entries in the cache. Each loop
168 goes through a single package record from the underlying parse engine. */
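/* Rough flow per record (derived from the body below): NewPackage() finds
   or creates the group and package, then either an existing version with
   the same hash is reused (UsePackage + NewFileVer) or a new one is
   inserted in sorted order (NewVersion), after which the description for
   the parsed language is attached via NewDescription()/NewFileDesc(). */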
169 bool pkgCacheGenerator::MergeList(ListParser &List,
170 pkgCache::VerIterator *OutVer)
171 {
172 List.Owner = this;
173
174 unsigned int Counter = 0;
175 while (List.Step() == true)
176 {
177 string const PackageName = List.Package();
178 if (PackageName.empty() == true)
179 return false;
180
181 string const Arch = List.Architecture();
182
183 // Get a pointer to the package structure
184 pkgCache::PkgIterator Pkg;
185 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
186 if (NewPackage(Pkg, PackageName, Arch) == false)
187 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
188 Counter++;
189 if (Counter % 100 == 0 && Progress != 0)
190 Progress->Progress(List.Offset());
191
192 /* Get a pointer to the version structure. We know the list is sorted
193 so we use that fact in the search. Insertion of new versions is
194 done with correct sorting */
195 string Version = List.Version();
196 if (Version.empty() == true)
197 {
198 // we first process the package, then the descriptions
199 // (this has the bonus that we get an MMap error when we run out
200 // of MMap space)
201 pkgCache::VerIterator Ver(Cache);
202 Dynamic<pkgCache::VerIterator> DynVer(Ver);
203 if (List.UsePackage(Pkg, Ver) == false)
204 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
205 PackageName.c_str());
206
207 // Find the right version to write the description
208 MD5SumValue CurMd5 = List.Description_md5();
209 Ver = Pkg.VersionList();
210
211 for (; Ver.end() == false; ++Ver)
212 {
213 pkgCache::DescIterator Desc = Ver.DescriptionList();
214 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
215 map_ptrloc *LastDesc = &Ver->DescriptionList;
216 bool duplicate=false;
217
218 // don't add a new description if we have one for the given
219 // md5 && language
220 for ( ; Desc.end() == false; Desc++)
221 if (MD5SumValue(Desc.md5()) == CurMd5 &&
222 Desc.LanguageCode() == List.DescriptionLanguage())
223 duplicate=true;
224 if(duplicate)
225 continue;
226
227 for (Desc = Ver.DescriptionList();
228 Desc.end() == false;
229 LastDesc = &Desc->NextDesc, Desc++)
230 {
231 if (MD5SumValue(Desc.md5()) == CurMd5)
232 {
233 // Add new description
234 void const * const oldMap = Map.Data();
235 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
236 if (oldMap != Map.Data())
237 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
238 *LastDesc = descindex;
239 Desc->ParentPkg = Pkg.Index();
240
241 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
242 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
243 break;
244 }
245 }
246 }
247
248 continue;
249 }
250
251 pkgCache::VerIterator Ver = Pkg.VersionList();
252 Dynamic<pkgCache::VerIterator> DynVer(Ver);
253 map_ptrloc *LastVer = &Pkg->VersionList;
254 void const * oldMap = Map.Data();
255 int Res = 1;
256 unsigned long const Hash = List.VersionHash();
257 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
258 {
259 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
260 // Version is higher than the current version - insert here
261 if (Res > 0)
262 break;
263 // Version strings are equal - is the hash also equal?
264 if (Res == 0 && Ver->Hash == Hash)
265 break;
266 // proceed with the next version until we either find the right one
267 // or reach a version that sorts lower
268 }
269
270 /* We already have a version for this item, record that we saw it */
271 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
272 {
273 if (List.UsePackage(Pkg,Ver) == false)
274 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
275 PackageName.c_str());
276
277 if (NewFileVer(Ver,List) == false)
278 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
279 PackageName.c_str());
280
281 // Read only a single record and return
282 if (OutVer != 0)
283 {
284 *OutVer = Ver;
285 FoundFileDeps |= List.HasFileDeps();
286 return true;
287 }
288
289 continue;
290 }
291
292 // Add a new version
293 map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
294 if (verindex == 0 && _error->PendingError())
295 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
296 PackageName.c_str(), 1);
297
298 if (oldMap != Map.Data())
299 LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
300 *LastVer = verindex;
301 Ver->ParentPkg = Pkg.Index();
302 Ver->Hash = Hash;
303
304 if (List.NewVersion(Ver) == false)
305 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
306 PackageName.c_str(), 2);
307
308 if (List.UsePackage(Pkg,Ver) == false)
309 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
310 PackageName.c_str());
311
312 if (NewFileVer(Ver,List) == false)
313 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
314 PackageName.c_str(), 3);
315
316 // Read only a single record and return
317 if (OutVer != 0)
318 {
319 *OutVer = Ver;
320 FoundFileDeps |= List.HasFileDeps();
321 return true;
322 }
323
324 /* Record the Description data. Description data always exists in
325 Packages and Translation-* files. */
326 pkgCache::DescIterator Desc = Ver.DescriptionList();
327 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
328 map_ptrloc *LastDesc = &Ver->DescriptionList;
329
330 // Skip to the end of description set
331 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
332
333 // Add new description
334 oldMap = Map.Data();
335 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
336 if (oldMap != Map.Data())
337 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
338 *LastDesc = descindex;
339 Desc->ParentPkg = Pkg.Index();
340
341 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
342 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
343 }
344
345 FoundFileDeps |= List.HasFileDeps();
346
347 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
348 return _error->Error(_("Wow, you exceeded the number of package "
349 "names this APT is capable of."));
350 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
351 return _error->Error(_("Wow, you exceeded the number of versions "
352 "this APT is capable of."));
353 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
354 return _error->Error(_("Wow, you exceeded the number of descriptions "
355 "this APT is capable of."));
356 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
357 return _error->Error(_("Wow, you exceeded the number of dependencies "
358 "this APT is capable of."));
359 return true;
360 }
361 /*}}}*/
362 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
363 // ---------------------------------------------------------------------
364 /* If we found any file depends while parsing the main list we need to
365 resolve them. Since it is undesirable to load the entire list of files
366 into the cache as virtual packages we do a two-stage effort: MergeList
367 identifies the file depends and this pass creates Provides for them by
368 re-parsing all the indexes. */
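/* In practice this is the second pass driven by BuildCache() below; it
   only runs when HasFileDeps() reports that MergeList() encountered at
   least one dependency on a file path (a name starting with '/'). */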
369 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
370 {
371 List.Owner = this;
372
373 unsigned int Counter = 0;
374 while (List.Step() == true)
375 {
376 string PackageName = List.Package();
377 if (PackageName.empty() == true)
378 return false;
379 string Version = List.Version();
380 if (Version.empty() == true)
381 continue;
382
383 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
384 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
385 if (Pkg.end() == true)
386 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
387 PackageName.c_str());
388 Counter++;
389 if (Counter % 100 == 0 && Progress != 0)
390 Progress->Progress(List.Offset());
391
392 unsigned long Hash = List.VersionHash();
393 pkgCache::VerIterator Ver = Pkg.VersionList();
394 Dynamic<pkgCache::VerIterator> DynVer(Ver);
395 for (; Ver.end() == false; Ver++)
396 {
397 if (Ver->Hash == Hash && Version == Ver.VerStr())
398 {
399 if (List.CollectFileProvides(Cache,Ver) == false)
400 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
401 break;
402 }
403 }
404
405 if (Ver.end() == true)
406 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
407 }
408
409 return true;
410 }
411 /*}}}*/
412 // CacheGenerator::NewGroup - Add a new group /*{{{*/
413 // ---------------------------------------------------------------------
414 /* This creates a new group structure and adds it to the hash table */
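/* A Group collects all packages that share one name across architectures;
   NewPackage() below attaches the per-architecture Package structures to
   it through FirstPackage/LastPackage. */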
415 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
416 {
417 Grp = Cache.FindGrp(Name);
418 if (Grp.end() == false)
419 return true;
420
421 // Get a structure
422 map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
423 if (unlikely(Group == 0))
424 return false;
425
426 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
427 map_ptrloc const idxName = WriteStringInMap(Name);
428 if (unlikely(idxName == 0))
429 return false;
430 Grp->Name = idxName;
431
432 // Insert it into the hash table
433 unsigned long const Hash = Cache.Hash(Name);
434 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
435 Cache.HeaderP->GrpHashTable[Hash] = Group;
436
437 Grp->ID = Cache.HeaderP->GroupCount++;
438 return true;
439 }
440 /*}}}*/
441 // CacheGenerator::NewPackage - Add a new package /*{{{*/
442 // ---------------------------------------------------------------------
443 /* This creates a new package structure and adds it to the hash table */
444 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
445 const string &Arch) {
446 pkgCache::GrpIterator Grp;
447 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
448 if (unlikely(NewGroup(Grp, Name) == false))
449 return false;
450
451 Pkg = Grp.FindPkg(Arch);
452 if (Pkg.end() == false)
453 return true;
454
455 // Get a structure
456 map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
457 if (unlikely(Package == 0))
458 return false;
459 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
460
461 // Insert the package into our package list
462 if (Grp->FirstPackage == 0) // the group is new
463 {
464 // Insert it into the hash table
465 unsigned long const Hash = Cache.Hash(Name);
466 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
467 Cache.HeaderP->PkgHashTable[Hash] = Package;
468 Grp->FirstPackage = Package;
469 }
470 else // Group the Packages together
471 {
472 // this package is the new last package
473 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
474 Pkg->NextPackage = LastPkg->NextPackage;
475 LastPkg->NextPackage = Package;
476 }
477 Grp->LastPackage = Package;
478
479 // Set the name, arch and the ID
480 Pkg->Name = Grp->Name;
481 Pkg->Group = Grp.Index();
482 // all is mapped to the native architecture
483 map_ptrloc const idxArch = (Arch == "all") ? Cache.HeaderP->Architecture : WriteUniqString(Arch.c_str());
484 if (unlikely(idxArch == 0))
485 return false;
486 Pkg->Arch = idxArch;
487 Pkg->ID = Cache.HeaderP->PackageCount++;
488
489 return true;
490 }
491 /*}}}*/
492 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
493 // ---------------------------------------------------------------------
494 /* This records that the version appears in the index file currently being parsed (CurrentFile) */
495 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
496 ListParser &List)
497 {
498 if (CurrentFile == 0)
499 return true;
500
501 // Get a structure
502 map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
503 if (VerFile == 0)
504 return false;
505
506 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
507 VF->File = CurrentFile - Cache.PkgFileP;
508
509 // Link it to the end of the list
510 map_ptrloc *Last = &Ver->FileList;
511 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
512 Last = &V->NextFile;
513 VF->NextFile = *Last;
514 *Last = VF.Index();
515
516 VF->Offset = List.Offset();
517 VF->Size = List.Size();
518 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
519 Cache.HeaderP->MaxVerFileSize = VF->Size;
520 Cache.HeaderP->VerFileCount++;
521
522 return true;
523 }
524 /*}}}*/
525 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
526 // ---------------------------------------------------------------------
527 /* This puts a version structure in the linked list */
528 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
529 const string &VerStr,
530 unsigned long Next)
531 {
532 // Get a structure
533 map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
534 if (Version == 0)
535 return 0;
536
537 // Fill it in
538 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
539 Ver->NextVer = Next;
540 Ver->ID = Cache.HeaderP->VersionCount++;
541 map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
542 if (unlikely(idxVerStr == 0))
543 return 0;
544 Ver->VerStr = idxVerStr;
545
546 return Version;
547 }
548 /*}}}*/
549 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
550 // ---------------------------------------------------------------------
551 /* This records that the description appears in the index file currently being parsed (CurrentFile) */
552 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
553 ListParser &List)
554 {
555 if (CurrentFile == 0)
556 return true;
557
558 // Get a structure
559 map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
560 if (DescFile == 0)
561 return false;
562
563 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
564 DF->File = CurrentFile - Cache.PkgFileP;
565
566 // Link it to the end of the list
567 map_ptrloc *Last = &Desc->FileList;
568 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
569 Last = &D->NextFile;
570
571 DF->NextFile = *Last;
572 *Last = DF.Index();
573
574 DF->Offset = List.Offset();
575 DF->Size = List.Size();
576 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
577 Cache.HeaderP->MaxDescFileSize = DF->Size;
578 Cache.HeaderP->DescFileCount++;
579
580 return true;
581 }
582 /*}}}*/
583 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
584 // ---------------------------------------------------------------------
585 /* This puts a description structure in the linked list */
586 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
587 const string &Lang,
588 const MD5SumValue &md5sum,
589 map_ptrloc Next)
590 {
591 // Get a structure
592 map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
593 if (Description == 0)
594 return 0;
595
596 // Fill it in
597 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
598 Desc->NextDesc = Next;
599 Desc->ID = Cache.HeaderP->DescriptionCount++;
600 map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
601 map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
602 if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
603 return 0;
604 Desc->language_code = idxlanguage_code;
605 Desc->md5sum = idxmd5sum;
606
607 return Description;
608 }
609 /*}}}*/
610 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
611 // ---------------------------------------------------------------------
612 /* This prepares the Cache for delivery */
613 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
614 {
615 // FIXME: add progress reporting for this operation
616 // Do we have different architectures in our groups?
617 vector<string> archs = APT::Configuration::getArchitectures();
618 if (archs.size() > 1)
619 {
620 // Create Conflicts between the members of a group
621 pkgCache::GrpIterator G = GetCache().GrpBegin();
622 Dynamic<pkgCache::GrpIterator> DynG(G);
623 for (; G.end() != true; G++)
624 {
625 string const PkgName = G.Name();
626 pkgCache::PkgIterator P = G.PackageList();
627 Dynamic<pkgCache::PkgIterator> DynP(P);
628 for (; P.end() != true; P = G.NextPkg(P))
629 {
630 pkgCache::PkgIterator allPkg;
631 Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
632 pkgCache::VerIterator V = P.VersionList();
633 Dynamic<pkgCache::VerIterator> DynV(V);
634 for (; V.end() != true; V++)
635 {
636 char const * const Arch = P.Arch();
637 map_ptrloc *OldDepLast = NULL;
638 /* MultiArch handling introduces a lot of implicit Dependencies:
639 - MultiArch: same → Co-Installable if they have the same version
640 - Architecture: all → Need to be Co-Installable for internal reasons
641 - All others conflict with all other group members */
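/* Illustrative example (not taken from real data): for a package "foo"
   built for amd64 and i386 with "MultiArch: same", foo:amd64 ends up with
   Replaces: foo:i386 (<< $version) and Breaks: foo:i386 (!= $version);
   without "MultiArch: same" it simply gets Conflicts: foo:i386. */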
642 bool const coInstall = ((V->MultiArch & pkgCache::Version::Same) == pkgCache::Version::Same);
643 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
644 {
645 if (Arch == 0 || *A == Arch)
646 continue;
647 /* We allow only one installed arch at a time
648 per group, therefore each group member conflicts
649 with all other group members */
650 pkgCache::PkgIterator D = G.FindPkg(*A);
651 Dynamic<pkgCache::PkgIterator> DynD(D);
652 if (D.end() == true)
653 continue;
654 if (coInstall == true)
655 {
656 // Replaces: ${self}:other ( << ${binary:Version})
657 NewDepends(D, V, V.VerStr(),
658 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
659 OldDepLast);
660 // Breaks: ${self}:other (!= ${binary:Version})
661 NewDepends(D, V, V.VerStr(),
662 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
663 OldDepLast);
664 } else {
665 // Conflicts: ${self}:other
666 NewDepends(D, V, "",
667 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
668 OldDepLast);
669 }
670 }
671 }
672 }
673 }
674 }
675 return true;
676 }
677 /*}}}*/
678 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
679 // ---------------------------------------------------------------------
680 /* This creates a dependency element in the tree. It is linked to the
681 version and to the package that it is pointing to. */
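/* OldDepLast caches the tail of the version's dependency list between
   calls so that appending further dependencies of the same version does
   not require walking the whole list again (see ListParser::NewDepends). */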
682 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
683 pkgCache::VerIterator &Ver,
684 string const &Version,
685 unsigned int const &Op,
686 unsigned int const &Type,
687 map_ptrloc *OldDepLast)
688 {
689 void const * const oldMap = Map.Data();
690 // Get a structure
691 map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
692 if (unlikely(Dependency == 0))
693 return false;
694
695 // Fill it in
696 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
697 Dynamic<pkgCache::DepIterator> DynDep(Dep);
698 Dep->ParentVer = Ver.Index();
699 Dep->Type = Type;
700 Dep->CompareOp = Op;
701 Dep->ID = Cache.HeaderP->DependsCount++;
702
703 // Probe the reverse dependency list for a version string that matches
704 if (Version.empty() == false)
705 {
706 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
707 if (I->Version != 0 && I.TargetVer() == Version)
708 Dep->Version = I->Version;*/
709 if (Dep->Version == 0) {
710 map_ptrloc const index = WriteStringInMap(Version);
711 if (unlikely(index == 0))
712 return false;
713 Dep->Version = index;
714 }
715 }
716
717 // Link it to the package
718 Dep->Package = Pkg.Index();
719 Dep->NextRevDepends = Pkg->RevDepends;
720 Pkg->RevDepends = Dep.Index();
721
722 // Do we know where to link the Dependency to?
723 if (OldDepLast == NULL)
724 {
725 OldDepLast = &Ver->DependsList;
726 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
727 OldDepLast = &D->NextDepends;
728 } else if (oldMap != Map.Data())
729 OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
730
731 Dep->NextDepends = *OldDepLast;
732 *OldDepLast = Dep.Index();
733 OldDepLast = &Dep->NextDepends;
734
735 return true;
736 }
737 /*}}}*/
738 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
739 // ---------------------------------------------------------------------
740 /* This creates a Group and the Package to link this dependency to if
741 needed, and also handles the caching of the old endpoint */
742 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
743 const string &PackageName,
744 const string &Arch,
745 const string &Version,
746 unsigned int Op,
747 unsigned int Type)
748 {
749 pkgCache::GrpIterator Grp;
750 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
751 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
752 return false;
753
754 // Locate the target package
755 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
756 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
757 if (Pkg.end() == true) {
758 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
759 return false;
760 }
761
762 // Is it a file dependency?
763 if (unlikely(PackageName[0] == '/'))
764 FoundFileDeps = true;
765
766 /* Caching the old end point speeds up generation substantially */
767 if (OldDepVer != Ver) {
768 OldDepLast = NULL;
769 OldDepVer = Ver;
770 }
771
772 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
773 }
774 /*}}}*/
775 // ListParser::NewProvides - Create a Provides element /*{{{*/
776 // ---------------------------------------------------------------------
777 /* This creates a Provides structure and links it into both the providing version and the provided package */
778 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
779 const string &PkgName,
780 const string &PkgArch,
781 const string &Version)
782 {
783 pkgCache &Cache = Owner->Cache;
784
785 // We do not add self-referencing provides
786 if (Ver.ParentPkg().Name() == PkgName && (PkgArch == Ver.ParentPkg().Arch() ||
787 (PkgArch == "all" && strcmp((Cache.StrP + Cache.HeaderP->Architecture), Ver.ParentPkg().Arch()) == 0)))
788 return true;
789
790 // Get a structure
791 map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
792 if (unlikely(Provides == 0))
793 return false;
794 Cache.HeaderP->ProvidesCount++;
795
796 // Fill it in
797 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
798 Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
799 Prv->Version = Ver.Index();
800 Prv->NextPkgProv = Ver->ProvidesList;
801 Ver->ProvidesList = Prv.Index();
802 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
803 return false;
804
805 // Locate the target package
806 pkgCache::PkgIterator Pkg;
807 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
808 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
809 return false;
810
811 // Link it to the package
812 Prv->ParentPkg = Pkg.Index();
813 Prv->NextProvides = Pkg->ProvidesList;
814 Pkg->ProvidesList = Prv.Index();
815
816 return true;
817 }
818 /*}}}*/
819 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
820 // ---------------------------------------------------------------------
821 /* This is used to select which file is to be associated with all newly
822 added versions. The caller is responsible for setting the IMS fields. */
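/* Every later NewFileVer()/NewFileDesc() call records the parser's
   Offset()/Size() against this file, which is what lets the cache map a
   version back to its stanza in the index file. */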
823 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
824 const pkgIndexFile &Index,
825 unsigned long Flags)
826 {
827 // Get some space for the structure
828 map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
829 if (unlikely(idxFile == 0))
830 return false;
831 CurrentFile = Cache.PkgFileP + idxFile;
832
833 // Fill it in
834 map_ptrloc const idxFileName = WriteStringInMap(File);
835 map_ptrloc const idxSite = WriteUniqString(Site);
836 if (unlikely(idxFileName == 0 || idxSite == 0))
837 return false;
838 CurrentFile->FileName = idxFileName;
839 CurrentFile->Site = idxSite;
840 CurrentFile->NextFile = Cache.HeaderP->FileList;
841 CurrentFile->Flags = Flags;
842 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
843 map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
844 if (unlikely(idxIndexType == 0))
845 return false;
846 CurrentFile->IndexType = idxIndexType;
847 PkgFileName = File;
848 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
849 Cache.HeaderP->PackageFileCount++;
850
851 if (Progress != 0)
852 Progress->SubProgress(Index.Size());
853 return true;
854 }
855 /*}}}*/
856 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
857 // ---------------------------------------------------------------------
858 /* This is used to create handles to strings. Given the same text it
859 always returns the same number */
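/* The lookup is two-level: the tiny transient UniqHash bucket catches
   repeated strings such as architectures and sites cheaply, while the
   sorted StringItem list stored in the cache itself is the authoritative,
   duplicate-free store. */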
860 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
861 unsigned int Size)
862 {
863 /* We use a very small transient hash table here; this speeds up generation
864 by a fair amount on slower machines */
865 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
866 if (Bucket != 0 &&
867 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
868 return Bucket->String;
869
870 // Search for an insertion point
871 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
872 int Res = 1;
873 map_ptrloc *Last = &Cache.HeaderP->StringList;
874 for (; I != Cache.StringItemP; Last = &I->NextItem,
875 I = Cache.StringItemP + I->NextItem)
876 {
877 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
878 if (Res >= 0)
879 break;
880 }
881
882 // Match
883 if (Res == 0)
884 {
885 Bucket = I;
886 return I->String;
887 }
888
889 // Get a structure
890 void const * const oldMap = Map.Data();
891 map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
892 if (Item == 0)
893 return 0;
894
895 map_ptrloc const idxString = WriteStringInMap(S,Size);
896 if (unlikely(idxString == 0))
897 return 0;
898 if (oldMap != Map.Data()) {
899 Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
900 I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
901 }
902 *Last = Item;
903
904 // Fill in the structure
905 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
906 ItemP->NextItem = I - Cache.StringItemP;
907 ItemP->String = idxString;
908
909 Bucket = ItemP;
910 return ItemP->String;
911 }
912 /*}}}*/
913 // CheckValidity - Check that a cache is up-to-date /*{{{*/
914 // ---------------------------------------------------------------------
915 /* This just verifies that each file in the list of index files exists,
916 has attributes matching the cache, and that the cache does not have
917 any extra files. */
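/* The Visited array below is what catches "extra" files: every package
   file recorded in the cache has to be matched by a current index file,
   otherwise the cache is treated as stale and rebuilt. */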
918 static bool CheckValidity(const string &CacheFile, FileIterator Start,
919 FileIterator End,MMap **OutMap = 0)
920 {
921 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
922 // No file, certainly invalid
923 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
924 {
925 if (Debug == true)
926 std::clog << "CacheFile doesn't exist" << std::endl;
927 return false;
928 }
929
930 // Map it
931 FileFd CacheF(CacheFile,FileFd::ReadOnly);
932 SPtr<MMap> Map = new MMap(CacheF,0);
933 pkgCache Cache(Map);
934 if (_error->PendingError() == true || Map->Size() == 0)
935 {
936 if (Debug == true)
937 std::clog << "Errors are pending or Map is empty()" << std::endl;
938 _error->Discard();
939 return false;
940 }
941
942 /* Now we check every index file, see if it is in the cache,
943 verify the IMS data and check that it is on the disk too. */
944 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
945 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
946 for (; Start != End; Start++)
947 {
948 if (Debug == true)
949 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
950 if ((*Start)->HasPackages() == false)
951 {
952 if (Debug == true)
953 std::clog << "Has NO packages" << std::endl;
954 continue;
955 }
956
957 if ((*Start)->Exists() == false)
958 {
959 #if 0 // mvo: we no longer give a message here (Default Sources spec)
960 _error->WarningE("stat",_("Couldn't stat source package list %s"),
961 (*Start)->Describe().c_str());
962 #endif
963 if (Debug == true)
964 std::clog << "file doesn't exist" << std::endl;
965 continue;
966 }
967
968 // FindInCache is also expected to do an IMS check.
969 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
970 if (File.end() == true)
971 {
972 if (Debug == true)
973 std::clog << "FindInCache returned end-Pointer" << std::endl;
974 return false;
975 }
976
977 Visited[File->ID] = true;
978 if (Debug == true)
979 std::clog << "with ID " << File->ID << " is valid" << std::endl;
980 }
981
982 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
983 if (Visited[I] == false)
984 {
985 if (Debug == true)
986 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
987 return false;
988 }
989
990 if (_error->PendingError() == true)
991 {
992 if (Debug == true)
993 {
994 std::clog << "Validity failed because of pending errors:" << std::endl;
995 _error->DumpErrors();
996 }
997 _error->Discard();
998 return false;
999 }
1000
1001 if (OutMap != 0)
1002 *OutMap = Map.UnGuard();
1003 return true;
1004 }
1005 /*}}}*/
1006 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
1007 // ---------------------------------------------------------------------
1008 /* Size is kind of an abstract notion that is only used for the progress
1009 meter */
1010 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
1011 {
1012 unsigned long TotalSize = 0;
1013 for (; Start != End; Start++)
1014 {
1015 if ((*Start)->HasPackages() == false)
1016 continue;
1017 TotalSize += (*Start)->Size();
1018 }
1019 return TotalSize;
1020 }
1021 /*}}}*/
1022 // BuildCache - Merge the list of index files into the cache /*{{{*/
1023 // ---------------------------------------------------------------------
1024 /* Merge every index file into the generator; if file dependencies were found, make a second pass to collect the file provides */
1025 static bool BuildCache(pkgCacheGenerator &Gen,
1026 OpProgress *Progress,
1027 unsigned long &CurrentSize,unsigned long TotalSize,
1028 FileIterator Start, FileIterator End)
1029 {
1030 FileIterator I;
1031 for (I = Start; I != End; I++)
1032 {
1033 if ((*I)->HasPackages() == false)
1034 continue;
1035
1036 if ((*I)->Exists() == false)
1037 continue;
1038
1039 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
1040 {
1041 _error->Warning("Duplicate sources.list entry %s",
1042 (*I)->Describe().c_str());
1043 continue;
1044 }
1045
1046 unsigned long Size = (*I)->Size();
1047 if (Progress != NULL)
1048 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
1049 CurrentSize += Size;
1050
1051 if ((*I)->Merge(Gen,Progress) == false)
1052 return false;
1053 }
1054
1055 if (Gen.HasFileDeps() == true)
1056 {
1057 if (Progress != NULL)
1058 Progress->Done();
1059 TotalSize = ComputeSize(Start, End);
1060 CurrentSize = 0;
1061 for (I = Start; I != End; I++)
1062 {
1063 unsigned long Size = (*I)->Size();
1064 if (Progress != NULL)
1065 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
1066 CurrentSize += Size;
1067 if ((*I)->MergeFileProvides(Gen,Progress) == false)
1068 return false;
1069 }
1070 }
1071
1072 return true;
1073 }
1074 /*}}}*/
1075 // CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
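/* Map geometry is configurable: APT::Cache-Start (initial allocation),
   APT::Cache-Grow (growth step) and APT::Cache-Limit (maximum size, 0 for
   no limit), while APT::Cache-Fallback additionally sets MMap::Fallback. */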
1076 DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
1077 unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
1078 unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
1079 unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
1080 Flags |= MMap::Moveable;
1081 if (_config->FindB("APT::Cache-Fallback", false) == true)
1082 Flags |= MMap::Fallback;
1083 if (CacheF != NULL)
1084 return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
1085 else
1086 return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
1087 }
1088 /*}}}*/
1089 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
1090 // ---------------------------------------------------------------------
1091 /* This makes sure that the status cache (the cache that has all
1092 index files from the sources list and all local ones) is ready
1093 to be mmapped. If OutMap is not zero then an MMap object representing
1094 the cache will be stored there. This is pretty much mandatory if you
1095 are using AllowMem. AllowMem lets the function be run as non-root
1096 where it builds the cache 'fast' into a memory buffer. */
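/* A minimal usage sketch (assuming an initialised configuration and
   sources.list; error handling trimmed):

     pkgSourceList List;
     List.ReadMainList();
     OpTextProgress Prog(*_config);
     MMap *OutMap = 0;
     if (pkgCacheGenerator::MakeStatusCache(List, &Prog, &OutMap, true) == false)
        return false;
     pkgCache Cache(OutMap);   // ready to use
*/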
1097 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
1098 MMap **OutMap, bool AllowMem)
1099 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
1100 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1101 MMap **OutMap,bool AllowMem)
1102 {
1103 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1104
1105 vector<pkgIndexFile *> Files;
1106 for (vector<metaIndex *>::const_iterator i = List.begin();
1107 i != List.end();
1108 i++)
1109 {
1110 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1111 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1112 j != Indexes->end();
1113 j++)
1114 Files.push_back (*j);
1115 }
1116
1117 unsigned long const EndOfSource = Files.size();
1118 if (_system->AddStatusFiles(Files) == false)
1119 return false;
1120
1121 // Decide which cache files to use
1122 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1123 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1124
1125 // ensure the cache directory exists
1126 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1127 {
1128 string dir = _config->FindDir("Dir::Cache");
1129 size_t const len = dir.size();
1130 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1131 dir = dir.substr(0, len - 5);
1132 if (CacheFile.empty() == false)
1133 CreateDirectory(dir, flNotFile(CacheFile));
1134 if (SrcCacheFile.empty() == false)
1135 CreateDirectory(dir, flNotFile(SrcCacheFile));
1136 }
1137
1138 // Decide if we can write to the cache
1139 bool Writeable = false;
1140 if (CacheFile.empty() == false)
1141 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1142 else
1143 if (SrcCacheFile.empty() == false)
1144 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1145 if (Debug == true)
1146 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1147
1148 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1149 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1150
1151 if (Progress != NULL)
1152 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1153
1154 // Cache is OK, Fin.
1155 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1156 {
1157 if (Progress != NULL)
1158 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1159 if (Debug == true)
1160 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1161 return true;
1162 }
1163 else if (Debug == true)
1164 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1165
1166 /* At this point we know we need to reconstruct the package cache,
1167 begin. */
1168 SPtr<FileFd> CacheF;
1169 SPtr<DynamicMMap> Map;
1170 if (Writeable == true && CacheFile.empty() == false)
1171 {
1172 _error->PushToStack();
1173 unlink(CacheFile.c_str());
1174 CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
1175 fchmod(CacheF->Fd(),0644);
1176 Map = CreateDynamicMMap(CacheF, MMap::Public);
1177 if (_error->PendingError() == true)
1178 {
1179 delete CacheF.UnGuard();
1180 delete Map.UnGuard();
1181 if (Debug == true)
1182 std::clog << "Open filebased MMap FAILED" << std::endl;
1183 Writeable = false;
1184 if (AllowMem == false)
1185 {
1186 _error->MergeWithStack();
1187 return false;
1188 }
1189 _error->RevertToStack();
1190 }
1191 else if (Debug == true)
1192 {
1193 _error->MergeWithStack();
1194 std::clog << "Open filebased MMap" << std::endl;
1195 }
1196 }
1197 if (Writeable == false || CacheFile.empty() == true)
1198 {
1199 // Just build it in memory..
1200 Map = CreateDynamicMMap(NULL);
1201 if (Debug == true)
1202 std::clog << "Open memory Map (not filebased)" << std::endl;
1203 }
1204
1205 // Let's try the source cache.
1206 unsigned long CurrentSize = 0;
1207 unsigned long TotalSize = 0;
1208 if (CheckValidity(SrcCacheFile,Files.begin(),
1209 Files.begin()+EndOfSource) == true)
1210 {
1211 if (Debug == true)
1212 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1213 // Preload the map with the source cache
1214 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1215 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1216 if ((alloc == 0 && _error->PendingError())
1217 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1218 SCacheF.Size()) == false)
1219 return false;
1220
1221 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1222
1223 // Build the status cache
1224 pkgCacheGenerator Gen(Map.Get(),Progress);
1225 if (_error->PendingError() == true)
1226 return false;
1227 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1228 Files.begin()+EndOfSource,Files.end()) == false)
1229 return false;
1230
1231 // FIXME: move me to a better place
1232 Gen.FinishCache(Progress);
1233 }
1234 else
1235 {
1236 if (Debug == true)
1237 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1238 TotalSize = ComputeSize(Files.begin(),Files.end());
1239
1240 // Build the source cache
1241 pkgCacheGenerator Gen(Map.Get(),Progress);
1242 if (_error->PendingError() == true)
1243 return false;
1244 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1245 Files.begin(),Files.begin()+EndOfSource) == false)
1246 return false;
1247
1248 // Write it back
1249 if (Writeable == true && SrcCacheFile.empty() == false)
1250 {
1251 FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
1252 if (_error->PendingError() == true)
1253 return false;
1254
1255 fchmod(SCacheF.Fd(),0644);
1256
1257 // Write out the main data
1258 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1259 return _error->Error(_("IO Error saving source cache"));
1260 SCacheF.Sync();
1261
1262 // Write out the proper header
1263 Gen.GetCache().HeaderP->Dirty = false;
1264 if (SCacheF.Seek(0) == false ||
1265 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1266 return _error->Error(_("IO Error saving source cache"));
1267 Gen.GetCache().HeaderP->Dirty = true;
1268 SCacheF.Sync();
1269 }
1270
1271 // Build the status cache
1272 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1273 Files.begin()+EndOfSource,Files.end()) == false)
1274 return false;
1275
1276 // FIXME: move me to a better place
1277 Gen.FinishCache(Progress);
1278 }
1279 if (Debug == true)
1280 std::clog << "Caches are ready for shipping" << std::endl;
1281
1282 if (_error->PendingError() == true)
1283 return false;
1284 if (OutMap != 0)
1285 {
1286 if (CacheF != 0)
1287 {
1288 delete Map.UnGuard();
1289 *OutMap = new MMap(*CacheF,0);
1290 }
1291 else
1292 {
1293 *OutMap = Map.UnGuard();
1294 }
1295 }
1296
1297 return true;
1298 }
1299 /*}}}*/
1300 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1301 // ---------------------------------------------------------------------
1302 /* Like MakeStatusCache, but builds an in-memory cache containing only the status files provided by the system */
1303 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1304 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1305 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1306 {
1307 vector<pkgIndexFile *> Files;
1308 unsigned long EndOfSource = Files.size();
1309 if (_system->AddStatusFiles(Files) == false)
1310 return false;
1311
1312 SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
1313 unsigned long CurrentSize = 0;
1314 unsigned long TotalSize = 0;
1315
1316 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1317
1318 // Build the status cache
1319 if (Progress != NULL)
1320 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1321 pkgCacheGenerator Gen(Map.Get(),Progress);
1322 if (_error->PendingError() == true)
1323 return false;
1324 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1325 Files.begin()+EndOfSource,Files.end()) == false)
1326 return false;
1327
1328 // FIXME: move me to a better place
1329 Gen.FinishCache(Progress);
1330
1331 if (_error->PendingError() == true)
1332 return false;
1333 *OutMap = Map.UnGuard();
1334
1335 return true;
1336 }
1337 /*}}}*/