use references instead of copies in the Cache generation methods
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that it is written to disk */
43 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46 {
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = WriteStringInMap(_system->VS->Label);
65 Cache.HeaderP->Architecture = WriteStringInMap(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82 }
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 avoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
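// Example: generator lifecycle (editor sketch, not in the original file) /*{{{*/
// ---------------------------------------------------------------------
/* A minimal illustration of the dirty-flag protocol implemented above,
   assuming an in-memory map as used by MakeOnlyStatusCache() further down.
   The constructor marks the header dirty and syncs it; the destructor clears
   the flag only after a successful full sync, so a crash mid-generation
   leaves a cache that is recognisably dirty. */
#if 0 // illustrative only
   SPtr<DynamicMMap> Map = new DynamicMMap(0,24*1024*1024);
   OpTextProgress Prog;
   {
      pkgCacheGenerator Gen(Map.Get(),&Prog);   // Dirty = true, header synced
      // ... SelectFile()/MergeList() calls would fill the map here ...
   }                                            // ~pkgCacheGenerator: Dirty = false
#endif
/*}}}*/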
99 // CacheGenerator::WriteStringInMap /*{{{*/
100 unsigned long pkgCacheGenerator::WriteStringInMap(const char *String,
101 const unsigned long &Len) {
102 return Map.WriteString(String, Len);
103 }
104 /*}}}*/
105 // CacheGenerator::WriteStringInMap /*{{{*/
106 unsigned long pkgCacheGenerator::WriteStringInMap(const char *String) {
107 return Map.WriteString(String);
108 }
109 /*}}}*/
110 unsigned long pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
111 return Map.Allocate(size);
112 }
113 /*}}}*/
114 // CacheGenerator::MergeList - Merge the package list /*{{{*/
115 // ---------------------------------------------------------------------
116 /* This drives the generation of the entries in the cache. Each loop
117 iteration processes a single package record from the underlying parse engine. */
118 bool pkgCacheGenerator::MergeList(ListParser &List,
119 pkgCache::VerIterator *OutVer)
120 {
121 List.Owner = this;
122
123 unsigned int Counter = 0;
124 while (List.Step() == true)
125 {
126 string const PackageName = List.Package();
127 if (PackageName.empty() == true)
128 return false;
129
130 /* As we handle Architecture: all packages as architecture bound,
131 we add all their information to every (simulated) arch package */
132 std::vector<string> genArch;
133 if (List.ArchitectureAll() == true) {
134 genArch = APT::Configuration::getArchitectures();
135 if (genArch.size() != 1)
136 genArch.push_back("all");
137 } else
138 genArch.push_back(List.Architecture());
139
140 for (std::vector<string>::const_iterator arch = genArch.begin();
141 arch != genArch.end(); ++arch)
142 {
143 // Get a pointer to the package structure
144 pkgCache::PkgIterator Pkg;
145 if (NewPackage(Pkg, PackageName, *arch) == false)
146 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
147 Counter++;
148 if (Counter % 100 == 0 && Progress != 0)
149 Progress->Progress(List.Offset());
150
151 /* Get a pointer to the version structure. We know the list is sorted
152 so we use that fact in the search. Insertion of new versions is
153 done with correct sorting */
154 string Version = List.Version();
155 if (Version.empty() == true)
156 {
157 // we first process the package, then the descriptions
158 // (this has the bonus that we get an MMap error when we run out
159 // of MMap space)
160 pkgCache::VerIterator Ver(Cache);
161 if (List.UsePackage(Pkg, Ver) == false)
162 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
163 PackageName.c_str());
164
165 // Find the right version to write the description
166 MD5SumValue CurMd5 = List.Description_md5();
167 Ver = Pkg.VersionList();
168 map_ptrloc *LastVer = &Pkg->VersionList;
169
170 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
171 {
172 pkgCache::DescIterator Desc = Ver.DescriptionList();
173 map_ptrloc *LastDesc = &Ver->DescriptionList;
174 bool duplicate=false;
175
176 // don't add a new description if we have one for the given
177 // md5 && language
178 for ( ; Desc.end() == false; Desc++)
179 if (MD5SumValue(Desc.md5()) == CurMd5 &&
180 Desc.LanguageCode() == List.DescriptionLanguage())
181 duplicate=true;
182 if(duplicate)
183 continue;
184
185 for (Desc = Ver.DescriptionList();
186 Desc.end() == false;
187 LastDesc = &Desc->NextDesc, Desc++)
188 {
189 if (MD5SumValue(Desc.md5()) == CurMd5)
190 {
191 // Add new description
192 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
193 Desc->ParentPkg = Pkg.Index();
194
195 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
196 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
197 break;
198 }
199 }
200 }
201
202 continue;
203 }
204
205 pkgCache::VerIterator Ver = Pkg.VersionList();
206 map_ptrloc *LastVer = &Pkg->VersionList;
207 int Res = 1;
208 unsigned long const Hash = List.VersionHash();
209 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
210 {
211 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
212 // Version is higher than the current version - insert here
213 if (Res > 0)
214 break;
215 // Version strings are equal - is the hash also equal?
216 if (Res == 0 && Ver->Hash == Hash)
217 break;
218 // proceed to the next one until we either find the right version
219 // or a lower one (the list is sorted, so no later match is possible)
220 }
221
222 /* We already have a version for this item, record that we saw it */
223 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
224 {
225 if (List.UsePackage(Pkg,Ver) == false)
226 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
227 PackageName.c_str());
228
229 if (NewFileVer(Ver,List) == false)
230 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
231 PackageName.c_str());
232
233 // Read only a single record and return
234 if (OutVer != 0)
235 {
236 *OutVer = Ver;
237 FoundFileDeps |= List.HasFileDeps();
238 return true;
239 }
240
241 continue;
242 }
243
244 // Add a new version
245 *LastVer = NewVersion(Ver,Version,*LastVer);
246 Ver->ParentPkg = Pkg.Index();
247 Ver->Hash = Hash;
248
249 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
250 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
251 PackageName.c_str());
252
253 if (List.UsePackage(Pkg,Ver) == false)
254 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
255 PackageName.c_str());
256
257 if (NewFileVer(Ver,List) == false)
258 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
259 PackageName.c_str());
260
261 // Read only a single record and return
262 if (OutVer != 0)
263 {
264 *OutVer = Ver;
265 FoundFileDeps |= List.HasFileDeps();
266 return true;
267 }
268
269 /* Record the Description data. Description data always exists in
270 Packages and Translation-* files. */
271 pkgCache::DescIterator Desc = Ver.DescriptionList();
272 map_ptrloc *LastDesc = &Ver->DescriptionList;
273
274 // Skip to the end of description set
275 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
276
277 // Add new description
278 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
279 Desc->ParentPkg = Pkg.Index();
280
281 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
282 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
283 }
284 }
285
286 FoundFileDeps |= List.HasFileDeps();
287
288 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
289 return _error->Error(_("Wow, you exceeded the number of package "
290 "names this APT is capable of."));
291 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
292 return _error->Error(_("Wow, you exceeded the number of versions "
293 "this APT is capable of."));
294 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
295 return _error->Error(_("Wow, you exceeded the number of descriptions "
296 "this APT is capable of."));
297 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
298 return _error->Error(_("Wow, you exceeded the number of dependencies "
299 "this APT is capable of."));
300 return true;
301 }
302 /*}}}*/
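// Example: driving MergeList from an index file (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A hedged sketch of how a pkgIndexFile::Merge(Gen,Progress) implementation
   in the deb backend roughly feeds this method. debListParser (from
   <apt-pkg/deblistparser.h>), PackagesFilePath and Site are assumptions for
   illustration, not definitions from this file. */
#if 0 // illustrative only
   FileFd Pkgs(PackagesFilePath,FileFd::ReadOnly);
   debListParser Parser(&Pkgs);
   if (Gen.SelectFile(PackagesFilePath,Site,*this) == false)
      return _error->Error("Problem with SelectFile %s",PackagesFilePath.c_str());
   return Gen.MergeList(Parser);
#endif
/*}}}*/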
303 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
304 // ---------------------------------------------------------------------
305 /* If we found any file dependencies while parsing the main list we need to
306 resolve them. Since it is undesirable to load the entire list of files
307 into the cache as virtual packages, we do a two-stage effort. MergeList
308 identifies the file dependencies and this pass creates Provides for them by
309 re-parsing all the indexes. */
310 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
311 {
312 List.Owner = this;
313
314 unsigned int Counter = 0;
315 while (List.Step() == true)
316 {
317 string PackageName = List.Package();
318 if (PackageName.empty() == true)
319 return false;
320 string Version = List.Version();
321 if (Version.empty() == true)
322 continue;
323
324 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
325 if (Pkg.end() == true)
326 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
327 PackageName.c_str());
328 Counter++;
329 if (Counter % 100 == 0 && Progress != 0)
330 Progress->Progress(List.Offset());
331
332 unsigned long Hash = List.VersionHash();
333 pkgCache::VerIterator Ver = Pkg.VersionList();
334 for (; Ver.end() == false; Ver++)
335 {
336 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare contents, not pointers
337 {
338 if (List.CollectFileProvides(Cache,Ver) == false)
339 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
340 break;
341 }
342 }
343
344 if (Ver.end() == true)
345 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
346 }
347
348 return true;
349 }
350 /*}}}*/
351 // CacheGenerator::NewGroup - Add a new group /*{{{*/
352 // ---------------------------------------------------------------------
353 /* This creates a new group structure and adds it to the hash table */
354 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
355 {
356 Grp = Cache.FindGrp(Name);
357 if (Grp.end() == false)
358 return true;
359
360 // Get a structure
361 unsigned long const Group = AllocateInMap(sizeof(pkgCache::Group));
362 if (unlikely(Group == 0))
363 return false;
364
365 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
366 Grp->Name = WriteStringInMap(Name);
367 if (unlikely(Grp->Name == 0))
368 return false;
369
370 // Insert it into the hash table
371 unsigned long const Hash = Cache.Hash(Name);
372 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
373 Cache.HeaderP->GrpHashTable[Hash] = Group;
374
375 Grp->ID = Cache.HeaderP->GroupCount++;
376 return true;
377 }
378 /*}}}*/
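// Example: NewGroup is idempotent (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* Small illustration of the lookup-before-allocate pattern above: asking for
   the same group name twice yields the same record. The name "coreutils" is
   just an example value. */
#if 0 // illustrative only
   pkgCache::GrpIterator A, B;
   NewGroup(A,"coreutils");
   NewGroup(B,"coreutils");
   // A and B now reference the same pkgCache::Group structure
#endif
/*}}}*/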
379 // CacheGenerator::NewPackage - Add a new package /*{{{*/
380 // ---------------------------------------------------------------------
381 /* This creates a new package structure and adds it to the hash table */
382 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
383 const string &Arch) {
384 pkgCache::GrpIterator Grp;
385 if (unlikely(NewGroup(Grp, Name) == false))
386 return false;
387
388 Pkg = Grp.FindPkg(Arch);
389 if (Pkg.end() == false)
390 return true;
391
392 // Get a structure
393 unsigned long const Package = AllocateInMap(sizeof(pkgCache::Package));
394 if (unlikely(Package == 0))
395 return false;
396 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
397
398 // Insert the package into our package list
399 if (Grp->FirstPackage == 0) // the group is new
400 {
401 // Insert it into the hash table
402 unsigned long const Hash = Cache.Hash(Name);
403 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
404 Cache.HeaderP->PkgHashTable[Hash] = Package;
405 Grp->FirstPackage = Package;
406 }
407 else // Group the Packages together
408 {
409 // this package is the new last package
410 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
411 Pkg->NextPackage = LastPkg->NextPackage;
412 LastPkg->NextPackage = Package;
413 }
414 Grp->LastPackage = Package;
415
416 // Set the name, arch and the ID
417 Pkg->Name = Grp->Name;
418 Pkg->Group = Grp.Index();
419 Pkg->Arch = WriteUniqString(Arch.c_str());
420 if (unlikely(Pkg->Arch == 0))
421 return false;
422 Pkg->ID = Cache.HeaderP->PackageCount++;
423
424 return true;
425 }
426 /*}}}*/
427 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
428 // ---------------------------------------------------------------------
429 /* */
430 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
431 ListParser &List)
432 {
433 if (CurrentFile == 0)
434 return true;
435
436 // Get a structure
437 unsigned long VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
438 if (VerFile == 0)
439 return false;
440
441 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
442 VF->File = CurrentFile - Cache.PkgFileP;
443
444 // Link it to the end of the list
445 map_ptrloc *Last = &Ver->FileList;
446 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
447 Last = &V->NextFile;
448 VF->NextFile = *Last;
449 *Last = VF.Index();
450
451 VF->Offset = List.Offset();
452 VF->Size = List.Size();
453 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
454 Cache.HeaderP->MaxVerFileSize = VF->Size;
455 Cache.HeaderP->VerFileCount++;
456
457 return true;
458 }
459 /*}}}*/
460 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
461 // ---------------------------------------------------------------------
462 /* This puts a version structure in the linked list */
463 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
464 const string &VerStr,
465 unsigned long Next)
466 {
467 // Get a structure
468 unsigned long Version = AllocateInMap(sizeof(pkgCache::Version));
469 if (Version == 0)
470 return 0;
471
472 // Fill it in
473 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
474 Ver->NextVer = Next;
475 Ver->ID = Cache.HeaderP->VersionCount++;
476 Ver->VerStr = WriteStringInMap(VerStr);
477 if (Ver->VerStr == 0)
478 return 0;
479
480 return Version;
481 }
482 /*}}}*/
483 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
484 // ---------------------------------------------------------------------
485 /* */
486 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
487 ListParser &List)
488 {
489 if (CurrentFile == 0)
490 return true;
491
492 // Get a structure
493 unsigned long DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
494 if (DescFile == 0)
495 return false;
496
497 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
498 DF->File = CurrentFile - Cache.PkgFileP;
499
500 // Link it to the end of the list
501 map_ptrloc *Last = &Desc->FileList;
502 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
503 Last = &D->NextFile;
504
505 DF->NextFile = *Last;
506 *Last = DF.Index();
507
508 DF->Offset = List.Offset();
509 DF->Size = List.Size();
510 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
511 Cache.HeaderP->MaxDescFileSize = DF->Size;
512 Cache.HeaderP->DescFileCount++;
513
514 return true;
515 }
516 /*}}}*/
517 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
518 // ---------------------------------------------------------------------
519 /* This puts a description structure in the linked list */
520 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
521 const string &Lang,
522 const MD5SumValue &md5sum,
523 map_ptrloc Next)
524 {
525 // Get a structure
526 map_ptrloc Description = AllocateInMap(sizeof(pkgCache::Description));
527 if (Description == 0)
528 return 0;
529
530 // Fill it in
531 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
532 Desc->NextDesc = Next;
533 Desc->ID = Cache.HeaderP->DescriptionCount++;
534 Desc->language_code = WriteStringInMap(Lang);
535 Desc->md5sum = WriteStringInMap(md5sum.Value());
536 if (Desc->language_code == 0 || Desc->md5sum == 0)
537 return 0;
538
539 return Description;
540 }
541 /*}}}*/
542 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
543 // ---------------------------------------------------------------------
544 /* This prepares the Cache for delivery */
545 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
546 {
547 // FIXME: add progress reporting for this operation
548 // Do we have different architectures in our groups?
549 vector<string> archs = APT::Configuration::getArchitectures();
550 if (archs.size() > 1)
551 {
552 // Create Conflicts in between the group
553 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
554 {
555 string const PkgName = G.Name();
556 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
557 {
558 if (strcmp(P.Arch(),"all") == 0)
559 continue;
560 pkgCache::PkgIterator allPkg;
561 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
562 {
563 string const Arch = V.Arch(true);
564 map_ptrloc *OldDepLast = NULL;
565 /* MultiArch handling introduces a lot of implicit Dependencies:
566 - MultiArch: same → Co-Installable if they have the same version
567 - Architecture: all → Need to be Co-Installable for internal reasons
568 - All others conflict with all other group members */
569 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
570 V->MultiArch == pkgCache::Version::Same);
571 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
572 allPkg = G.FindPkg("all");
573 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
574 {
575 if (*A == Arch)
576 continue;
577 /* We allow only one installed arch at a time
578 per group, therefore each group member conflicts
579 with all other group members */
580 pkgCache::PkgIterator D = G.FindPkg(*A);
581 if (D.end() == true)
582 continue;
583 if (coInstall == true)
584 {
585 // Replaces: ${self}:other ( << ${binary:Version})
586 NewDepends(D, V, V.VerStr(),
587 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
588 OldDepLast);
589 // Breaks: ${self}:other (!= ${binary:Version})
590 NewDepends(D, V, V.VerStr(),
591 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
592 OldDepLast);
593 if (V->MultiArch == pkgCache::Version::All)
594 {
595 // Depend on ${self}:all which does depend on nothing
596 NewDepends(allPkg, V, V.VerStr(),
597 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
598 OldDepLast);
599 }
600 } else {
601 // Conflicts: ${self}:other
602 NewDepends(D, V, "",
603 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
604 OldDepLast);
605 }
606 }
607 }
608 }
609 }
610 }
611 return true;
612 }
613 /*}}}*/
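// Example: generated MultiArch dependencies (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A worked illustration of what the loop above produces, assuming the
   architectures "amd64" and "i386" and a hypothetical package libfoo with
   "MultiArch: same" at version 1.0:
      libfoo:amd64 1.0 gets   Replaces: libfoo:i386 (<< 1.0)
                              Breaks:   libfoo:i386 (!= 1.0)
   and libfoo:i386 gets the mirror image. A version without a co-installable
   MultiArch value instead receives a plain Conflicts against every other
   architecture member of its group. */
/*}}}*/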
614 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
615 // ---------------------------------------------------------------------
616 /* This creates a dependency element in the tree. It is linked to the
617 version and to the package that it is pointing to. */
618 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
619 pkgCache::VerIterator &Ver,
620 string const &Version,
621 unsigned int const &Op,
622 unsigned int const &Type,
623 map_ptrloc *OldDepLast)
624 {
625 // Get a structure
626 unsigned long const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
627 if (unlikely(Dependency == 0))
628 return false;
629
630 // Fill it in
631 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
632 Dep->ParentVer = Ver.Index();
633 Dep->Type = Type;
634 Dep->CompareOp = Op;
635 Dep->ID = Cache.HeaderP->DependsCount++;
636
637 // Probe the reverse dependency list for a version string that matches
638 if (Version.empty() == false)
639 {
640 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
641 if (I->Version != 0 && I.TargetVer() == Version)
642 Dep->Version = I->Version;*/
643 if (Dep->Version == 0)
644 if (unlikely((Dep->Version = WriteStringInMap(Version)) == 0))
645 return false;
646 }
647
648 // Link it to the package
649 Dep->Package = Pkg.Index();
650 Dep->NextRevDepends = Pkg->RevDepends;
651 Pkg->RevDepends = Dep.Index();
652
653 // Do we know where to link the Dependency to?
654 if (OldDepLast == NULL)
655 {
656 OldDepLast = &Ver->DependsList;
657 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
658 OldDepLast = &D->NextDepends;
659 }
660
661 Dep->NextDepends = *OldDepLast;
662 *OldDepLast = Dep.Index();
663 OldDepLast = &Dep->NextDepends;
664
665 return true;
666 }
667 /*}}}*/
668 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
669 // ---------------------------------------------------------------------
670 /* This creates a Group and the Package to link this dependency to if
671 needed, and also handles the caching of the old endpoint */
672 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
673 const string &PackageName,
674 const string &Arch,
675 const string &Version,
676 unsigned int Op,
677 unsigned int Type)
678 {
679 pkgCache::GrpIterator Grp;
680 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
681 return false;
682
683 // Locate the target package
684 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
685 if (Pkg.end() == true) {
686 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
687 return false;
688 }
689
690 // Is it a file dependency?
691 if (unlikely(PackageName[0] == '/'))
692 FoundFileDeps = true;
693
694 /* Caching the old end point speeds up generation substantially */
695 if (OldDepVer != Ver) {
696 OldDepLast = NULL;
697 OldDepVer = Ver;
698 }
699
700 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
701 }
702 /*}}}*/
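// Example: recording a dependency from a parser (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A hedged sketch of how a concrete ListParser could record
   "Depends: foo (>= 1.0)" for the version it is currently building; the
   package name and version string are invented for illustration. */
#if 0 // illustrative only
   if (NewDepends(Ver,"foo",Ver.Arch(true),"1.0",
                  pkgCache::Dep::GreaterEq,pkgCache::Dep::Depends) == false)
      return false;
#endif
/*}}}*/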
703 // ListParser::NewProvides - Create a Provides element /*{{{*/
704 // ---------------------------------------------------------------------
705 /* */
706 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
707 const string &PkgName,
708 const string &PkgArch,
709 const string &Version)
710 {
711 pkgCache &Cache = Owner->Cache;
712
713 // We do not add self referencing provides
714 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
715 return true;
716
717 // Get a structure
718 unsigned long const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
719 if (unlikely(Provides == 0))
720 return false;
721 Cache.HeaderP->ProvidesCount++;
722
723 // Fill it in
724 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
725 Prv->Version = Ver.Index();
726 Prv->NextPkgProv = Ver->ProvidesList;
727 Ver->ProvidesList = Prv.Index();
728 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
729 return false;
730
731 // Locate the target package
732 pkgCache::PkgIterator Pkg;
733 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
734 return false;
735
736 // Link it to the package
737 Prv->ParentPkg = Pkg.Index();
738 Prv->NextProvides = Pkg->ProvidesList;
739 Pkg->ProvidesList = Prv.Index();
740
741 return true;
742 }
743 /*}}}*/
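// Example: recording a Provides from a parser (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A hedged sketch of a concrete ListParser recording an unversioned
   "Provides: mail-transport-agent" entry for the version at hand; the
   provided name is invented for illustration. */
#if 0 // illustrative only
   if (NewProvides(Ver,"mail-transport-agent",Ver.Arch(true),"") == false)
      return false;
#endif
/*}}}*/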
744 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
745 // ---------------------------------------------------------------------
746 /* This is used to select which file is to be associated with all newly
747 added versions. The caller is responsible for setting the IMS fields. */
748 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
749 const pkgIndexFile &Index,
750 unsigned long Flags)
751 {
752 // Get some space for the structure
753 CurrentFile = Cache.PkgFileP + AllocateInMap(sizeof(*CurrentFile));
754 if (CurrentFile == Cache.PkgFileP)
755 return false;
756
757 // Fill it in
758 CurrentFile->FileName = WriteStringInMap(File);
759 CurrentFile->Site = WriteUniqString(Site);
760 CurrentFile->NextFile = Cache.HeaderP->FileList;
761 CurrentFile->Flags = Flags;
762 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
763 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
764 PkgFileName = File;
765 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
766 Cache.HeaderP->PackageFileCount++;
767
768 if (CurrentFile->FileName == 0)
769 return false;
770
771 if (Progress != 0)
772 Progress->SubProgress(Index.Size());
773 return true;
774 }
775 /*}}}*/
776 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
777 // ---------------------------------------------------------------------
778 /* This is used to create handles to strings. Given the same text it
779 always returns the same number */
780 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
781 unsigned int Size)
782 {
783 /* We use a very small transient hash table here; it speeds up generation
784 by a fair amount on slower machines */
785 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
786 if (Bucket != 0 &&
787 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
788 return Bucket->String;
789
790 // Search for an insertion point
791 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
792 int Res = 1;
793 map_ptrloc *Last = &Cache.HeaderP->StringList;
794 for (; I != Cache.StringItemP; Last = &I->NextItem,
795 I = Cache.StringItemP + I->NextItem)
796 {
797 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
798 if (Res >= 0)
799 break;
800 }
801
802 // Match
803 if (Res == 0)
804 {
805 Bucket = I;
806 return I->String;
807 }
808
809 // Get a structure
810 unsigned long Item = AllocateInMap(sizeof(pkgCache::StringItem));
811 if (Item == 0)
812 return 0;
813
814 // Fill in the structure
815 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
816 ItemP->NextItem = I - Cache.StringItemP;
817 *Last = Item;
818 ItemP->String = WriteStringInMap(S,Size);
819 if (ItemP->String == 0)
820 return 0;
821
822 Bucket = ItemP;
823 return ItemP->String;
824 }
825 /*}}}*/
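// Example: unique strings collapse to one offset (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* Illustration of the guarantee stated above, assuming we are inside
   pkgCacheGenerator: handing in the same text twice yields the same map
   offset, so fields such as Pkg->Arch can be compared as plain integers.
   The "amd64" literal is only an example value. */
#if 0 // illustrative only
   unsigned long const a = WriteUniqString("amd64",5);
   unsigned long const b = WriteUniqString("amd64",5);
   // a == b holds; Cache.StrP + a points at the single shared copy
#endif
/*}}}*/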
826 // CheckValidity - Check that a cache is up-to-date /*{{{*/
827 // ---------------------------------------------------------------------
828 /* This just verifies that each file in the list of index files exists,
829 has attributes matching those in the cache, and that the cache does not
830 contain any extra files. */
831 static bool CheckValidity(const string &CacheFile, FileIterator Start,
832 FileIterator End,MMap **OutMap = 0)
833 {
834 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
835 // No file, certainly invalid
836 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
837 {
838 if (Debug == true)
839 std::clog << "CacheFile doesn't exist" << std::endl;
840 return false;
841 }
842
843 // Map it
844 FileFd CacheF(CacheFile,FileFd::ReadOnly);
845 SPtr<MMap> Map = new MMap(CacheF,0);
846 pkgCache Cache(Map);
847 if (_error->PendingError() == true || Map->Size() == 0)
848 {
849 if (Debug == true)
850 std::clog << "Errors are pending or Map is empty()" << std::endl;
851 _error->Discard();
852 return false;
853 }
854
855 /* Now we check every index file, see if it is in the cache,
856 verify the IMS data and check that it is on the disk too.. */
857 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
858 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
859 for (; Start != End; Start++)
860 {
861 if (Debug == true)
862 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
863 if ((*Start)->HasPackages() == false)
864 {
865 if (Debug == true)
866 std::clog << "Has NO packages" << std::endl;
867 continue;
868 }
869
870 if ((*Start)->Exists() == false)
871 {
872 #if 0 // mvo: we no longer give a message here (Default Sources spec)
873 _error->WarningE("stat",_("Couldn't stat source package list %s"),
874 (*Start)->Describe().c_str());
875 #endif
876 if (Debug == true)
877 std::clog << "file doesn't exist" << std::endl;
878 continue;
879 }
880
881 // FindInCache is also expected to do an IMS check.
882 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
883 if (File.end() == true)
884 {
885 if (Debug == true)
886 std::clog << "FindInCache returned end-Pointer" << std::endl;
887 return false;
888 }
889
890 Visited[File->ID] = true;
891 if (Debug == true)
892 std::clog << "with ID " << File->ID << " is valid" << std::endl;
893 }
894
895 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
896 if (Visited[I] == false)
897 {
898 if (Debug == true)
899 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
900 return false;
901 }
902
903 if (_error->PendingError() == true)
904 {
905 if (Debug == true)
906 {
907 std::clog << "Validity failed because of pending errors:" << std::endl;
908 _error->DumpErrors();
909 }
910 _error->Discard();
911 return false;
912 }
913
914 if (OutMap != 0)
915 *OutMap = Map.UnGuard();
916 return true;
917 }
918 /*}}}*/
919 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
920 // ---------------------------------------------------------------------
921 /* Size is kind of an abstract notion that is only used for the progress
922 meter */
923 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
924 {
925 unsigned long TotalSize = 0;
926 for (; Start != End; Start++)
927 {
928 if ((*Start)->HasPackages() == false)
929 continue;
930 TotalSize += (*Start)->Size();
931 }
932 return TotalSize;
933 }
934 /*}}}*/
935 // BuildCache - Merge the list of index files into the cache /*{{{*/
936 // ---------------------------------------------------------------------
937 /* */
938 static bool BuildCache(pkgCacheGenerator &Gen,
939 OpProgress *Progress,
940 unsigned long &CurrentSize,unsigned long TotalSize,
941 FileIterator Start, FileIterator End)
942 {
943 FileIterator I;
944 for (I = Start; I != End; I++)
945 {
946 if ((*I)->HasPackages() == false)
947 continue;
948
949 if ((*I)->Exists() == false)
950 continue;
951
952 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
953 {
954 _error->Warning("Duplicate sources.list entry %s",
955 (*I)->Describe().c_str());
956 continue;
957 }
958
959 unsigned long Size = (*I)->Size();
960 if (Progress != NULL)
961 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
962 CurrentSize += Size;
963
964 if ((*I)->Merge(Gen,Progress) == false)
965 return false;
966 }
967
968 if (Gen.HasFileDeps() == true)
969 {
970 if (Progress != NULL)
971 Progress->Done();
972 TotalSize = ComputeSize(Start, End);
973 CurrentSize = 0;
974 for (I = Start; I != End; I++)
975 {
976 unsigned long Size = (*I)->Size();
977 if (Progress != NULL)
978 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
979 CurrentSize += Size;
980 if ((*I)->MergeFileProvides(Gen,Progress) == false)
981 return false;
982 }
983 }
984
985 return true;
986 }
987 /*}}}*/
988 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
989 // ---------------------------------------------------------------------
990 /* This makes sure that the status cache (the cache that has all
991 index files from the sources list and all local ones) is ready
992 to be mmapped. If OutMap is not zero then an MMap object representing
993 the cache will be stored there. This is pretty much mandatory if you
994 are using AllowMem. AllowMem lets the function be run as non-root
995 where it builds the cache 'fast' into a memory buffer. */
996 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
997 MMap **OutMap, bool AllowMem)
998 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
999 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1000 MMap **OutMap,bool AllowMem)
1001 {
1002 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1003 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
1004
1005 vector<pkgIndexFile *> Files;
1006 for (vector<metaIndex *>::const_iterator i = List.begin();
1007 i != List.end();
1008 i++)
1009 {
1010 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1011 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1012 j != Indexes->end();
1013 j++)
1014 Files.push_back (*j);
1015 }
1016
1017 unsigned long const EndOfSource = Files.size();
1018 if (_system->AddStatusFiles(Files) == false)
1019 return false;
1020
1021 // Decide if we can write to the files..
1022 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1023 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1024
1025 // ensure the cache directory exists
1026 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1027 {
1028 string dir = _config->FindDir("Dir::Cache");
1029 size_t const len = dir.size();
1030 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1031 dir = dir.substr(0, len - 5);
1032 if (CacheFile.empty() == false)
1033 CreateDirectory(dir, flNotFile(CacheFile));
1034 if (SrcCacheFile.empty() == false)
1035 CreateDirectory(dir, flNotFile(SrcCacheFile));
1036 }
1037
1038 // Decide if we can write to the cache
1039 bool Writeable = false;
1040 if (CacheFile.empty() == false)
1041 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1042 else
1043 if (SrcCacheFile.empty() == false)
1044 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1045 if (Debug == true)
1046 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1047
1048 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1049 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1050
1051 if (Progress != NULL)
1052 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1053
1054 // Cache is OK, Fin.
1055 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1056 {
1057 if (Progress != NULL)
1058 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1059 if (Debug == true)
1060 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1061 return true;
1062 }
1063 else if (Debug == true)
1064 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1065
1066 /* At this point we know we need to reconstruct the package cache,
1067 begin. */
1068 SPtr<FileFd> CacheF;
1069 SPtr<DynamicMMap> Map;
1070 if (Writeable == true && CacheFile.empty() == false)
1071 {
1072 unlink(CacheFile.c_str());
1073 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
1074 fchmod(CacheF->Fd(),0644);
1075 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
1076 if (_error->PendingError() == true)
1077 return false;
1078 if (Debug == true)
1079 std::clog << "Open filebased MMap" << std::endl;
1080 }
1081 else
1082 {
1083 // Just build it in memory..
1084 Map = new DynamicMMap(0,MapSize);
1085 if (Debug == true)
1086 std::clog << "Open memory Map (not filebased)" << std::endl;
1087 }
1088
1089 // Lets try the source cache.
1090 unsigned long CurrentSize = 0;
1091 unsigned long TotalSize = 0;
1092 if (CheckValidity(SrcCacheFile,Files.begin(),
1093 Files.begin()+EndOfSource) == true)
1094 {
1095 if (Debug == true)
1096 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1097 // Preload the map with the source cache
1098 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1099 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1100 if ((alloc == 0 && _error->PendingError())
1101 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1102 SCacheF.Size()) == false)
1103 return false;
1104
1105 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1106
1107 // Build the status cache
1108 pkgCacheGenerator Gen(Map.Get(),Progress);
1109 if (_error->PendingError() == true)
1110 return false;
1111 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1112 Files.begin()+EndOfSource,Files.end()) == false)
1113 return false;
1114
1115 // FIXME: move me to a better place
1116 Gen.FinishCache(Progress);
1117 }
1118 else
1119 {
1120 if (Debug == true)
1121 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1122 TotalSize = ComputeSize(Files.begin(),Files.end());
1123
1124 // Build the source cache
1125 pkgCacheGenerator Gen(Map.Get(),Progress);
1126 if (_error->PendingError() == true)
1127 return false;
1128 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1129 Files.begin(),Files.begin()+EndOfSource) == false)
1130 return false;
1131
1132 // Write it back
1133 if (Writeable == true && SrcCacheFile.empty() == false)
1134 {
1135 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1136 if (_error->PendingError() == true)
1137 return false;
1138
1139 fchmod(SCacheF.Fd(),0644);
1140
1141 // Write out the main data
1142 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1143 return _error->Error(_("IO Error saving source cache"));
1144 SCacheF.Sync();
1145
1146 // Write out the proper header
1147 Gen.GetCache().HeaderP->Dirty = false;
1148 if (SCacheF.Seek(0) == false ||
1149 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1150 return _error->Error(_("IO Error saving source cache"));
1151 Gen.GetCache().HeaderP->Dirty = true;
1152 SCacheF.Sync();
1153 }
1154
1155 // Build the status cache
1156 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1157 Files.begin()+EndOfSource,Files.end()) == false)
1158 return false;
1159
1160 // FIXME: move me to a better place
1161 Gen.FinishCache(Progress);
1162 }
1163 if (Debug == true)
1164 std::clog << "Caches are ready for shipping" << std::endl;
1165
1166 if (_error->PendingError() == true)
1167 return false;
1168 if (OutMap != 0)
1169 {
1170 if (CacheF != 0)
1171 {
1172 delete Map.UnGuard();
1173 *OutMap = new MMap(*CacheF,0);
1174 }
1175 else
1176 {
1177 *OutMap = Map.UnGuard();
1178 }
1179 }
1180
1181 return true;
1182 }
1183 /*}}}*/
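// Example: building and mapping the cache from client code (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A hedged, roughly self-contained sketch of calling MakeStatusCache().
   pkgInitConfig/pkgInitSystem come from <apt-pkg/init.h>; everything else is
   used as declared in this file or its includes. Error handling is minimal. */
#if 0 // illustrative only
#include <apt-pkg/init.h>
#include <iostream>
int main()
{
   pkgInitConfig(*_config);
   pkgInitSystem(*_config,_system);

   pkgSourceList List;
   if (List.ReadMainList() == false)
      return _error->DumpErrors(), 1;

   OpTextProgress Prog;
   MMap *Map = 0;
   if (pkgCacheGenerator::MakeStatusCache(List,&Prog,&Map,true) == false)
      return _error->DumpErrors(), 1;

   pkgCache Cache(Map);
   std::cout << Cache.HeaderP->PackageCount << " packages in the cache" << std::endl;
   delete Map;
   return 0;
}
#endif
/*}}}*/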
1184 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1185 // ---------------------------------------------------------------------
1186 /* */
1187 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1188 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1189 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1190 {
1191 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1192 vector<pkgIndexFile *> Files;
1193 unsigned long EndOfSource = Files.size();
1194 if (_system->AddStatusFiles(Files) == false)
1195 return false;
1196
1197 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1198 unsigned long CurrentSize = 0;
1199 unsigned long TotalSize = 0;
1200
1201 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1202
1203 // Build the status cache
1204 if (Progress != NULL)
1205 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1206 pkgCacheGenerator Gen(Map.Get(),Progress);
1207 if (_error->PendingError() == true)
1208 return false;
1209 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1210 Files.begin()+EndOfSource,Files.end()) == false)
1211 return false;
1212
1213 // FIXME: move me to a better place
1214 Gen.FinishCache(Progress);
1215
1216 if (_error->PendingError() == true)
1217 return false;
1218 *OutMap = Map.UnGuard();
1219
1220 return true;
1221 }
1222 /*}}}*/
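// Example: status-only cache (editor sketch) /*{{{*/
// ---------------------------------------------------------------------
/* A hedged sketch: MakeOnlyStatusCache() suits tools that only need the dpkg
   status information and no remote index files; the map stays in memory. */
#if 0 // illustrative only
   OpTextProgress Prog;
   DynamicMMap *Map = 0;
   if (pkgCacheGenerator::MakeOnlyStatusCache(&Prog,&Map) == true)
   {
      pkgCache Cache(Map);
      // walk installed packages via Cache.PkgBegin() ...
      delete Map;
   }
#endif
/*}}}*/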