criss-cross merge with my sid branch
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* Bind the generator to the given dynamic mmap.  We set the dirty flag
   in the on-disk header and sync it immediately, so that a crash during
   generation leaves a cache that is recognisably incomplete.  The
   destructor clears the flag again after a successful sync.
   pMap - the (possibly empty) memory map the cache is built in
   Prog - optional progress reporter, may be 0 */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),
		    FoundFileDeps(0)
{
   CurrentFile = 0;
   // Clear the transient string-uniquing hash used by WriteUniqString.
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Fresh map: carve out space for the header at the very start.
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
	 return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header: record the version system and architecture so a
      // later run can detect an incompatible cache.
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
	 _error->Error(_("Cache has an incompatible versioning system"));
	 return;
      }
   }

   // Mark the cache dirty on disk until the destructor completes cleanly.
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 advoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine.
   If OutVer is non-zero only a single record is read and the matching
   version iterator is returned through it. Returns false on error (with
   a message pushed onto _error). */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
	 return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
	 genArch = APT::Configuration::getArchitectures();
	 // With more than one configured arch, also create the "all"
	 // pseudo package itself.
	 if (genArch.size() != 1)
	    genArch.push_back("all");
      } else
	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)
      {
	 // Get a pointer to the package structure (created on demand).
	 pkgCache::PkgIterator Pkg;
	 if (NewPackage(Pkg, PackageName, *arch) == false)
	    return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
	 Counter++;
	 if (Counter % 100 == 0 && Progress != 0)
	    Progress->Progress(List.Offset());

	 /* Get a pointer to the version structure. We know the list is sorted
	    so we use that fact in the search. Insertion of new versions is
	    done with correct sorting */
	 string Version = List.Version();
	 if (Version.empty() == true)
	 {
	    // Version-less record (e.g. a Translation-* entry): only merge
	    // package data and attach descriptions to existing versions.
	    // we first process the package, then the descriptions
	    // (this has the bonus that we get MMap error when we run out
	    //  of MMap space)
	    if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
				    PackageName.c_str());

	    // Find the right version to write the description
	    MD5SumValue CurMd5 = List.Description_md5();
	    pkgCache::VerIterator Ver = Pkg.VersionList();
	    map_ptrloc *LastVer = &Pkg->VersionList;

	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       pkgCache::DescIterator Desc = Ver.DescriptionList();
	       map_ptrloc *LastDesc = &Ver->DescriptionList;
	       bool duplicate=false;

	       // don't add a new description if we have one for the given
	       // md5 && language
	       for ( ; Desc.end() == false; Desc++)
		  if (MD5SumValue(Desc.md5()) == CurMd5 &&
		      Desc.LanguageCode() == List.DescriptionLanguage())
		     duplicate=true;
	       if(duplicate)
		  continue;

	       // Append the translated description next to the first
	       // description carrying the same md5 checksum.
	       for (Desc = Ver.DescriptionList();
		    Desc.end() == false;
		    LastDesc = &Desc->NextDesc, Desc++)
	       {
		  if (MD5SumValue(Desc.md5()) == CurMd5)
		  {
		     // Add new description
		     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
		     Desc->ParentPkg = Pkg.Index();

		     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
			return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
		     break;
		  }
	       }
	    }

	    continue;
	 }

	 // Locate the insertion point in the (descending) sorted version list.
	 pkgCache::VerIterator Ver = Pkg.VersionList();
	 map_ptrloc *LastVer = &Pkg->VersionList;
	 int Res = 1;
	 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	 {
	    Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	    if (Res >= 0)
	       break;
	 }

	 /* We already have a version for this item, record that we
	    saw it */
	 unsigned long Hash = List.VersionHash();
	 if (Res == 0 && Ver->Hash == Hash)
	 {
	    if (List.UsePackage(Pkg,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				    PackageName.c_str());

	    if (NewFileVer(Ver,List) == false)
	       return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				    PackageName.c_str());

	    // Read only a single record and return
	    if (OutVer != 0)
	    {
	       *OutVer = Ver;
	       FoundFileDeps |= List.HasFileDeps();
	       return true;
	    }

	    continue;
	 }

	 // Skip to the end of the same version set: equal version strings
	 // with different hashes are distinct entries (e.g. other arches).
	 if (Res == 0)
	 {
	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	       if (Res != 0)
		  break;
	    }
	 }

	 // Add a new version, spliced in at the insertion point found above.
	 *LastVer = NewVersion(Ver,Version,*LastVer);
	 Ver->ParentPkg = Pkg.Index();
	 Ver->Hash = Hash;

	 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
				 PackageName.c_str());

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
				 PackageName.c_str());

	 // Read only a single record and return
	 if (OutVer != 0)
	 {
	    *OutVer = Ver;
	    FoundFileDeps |= List.HasFileDeps();
	    return true;
	 }

	 /* Record the Description data. Description data always exist in
	    Packages and Translation-* files. */
	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;

	 // Skip to the end of description set
	 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   // The ID fields are fixed-width; refuse to overflow them.
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
   return true;
}
									/*}}}*/
293 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
294 // ---------------------------------------------------------------------
295 /* If we found any file depends while parsing the main list we need to
296 resolve them. Since it is undesired to load the entire list of files
297 into the cache as virtual packages we do a two stage effort. MergeList
298 identifies the file depends and this creates Provdies for them by
299 re-parsing all the indexs. */
300 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
301 {
302 List.Owner = this;
303
304 unsigned int Counter = 0;
305 while (List.Step() == true)
306 {
307 string PackageName = List.Package();
308 if (PackageName.empty() == true)
309 return false;
310 string Version = List.Version();
311 if (Version.empty() == true)
312 continue;
313
314 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
315 if (Pkg.end() == true)
316 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
317 PackageName.c_str());
318 Counter++;
319 if (Counter % 100 == 0 && Progress != 0)
320 Progress->Progress(List.Offset());
321
322 unsigned long Hash = List.VersionHash();
323 pkgCache::VerIterator Ver = Pkg.VersionList();
324 for (; Ver.end() == false; Ver++)
325 {
326 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
327 {
328 if (List.CollectFileProvides(Cache,Ver) == false)
329 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
330 break;
331 }
332 }
333
334 if (Ver.end() == true)
335 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
336 }
337
338 return true;
339 }
340 /*}}}*/
341 // CacheGenerator::NewGroup - Add a new group /*{{{*/
342 // ---------------------------------------------------------------------
343 /* This creates a new group structure and adds it to the hash table */
344 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
345 Grp = Cache.FindGrp(Name);
346 if (Grp.end() == false)
347 return true;
348
349 // Get a structure
350 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
351 if (unlikely(Group == 0))
352 return false;
353
354 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
355 Grp->Name = Map.WriteString(Name);
356 if (unlikely(Grp->Name == 0))
357 return false;
358
359 // Insert it into the hash table
360 unsigned long const Hash = Cache.Hash(Name);
361 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
362 Cache.HeaderP->GrpHashTable[Hash] = Group;
363
364 Cache.HeaderP->GroupCount++;
365
366 return true;
367 }
368 /*}}}*/
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table.
   The package is created inside the group for Name (created on demand)
   for the given architecture. Pkg is left pointing at the (possibly
   pre-existing) package; false is returned on allocation failure. */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
					const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   // Already present for this arch? Nothing to do.
   Pkg = Grp.FindPkg(Arch);
      if (Pkg.end() == false)
	 return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it into the hash table (newest entry becomes the bucket head)
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   // NOTE(review): FirstPackage is overwritten unconditionally on every
   // insertion while LastPackage is only set once - so FirstPackage tracks
   // the newest member (the hash-chain head) and LastPackage the oldest.
   // Confirm against pkgCache::GrpIterator's traversal expectations.
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
									/*}}}*/
409 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
410 // ---------------------------------------------------------------------
411 /* */
412 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
413 ListParser &List)
414 {
415 if (CurrentFile == 0)
416 return true;
417
418 // Get a structure
419 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
420 if (VerFile == 0)
421 return 0;
422
423 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
424 VF->File = CurrentFile - Cache.PkgFileP;
425
426 // Link it to the end of the list
427 map_ptrloc *Last = &Ver->FileList;
428 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
429 Last = &V->NextFile;
430 VF->NextFile = *Last;
431 *Last = VF.Index();
432
433 VF->Offset = List.Offset();
434 VF->Size = List.Size();
435 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
436 Cache.HeaderP->MaxVerFileSize = VF->Size;
437 Cache.HeaderP->VerFileCount++;
438
439 return true;
440 }
441 /*}}}*/
442 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
443 // ---------------------------------------------------------------------
444 /* This puts a version structure in the linked list */
445 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
446 const string &VerStr,
447 unsigned long Next)
448 {
449 // Get a structure
450 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
451 if (Version == 0)
452 return 0;
453
454 // Fill it in
455 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
456 Ver->NextVer = Next;
457 Ver->ID = Cache.HeaderP->VersionCount++;
458 Ver->VerStr = Map.WriteString(VerStr);
459 if (Ver->VerStr == 0)
460 return 0;
461
462 return Version;
463 }
464 /*}}}*/
465 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
466 // ---------------------------------------------------------------------
467 /* */
468 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
469 ListParser &List)
470 {
471 if (CurrentFile == 0)
472 return true;
473
474 // Get a structure
475 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
476 if (DescFile == 0)
477 return false;
478
479 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
480 DF->File = CurrentFile - Cache.PkgFileP;
481
482 // Link it to the end of the list
483 map_ptrloc *Last = &Desc->FileList;
484 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
485 Last = &D->NextFile;
486
487 DF->NextFile = *Last;
488 *Last = DF.Index();
489
490 DF->Offset = List.Offset();
491 DF->Size = List.Size();
492 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
493 Cache.HeaderP->MaxDescFileSize = DF->Size;
494 Cache.HeaderP->DescFileCount++;
495
496 return true;
497 }
498 /*}}}*/
499 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
500 // ---------------------------------------------------------------------
501 /* This puts a description structure in the linked list */
502 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
503 const string &Lang,
504 const MD5SumValue &md5sum,
505 map_ptrloc Next)
506 {
507 // Get a structure
508 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
509 if (Description == 0)
510 return 0;
511
512 // Fill it in
513 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
514 Desc->NextDesc = Next;
515 Desc->ID = Cache.HeaderP->DescriptionCount++;
516 Desc->language_code = Map.WriteString(Lang);
517 Desc->md5sum = Map.WriteString(md5sum.Value());
518 if (Desc->language_code == 0 || Desc->md5sum == 0)
519 return 0;
520
521 return Description;
522 }
523 /*}}}*/
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery. With multiple configured
   architectures it injects the implicit inter-architecture dependencies
   (Conflicts/Breaks/Replaces/Depends) between the per-arch siblings of
   every group, so that at most one arch of a package is installable
   unless the version is marked MultiArch same/all. */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
	 string const PkgName = G.Name();
	 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
	    // "all" pseudo packages get no implicit conflicts of their own.
	    if (strcmp(P.Arch(),"all") == 0)
	       continue;
	    pkgCache::PkgIterator allPkg;
	    for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
	       string const Arch = V.Arch(true);
	       // NOTE(review): OldDepLast is reset to NULL for every version
	       // and NewDepends takes it by value, so the tail cache never
	       // carries over between calls here - each NewDepends call
	       // rescans the dependency list. Confirm whether the parameter
	       // was meant to be a reference.
	       map_ptrloc *OldDepLast = NULL;
	       /* MultiArch handling introduces a lot of implicit Dependencies:
		  - MultiArch: same → Co-Installable if they have the same version
		  - Architecture: all → Need to be Co-Installable for internal reasons
		  - All others conflict with all other group members */
	       bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
				       V->MultiArch == pkgCache::Version::Same);
	       // Look the "all" sibling up once per package at most.
	       if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
		  allPkg = G.FindPkg("all");
	       for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
		  if (*A == Arch)
		     continue;
		  /* We allow only one installed arch at the time
		     per group, therefore each group member conflicts
		     with all other group members */
		  pkgCache::PkgIterator D = G.FindPkg(*A);
		  if (D.end() == true)
		     continue;
		  if (coInstall == true) {
		     // Replaces: ${self}:other ( << ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::Replaces,
				OldDepLast);
		     // Breaks: ${self}:other (!= ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     if (V->MultiArch == pkgCache::Version::All) {
			// Depend on ${self}:all which does depend on nothing
			NewDepends(allPkg, V, V.VerStr(),
				   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
				   OldDepLast);
		     }
		  } else {
		     // Conflicts: ${self}:other
		     NewDepends(D, V, "",
				pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
				OldDepLast);
		  }
	       }
	    }
	 }
      }
   }
   return true;
}
									/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to.
   Pkg        - target package of the dependency
   Ver        - version the dependency belongs to
   Version    - version restriction string ("" for none)
   Op/Type    - pkgCache::Dep comparison operator and dependency type
   OldDepLast - optional cached tail of Ver's dependency list; when NULL
                the list is rescanned to find the tail. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      // Dep->Version relies on Map.Allocate zero-filling new records; the
      // string-sharing probe above is disabled, so it is written fresh here.
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }

   // Link it to the package's reverse-depends chain (newest first)
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   // NOTE(review): OldDepLast is a by-value parameter, so this final update
   // is invisible to the caller - the tail caching in ListParser::NewDepends
   // appears ineffective. Confirm whether this should be 'map_ptrloc *&'.
   OldDepLast = &Dep->NextDepends;

   return true;
}
									/*}}}*/
// ListParser::NewDepends - Create the environment for a new dependency	/*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and handles also the caching of the old endpoint: the tail
   pointer of Ver's dependency list is remembered across consecutive
   calls for the same version so the list need not be rescanned. */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
					       const string &PackageName,
					       const string &Arch,
					       const string &Version,
					       unsigned int Op,
					       unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package (create it if this is the first reference)
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
	 return false;
   }

   // Is it a file dependency? Those are resolved later by MergeFileProvides.
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
									/*}}}*/
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* Record that Ver provides PkgName (of PkgArch), optionally with a
   provided version string. Self-referencing provides are ignored.
   Returns false on allocation failure. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PkgName,
						const string &PkgArch,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in and link it into the version's provides chain
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package (created on demand)
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
									/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields.
   File  - the package file's path, Site - its origin site
   Index - the index file object (provides type label and size)
   Flags - pkgCache::Flag values for the file */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure; Allocate returns 0 on failure,
   // which would leave CurrentFile equal to the array base.
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in and push it onto the header's file list
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   // WriteString returns 0 when the map filled up.
   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
									/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number. Strings are kept in a sorted linked
   list; a small transient hash (UniqHash) caches recent hits.
   S/Size - the string (not necessarily NUL-terminated at S+Size). */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   // Bucket index mixes the first two characters of the string.
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point in the sorted list
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }

   // Match: remember it in the transient hash and reuse the handle
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure, splicing it in at the insertion point
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. On success the mmap of the cache is optionally
   handed back through OutMap. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
	 std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
	 std::clog << "Errors are pending or Map is empty()" << std::endl;
      // Swallow the errors: an invalid cache just triggers a rebuild.
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   // Visited[ID] marks cache files matched by an index; any unmatched
   // cache file afterwards means the cache has stale extras.
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
	 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
	 if (Debug == true)
	    std::clog << "Has NO packages" << std::endl;
	 continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
	 if (Debug == true)
	    std::clog << "file doesn't exist" << std::endl;
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
	 if (Debug == true)
	    std::clog << "FindInCache returned end-Pointer" << std::endl;
	 return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
	 std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
	 if (Debug == true)
	    std::clog << "File with ID" << I << " wasn't visited" << std::endl;
	 return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
	 std::clog << "Validity failed because of pending errors:" << std::endl;
	 _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   // Transfer ownership of the mmap to the caller if requested.
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
896 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
897 // ---------------------------------------------------------------------
898 /* Size is kind of an abstract notion that is only used for the progress
899 meter */
900 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
901 {
902 unsigned long TotalSize = 0;
903 for (; Start != End; Start++)
904 {
905 if ((*Start)->HasPackages() == false)
906 continue;
907 TotalSize += (*Start)->Size();
908 }
909 return TotalSize;
910 }
911 /*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* Feed every index file in [Start,End) through the generator, updating
   the overall progress meter; duplicate sources.list entries are warned
   about and skipped. If any file dependencies were discovered, a second
   pass collects the file provides. */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;

      if ((*I)->Exists() == false)
	 continue;

      // Already merged once (e.g. listed twice in sources.list)?
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }

   // Second pass: resolve file dependencies found during the merge.
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }

   return true;
}
									/*}}}*/
// MakeStatusCache - Construct the status cache			/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all 
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
   
   // Flatten the source list into one vector of index files
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }
   
   // Files[0,EndOfSource) are the sources.list indexes; the status
   // files appended by the system below start at EndOfSource.
   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache: we need write access to the
   // directory that will hold the cache file (the file may not exist yet).
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
   
   // A configured pkgcache we cannot write and no permission to fall
   // back to a memory build is a hard error.
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
   
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   
   // Cache is OK, Fin.  CheckValidity fills *OutMap itself when valid.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
	 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
	std::clog << "pkgcache.bin is NOT valid" << std::endl;
   
   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // Build straight into the on-disk cache file through a public mmap
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
      if (Debug == true)
	 std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
	 std::clog << "Open memory Map (not filebased)" << std::endl;
   }
   
   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache: allocate room in the map
      // and read the cache file's bytes directly into it.
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)
	 return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache on top of the preloaded source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());
      
      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;
      
      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;
	 
	 fchmod(SCacheF.Fd(),0644);
	 
	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();
	 
	 // Write out the proper header: temporarily clear the Dirty flag
	 // so the file on disk is marked clean, then restore it because
	 // the in-memory map is still being extended below.
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }
      
      // Build the status cache on top of the just-built source cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // File-backed build: hand back a fresh read-only map of the
	 // cache file instead of the (larger) dynamic build map.
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 *OutMap = Map.UnGuard();
      }
   }
   
   return true;
}
									/*}}}*/
1140 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1141 // ---------------------------------------------------------------------
1142 /* */
1143 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1144 {
1145 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1146 vector<pkgIndexFile *> Files;
1147 unsigned long EndOfSource = Files.size();
1148 if (_system->AddStatusFiles(Files) == false)
1149 return false;
1150
1151 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1152 unsigned long CurrentSize = 0;
1153 unsigned long TotalSize = 0;
1154
1155 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1156
1157 // Build the status cache
1158 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1159 pkgCacheGenerator Gen(Map.Get(),&Progress);
1160 if (_error->PendingError() == true)
1161 return false;
1162 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1163 Files.begin()+EndOfSource,Files.end()) == false)
1164 return false;
1165
1166 // FIXME: move me to a better place
1167 Gen.FinishCache(Progress);
1168
1169 if (_error->PendingError() == true)
1170 return false;
1171 *OutMap = Map.UnGuard();
1172
1173 return true;
1174 }
1175 /*}}}*/