Create implicit dependencies needed for Multi-Arch handling
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 #include <system.h>
37 /*}}}*/
// Shorthand for walking the caller-supplied list of index files to merge.
typedef vector<pkgIndexFile *>::iterator FileIterator;

// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),
		    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // An empty map: we are creating a brand new cache file.
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
	 return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      // Refuse to extend a cache built with a different version system.
      if (Cache.VS != _system->VS)
      {
	 _error->Error(_("Cache has an incompatible versioning system"));
	 return;
      }
   }

   // Mark the cache dirty; the destructor clears the flag only after a
   // successful sync, so a crash leaves an obviously-invalid cache.
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
// CacheGenerator::~pkgCacheGenerator - Destructor 			/*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   advoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   // If anything failed during generation, leave the dirty flag set so
   // the next run rebuilds the cache instead of trusting a broken one.
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   // Data is safely on disk: now it is valid to declare the cache clean.
   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine.
   If OutVer is non-NULL only the first merged version is recorded and the
   function returns immediately after it. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
	 return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true)
	 genArch = APT::Configuration::getArchitectures();
      else
	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)
      {
	 // Get a pointer to the package structure
	 pkgCache::PkgIterator Pkg;
	 if (NewPackage(Pkg, PackageName, *arch) == false)
	    return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
	 Counter++;
	 if (Counter % 100 == 0 && Progress != 0)
	    Progress->Progress(List.Offset());

	 /* Get a pointer to the version structure. We know the list is sorted
	    so we use that fact in the search. Insertion of new versions is
	    done with correct sorting */
	 string Version = List.Version();
	 if (Version.empty() == true)
	 {
	    // A record without a version stanza (e.g. a Translation-* file)
	    // only contributes description data to already-known versions.
	    // we first process the package, then the descriptions
	    // (this has the bonus that we get MMap error when we run out
	    // of MMap space)
	    if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
				    PackageName.c_str());

	    // Find the right version to write the description
	    MD5SumValue CurMd5 = List.Description_md5();
	    pkgCache::VerIterator Ver = Pkg.VersionList();
	    map_ptrloc *LastVer = &Pkg->VersionList;

	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       pkgCache::DescIterator Desc = Ver.DescriptionList();
	       map_ptrloc *LastDesc = &Ver->DescriptionList;
	       bool duplicate=false;

	       // don't add a new description if we have one for the given
	       // md5 && language
	       for ( ; Desc.end() == false; Desc++)
		  if (MD5SumValue(Desc.md5()) == CurMd5 &&
		      Desc.LanguageCode() == List.DescriptionLanguage())
		     duplicate=true;
	       if(duplicate)
		  continue;

	       // Append the new translation after the last description
	       // carrying the same md5 (i.e. the same upstream text).
	       for (Desc = Ver.DescriptionList();
		    Desc.end() == false;
		    LastDesc = &Desc->NextDesc, Desc++)
	       {
		  if (MD5SumValue(Desc.md5()) == CurMd5)
		  {
		     // Add new description
		     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
		     Desc->ParentPkg = Pkg.Index();

		     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
			return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
		     break;
		  }
	       }
	    }

	    continue;
	 }

	 // Locate the insertion point: the version list is kept sorted
	 // in descending order, so stop at the first element <= Version.
	 pkgCache::VerIterator Ver = Pkg.VersionList();
	 map_ptrloc *LastVer = &Pkg->VersionList;
	 int Res = 1;
	 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	 {
	    Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	    if (Res >= 0)
	       break;
	 }

	 /* We already have a version for this item, record that we
	    saw it */
	 unsigned long Hash = List.VersionHash();
	 if (Res == 0 && Ver->Hash == Hash)
	 {
	    if (List.UsePackage(Pkg,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				    PackageName.c_str());

	    if (NewFileVer(Ver,List) == false)
	       return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				    PackageName.c_str());

	    // Read only a single record and return
	    if (OutVer != 0)
	    {
	       *OutVer = Ver;
	       FoundFileDeps |= List.HasFileDeps();
	       return true;
	    }

	    continue;
	 }

	 // Skip to the end of the same version set: equal version strings
	 // with a different hash (e.g. different dependency set) are kept
	 // as distinct entries after the existing ones.
	 if (Res == 0)
	 {
	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	       if (Res != 0)
		  break;
	    }
	 }

	 // Add a new version
	 *LastVer = NewVersion(Ver,Version,*LastVer);
	 Ver->ParentPkg = Pkg.Index();
	 Ver->Hash = Hash;

	 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
				 PackageName.c_str());

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
				 PackageName.c_str());

	 // Read only a single record and return
	 if (OutVer != 0)
	 {
	    *OutVer = Ver;
	    FoundFileDeps |= List.HasFileDeps();
	    return true;
	 }

	 /* Record the Description data. Description data always exist in
	    Packages and Translation-* files. */
	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;

	 // Skip to the end of description set
	 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   // The on-disk ID fields are fixed-width; overflowing them would
   // silently alias records, so fail loudly instead.
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
   return true;
}
									/*}}}*/
291 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
292 // ---------------------------------------------------------------------
293 /* If we found any file depends while parsing the main list we need to
294 resolve them. Since it is undesired to load the entire list of files
295 into the cache as virtual packages we do a two stage effort. MergeList
296 identifies the file depends and this creates Provdies for them by
297 re-parsing all the indexs. */
298 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
299 {
300 List.Owner = this;
301
302 unsigned int Counter = 0;
303 while (List.Step() == true)
304 {
305 string PackageName = List.Package();
306 if (PackageName.empty() == true)
307 return false;
308 string Version = List.Version();
309 if (Version.empty() == true)
310 continue;
311
312 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
313 if (Pkg.end() == true)
314 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
315 PackageName.c_str());
316 Counter++;
317 if (Counter % 100 == 0 && Progress != 0)
318 Progress->Progress(List.Offset());
319
320 unsigned long Hash = List.VersionHash();
321 pkgCache::VerIterator Ver = Pkg.VersionList();
322 for (; Ver.end() == false; Ver++)
323 {
324 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
325 {
326 if (List.CollectFileProvides(Cache,Ver) == false)
327 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
328 break;
329 }
330 }
331
332 if (Ver.end() == true)
333 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
334 }
335
336 return true;
337 }
338 /*}}}*/
339 // CacheGenerator::NewGroup - Add a new group /*{{{*/
340 // ---------------------------------------------------------------------
341 /* This creates a new group structure and adds it to the hash table */
342 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
343 Grp = Cache.FindGrp(Name);
344 if (Grp.end() == false)
345 return true;
346
347 // Get a structure
348 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
349 if (unlikely(Group == 0))
350 return false;
351
352 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
353 Grp->Name = Map.WriteString(Name);
354 if (unlikely(Grp->Name == 0))
355 return false;
356
357 // Insert it into the hash table
358 unsigned long const Hash = Cache.Hash(Name);
359 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
360 Cache.HeaderP->GrpHashTable[Hash] = Group;
361
362 Cache.HeaderP->GroupCount++;
363
364 return true;
365 }
366 /*}}}*/
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
				   const string &Arch) {
   // Every package lives inside the (possibly freshly created) group
   // carrying its name; one group holds one package per architecture.
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it into the hash table (prepended to the bucket chain)
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   // NOTE(review): FirstPackage is overwritten on every insertion while
   // LastPackage keeps only the first-ever member; this appears to rely on
   // all group members being adjacent in the shared hash bucket chain —
   // confirm against Grp.FindPkg/NextPkg before changing.
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
									/*}}}*/
407 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
408 // ---------------------------------------------------------------------
409 /* */
410 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
411 ListParser &List)
412 {
413 if (CurrentFile == 0)
414 return true;
415
416 // Get a structure
417 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
418 if (VerFile == 0)
419 return 0;
420
421 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
422 VF->File = CurrentFile - Cache.PkgFileP;
423
424 // Link it to the end of the list
425 map_ptrloc *Last = &Ver->FileList;
426 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
427 Last = &V->NextFile;
428 VF->NextFile = *Last;
429 *Last = VF.Index();
430
431 VF->Offset = List.Offset();
432 VF->Size = List.Size();
433 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
434 Cache.HeaderP->MaxVerFileSize = VF->Size;
435 Cache.HeaderP->VerFileCount++;
436
437 return true;
438 }
439 /*}}}*/
440 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
441 // ---------------------------------------------------------------------
442 /* This puts a version structure in the linked list */
443 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
444 const string &VerStr,
445 unsigned long Next)
446 {
447 // Get a structure
448 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
449 if (Version == 0)
450 return 0;
451
452 // Fill it in
453 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
454 Ver->NextVer = Next;
455 Ver->ID = Cache.HeaderP->VersionCount++;
456 Ver->VerStr = Map.WriteString(VerStr);
457 if (Ver->VerStr == 0)
458 return 0;
459
460 return Version;
461 }
462 /*}}}*/
463 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
464 // ---------------------------------------------------------------------
465 /* */
466 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
467 ListParser &List)
468 {
469 if (CurrentFile == 0)
470 return true;
471
472 // Get a structure
473 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
474 if (DescFile == 0)
475 return false;
476
477 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
478 DF->File = CurrentFile - Cache.PkgFileP;
479
480 // Link it to the end of the list
481 map_ptrloc *Last = &Desc->FileList;
482 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
483 Last = &D->NextFile;
484
485 DF->NextFile = *Last;
486 *Last = DF.Index();
487
488 DF->Offset = List.Offset();
489 DF->Size = List.Size();
490 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
491 Cache.HeaderP->MaxDescFileSize = DF->Size;
492 Cache.HeaderP->DescFileCount++;
493
494 return true;
495 }
496 /*}}}*/
497 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
498 // ---------------------------------------------------------------------
499 /* This puts a description structure in the linked list */
500 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
501 const string &Lang,
502 const MD5SumValue &md5sum,
503 map_ptrloc Next)
504 {
505 // Get a structure
506 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
507 if (Description == 0)
508 return 0;
509
510 // Fill it in
511 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
512 Desc->NextDesc = Next;
513 Desc->ID = Cache.HeaderP->DescriptionCount++;
514 Desc->language_code = Map.WriteString(Lang);
515 Desc->md5sum = Map.WriteString(md5sum.Value());
516 if (Desc->language_code == 0 || Desc->md5sum == 0)
517 return 0;
518
519 return Description;
520 }
521 /*}}}*/
522 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
523 // ---------------------------------------------------------------------
524 /* This prepares the Cache for delivery */
525 bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
526 // FIXME: add progress reporting for this operation
527 // Do we have different architectures in your groups ?
528 vector<string> archs = APT::Configuration::getArchitectures();
529 if (archs.size() > 1) {
530 // Create Conflicts in between the group
531 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
532 string const PkgName = G.Name();
533 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
534 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
535 string const Arch = V.Arch();
536 map_ptrloc *OldDepLast = NULL;
537 /* MultiArch handling introduces a lot of implicit Dependencies:
538 - MultiArch: same → Co-Installable if they have the same version
539 - Architecture: all → Need to be Co-Installable for internal reasons
540 - All others conflict with all other group members */
541 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
542 V->MultiArch == pkgCache::Version::Same);
543 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
544 if (*A == Arch)
545 continue;
546 /* We allow only one installed arch at the time
547 per group, therefore each group member conflicts
548 with all other group members */
549 pkgCache::PkgIterator D = G.FindPkg(*A);
550 if (D.end() == true)
551 continue;
552 if (coInstall == true) {
553 // Replaces: ${self}:other ( << ${binary:Version})
554 NewDepends(D, V, V.VerStr(),
555 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
556 OldDepLast);
557 // Breaks: ${self}:other (!= ${binary:Version})
558 NewDepends(D, V, V.VerStr(),
559 pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
560 OldDepLast);
561 NewDepends(D, V, V.VerStr(),
562 pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
563 OldDepLast);
564 } else {
565 // Conflicts: ${self}:other
566 NewDepends(D, V, "",
567 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
568 OldDepLast);
569 }
570 }
571 }
572 }
573 }
574 }
575 return true;
576 }
577 /*}}}*/
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      // NOTE(review): with the probe loop above commented out, Dep->Version
      // is read before it is ever written here; this relies on Map.Allocate
      // handing out zero-initialized memory — confirm in DynamicMMap.
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }

   // Link it to the package (prepend to the reverse-depends list)
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   // NOTE(review): OldDepLast is a pointer passed BY VALUE, so this update
   // never reaches the caller — the "cached end point" optimization is
   // ineffective and each call re-walks the list. Fixing it requires the
   // parameter to become a reference in the header declaration as well;
   // confirm against pkgcachegen.h before changing.
   OldDepLast = &Dep->NextDepends;

   return true;
}
									/*}}}*/
632 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
633 // ---------------------------------------------------------------------
634 /* This creates a Group and the Package to link this dependency to if
635 needed and handles also the caching of the old endpoint */
636 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
637 const string &PackageName,
638 const string &Arch,
639 const string &Version,
640 unsigned int Op,
641 unsigned int Type)
642 {
643 pkgCache::GrpIterator Grp;
644 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
645 return false;
646
647 // Locate the target package
648 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
649 if (Pkg.end() == true) {
650 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
651 return false;
652 }
653
654 // Is it a file dependency?
655 if (unlikely(PackageName[0] == '/'))
656 FoundFileDeps = true;
657
658 /* Caching the old end point speeds up generation substantially */
659 if (OldDepVer != Ver) {
660 OldDepLast = NULL;
661 OldDepVer = Ver;
662 }
663
664 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
665 }
666 /*}}}*/
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* Records that Ver provides PackageName (optionally "= Version") by
   linking a Provides record into both the version's and the providing
   package's lists. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PackageName,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (unlikely(Ver.ParentPkg().Name() == PackageName))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   // NOTE(review): NewPackage can allocate from the map after Prv was
   // created; if allocation can remap the cache the Prv iterator might
   // point at stale memory — confirm DynamicMMap semantics.
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PackageName,string(Ver.Arch())) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
									/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure; on allocation failure the offset
   // is 0, leaving CurrentFile equal to PkgFileP.
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in and prepend it to the global file list
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
									/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   // NOTE(review): the bucket key reads S[1] — for a one-character string
   // that is the NUL terminator, which is safe for C strings but assumes S
   // is NUL-terminated even though Size is passed separately; confirm callers.
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point in the sorted intern list
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }

   // Match: remember it in the transient hash for the next lookup
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure and splice it in before I
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
// CheckValidity - Check that a cache is up-to-date 			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
      return false;

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      // Failure to open/parse just means "not valid", not a hard error.
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
	 continue;

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
	 return false;

      Visited[File->ID] = true;
   }

   // A cache entry no current source accounts for invalidates the cache.
   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
	 return false;

   if (_error->PendingError() == true)
   {
      _error->Discard();
      return false;
   }

   // Hand the still-open map to the caller if requested.
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
852 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
853 // ---------------------------------------------------------------------
854 /* Size is kind of an abstract notion that is only used for the progress
855 meter */
856 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
857 {
858 unsigned long TotalSize = 0;
859 for (; Start != End; Start++)
860 {
861 if ((*Start)->HasPackages() == false)
862 continue;
863 TotalSize += (*Start)->Size();
864 }
865 return TotalSize;
866 }
867 /*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* Merge every index file in [Start,End) into Gen, updating the progress
   meter; afterwards do a second pass to collect file provides if any
   file dependencies were seen. */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;

      if ((*I)->Exists() == false)
	 continue;

      // Skip (and warn about) index files that were merged already.
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }

   // Second pass: resolve file dependencies found during the merge by
   // re-parsing all indexes for matching file provides.
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }

   return true;
}
									/*}}}*/
918 // MakeStatusCache - Construct the status cache /*{{{*/
919 // ---------------------------------------------------------------------
920 /* This makes sure that the status cache (the cache that has all
921 index files from the sources list and all local ones) is ready
922 to be mmaped. If OutMap is not zero then a MMap object representing
923 the cache will be stored there. This is pretty much mandetory if you
924 are using AllowMem. AllowMem lets the function be run as non-root
925 where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   // Hard upper bound on the mmap backing the cache; the generator will
   // raise a pending error if the cache outgrows this.
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
   
   // Gather every package index file from all sources.list entries.
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }
   
   // Remember where the source indexes end: the status files appended by
   // _system below are never part of the source (srcpkgcache) portion.
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;
   
   // Decide if we can write to the files..
   string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
   
   // Decide if we can write to the cache: check the containing directory
   // of the pkgcache, falling back to the srcpkgcache directory.
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   
   // Not writeable and not allowed to build in memory -> hard error.
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
   
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   
   // Cache is OK, Fin. (CheckValidity fills *OutMap when it succeeds.)
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      return true;
   }
   
   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // File-backed build: replace any stale cache file on disk.
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);      // world-readable regardless of umask
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
   }
   
   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      // Preload the map with the source cache; bail out if the allocation
      // failed (0 returned with an error pending) or the read fails.
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)
	 return false;
      
      // Only the status files (past EndOfSource) still need merging.
      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
      
      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
      
      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      // Source cache is stale or missing: rebuild everything.
      TotalSize = ComputeSize(Files.begin(),Files.end());
      
      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;
      
      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;
	 
	 fchmod(SCacheF.Fd(),0644);
	 
	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();
	 
	 // Write out the proper header: clear Dirty only in the on-disk
	 // copy, then restore it in memory since the map is still being
	 // mutated by the status-cache merge below.
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }
      
      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
      
      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   
   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // File-backed build: destroy the DynamicMMap first (presumably so
	 // pending writes reach the file -- confirm against DynamicMMap's
	 // destructor), then hand the caller a plain read-only MMap view.
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 // Memory build: transfer ownership of the map itself.
	 *OutMap = Map.UnGuard();
      }
   }
   
   return true;
}
1078 /*}}}*/
1079 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1080 // ---------------------------------------------------------------------
1081 /* */
1082 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1083 {
1084 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1085 vector<pkgIndexFile *> Files;
1086 unsigned long EndOfSource = Files.size();
1087 if (_system->AddStatusFiles(Files) == false)
1088 return false;
1089
1090 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1091 unsigned long CurrentSize = 0;
1092 unsigned long TotalSize = 0;
1093
1094 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1095
1096 // Build the status cache
1097 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1098 pkgCacheGenerator Gen(Map.Get(),&Progress);
1099 if (_error->PendingError() == true)
1100 return false;
1101 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1102 Files.begin()+EndOfSource,Files.end()) == false)
1103 return false;
1104
1105 // FIXME: move me to a better place
1106 Gen.FinishCache(Progress);
1107
1108 if (_error->PendingError() == true)
1109 return false;
1110 *OutMap = Map.UnGuard();
1111
1112 return true;
1113 }
1114 /*}}}*/