Try to use NotEquals for the MultiArch Breaks dependencies instead of
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1// -*- mode: cpp; mode: fold -*-
2// Description /*{{{*/
3// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4/* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12// Include Files /*{{{*/
13#define APT_COMPATIBILITY 986
14
15#include <apt-pkg/pkgcachegen.h>
16#include <apt-pkg/error.h>
17#include <apt-pkg/version.h>
18#include <apt-pkg/progress.h>
19#include <apt-pkg/sourcelist.h>
20#include <apt-pkg/configuration.h>
21#include <apt-pkg/aptconfiguration.h>
22#include <apt-pkg/strutl.h>
23#include <apt-pkg/sptr.h>
24#include <apt-pkg/pkgsystem.h>
25#include <apt-pkg/macros.h>
26
27#include <apt-pkg/tagfile.h>
28
29#include <apti18n.h>
30
31#include <vector>
32
33#include <sys/stat.h>
34#include <unistd.h>
35#include <errno.h>
36#include <stdio.h>
37 /*}}}*/
38typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40// CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41// ---------------------------------------------------------------------
42/* We set the dirty flag and make sure that it is written to the disk */
43pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46{
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82}
83 /*}}}*/
84// CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85// ---------------------------------------------------------------------
86/* We sync the data then unset the dirty flag in two steps so as to
87 avoid a problem during a crash */
88pkgCacheGenerator::~pkgCacheGenerator()
89{
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97}
98 /*}}}*/
99// CacheGenerator::MergeList - Merge the package list /*{{{*/
100// ---------------------------------------------------------------------
101/* This provides the generation of the entries in the cache. Each loop
102 goes through a single package record from the underlying parse engine. */
103bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105{
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 /* As we handle Architecture: all packages as architecture bound
116 we add all information to every (simulated) arch package */
117 std::vector<string> genArch;
118 if (List.ArchitectureAll() == true) {
119 genArch = APT::Configuration::getArchitectures();
120 if (genArch.size() != 1)
121 genArch.push_back("all");
122 } else
123 genArch.push_back(List.Architecture());
124
125 for (std::vector<string>::const_iterator arch = genArch.begin();
126 arch != genArch.end(); ++arch)
127 {
128 // Get a pointer to the package structure
129 pkgCache::PkgIterator Pkg;
130 if (NewPackage(Pkg, PackageName, *arch) == false)
131 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
132 Counter++;
133 if (Counter % 100 == 0 && Progress != 0)
134 Progress->Progress(List.Offset());
135
136 /* Get a pointer to the version structure. We know the list is sorted
137 so we use that fact in the search. Insertion of new versions is
138 done with correct sorting */
139 string Version = List.Version();
140 if (Version.empty() == true)
141 {
142 // we first process the package, then the descriptions
143 // (this has the bonus that we get a MMap error when we run out
144 // of MMap space)
145 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
146 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
147 PackageName.c_str());
148
149 // Find the right version to write the description
150 MD5SumValue CurMd5 = List.Description_md5();
151 pkgCache::VerIterator Ver = Pkg.VersionList();
152 map_ptrloc *LastVer = &Pkg->VersionList;
153
154 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
155 {
156 pkgCache::DescIterator Desc = Ver.DescriptionList();
157 map_ptrloc *LastDesc = &Ver->DescriptionList;
158 bool duplicate=false;
159
160 // don't add a new description if we have one for the given
161 // md5 && language
162 for ( ; Desc.end() == false; Desc++)
163 if (MD5SumValue(Desc.md5()) == CurMd5 &&
164 Desc.LanguageCode() == List.DescriptionLanguage())
165 duplicate=true;
166 if(duplicate)
167 continue;
168
169 for (Desc = Ver.DescriptionList();
170 Desc.end() == false;
171 LastDesc = &Desc->NextDesc, Desc++)
172 {
173 if (MD5SumValue(Desc.md5()) == CurMd5)
174 {
175 // Add new description
176 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
177 Desc->ParentPkg = Pkg.Index();
178
179 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
180 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
181 break;
182 }
183 }
184 }
185
186 continue;
187 }
188
189 pkgCache::VerIterator Ver = Pkg.VersionList();
190 map_ptrloc *LastVer = &Pkg->VersionList;
191 int Res = 1;
192 unsigned long const Hash = List.VersionHash();
193 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
194 {
195 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
196 // Version is higher than the current version - insert here
197 if (Res > 0)
198 break;
199 // Version strings are equal - is the hash also equal?
200 if (Res == 0 && Ver->Hash == Hash)
201 break;
202 // proceed with the next version till we have found either the right one
203 // or a lower version (in front of which the new one will be inserted)
204 }
205
206 /* We already have a version for this item, record that we saw it */
207 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
208 {
209 if (List.UsePackage(Pkg,Ver) == false)
210 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
211 PackageName.c_str());
212
213 if (NewFileVer(Ver,List) == false)
214 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
215 PackageName.c_str());
216
217 // Read only a single record and return
218 if (OutVer != 0)
219 {
220 *OutVer = Ver;
221 FoundFileDeps |= List.HasFileDeps();
222 return true;
223 }
224
225 continue;
226 }
227
228 // Add a new version
229 *LastVer = NewVersion(Ver,Version,*LastVer);
230 Ver->ParentPkg = Pkg.Index();
231 Ver->Hash = Hash;
232
233 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
234 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
235 PackageName.c_str());
236
237 if (List.UsePackage(Pkg,Ver) == false)
238 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
239 PackageName.c_str());
240
241 if (NewFileVer(Ver,List) == false)
242 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
243 PackageName.c_str());
244
245 // Read only a single record and return
246 if (OutVer != 0)
247 {
248 *OutVer = Ver;
249 FoundFileDeps |= List.HasFileDeps();
250 return true;
251 }
252
253 /* Record the Description data. Description data always exist in
254 Packages and Translation-* files. */
255 pkgCache::DescIterator Desc = Ver.DescriptionList();
256 map_ptrloc *LastDesc = &Ver->DescriptionList;
257
258 // Skip to the end of description set
259 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
260
261 // Add new description
262 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
263 Desc->ParentPkg = Pkg.Index();
264
265 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
266 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
267 }
268 }
269
270 FoundFileDeps |= List.HasFileDeps();
271
272 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
273 return _error->Error(_("Wow, you exceeded the number of package "
274 "names this APT is capable of."));
275 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
276 return _error->Error(_("Wow, you exceeded the number of versions "
277 "this APT is capable of."));
278 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
279 return _error->Error(_("Wow, you exceeded the number of descriptions "
280 "this APT is capable of."));
281 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
282 return _error->Error(_("Wow, you exceeded the number of dependencies "
283 "this APT is capable of."));
284 return true;
285}
286 /*}}}*/
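// A worked example (hypothetical versions, not from any index file) of the
// insertion loop above: the version list is kept sorted from highest to
// lowest, so for an existing chain 2.0 -> 1.5 -> 1.0 an incoming "1.8"
// yields CmpVersion("1.8","2.0") < 0 (keep walking) and then
// CmpVersion("1.8","1.5") > 0 (Res > 0, break); NewVersion() is then linked
// in via *LastVer, which at that point is &(2.0)->NextVer, so the chain
// becomes 2.0 -> 1.8 -> 1.5 -> 1.0. An incoming "1.5" with an identical
// VersionHash() instead hits the Res == 0 && Ver->Hash == Hash case and only
// gains an additional VerFile association through NewFileVer().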
287// CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
288// ---------------------------------------------------------------------
289/* If we found any file depends while parsing the main list we need to
290 resolve them. Since it is undesirable to load the entire list of files
291 into the cache as virtual packages we make a two-stage effort. MergeList
292 identifies the file depends and this creates Provides for them by
293 re-parsing all the indexes. */
294bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
295{
296 List.Owner = this;
297
298 unsigned int Counter = 0;
299 while (List.Step() == true)
300 {
301 string PackageName = List.Package();
302 if (PackageName.empty() == true)
303 return false;
304 string Version = List.Version();
305 if (Version.empty() == true)
306 continue;
307
308 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
309 if (Pkg.end() == true)
310 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
311 PackageName.c_str());
312 Counter++;
313 if (Counter % 100 == 0 && Progress != 0)
314 Progress->Progress(List.Offset());
315
316 unsigned long Hash = List.VersionHash();
317 pkgCache::VerIterator Ver = Pkg.VersionList();
318 for (; Ver.end() == false; Ver++)
319 {
320 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare contents, not char pointers
321 {
322 if (List.CollectFileProvides(Cache,Ver) == false)
323 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
324 break;
325 }
326 }
327
328 if (Ver.end() == true)
329 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
330 }
331
332 return true;
333}
334 /*}}}*/
335// CacheGenerator::NewGroup - Add a new group /*{{{*/
336// ---------------------------------------------------------------------
337/* This creates a new group structure and adds it to the hash table */
338bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
339{
340 Grp = Cache.FindGrp(Name);
341 if (Grp.end() == false)
342 return true;
343
344 // Get a structure
345 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
346 if (unlikely(Group == 0))
347 return false;
348
349 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
350 Grp->Name = Map.WriteString(Name);
351 if (unlikely(Grp->Name == 0))
352 return false;
353
354 // Insert it into the hash table
355 unsigned long const Hash = Cache.Hash(Name);
356 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
357 Cache.HeaderP->GrpHashTable[Hash] = Group;
358
359 Grp->ID = Cache.HeaderP->GroupCount++;
360 return true;
361}
362 /*}}}*/
363// CacheGenerator::NewPackage - Add a new package /*{{{*/
364// ---------------------------------------------------------------------
365/* This creates a new package structure and adds it to the hash table */
366bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
367 const string &Arch) {
368 pkgCache::GrpIterator Grp;
369 if (unlikely(NewGroup(Grp, Name) == false))
370 return false;
371
372 Pkg = Grp.FindPkg(Arch);
373 if (Pkg.end() == false)
374 return true;
375
376 // Get a structure
377 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
378 if (unlikely(Package == 0))
379 return false;
380 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
381
382 // Insert the package into our package list
383 if (Grp->FirstPackage == 0) // the group is new
384 {
385 // Insert it into the hash table
386 unsigned long const Hash = Cache.Hash(Name);
387 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
388 Cache.HeaderP->PkgHashTable[Hash] = Package;
389 Grp->FirstPackage = Package;
390 }
391 else // Group the Packages together
392 {
393 // this package is the new last package
394 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
395 Pkg->NextPackage = LastPkg->NextPackage;
396 LastPkg->NextPackage = Package;
397 }
398 Grp->LastPackage = Package;
399
400 // Set the name, arch and the ID
401 Pkg->Name = Grp->Name;
402 Pkg->Group = Grp.Index();
403 Pkg->Arch = WriteUniqString(Arch.c_str());
404 if (unlikely(Pkg->Arch == 0))
405 return false;
406 Pkg->ID = Cache.HeaderP->PackageCount++;
407
408 return true;
409}
410 /*}}}*/
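// A sketch (package names are hypothetical) of the layout NewGroup() and
// NewPackage() maintain: all packages of one group sit next to each other in
// the package hash chain, bracketed by FirstPackage/LastPackage:
//
//   Group "libfoo": FirstPackage -> libfoo:amd64 -> libfoo:i386 <- LastPackage
//
// libfoo:i386->NextPackage then continues with whatever package followed the
// group in that hash bucket before the insertion.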
411// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
412// ---------------------------------------------------------------------
413/* */
414bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
415 ListParser &List)
416{
417 if (CurrentFile == 0)
418 return true;
419
420 // Get a structure
421 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
422 if (VerFile == 0)
423 return false;
424
425 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
426 VF->File = CurrentFile - Cache.PkgFileP;
427
428 // Link it to the end of the list
429 map_ptrloc *Last = &Ver->FileList;
430 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
431 Last = &V->NextFile;
432 VF->NextFile = *Last;
433 *Last = VF.Index();
434
435 VF->Offset = List.Offset();
436 VF->Size = List.Size();
437 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
438 Cache.HeaderP->MaxVerFileSize = VF->Size;
439 Cache.HeaderP->VerFileCount++;
440
441 return true;
442}
443 /*}}}*/
444// CacheGenerator::NewVersion - Create a new Version /*{{{*/
445// ---------------------------------------------------------------------
446/* This puts a version structure in the linked list */
447unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
448 const string &VerStr,
449 unsigned long Next)
450{
451 // Get a structure
452 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
453 if (Version == 0)
454 return 0;
455
456 // Fill it in
457 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
458 Ver->NextVer = Next;
459 Ver->ID = Cache.HeaderP->VersionCount++;
460 Ver->VerStr = Map.WriteString(VerStr);
461 if (Ver->VerStr == 0)
462 return 0;
463
464 return Version;
465}
466 /*}}}*/
467// CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
468// ---------------------------------------------------------------------
469/* */
470bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
471 ListParser &List)
472{
473 if (CurrentFile == 0)
474 return true;
475
476 // Get a structure
477 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
478 if (DescFile == 0)
479 return false;
480
481 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
482 DF->File = CurrentFile - Cache.PkgFileP;
483
484 // Link it to the end of the list
485 map_ptrloc *Last = &Desc->FileList;
486 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
487 Last = &D->NextFile;
488
489 DF->NextFile = *Last;
490 *Last = DF.Index();
491
492 DF->Offset = List.Offset();
493 DF->Size = List.Size();
494 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
495 Cache.HeaderP->MaxDescFileSize = DF->Size;
496 Cache.HeaderP->DescFileCount++;
497
498 return true;
499}
500 /*}}}*/
501// CacheGenerator::NewDescription - Create a new Description /*{{{*/
502// ---------------------------------------------------------------------
503/* This puts a description structure in the linked list */
504map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
505 const string &Lang,
506 const MD5SumValue &md5sum,
507 map_ptrloc Next)
508{
509 // Get a structure
510 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
511 if (Description == 0)
512 return 0;
513
514 // Fill it in
515 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
516 Desc->NextDesc = Next;
517 Desc->ID = Cache.HeaderP->DescriptionCount++;
518 Desc->language_code = Map.WriteString(Lang);
519 Desc->md5sum = Map.WriteString(md5sum.Value());
520 if (Desc->language_code == 0 || Desc->md5sum == 0)
521 return 0;
522
523 return Description;
524}
525 /*}}}*/
526// CacheGenerator::FinishCache - do various finish operations /*{{{*/
527// ---------------------------------------------------------------------
528/* This prepares the Cache for delivery */
529bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
530{
531 // FIXME: add progress reporting for this operation
532 // Do we have different architectures in our groups?
533 vector<string> archs = APT::Configuration::getArchitectures();
534 if (archs.size() > 1)
535 {
536 // Create Conflicts in between the group
537 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
538 {
539 string const PkgName = G.Name();
540 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
541 {
542 if (strcmp(P.Arch(),"all") == 0)
543 continue;
544 pkgCache::PkgIterator allPkg;
545 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
546 {
547 string const Arch = V.Arch(true);
548 map_ptrloc *OldDepLast = NULL;
549 /* MultiArch handling introduces a lot of implicit Dependencies:
550 - MultiArch: same → Co-Installable if they have the same version
551 - Architecture: all → Need to be Co-Installable for internal reasons
552 - All others conflict with all other group members */
553 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
554 V->MultiArch == pkgCache::Version::Same);
555 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
556 allPkg = G.FindPkg("all");
557 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
558 {
559 if (*A == Arch)
560 continue;
561 /* We allow only one installed arch at a time
562 per group, therefore each group member conflicts
563 with all other group members */
564 pkgCache::PkgIterator D = G.FindPkg(*A);
565 if (D.end() == true)
566 continue;
567 if (coInstall == true)
568 {
569 // Replaces: ${self}:other ( << ${binary:Version})
570 NewDepends(D, V, V.VerStr(),
571 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
572 OldDepLast);
573 // Breaks: ${self}:other (!= ${binary:Version})
574 NewDepends(D, V, V.VerStr(),
575 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
576 OldDepLast);
577 if (V->MultiArch == pkgCache::Version::All)
578 {
579 // Depend on ${self}:all which does depend on nothing
580 NewDepends(allPkg, V, V.VerStr(),
581 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
582 OldDepLast);
583 }
584 } else {
585 // Conflicts: ${self}:other
586 NewDepends(D, V, "",
587 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
588 OldDepLast);
589 }
590 }
591 }
592 }
593 }
594 }
595 return true;
596}
597 /*}}}*/
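// A worked example (hypothetical package, not from the source) of the
// implicit dependencies FinishCache() creates with archs amd64 and i386:
// for foo 1.0 with "Multi-Arch: same", every foo:<arch> version gets, against
// each other architecture member of the group,
//    Replaces: foo:<other> (<< 1.0)
//    Breaks:   foo:<other> (!= 1.0)
// so only equal versions remain co-installable. A plain (non multi-arch) foo
// gets an unversioned Conflicts: foo:<other> instead, and an
// "Architecture: all" version additionally depends on foo:all (= 1.0).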
598// CacheGenerator::NewDepends - Create a dependency element /*{{{*/
599// ---------------------------------------------------------------------
600/* This creates a dependency element in the tree. It is linked to the
601 version and to the package that it is pointing to. */
602bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
603 pkgCache::VerIterator &Ver,
604 string const &Version,
605 unsigned int const &Op,
606 unsigned int const &Type,
607 map_ptrloc *OldDepLast)
608{
609 // Get a structure
610 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
611 if (unlikely(Dependency == 0))
612 return false;
613
614 // Fill it in
615 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
616 Dep->ParentVer = Ver.Index();
617 Dep->Type = Type;
618 Dep->CompareOp = Op;
619 Dep->ID = Cache.HeaderP->DependsCount++;
620
621 // Probe the reverse dependency list for a version string that matches
622 if (Version.empty() == false)
623 {
624/* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
625 if (I->Version != 0 && I.TargetVer() == Version)
626 Dep->Version = I->Version;*/
627 if (Dep->Version == 0)
628 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
629 return false;
630 }
631
632 // Link it to the package
633 Dep->Package = Pkg.Index();
634 Dep->NextRevDepends = Pkg->RevDepends;
635 Pkg->RevDepends = Dep.Index();
636
637 // Do we know where to link the Dependency to?
638 if (OldDepLast == NULL)
639 {
640 OldDepLast = &Ver->DependsList;
641 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
642 OldDepLast = &D->NextDepends;
643 }
644
645 Dep->NextDepends = *OldDepLast;
646 *OldDepLast = Dep.Index();
647 OldDepLast = &Dep->NextDepends;
648
649 return true;
650}
651 /*}}}*/
652// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
653// ---------------------------------------------------------------------
654/* This creates a Group and the Package to link this dependency to if
655 needed and also handles the caching of the old endpoint */
656bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
657 const string &PackageName,
658 const string &Arch,
659 const string &Version,
660 unsigned int Op,
661 unsigned int Type)
662{
663 pkgCache::GrpIterator Grp;
664 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
665 return false;
666
667 // Locate the target package
668 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
669 if (Pkg.end() == true) {
670 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
671 return false;
672 }
673
674 // Is it a file dependency?
675 if (unlikely(PackageName[0] == '/'))
676 FoundFileDeps = true;
677
678 /* Caching the old end point speeds up generation substantially */
679 if (OldDepVer != Ver) {
680 OldDepLast = NULL;
681 OldDepVer = Ver;
682 }
683
684 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
685}
686 /*}}}*/
687// ListParser::NewProvides - Create a Provides element /*{{{*/
688// ---------------------------------------------------------------------
689/* */
690bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
691 const string &PkgName,
692 const string &PkgArch,
693 const string &Version)
694{
695 pkgCache &Cache = Owner->Cache;
696
697 // We do not add self referencing provides
698 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
699 return true;
700
701 // Get a structure
702 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
703 if (unlikely(Provides == 0))
704 return false;
705 Cache.HeaderP->ProvidesCount++;
706
707 // Fill it in
708 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
709 Prv->Version = Ver.Index();
710 Prv->NextPkgProv = Ver->ProvidesList;
711 Ver->ProvidesList = Prv.Index();
712 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
713 return false;
714
715 // Locate the target package
716 pkgCache::PkgIterator Pkg;
717 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
718 return false;
719
720 // Link it to the package
721 Prv->ParentPkg = Pkg.Index();
722 Prv->NextProvides = Pkg->ProvidesList;
723 Pkg->ProvidesList = Prv.Index();
724
725 return true;
726}
727 /*}}}*/
728// CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
729// ---------------------------------------------------------------------
730/* This is used to select which file is to be associated with all newly
731 added versions. The caller is responsible for setting the IMS fields. */
732bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
733 const pkgIndexFile &Index,
734 unsigned long Flags)
735{
736 // Get some space for the structure
737 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
738 if (CurrentFile == Cache.PkgFileP)
739 return false;
740
741 // Fill it in
742 CurrentFile->FileName = Map.WriteString(File);
743 CurrentFile->Site = WriteUniqString(Site);
744 CurrentFile->NextFile = Cache.HeaderP->FileList;
745 CurrentFile->Flags = Flags;
746 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
747 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
748 PkgFileName = File;
749 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
750 Cache.HeaderP->PackageFileCount++;
751
752 if (CurrentFile->FileName == 0)
753 return false;
754
755 if (Progress != 0)
756 Progress->SubProgress(Index.Size());
757 return true;
758}
759 /*}}}*/
760// CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
761// ---------------------------------------------------------------------
762/* This is used to create handles to strings. Given the same text it
763 always returns the same number */
764unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
765 unsigned int Size)
766{
767 /* We use a very small transient hash table here; this speeds up generation
768 by a fair amount on slower machines */
769 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
770 if (Bucket != 0 &&
771 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
772 return Bucket->String;
773
774 // Search for an insertion point
775 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
776 int Res = 1;
777 map_ptrloc *Last = &Cache.HeaderP->StringList;
778 for (; I != Cache.StringItemP; Last = &I->NextItem,
779 I = Cache.StringItemP + I->NextItem)
780 {
781 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
782 if (Res >= 0)
783 break;
784 }
785
786 // Match
787 if (Res == 0)
788 {
789 Bucket = I;
790 return I->String;
791 }
792
793 // Get a structure
794 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
795 if (Item == 0)
796 return 0;
797
798 // Fill in the structure
799 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
800 ItemP->NextItem = I - Cache.StringItemP;
801 *Last = Item;
802 ItemP->String = Map.WriteString(S,Size);
803 if (ItemP->String == 0)
804 return 0;
805
806 Bucket = ItemP;
807 return ItemP->String;
808}
809 /*}}}*/
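// Illustration (hypothetical call sequence) of the deduplication above:
// WriteUniqString("amd64"), WriteUniqString("all"), WriteUniqString("amd64")
// allocates two StringItems in the sorted list; the third call is answered
// either from the two-character hash bucket or from the list walk and returns
// the same offset as the first call, so identical text always maps to the
// same string in the cache.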
810// CheckValidity - Check that a cache is up-to-date /*{{{*/
811// ---------------------------------------------------------------------
812/* This just verifies that each file in the list of index files exists,
813 has attributes matching the cache, and that the cache does not have
814 any extra files. */
815static bool CheckValidity(const string &CacheFile, FileIterator Start,
816 FileIterator End,MMap **OutMap = 0)
817{
818 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
819 // No file, certainly invalid
820 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
821 {
822 if (Debug == true)
823 std::clog << "CacheFile doesn't exist" << std::endl;
824 return false;
825 }
826
827 // Map it
828 FileFd CacheF(CacheFile,FileFd::ReadOnly);
829 SPtr<MMap> Map = new MMap(CacheF,0);
830 pkgCache Cache(Map);
831 if (_error->PendingError() == true || Map->Size() == 0)
832 {
833 if (Debug == true)
834 std::clog << "Errors are pending or Map is empty()" << std::endl;
835 _error->Discard();
836 return false;
837 }
838
839 /* Now we check every index file, see if it is in the cache,
840 verify the IMS data and check that it is on the disk too.. */
841 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
842 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
843 for (; Start != End; Start++)
844 {
845 if (Debug == true)
846 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
847 if ((*Start)->HasPackages() == false)
848 {
849 if (Debug == true)
850 std::clog << "Has NO packages" << std::endl;
851 continue;
852 }
853
854 if ((*Start)->Exists() == false)
855 {
856#if 0 // mvo: we no longer give a message here (Default Sources spec)
857 _error->WarningE("stat",_("Couldn't stat source package list %s"),
858 (*Start)->Describe().c_str());
859#endif
860 if (Debug == true)
861 std::clog << "file doesn't exist" << std::endl;
862 continue;
863 }
864
865 // FindInCache is also expected to do an IMS check.
866 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
867 if (File.end() == true)
868 {
869 if (Debug == true)
870 std::clog << "FindInCache returned end-Pointer" << std::endl;
871 return false;
872 }
873
874 Visited[File->ID] = true;
875 if (Debug == true)
876 std::clog << "with ID " << File->ID << " is valid" << std::endl;
877 }
878
879 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
880 if (Visited[I] == false)
881 {
882 if (Debug == true)
883 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
884 return false;
885 }
886
887 if (_error->PendingError() == true)
888 {
889 if (Debug == true)
890 {
891 std::clog << "Validity failed because of pending errors:" << std::endl;
892 _error->DumpErrors();
893 }
894 _error->Discard();
895 return false;
896 }
897
898 if (OutMap != 0)
899 *OutMap = Map.UnGuard();
900 return true;
901}
902 /*}}}*/
903// ComputeSize - Compute the total size of a bunch of files /*{{{*/
904// ---------------------------------------------------------------------
905/* Size is kind of an abstract notion that is only used for the progress
906 meter */
907static unsigned long ComputeSize(FileIterator Start,FileIterator End)
908{
909 unsigned long TotalSize = 0;
910 for (; Start != End; Start++)
911 {
912 if ((*Start)->HasPackages() == false)
913 continue;
914 TotalSize += (*Start)->Size();
915 }
916 return TotalSize;
917}
918 /*}}}*/
919// BuildCache - Merge the list of index files into the cache /*{{{*/
920// ---------------------------------------------------------------------
921/* */
922static bool BuildCache(pkgCacheGenerator &Gen,
923 OpProgress *Progress,
924 unsigned long &CurrentSize,unsigned long TotalSize,
925 FileIterator Start, FileIterator End)
926{
927 FileIterator I;
928 for (I = Start; I != End; I++)
929 {
930 if ((*I)->HasPackages() == false)
931 continue;
932
933 if ((*I)->Exists() == false)
934 continue;
935
936 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
937 {
938 _error->Warning("Duplicate sources.list entry %s",
939 (*I)->Describe().c_str());
940 continue;
941 }
942
943 unsigned long Size = (*I)->Size();
944 if (Progress != NULL)
945 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
946 CurrentSize += Size;
947
948 if ((*I)->Merge(Gen,Progress) == false)
949 return false;
950 }
951
952 if (Gen.HasFileDeps() == true)
953 {
954 if (Progress != NULL)
955 Progress->Done();
956 TotalSize = ComputeSize(Start, End);
957 CurrentSize = 0;
958 for (I = Start; I != End; I++)
959 {
960 unsigned long Size = (*I)->Size();
961 if (Progress != NULL)
962 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
963 CurrentSize += Size;
964 if ((*I)->MergeFileProvides(Gen,Progress) == false)
965 return false;
966 }
967 }
968
969 return true;
970}
971 /*}}}*/
972// CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
973// ---------------------------------------------------------------------
974/* This makes sure that the status cache (the cache that has all
975 index files from the sources list and all local ones) is ready
976 to be mmapped. If OutMap is not zero then a MMap object representing
977 the cache will be stored there. This is pretty much mandatory if you
978 are using AllowMem. AllowMem lets the function be run as non-root
979 where it builds the cache 'fast' into a memory buffer. */
980__deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
981 MMap **OutMap, bool AllowMem)
982 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
983bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
984 MMap **OutMap,bool AllowMem)
985{
986 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
987 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
988
989 vector<pkgIndexFile *> Files;
990 for (vector<metaIndex *>::const_iterator i = List.begin();
991 i != List.end();
992 i++)
993 {
994 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
995 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
996 j != Indexes->end();
997 j++)
998 Files.push_back (*j);
999 }
1000
1001 unsigned long const EndOfSource = Files.size();
1002 if (_system->AddStatusFiles(Files) == false)
1003 return false;
1004
1005 // Decide if we can write to the files..
1006 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1007 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1008
1009 // ensure the cache directory exists
1010 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1011 {
1012 string dir = _config->FindDir("Dir::Cache");
1013 size_t const len = dir.size();
1014 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1015 dir = dir.substr(0, len - 5);
1016 if (CacheFile.empty() == false)
1017 CreateDirectory(dir, flNotFile(CacheFile));
1018 if (SrcCacheFile.empty() == false)
1019 CreateDirectory(dir, flNotFile(SrcCacheFile));
1020 }
1021
1022 // Decide if we can write to the cache
1023 bool Writeable = false;
1024 if (CacheFile.empty() == false)
1025 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1026 else
1027 if (SrcCacheFile.empty() == false)
1028 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1029 if (Debug == true)
1030 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1031
1032 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1033 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1034
1035 if (Progress != NULL)
1036 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1037
1038 // Cache is OK, Fin.
1039 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1040 {
1041 if (Progress != NULL)
1042 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1043 if (Debug == true)
1044 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1045 return true;
1046 }
1047 else if (Debug == true)
1048 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1049
1050 /* At this point we know we need to reconstruct the package cache,
1051 begin. */
1052 SPtr<FileFd> CacheF;
1053 SPtr<DynamicMMap> Map;
1054 if (Writeable == true && CacheFile.empty() == false)
1055 {
1056 unlink(CacheFile.c_str());
1057 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
1058 fchmod(CacheF->Fd(),0644);
1059 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
1060 if (_error->PendingError() == true)
1061 return false;
1062 if (Debug == true)
1063 std::clog << "Open filebased MMap" << std::endl;
1064 }
1065 else
1066 {
1067 // Just build it in memory..
1068 Map = new DynamicMMap(0,MapSize);
1069 if (Debug == true)
1070 std::clog << "Open memory Map (not filebased)" << std::endl;
1071 }
1072
1073 // Lets try the source cache.
1074 unsigned long CurrentSize = 0;
1075 unsigned long TotalSize = 0;
1076 if (CheckValidity(SrcCacheFile,Files.begin(),
1077 Files.begin()+EndOfSource) == true)
1078 {
1079 if (Debug == true)
1080 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1081 // Preload the map with the source cache
1082 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1083 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1084 if ((alloc == 0 && _error->PendingError())
1085 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1086 SCacheF.Size()) == false)
1087 return false;
1088
1089 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1090
1091 // Build the status cache
1092 pkgCacheGenerator Gen(Map.Get(),Progress);
1093 if (_error->PendingError() == true)
1094 return false;
1095 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1096 Files.begin()+EndOfSource,Files.end()) == false)
1097 return false;
1098
1099 // FIXME: move me to a better place
1100 Gen.FinishCache(Progress);
1101 }
1102 else
1103 {
1104 if (Debug == true)
1105 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1106 TotalSize = ComputeSize(Files.begin(),Files.end());
1107
1108 // Build the source cache
1109 pkgCacheGenerator Gen(Map.Get(),Progress);
1110 if (_error->PendingError() == true)
1111 return false;
1112 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1113 Files.begin(),Files.begin()+EndOfSource) == false)
1114 return false;
1115
1116 // Write it back
1117 if (Writeable == true && SrcCacheFile.empty() == false)
1118 {
1119 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1120 if (_error->PendingError() == true)
1121 return false;
1122
1123 fchmod(SCacheF.Fd(),0644);
1124
1125 // Write out the main data
1126 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1127 return _error->Error(_("IO Error saving source cache"));
1128 SCacheF.Sync();
1129
1130 // Write out the proper header
1131 Gen.GetCache().HeaderP->Dirty = false;
1132 if (SCacheF.Seek(0) == false ||
1133 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1134 return _error->Error(_("IO Error saving source cache"));
1135 Gen.GetCache().HeaderP->Dirty = true;
1136 SCacheF.Sync();
1137 }
1138
1139 // Build the status cache
1140 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1141 Files.begin()+EndOfSource,Files.end()) == false)
1142 return false;
1143
1144 // FIXME: move me to a better place
1145 Gen.FinishCache(Progress);
1146 }
1147 if (Debug == true)
1148 std::clog << "Caches are ready for shipping" << std::endl;
1149
1150 if (_error->PendingError() == true)
1151 return false;
1152 if (OutMap != 0)
1153 {
1154 if (CacheF != 0)
1155 {
1156 delete Map.UnGuard();
1157 *OutMap = new MMap(*CacheF,0);
1158 }
1159 else
1160 {
1161 *OutMap = Map.UnGuard();
1162 }
1163 }
1164
1165 return true;
1166}
1167 /*}}}*/
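// A minimal usage sketch of the entry point above (not part of apt itself),
// assuming an initialized _config/_system and the headers already included
// at the top of this file; error handling is reduced to the pending check.
#if 0
bool OpenPackageCache(MMap **OutMap)
{
   pkgSourceList List;
   if (List.ReadMainList() == false)          // parse the sources.list entries
      return false;
   OpTextProgress Prog(*_config);
   // Build (or merely validate) srcpkgcache.bin/pkgcache.bin and hand back an
   // MMap of the result; AllowMem == true falls back to an in-memory cache
   // when the cache directory is not writable.
   if (pkgCacheGenerator::MakeStatusCache(List, &Prog, OutMap, true) == false)
      return false;
   return _error->PendingError() == false;
}
#endif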
1168// CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1169// ---------------------------------------------------------------------
1170/* */
1171__deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1172 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1173bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1174{
1175 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1176 vector<pkgIndexFile *> Files;
1177 unsigned long EndOfSource = Files.size();
1178 if (_system->AddStatusFiles(Files) == false)
1179 return false;
1180
1181 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1182 unsigned long CurrentSize = 0;
1183 unsigned long TotalSize = 0;
1184
1185 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1186
1187 // Build the status cache
1188 if (Progress != NULL)
1189 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1190 pkgCacheGenerator Gen(Map.Get(),Progress);
1191 if (_error->PendingError() == true)
1192 return false;
1193 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1194 Files.begin()+EndOfSource,Files.end()) == false)
1195 return false;
1196
1197 // FIXME: move me to a better place
1198 Gen.FinishCache(Progress);
1199
1200 if (_error->PendingError() == true)
1201 return false;
1202 *OutMap = Map.UnGuard();
1203
1204 return true;
1205}
1206 /*}}}*/
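// The status-only variant is used the same way by tools that only need the
// dpkg status information; a sketch under the same assumptions as above.
#if 0
bool InspectStatusCache()
{
   OpTextProgress Prog(*_config);
   DynamicMMap *StatusMap = 0;
   if (pkgCacheGenerator::MakeOnlyStatusCache(&Prog, &StatusMap) == false)
      return false;
   {
      pkgCache Cache(StatusMap);   // cache built only from the status files
      // ... walk the cache here, e.g. starting at Cache.PkgBegin() ...
   }
   delete StatusMap;
   return _error->PendingError() == false;
}
#endif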