Foreign Versions add an implicit Provides to the other packages in the group
apt-pkg/pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 #include <system.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that is written to the disk */
43 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46 {
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
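   // UniqHash is the small transient bucket table used by WriteUniqString()
   // to avoid rescanning the whole cache string list on every lookup.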
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55      // Set up the map interface.
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
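      // Pools let the DynamicMMap allocator hand out the fixed-size cache
      // records in per-size chunks instead of one raw allocation each.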
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82 }
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87    avoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
99 // CacheGenerator::MergeList - Merge the package list /*{{{*/
100 // ---------------------------------------------------------------------
101 /* This generates the entries in the cache. Each pass of the loop
102    processes a single package record from the underlying parse engine. */
103 bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105 {
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115      /* As we handle Architecture: all packages as architecture-bound,
116         we add their information to every (simulated) arch package */
117 std::vector<string> genArch;
118 if (List.ArchitectureAll() == true)
119 genArch = APT::Configuration::getArchitectures();
120 else
121 genArch.push_back(List.Architecture());
122
123 for (std::vector<string>::const_iterator arch = genArch.begin();
124 arch != genArch.end(); ++arch)
125 {
126 // Get a pointer to the package structure
127 pkgCache::PkgIterator Pkg;
128 if (NewPackage(Pkg, PackageName, *arch) == false)
129 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
130 Counter++;
131 if (Counter % 100 == 0 && Progress != 0)
132 Progress->Progress(List.Offset());
133
134      /* Get a pointer to the version structure. We know the list is sorted,
135         so we use that fact in the search. Insertion of new versions
136         preserves the sort order. */
137 string Version = List.Version();
138 if (Version.empty() == true)
139 {
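         /* No version string: this is a description-only record (typically
            from a Translation-* file). Attach the description to every
            existing version whose md5 matches. */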
140 // we first process the package, then the descriptions
141         // (this has the bonus that we get an MMap error when we run out
142 // of MMap space)
143 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
144 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
145 PackageName.c_str());
146
147 // Find the right version to write the description
148 MD5SumValue CurMd5 = List.Description_md5();
149 pkgCache::VerIterator Ver = Pkg.VersionList();
150 map_ptrloc *LastVer = &Pkg->VersionList;
151
152 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
153 {
154 pkgCache::DescIterator Desc = Ver.DescriptionList();
155 map_ptrloc *LastDesc = &Ver->DescriptionList;
156 bool duplicate=false;
157
158 // don't add a new description if we have one for the given
159 // md5 && language
160 for ( ; Desc.end() == false; Desc++)
161 if (MD5SumValue(Desc.md5()) == CurMd5 &&
162 Desc.LanguageCode() == List.DescriptionLanguage())
163 duplicate=true;
164 if(duplicate)
165 continue;
166
167 for (Desc = Ver.DescriptionList();
168 Desc.end() == false;
169 LastDesc = &Desc->NextDesc, Desc++)
170 {
171 if (MD5SumValue(Desc.md5()) == CurMd5)
172 {
173 // Add new description
174 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
175 Desc->ParentPkg = Pkg.Index();
176
177 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
178 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
179 break;
180 }
181 }
182 }
183
184 continue;
185 }
186
187 pkgCache::VerIterator Ver = Pkg.VersionList();
188 map_ptrloc *LastVer = &Pkg->VersionList;
189 int Res = 1;
190 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
191 {
192 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
193 if (Res >= 0)
194 break;
195 }
196
197 /* We already have a version for this item, record that we
198 saw it */
199 unsigned long Hash = List.VersionHash();
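      // VersionHash() is a checksum the list parser computes over the
      // important fields of the record, so an entry is only reused when
      // both the version string and those fields match.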
200 if (Res == 0 && Ver->Hash == Hash)
201 {
202 if (List.UsePackage(Pkg,Ver) == false)
203 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
204 PackageName.c_str());
205
206 if (NewFileVer(Ver,List) == false)
207 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
208 PackageName.c_str());
209
210 // Read only a single record and return
211 if (OutVer != 0)
212 {
213 *OutVer = Ver;
214 FoundFileDeps |= List.HasFileDeps();
215 return true;
216 }
217
218 continue;
219 }
220
221 // Skip to the end of the same version set.
222 if (Res == 0)
223 {
224 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
225 {
226 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
227 if (Res != 0)
228 break;
229 }
230 }
231
232 // Add a new version
233 *LastVer = NewVersion(Ver,Version,*LastVer);
234 Ver->ParentPkg = Pkg.Index();
235 Ver->Hash = Hash;
236
237 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
238 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
239 PackageName.c_str());
240
241 if (List.UsePackage(Pkg,Ver) == false)
242 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
243 PackageName.c_str());
244
245 if (NewFileVer(Ver,List) == false)
246 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
247 PackageName.c_str());
248
249 // Read only a single record and return
250 if (OutVer != 0)
251 {
252 *OutVer = Ver;
253 FoundFileDeps |= List.HasFileDeps();
254 return true;
255 }
256
257 /* Record the Description data. Description data always exist in
258 Packages and Translation-* files. */
259 pkgCache::DescIterator Desc = Ver.DescriptionList();
260 map_ptrloc *LastDesc = &Ver->DescriptionList;
261
262 // Skip to the end of description set
263 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
264
265 // Add new description
266 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
267 Desc->ParentPkg = Pkg.Index();
268
269 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
270 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
271 }
272 }
273
274 FoundFileDeps |= List.HasFileDeps();
275
276 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
277 return _error->Error(_("Wow, you exceeded the number of package "
278 "names this APT is capable of."));
279 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
280 return _error->Error(_("Wow, you exceeded the number of versions "
281 "this APT is capable of."));
282 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
283 return _error->Error(_("Wow, you exceeded the number of descriptions "
284 "this APT is capable of."));
285 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
286 return _error->Error(_("Wow, you exceeded the number of dependencies "
287 "this APT is capable of."));
288 return true;
289 }
290 /*}}}*/
291 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
292 // ---------------------------------------------------------------------
293 /* If we found any file depends while parsing the main list we need to
294    resolve them. Since it is undesirable to load the entire list of files
295    into the cache as virtual packages, we do a two-stage effort. MergeList
296    identifies the file depends and this creates Provides for them by
297    re-parsing all the indexes. */
298 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
299 {
300 List.Owner = this;
301
302 unsigned int Counter = 0;
303 while (List.Step() == true)
304 {
305 string PackageName = List.Package();
306 if (PackageName.empty() == true)
307 return false;
308 string Version = List.Version();
309 if (Version.empty() == true)
310 continue;
311
312 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
313 if (Pkg.end() == true)
314 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
315 PackageName.c_str());
316 Counter++;
317 if (Counter % 100 == 0 && Progress != 0)
318 Progress->Progress(List.Offset());
319
320 unsigned long Hash = List.VersionHash();
321 pkgCache::VerIterator Ver = Pkg.VersionList();
322 for (; Ver.end() == false; Ver++)
323 {
324         if (Ver->Hash == Hash && Version == Ver.VerStr())
325 {
326 if (List.CollectFileProvides(Cache,Ver) == false)
327 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
328 break;
329 }
330 }
331
332 if (Ver.end() == true)
333 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
334 }
335
336 return true;
337 }
338 /*}}}*/
339 // CacheGenerator::NewGroup - Add a new group /*{{{*/
340 // ---------------------------------------------------------------------
341 /* This creates a new group structure and adds it to the hash table */
342 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
343 Grp = Cache.FindGrp(Name);
344 if (Grp.end() == false)
345 return true;
346
347 // Get a structure
348 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
349 if (unlikely(Group == 0))
350 return false;
351
352 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
353 Grp->Name = Map.WriteString(Name);
354 if (unlikely(Grp->Name == 0))
355 return false;
356
357 // Insert it into the hash table
358 unsigned long const Hash = Cache.Hash(Name);
359 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
360 Cache.HeaderP->GrpHashTable[Hash] = Group;
361
362 Cache.HeaderP->GroupCount++;
363
364 return true;
365 }
366 /*}}}*/
367 // CacheGenerator::NewPackage - Add a new package /*{{{*/
368 // ---------------------------------------------------------------------
369 /* This creates a new package structure and adds it to the hash table */
370 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
371 const string &Arch) {
372 pkgCache::GrpIterator Grp;
373 if (unlikely(NewGroup(Grp, Name) == false))
374 return false;
375
376 Pkg = Grp.FindPkg(Arch);
377 if (Pkg.end() == false)
378 return true;
379
380 // Get a structure
381 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
382 if (unlikely(Package == 0))
383 return false;
384 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
385
386 // Insert it into the hash table
387 unsigned long const Hash = Cache.Hash(Name);
388 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
389 Cache.HeaderP->PkgHashTable[Hash] = Package;
390
391 // remember the packages in the group
392 Grp->FirstPackage = Package;
393 if (Grp->LastPackage == 0)
394 Grp->LastPackage = Package;
395
396 // Set the name, arch and the ID
397 Pkg->Name = Grp->Name;
398 Pkg->Group = Grp.Index();
399 Pkg->Arch = WriteUniqString(Arch.c_str());
400 if (unlikely(Pkg->Arch == 0))
401 return false;
402 Pkg->ID = Cache.HeaderP->PackageCount++;
403
404 return true;
405 }
406 /*}}}*/
407 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
408 // ---------------------------------------------------------------------
409 /* This records which index file the version was parsed from and links the new entry onto the end of the version's file list. */
410 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
411 ListParser &List)
412 {
413 if (CurrentFile == 0)
414 return true;
415
416 // Get a structure
417 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
418 if (VerFile == 0)
419       return false;
420
421 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
422 VF->File = CurrentFile - Cache.PkgFileP;
423
424 // Link it to the end of the list
425 map_ptrloc *Last = &Ver->FileList;
426 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
427 Last = &V->NextFile;
428 VF->NextFile = *Last;
429 *Last = VF.Index();
430
431 VF->Offset = List.Offset();
432 VF->Size = List.Size();
433 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
434 Cache.HeaderP->MaxVerFileSize = VF->Size;
435 Cache.HeaderP->VerFileCount++;
436
437 return true;
438 }
439 /*}}}*/
440 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
441 // ---------------------------------------------------------------------
442 /* This puts a version structure in the linked list */
443 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
444 const string &VerStr,
445 unsigned long Next)
446 {
447 // Get a structure
448 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
449 if (Version == 0)
450 return 0;
451
452 // Fill it in
453 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
454 Ver->NextVer = Next;
455 Ver->ID = Cache.HeaderP->VersionCount++;
456 Ver->VerStr = Map.WriteString(VerStr);
457 if (Ver->VerStr == 0)
458 return 0;
459
460 return Version;
461 }
462 /*}}}*/
463 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
464 // ---------------------------------------------------------------------
465 /* This records which index file the description was parsed from and links the new entry onto the end of the description's file list. */
466 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
467 ListParser &List)
468 {
469 if (CurrentFile == 0)
470 return true;
471
472 // Get a structure
473 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
474 if (DescFile == 0)
475 return false;
476
477 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
478 DF->File = CurrentFile - Cache.PkgFileP;
479
480 // Link it to the end of the list
481 map_ptrloc *Last = &Desc->FileList;
482 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
483 Last = &D->NextFile;
484
485 DF->NextFile = *Last;
486 *Last = DF.Index();
487
488 DF->Offset = List.Offset();
489 DF->Size = List.Size();
490 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
491 Cache.HeaderP->MaxDescFileSize = DF->Size;
492 Cache.HeaderP->DescFileCount++;
493
494 return true;
495 }
496 /*}}}*/
497 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
498 // ---------------------------------------------------------------------
499 /* This puts a description structure in the linked list */
500 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
501 const string &Lang,
502 const MD5SumValue &md5sum,
503 map_ptrloc Next)
504 {
505 // Get a structure
506 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
507 if (Description == 0)
508 return 0;
509
510 // Fill it in
511 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
512 Desc->NextDesc = Next;
513 Desc->ID = Cache.HeaderP->DescriptionCount++;
514 Desc->language_code = Map.WriteString(Lang);
515 Desc->md5sum = Map.WriteString(md5sum.Value());
516 if (Desc->language_code == 0 || Desc->md5sum == 0)
517 return 0;
518
519 return Description;
520 }
521 /*}}}*/
522 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
523 // ---------------------------------------------------------------------
524 /* This prepares the Cache for delivery */
525 bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
526 // FIXME: add progress reporting for this operation
527    // Do we have different architectures in our groups?
528 vector<string> archs = APT::Configuration::getArchitectures();
529 if (archs.size() > 1) {
530       // Create Conflicts between the members of the group
531 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
532 string const PkgName = G.Name();
533 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
534 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
535 string const Arch = V.Arch();
536 map_ptrloc *OldDepLast = NULL;
537 /* MultiArch handling introduces a lot of implicit Dependencies:
538 - MultiArch: same → Co-Installable if they have the same version
539 - Architecture: all → Need to be Co-Installable for internal reasons
540 - All others conflict with all other group members */
541 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
542 V->MultiArch == pkgCache::Version::Same);
543 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
544 if (*A == Arch)
545 continue;
546                  /* We allow only one installed arch at a time
547 per group, therefore each group member conflicts
548 with all other group members */
549 pkgCache::PkgIterator D = G.FindPkg(*A);
550 if (D.end() == true)
551 continue;
552 if (coInstall == true) {
553 // Replaces: ${self}:other ( << ${binary:Version})
554 NewDepends(D, V, V.VerStr(),
555 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
556 OldDepLast);
557 // Breaks: ${self}:other (!= ${binary:Version})
558 NewDepends(D, V, V.VerStr(),
559 pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
560 OldDepLast);
561 NewDepends(D, V, V.VerStr(),
562 pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
563 OldDepLast);
564 } else {
565 // Conflicts: ${self}:other
566 NewDepends(D, V, "",
567 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
568 OldDepLast);
569 }
570 }
571 }
572 }
573 }
574 }
575 return true;
576 }
577 /*}}}*/
578 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
579 // ---------------------------------------------------------------------
580 /* This creates a dependency element in the tree. It is linked to the
581    version and to the package it points to. */
582 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
583 pkgCache::VerIterator &Ver,
584 string const &Version,
585 unsigned int const &Op,
586 unsigned int const &Type,
587 map_ptrloc *OldDepLast)
588 {
589 // Get a structure
590 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
591 if (unlikely(Dependency == 0))
592 return false;
593
594 // Fill it in
595 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
596 Dep->ParentVer = Ver.Index();
597 Dep->Type = Type;
598 Dep->CompareOp = Op;
599 Dep->ID = Cache.HeaderP->DependsCount++;
600
601 // Probe the reverse dependency list for a version string that matches
602 if (Version.empty() == false)
603 {
604 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
605 if (I->Version != 0 && I.TargetVer() == Version)
606 Dep->Version = I->Version;*/
607 if (Dep->Version == 0)
608 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
609 return false;
610 }
611
612 // Link it to the package
613 Dep->Package = Pkg.Index();
614 Dep->NextRevDepends = Pkg->RevDepends;
615 Pkg->RevDepends = Dep.Index();
616
617 // Do we know where to link the Dependency to?
618 if (OldDepLast == NULL)
619 {
620 OldDepLast = &Ver->DependsList;
621 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
622 OldDepLast = &D->NextDepends;
623 }
624
625 Dep->NextDepends = *OldDepLast;
626 *OldDepLast = Dep.Index();
627 OldDepLast = &Dep->NextDepends;
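   // Note: OldDepLast is passed by value, so this update is visible only
   // inside this call; a caller's cached end point is not advanced here.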
628
629 return true;
630 }
631 /*}}}*/
632 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
633 // ---------------------------------------------------------------------
634 /* This creates the Group and the Package this dependency points to, if
635    needed, and also handles caching of the old endpoint */
636 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
637 const string &PackageName,
638 const string &Arch,
639 const string &Version,
640 unsigned int Op,
641 unsigned int Type)
642 {
643 pkgCache::GrpIterator Grp;
644 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
645 return false;
646
647 // Locate the target package
648 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
649 if (Pkg.end() == true) {
650 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
651 return false;
652 }
653
654 // Is it a file dependency?
655 if (unlikely(PackageName[0] == '/'))
656 FoundFileDeps = true;
657
658 /* Caching the old end point speeds up generation substantially */
659 if (OldDepVer != Ver) {
660 OldDepLast = NULL;
661 OldDepVer = Ver;
662 }
663
664 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
665 }
666 /*}}}*/
667 // ListParser::NewProvides - Create a Provides element /*{{{*/
668 // ---------------------------------------------------------------------
669 /* This creates a Provides entry and links it to both the providing version and the provided package. */
670 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
671 const string &PkgName,
672 const string &PkgArch,
673 const string &Version)
674 {
675 pkgCache &Cache = Owner->Cache;
676
677    // We do not add self-referencing provides
678 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch())
679 return true;
680
681 // Get a structure
682 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
683 if (unlikely(Provides == 0))
684 return false;
685 Cache.HeaderP->ProvidesCount++;
686
687 // Fill it in
688 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
689 Prv->Version = Ver.Index();
690 Prv->NextPkgProv = Ver->ProvidesList;
691 Ver->ProvidesList = Prv.Index();
692 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
693 return false;
694
695 // Locate the target package
696 pkgCache::PkgIterator Pkg;
697 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
698 return false;
699
700 // Link it to the package
701 Prv->ParentPkg = Pkg.Index();
702 Prv->NextProvides = Pkg->ProvidesList;
703 Pkg->ProvidesList = Prv.Index();
704
705 return true;
706 }
707 /*}}}*/
708 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
709 // ---------------------------------------------------------------------
710 /* This is used to select which file is to be associated with all newly
711 added versions. The caller is responsible for setting the IMS fields. */
712 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
713 const pkgIndexFile &Index,
714 unsigned long Flags)
715 {
716 // Get some space for the structure
717 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
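   // Map.Allocate() returns 0 on failure, which would leave CurrentFile
   // pointing at the first PkgFile slot; hence the check below.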
718 if (CurrentFile == Cache.PkgFileP)
719 return false;
720
721 // Fill it in
722 CurrentFile->FileName = Map.WriteString(File);
723 CurrentFile->Site = WriteUniqString(Site);
724 CurrentFile->NextFile = Cache.HeaderP->FileList;
725 CurrentFile->Flags = Flags;
726 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
727 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
728 PkgFileName = File;
729 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
730 Cache.HeaderP->PackageFileCount++;
731
732 if (CurrentFile->FileName == 0)
733 return false;
734
735 if (Progress != 0)
736 Progress->SubProgress(Index.Size());
737 return true;
738 }
739 /*}}}*/
740 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
741 // ---------------------------------------------------------------------
742 /* This is used to create handles to strings. Given the same text it
743 always returns the same number */
744 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
745 unsigned int Size)
746 {
747 /* We use a very small transient hash table here; this speeds up generation
748 by a fair amount on slower machines */
749 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
750 if (Bucket != 0 &&
751 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
752 return Bucket->String;
753
754 // Search for an insertion point
755 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
756 int Res = 1;
757 map_ptrloc *Last = &Cache.HeaderP->StringList;
758 for (; I != Cache.StringItemP; Last = &I->NextItem,
759 I = Cache.StringItemP + I->NextItem)
760 {
761 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
762 if (Res >= 0)
763 break;
764 }
765
766 // Match
767 if (Res == 0)
768 {
769 Bucket = I;
770 return I->String;
771 }
772
773 // Get a structure
774 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
775 if (Item == 0)
776 return 0;
777
778 // Fill in the structure
779 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
780 ItemP->NextItem = I - Cache.StringItemP;
781 *Last = Item;
782 ItemP->String = Map.WriteString(S,Size);
783 if (ItemP->String == 0)
784 return 0;
785
786 Bucket = ItemP;
787 return ItemP->String;
788 }
789 /*}}}*/
790 // CheckValidity - Check that a cache is up-to-date /*{{{*/
791 // ---------------------------------------------------------------------
792 /* This just verifies that each file in the list of index files exists,
793    matches the attributes recorded in the cache, and that the cache does
794    not list any extra files. */
795 static bool CheckValidity(const string &CacheFile, FileIterator Start,
796 FileIterator End,MMap **OutMap = 0)
797 {
798 // No file, certainly invalid
799 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
800 return false;
801
802 // Map it
803 FileFd CacheF(CacheFile,FileFd::ReadOnly);
804 SPtr<MMap> Map = new MMap(CacheF,0);
805 pkgCache Cache(Map);
806 if (_error->PendingError() == true || Map->Size() == 0)
807 {
808 _error->Discard();
809 return false;
810 }
811
812 /* Now we check every index file, see if it is in the cache,
813 verify the IMS data and check that it is on the disk too.. */
814 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
815 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
816 for (; Start != End; Start++)
817 {
818 if ((*Start)->HasPackages() == false)
819 continue;
820
821 if ((*Start)->Exists() == false)
822 {
823 #if 0 // mvo: we no longer give a message here (Default Sources spec)
824 _error->WarningE("stat",_("Couldn't stat source package list %s"),
825 (*Start)->Describe().c_str());
826 #endif
827 continue;
828 }
829
830 // FindInCache is also expected to do an IMS check.
831 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
832 if (File.end() == true)
833 return false;
834
835 Visited[File->ID] = true;
836 }
837
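   // Every package file recorded in the cache must have been matched above,
   // otherwise the cache still references an index that is no longer listed.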
838 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
839 if (Visited[I] == false)
840 return false;
841
842 if (_error->PendingError() == true)
843 {
844 _error->Discard();
845 return false;
846 }
847
848 if (OutMap != 0)
849 *OutMap = Map.UnGuard();
850 return true;
851 }
852 /*}}}*/
853 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
854 // ---------------------------------------------------------------------
855 /* Size is kind of an abstract notion that is only used for the progress
856 meter */
857 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
858 {
859 unsigned long TotalSize = 0;
860 for (; Start != End; Start++)
861 {
862 if ((*Start)->HasPackages() == false)
863 continue;
864 TotalSize += (*Start)->Size();
865 }
866 return TotalSize;
867 }
868 /*}}}*/
869 // BuildCache - Merge the list of index files into the cache /*{{{*/
870 // ---------------------------------------------------------------------
871 /* This merges each index file into the generator and, if file dependencies were found, makes a second pass to collect the file provides. */
872 static bool BuildCache(pkgCacheGenerator &Gen,
873 OpProgress &Progress,
874 unsigned long &CurrentSize,unsigned long TotalSize,
875 FileIterator Start, FileIterator End)
876 {
877 FileIterator I;
878 for (I = Start; I != End; I++)
879 {
880 if ((*I)->HasPackages() == false)
881 continue;
882
883 if ((*I)->Exists() == false)
884 continue;
885
886 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
887 {
888 _error->Warning("Duplicate sources.list entry %s",
889 (*I)->Describe().c_str());
890 continue;
891 }
892
893 unsigned long Size = (*I)->Size();
894 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
895 CurrentSize += Size;
896
897 if ((*I)->Merge(Gen,Progress) == false)
898 return false;
899 }
900
901 if (Gen.HasFileDeps() == true)
902 {
903 Progress.Done();
904 TotalSize = ComputeSize(Start, End);
905 CurrentSize = 0;
906 for (I = Start; I != End; I++)
907 {
908 unsigned long Size = (*I)->Size();
909 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
910 CurrentSize += Size;
911 if ((*I)->MergeFileProvides(Gen,Progress) == false)
912 return false;
913 }
914 }
915
916 return true;
917 }
918 /*}}}*/
919 // MakeStatusCache - Construct the status cache /*{{{*/
920 // ---------------------------------------------------------------------
921 /* This makes sure that the status cache (the cache that has all
922 index files from the sources list and all local ones) is ready
923    to be mmapped. If OutMap is not zero then an MMap object representing
924    the cache will be stored there. This is pretty much mandatory if you
925 are using AllowMem. AllowMem lets the function be run as non-root
926 where it builds the cache 'fast' into a memory buffer. */
927 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
928 MMap **OutMap,bool AllowMem)
929 {
930 unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
931
932 vector<pkgIndexFile *> Files;
933 for (vector<metaIndex *>::const_iterator i = List.begin();
934 i != List.end();
935 i++)
936 {
937 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
938 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
939 j != Indexes->end();
940 j++)
941 Files.push_back (*j);
942 }
943
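   // Files[0..EndOfSource) come from the sources.list entries; the status
   // files are appended after this point.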
944 unsigned long EndOfSource = Files.size();
945 if (_system->AddStatusFiles(Files) == false)
946 return false;
947
948    // Work out the cache file locations
949 string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
950 string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
951
952 // Decide if we can write to the cache
953 bool Writeable = false;
954 if (CacheFile.empty() == false)
955 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
956 else
957 if (SrcCacheFile.empty() == false)
958 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
959
960 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
961 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
962
963 Progress.OverallProgress(0,1,1,_("Reading package lists"));
964
965 // Cache is OK, Fin.
966 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
967 {
968 Progress.OverallProgress(1,1,1,_("Reading package lists"));
969 return true;
970 }
971
972 /* At this point we know we need to reconstruct the package cache,
973 begin. */
974 SPtr<FileFd> CacheF;
975 SPtr<DynamicMMap> Map;
976 if (Writeable == true && CacheFile.empty() == false)
977 {
978 unlink(CacheFile.c_str());
979 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
980 fchmod(CacheF->Fd(),0644);
981 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
982 if (_error->PendingError() == true)
983 return false;
984 }
985 else
986 {
987 // Just build it in memory..
988 Map = new DynamicMMap(0,MapSize);
989 }
990
991    // Let's try the source cache.
992 unsigned long CurrentSize = 0;
993 unsigned long TotalSize = 0;
994 if (CheckValidity(SrcCacheFile,Files.begin(),
995 Files.begin()+EndOfSource) == true)
996 {
997 // Preload the map with the source cache
998 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
999 unsigned long alloc = Map->RawAllocate(SCacheF.Size());
1000 if ((alloc == 0 && _error->PendingError())
1001 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1002 SCacheF.Size()) == false)
1003 return false;
1004
1005 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1006
1007 // Build the status cache
1008 pkgCacheGenerator Gen(Map.Get(),&Progress);
1009 if (_error->PendingError() == true)
1010 return false;
1011 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1012 Files.begin()+EndOfSource,Files.end()) == false)
1013 return false;
1014
1015 // FIXME: move me to a better place
1016 Gen.FinishCache(Progress);
1017 }
1018 else
1019 {
1020 TotalSize = ComputeSize(Files.begin(),Files.end());
1021
1022 // Build the source cache
1023 pkgCacheGenerator Gen(Map.Get(),&Progress);
1024 if (_error->PendingError() == true)
1025 return false;
1026 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1027 Files.begin(),Files.begin()+EndOfSource) == false)
1028 return false;
1029
1030 // Write it back
1031 if (Writeable == true && SrcCacheFile.empty() == false)
1032 {
1033 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1034 if (_error->PendingError() == true)
1035 return false;
1036
1037 fchmod(SCacheF.Fd(),0644);
1038
1039 // Write out the main data
1040 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1041 return _error->Error(_("IO Error saving source cache"));
1042 SCacheF.Sync();
1043
1044 // Write out the proper header
1045 Gen.GetCache().HeaderP->Dirty = false;
1046 if (SCacheF.Seek(0) == false ||
1047 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1048 return _error->Error(_("IO Error saving source cache"));
1049 Gen.GetCache().HeaderP->Dirty = true;
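      // Only the on-disk source cache is marked clean here; the in-memory
      // header stays dirty because the status files are merged into the
      // same map next.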
1050 SCacheF.Sync();
1051 }
1052
1053 // Build the status cache
1054 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1055 Files.begin()+EndOfSource,Files.end()) == false)
1056 return false;
1057
1058 // FIXME: move me to a better place
1059 Gen.FinishCache(Progress);
1060 }
1061
1062 if (_error->PendingError() == true)
1063 return false;
1064 if (OutMap != 0)
1065 {
1066 if (CacheF != 0)
1067 {
1068 delete Map.UnGuard();
1069 *OutMap = new MMap(*CacheF,0);
1070 }
1071 else
1072 {
1073 *OutMap = Map.UnGuard();
1074 }
1075 }
1076
1077 return true;
1078 }
1079 /*}}}*/
1080 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1081 // ---------------------------------------------------------------------
1082 /* This builds an in-memory cache containing only the status files. */
1083 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1084 {
1085 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1086 vector<pkgIndexFile *> Files;
1087 unsigned long EndOfSource = Files.size();
1088 if (_system->AddStatusFiles(Files) == false)
1089 return false;
1090
1091 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1092 unsigned long CurrentSize = 0;
1093 unsigned long TotalSize = 0;
1094
1095 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1096
1097 // Build the status cache
1098 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1099 pkgCacheGenerator Gen(Map.Get(),&Progress);
1100 if (_error->PendingError() == true)
1101 return false;
1102 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1103 Files.begin()+EndOfSource,Files.end()) == false)
1104 return false;
1105
1106 // FIXME: move me to a better place
1107 Gen.FinishCache(Progress);
1108
1109 if (_error->PendingError() == true)
1110 return false;
1111 *OutMap = Map.UnGuard();
1112
1113 return true;
1114 }
1115 /*}}}*/