Add yet another pseudo package which isn't as pseudo as the others:
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 #include <system.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that is written to the disk.
   The map is either brand new (Size() == 0), in which case it is seeded
   with a fresh header, or it is an existing cache file that is simply
   re-attached -- in that case it must use the same versioning system
   as the running apt or the constructor fails with a pending error. */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),
		    FoundFileDeps(0)
{
   CurrentFile = 0;
   // Clear the transient unique-string hash used by WriteUniqString
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      // Reserve space for the header before anything else is allocated
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
	 return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header: record versioning system and build architecture
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
	 _error->Error(_("Cache has an incompatible versioning system"));
	 return;
      }
   }

   // Mark the cache dirty until the destructor finalizes it; a crash
   // in between therefore leaves a visibly invalid cache behind.
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 advoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine.

   If OutVer is non-NULL only a single record is merged and the iterator
   to the created/located version is returned through it. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
	 return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
	 genArch = APT::Configuration::getArchitectures();
	 genArch.push_back("all");
      } else
	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)
      {
	 // Get a pointer to the package structure
	 pkgCache::PkgIterator Pkg;
	 if (NewPackage(Pkg, PackageName, *arch) == false)
	    return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
	 Counter++;
	 if (Counter % 100 == 0 && Progress != 0)
	    Progress->Progress(List.Offset());

	 /* Get a pointer to the version structure. We know the list is sorted
	    so we use that fact in the search. Insertion of new versions is
	    done with correct sorting */
	 string Version = List.Version();
	 if (Version.empty() == true)
	 {
	    // An empty version means this record only carries description
	    // data (presumably a Translation-* record -- see the comment
	    // near the bottom of this loop).
	    // we first process the package, then the descriptions
	    // (this has the bonus that we get MMap error when we run out
	    //  of MMap space)
	    if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
				    PackageName.c_str());

	    // Find the right version to write the description
	    MD5SumValue CurMd5 = List.Description_md5();
	    pkgCache::VerIterator Ver = Pkg.VersionList();
	    map_ptrloc *LastVer = &Pkg->VersionList;

	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       pkgCache::DescIterator Desc = Ver.DescriptionList();
	       map_ptrloc *LastDesc = &Ver->DescriptionList;
	       bool duplicate=false;

	       // don't add a new description if we have one for the given
	       // md5 && language
	       for ( ; Desc.end() == false; Desc++)
		  if (MD5SumValue(Desc.md5()) == CurMd5 &&
		      Desc.LanguageCode() == List.DescriptionLanguage())
		     duplicate=true;
	       if(duplicate)
		  continue;

	       // Attach the new description to the version whose existing
	       // description carries the matching md5 sum
	       for (Desc = Ver.DescriptionList();
		    Desc.end() == false;
		    LastDesc = &Desc->NextDesc, Desc++)
	       {
		  if (MD5SumValue(Desc.md5()) == CurMd5)
		  {
		     // Add new description
		     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
		     Desc->ParentPkg = Pkg.Index();

		     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
			return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
		     break;
		  }
	       }
	    }

	    continue;
	 }

	 // Versions are stored sorted; stop at the first entry that does
	 // not compare greater than the new version string.
	 pkgCache::VerIterator Ver = Pkg.VersionList();
	 map_ptrloc *LastVer = &Pkg->VersionList;
	 int Res = 1;
	 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	 {
	    Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	    if (Res >= 0)
	       break;
	 }

	 /* We already have a version for this item, record that we
	    saw it */
	 unsigned long Hash = List.VersionHash();
	 if (Res == 0 && Ver->Hash == Hash)
	 {
	    if (List.UsePackage(Pkg,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				    PackageName.c_str());

	    if (NewFileVer(Ver,List) == false)
	       return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				    PackageName.c_str());

	    // Read only a single record and return
	    if (OutVer != 0)
	    {
	       *OutVer = Ver;
	       FoundFileDeps |= List.HasFileDeps();
	       return true;
	    }

	    continue;
	 }

	 // Skip to the end of the same version set: several entries may
	 // share the version string while differing in their hash.
	 if (Res == 0)
	 {
	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	       if (Res != 0)
		  break;
	    }
	 }

	 // Add a new version, spliced in at the sorted position
	 *LastVer = NewVersion(Ver,Version,*LastVer);
	 Ver->ParentPkg = Pkg.Index();
	 Ver->Hash = Hash;

	 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
				 PackageName.c_str());

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
				 PackageName.c_str());

	 // Read only a single record and return
	 if (OutVer != 0)
	 {
	    *OutVer = Ver;
	    FoundFileDeps |= List.HasFileDeps();
	    return true;
	 }

	 /* Record the Description data. Description data always exist in
	    Packages and Translation-* files. */
	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;

	 // Skip to the end of description set
	 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   // The ID fields in the cache structures are fixed-width; refuse to
   // deliver a cache that would overflow them.
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
   return true;
}
									/*}}}*/
292 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
293 // ---------------------------------------------------------------------
294 /* If we found any file depends while parsing the main list we need to
295 resolve them. Since it is undesired to load the entire list of files
296 into the cache as virtual packages we do a two stage effort. MergeList
297 identifies the file depends and this creates Provdies for them by
298 re-parsing all the indexs. */
299 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
300 {
301 List.Owner = this;
302
303 unsigned int Counter = 0;
304 while (List.Step() == true)
305 {
306 string PackageName = List.Package();
307 if (PackageName.empty() == true)
308 return false;
309 string Version = List.Version();
310 if (Version.empty() == true)
311 continue;
312
313 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
314 if (Pkg.end() == true)
315 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
316 PackageName.c_str());
317 Counter++;
318 if (Counter % 100 == 0 && Progress != 0)
319 Progress->Progress(List.Offset());
320
321 unsigned long Hash = List.VersionHash();
322 pkgCache::VerIterator Ver = Pkg.VersionList();
323 for (; Ver.end() == false; Ver++)
324 {
325 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
326 {
327 if (List.CollectFileProvides(Cache,Ver) == false)
328 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
329 break;
330 }
331 }
332
333 if (Ver.end() == true)
334 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
335 }
336
337 return true;
338 }
339 /*}}}*/
340 // CacheGenerator::NewGroup - Add a new group /*{{{*/
341 // ---------------------------------------------------------------------
342 /* This creates a new group structure and adds it to the hash table */
343 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
344 Grp = Cache.FindGrp(Name);
345 if (Grp.end() == false)
346 return true;
347
348 // Get a structure
349 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
350 if (unlikely(Group == 0))
351 return false;
352
353 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
354 Grp->Name = Map.WriteString(Name);
355 if (unlikely(Grp->Name == 0))
356 return false;
357
358 // Insert it into the hash table
359 unsigned long const Hash = Cache.Hash(Name);
360 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
361 Cache.HeaderP->GrpHashTable[Hash] = Group;
362
363 Cache.HeaderP->GroupCount++;
364
365 return true;
366 }
367 /*}}}*/
368 // CacheGenerator::NewPackage - Add a new package /*{{{*/
369 // ---------------------------------------------------------------------
370 /* This creates a new package structure and adds it to the hash table */
371 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
372 const string &Arch) {
373 pkgCache::GrpIterator Grp;
374 if (unlikely(NewGroup(Grp, Name) == false))
375 return false;
376
377 Pkg = Grp.FindPkg(Arch);
378 if (Pkg.end() == false)
379 return true;
380
381 // Get a structure
382 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
383 if (unlikely(Package == 0))
384 return false;
385 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
386
387 // Insert it into the hash table
388 unsigned long const Hash = Cache.Hash(Name);
389 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
390 Cache.HeaderP->PkgHashTable[Hash] = Package;
391
392 // remember the packages in the group
393 Grp->FirstPackage = Package;
394 if (Grp->LastPackage == 0)
395 Grp->LastPackage = Package;
396
397 // Set the name, arch and the ID
398 Pkg->Name = Grp->Name;
399 Pkg->Group = Grp.Index();
400 Pkg->Arch = WriteUniqString(Arch.c_str());
401 if (unlikely(Pkg->Arch == 0))
402 return false;
403 Pkg->ID = Cache.HeaderP->PackageCount++;
404
405 return true;
406 }
407 /*}}}*/
408 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
409 // ---------------------------------------------------------------------
410 /* */
411 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
412 ListParser &List)
413 {
414 if (CurrentFile == 0)
415 return true;
416
417 // Get a structure
418 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
419 if (VerFile == 0)
420 return 0;
421
422 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
423 VF->File = CurrentFile - Cache.PkgFileP;
424
425 // Link it to the end of the list
426 map_ptrloc *Last = &Ver->FileList;
427 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
428 Last = &V->NextFile;
429 VF->NextFile = *Last;
430 *Last = VF.Index();
431
432 VF->Offset = List.Offset();
433 VF->Size = List.Size();
434 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
435 Cache.HeaderP->MaxVerFileSize = VF->Size;
436 Cache.HeaderP->VerFileCount++;
437
438 return true;
439 }
440 /*}}}*/
441 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
442 // ---------------------------------------------------------------------
443 /* This puts a version structure in the linked list */
444 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
445 const string &VerStr,
446 unsigned long Next)
447 {
448 // Get a structure
449 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
450 if (Version == 0)
451 return 0;
452
453 // Fill it in
454 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
455 Ver->NextVer = Next;
456 Ver->ID = Cache.HeaderP->VersionCount++;
457 Ver->VerStr = Map.WriteString(VerStr);
458 if (Ver->VerStr == 0)
459 return 0;
460
461 return Version;
462 }
463 /*}}}*/
464 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
465 // ---------------------------------------------------------------------
466 /* */
467 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
468 ListParser &List)
469 {
470 if (CurrentFile == 0)
471 return true;
472
473 // Get a structure
474 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
475 if (DescFile == 0)
476 return false;
477
478 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
479 DF->File = CurrentFile - Cache.PkgFileP;
480
481 // Link it to the end of the list
482 map_ptrloc *Last = &Desc->FileList;
483 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
484 Last = &D->NextFile;
485
486 DF->NextFile = *Last;
487 *Last = DF.Index();
488
489 DF->Offset = List.Offset();
490 DF->Size = List.Size();
491 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
492 Cache.HeaderP->MaxDescFileSize = DF->Size;
493 Cache.HeaderP->DescFileCount++;
494
495 return true;
496 }
497 /*}}}*/
498 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
499 // ---------------------------------------------------------------------
500 /* This puts a description structure in the linked list */
501 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
502 const string &Lang,
503 const MD5SumValue &md5sum,
504 map_ptrloc Next)
505 {
506 // Get a structure
507 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
508 if (Description == 0)
509 return 0;
510
511 // Fill it in
512 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
513 Desc->NextDesc = Next;
514 Desc->ID = Cache.HeaderP->DescriptionCount++;
515 Desc->language_code = Map.WriteString(Lang);
516 Desc->md5sum = Map.WriteString(md5sum.Value());
517 if (Desc->language_code == 0 || Desc->md5sum == 0)
518 return 0;
519
520 return Description;
521 }
522 /*}}}*/
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery. With multiple configured
   architectures it generates the implicit dependencies between the
   per-architecture instances of each group so that only compatible
   instances can be installed at the same time. */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
	 string const PkgName = G.Name();
	 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
	    // "all" instances are handled via the concrete arch packages
	    if (strcmp(P.Arch(),"all") == 0)
	       continue;
	    pkgCache::PkgIterator allPkg;
	    for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
	       string const Arch = V.Arch(true);
	       map_ptrloc *OldDepLast = NULL;
	       /* MultiArch handling introduces a lot of implicit Dependencies:
		  - MultiArch: same → Co-Installable if they have the same version
		  - Architecture: all → Need to be Co-Installable for internal reasons
		  - All others conflict with all other group members */
	       bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
				       V->MultiArch == pkgCache::Version::Same);
	       // Resolve the ":all" pseudo package lazily, once per version
	       if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
		  allPkg = G.FindPkg("all");
	       for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
		  if (*A == Arch)
		     continue;
		  /* We allow only one installed arch at the time
		     per group, therefore each group member conflicts
		     with all other group members */
		  pkgCache::PkgIterator D = G.FindPkg(*A);
		  if (D.end() == true)
		     continue;
		  if (coInstall == true) {
		     // Replaces: ${self}:other ( << ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::Replaces,
				OldDepLast);
		     // Breaks: ${self}:other (!= ${binary:Version})
		     // expressed as a pair of strict << and >> Breaks
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     if (V->MultiArch == pkgCache::Version::All) {
			// Depend on ${self}:all which does depend on nothing
			NewDepends(allPkg, V, V.VerStr(),
				   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
				   OldDepLast);
		     }
		  } else {
		     // Conflicts: ${self}:other
		     NewDepends(D, V, "",
				pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
				OldDepLast);
		  }
	       }
	    }
	 }
      }
   }
   return true;
}
									/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   // NOTE(review): the sharing probe is commented out, so the
   // 'Dep->Version == 0' test below relies on freshly allocated cache
   // memory being zeroed -- confirm Map.Allocate guarantees that.
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }

   // Link it to the package (reverse-depends list of the target)
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   // NOTE(review): OldDepLast is passed by value, so this update of the
   // cached end-of-list pointer is lost when the function returns.
   // Callers wanting the caching effect would need a pointer-to-pointer
   // parameter here -- a signature change, so only flagged for now.
   OldDepLast = &Dep->NextDepends;

   return true;
}
									/*}}}*/
644 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
645 // ---------------------------------------------------------------------
646 /* This creates a Group and the Package to link this dependency to if
647 needed and handles also the caching of the old endpoint */
648 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
649 const string &PackageName,
650 const string &Arch,
651 const string &Version,
652 unsigned int Op,
653 unsigned int Type)
654 {
655 pkgCache::GrpIterator Grp;
656 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
657 return false;
658
659 // Locate the target package
660 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
661 if (Pkg.end() == true) {
662 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
663 return false;
664 }
665
666 // Is it a file dependency?
667 if (unlikely(PackageName[0] == '/'))
668 FoundFileDeps = true;
669
670 /* Caching the old end point speeds up generation substantially */
671 if (OldDepVer != Ver) {
672 OldDepLast = NULL;
673 OldDepVer = Ver;
674 }
675
676 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
677 }
678 /*}}}*/
679 // ListParser::NewProvides - Create a Provides element /*{{{*/
680 // ---------------------------------------------------------------------
681 /* */
682 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
683 const string &PkgName,
684 const string &PkgArch,
685 const string &Version)
686 {
687 pkgCache &Cache = Owner->Cache;
688
689 // We do not add self referencing provides
690 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
691 return true;
692
693 // Get a structure
694 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
695 if (unlikely(Provides == 0))
696 return false;
697 Cache.HeaderP->ProvidesCount++;
698
699 // Fill it in
700 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
701 Prv->Version = Ver.Index();
702 Prv->NextPkgProv = Ver->ProvidesList;
703 Ver->ProvidesList = Prv.Index();
704 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
705 return false;
706
707 // Locate the target package
708 pkgCache::PkgIterator Pkg;
709 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
710 return false;
711
712 // Link it to the package
713 Prv->ParentPkg = Pkg.Index();
714 Prv->NextProvides = Pkg->ProvidesList;
715 Pkg->ProvidesList = Prv.Index();
716
717 return true;
718 }
719 /*}}}*/
720 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
721 // ---------------------------------------------------------------------
722 /* This is used to select which file is to be associated with all newly
723 added versions. The caller is responsible for setting the IMS fields. */
724 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
725 const pkgIndexFile &Index,
726 unsigned long Flags)
727 {
728 // Get some space for the structure
729 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
730 if (CurrentFile == Cache.PkgFileP)
731 return false;
732
733 // Fill it in
734 CurrentFile->FileName = Map.WriteString(File);
735 CurrentFile->Site = WriteUniqString(Site);
736 CurrentFile->NextFile = Cache.HeaderP->FileList;
737 CurrentFile->Flags = Flags;
738 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
739 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
740 PkgFileName = File;
741 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
742 Cache.HeaderP->PackageFileCount++;
743
744 if (CurrentFile->FileName == 0)
745 return false;
746
747 if (Progress != 0)
748 Progress->SubProgress(Index.Size());
749 return true;
750 }
751 /*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number. The strings live in a sorted singly
   linked list inside the cache; a small transient hash table caches
   the most recently used entries. */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   // Bucket selection only looks at the first two characters of S
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search the sorted list for an insertion point (or an exact match)
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }

   // Match: remember it in the hash table and return the old handle
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure, splicing the new item in before I
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
// CheckValidity - Check that a cache is up-to-date	 		/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. On success the mapped cache is optionally handed
   back through OutMap. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
      return false;

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   // An unreadable or empty cache counts as "not valid", not as an error
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
	 continue;

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
	 return false;

      Visited[File->ID] = true;
   }

   // A cache entry that no index file claimed makes the cache stale
   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
	 return false;

   if (_error->PendingError() == true)
   {
      _error->Discard();
      return false;
   }

   // Hand the still-mapped cache to the caller if requested
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
865 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
866 // ---------------------------------------------------------------------
867 /* Size is kind of an abstract notion that is only used for the progress
868 meter */
869 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
870 {
871 unsigned long TotalSize = 0;
872 for (; Start != End; Start++)
873 {
874 if ((*Start)->HasPackages() == false)
875 continue;
876 TotalSize += (*Start)->Size();
877 }
878 return TotalSize;
879 }
880 /*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* Merges every existing package index between Start and End into the
   generator, reporting overall progress. If any file dependencies were
   found during the merge, all indexes are parsed a second time to
   collect the file provides for them. */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;

      if ((*I)->Exists() == false)
	 continue;

      // Refuse to merge the same index twice
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }

   // Second pass: resolve the file dependencies seen during the merge
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }

   return true;
}
									/*}}}*/
931 // MakeStatusCache - Construct the status cache /*{{{*/
932 // ---------------------------------------------------------------------
933 /* This makes sure that the status cache (the cache that has all
934 index files from the sources list and all local ones) is ready
935 to be mmaped. If OutMap is not zero then a MMap object representing
936 the cache will be stored there. This is pretty much mandetory if you
937 are using AllowMem. AllowMem lets the function be run as non-root
938 where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   // Upper bound (bytes) on the generated cache map.
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
   
   // Flatten the source list into a single vector of index files.
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
	i != List.end();
	i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
	 Files.push_back (*j);
   }
   
   // Files[0..EndOfSource) are sources.list indexes; everything the
   // system appends after this point is a local status file.
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;
   
   // Decide if we can write to the files..
   string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
   
   // Decide if we can write to the cache
   // (checked via write access to the containing directory)
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   
   // Without write access and without permission to build in memory
   // there is nothing useful we can do.
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
   
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   
   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      return true;
   }
   
   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // Build directly into the on-disk cache file.
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
   }
   
   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)
	 return false;
      
      // Only the status files still need merging.
      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
      
      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
      
      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      // Source cache is stale or absent: regenerate from scratch.
      TotalSize = ComputeSize(Files.begin(),Files.end());
      
      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;
      
      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;
	 
	 fchmod(SCacheF.Fd(),0644);
	 
	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();
	 
	 // Write out the proper header: Dirty is cleared only for the
	 // saved copy, then restored on the in-memory map we keep
	 // building the status data into.
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }
      
      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
      
      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   
   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // File-backed build: drop the writable map and hand the caller
	 // a fresh read-only mapping of the finished cache file.
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 // Memory build: transfer ownership of the map to the caller.
	 *OutMap = Map.UnGuard();
      }
   }
   
   return true;
}
1091 /*}}}*/
1092 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1093 // ---------------------------------------------------------------------
1094 /* */
1095 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1096 {
1097 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1098 vector<pkgIndexFile *> Files;
1099 unsigned long EndOfSource = Files.size();
1100 if (_system->AddStatusFiles(Files) == false)
1101 return false;
1102
1103 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1104 unsigned long CurrentSize = 0;
1105 unsigned long TotalSize = 0;
1106
1107 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1108
1109 // Build the status cache
1110 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1111 pkgCacheGenerator Gen(Map.Get(),&Progress);
1112 if (_error->PendingError() == true)
1113 return false;
1114 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1115 Files.begin()+EndOfSource,Files.end()) == false)
1116 return false;
1117
1118 // FIXME: move me to a better place
1119 Gen.FinishCache(Progress);
1120
1121 if (_error->PendingError() == true)
1122 return false;
1123 *OutMap = Map.UnGuard();
1124
1125 return true;
1126 }
1127 /*}}}*/