* methods/http.{cc,h}:
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that is written to the disk.
   The destructor clears it again only after a successful final sync,
   so a crash mid-generation leaves the cache marked invalid. */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   // Reset the transient string-dedup buckets used by WriteUniqString.
   memset(UniqHash,0,sizeof(UniqHash));
   
   if (_error->PendingError() == true)
      return;
   
   if (Map.Size() == 0)
   {
      // Brand-new map: the header lives at offset 0.
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;
      
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      
      // Starting header: record the version system and host architecture
      // so a later CheckValidity can detect mismatched caches.
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      // Refuse to extend a cache built with a different version system.
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }
   
   // Mark dirty up front and push just the header to disk.
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 advoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine.
   If OutVer is non-NULL only a single record is read and returned. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      // A record without a package name is a hard parse failure.
      if (PackageName.empty() == true)
	 return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
	 genArch = APT::Configuration::getArchitectures();
	 // With more than one configured architecture an extra "all"
	 // pseudo-package is recorded as well.
	 if (genArch.size() != 1)
	    genArch.push_back("all");
      } else
	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)
      {
	 // Get a pointer to the package structure
	 pkgCache::PkgIterator Pkg;
	 if (NewPackage(Pkg, PackageName, *arch) == false)
	    return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
	 Counter++;
	 if (Counter % 100 == 0 && Progress != 0)
	    Progress->Progress(List.Offset());

	 /* Get a pointer to the version structure. We know the list is sorted
	    so we use that fact in the search. Insertion of new versions is
	    done with correct sorting */
	 string Version = List.Version();
	 if (Version.empty() == true)
	 {
	    // Version-less records (e.g. Translation files) can only
	    // contribute description data to already-known versions.
	    // we first process the package, then the descriptions
	    // (this has the bonus that we get MMap error when we run out
	    //  of MMap space)
	    if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
				    PackageName.c_str());

	    // Find the right version to write the description
	    MD5SumValue CurMd5 = List.Description_md5();
	    pkgCache::VerIterator Ver = Pkg.VersionList();
	    map_ptrloc *LastVer = &Pkg->VersionList;

	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       pkgCache::DescIterator Desc = Ver.DescriptionList();
	       map_ptrloc *LastDesc = &Ver->DescriptionList;
	       bool duplicate=false;

	       // don't add a new description if we have one for the given
	       // md5 && language
	       for ( ; Desc.end() == false; Desc++)
		  if (MD5SumValue(Desc.md5()) == CurMd5 &&
		      Desc.LanguageCode() == List.DescriptionLanguage())
		     duplicate=true;
	       if(duplicate)
		  continue;

	       // Append the translation behind the first description with
	       // a matching md5 (LastDesc tracks the link to patch).
	       for (Desc = Ver.DescriptionList();
		    Desc.end() == false;
		    LastDesc = &Desc->NextDesc, Desc++)
	       {
		  if (MD5SumValue(Desc.md5()) == CurMd5)
		  {
		     // Add new description
		     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
		     Desc->ParentPkg = Pkg.Index();

		     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
			return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
		     break;
		  }
	       }
	    }

	    continue;
	 }

	 // Search the sorted version list for the insertion point.
	 pkgCache::VerIterator Ver = Pkg.VersionList();
	 map_ptrloc *LastVer = &Pkg->VersionList;
	 int Res = 1;
	 unsigned long const Hash = List.VersionHash();
	 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	 {
	    Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	    // Version is higher as current version - insert here
	    if (Res > 0)
	       break;
	    // Versionstrings are equal - is hash also equal?
	    if (Res == 0 && Ver->Hash == Hash)
	       break;
	    // proceed with the next till we have either the right
	    // or we found another version (which will be lower)
	 }

	 /* We already have a version for this item, record that we saw it */
	 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
	 {
	    if (List.UsePackage(Pkg,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				    PackageName.c_str());

	    if (NewFileVer(Ver,List) == false)
	       return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				    PackageName.c_str());

	    // Read only a single record and return
	    if (OutVer != 0)
	    {
	       *OutVer = Ver;
	       FoundFileDeps |= List.HasFileDeps();
	       return true;
	    }

	    continue;
	 }

	 // Add a new version, spliced in at the insertion point found above.
	 *LastVer = NewVersion(Ver,Version,*LastVer);
	 Ver->ParentPkg = Pkg.Index();
	 Ver->Hash = Hash;

	 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
				 PackageName.c_str());

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
				 PackageName.c_str());

	 // Read only a single record and return
	 if (OutVer != 0)
	 {
	    *OutVer = Ver;
	    FoundFileDeps |= List.HasFileDeps();
	    return true;
	 }

	 /* Record the Description data. Description data always exist in
	    Packages and Translation-* files. */
	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;

	 // Skip to the end of description set
	 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   // The ID fields are fixed-width; refuse to overflow them.
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
   return true;
}
									/*}}}*/
287 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
288 // ---------------------------------------------------------------------
289 /* If we found any file depends while parsing the main list we need to
290 resolve them. Since it is undesired to load the entire list of files
291 into the cache as virtual packages we do a two stage effort. MergeList
292 identifies the file depends and this creates Provdies for them by
293 re-parsing all the indexs. */
294 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
295 {
296 List.Owner = this;
297
298 unsigned int Counter = 0;
299 while (List.Step() == true)
300 {
301 string PackageName = List.Package();
302 if (PackageName.empty() == true)
303 return false;
304 string Version = List.Version();
305 if (Version.empty() == true)
306 continue;
307
308 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
309 if (Pkg.end() == true)
310 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
311 PackageName.c_str());
312 Counter++;
313 if (Counter % 100 == 0 && Progress != 0)
314 Progress->Progress(List.Offset());
315
316 unsigned long Hash = List.VersionHash();
317 pkgCache::VerIterator Ver = Pkg.VersionList();
318 for (; Ver.end() == false; Ver++)
319 {
320 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
321 {
322 if (List.CollectFileProvides(Cache,Ver) == false)
323 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
324 break;
325 }
326 }
327
328 if (Ver.end() == true)
329 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
330 }
331
332 return true;
333 }
334 /*}}}*/
335 // CacheGenerator::NewGroup - Add a new group /*{{{*/
336 // ---------------------------------------------------------------------
337 /* This creates a new group structure and adds it to the hash table */
338 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
339 {
340 Grp = Cache.FindGrp(Name);
341 if (Grp.end() == false)
342 return true;
343
344 // Get a structure
345 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
346 if (unlikely(Group == 0))
347 return false;
348
349 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
350 Grp->Name = Map.WriteString(Name);
351 if (unlikely(Grp->Name == 0))
352 return false;
353
354 // Insert it into the hash table
355 unsigned long const Hash = Cache.Hash(Name);
356 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
357 Cache.HeaderP->GrpHashTable[Hash] = Group;
358
359 Grp->ID = Cache.HeaderP->GroupCount++;
360 return true;
361 }
362 /*}}}*/
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table.
   Packages of the same group are kept adjacent on the hash chain so a
   group can be traversed without consulting the table again. */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
				   const string &Arch) {
   // Make sure the (arch-independent) group record exists first.
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   // Already have this name:arch pair - reuse it.
   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert the package into our package list
   if (Grp->FirstPackage == 0) // the group is new
   {
      // Insert it into the hash table
      unsigned long const Hash = Cache.Hash(Name);
      Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
      Cache.HeaderP->PkgHashTable[Hash] = Package;
      Grp->FirstPackage = Package;
   }
   else // Group the Packages together
   {
      // this package is the new last package: splice it in directly
      // behind the group's previous last member so siblings stay adjacent
      pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
      Pkg->NextPackage = LastPkg->NextPackage;
      LastPkg->NextPackage = Package;
   }
   Grp->LastPackage = Package;

   // Set the name, arch and the ID (name string is shared with the group)
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
									/*}}}*/
411 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
412 // ---------------------------------------------------------------------
413 /* */
414 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
415 ListParser &List)
416 {
417 if (CurrentFile == 0)
418 return true;
419
420 // Get a structure
421 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
422 if (VerFile == 0)
423 return 0;
424
425 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
426 VF->File = CurrentFile - Cache.PkgFileP;
427
428 // Link it to the end of the list
429 map_ptrloc *Last = &Ver->FileList;
430 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
431 Last = &V->NextFile;
432 VF->NextFile = *Last;
433 *Last = VF.Index();
434
435 VF->Offset = List.Offset();
436 VF->Size = List.Size();
437 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
438 Cache.HeaderP->MaxVerFileSize = VF->Size;
439 Cache.HeaderP->VerFileCount++;
440
441 return true;
442 }
443 /*}}}*/
444 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
445 // ---------------------------------------------------------------------
446 /* This puts a version structure in the linked list */
447 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
448 const string &VerStr,
449 unsigned long Next)
450 {
451 // Get a structure
452 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
453 if (Version == 0)
454 return 0;
455
456 // Fill it in
457 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
458 Ver->NextVer = Next;
459 Ver->ID = Cache.HeaderP->VersionCount++;
460 Ver->VerStr = Map.WriteString(VerStr);
461 if (Ver->VerStr == 0)
462 return 0;
463
464 return Version;
465 }
466 /*}}}*/
467 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
468 // ---------------------------------------------------------------------
469 /* */
470 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
471 ListParser &List)
472 {
473 if (CurrentFile == 0)
474 return true;
475
476 // Get a structure
477 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
478 if (DescFile == 0)
479 return false;
480
481 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
482 DF->File = CurrentFile - Cache.PkgFileP;
483
484 // Link it to the end of the list
485 map_ptrloc *Last = &Desc->FileList;
486 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
487 Last = &D->NextFile;
488
489 DF->NextFile = *Last;
490 *Last = DF.Index();
491
492 DF->Offset = List.Offset();
493 DF->Size = List.Size();
494 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
495 Cache.HeaderP->MaxDescFileSize = DF->Size;
496 Cache.HeaderP->DescFileCount++;
497
498 return true;
499 }
500 /*}}}*/
501 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
502 // ---------------------------------------------------------------------
503 /* This puts a description structure in the linked list */
504 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
505 const string &Lang,
506 const MD5SumValue &md5sum,
507 map_ptrloc Next)
508 {
509 // Get a structure
510 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
511 if (Description == 0)
512 return 0;
513
514 // Fill it in
515 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
516 Desc->NextDesc = Next;
517 Desc->ID = Cache.HeaderP->DescriptionCount++;
518 Desc->language_code = Map.WriteString(Lang);
519 Desc->md5sum = Map.WriteString(md5sum.Value());
520 if (Desc->language_code == 0 || Desc->md5sum == 0)
521 return 0;
522
523 return Description;
524 }
525 /*}}}*/
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery: with more than one configured
   architecture, implicit inter-architecture dependencies are generated
   for every version of every multi-arch package group. */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress)
{
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1)
   {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
      {
	 string const PkgName = G.Name();
	 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
	 {
	    // The "all" pseudo-package never gets implicit relations itself.
	    if (strcmp(P.Arch(),"all") == 0)
	       continue;
	    pkgCache::PkgIterator allPkg;
	    for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
	    {
	       string const Arch = V.Arch(true);
	       // Fresh dependency-tail cache per version (see NewDepends).
	       map_ptrloc *OldDepLast = NULL;
	       /* MultiArch handling introduces a lot of implicit Dependencies:
		  - MultiArch: same → Co-Installable if they have the same version
		  - Architecture: all → Need to be Co-Installable for internal reasons
		  - All others conflict with all other group members */
	       bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
				       V->MultiArch == pkgCache::Version::Same);
	       // Look the :all member up only once per package.
	       // NOTE(review): if FindPkg("all") fails, allPkg stays end()
	       // yet is passed to NewDepends below - confirm an "all"
	       // member always exists for MultiArch::All versions.
	       if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
		  allPkg = G.FindPkg("all");
	       for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
	       {
		  if (*A == Arch)
		     continue;
		  /* We allow only one installed arch at the time
		     per group, therefore each group member conflicts
		     with all other group members */
		  pkgCache::PkgIterator D = G.FindPkg(*A);
		  if (D.end() == true)
		     continue;
		  if (coInstall == true)
		  {
		     // Replaces: ${self}:other ( << ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::Replaces,
				OldDepLast);
		     // Breaks: ${self}:other (!= ${binary:Version})
		     // expressed as the pair (<<) + (>>) below
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
				OldDepLast);
		     if (V->MultiArch == pkgCache::Version::All)
		     {
			// Depend on ${self}:all which does depend on nothing
			NewDepends(allPkg, V, V.VerStr(),
				   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
				   OldDepLast);
		     }
		  } else {
			// Conflicts: ${self}:other
			NewDepends(D, V, "",
				   pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
				   OldDepLast);
		  }
	       }
	    }
	 }
      }
   }
   return true;
}
									/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element	/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;
   
   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;
   
   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      // With the sharing probe above disabled, this always writes a fresh
      // copy of the version string.  NOTE(review): assumes Map.Allocate
      // zero-fills Dep->Version - confirm; otherwise an uninitialized
      // field is read here.
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }
      
   // Link it into the target package's reverse-depends chain
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();
   
   // Do we know where to link the Dependency to?  If the caller supplied
   // no cached tail pointer, walk the parent version's list to its end.
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }
   
   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   // NOTE(review): OldDepLast is passed by value, so this tail update is
   // lost on return - the caller's cached end pointer never advances and
   // every call re-walks the list (later APT made this parameter a
   // reference to make the caching effective).
   OldDepLast = &Dep->NextDepends;
   
   return true;
}
									/*}}}*/
655 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
656 // ---------------------------------------------------------------------
657 /* This creates a Group and the Package to link this dependency to if
658 needed and handles also the caching of the old endpoint */
659 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
660 const string &PackageName,
661 const string &Arch,
662 const string &Version,
663 unsigned int Op,
664 unsigned int Type)
665 {
666 pkgCache::GrpIterator Grp;
667 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
668 return false;
669
670 // Locate the target package
671 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
672 if (Pkg.end() == true) {
673 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
674 return false;
675 }
676
677 // Is it a file dependency?
678 if (unlikely(PackageName[0] == '/'))
679 FoundFileDeps = true;
680
681 /* Caching the old end point speeds up generation substantially */
682 if (OldDepVer != Ver) {
683 OldDepLast = NULL;
684 OldDepVer = Ver;
685 }
686
687 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
688 }
689 /*}}}*/
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* Links a (possibly versioned) Provides entry both into the providing
   version's list and into the provided package's list. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PkgName,
						const string &PkgArch,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;
   
   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;
   
   // Fill it in and chain it onto the version's provides list.
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;
   
   // Locate the target package (creating it if needed).
   // NOTE(review): NewPackage may allocate in the map; if the mapping can
   // move when it grows, the Prv iterator obtained above would dangle -
   // confirm DynamicMMap growth behaviour for this code base.
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;
   
   // Link it to the provided package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();
   
   return true;
}
									/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure.  Allocate returns 0 on failure,
   // which leaves CurrentFile pointing at PkgFileP itself - the check
   // below detects exactly that.
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;
   
   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   // Push the new file onto the head of the global file list.
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   // NOTE(review): this failure check runs after the record was already
   // linked into the file list, so a half-initialized entry stays chained
   // on error (the pending error renders the cache unusable anyway).
   if (CurrentFile->FileName == 0)
      return false;
   
   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
									/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number.  A sorted linked list of StringItems
   holds the canonical copies; a tiny in-memory bucket array short-cuts
   repeated lookups of the same string. */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   // Bucket choice depends only on the first two characters.
   // NOTE(review): S[1] is read unconditionally - for an empty string this
   // reads past the terminator; callers appear to pass non-empty labels
   // (arches, site names, index types), but confirm.
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 && 
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;
   
   // Search for an insertion point: the list is kept sorted, so stop at
   // the first entry comparing >= the new string.
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem, 
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }
   
   // Match: remember the hit in the bucket for the next lookup.
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }
   
   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;
   
   // Fill in the structure: splice the new item in at the insertion point
   // found above, then write out the string itself.
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;
   
   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files.  On success the mapped cache is handed back through
   OutMap; on any mismatch the function returns false so the caller
   rebuilds. */
static bool CheckValidity(const string &CacheFile, FileIterator Start, 
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
	 std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }
   
   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
	 std::clog << "Errors are pending or Map is empty()" << std::endl;
      // Swallow the mapping errors - an invalid cache is rebuilt, not fatal.
      _error->Discard();
      return false;
   }
   
   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
	 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
	    std::clog << "Has NO packages" << std::endl;
	 continue;
      }
    
      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
         if (Debug == true)
	    std::clog << "file doesn't exist" << std::endl;
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
	 if (Debug == true)
	    std::clog << "FindInCache returned end-Pointer" << std::endl;
	 return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
	 std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }
   
   // Any cache file entry not matched by an index file means the cache
   // carries stale data and must be rebuilt.
   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
	 if (Debug == true)
	    std::clog << "File with ID" << I << " wasn't visited" << std::endl;
	 return false;
      }
   
   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
	 std::clog << "Validity failed because of pending errors:" << std::endl;
	 _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }
   
   // Hand the still-open mapping to the caller (UnGuard keeps it alive).
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
906 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
907 // ---------------------------------------------------------------------
908 /* Size is kind of an abstract notion that is only used for the progress
909 meter */
910 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
911 {
912 unsigned long TotalSize = 0;
913 for (; Start != End; Start++)
914 {
915 if ((*Start)->HasPackages() == false)
916 continue;
917 TotalSize += (*Start)->Size();
918 }
919 return TotalSize;
920 }
921 /*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* Merges every package-bearing index in [Start,End) into the generator,
   then - if any file dependencies were encountered - runs a second pass
   to collect the file provides they refer to. */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;
      
      if ((*I)->Exists() == false)
	 continue;

      // Already merged earlier in this run - warn, don't re-merge.
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }
      
      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;
      
      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }   

   // Second pass: resolve file dependencies found during the merge by
   // re-reading the same indexes for matching file provides.
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }
   
   return true;
}
									/*}}}*/
972 // MakeStatusCache - Construct the status cache /*{{{*/
973 // ---------------------------------------------------------------------
974 /* This makes sure that the status cache (the cache that has all
975 index files from the sources list and all local ones) is ready
976 to be mmaped. If OutMap is not zero then a MMap object representing
977 the cache will be stored there. This is pretty much mandetory if you
978 are using AllowMem. AllowMem lets the function be run as non-root
979 where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // Hard ceiling for the generated cache; the mmap cannot grow past it.
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   // Flatten the sources list into one vector of index files.
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }

   // Everything before this index came from sources.list; the system's
   // status files are appended after it.
   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // ensure the cache directory exists
   if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
   {
      string dir = _config->FindDir("Dir::Cache");
      size_t const len = dir.size();
      // Strip a trailing "/apt/" component so we create the parent first.
      if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
	 dir = dir.substr(0, len - 5);
      if (CacheFile.empty() == false)
	 CreateDirectory(dir, flNotFile(CacheFile));
      if (SrcCacheFile.empty() == false)
	 CreateDirectory(dir, flNotFile(SrcCacheFile));
   }

   // Decide if we can write to the cache
   // (writability of the directory, not of any existing file)
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   // With AllowMem unset and no write access there is nowhere to build.
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   // CheckValidity also hands the mmap of the valid cache back via OutMap.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
	 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
	 std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // Build directly into the final file through a shared mapping.
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
      if (Debug == true)
	 std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
	 std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      // RawAllocate signals failure by returning 0 with an error queued.
      if ((alloc == 0 && _error->PendingError())
	   || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			   SCacheF.Size()) == false)
	 return false;

      // Only the status files remain to be merged on top.
      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;

      // Write it back
      // (snapshot the source-only cache so the next run can preload it)
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;

	 fchmod(SCacheF.Fd(),0644);

	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();

	 // Write out the proper header
	 // Clear Dirty only for the on-disk copy; the in-memory cache is
	 // still being extended with the status files below, so restore it.
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // File-backed build: drop the dynamic map and hand the caller a
	 // plain read mapping of the finished cache file.
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 // Memory build: transfer ownership of the map to the caller.
	 *OutMap = Map.UnGuard();
      }
   }

   return true;
}
1162 /*}}}*/
1163 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1164 // ---------------------------------------------------------------------
1165 /* */
1166 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1167 {
1168 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1169 vector<pkgIndexFile *> Files;
1170 unsigned long EndOfSource = Files.size();
1171 if (_system->AddStatusFiles(Files) == false)
1172 return false;
1173
1174 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1175 unsigned long CurrentSize = 0;
1176 unsigned long TotalSize = 0;
1177
1178 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1179
1180 // Build the status cache
1181 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1182 pkgCacheGenerator Gen(Map.Get(),&Progress);
1183 if (_error->PendingError() == true)
1184 return false;
1185 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1186 Files.begin()+EndOfSource,Files.end()) == false)
1187 return false;
1188
1189 // FIXME: move me to a better place
1190 Gen.FinishCache(Progress);
1191
1192 if (_error->PendingError() == true)
1193 return false;
1194 *OutMap = Map.UnGuard();
1195
1196 return true;
1197 }
1198 /*}}}*/