Split ListParser::NewDepends into two methods to use these new methods
[ntk/apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 #include <system.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that it is written to the disk */
43 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46 {
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82 }
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 avoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
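// Illustrative only -- a minimal sketch of the generator lifecycle described
// above, assuming an in-memory map and some concrete ListParser; "MyListParser"
// is a hypothetical stand-in.  The constructor marks the cache dirty, the
// destructor syncs the map and clears the flag again.
#if 0
   DynamicMMap Map(0,24*1024*1024);            // anonymous, in-memory map
   {
      pkgCacheGenerator Gen(&Map,0);           // no progress reporting
      if (_error->PendingError() == true)
         return false;

      MyListParser Parser;                     // hypothetical concrete parser
      if (Gen.MergeList(Parser,NULL) == false)
         return false;
   }                                           // ~pkgCacheGenerator clears Dirty
#endif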
99 // CacheGenerator::MergeList - Merge the package list /*{{{*/
100 // ---------------------------------------------------------------------
101 /* This drives the generation of the entries in the cache. Each iteration
102 of the loop processes a single package record from the underlying parse engine. */
103 bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105 {
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 // Get a pointer to the package structure
112 string const PackageName = List.Package();
113 if (PackageName.empty() == true)
114 return false;
115
116 pkgCache::PkgIterator Pkg;
117 if (NewPackage(Pkg, PackageName, List.Architecture()) == false)
118 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
119 Counter++;
120 if (Counter % 100 == 0 && Progress != 0)
121 Progress->Progress(List.Offset());
122
123 /* Get a pointer to the version structure. We know the list is sorted
124 so we use that fact in the search. Insertion of new versions is
125 done with correct sorting */
126 string Version = List.Version();
127 if (Version.empty() == true)
128 {
129 // we first process the package, then the descriptions
130 // (this has the bonus that we get an MMap error when we run out
131 // of MMap space)
132 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
133 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
134 PackageName.c_str());
135
136 // Find the right version to write the description
137 MD5SumValue CurMd5 = List.Description_md5();
138 pkgCache::VerIterator Ver = Pkg.VersionList();
139 map_ptrloc *LastVer = &Pkg->VersionList;
140
141 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
142 {
143 pkgCache::DescIterator Desc = Ver.DescriptionList();
144 map_ptrloc *LastDesc = &Ver->DescriptionList;
145 bool duplicate=false;
146
147 // don't add a new description if we have one for the given
148 // md5 && language
149 for ( ; Desc.end() == false; Desc++)
150 if (MD5SumValue(Desc.md5()) == CurMd5 &&
151 Desc.LanguageCode() == List.DescriptionLanguage())
152 duplicate=true;
153 if(duplicate)
154 continue;
155
156 for (Desc = Ver.DescriptionList();
157 Desc.end() == false;
158 LastDesc = &Desc->NextDesc, Desc++)
159 {
160 if (MD5SumValue(Desc.md5()) == CurMd5)
161 {
162 // Add new description
163 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
164 Desc->ParentPkg = Pkg.Index();
165
166 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
167 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
168 break;
169 }
170 }
171 }
172
173 continue;
174 }
175
176 pkgCache::VerIterator Ver = Pkg.VersionList();
177 map_ptrloc *LastVer = &Pkg->VersionList;
178 int Res = 1;
179 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
180 {
181 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
182 if (Res >= 0)
183 break;
184 }
185
186 /* We already have a version for this item, record that we
187 saw it */
188 unsigned long Hash = List.VersionHash();
189 if (Res == 0 && Ver->Hash == Hash)
190 {
191 if (List.UsePackage(Pkg,Ver) == false)
192 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
193 PackageName.c_str());
194
195 if (NewFileVer(Ver,List) == false)
196 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
197 PackageName.c_str());
198
199 // Read only a single record and return
200 if (OutVer != 0)
201 {
202 *OutVer = Ver;
203 FoundFileDeps |= List.HasFileDeps();
204 return true;
205 }
206
207 continue;
208 }
209
210 // Skip to the end of the same version set.
211 if (Res == 0)
212 {
213 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
214 {
215 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
216 if (Res != 0)
217 break;
218 }
219 }
220
221 // Add a new version
222 *LastVer = NewVersion(Ver,Version,*LastVer);
223 Ver->ParentPkg = Pkg.Index();
224 Ver->Hash = Hash;
225
226 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
227 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
228 PackageName.c_str());
229
230 if (List.UsePackage(Pkg,Ver) == false)
231 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
232 PackageName.c_str());
233
234 if (NewFileVer(Ver,List) == false)
235 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
236 PackageName.c_str());
237
238 // Read only a single record and return
239 if (OutVer != 0)
240 {
241 *OutVer = Ver;
242 FoundFileDeps |= List.HasFileDeps();
243 return true;
244 }
245
246 /* Record the Description data. Description data always exist in
247 Packages and Translation-* files. */
248 pkgCache::DescIterator Desc = Ver.DescriptionList();
249 map_ptrloc *LastDesc = &Ver->DescriptionList;
250
251 // Skip to the end of description set
252 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
253
254 // Add new description
255 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
256 Desc->ParentPkg = Pkg.Index();
257
258 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
259 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
260 }
261
262 FoundFileDeps |= List.HasFileDeps();
263
264 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
265 return _error->Error(_("Wow, you exceeded the number of package "
266 "names this APT is capable of."));
267 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
268 return _error->Error(_("Wow, you exceeded the number of versions "
269 "this APT is capable of."));
270 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
271 return _error->Error(_("Wow, you exceeded the number of descriptions "
272 "this APT is capable of."));
273 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
274 return _error->Error(_("Wow, you exceeded the number of dependencies "
275 "this APT is capable of."));
276 return true;
277 }
278 /*}}}*/
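// Illustrative only -- a sketch of the single-record mode of MergeList,
// assuming a generator "Gen" and a parser "Parser" positioned on exactly one
// stanza: passing a non-NULL OutVer makes MergeList return after the first
// version it adds or matches and hand back an iterator to it.
#if 0
   pkgCache::VerIterator Ver(Gen.GetCache());
   if (Gen.MergeList(Parser,&Ver) == false)
      return false;
   // Ver now refers to the version built from (or matched by) that record
#endif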
279 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
280 // ---------------------------------------------------------------------
281 /* If we found any file depends while parsing the main list we need to
282 resolve them. Since it is undesired to load the entire list of files
283 into the cache as virtual packages we do a two-stage effort. MergeList
284 identifies the file depends and this creates Provides for them by
285 re-parsing all the indexes. */
286 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
287 {
288 List.Owner = this;
289
290 unsigned int Counter = 0;
291 while (List.Step() == true)
292 {
293 string PackageName = List.Package();
294 if (PackageName.empty() == true)
295 return false;
296 string Version = List.Version();
297 if (Version.empty() == true)
298 continue;
299
300 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
301 if (Pkg.end() == true)
302 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
303 PackageName.c_str());
304 Counter++;
305 if (Counter % 100 == 0 && Progress != 0)
306 Progress->Progress(List.Offset());
307
308 unsigned long Hash = List.VersionHash();
309 pkgCache::VerIterator Ver = Pkg.VersionList();
310 for (; Ver.end() == false; Ver++)
311 {
312 if (Ver->Hash == Hash && Version == Ver.VerStr())
313 {
314 if (List.CollectFileProvides(Cache,Ver) == false)
315 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
316 break;
317 }
318 }
319
320 if (Ver.end() == true)
321 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
322 }
323
324 return true;
325 }
326 /*}}}*/
327 // CacheGenerator::NewGroup - Add a new group /*{{{*/
328 // ---------------------------------------------------------------------
329 /* This creates a new group structure and adds it to the hash table */
330 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
331 Grp = Cache.FindGrp(Name);
332 if (Grp.end() == false)
333 return true;
334
335 // Get a structure
336 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
337 if (unlikely(Group == 0))
338 return false;
339
340 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
341 Grp->Name = Map.WriteString(Name);
342 if (unlikely(Grp->Name == 0))
343 return false;
344
345 // Insert it into the hash table
346 unsigned long const Hash = Cache.Hash(Name);
347 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
348 Cache.HeaderP->GrpHashTable[Hash] = Group;
349
350 Cache.HeaderP->GroupCount++;
351
352 return true;
353 }
354 /*}}}*/
355 // CacheGenerator::NewPackage - Add a new package /*{{{*/
356 // ---------------------------------------------------------------------
357 /* This creates a new package structure and adds it to the hash table */
358 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
359 const string &Arch) {
360 pkgCache::GrpIterator Grp;
361 if (unlikely(NewGroup(Grp, Name) == false))
362 return false;
363
364 Pkg = Grp.FindPkg(Arch);
365 if (Pkg.end() == false)
366 return true;
367
368 // Get a structure
369 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
370 if (unlikely(Package == 0))
371 return false;
372 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
373
374 // Insert it into the hash table
375 unsigned long const Hash = Cache.Hash(Name);
376 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
377 Cache.HeaderP->PkgHashTable[Hash] = Package;
378
379 // remember the packages in the group
380 Grp->FirstPackage = Package;
381 if (Grp->LastPackage == 0)
382 Grp->LastPackage = Package;
383
384 // Set the name, arch and the ID
385 Pkg->Name = Grp->Name;
386 Pkg->Group = Grp.Index();
387 Pkg->Arch = WriteUniqString(Arch.c_str());
388 if (unlikely(Pkg->Arch == 0))
389 return false;
390 Pkg->ID = Cache.HeaderP->PackageCount++;
391
392 return true;
393 }
394 /*}}}*/
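// Illustrative only -- sketch of the find-or-create behaviour the two methods
// above give their callers (the package name and architecture are made up):
// both return the existing structure if one is already in the hash table,
// so it is safe to call them unconditionally, as ListParser::NewDepends
// below does.
#if 0
   pkgCache::GrpIterator Grp;
   pkgCache::PkgIterator Pkg;
   if (NewGroup(Grp,"libfoo") == false ||           // finds or creates the group
       NewPackage(Pkg,"libfoo","amd64") == false)   // finds or creates libfoo:amd64
      return false;
#endif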
395 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
396 // ---------------------------------------------------------------------
397 /* */
398 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
399 ListParser &List)
400 {
401 if (CurrentFile == 0)
402 return true;
403
404 // Get a structure
405 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
406 if (VerFile == 0)
407 return false;
408
409 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
410 VF->File = CurrentFile - Cache.PkgFileP;
411
412 // Link it to the end of the list
413 map_ptrloc *Last = &Ver->FileList;
414 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
415 Last = &V->NextFile;
416 VF->NextFile = *Last;
417 *Last = VF.Index();
418
419 VF->Offset = List.Offset();
420 VF->Size = List.Size();
421 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
422 Cache.HeaderP->MaxVerFileSize = VF->Size;
423 Cache.HeaderP->VerFileCount++;
424
425 return true;
426 }
427 /*}}}*/
428 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
429 // ---------------------------------------------------------------------
430 /* This puts a version structure in the linked list */
431 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
432 const string &VerStr,
433 unsigned long Next)
434 {
435 // Get a structure
436 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
437 if (Version == 0)
438 return 0;
439
440 // Fill it in
441 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
442 Ver->NextVer = Next;
443 Ver->ID = Cache.HeaderP->VersionCount++;
444 Ver->VerStr = Map.WriteString(VerStr);
445 if (Ver->VerStr == 0)
446 return 0;
447
448 return Version;
449 }
450 /*}}}*/
451 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
452 // ---------------------------------------------------------------------
453 /* */
454 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
455 ListParser &List)
456 {
457 if (CurrentFile == 0)
458 return true;
459
460 // Get a structure
461 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
462 if (DescFile == 0)
463 return false;
464
465 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
466 DF->File = CurrentFile - Cache.PkgFileP;
467
468 // Link it to the end of the list
469 map_ptrloc *Last = &Desc->FileList;
470 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
471 Last = &D->NextFile;
472
473 DF->NextFile = *Last;
474 *Last = DF.Index();
475
476 DF->Offset = List.Offset();
477 DF->Size = List.Size();
478 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
479 Cache.HeaderP->MaxDescFileSize = DF->Size;
480 Cache.HeaderP->DescFileCount++;
481
482 return true;
483 }
484 /*}}}*/
485 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
486 // ---------------------------------------------------------------------
487 /* This puts a description structure in the linked list */
488 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
489 const string &Lang,
490 const MD5SumValue &md5sum,
491 map_ptrloc Next)
492 {
493 // Get a structure
494 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
495 if (Description == 0)
496 return 0;
497
498 // Fill it in
499 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
500 Desc->NextDesc = Next;
501 Desc->ID = Cache.HeaderP->DescriptionCount++;
502 Desc->language_code = Map.WriteString(Lang);
503 Desc->md5sum = Map.WriteString(md5sum.Value());
504 if (Desc->language_code == 0 || Desc->md5sum == 0)
505 return 0;
506
507 return Description;
508 }
509 /*}}}*/
510 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
511 // ---------------------------------------------------------------------
512 /* This prepares the Cache for delivery */
513 bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
514 // FIXME: add progress reporting for this operation
515 // Do we have different architectures in the groups?
516 vector<string> archs = APT::Configuration::getArchitectures();
517 if (archs.size() > 1) {
518 // Create Conflicts in between the group
519 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
520 string const PkgName = G.Name();
521 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
522 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
523 // "Arch: all" packages are co-installable
524 if (V->MultiArch == pkgCache::Version::All)
525 continue;
526 string const Arch = V.Arch();
527 map_ptrloc *OldDepLast = NULL;
528 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
529 if (*A == Arch)
530 continue;
531 /* We allow only one installed arch at a time
532 per group, therefore each group member conflicts
533 with all other group members */
534 pkgCache::PkgIterator D = G.FindPkg(*A);
535 if (D.end() == true)
536 continue;
537 // Conflicts: ${self}:other
538 NewDepends(D, V, "",
539 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
540 OldDepLast);
541 }
542 }
543 }
544 }
545 }
546 return true;
547 }
548 /*}}}*/
549 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
550 // ---------------------------------------------------------------------
551 /* This creates a dependency element in the tree. It is linked to the
552 version and to the package that it is pointing to. */
553 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
554 pkgCache::VerIterator &Ver,
555 string const &Version,
556 unsigned int const &Op,
557 unsigned int const &Type,
558 map_ptrloc *OldDepLast)
559 {
560 // Get a structure
561 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
562 if (unlikely(Dependency == 0))
563 return false;
564
565 // Fill it in
566 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
567 Dep->ParentVer = Ver.Index();
568 Dep->Type = Type;
569 Dep->CompareOp = Op;
570 Dep->ID = Cache.HeaderP->DependsCount++;
571
572 // Probe the reverse dependency list for a version string that matches
573 if (Version.empty() == false)
574 {
575 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
576 if (I->Version != 0 && I.TargetVer() == Version)
577 Dep->Version = I->Version;*/
578 if (Dep->Version == 0)
579 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
580 return false;
581 }
582
583 // Link it to the package
584 Dep->Package = Pkg.Index();
585 Dep->NextRevDepends = Pkg->RevDepends;
586 Pkg->RevDepends = Dep.Index();
587
588 // Do we know where to link the Dependency to?
589 if (OldDepLast == NULL)
590 {
591 OldDepLast = &Ver->DependsList;
592 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
593 OldDepLast = &D->NextDepends;
594 }
595
596 Dep->NextDepends = *OldDepLast;
597 *OldDepLast = Dep.Index();
598 OldDepLast = &Dep->NextDepends;
599
600 return true;
601 }
602 /*}}}*/
603 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
604 // ---------------------------------------------------------------------
605 /* This creates the Group and the Package to link this dependency to, if
606 needed, and also handles the caching of the old endpoint */
607 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
608 const string &PackageName,
609 const string &Arch,
610 const string &Version,
611 unsigned int Op,
612 unsigned int Type)
613 {
614 pkgCache::GrpIterator Grp;
615 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
616 return false;
617
618 // Locate the target package
619 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
620 if (Pkg.end() == true) {
621 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
622 return false;
623 }
624
625 // Is it a file dependency?
626 if (unlikely(PackageName[0] == '/'))
627 FoundFileDeps = true;
628
629 /* Caching the old end point speeds up generation substantially */
630 if (OldDepVer != Ver) {
631 OldDepLast = NULL;
632 OldDepVer = Ver;
633 }
634
635 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
636 }
637 /*}}}*/
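// Illustrative only -- how a concrete parser might emit "Depends: libfoo (>= 1.2)"
// through the split interface above.  The architecture argument is an
// assumption; a real parser takes it from the stanza or from APT::Architecture.
#if 0
   if (NewDepends(Ver,"libfoo",_config->Find("APT::Architecture"),
                  "1.2",pkgCache::Dep::GreaterEq,pkgCache::Dep::Depends) == false)
      return false;
#endif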
638 // ListParser::NewProvides - Create a Provides element /*{{{*/
639 // ---------------------------------------------------------------------
640 /* */
641 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
642 const string &PackageName,
643 const string &Version)
644 {
645 pkgCache &Cache = Owner->Cache;
646
647 // We do not add self-referencing provides
648 if (unlikely(Ver.ParentPkg().Name() == PackageName))
649 return true;
650
651 // Get a structure
652 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
653 if (unlikely(Provides == 0))
654 return false;
655 Cache.HeaderP->ProvidesCount++;
656
657 // Fill it in
658 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
659 Prv->Version = Ver.Index();
660 Prv->NextPkgProv = Ver->ProvidesList;
661 Ver->ProvidesList = Prv.Index();
662 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
663 return false;
664
665 // Locate the target package
666 pkgCache::PkgIterator Pkg;
667 if (unlikely(Owner->NewPackage(Pkg,PackageName,string(Ver.Arch())) == false))
668 return false;
669
670 // Link it to the package
671 Prv->ParentPkg = Pkg.Index();
672 Prv->NextProvides = Pkg->ProvidesList;
673 Pkg->ProvidesList = Prv.Index();
674
675 return true;
676 }
677 /*}}}*/
678 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
679 // ---------------------------------------------------------------------
680 /* This is used to select which file is to be associated with all newly
681 added versions. The caller is responsible for setting the IMS fields. */
682 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
683 const pkgIndexFile &Index,
684 unsigned long Flags)
685 {
686 // Get some space for the structure
687 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
688 if (CurrentFile == Cache.PkgFileP)
689 return false;
690
691 // Fill it in
692 CurrentFile->FileName = Map.WriteString(File);
693 CurrentFile->Site = WriteUniqString(Site);
694 CurrentFile->NextFile = Cache.HeaderP->FileList;
695 CurrentFile->Flags = Flags;
696 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
697 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
698 PkgFileName = File;
699 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
700 Cache.HeaderP->PackageFileCount++;
701
702 if (CurrentFile->FileName == 0)
703 return false;
704
705 if (Progress != 0)
706 Progress->SubProgress(Index.Size());
707 return true;
708 }
709 /*}}}*/
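// Illustrative only -- a sketch of the calling convention described above,
// roughly what an index file's Merge() does: select the file, fill in the
// IMS fields on the selected package file, then merge the records.
// GetCurFile() is an assumption taken from the generator's header, and
// "MyListParser" is a hypothetical concrete parser.
#if 0
   if (Gen.SelectFile(PackagesFile,Site,Index,0) == false)
      return _error->Error("Problem with SelectFile");

   // the caller is responsible for the IMS data
   struct stat St;
   if (stat(PackagesFile.c_str(),&St) != 0)
      return _error->Errno("stat","Failed to stat");
   pkgCache::PkgFileIterator File = Gen.GetCurFile();
   File->Size = St.st_size;
   File->mtime = St.st_mtime;

   MyListParser Parser;                       // hypothetical parser for the selected file
   return Gen.MergeList(Parser,NULL);
#endif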
710 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
711 // ---------------------------------------------------------------------
712 /* This is used to create handles to strings. Given the same text it
713 always returns the same number */
714 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
715 unsigned int Size)
716 {
717 /* We use a very small transient hash table here; this speeds up generation
718 by a fair amount on slower machines */
719 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
720 if (Bucket != 0 &&
721 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
722 return Bucket->String;
723
724 // Search for an insertion point
725 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
726 int Res = 1;
727 map_ptrloc *Last = &Cache.HeaderP->StringList;
728 for (; I != Cache.StringItemP; Last = &I->NextItem,
729 I = Cache.StringItemP + I->NextItem)
730 {
731 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
732 if (Res >= 0)
733 break;
734 }
735
736 // Match
737 if (Res == 0)
738 {
739 Bucket = I;
740 return I->String;
741 }
742
743 // Get a structure
744 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
745 if (Item == 0)
746 return 0;
747
748 // Fill in the structure
749 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
750 ItemP->NextItem = I - Cache.StringItemP;
751 *Last = Item;
752 ItemP->String = Map.WriteString(S,Size);
753 if (ItemP->String == 0)
754 return 0;
755
756 Bucket = ItemP;
757 return ItemP->String;
758 }
759 /*}}}*/
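// Illustrative only -- the de-duplication property described above: the same
// text always yields the same offset, so callers can compare the returned
// handles instead of the strings themselves.
#if 0
   unsigned long const a = WriteUniqString("amd64",5);
   unsigned long const b = WriteUniqString("amd64",5);
   // a == b holds, and Cache.StrP + a points at the single shared copy
#endif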
760 // CheckValidity - Check that a cache is up-to-date /*{{{*/
761 // ---------------------------------------------------------------------
762 /* This just verifies that each file in the list of index files exists,
763 has attributes matching the cache entry, and that the cache does not
764 contain any extra files. */
765 static bool CheckValidity(const string &CacheFile, FileIterator Start,
766 FileIterator End,MMap **OutMap = 0)
767 {
768 // No file, certainly invalid
769 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
770 return false;
771
772 // Map it
773 FileFd CacheF(CacheFile,FileFd::ReadOnly);
774 SPtr<MMap> Map = new MMap(CacheF,0);
775 pkgCache Cache(Map);
776 if (_error->PendingError() == true || Map->Size() == 0)
777 {
778 _error->Discard();
779 return false;
780 }
781
782 /* Now we check every index file, see if it is in the cache,
783 verify the IMS data and check that it is on the disk too.. */
784 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
785 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
786 for (; Start != End; Start++)
787 {
788 if ((*Start)->HasPackages() == false)
789 continue;
790
791 if ((*Start)->Exists() == false)
792 {
793 #if 0 // mvo: we no longer give a message here (Default Sources spec)
794 _error->WarningE("stat",_("Couldn't stat source package list %s"),
795 (*Start)->Describe().c_str());
796 #endif
797 continue;
798 }
799
800 // FindInCache is also expected to do an IMS check.
801 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
802 if (File.end() == true)
803 return false;
804
805 Visited[File->ID] = true;
806 }
807
808 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
809 if (Visited[I] == false)
810 return false;
811
812 if (_error->PendingError() == true)
813 {
814 _error->Discard();
815 return false;
816 }
817
818 if (OutMap != 0)
819 *OutMap = Map.UnGuard();
820 return true;
821 }
822 /*}}}*/
823 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
824 // ---------------------------------------------------------------------
825 /* Size is kind of an abstract notion that is only used for the progress
826 meter */
827 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
828 {
829 unsigned long TotalSize = 0;
830 for (; Start != End; Start++)
831 {
832 if ((*Start)->HasPackages() == false)
833 continue;
834 TotalSize += (*Start)->Size();
835 }
836 return TotalSize;
837 }
838 /*}}}*/
839 // BuildCache - Merge the list of index files into the cache /*{{{*/
840 // ---------------------------------------------------------------------
841 /* */
842 static bool BuildCache(pkgCacheGenerator &Gen,
843 OpProgress &Progress,
844 unsigned long &CurrentSize,unsigned long TotalSize,
845 FileIterator Start, FileIterator End)
846 {
847 FileIterator I;
848 for (I = Start; I != End; I++)
849 {
850 if ((*I)->HasPackages() == false)
851 continue;
852
853 if ((*I)->Exists() == false)
854 continue;
855
856 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
857 {
858 _error->Warning("Duplicate sources.list entry %s",
859 (*I)->Describe().c_str());
860 continue;
861 }
862
863 unsigned long Size = (*I)->Size();
864 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
865 CurrentSize += Size;
866
867 if ((*I)->Merge(Gen,Progress) == false)
868 return false;
869 }
870
871 if (Gen.HasFileDeps() == true)
872 {
873 Progress.Done();
874 TotalSize = ComputeSize(Start, End);
875 CurrentSize = 0;
876 for (I = Start; I != End; I++)
877 {
878 unsigned long Size = (*I)->Size();
879 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
880 CurrentSize += Size;
881 if ((*I)->MergeFileProvides(Gen,Progress) == false)
882 return false;
883 }
884 }
885
886 return true;
887 }
888 /*}}}*/
889 // MakeStatusCache - Construct the status cache /*{{{*/
890 // ---------------------------------------------------------------------
891 /* This makes sure that the status cache (the cache that has all
892 index files from the sources list and all local ones) is ready
893 to be mmaped. If OutMap is not zero then a MMap object representing
894 the cache will be stored there. This is pretty much mandatory if you
895 are using AllowMem. AllowMem lets the function be run as non-root
896 where it builds the cache 'fast' into a memory buffer. */
897 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
898 MMap **OutMap,bool AllowMem)
899 {
900 unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
901
902 vector<pkgIndexFile *> Files;
903 for (vector<metaIndex *>::const_iterator i = List.begin();
904 i != List.end();
905 i++)
906 {
907 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
908 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
909 j != Indexes->end();
910 j++)
911 Files.push_back (*j);
912 }
913
914 unsigned long EndOfSource = Files.size();
915 if (_system->AddStatusFiles(Files) == false)
916 return false;
917
918 // Decide if we can write to the files..
919 string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
920 string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
921
922 // Decide if we can write to the cache
923 bool Writeable = false;
924 if (CacheFile.empty() == false)
925 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
926 else
927 if (SrcCacheFile.empty() == false)
928 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
929
930 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
931 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
932
933 Progress.OverallProgress(0,1,1,_("Reading package lists"));
934
935 // Cache is OK, Fin.
936 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
937 {
938 Progress.OverallProgress(1,1,1,_("Reading package lists"));
939 return true;
940 }
941
942 /* At this point we know we need to reconstruct the package cache,
943 begin. */
944 SPtr<FileFd> CacheF;
945 SPtr<DynamicMMap> Map;
946 if (Writeable == true && CacheFile.empty() == false)
947 {
948 unlink(CacheFile.c_str());
949 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
950 fchmod(CacheF->Fd(),0644);
951 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
952 if (_error->PendingError() == true)
953 return false;
954 }
955 else
956 {
957 // Just build it in memory..
958 Map = new DynamicMMap(0,MapSize);
959 }
960
961 // Let's try the source cache.
962 unsigned long CurrentSize = 0;
963 unsigned long TotalSize = 0;
964 if (CheckValidity(SrcCacheFile,Files.begin(),
965 Files.begin()+EndOfSource) == true)
966 {
967 // Preload the map with the source cache
968 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
969 unsigned long alloc = Map->RawAllocate(SCacheF.Size());
970 if ((alloc == 0 && _error->PendingError())
971 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
972 SCacheF.Size()) == false)
973 return false;
974
975 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
976
977 // Build the status cache
978 pkgCacheGenerator Gen(Map.Get(),&Progress);
979 if (_error->PendingError() == true)
980 return false;
981 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
982 Files.begin()+EndOfSource,Files.end()) == false)
983 return false;
984
985 // FIXME: move me to a better place
986 Gen.FinishCache(Progress);
987 }
988 else
989 {
990 TotalSize = ComputeSize(Files.begin(),Files.end());
991
992 // Build the source cache
993 pkgCacheGenerator Gen(Map.Get(),&Progress);
994 if (_error->PendingError() == true)
995 return false;
996 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
997 Files.begin(),Files.begin()+EndOfSource) == false)
998 return false;
999
1000 // Write it back
1001 if (Writeable == true && SrcCacheFile.empty() == false)
1002 {
1003 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1004 if (_error->PendingError() == true)
1005 return false;
1006
1007 fchmod(SCacheF.Fd(),0644);
1008
1009 // Write out the main data
1010 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1011 return _error->Error(_("IO Error saving source cache"));
1012 SCacheF.Sync();
1013
1014 // Write out the proper header
1015 Gen.GetCache().HeaderP->Dirty = false;
1016 if (SCacheF.Seek(0) == false ||
1017 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1018 return _error->Error(_("IO Error saving source cache"));
1019 Gen.GetCache().HeaderP->Dirty = true;
1020 SCacheF.Sync();
1021 }
1022
1023 // Build the status cache
1024 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1025 Files.begin()+EndOfSource,Files.end()) == false)
1026 return false;
1027
1028 // FIXME: move me to a better place
1029 Gen.FinishCache(Progress);
1030 }
1031
1032 if (_error->PendingError() == true)
1033 return false;
1034 if (OutMap != 0)
1035 {
1036 if (CacheF != 0)
1037 {
1038 delete Map.UnGuard();
1039 *OutMap = new MMap(*CacheF,0);
1040 }
1041 else
1042 {
1043 *OutMap = Map.UnGuard();
1044 }
1045 }
1046
1047 return true;
1048 }
1049 /*}}}*/
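// Illustrative only -- a minimal caller sketch for the function above,
// roughly what the higher-level cache-file code does: read the sources
// list, build or validate the cache, then construct a pkgCache on the
// returned map, which the caller owns.
#if 0
   pkgSourceList List;
   if (List.ReadMainList() == false)
      return false;

   OpTextProgress Progress(*_config);
   MMap *OutMap = 0;
   if (pkgMakeStatusCache(List,Progress,&OutMap,true) == false)
      return false;

   pkgCache Cache(OutMap);
   // ... use Cache ...
   delete OutMap;
#endif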
1050 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1051 // ---------------------------------------------------------------------
1052 /* */
1053 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1054 {
1055 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1056 vector<pkgIndexFile *> Files;
1057 unsigned long EndOfSource = Files.size();
1058 if (_system->AddStatusFiles(Files) == false)
1059 return false;
1060
1061 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1062 unsigned long CurrentSize = 0;
1063 unsigned long TotalSize = 0;
1064
1065 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1066
1067 // Build the status cache
1068 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1069 pkgCacheGenerator Gen(Map.Get(),&Progress);
1070 if (_error->PendingError() == true)
1071 return false;
1072 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1073 Files.begin()+EndOfSource,Files.end()) == false)
1074 return false;
1075
1076 // FIXME: move me to a better place
1077 Gen.FinishCache(Progress);
1078
1079 if (_error->PendingError() == true)
1080 return false;
1081 *OutMap = Map.UnGuard();
1082
1083 return true;
1084 }
1085 /*}}}*/