author  Mike Frysinger <vapier@google.com>  2015-05-31 05:15:46 -0400
committer  Mike Frysinger <vapier@google.com>  2015-05-31 05:15:46 -0400
commit  031126cef43e4338873d97de2189fa9f2fe06eea (patch)
tree  48f628bbcec0b7766c68796ab2d560132b0e8c50
parent  06303e6a87d55bdd9c880b2f4df65e0bbc6202ff (diff)
download  portage-031126cef43e4338873d97de2189fa9f2fe06eea.tar.gz
initial import

This is the portage-prefix-2.2.14 release set up like:

	./configure \
		--prefix=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr \
		--with-portage-user=vapier \
		--with-portage-group=eng \
		--with-offset-prefix=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir

It does not work as-is; follow up commits will make it more usable.

BUG=b:20895978
Change-Id: I03d3de254bf9f7c3204bd6ac7254c15476b08ba7
l--------- usr/bin/ebuild | 1
l--------- usr/bin/egencache | 1
l--------- usr/bin/emerge | 1
l--------- usr/bin/emerge-webrsync | 1
l--------- usr/bin/emirrordist | 1
l--------- usr/bin/portageq | 1
l--------- usr/bin/quickpkg | 1
l--------- usr/bin/repoman | 1
-rw-r--r-- usr/etc/dispatch-conf.conf | 65
-rw-r--r-- usr/etc/etc-update.conf | 82
l--------- usr/etc/make.globals | 1
-rwxr-xr-x usr/lib/portage/bin/archive-conf | 89
-rwxr-xr-x usr/lib/portage/bin/bashrc-functions.sh | 85
-rwxr-xr-x usr/lib/portage/bin/binhost-snapshot | 141
-rwxr-xr-x usr/lib/portage/bin/check-implicit-pointer-usage.py | 84
-rwxr-xr-x usr/lib/portage/bin/chpathtool.py | 224
-rwxr-xr-x usr/lib/portage/bin/clean_locks | 43
-rwxr-xr-x usr/lib/portage/bin/deprecated-path | 28
-rwxr-xr-x usr/lib/portage/bin/dispatch-conf | 499
-rwxr-xr-x usr/lib/portage/bin/dohtml.py | 235
-rwxr-xr-x usr/lib/portage/bin/eapi.sh | 145
-rwxr-xr-x usr/lib/portage/bin/ebuild | 358
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/bsd/sed | 34
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/die | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dobin | 33
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doconfd | 14
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dodir | 14
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dodoc | 42
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doenvd | 14
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doexe | 46
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dohard | 24
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doheader | 19
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dohtml | 19
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doinfo | 28
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doinitd | 14
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doins | 168
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dolib | 47
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dolib.a | 6
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dolib.so | 6
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/doman | 68
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/domo | 39
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dosbin | 33
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dosed | 46
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/dosym | 31
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/ecompress | 161
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/ecompressdir | 225
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/eerror | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/einfo | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/elog | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/emake | 28
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/eqawarn | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/ewarn | 7
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/fowners | 19
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/fperms | 17
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/keepdir | 20
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newbin | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newconfd | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newdoc | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newenvd | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newexe | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newheader | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newinitd | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newins | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newlib.a | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newlib.so | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newman | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/newsbin | 57
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/portageq | 26
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepall | 29
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepalldocs | 23
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepallinfo | 13
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepallman | 26
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepallstrip | 11
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepinfo | 38
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepman | 39
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/prepstrip | 390
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/unprivileged/chgrp | 41
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/unprivileged/chown | 41
-rwxr-xr-x usr/lib/portage/bin/ebuild-helpers/xattr/install | 35
-rwxr-xr-x usr/lib/portage/bin/ebuild-ipc | 10
-rwxr-xr-x usr/lib/portage/bin/ebuild-ipc.py | 282
-rwxr-xr-x usr/lib/portage/bin/ebuild.sh | 727
-rwxr-xr-x usr/lib/portage/bin/egencache | 1088
-rwxr-xr-x usr/lib/portage/bin/emaint | 42
-rwxr-xr-x usr/lib/portage/bin/emerge | 85
-rwxr-xr-x usr/lib/portage/bin/emerge-webrsync | 532
-rwxr-xr-x usr/lib/portage/bin/emirrordist | 13
-rwxr-xr-x usr/lib/portage/bin/env-update | 41
-rwxr-xr-x usr/lib/portage/bin/etc-update | 733
-rwxr-xr-x usr/lib/portage/bin/filter-bash-environment.py | 158
-rwxr-xr-x usr/lib/portage/bin/fixpackages | 52
-rwxr-xr-x usr/lib/portage/bin/glsa-check | 335
-rwxr-xr-x usr/lib/portage/bin/helper-functions.sh | 97
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/05double-D | 17
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/05prefix | 118
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/10executable-issues | 140
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/10ignored-flags | 99
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/20deprecated-directories | 18
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/20runtime-directories | 26
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60bash-completion | 130
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60openrc | 41
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60pkgconfig | 15
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60pngfix | 35
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60systemd | 25
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/60udev | 21
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/80libraries | 167
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/80multilib-strict | 50
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/90gcc-warnings | 168
-rwxr-xr-x usr/lib/portage/bin/install-qa-check.d/90world-writable | 27
-rwxr-xr-x usr/lib/portage/bin/install.py | 253
-rwxr-xr-x usr/lib/portage/bin/isolated-functions.sh | 491
-rwxr-xr-x usr/lib/portage/bin/lock-helper.py | 30
-rwxr-xr-x usr/lib/portage/bin/misc-functions.sh | 1201
-rwxr-xr-x usr/lib/portage/bin/phase-functions.sh | 1013
-rwxr-xr-x usr/lib/portage/bin/phase-helpers.sh | 1001
-rwxr-xr-x usr/lib/portage/bin/portageq | 1442
-rwxr-xr-x usr/lib/portage/bin/quickpkg | 333
-rwxr-xr-x usr/lib/portage/bin/readpecoff | 108
-rwxr-xr-x usr/lib/portage/bin/regenworld | 139
-rwxr-xr-x usr/lib/portage/bin/repoman | 3132
-rwxr-xr-x usr/lib/portage/bin/save-ebuild-env.sh | 125
-rwxr-xr-x usr/lib/portage/bin/xattr-helper.py | 192
-rwxr-xr-x usr/lib/portage/bin/xpak-helper.py | 69
-rw-r--r-- usr/lib/portage/pym/_emerge/AbstractDepPriority.py | 30
-rw-r--r-- usr/lib/portage/pym/_emerge/AbstractEbuildProcess.py | 344
-rw-r--r-- usr/lib/portage/pym/_emerge/AbstractPollTask.py | 154
-rw-r--r-- usr/lib/portage/pym/_emerge/AsynchronousLock.py | 286
-rw-r--r-- usr/lib/portage/pym/_emerge/AsynchronousTask.py | 176
-rw-r--r-- usr/lib/portage/pym/_emerge/AtomArg.py | 14
-rw-r--r-- usr/lib/portage/pym/_emerge/Binpkg.py | 402
-rw-r--r-- usr/lib/portage/pym/_emerge/BinpkgEnvExtractor.py | 66
-rw-r--r-- usr/lib/portage/pym/_emerge/BinpkgExtractorAsync.py | 39
-rw-r--r-- usr/lib/portage/pym/_emerge/BinpkgFetcher.py | 184
-rw-r--r-- usr/lib/portage/pym/_emerge/BinpkgPrefetcher.py | 43
-rw-r--r-- usr/lib/portage/pym/_emerge/BinpkgVerifier.py | 120
-rw-r--r-- usr/lib/portage/pym/_emerge/Blocker.py | 15
-rw-r--r-- usr/lib/portage/pym/_emerge/BlockerCache.py | 191
-rw-r--r-- usr/lib/portage/pym/_emerge/BlockerDB.py | 125
-rw-r--r-- usr/lib/portage/pym/_emerge/BlockerDepPriority.py | 13
-rw-r--r-- usr/lib/portage/pym/_emerge/CompositeTask.py | 162
-rw-r--r-- usr/lib/portage/pym/_emerge/DepPriority.py | 56
-rw-r--r-- usr/lib/portage/pym/_emerge/DepPriorityNormalRange.py | 47
-rw-r--r-- usr/lib/portage/pym/_emerge/DepPrioritySatisfiedRange.py | 97
-rw-r--r-- usr/lib/portage/pym/_emerge/Dependency.py | 21
-rw-r--r-- usr/lib/portage/pym/_emerge/DependencyArg.py | 46
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildBinpkg.py | 50
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildBuild.py | 409
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildBuildDir.py | 108
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildExecuter.py | 88
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildFetcher.py | 286
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildFetchonly.py | 32
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildIpcDaemon.py | 133
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildMerge.py | 58
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildMetadataPhase.py | 221
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildPhase.py | 382
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildProcess.py | 27
-rw-r--r-- usr/lib/portage/pym/_emerge/EbuildSpawnProcess.py | 22
-rw-r--r-- usr/lib/portage/pym/_emerge/FakeVartree.py | 329
-rw-r--r-- usr/lib/portage/pym/_emerge/FifoIpcDaemon.py | 109
-rw-r--r-- usr/lib/portage/pym/_emerge/JobStatusDisplay.py | 303
-rw-r--r-- usr/lib/portage/pym/_emerge/MergeListItem.py | 129
-rw-r--r-- usr/lib/portage/pym/_emerge/MetadataRegen.py | 154
-rw-r--r-- usr/lib/portage/pym/_emerge/MiscFunctionsProcess.py | 48
-rw-r--r-- usr/lib/portage/pym/_emerge/Package.py | 857
-rw-r--r-- usr/lib/portage/pym/_emerge/PackageArg.py | 19
-rw-r--r-- usr/lib/portage/pym/_emerge/PackageMerge.py | 44
-rw-r--r-- usr/lib/portage/pym/_emerge/PackageUninstall.py | 110
-rw-r--r-- usr/lib/portage/pym/_emerge/PackageVirtualDbapi.py | 149
-rw-r--r-- usr/lib/portage/pym/_emerge/PipeReader.py | 127
-rw-r--r-- usr/lib/portage/pym/_emerge/PollScheduler.py | 160
-rw-r--r-- usr/lib/portage/pym/_emerge/ProgressHandler.py | 22
-rw-r--r-- usr/lib/portage/pym/_emerge/RootConfig.py | 41
-rw-r--r-- usr/lib/portage/pym/_emerge/Scheduler.py | 2007
-rw-r--r-- usr/lib/portage/pym/_emerge/SequentialTaskQueue.py | 81
-rw-r--r-- usr/lib/portage/pym/_emerge/SetArg.py | 14
-rw-r--r-- usr/lib/portage/pym/_emerge/SpawnProcess.py | 217
-rw-r--r-- usr/lib/portage/pym/_emerge/SubProcess.py | 156
-rw-r--r-- usr/lib/portage/pym/_emerge/Task.py | 50
-rw-r--r-- usr/lib/portage/pym/_emerge/TaskSequence.py | 61
-rw-r--r-- usr/lib/portage/pym/_emerge/UninstallFailure.py | 15
-rw-r--r-- usr/lib/portage/pym/_emerge/UnmergeDepPriority.py | 46
-rw-r--r-- usr/lib/portage/pym/_emerge/UseFlagDisplay.py | 124
-rw-r--r-- usr/lib/portage/pym/_emerge/UserQuery.py | 71
-rw-r--r-- usr/lib/portage/pym/_emerge/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/_emerge/_find_deep_system_runtime_deps.py | 38
-rw-r--r-- usr/lib/portage/pym/_emerge/_flush_elog_mod_echo.py | 15
-rw-r--r-- usr/lib/portage/pym/_emerge/actions.py | 4088
-rw-r--r-- usr/lib/portage/pym/_emerge/chk_updated_cfg_files.py | 42
-rw-r--r-- usr/lib/portage/pym/_emerge/clear_caches.py | 17
-rw-r--r-- usr/lib/portage/pym/_emerge/countdown.py | 22
-rw-r--r-- usr/lib/portage/pym/_emerge/create_depgraph_params.py | 112
-rw-r--r-- usr/lib/portage/pym/_emerge/create_world_atom.py | 126
-rw-r--r-- usr/lib/portage/pym/_emerge/depgraph.py | 8943
-rw-r--r-- usr/lib/portage/pym/_emerge/emergelog.py | 57
-rw-r--r-- usr/lib/portage/pym/_emerge/getloadavg.py | 37
-rw-r--r-- usr/lib/portage/pym/_emerge/help.py | 25
-rw-r--r-- usr/lib/portage/pym/_emerge/is_valid_package_atom.py | 22
-rw-r--r-- usr/lib/portage/pym/_emerge/main.py | 1077
-rw-r--r-- usr/lib/portage/pym/_emerge/post_emerge.py | 168
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/backtracking.py | 264
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/circular_dependency.py | 272
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/output.py | 1022
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/output_helpers.py | 693
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/package_tracker.py | 301
-rw-r--r-- usr/lib/portage/pym/_emerge/resolver/slot_collision.py | 1122
-rw-r--r-- usr/lib/portage/pym/_emerge/search.py | 394
-rw-r--r-- usr/lib/portage/pym/_emerge/show_invalid_depstring_notice.py | 35
-rw-r--r-- usr/lib/portage/pym/_emerge/stdout_spinner.py | 86
-rw-r--r-- usr/lib/portage/pym/_emerge/sync/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/_emerge/sync/getaddrinfo_validate.py | 29
-rw-r--r-- usr/lib/portage/pym/_emerge/sync/old_tree_timestamp.py | 100
-rw-r--r-- usr/lib/portage/pym/_emerge/unmerge.py | 594
-rw-r--r-- usr/lib/portage/pym/portage/__init__.py | 690
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/Config.py | 132
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/DeletionIterator.py | 83
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/DeletionTask.py | 129
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/FetchIterator.py | 147
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/FetchTask.py | 631
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/MirrorDistTask.py | 219
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/_emirrordist/main.py | 463
-rw-r--r-- usr/lib/portage/pym/portage/_global_updates.py | 255
-rw-r--r-- usr/lib/portage/pym/portage/_legacy_globals.py | 77
-rw-r--r-- usr/lib/portage/pym/portage/_selinux.py | 140
-rw-r--r-- usr/lib/portage/pym/portage/_sets/__init__.py | 316
-rw-r--r-- usr/lib/portage/pym/portage/_sets/base.py | 265
-rw-r--r-- usr/lib/portage/pym/portage/_sets/dbapi.py | 537
-rw-r--r-- usr/lib/portage/pym/portage/_sets/files.py | 342
-rw-r--r-- usr/lib/portage/pym/portage/_sets/libs.py | 99
-rw-r--r-- usr/lib/portage/pym/portage/_sets/profiles.py | 54
-rw-r--r-- usr/lib/portage/pym/portage/_sets/security.py | 86
-rw-r--r-- usr/lib/portage/pym/portage/_sets/shell.py | 44
-rw-r--r-- usr/lib/portage/pym/portage/cache/__init__.py | 4
-rw-r--r-- usr/lib/portage/pym/portage/cache/anydbm.py | 113
-rw-r--r-- usr/lib/portage/pym/portage/cache/cache_errors.py | 62
-rw-r--r-- usr/lib/portage/pym/portage/cache/ebuild_xattr.py | 172
-rw-r--r-- usr/lib/portage/pym/portage/cache/flat_hash.py | 162
-rw-r--r-- usr/lib/portage/pym/portage/cache/fs_template.py | 98
-rw-r--r-- usr/lib/portage/pym/portage/cache/mappings.py | 485
-rw-r--r-- usr/lib/portage/pym/portage/cache/metadata.py | 158
-rw-r--r-- usr/lib/portage/pym/portage/cache/sql_template.py | 301
-rw-r--r-- usr/lib/portage/pym/portage/cache/sqlite.py | 280
-rw-r--r-- usr/lib/portage/pym/portage/cache/template.py | 312
-rw-r--r-- usr/lib/portage/pym/portage/cache/volatile.py | 30
-rw-r--r-- usr/lib/portage/pym/portage/checksum.py | 427
-rw-r--r-- usr/lib/portage/pym/portage/const.py | 306
-rw-r--r-- usr/lib/portage/pym/portage/const_autotool.py | 23
-rw-r--r-- usr/lib/portage/pym/portage/cvstree.py | 315
-rw-r--r-- usr/lib/portage/pym/portage/data.py | 281
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/_MergeProcess.py | 279
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/_SyncfsProcess.py | 53
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/__init__.py | 387
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/_expand_new_virt.py | 81
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/_similar_name_search.py | 57
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/bintree.py | 1500
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/cpv_expand.py | 108
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/dep_expand.py | 58
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/porttree.py | 1229
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/vartree.py | 5326
-rw-r--r-- usr/lib/portage/pym/portage/dbapi/virtual.py | 167
-rw-r--r-- usr/lib/portage/pym/portage/debug.py | 120
-rw-r--r-- usr/lib/portage/pym/portage/dep/__init__.py | 2821
-rw-r--r-- usr/lib/portage/pym/portage/dep/_slot_operator.py | 106
-rw-r--r-- usr/lib/portage/pym/portage/dep/dep_check.py | 711
-rw-r--r-- usr/lib/portage/pym/portage/dispatch_conf.py | 212
-rw-r--r-- usr/lib/portage/pym/portage/eapi.py | 144
-rw-r--r-- usr/lib/portage/pym/portage/eclass_cache.py | 187
-rw-r--r-- usr/lib/portage/pym/portage/elog/__init__.py | 191
-rw-r--r-- usr/lib/portage/pym/portage/elog/filtering.py | 15
-rw-r--r-- usr/lib/portage/pym/portage/elog/messages.py | 190
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_custom.py | 19
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_echo.py | 60
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_mail.py | 43
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_mail_summary.py | 89
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_save.py | 86
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_save_summary.py | 92
-rw-r--r-- usr/lib/portage/pym/portage/elog/mod_syslog.py | 37
-rw-r--r-- usr/lib/portage/pym/portage/emaint/__init__.py | 5
-rw-r--r-- usr/lib/portage/pym/portage/emaint/defaults.py | 25
-rw-r--r-- usr/lib/portage/pym/portage/emaint/main.py | 225
-rw-r--r-- usr/lib/portage/pym/portage/emaint/module.py | 194
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/__init__.py | 5
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/binhost/__init__.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/binhost/binhost.py | 165
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/config/__init__.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/config/config.py | 79
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/logs/__init__.py | 45
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/logs/logs.py | 103
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/merges/__init__.py | 31
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/merges/merges.py | 290
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/move/__init__.py | 30
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/move/move.py | 181
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/resume/__init__.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/resume/resume.py | 58
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/world/__init__.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/emaint/modules/world/world.py | 89
-rw-r--r-- usr/lib/portage/pym/portage/emaint/progress.py | 61
-rw-r--r-- usr/lib/portage/pym/portage/env/__init__.py | 3
-rw-r--r-- usr/lib/portage/pym/portage/env/config.py | 105
-rw-r--r-- usr/lib/portage/pym/portage/env/loaders.py | 327
-rw-r--r-- usr/lib/portage/pym/portage/env/validators.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/exception.py | 204
-rw-r--r-- usr/lib/portage/pym/portage/getbinpkg.py | 935
-rw-r--r-- usr/lib/portage/pym/portage/glsa.py | 726
-rw-r--r-- usr/lib/portage/pym/portage/localization.py | 42
-rw-r--r-- usr/lib/portage/pym/portage/locks.py | 557
-rw-r--r-- usr/lib/portage/pym/portage/mail.py | 177
-rw-r--r-- usr/lib/portage/pym/portage/manifest.py | 650
-rw-r--r-- usr/lib/portage/pym/portage/news.py | 424
-rw-r--r-- usr/lib/portage/pym/portage/output.py | 844
-rw-r--r-- usr/lib/portage/pym/portage/package/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/KeywordsManager.py | 335
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/LicenseManager.py | 237
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/LocationsManager.py | 308
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/MaskManager.py | 268
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/UseManager.py | 489
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/VirtualsManager.py | 233
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/env_var_validation.py | 23
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/features_set.py | 128
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/helper.py | 64
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py | 217
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_config/unpack_dependencies.py | 38
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_ipc/ExitCommand.py | 27
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_ipc/IpcCommand.py | 9
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_ipc/QueryCommand.py | 140
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_ipc/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_metadata_invalid.py | 41
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py | 43
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py | 93
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py | 186
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/_spawn_nofetch.py | 93
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/config.py | 2739
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/deprecated_profile_check.py | 83
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/digestcheck.py | 155
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/digestgen.py | 205
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/doebuild.py | 2425
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/fetch.py | 1171
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/getmaskingreason.py | 126
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/getmaskingstatus.py | 192
-rw-r--r-- usr/lib/portage/pym/portage/package/ebuild/prepare_build_dirs.py | 409
-rw-r--r-- usr/lib/portage/pym/portage/process.py | 665
-rw-r--r-- usr/lib/portage/pym/portage/proxy/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/proxy/lazyimport.py | 213
-rw-r--r-- usr/lib/portage/pym/portage/proxy/objectproxy.py | 98
-rw-r--r-- usr/lib/portage/pym/portage/repository/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/repository/config.py | 1080
-rw-r--r-- usr/lib/portage/pym/portage/tests/__init__.py | 355
-rw-r--r-- usr/lib/portage/pym/portage/tests/bin/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/bin/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/bin/setup_env.py | 87
-rw-r--r-- usr/lib/portage/pym/portage/tests/bin/test_dobin.py | 16
-rw-r--r-- usr/lib/portage/pym/portage/tests/bin/test_dodir.py | 18
-rw-r--r-- usr/lib/portage/pym/portage/tests/dbapi/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/dbapi/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/dbapi/test_fakedbapi.py | 63
-rw-r--r-- usr/lib/portage/pym/portage/tests/dbapi/test_portdb_cache.py | 182
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/__init__.py | 3
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/testAtom.py | 341
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/testCheckRequiredUse.py | 233
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/testExtendedAtomDict.py | 18
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/testExtractAffectingUSE.py | 75
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/testStandalone.py | 37
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_best_match_to_list.py | 63
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_dep_getcpv.py | 37
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_dep_getrepo.py | 29
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_dep_getslot.py | 28
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_dep_getusedeps.py | 35
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_get_operator.py | 37
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_get_required_use_flags.py | 44
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_isjustname.py | 24
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_isvalidatom.py | 158
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_match_from_list.py | 137
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_paren_reduce.py | 69
-rw-r--r-- usr/lib/portage/pym/portage/tests/dep/test_use_reduce.py | 626
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_array_fromfile_eof.py | 47
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_config.py | 345
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py | 137
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_spawn.py | 105
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_ipc_daemon.py | 157
-rw-r--r-- usr/lib/portage/pym/portage/tests/ebuild/test_spawn.py | 57
-rw-r--r-- usr/lib/portage/pym/portage/tests/emerge/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/emerge/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/emerge/test_emerge_slot_abi.py | 178
-rw-r--r-- usr/lib/portage/pym/portage/tests/emerge/test_global_updates.py | 41
-rw-r--r-- usr/lib/portage/pym/portage/tests/emerge/test_simple.py | 446
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/__init__.py | 4
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/__init__.py | 4
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/test_PackageKeywordsFile.py | 40
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/test_PackageMaskFile.py | 29
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/test_PackageUseFile.py | 37
-rw-r--r-- usr/lib/portage/pym/portage/tests/env/config/test_PortageModulesFile.py | 38
-rw-r--r-- usr/lib/portage/pym/portage/tests/glsa/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/glsa/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/glsa/test_security_set.py | 145
-rw-r--r-- usr/lib/portage/pym/portage/tests/lafilefixer/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lafilefixer/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lafilefixer/test_lafilefixer.py | 145
-rw-r--r-- usr/lib/portage/pym/portage/tests/lazyimport/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lazyimport/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py | 81
-rw-r--r-- usr/lib/portage/pym/portage/tests/lazyimport/test_preload_portage_submodules.py | 16
-rw-r--r-- usr/lib/portage/pym/portage/tests/lint/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lint/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/lint/test_bash_syntax.py | 54
-rw-r--r-- usr/lib/portage/pym/portage/tests/lint/test_compile_modules.py | 54
-rw-r--r-- usr/lib/portage/pym/portage/tests/lint/test_import_modules.py | 44
-rw-r--r-- usr/lib/portage/pym/portage/tests/locks/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/locks/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/locks/test_asynchronous_lock.py | 176
-rw-r--r-- usr/lib/portage/pym/portage/tests/locks/test_lock_nonblock.py | 62
-rw-r--r-- usr/lib/portage/pym/portage/tests/news/__init__.py | 3
-rw-r--r-- usr/lib/portage/pym/portage/tests/news/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/news/test_NewsItem.py | 95
-rw-r--r-- usr/lib/portage/pym/portage/tests/process/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/process/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/process/test_PopenProcess.py | 85
-rw-r--r-- usr/lib/portage/pym/portage/tests/process/test_PopenProcessBlockingIO.py | 63
-rw-r--r-- usr/lib/portage/pym/portage/tests/process/test_poll.py | 86
-rw-r--r-- usr/lib/portage/pym/portage/tests/repoman/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/repoman/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/repoman/test_echangelog.py | 106
-rw-r--r-- usr/lib/portage/pym/portage/tests/repoman/test_simple.py | 327
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/ResolverPlayground.py | 813
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_autounmask.py | 481
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_autounmask_multilib_use.py | 85
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_autounmask_use_breakage.py | 63
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_backtracking.py | 174
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_blocker.py | 48
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_circular_choices.py | 61
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_circular_dependencies.py | 84
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_complete_graph.py | 130
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py | 74
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_depclean.py | 285
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_depclean_order.py | 57
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_depclean_slot_unavailable.py | 78
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_depth.py | 252
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_eapi.py | 115
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_features_test_use.py | 68
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_keywords.py | 356
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_merge_order.py | 478
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py | 31
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_multirepo.py | 398
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_multislot.py | 54
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_old_dep_chain_display.py | 35
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_onlydeps.py | 34
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_or_choices.py | 207
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_output.py | 88
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_package_tracker.py | 261
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_rebuild.py | 143
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py | 59
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_required_use.py | 114
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_simple.py | 74
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_abi.py | 459
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_abi_downgrade.py | 225
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_change_without_revbump.py | 69
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_collisions.py | 259
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_force_rebuild.py | 84
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_mask_update.py | 41
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_rebuild.py | 449
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py | 115
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_update.py | 98
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_autounmask.py | 120
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_rebuild.py | 80
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_required_use.py | 72
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py | 70
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsolved.py | 88
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py | 75
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_targetroot.py | 85
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_unpack_dependencies.py | 65
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_use_aliases.py | 131
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_use_dep_defaults.py | 40
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_useflags.py | 78
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_virtual_slot.py | 142
-rw-r--r-- usr/lib/portage/pym/portage/tests/resolver/test_virtual_transition.py | 51
-rw-r--r-- usr/lib/portage/pym/portage/tests/runTests.py | 67
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/base/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/base/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/base/testInternalPackageSet.py | 61
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/files/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/files/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/files/testConfigFileSet.py | 32
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/files/testStaticFileSet.py | 27
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/shell/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/shell/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/sets/shell/testShell.py | 28
-rw-r--r-- usr/lib/portage/pym/portage/tests/unicode/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/unicode/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/unicode/test_string_format.py | 108
-rw-r--r-- usr/lib/portage/pym/portage/tests/update/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/tests/update/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/update/test_move_ent.py | 109
-rw-r--r-- usr/lib/portage/pym/portage/tests/update/test_move_slot_ent.py | 154
-rw-r--r-- usr/lib/portage/pym/portage/tests/update/test_update_dbentry.py | 277
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/__init__.py | 4
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_digraph.py | 237
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_getconfig.py | 76
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_grabdict.py | 11
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_normalizedPath.py | 14
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_stackDictList.py | 19
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_stackDicts.py | 33
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_stackLists.py | 21
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_uniqueArray.py | 26
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_varExpand.py | 92
-rw-r--r-- usr/lib/portage/pym/portage/tests/util/test_whirlpool.py | 16
-rw-r--r-- usr/lib/portage/pym/portage/tests/versions/__init__.py | 3
-rw-r--r-- usr/lib/portage/pym/portage/tests/versions/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/versions/test_cpv_sort_key.py | 17
-rw-r--r-- usr/lib/portage/pym/portage/tests/versions/test_vercmp.py | 84
-rw-r--r-- usr/lib/portage/pym/portage/tests/xpak/__init__.py | 3
-rw-r--r-- usr/lib/portage/pym/portage/tests/xpak/__test__.py | 0
-rw-r--r-- usr/lib/portage/pym/portage/tests/xpak/test_decodeint.py | 16
-rw-r--r-- usr/lib/portage/pym/portage/update.py | 427
-rw-r--r-- usr/lib/portage/pym/portage/util/ExtractKernelVersion.py | 78
-rw-r--r-- usr/lib/portage/pym/portage/util/SlotObject.py | 50
-rw-r--r-- usr/lib/portage/pym/portage/util/_ShelveUnicodeWrapper.py | 45
-rw-r--r-- usr/lib/portage/pym/portage/util/__init__.py | 1800
-rw-r--r-- usr/lib/portage/pym/portage/util/_argparse.py | 42
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/AsyncScheduler.py | 102
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/FileCopier.py | 17
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/FileDigester.py | 73
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/ForkProcess.py | 65
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/PipeLogger.py | 163
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/PipeReaderBlockingIO.py | 91
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/PopenProcess.py | 33
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/SchedulerInterface.py | 79
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/TaskScheduler.py | 20
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/util/_async/run_main_scheduler.py | 41
-rw-r--r-- usr/lib/portage/pym/portage/util/_ctypes.py | 47
-rw-r--r-- usr/lib/portage/pym/portage/util/_desktop_entry.py | 104
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapELF.py | 820
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapMachO.py | 770
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapPeCoff.py | 286
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapXCoff.py | 312
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py | 254
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/util/_dyn_libs/display_preserved_libs.py | 98
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/EventLoop.py | 664
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/GlibEventLoop.py | 23
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/PollConstants.py | 18
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/PollSelectAdapter.py | 76
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/util/_eventloop/global_event_loop.py | 35
-rw-r--r-- usr/lib/portage/pym/portage/util/_get_vm_info.py | 80
-rw-r--r-- usr/lib/portage/pym/portage/util/_info_files.py | 139
-rw-r--r-- usr/lib/portage/pym/portage/util/_path.py | 27
-rw-r--r-- usr/lib/portage/pym/portage/util/_pty.py | 78
-rw-r--r-- usr/lib/portage/pym/portage/util/_urlopen.py | 92
-rw-r--r-- usr/lib/portage/pym/portage/util/digraph.py | 359
-rw-r--r-- usr/lib/portage/pym/portage/util/env_update.py | 359
-rw-r--r-- usr/lib/portage/pym/portage/util/lafilefixer.py | 185
-rw-r--r-- usr/lib/portage/pym/portage/util/listdir.py | 139
-rw-r--r-- usr/lib/portage/pym/portage/util/movefile.py | 422
-rw-r--r-- usr/lib/portage/pym/portage/util/mtimedb.py | 128
-rw-r--r-- usr/lib/portage/pym/portage/util/whirlpool.py | 796
-rw-r--r-- usr/lib/portage/pym/portage/util/writeable_check.py | 86
-rw-r--r-- usr/lib/portage/pym/portage/versions.py | 588
-rw-r--r-- usr/lib/portage/pym/portage/xml/__init__.py | 2
-rw-r--r-- usr/lib/portage/pym/portage/xml/metadata.py | 423
-rw-r--r-- usr/lib/portage/pym/portage/xpak.py | 499
-rw-r--r-- usr/lib/portage/pym/repoman/__init__.py | 0
-rw-r--r-- usr/lib/portage/pym/repoman/checks.py | 920
-rw-r--r-- usr/lib/portage/pym/repoman/errors.py | 27
-rw-r--r-- usr/lib/portage/pym/repoman/herdbase.py | 115
-rw-r--r-- usr/lib/portage/pym/repoman/utilities.py | 967
l--------- usr/sbin/archive-conf | 1
l--------- usr/sbin/dispatch-conf | 1
l--------- usr/sbin/emaint | 1
l--------- usr/sbin/env-update | 1
l--------- usr/sbin/etc-update | 1
l--------- usr/sbin/fixpackages | 1
l--------- usr/sbin/readpecoff | 1
l--------- usr/sbin/regenworld | 1
-rw-r--r-- usr/share/man/man1/dispatch-conf.1 | 90
-rw-r--r-- usr/share/man/man1/ebuild.1 | 233
-rw-r--r-- usr/share/man/man1/egencache.1 | 163
-rw-r--r-- usr/share/man/man1/emaint.1 | 77
-rw-r--r-- usr/share/man/man1/emerge.1 | 1262
-rw-r--r-- usr/share/man/man1/env-update.1 | 28
-rw-r--r-- usr/share/man/man1/etc-update.1 | 52
-rw-r--r-- usr/share/man/man1/quickpkg.1 | 73
-rw-r--r-- usr/share/man/man1/repoman.1 | 403
-rw-r--r-- usr/share/man/man5/color.map.5 | 209
-rw-r--r-- usr/share/man/man5/ebuild.5 | 1576
-rw-r--r-- usr/share/man/man5/make.conf.5 | 1120
-rw-r--r-- usr/share/man/man5/portage.5 | 1410
-rw-r--r-- usr/share/portage/config/make.conf.example | 368
-rw-r--r-- usr/share/portage/config/make.globals | 179
-rw-r--r-- usr/share/portage/config/repos.conf | 7
-rw-r--r-- usr/share/portage/config/sets/portage.conf | 91
603 files changed, 126985 insertions, 0 deletions
diff --git a/usr/bin/ebuild b/usr/bin/ebuild
new file mode 120000
index 0000000..96a259c
--- /dev/null
+++ b/usr/bin/ebuild
@@ -0,0 +1 @@
+../lib/portage/bin/ebuild
\ No newline at end of file
diff --git a/usr/bin/egencache b/usr/bin/egencache
new file mode 120000
index 0000000..c8046fb
--- /dev/null
+++ b/usr/bin/egencache
@@ -0,0 +1 @@
+../lib/portage/bin/egencache
\ No newline at end of file
diff --git a/usr/bin/emerge b/usr/bin/emerge
new file mode 120000
index 0000000..7f7fd9b
--- /dev/null
+++ b/usr/bin/emerge
@@ -0,0 +1 @@
+../lib/portage/bin/emerge
\ No newline at end of file
diff --git a/usr/bin/emerge-webrsync b/usr/bin/emerge-webrsync
new file mode 120000
index 0000000..a9cb41e
--- /dev/null
+++ b/usr/bin/emerge-webrsync
@@ -0,0 +1 @@
+../lib/portage/bin/emerge-webrsync
\ No newline at end of file
diff --git a/usr/bin/emirrordist b/usr/bin/emirrordist
new file mode 120000
index 0000000..6ac946d
--- /dev/null
+++ b/usr/bin/emirrordist
@@ -0,0 +1 @@
+../lib/portage/bin/emirrordist
\ No newline at end of file
diff --git a/usr/bin/portageq b/usr/bin/portageq
new file mode 120000
index 0000000..b9ffbca
--- /dev/null
+++ b/usr/bin/portageq
@@ -0,0 +1 @@
+../lib/portage/bin/portageq
\ No newline at end of file
diff --git a/usr/bin/quickpkg b/usr/bin/quickpkg
new file mode 120000
index 0000000..85c54a7
--- /dev/null
+++ b/usr/bin/quickpkg
@@ -0,0 +1 @@
+../lib/portage/bin/quickpkg
\ No newline at end of file
diff --git a/usr/bin/repoman b/usr/bin/repoman
new file mode 120000
index 0000000..c45040b
--- /dev/null
+++ b/usr/bin/repoman
@@ -0,0 +1 @@
+../lib/portage/bin/repoman
\ No newline at end of file
diff --git a/usr/etc/dispatch-conf.conf b/usr/etc/dispatch-conf.conf
new file mode 100644
index 0000000..125b7cc
--- /dev/null
+++ b/usr/etc/dispatch-conf.conf
@@ -0,0 +1,65 @@
+#
+# dispatch-conf.conf
+#
+
+# Directory to archive replaced configs
+archive-dir=${EPREFIX}/etc/config-archive
+
+# Use rcs for storing files in the archive directory?
+# NOTE: You should install dev-vcs/rcs before enabling this option.
+# WARNING: When configured to use rcs, read and execute permissions of
+# archived files may be inherited from the first check in of a working
+# file, as documented in the ci(1) man page. This means that even if
+# the permissions of the working file have since changed, the older
+# permissions of the first check in may be inherited. As mentioned in
+# the ci(1) man page, users can control access to RCS files by setting
+# the permissions of the directory containing the files (see
+# archive-dir above).
+# (yes or no)
+use-rcs=no
+
+# Diff for display
+# %s old file
+# %s new file
+# If using colordiff instead of diff, the less -R option may be required
+# for correct display.
+diff="diff -Nu '%s' '%s'"
+
+# Set the pager for use with diff commands (this will
+# cause the PAGER environment variable to be ignored).
+# Setting pager="cat" will disable pager usage.
+pager=""
+
+# Default options used if less is the pager
+less-opts="--no-init --QUIT-AT-EOF"
+
+# Diff for interactive merges.
+# %s output file
+# %s old file
+# %s new file
+merge="sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+
+# Automerge files comprising only CVS interpolations (e.g. Header or Id)
+# (yes or no)
+replace-cvs=yes
+
+# Automerge files comprising only whitespace and/or comments
+# (yes or no)
+replace-wscomments=no
+
+# Automerge files that the user hasn't modified
+# (yes or no)
+replace-unmodified=no
+
+# Ignore a version that is identical to the previously merged version,
+# even though it is different from the current user modified version
+# Note that emerge already has a similar feature enabled by default,
+# which can be disabled by the emerge --noconfmem option.
+# (yes or no)
+ignore-previously-merged=no
+
+# Per-session log file of changes made to configuration files
+#log-file=/var/log/dispatch-conf.log
+
+# List of frozen files for which dispatch-conf will automatically zap updates
+#frozen-files=""
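The diff= and merge= settings above are positional %s templates: dispatch-conf substitutes the old and new file names in order before running the command through a shell. A minimal sketch of that expansion, using hypothetical paths in portage's ._cfg0000_ config-update naming:

	# Positional %s expansion for the diff= template above;
	# both paths are hypothetical examples.
	diff_template = "diff -Nu '%s' '%s'"
	cmd = diff_template % ('/etc/foo.conf', '/etc/._cfg0000_foo.conf')
	# cmd == "diff -Nu '/etc/foo.conf' '/etc/._cfg0000_foo.conf'"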
diff --git a/usr/etc/etc-update.conf b/usr/etc/etc-update.conf
new file mode 100644
index 0000000..9709862
--- /dev/null
+++ b/usr/etc/etc-update.conf
@@ -0,0 +1,82 @@
+# /etc/etc-update.conf: config file for `etc-update` utility
+# edit the lines below to your liking
+
+# mode - 0 for text, 1 for menu (support incomplete)
+# note that you need dev-util/dialog installed
+mode="0"
+
+# Whether to clear the term prior to each display
+#clear_term="yes"
+clear_term="no"
+
+# Whether trivial/comment changes should be automerged
+eu_automerge="yes"
+
+# arguments used whenever rm is called
+rm_opts="-i"
+
+# arguments used whenever mv is called
+mv_opts="-i"
+
+# arguments used whenever cp is called
+cp_opts="-i"
+
+# set the pager for use with diff commands (this will
+# cause the PAGER environment variable to be ignored)
+#pager="less"
+
+# For emacs-users (see NOTE_2)
+# diff_command="eval emacs -nw --eval=\'\(ediff\ \"%file1\"\ \"%file2\"\)\'"
+#using_editor=1
+
+# vim-users: you CAN use vimdiff for diff_command. (see NOTE_1 and NOTE_2)
+#diff_command="vim -d %file1 %file2"
+#using_editor=1
+
+# If using colordiff instead of diff, the less -R option may be required
+# for correct display (see 'pager' setting above).
+diff_command="diff -uN %file1 %file2"
+using_editor=0
+
+
+# vim-users: don't use vimdiff for merging (see NOTE_1)
+merge_command="sdiff -s -o %merged %orig %new"
+
+# EXPLANATION
+#
+# pager:
+#
+# Examples of pager usage:
+# pager="cat" # don't use a pager
+# pager="less -E" # less
+# pager="more" # more
+#
+#
+# diff_command:
+#
+# Arguments:
+# %file1 [REQUIRED]
+# %file2 [REQUIRED]
+#
+# Examples of diff_command:
+# diff_command="diff -uN %file1 %file2" # diff
+# diff_command="vim -d %file1 %file2" # vimdiff
+#
+#
+# merge_command:
+#
+# Arguments:
+# %orig [REQUIRED]
+# %new [REQUIRED]
+# %merged [REQUIRED]
+#
+# Examples of merge_command:
+#  merge_command="sdiff -s -o %merged %orig %new"            # sdiff
+#
+
+# NOTE_1: Editors such as vim/vimdiff are not usable for the merge_command
+# because it is not known what filenames the produced files have (the user can
+# choose while using those programs)
+
+# NOTE_2: Make sure using_editor is set to "1" when using an editor as
+# diff_command!
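In contrast to dispatch-conf's positional %s templates, etc-update's diff_command and merge_command use named placeholders (%file1, %file2, %orig, %new, %merged) that are substituted by name, so their order within the command is free. A rough sketch of that substitution, with hypothetical paths:

	# Named-placeholder substitution in the style of diff_command above;
	# the two paths are hypothetical examples.
	cmd = "diff -uN %file1 %file2"
	cmd = cmd.replace('%file1', '/etc/foo.conf')
	cmd = cmd.replace('%file2', '/etc/._cfg0000_foo.conf')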
diff --git a/usr/etc/make.globals b/usr/etc/make.globals
new file mode 120000
index 0000000..4caa6cd
--- /dev/null
+++ b/usr/etc/make.globals
@@ -0,0 +1 @@
+/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp.x/prefix-portage-2.2.14/../../../../src/third_party/portage-prefix//usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/share/portage/config/make.globals
\ No newline at end of file
diff --git a/usr/lib/portage/bin/archive-conf b/usr/lib/portage/bin/archive-conf
new file mode 100755
index 0000000..59350a0
--- /dev/null
+++ b/usr/lib/portage/bin/archive-conf
@@ -0,0 +1,89 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# archive-conf -- save off a config file in the dispatch-conf archive dir
+#
+# Written by Wayne Davison <gentoo@blorf.net> with code snagged from
+# Jeremy Wohl's dispatch-conf script and the portage chkcontents script.
+#
+
+from __future__ import print_function
+
+import sys
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+
+import portage.dispatch_conf
+from portage import os
+from portage.checksum import perform_md5
+
+FIND_EXTANT_CONTENTS = "find %s -name CONTENTS"
+
+MANDATORY_OPTS = [ 'archive-dir' ]
+
+def archive_conf():
+	args = []
+	content_files = []
+	md5_match_hash = {}
+
+	options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+	for conf in sys.argv[1:]:
+		if not os.path.isabs(conf):
+			conf = os.path.abspath(conf)
+		args += [ conf ]
+		md5_match_hash[conf] = ''
+
+	# Find all the CONTENT files in VDB_PATH.
+	with os.popen(FIND_EXTANT_CONTENTS % (os.path.join(portage.settings['EROOT'], portage.VDB_PATH))) as f:
+		content_files += f.readlines()
+
+	# Search for the saved md5 checksum of all the specified config files
+	# and see if the current file is unmodified or not.
+	try:
+		todo_cnt = len(args)
+		for filename in content_files:
+			filename = filename.rstrip()
+			try:
+				contents = open(filename, "r")
+			except IOError as e:
+				print('archive-conf: Unable to open %s: %s' % (filename, e), file=sys.stderr)
+				sys.exit(1)
+			lines = contents.readlines()
+			for line in lines:
+				items = line.split()
+				if items[0] == 'obj':
+					for conf in args:
+						if items[1] == conf:
+							stored = items[2].lower()
+							real = perform_md5(conf).lower()
+							if stored == real:
+								md5_match_hash[conf] = conf
+							todo_cnt -= 1
+							if todo_cnt == 0:
+								raise StopIteration()
+	except StopIteration:
+		pass
+
+	for conf in args:
+		archive = os.path.join(options['archive-dir'], conf.lstrip('/'))
+		if options['use-rcs'] == 'yes':
+			portage.dispatch_conf.rcs_archive(archive, conf, md5_match_hash[conf], '')
+			if md5_match_hash[conf]:
+				portage.dispatch_conf.rcs_archive_post_process(archive)
+		else:
+			portage.dispatch_conf.file_archive(archive, conf, md5_match_hash[conf], '')
+			if md5_match_hash[conf]:
+				portage.dispatch_conf.file_archive_post_process(archive)
+
+# run
+if len(sys.argv) > 1:
+	archive_conf()
+else:
+	print('Usage: archive-conf /CONFIG/FILE [/CONFIG/FILE...]', file=sys.stderr)
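archive-conf decides whether each file was user-modified by comparing its current MD5 with the one recorded in the installed-package CONTENTS files under VDB_PATH, whose obj entries have the form "obj <path> <md5> <mtime>". A short sketch of the comparison done in the loop above, with made-up entry values:

	# Parsing a VDB CONTENTS 'obj' entry as the loop above does;
	# the path, md5 and mtime here are invented for illustration.
	line = "obj /etc/foo.conf d41d8cd98f00b204e9800998ecf8427e 1433062546"
	items = line.split()
	if items[0] == 'obj':
		path, stored_md5 = items[1], items[2].lower()
		# unmodified iff stored_md5 == perform_md5(path).lower()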
diff --git a/usr/lib/portage/bin/bashrc-functions.sh b/usr/lib/portage/bin/bashrc-functions.sh
new file mode 100755
index 0000000..503b172
--- /dev/null
+++ b/usr/lib/portage/bin/bashrc-functions.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+portageq() {
+	PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}}\
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
+}
+
+register_die_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_DEATH_HOOKS || \
+			export EBUILD_DEATH_HOOKS="$EBUILD_DEATH_HOOKS $x"
+	done
+}
+
+register_success_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_SUCCESS_HOOKS || \
+			export EBUILD_SUCCESS_HOOKS="$EBUILD_SUCCESS_HOOKS $x"
+	done
+}
+
+__strip_duplicate_slashes() {
+	if [[ -n $1 ]] ; then
+		local removed=$1
+		while [[ ${removed} == *//* ]] ; do
+			removed=${removed//\/\///}
+		done
+		echo "${removed}"
+	fi
+}
+
+KV_major() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	echo "${KV%%.*}"
+}
+
+KV_minor() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.}
+	echo "${KV%%.*}"
+}
+
+KV_micro() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.*.}
+	echo "${KV%%[^[:digit:]]*}"
+}
+
+KV_to_int() {
+	[[ -z $1 ]] && return 1
+
+	local KV_MAJOR=$(KV_major "$1")
+	local KV_MINOR=$(KV_minor "$1")
+	local KV_MICRO=$(KV_micro "$1")
+	local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
+
+	# We make version 2.2.0 the minimum version we will handle as
+	# a sanity check ... if its less, we fail ...
+	if [[ ${KV_int} -ge 131584 ]] ; then
+		echo "${KV_int}"
+		return 0
+	fi
+
+	return 1
+}
+
+_RC_GET_KV_CACHE=""
+get_KV() {
+	[[ -z ${_RC_GET_KV_CACHE} ]] \
+		&& _RC_GET_KV_CACHE=$(uname -r)
+
+	echo $(KV_to_int "${_RC_GET_KV_CACHE}")
+
+	return $?
+}
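KV_to_int above packs a kernel version into a single integer, one byte per component (major*65536 + minor*256 + micro), and rejects anything below 2.2.0, which packs to 131584. The same arithmetic as a worked sketch, assuming a plain x.y.z version string (no suffix stripping, unlike KV_micro):

	# Same packing as KV_to_int; assumes a bare "x.y.z" input.
	def kv_to_int(kv):
		major, minor, micro = (int(x) for x in kv.split('.')[:3])
		return major * 65536 + minor * 256 + micro

	assert kv_to_int('2.2.0') == 131584   # the minimum accepted above
	assert kv_to_int('3.18.5') == 201221  # 3*65536 + 18*256 + 5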
diff --git a/usr/lib/portage/bin/binhost-snapshot b/usr/lib/portage/bin/binhost-snapshot
new file mode 100755
index 0000000..3a34643
--- /dev/null
+++ b/usr/lib/portage/bin/binhost-snapshot
@@ -0,0 +1,141 @@
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import os
+import sys
+import textwrap
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage.util._argparse import ArgumentParser
+
+def parse_args(argv):
+	prog_name = os.path.basename(argv[0])
+	usage = prog_name + ' [options] ' + \
+		'<src_pkg_dir> <snapshot_dir> <snapshot_uri> <binhost_dir>'
+
+	prog_desc = "This program will copy src_pkg_dir to snapshot_dir " + \
+		"and inside binhost_dir it will create a Packages index file " + \
+		"which refers to snapshot_uri. This is intended to solve race " + \
+		"conditions on binhosts as described at http://crosbug.com/3225."
+
+	usage += "\n\n"
+	for line in textwrap.wrap(prog_desc, 70):
+		usage += line + "\n"
+
+	usage += "\n"
+	usage += "Required Arguments:\n\n"
+	usage += "  src_pkg_dir  - the source $PKGDIR\n"
+	usage += "  snapshot_dir - destination snapshot " + \
+		"directory (must not exist)\n"
+	usage += "  snapshot_uri - URI which refers to " + \
+		"snapshot_dir from the\n" + \
+		"                 client side\n"
+	usage += "  binhost_dir  - directory in which to " + \
+		"write Packages index with\n" + \
+		"                 snapshot_uri"
+
+	parser = ArgumentParser(usage=usage)
+	parser.add_argument('--hardlinks',
+		help='create hardlinks (y or n, default is y)',
+		choices=('y', 'n'),
+		default='y')
+	options, args = parser.parse_known_args(argv[1:])
+
+	if len(args) != 4:
+		parser.error("Required 4 arguments, got %d" % (len(args),))
+
+	return parser, options, args
+
+def main(argv):
+	parser, options, args = parse_args(argv)
+
+	src_pkg_dir, snapshot_dir, snapshot_uri, binhost_dir = args
+	src_pkgs_index = os.path.join(src_pkg_dir, 'Packages')
+
+	if not os.path.isdir(src_pkg_dir):
+		parser.error("src_pkg_dir is not a directory: '%s'" % (src_pkg_dir,))
+
+	if not os.path.isfile(src_pkgs_index):
+		parser.error("src_pkg_dir does not contain a " + \
+			"'Packages' index: '%s'" % (src_pkg_dir,))
+
+	parse_result = urlparse(snapshot_uri)
+	if not (parse_result.scheme and parse_result.netloc and parse_result.path):
+		parser.error("snapshot_uri is not a valid URI: '%s'" % (snapshot_uri,))
+
+	if os.path.isdir(snapshot_dir):
+		parser.error("snapshot_dir already exists: '%s'" % snapshot_dir)
+
+	try:
+		os.makedirs(os.path.dirname(snapshot_dir))
+	except OSError:
+		pass
+	if not os.path.isdir(os.path.dirname(snapshot_dir)):
+		parser.error("snapshot_dir parent could not be created: '%s'" % \
+			os.path.dirname(snapshot_dir))
+
+	try:
+		os.makedirs(binhost_dir)
+	except OSError:
+		pass
+	if not os.path.isdir(binhost_dir):
+		parser.error("binhost_dir could not be created: '%s'" % binhost_dir)
+
+	cp_opts = 'RP'
+	if options.hardlinks == 'n':
+		cp_opts += 'p'
+	else:
+		cp_opts += 'l'
+
+	cp_cmd = 'cp -%s %s %s' % (
+		cp_opts,
+		portage._shell_quote(src_pkg_dir),
+		portage._shell_quote(snapshot_dir)
+	)
+
+	ret = os.system(cp_cmd)
+	if not (os.WIFEXITED(ret) and os.WEXITSTATUS(ret) == os.EX_OK):
+		return 1
+
+	infile = io.open(portage._unicode_encode(src_pkgs_index,
+		encoding=portage._encodings['fs'], errors='strict'),
+		mode='r', encoding=portage._encodings['repo.content'],
+		errors='strict')
+
+	outfile = portage.util.atomic_ofstream(
+		os.path.join(binhost_dir, "Packages"),
+		encoding=portage._encodings['repo.content'],
+		errors='strict')
+
+	for line in infile:
+		if line[:4] == 'URI:':
+			# skip existing URI line
+			pass
+		else:
+			if not line.strip():
+				# end of header
+				outfile.write("URI: %s\n\n" % snapshot_uri)
+				break
+			outfile.write(line)
+
+	for line in infile:
+		outfile.write(line)
+
+	infile.close()
+	outfile.close()
+
+	return os.EX_OK
+
+if __name__ == "__main__":
+	sys.exit(main(sys.argv))
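The header rewrite in main() drops any existing URI: line from the copied Packages index and writes a fresh one pointing at snapshot_uri just before the blank line that closes the header; the package stanzas that follow are copied through unchanged. A condensed, self-contained sketch of that loop, with invented header fields:

	# Header rewrite as in main() above; the URI and fields are hypothetical.
	snapshot_uri = "http://example.org/snapshots/20150531"
	lines = iter(["ARCH: amd64\n", "URI: http://old/packages\n", "\n",
		"CPV: app-misc/foo-1\n"])
	out = []
	for line in lines:
		if line[:4] == 'URI:':
			continue  # drop the stale URI line
		if not line.strip():
			out.append("URI: %s\n\n" % snapshot_uri)  # end of header
			break
		out.append(line)
	out.extend(lines)  # body copied through unchanged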
diff --git a/usr/lib/portage/bin/check-implicit-pointer-usage.py b/usr/lib/portage/bin/check-implicit-pointer-usage.py
new file mode 100755
index 0000000..242436c
--- /dev/null
+++ b/usr/lib/portage/bin/check-implicit-pointer-usage.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python -b
+
+# Ripped from HP and updated from Debian
+# Update by Gentoo to support unicode output
+
+#
+# Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+# David Mosberger <davidm@hpl.hp.com>
+#
+# Scan standard input for GCC warning messages that are likely to
+# source of real 64-bit problems. In particular, see whether there
+# are any implicitly declared functions whose return values are later
+# interpreted as pointers. Those are almost guaranteed to cause
+# crashes.
+#
+
+from __future__ import print_function
+
+import re
+import sys
+
+implicit_pattern = re.compile("([^:]*):(\d+): warning: implicit declaration "
+ + "of function [`']([^']*)'")
+pointer_pattern = (
+ "([^:]*):(\d+): warning: "
+ + "("
+ + "(assignment"
+ + "|initialization"
+ + "|return"
+ + "|passing arg \d+ of `[^']*'"
+ + "|passing arg \d+ of pointer to function"
+ + ") makes pointer from integer without a cast"
+ + "|"
+ + "cast to pointer from integer of different size)")
+
+if sys.hexversion < 0x3000000:
+ # Use encoded byte strings in python-2.x, since the python ebuilds are
+ # known to remove the encodings module when USE=build is enabled (thus
+ # disabling unicode decoding/encoding). The portage module has a
+ # workaround for this, but currently we don't import that here since we
+ # don't want to trigger potential sandbox violations due to stale pyc
+ # files for the portage module.
+ unicode_quote_open = '\xE2\x80\x98'
+ unicode_quote_close = '\xE2\x80\x99'
+ def write(msg):
+ sys.stdout.write(msg)
+else:
+ unicode_quote_open = '\u2018'
+ unicode_quote_close = '\u2019'
+ def write(msg):
+ sys.stdout.buffer.write(msg.encode('utf_8', 'backslashreplace'))
+
+pointer_pattern = re.compile(pointer_pattern)
+
+last_implicit_filename = ""
+last_implicit_linenum = -1
+last_implicit_func = ""
+
+while True:
+ if sys.hexversion >= 0x3000000:
+ line = sys.stdin.buffer.readline().decode('utf_8', 'replace')
+ else:
+ line = sys.stdin.readline()
+ if not line:
+ break
+ # translate unicode open/close quotes to ascii ones
+ line = line.replace(unicode_quote_open, "`")
+ line = line.replace(unicode_quote_close, "'")
+ m = implicit_pattern.match(line)
+ if m:
+ last_implicit_filename = m.group(1)
+ last_implicit_linenum = int(m.group(2))
+ last_implicit_func = m.group(3)
+ else:
+ m = pointer_pattern.match(line)
+ if m:
+ pointer_filename = m.group(1)
+ pointer_linenum = int(m.group(2))
+ if (last_implicit_filename == pointer_filename
+ and last_implicit_linenum == pointer_linenum):
+ write("Function `%s' implicitly converted to pointer at " \
+ "%s:%d\n" % (last_implicit_func,
+ last_implicit_filename,
+ last_implicit_linenum))
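
The report only fires when an implicit-declaration warning and a pointer-from-integer warning carry the same file and line number. A self-contained check of that pairing against fabricated GCC output (the pointer regex below is trimmed to a single alternative for brevity):

    import re

    implicit = re.compile(r"([^:]*):(\d+): warning: implicit declaration "
                          r"of function [`']([^']*)'")
    pointer = re.compile(r"([^:]*):(\d+): warning: assignment makes "
                         r"pointer from integer without a cast")

    w1 = "foo.c:10: warning: implicit declaration of function `malloc'"
    w2 = "foo.c:10: warning: assignment makes pointer from integer without a cast"
    m1, m2 = implicit.match(w1), pointer.match(w2)
    # same file and same line: the case the script reports
    print(m1.group(3), m1.group(1) == m2.group(1), m1.group(2) == m2.group(2))
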
diff --git a/usr/lib/portage/bin/chpathtool.py b/usr/lib/portage/bin/chpathtool.py
new file mode 100755
index 0000000..9b26086
--- /dev/null
+++ b/usr/lib/portage/bin/chpathtool.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python -b
+# Copyright 2011-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Helper tool for converting installed files to custom prefixes.
+
+In other words, eprefixy $D for Gentoo/Prefix."""
+__doc__ = doc
+
+
+import io
+import os
+import stat
+import sys
+
+from portage.util._argparse import ArgumentParser
+
+# Argument parsing compatibility for Python 2.6 using optparse.
+if sys.hexversion < 0x2070000:
+ from optparse import OptionParser
+
+from optparse import OptionError
+
+CONTENT_ENCODING = 'utf_8'
+FS_ENCODING = 'utf_8'
+
+try:
+ import magic
+except ImportError:
+ magic = None
+else:
+ try:
+ magic.MIME_TYPE
+ except AttributeError:
+ # magic module seems to be broken
+ magic = None
+
+class IsTextFile(object):
+
+ def __init__(self):
+ if magic is not None:
+ self._call = self._is_text_magic
+ self._m = magic.open(magic.MIME_TYPE)
+ self._m.load()
+ else:
+ self._call = self._is_text_encoding
+ self._encoding = CONTENT_ENCODING
+
+ def __call__(self, filename):
+ """
+ Returns True if the given file is a text file, and False otherwise.
+ """
+ return self._call(filename)
+
+ def _is_text_magic(self, filename):
+ mime_type = self._m.file(filename)
+ if isinstance(mime_type, bytes):
+ mime_type = mime_type.decode('ascii', 'replace')
+ return mime_type.startswith('text/')
+
+ def _is_text_encoding(self, filename):
+ try:
+ for line in io.open(filename, mode='r', encoding=self._encoding):
+ pass
+ except UnicodeDecodeError:
+ return False
+ return True
+
+def chpath_inplace(filename, is_text_file, old, new):
+ """
+ Returns True if any modifications were made, and False otherwise.
+ """
+
+ modified = False
+ orig_stat = os.lstat(filename)
+ try:
+ f = io.open(filename, buffering=0, mode='r+b')
+ except IOError:
+ try:
+ orig_mode = stat.S_IMODE(os.lstat(filename).st_mode)
+ except OSError as e:
+ sys.stderr.write('%s: %s\n' % (e, filename))
+ return
+ temp_mode = 0o200 | orig_mode
+ os.chmod(filename, temp_mode)
+ try:
+ f = io.open(filename, buffering=0, mode='r+b')
+ finally:
+ os.chmod(filename, orig_mode)
+
+ len_old = len(old)
+ len_new = len(new)
+ matched_byte_count = 0
+ while True:
+ in_byte = f.read(1)
+
+ if not in_byte:
+ break
+
+ # slice rather than index: indexing bytes yields an int on Python 3,
+ # which would never compare equal to the single byte just read
+ if in_byte == old[matched_byte_count:matched_byte_count + 1]:
+ matched_byte_count += 1
+ if matched_byte_count == len_old:
+ modified = True
+ matched_byte_count = 0
+ end_position = f.tell()
+ start_position = end_position - len_old
+ if not is_text_file:
+ # search backwards for leading slashes written by
+ # a previous invocation of this tool
+ num_to_write = len_old
+ f.seek(start_position - 1)
+ while True:
+ if f.read(1) != b'/':
+ break
+ num_to_write += 1
+ f.seek(f.tell() - 2)
+
+ # pad with as many leading slashes as necessary
+ while num_to_write > len_new:
+ f.write(b'/')
+ num_to_write -= 1
+ f.write(new)
+ else:
+ remainder = f.read()
+ f.seek(start_position)
+ f.write(new)
+ if remainder:
+ f.write(remainder)
+ f.truncate()
+ f.seek(start_position + len_new)
+ elif matched_byte_count > 0:
+ # back up and try to start a new match after
+ # the first byte of the previous partial match
+ f.seek(f.tell() - matched_byte_count)
+ matched_byte_count = 0
+
+ f.close()
+ if modified:
+ if sys.hexversion >= 0x3030000:
+ orig_mtime = orig_stat.st_mtime_ns
+ os.utime(filename, ns=(orig_mtime, orig_mtime))
+ else:
+ orig_mtime = orig_stat[stat.ST_MTIME]
+ os.utime(filename, (orig_mtime, orig_mtime))
+ return modified
+
+def chpath_inplace_symlink(filename, st, old, new):
+ target = os.readlink(filename)
+ if target.startswith(old):
+ new_target = new + target[len(old):]
+ os.unlink(filename)
+ os.symlink(new_target, filename)
+ os.lchown(filename, st.st_uid, st.st_gid)
+
+def main(argv):
+
+ parser = ArgumentParser(description=doc)
+ try:
+ parser.add_argument('location', default=None,
+ help='root directory (e.g. $D)')
+ parser.add_argument('old', default=None,
+ help='original build prefix (e.g. /)')
+ parser.add_argument('new', default=None,
+ help='new install prefix (e.g. $EPREFIX)')
+ opts = parser.parse_args(argv)
+
+ location, old, new = opts.location, opts.old, opts.new
+ except OptionError:
+ # Argument parsing compatibility for Python 2.6 using optparse.
+ if sys.hexversion < 0x2070000:
+ parser = OptionParser(description=doc,
+ usage="usage: %prog [-h] location old new\n\n" + \
+ " location: root directory (e.g. $D)\n" + \
+ " old: original build prefix (e.g. /)\n" + \
+ " new: new install prefix (e.g. $EPREFIX)")
+
+ (opts, args) = parser.parse_args()
+
+ if len(args) != 3:
+ parser.print_usage()
+ print("%s: error: expected 3 arguments, got %i"
+ % (__file__, len(args)))
+ # nonzero status, so sys.exit() reports the usage error
+ return 1
+
+ location, old, new = args[0:3]
+ else:
+ raise
+
+ is_text_file = IsTextFile()
+
+ if not isinstance(location, bytes):
+ location = location.encode(FS_ENCODING)
+ if not isinstance(old, bytes):
+ old = old.encode(FS_ENCODING)
+ if not isinstance(new, bytes):
+ new = new.encode(FS_ENCODING)
+
+ st = os.lstat(location)
+
+ if stat.S_ISDIR(st.st_mode):
+ for parent, dirs, files in os.walk(location):
+ for filename in files:
+ filename = os.path.join(parent, filename)
+ try:
+ st = os.lstat(filename)
+ except OSError:
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode):
+ chpath_inplace(filename,
+ is_text_file(filename), old, new)
+ elif stat.S_ISLNK(st.st_mode):
+ chpath_inplace_symlink(filename, st, old, new)
+
+ elif stat.S_ISREG(st.st_mode):
+ chpath_inplace(location,
+ is_text_file(location), old, new)
+ elif stat.S_ISLNK(st.st_mode):
+ chpath_inplace_symlink(location, st, old, new)
+
+ return os.EX_OK
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
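
For binary files chpath_inplace() must not change the file size, so when the new prefix is shorter it is left-padded with slashes (POSIX resolves '//opt' and '/opt' to the same path). The trick in isolation, with made-up prefixes:

    old, new = b'/long/build/prefix', b'/opt'
    padded = b'/' * (len(old) - len(new)) + new  # same byte length as old
    blob = b'\x7fELF\x00path=/long/build/prefix\x00more'
    patched = blob.replace(old, padded)
    assert len(patched) == len(blob)  # offsets in the binary are preserved
    print(patched)
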
diff --git a/usr/lib/portage/bin/clean_locks b/usr/lib/portage/bin/clean_locks
new file mode 100755
index 0000000..13af061
--- /dev/null
+++ b/usr/lib/portage/bin/clean_locks
@@ -0,0 +1,43 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys, errno
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+
+if not sys.argv[1:] or "--help" in sys.argv or "-h" in sys.argv:
+ print()
+ print("You must specify directories with hardlink-locks to clean.")
+ print("You may optionally specify --force, which will remove all")
+ print("of the locks, even if we can't establish if they are in use.")
+ print("Please attempt cleaning without force first.")
+ print()
+ print("%s %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
+ print("%s --force %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
+ print()
+ sys.exit(1)
+
+force = False
+if "--force" in sys.argv[1:]:
+ force=True
+
+for x in sys.argv[1:]:
+ if x == "--force":
+ continue
+ try:
+ for y in portage.locks.hardlock_cleanup(x, remove_all_locks=force):
+ print(y)
+ print()
+
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ENOTDIR):
+ print("!!! %s is not a directory or does not exist" % x)
+ else:
+ raise
+ sys.exit(e.errno)
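
The same cleanup is available programmatically through the API the script calls; a sketch, with a hypothetical lock directory:

    import portage

    # remove_all_locks=False mirrors the default (no --force) behavior
    for msg in portage.locks.hardlock_cleanup('/var/cache/distfiles/.locks',
            remove_all_locks=False):
        print(msg)
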
diff --git a/usr/lib/portage/bin/deprecated-path b/usr/lib/portage/bin/deprecated-path
new file mode 100755
index 0000000..b8aaadb
--- /dev/null
+++ b/usr/lib/portage/bin/deprecated-path
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author:
+#
+
+source /lib/gentoo/functions.sh
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+IFS=':'
+
+for path in ${PATH}; do
+ [[ -x ${path}/${scriptname} ]] || continue
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+
+ unset IFS
+ eerror "Deprecation warning: Calling ${scriptname} from wrong path: '${scriptpath}'"
+ eerror "Correct path should be '${path}/${scriptname}', Please correct your scripts or file a bug with the maintainer..."
+ exec "${path}/${scriptname}" "$@"
+done
+
+unset IFS
+
+eerror "ERROR: portage file: deprecated-path: Failed to locate ${scriptname} in PATH"
+eerror "PATH: ${PATH}"
+exit 1
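
The loop above hands off to the first executable of the same name found later in PATH, skipping itself via the -ef identity test. The same walk transliterated to Python (find_real is a hypothetical helper, not part of portage):

    import os

    def find_real(scriptpath):
        name = os.path.basename(scriptpath)
        for d in os.environ.get('PATH', '').split(':'):
            cand = os.path.join(d, name)
            # executable, and not an alias of ourselves (bash's -ef test)
            if os.access(cand, os.X_OK) and not os.path.samefile(cand, scriptpath):
                return cand
        return None
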
diff --git a/usr/lib/portage/bin/dispatch-conf b/usr/lib/portage/bin/dispatch-conf
new file mode 100755
index 0000000..a7c8ed1
--- /dev/null
+++ b/usr/lib/portage/bin/dispatch-conf
@@ -0,0 +1,499 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# dispatch-conf -- Integrate modified configs, post-emerge
+#
+# Jeremy Wohl (http://igmus.org)
+#
+# TODO
+# dialog menus
+#
+
+from __future__ import print_function
+
+from stat import ST_GID, ST_MODE, ST_UID
+from random import random
+import atexit, re, shutil, stat, sys
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+from portage import _unicode_decode
+from portage.dispatch_conf import diffstatusoutput
+from portage.process import find_binary, spawn
+from portage.const import EPREFIX
+
+FIND_EXTANT_CONFIGS = "find '%s' %s -name '._cfg????_%s' ! -name '.*~' ! -iname '.*.bak' -print"
+DIFF_CONTENTS = "diff -Nu '%s' '%s'"
+
+if "case-insensitive-fs" in portage.settings.features:
+ FIND_EXTANT_CONFIGS = \
+ FIND_EXTANT_CONFIGS.replace("-name '._cfg", "-iname '._cfg")
+
+# We need a secure scratch dir, and python emits noisy warnings on the use of tempnam
+oldmask = os.umask(0o077)
+SCRATCH_DIR = None
+while SCRATCH_DIR is None:
+ try:
+ mydir = EPREFIX+"/tmp/dispatch-conf."
+ for x in range(0,8):
+ if int(random() * 3) == 0:
+ mydir += chr(int(65+random()*26.0))
+ elif int(random() * 2) == 0:
+ mydir += chr(int(97+random()*26.0))
+ else:
+ mydir += chr(int(48+random()*10.0))
+ if os.path.exists(mydir):
+ continue
+ os.mkdir(mydir)
+ SCRATCH_DIR = mydir
+ except OSError as e:
+ if e.errno != 17:
+ raise
+os.umask(oldmask)
+
+# Ensure the scratch dir is deleted
+def cleanup(mydir=SCRATCH_DIR):
+ shutil.rmtree(mydir)
+atexit.register(cleanup)
+
+MANDATORY_OPTS = [ 'archive-dir', 'diff', 'replace-cvs', 'replace-wscomments', 'merge' ]
+
+def cmd_var_is_valid(cmd):
+ """
+ Return true if the first whitespace-separated token contained
+ in cmd is an executable file, false otherwise.
+ """
+ cmd = portage.util.shlex_split(cmd)
+ if not cmd:
+ return False
+
+ if os.path.isabs(cmd[0]):
+ # os.X_OK, not os.EX_OK (which is 0 and would only test existence)
+ return os.access(cmd[0], os.X_OK)
+
+ return find_binary(cmd[0]) is not None
+
+class dispatch:
+ options = {}
+
+ def grind (self, config_paths):
+ confs = []
+ count = 0
+
+ config_root = portage.settings["EPREFIX"] or os.sep
+ self.options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+ if "log-file" in self.options:
+ if os.path.isfile(self.options["log-file"]):
+ shutil.copy(self.options["log-file"], self.options["log-file"] + '.old')
+ if os.path.isfile(self.options["log-file"]) \
+ or not os.path.exists(self.options["log-file"]):
+ open(self.options["log-file"], 'w').close() # Truncate it
+ os.chmod(self.options["log-file"], 0o600)
+ else:
+ self.options["log-file"] = "/dev/null"
+
+ pager = self.options.get("pager")
+ if pager is None or not cmd_var_is_valid(pager):
+ pager = os.environ.get("PAGER")
+ if pager is None or not cmd_var_is_valid(pager):
+ pager = "cat"
+
+ pager_basename = os.path.basename(portage.util.shlex_split(pager)[0])
+ if pager_basename == "less":
+ less_opts = self.options.get("less-opts")
+ if less_opts is not None and less_opts.strip():
+ pager += " " + less_opts
+
+ if pager_basename == "cat":
+ pager = ""
+ else:
+ pager = " | " + pager
+
+ #
+ # Build list of extant configs
+ #
+
+ for path in config_paths:
+ path = portage.normalize_path(
+ os.path.join(config_root, path.lstrip(os.sep)))
+ try:
+ mymode = os.stat(path).st_mode
+ except OSError:
+ continue
+ basename = "*"
+ find_opts = "-name '.*' -type d -prune -o"
+ if not stat.S_ISDIR(mymode):
+ path, basename = os.path.split(path)
+ find_opts = "-maxdepth 1"
+
+ with os.popen(FIND_EXTANT_CONFIGS %
+ (path, find_opts, basename)) as proc:
+ confs += self.massage(proc.readlines())
+
+ if self.options['use-rcs'] == 'yes':
+ for rcs_util in ("rcs", "ci", "co", "rcsmerge"):
+ if not find_binary(rcs_util):
+ print('dispatch-conf: Error finding all RCS utils and ' + \
+ 'use-rcs=yes in config; fatal', file=sys.stderr)
+ return False
+
+
+ # config file freezing support
+ frozen_files = set(self.options.get("frozen-files", "").split())
+ auto_zapped = []
+ protect_obj = portage.util.ConfigProtect(
+ config_root, config_paths,
+ portage.util.shlex_split(
+ portage.settings.get('CONFIG_PROTECT_MASK', '')),
+ case_insensitive = ("case-insensitive-fs"
+ in portage.settings.features))
+
+ def diff(file1, file2):
+ return diffstatusoutput(DIFF_CONTENTS, file1, file2)
+
+ #
+ # Remove new configs identical to current
+ # and
+ # Auto-replace configs a) whose differences are simply CVS interpolations,
+ # or b) whose differences are simply ws or comments,
+ # or c) in paths now unprotected by CONFIG_PROTECT_MASK,
+ #
+
+ def f (conf):
+ mrgconf = re.sub(r'\._cfg', '._mrg', conf['new'])
+ archive = os.path.join(self.options['archive-dir'], conf['current'].lstrip('/'))
+ if self.options['use-rcs'] == 'yes':
+ mrgfail = portage.dispatch_conf.rcs_archive(archive, conf['current'], conf['new'], mrgconf)
+ else:
+ mrgfail = portage.dispatch_conf.file_archive(archive, conf['current'], conf['new'], mrgconf)
+ if os.path.exists(archive + '.dist'):
+ unmodified = len(diff(conf['current'], archive + '.dist')[1]) == 0
+ else:
+ unmodified = 0
+ if os.path.exists(mrgconf):
+ if mrgfail or len(diff(conf['new'], mrgconf)[1]) == 0:
+ os.unlink(mrgconf)
+ newconf = conf['new']
+ else:
+ newconf = mrgconf
+ else:
+ newconf = conf['new']
+
+ if newconf == mrgconf and \
+ self.options.get('ignore-previously-merged') != 'yes' and \
+ os.path.exists(archive+'.dist') and \
+ len(diff(archive+'.dist', conf['new'])[1]) == 0:
+ # The current update is identical to the archived .dist
+ # version that has previously been merged.
+ os.unlink(mrgconf)
+ newconf = conf['new']
+
+ mystatus, myoutput = diff(conf['current'], newconf)
+ myoutput_len = len(myoutput)
+ same_file = 0 == myoutput_len
+ if mystatus >> 8 == 2:
+ # Binary files differ
+ same_cvs = False
+ same_wsc = False
+ else:
+ # Extract all the normal diff lines (ignore the headers).
+ mylines = re.findall('^[+-][^\n+-].*$', myoutput, re.MULTILINE)
+
+ # Filter out all the cvs headers
+ cvs_header = re.compile('# [$]Header:')
+ cvs_lines = list(filter(cvs_header.search, mylines))
+ same_cvs = len(mylines) == len(cvs_lines)
+
+ # Filter out comments and whitespace-only changes.
+ # Note: be nice to also ignore lines that only differ in whitespace...
+ wsc_lines = []
+ for x in [r'^[-+]\s*#', r'^[-+]\s*$']:
+ wsc_lines += list(filter(re.compile(x).match, mylines))
+ same_wsc = len(mylines) == len(wsc_lines)
+
+ # Do options permit?
+ same_cvs = same_cvs and self.options['replace-cvs'] == 'yes'
+ same_wsc = same_wsc and self.options['replace-wscomments'] == 'yes'
+ unmodified = unmodified and self.options['replace-unmodified'] == 'yes'
+
+ if same_file:
+ os.unlink (conf ['new'])
+ self.post_process(conf['current'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ return False
+ elif conf['current'] in frozen_files:
+ """Frozen files are automatically zapped. The new config has
+ already been archived with a .new suffix. When zapped, it is
+ left with the .new suffix (post_process is skipped), since it
+ hasn't been merged into the current config."""
+ auto_zapped.append(conf['current'])
+ os.unlink(conf['new'])
+ try:
+ os.unlink(mrgconf)
+ except OSError:
+ pass
+ return False
+ elif unmodified or same_cvs or same_wsc or \
+ not protect_obj.isprotected(conf['current']):
+ self.replace(newconf, conf['current'])
+ self.post_process(conf['current'])
+ if newconf == mrgconf:
+ os.unlink(conf['new'])
+ elif os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ return False
+ else:
+ return True
+
+ confs = [x for x in confs if f(x)]
+
+ #
+ # Interactively process remaining
+ #
+
+ valid_input = "qhtnmlezu"
+
+ for conf in confs:
+ count = count + 1
+
+ newconf = conf['new']
+ mrgconf = re.sub(r'\._cfg', '._mrg', newconf)
+ if os.path.exists(mrgconf):
+ newconf = mrgconf
+ show_new_diff = 0
+
+ while 1:
+ clear_screen()
+ if show_new_diff:
+ cmd = self.options['diff'] % (conf['new'], mrgconf)
+ cmd += pager
+ spawn_shell(cmd)
+ show_new_diff = 0
+ else:
+ cmd = self.options['diff'] % (conf['current'], newconf)
+ cmd += pager
+ spawn_shell(cmd)
+
+ print()
+ print('>> (%i of %i) -- %s' % (count, len(confs), conf ['current']))
+ print('>> q quit, h help, n next, e edit-new, z zap-new, u use-new\n m merge, t toggle-merge, l look-merge: ', end=' ')
+
+ # In some cases getch() will return some spurious characters
+ # that do not represent valid input. If we don't validate the
+ # input then the spurious characters can cause us to jump
+ # back into the above "diff" command immediately after the user
+ # has exited it (which can be quite confusing and gives an
+ # "out of control" feeling).
+ while True:
+ c = getch()
+ if c in valid_input:
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+ break
+
+ if c == 'q':
+ sys.exit (0)
+ if c == 'h':
+ self.do_help ()
+ continue
+ elif c == 't':
+ if newconf == mrgconf:
+ newconf = conf['new']
+ elif os.path.exists(mrgconf):
+ newconf = mrgconf
+ continue
+ elif c == 'n':
+ break
+ elif c == 'm':
+ merged = SCRATCH_DIR+"/"+os.path.basename(conf['current'])
+ print()
+ ret = os.system (self.options['merge'] % (merged, conf ['current'], newconf))
+ ret = os.WEXITSTATUS(ret)
+ if ret < 2:
+ ret = 0
+ if ret:
+ print("Failure running 'merge' command")
+ continue
+ shutil.copyfile(merged, mrgconf)
+ os.remove(merged)
+ mystat = os.lstat(conf['new'])
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+ newconf = mrgconf
+ continue
+ elif c == 'l':
+ show_new_diff = 1
+ continue
+ elif c == 'e':
+ if 'EDITOR' not in os.environ:
+ os.environ['EDITOR']='nano'
+ os.system(os.environ['EDITOR'] + ' ' + newconf)
+ continue
+ elif c == 'z':
+ os.unlink(conf['new'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ break
+ elif c == 'u':
+ self.replace(newconf, conf ['current'])
+ self.post_process(conf['current'])
+ if newconf == mrgconf:
+ os.unlink(conf['new'])
+ elif os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ break
+ else:
+ raise AssertionError("Invalid Input: %s" % c)
+
+ if auto_zapped:
+ print()
+ print(" One or more updates are frozen and have been automatically zapped:")
+ print()
+ for frozen in auto_zapped:
+ print(" * '%s'" % frozen)
+ print()
+
+ def replace (self, newconf, curconf):
+ """Replace current config with the new/merged version. Also logs
+ the diff of what changed into the configured log file."""
+ os.system((DIFF_CONTENTS % (curconf, newconf)) + '>>' + self.options["log-file"])
+ try:
+ os.rename(newconf, curconf)
+ except (IOError, os.error) as why:
+ print('dispatch-conf: Error renaming %s to %s: %s; fatal' % \
+ (newconf, curconf, str(why)), file=sys.stderr)
+
+
+ def post_process(self, curconf):
+ archive = os.path.join(self.options['archive-dir'], curconf.lstrip('/'))
+ if self.options['use-rcs'] == 'yes':
+ portage.dispatch_conf.rcs_archive_post_process(archive)
+ else:
+ portage.dispatch_conf.file_archive_post_process(archive)
+
+
+ def massage (self, newconfigs):
+ """Sort, rstrip, remove old versions, break into triad hash.
+
+ Triad is dictionary of current (/etc/make.conf), new (/etc/._cfg0003_make.conf)
+ and dir (/etc).
+
+ We keep ._cfg0002_conf over ._cfg0001_conf and ._cfg0000_conf.
+ """
+ h = {}
+ configs = []
+ newconfigs.sort ()
+
+ for nconf in newconfigs:
+ nconf = nconf.rstrip ()
+ conf = re.sub (r'\._cfg\d+_', '', nconf)
+ dirname = os.path.dirname(nconf)
+ conf_map = {
+ 'current' : conf,
+ 'dir' : dirname,
+ 'new' : nconf,
+ }
+
+ if conf in h:
+ mrgconf = re.sub(r'\._cfg', '._mrg', h[conf]['new'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ os.unlink(h[conf]['new'])
+ h[conf].update(conf_map)
+ else:
+ h[conf] = conf_map
+ configs.append(conf_map)
+
+ return configs
+
+
+ def do_help (self):
+ print()
+ print()
+
+ print(' u -- update current config with new config and continue')
+ print(' z -- zap (delete) new config and continue')
+ print(' n -- skip to next config, leave all intact')
+ print(' e -- edit new config')
+ print(' m -- interactively merge current and new configs')
+ print(' l -- look at diff between pre-merged and merged configs')
+ print(' t -- toggle new config between merged and pre-merged state')
+ print(' h -- this screen')
+ print(' q -- quit')
+
+ print(); print('press any key to return to diff...', end=' ')
+
+ getch ()
+
+
+def getch ():
+ # from ASPN - Danny Yoo
+ #
+ import tty, termios
+
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
+def clear_screen():
+ try:
+ import curses
+ try:
+ curses.setupterm()
+ sys.stdout.write(_unicode_decode(curses.tigetstr("clear")))
+ sys.stdout.flush()
+ return
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+ os.system("clear 2>/dev/null")
+
+shell = os.environ.get("SHELL")
+if not shell or not os.access(shell, os.X_OK):
+ shell = find_binary("sh")
+
+def spawn_shell(cmd):
+ if shell:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ spawn([shell, "-c", cmd], env=os.environ,
+ fd_pipes = { 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stderr__.fileno()})
+ else:
+ os.system(cmd)
+
+def usage(argv):
+ print('dispatch-conf: sane configuration file update\n')
+ print('Usage: dispatch-conf [config dirs]\n')
+ print('See the dispatch-conf(1) man page for more details')
+ sys.exit(os.EX_OK)
+
+for x in sys.argv:
+ if x in ('-h', '--help'):
+ usage(sys.argv)
+ elif x == '--version':
+ print("Portage", portage.VERSION)
+ sys.exit(os.EX_OK)
+
+# run
+d = dispatch ()
+
+if len(sys.argv) > 1:
+ # for testing
+ d.grind(sys.argv[1:])
+else:
+ d.grind(portage.util.shlex_split(
+ portage.settings.get('CONFIG_PROTECT', '')))
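
The auto-replace decision in grind() reduces to: collect the +/- body lines of the unified diff, then test whether every one of them is a CVS header line or a comment/blank line. The whitespace-and-comments case in isolation, on a fabricated two-line change:

    import re

    diff_output = ('--- a/conf\n'
                   '+++ b/conf\n'
                   '-# old comment\n'
                   '+# new comment\n')
    mylines = re.findall(r'^[+-][^\n+-].*$', diff_output, re.MULTILINE)
    wsc_lines = []
    for pat in [r'^[-+]\s*#', r'^[-+]\s*$']:
        wsc_lines += [l for l in mylines if re.match(pat, l)]
    print(len(mylines) == len(wsc_lines))  # True: eligible for auto-replace
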
diff --git a/usr/lib/portage/bin/dohtml.py b/usr/lib/portage/bin/dohtml.py
new file mode 100755
index 0000000..5359f5e
--- /dev/null
+++ b/usr/lib/portage/bin/dohtml.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# Typical usage:
+# dohtml -r docs/*
+# - put all files and directories in docs into /usr/share/doc/${PF}/html
+# dohtml foo.html
+# - put foo.html into /usr/share/doc/${PF}/html
+#
+#
+# Detailed usage:
+# dohtml <list-of-files>
+# - will install the files in the list of files (space-separated list) into
+# /usr/share/doc/${PF}/html, provided the file ends in .css, .gif, .htm,
+# .html, .jpeg, .jpg, .js or .png.
+# dohtml -r <list-of-files-and-directories>
+# - will do as 'dohtml', but recurse into all directories, as long as the
+# directory name is not CVS
+# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but add .jpe,.java (default filter list is
+# added to your list)
+# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
+# list is ignored)
+# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
+# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
+#
+
+from __future__ import print_function
+
+import os
+import shutil
+import sys
+
+from portage.util import normalize_path
+
+# Change back to original cwd _after_ all imports (bug #469338).
+os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
+
+def dodir(path):
+ try:
+ os.makedirs(path, 0o755)
+ except OSError:
+ if not os.path.isdir(path):
+ raise
+ os.chmod(path, 0o755)
+
+def dofile(src,dst):
+ shutil.copy(src, dst)
+ os.chmod(dst, 0o644)
+
+def eqawarn(lines):
+ cmd = "source '%s/isolated-functions.sh' ; " % \
+ os.environ["PORTAGE_BIN_PATH"]
+ for line in lines:
+ cmd += "eqawarn \"%s\" ; " % line
+ os.spawnlp(os.P_WAIT, "bash", "bash", "-c", cmd)
+
+skipped_directories = []
+skipped_files = []
+warn_on_skipped_files = os.environ.get("PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES") is not None
+unwarned_skipped_extensions = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS", "").split()
+unwarned_skipped_files = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES", "").split()
+
+def install(basename, dirname, options, prefix=""):
+ fullpath = basename
+ if prefix:
+ fullpath = os.path.join(prefix, fullpath)
+ if dirname:
+ fullpath = os.path.join(dirname, fullpath)
+
+ if options.DOCDESTTREE:
+ desttree = options.DOCDESTTREE
+ else:
+ desttree = "html"
+
+ destdir = os.path.join(options.ED, "usr", "share", "doc",
+ options.PF.lstrip(os.sep), desttree.lstrip(os.sep),
+ options.doc_prefix.lstrip(os.sep), prefix).rstrip(os.sep)
+
+ if not os.path.exists(fullpath):
+ sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
+ return False
+ elif os.path.isfile(fullpath):
+ ext = os.path.splitext(basename)[1][1:]
+ if ext in options.allowed_exts or basename in options.allowed_files:
+ dodir(destdir)
+ dofile(fullpath, os.path.join(destdir, basename))
+ elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
+ skipped_files.append(fullpath)
+ elif options.recurse and os.path.isdir(fullpath) and \
+ basename not in options.disallowed_dirs:
+ for i in os.listdir(fullpath):
+ pfx = basename
+ if prefix:
+ pfx = os.path.join(prefix, pfx)
+ install(i, dirname, options, pfx)
+ elif not options.recurse and os.path.isdir(fullpath):
+ global skipped_directories
+ skipped_directories.append(fullpath)
+ return False
+ else:
+ return False
+ return True
+
+
+class OptionsClass:
+ def __init__(self):
+ self.PF = ""
+ self.ED = ""
+ self.DOCDESTTREE = ""
+
+ if "PF" in os.environ:
+ self.PF = os.environ["PF"]
+ if self.PF:
+ self.PF = normalize_path(self.PF)
+ if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
+ os.environ.get("EAPI", "0") in ("0", "1", "2"):
+ self.ED = os.environ.get("D", "")
+ else:
+ self.ED = os.environ.get("ED", "")
+ if self.ED:
+ self.ED = normalize_path(self.ED)
+ if "_E_DOCDESTTREE_" in os.environ:
+ self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
+ if self.DOCDESTTREE:
+ self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)
+
+ self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
+ if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
+ self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
+ self.allowed_files = []
+ self.disallowed_dirs = ['CVS']
+ self.recurse = False
+ self.verbose = False
+ self.doc_prefix = ""
+
+def print_help():
+ opts = OptionsClass()
+
+ print("dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]")
+ print(" [-r] [-V] <file> [file ...]")
+ print()
+ print(" -a Set the list of allowed to those that are specified.")
+ print(" Default:", ",".join(opts.allowed_exts))
+ print(" -A Extend the list of allowed file types.")
+ print(" -f Set list of allowed extensionless file names.")
+ print(" -x Set directories to be excluded from recursion.")
+ print(" Default:", ",".join(opts.disallowed_dirs))
+ print(" -p Set a document prefix for installed files (empty by default).")
+ print(" -r Install files and directories recursively.")
+ print(" -V Be verbose.")
+ print()
+
+def parse_args():
+ options = OptionsClass()
+ args = []
+
+ x = 1
+ while x < len(sys.argv):
+ arg = sys.argv[x]
+ if arg in ["-h","-r","-V"]:
+ if arg == "-h":
+ print_help()
+ sys.exit(0)
+ elif arg == "-r":
+ options.recurse = True
+ elif arg == "-V":
+ options.verbose = True
+ elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
+ x += 1
+ if x == len(sys.argv):
+ print_help()
+ sys.exit(0)
+ elif arg == "-p":
+ options.doc_prefix = sys.argv[x]
+ if options.doc_prefix:
+ options.doc_prefix = normalize_path(options.doc_prefix)
+ else:
+ values = sys.argv[x].split(",")
+ if arg == "-A":
+ options.allowed_exts.extend(values)
+ elif arg == "-a":
+ options.allowed_exts = values
+ elif arg == "-f":
+ options.allowed_files = values
+ elif arg == "-x":
+ options.disallowed_dirs = values
+ else:
+ args.append(sys.argv[x])
+ x += 1
+
+ return (options, args)
+
+def main():
+
+ (options, args) = parse_args()
+
+ if options.verbose:
+ print("Allowed extensions:", options.allowed_exts)
+ print("Document prefix : '" + options.doc_prefix + "'")
+ print("Allowed files :", options.allowed_files)
+
+ success = False
+ endswith_slash = (os.sep, os.sep + ".")
+
+ for x in args:
+ trailing_slash = x.endswith(endswith_slash)
+ x = normalize_path(x)
+ if trailing_slash:
+ # Modify behavior of basename and dirname
+ # as noted in bug #425214, causing foo/ to
+ # behave similarly to the way that foo/*
+ # behaves.
+ x += os.sep
+ basename = os.path.basename(x)
+ dirname = os.path.dirname(x)
+ success |= install(basename, dirname, options)
+
+ for x in skipped_directories:
+ eqawarn(["QA Notice: dohtml on directory '%s' without recursion option" % x])
+ for x in skipped_files:
+ eqawarn(["dohtml: skipped file '%s'" % x])
+
+ if success:
+ retcode = 0
+ else:
+ retcode = 1
+
+ sys.exit(retcode)
+
+if __name__ == "__main__":
+ main()
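
install() admits a plain file only when its extension is on the allowed list or its basename was whitelisted with -f. The gate in isolation, with sample names (README on a hypothetical -f list):

    import os

    allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
    allowed_files = ['README']
    for basename in ('index.html', 'notes.txt', 'README'):
        ext = os.path.splitext(basename)[1][1:]
        ok = ext in allowed_exts or basename in allowed_files
        print('%-12s %s' % (basename, 'install' if ok else 'skip'))
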
diff --git a/usr/lib/portage/bin/eapi.sh b/usr/lib/portage/bin/eapi.sh
new file mode 100755
index 0000000..623b89f
--- /dev/null
+++ b/usr/lib/portage/bin/eapi.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# PHASES
+
+___eapi_has_pkg_pretend() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_src_prepare() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_has_src_configure() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_default_src_test_disables_parallel_jobs() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_S_WORKDIR_fallback() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# VARIABLES
+
+___eapi_has_prefix_variables() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2)$ || " ${FEATURES} " == *" force-prefix "* ]]
+}
+
+___eapi_has_HDEPEND() {
+ [[ ${1-${EAPI}} =~ ^(5-hdepend)$ ]]
+}
+
+___eapi_has_RDEPEND_DEPEND_fallback() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# HELPERS PRESENCE
+
+___eapi_has_dohard() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_dosed() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_docompress() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_nonfatal() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_doheader() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_usex() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_master_repositories() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_repository_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_available_eclasses() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_eclass_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_license_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_user() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_group() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+# HELPERS BEHAVIOR
+
+___eapi_best_version_and_has_version_support_--host-root() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_unpack_supports_xz() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2)$ ]]
+}
+
+___eapi_econf_passes_--disable-dependency-tracking() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_econf_passes_--disable-silent-rules() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_use_enable_and_use_with_support_empty_third_argument() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_dodoc_supports_-r() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_doins_and_newins_preserve_symlinks() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_newins_supports_reading_from_standard_input() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_helpers_can_die() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_disallows_helpers_in_global_scope() {
+ [[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
+
+___eapi_unpack_is_case_sensitive() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi|5|5-hdepend)$ ]]
+}
+
+# OTHERS
+
+___eapi_enables_globstar() {
+ [[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
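
Each predicate above is a single anchored alternation over the EAPI string, falling back to ${EAPI} when no argument is passed. One representative check transliterated to Python (eapi_has_src_prepare is a hypothetical mirror, not a portage API):

    import os
    import re

    def eapi_has_src_prepare(eapi=None):
        if eapi is None:
            eapi = os.environ.get('EAPI', '0')
        # mirrors: [[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
        return re.match(r'^(0|1)$', eapi) is None

    print(eapi_has_src_prepare('5'), eapi_has_src_prepare('0'))  # True False
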
diff --git a/usr/lib/portage/bin/ebuild b/usr/lib/portage/bin/ebuild
new file mode 100755
index 0000000..02ee022
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild
@@ -0,0 +1,358 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import platform
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+ def exithandler(signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+ # Prevent "[Errno 32] Broken pipe" exceptions when
+ # writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+ sys.exit(128 + signal.SIGINT)
+
+def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
+
+import io
+import os
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import VDB_PATH
+from portage.util._argparse import ArgumentParser
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+
+description = "See the ebuild(1) man page for more info"
+usage = "Usage: ebuild <ebuild file> <command> [command] ..."
+parser = ArgumentParser(description=description, usage=usage)
+
+force_help = "When used together with the digest or manifest " + \
+ "command, this option forces regeneration of digests for all " + \
+ "distfiles associated with the current ebuild. Any distfiles " + \
+ "that do not already exist in ${DISTDIR} will be automatically fetched."
+
+parser.add_argument("--force", help=force_help, action="store_true")
+parser.add_argument("--color", help="enable or disable color output",
+ choices=("y", "n"))
+parser.add_argument("--debug", help="show debug output",
+ action="store_true")
+parser.add_argument("--version", help="show version and exit",
+ action="store_true")
+parser.add_argument("--ignore-default-opts",
+ action="store_true",
+ help="do not use the EBUILD_DEFAULT_OPTS environment variable")
+parser.add_argument("--skip-manifest", help="skip all manifest checks",
+ action="store_true")
+
+opts, pargs = parser.parse_known_args(args=sys.argv[1:])
+
+def err(txt):
+ portage.writemsg('ebuild: %s\n' % (txt,), noiselevel=-1)
+ sys.exit(1)
+
+if opts.version:
+ print("Portage", portage.VERSION)
+ sys.exit(os.EX_OK)
+
+if len(pargs) < 2:
+ parser.error("missing required args")
+
+if not opts.ignore_default_opts:
+ default_opts = portage.util.shlex_split(
+ portage.settings.get("EBUILD_DEFAULT_OPTS", ""))
+ opts, pargs = parser.parse_known_args(default_opts + sys.argv[1:])
+
+debug = opts.debug
+force = opts.force
+
+import portage.util, portage.const
+
+# do this _after_ 'import portage' to prevent unnecessary tracing
+if debug and "python-trace" in portage.features:
+ import portage.debug
+ portage.debug.set_trace(True)
+
+if not opts.color == 'y' and \
+ (opts.color == 'n' or \
+ portage.settings.get('NOCOLOR') in ('yes', 'true') or \
+ portage.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
+ portage.settings.unlock()
+ portage.settings['NOCOLOR'] = 'true'
+ portage.settings.lock()
+
+ebuild = pargs.pop(0)
+
+pf = None
+if ebuild.endswith(".ebuild"):
+ pf = os.path.basename(ebuild)[:-7]
+
+if pf is None:
+ err("%s: does not end with '.ebuild'" % (ebuild,))
+
+if not os.path.isabs(ebuild):
+ mycwd = os.getcwd()
+ # Try to get the non-canonical path from the PWD environment variable, since
+ # the canonical path returned from os.getcwd() may be unusable in
+ # cases where the directory structure is built from symlinks.
+ pwd = os.environ.get('PWD', '')
+ if sys.hexversion < 0x3000000:
+ pwd = _unicode_decode(pwd, encoding=_encodings['content'],
+ errors='strict')
+ if pwd and pwd != mycwd and \
+ os.path.realpath(pwd) == mycwd:
+ mycwd = portage.normalize_path(pwd)
+ ebuild = os.path.join(mycwd, ebuild)
+ebuild = portage.normalize_path(ebuild)
+# portdbapi uses the canonical path for the base of the portage tree, but
+# subdirectories of the base can be built from symlinks (like crossdev does).
+ebuild_portdir = os.path.realpath(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
+ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
+vdb_path = os.path.realpath(os.path.join(portage.settings['EROOT'], VDB_PATH))
+
+# Make sure that portdb.findname() returns the correct ebuild.
+if ebuild_portdir != vdb_path and \
+ ebuild_portdir not in portage.portdb.porttrees:
+ portdir_overlay = portage.settings.get("PORTDIR_OVERLAY", "")
+ if sys.hexversion >= 0x3000000:
+ os.environ["PORTDIR_OVERLAY"] = \
+ portdir_overlay + \
+ " " + _shell_quote(ebuild_portdir)
+ else:
+ os.environ["PORTDIR_OVERLAY"] = \
+ _unicode_encode(portdir_overlay,
+ encoding=_encodings['content'], errors='strict') + \
+ " " + _unicode_encode(_shell_quote(ebuild_portdir),
+ encoding=_encodings['content'], errors='strict')
+
+ print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
+ portage._reset_legacy_globals()
+
+myrepo = None
+if ebuild_portdir != vdb_path:
+ myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
+
+if not os.path.exists(ebuild):
+ err('%s: does not exist' % (ebuild,))
+
+ebuild_split = ebuild.split("/")
+cpv = "%s/%s" % (ebuild_split[-3], pf)
+
+with io.open(_unicode_encode(ebuild, encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ eapi = portage._parse_eapi_ebuild_head(f)[0]
+if eapi is None:
+ eapi = "0"
+if not portage.catpkgsplit(cpv, eapi=eapi):
+ err('%s: %s: does not follow correct package syntax' % (ebuild, cpv))
+
+if ebuild.startswith(vdb_path):
+ mytree = "vartree"
+ pkg_type = "installed"
+
+ portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv, myrepo=myrepo)
+
+ if os.path.realpath(portage_ebuild) != ebuild:
+ err('Portage seems to think that %s is at %s' % (cpv, portage_ebuild))
+
+else:
+ mytree = "porttree"
+ pkg_type = "ebuild"
+
+ portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
+
+ if not portage_ebuild or portage_ebuild != ebuild:
+ err('%s: does not seem to have a valid PORTDIR structure' % (ebuild,))
+
+if len(pargs) > 1 and "config" in pargs:
+ err('"config" must not be called with any other phase')
+
+def discard_digests(myebuild, mysettings, mydbapi):
+ """Discard all distfiles digests for the given ebuild. This is useful when
+ upstream has changed the identity of the distfiles and the user would
+ otherwise have to manually remove the Manifest and files/digest-* files in
+ order to ensure correct results."""
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ pkgdir = os.path.dirname(myebuild)
+ fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
+ mf = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict)
+ mf.create(requiredDistfiles=None,
+ assumeDistHashesSometimes=True, assumeDistHashesAlways=True)
+ distfiles = fetchlist_dict[cpv]
+ for myfile in distfiles:
+ try:
+ del mf.fhashdict["DIST"][myfile]
+ except KeyError:
+ pass
+ mf.write()
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
+
+portage.settings.validate() # generate warning messages if necessary
+
+build_dir_phases = set(["setup", "unpack", "prepare", "configure", "compile",
+ "test", "install", "package", "rpm", "merge", "qmerge"])
+
+# If the current metadata is invalid then force the ebuild to be
+# sourced again even if $T/environment already exists.
+ebuild_changed = False
+if mytree == "porttree" and build_dir_phases.intersection(pargs):
+ ebuild_changed = \
+ portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)[0] is None
+
+tmpsettings = portage.config(clone=portage.settings)
+tmpsettings["PORTAGE_VERBOSE"] = "1"
+tmpsettings.backup_changes("PORTAGE_VERBOSE")
+
+if opts.skip_manifest:
+ tmpsettings["EBUILD_SKIP_MANIFEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_SKIP_MANIFEST")
+
+if opts.skip_manifest or \
+ "digest" in tmpsettings.features or \
+ "digest" in pargs or \
+ "manifest" in pargs:
+ portage._doebuild_manifest_exempt_depend += 1
+
+if "test" in pargs:
+ # This variable is a signal to config.regenerate() to
+ # indicate that the test phase should be enabled regardless
+ # of problems such as masked "test" USE flag.
+ tmpsettings["EBUILD_FORCE_TEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_FORCE_TEST")
+ tmpsettings.features.add("test")
+
+tmpsettings.features.discard("fail-clean")
+
+if "merge" in pargs and "noauto" in tmpsettings.features:
+ print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
+ tmpsettings.features.discard("noauto")
+
+try:
+ metadata = dict(zip(Package.metadata_keys,
+ portage.db[portage.settings['EROOT']][mytree].dbapi.aux_get(
+ cpv, Package.metadata_keys, myrepo=myrepo)))
+except KeyError:
+ # aux_get failure, message should have been shown on stderr.
+ sys.exit(1)
+
+root_config = RootConfig(portage.settings,
+ portage.db[portage.settings['EROOT']], None)
+
+pkg = Package(built=(pkg_type != "ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"),
+ metadata=metadata, root_config=root_config,
+ type_name=pkg_type)
+
+# Apply package.env and repo-level settings. This allows per-package
+# FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
+# available as soon as possible. Also, note that the only way to ensure
+# that setcpv gets metadata from the correct repository is to pass in
+# a Package instance, as we do here (previously we had to modify
+# portdb.porttrees in order to accomplish this).
+tmpsettings.setcpv(pkg)
+
+def stale_env_warning():
+ if "clean" not in pargs and \
+ "noauto" not in tmpsettings.features and \
+ build_dir_phases.intersection(pargs):
+ portage.doebuild_environment(ebuild, "setup", portage.root,
+ tmpsettings, debug, 1, portage.portdb)
+ env_filename = os.path.join(tmpsettings["T"], "environment")
+ if os.path.exists(env_filename):
+ msg = ("Existing ${T}/environment for '%s' will be sourced. " + \
+ "Run 'clean' to start with a fresh environment.") % \
+ (tmpsettings["PF"], )
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ for x in msg:
+ portage.writemsg(">>> %s\n" % x)
+
+ if ebuild_changed:
+ open(os.path.join(tmpsettings['PORTAGE_BUILDDIR'],
+ '.ebuild_changed'), 'w').close()
+
+from portage.exception import PermissionDenied, \
+ PortagePackageException, UnsupportedAPIException
+
+if 'digest' in tmpsettings.features:
+ if pargs and pargs[0] not in ("digest", "manifest"):
+ pargs = ['digest'] + pargs
+ # We only need to build digests on the first pass.
+ tmpsettings.features.discard('digest')
+
+checked_for_stale_env = False
+
+for arg in pargs:
+ try:
+ if not checked_for_stale_env and arg not in ("digest","manifest"):
+ # This has to go after manifest generation since otherwise
+ # aux_get() might fail due to invalid ebuild digests.
+ stale_env_warning()
+ checked_for_stale_env = True
+
+ if arg in ("digest", "manifest") and force:
+ discard_digests(ebuild, tmpsettings, portage.portdb)
+ a = portage.doebuild(ebuild, arg, settings=tmpsettings,
+ debug=debug, tree=mytree,
+ vartree=portage.db[portage.root]['vartree'])
+ except KeyboardInterrupt:
+ print("Interrupted.")
+ a = 1
+ except KeyError:
+ # aux_get error
+ a = 1
+ except UnsupportedAPIException as e:
+ from textwrap import wrap
+ msg = wrap(str(e), 70)
+ del e
+ for x in msg:
+ portage.writemsg("!!! %s\n" % x, noiselevel=-1)
+ a = 1
+ except PortagePackageException as e:
+ portage.writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ a = 1
+ except PermissionDenied as e:
+ portage.writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
+ a = 1
+ if a is None:
+ print("Could not run the required binary?")
+ a = 127
+ if a:
+ sys.exit(a)
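
The phase loop above funnels every failure into a shell-style exit status: exceptions become 1, a None result from doebuild becomes 127, and the first nonzero status aborts the remaining phases. The policy condensed into a sketch, with run_phase standing in for portage.doebuild:

    import sys

    def run_phases(phases, run_phase):
        for arg in phases:
            try:
                a = run_phase(arg)
            except KeyboardInterrupt:
                print("Interrupted.")
                a = 1
            except Exception as e:
                print("!!! %s" % e)
                a = 1
            if a is None:
                a = 127  # the phase binary could not be run
            if a:
                sys.exit(a)
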
diff --git a/usr/lib/portage/bin/ebuild-helpers/bsd/sed b/usr/lib/portage/bin/ebuild-helpers/bsd/sed
new file mode 100755
index 0000000..41c2246
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/bsd/sed
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 2007-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+# PREFIX LOCAL: warn about screwups early
+if [[ -n ${EPREFIX} ]] ; then
+ echo "When using Prefix, this BSD sed wrapper should not exist (in ${scriptpath})! This is a bug!" > /dev/stderr
+ exit 1
+fi
+# END PREFIX LOCAL
+
+if [[ sed == ${scriptname} && -n ${ESED} ]]; then
+ exec ${ESED} "$@"
+elif type -P g${scriptname} > /dev/null ; then
+ exec g${scriptname} "$@"
+else
+ old_IFS="${IFS}"
+ IFS=":"
+
+ for path in $PATH; do
+ if [[ -x ${path}/${scriptname} ]]; then
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ exec "${path}/${scriptname}" "$@"
+ exit 0
+ fi
+ done
+
+ IFS="${old_IFS}"
+fi
+
+exit 1
diff --git a/usr/lib/portage/bin/ebuild-helpers/die b/usr/lib/portage/bin/ebuild-helpers/die
new file mode 100755
index 0000000..e1cd8ef
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/die
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+die "$@"
+exit 1
diff --git a/usr/lib/portage/bin/ebuild-helpers/dobin b/usr/lib/portage/bin/ebuild-helpers/dobin
new file mode 100755
index 0000000..51b7087
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dobin
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ ! -d ${ED}${DESTTREE}/bin ]] ; then
+ install -d "${ED}${DESTTREE}/bin" || { __helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/bin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${ED}${DESTTREE}/bin"
+ else
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/usr/lib/portage/bin/ebuild-helpers/doconfd b/usr/lib/portage/bin/ebuild-helpers/doconfd
new file mode 100755
index 0000000..26a4029
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doconfd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/conf.d/" \
+doins "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/dodir b/usr/lib/portage/bin/ebuild-helpers/dodir
new file mode 100755
index 0000000..f4f89fd
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dodir
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+install -d ${DIROPTIONS} "${@/#/${ED}/}"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/dodoc b/usr/lib/portage/bin/ebuild-helpers/dodoc
new file mode 100755
index 0000000..fc874ca
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dodoc
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ___eapi_dodoc_supports_-r; then
+ exec \
+ env \
+ __PORTAGE_HELPER="dodoc" \
+ doins "$@"
+fi
+
+if [ $# -lt 1 ] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+dir="${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+if [ ! -d "${dir}" ] ; then
+ install -d "${dir}"
+fi
+
+ret=0
+for x in "$@" ; do
+ if [ -d "${x}" ] ; then
+ eqawarn "QA Notice: dodoc argument '${x}' is a directory"
+ elif [ -s "${x}" ] ; then
+ install -m0644 "${x}" "${dir}" || { ((ret|=1)); continue; }
+ ecompress --queue "${dir}/${x##*/}"
+ elif [ ! -e "${x}" ] ; then
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ ((ret|=1))
+ fi
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/usr/lib/portage/bin/ebuild-helpers/doenvd b/usr/lib/portage/bin/ebuild-helpers/doenvd
new file mode 100755
index 0000000..8f0e6e6
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doenvd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/env.d/" \
+doins "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/doexe b/usr/lib/portage/bin/ebuild-helpers/doexe
new file mode 100755
index 0000000..a323177
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doexe
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ ! -d ${ED}${_E_EXEDESTTREE_} ]] ; then
+ install -d "${ED}${_E_EXEDESTTREE_}"
+fi
+
+TMP=$(mktemp -d "${T}/.doexe_tmp_XXXXXX")
+
+ret=0
+
+for x in "$@" ; do
+ if [ -L "${x}" ] ; then
+ cp "$x" "$TMP"
+ mysrc=$TMP/${x##*/}
+ elif [ -d "${x}" ] ; then
+ __vecho "doexe: warning, skipping directory ${x}"
+ continue
+ else
+ mysrc="${x}"
+ fi
+ if [ -e "$mysrc" ] ; then
+ install $EXEOPTIONS "$mysrc" "$ED$_E_EXEDESTTREE_"
+ else
+ echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+rm -rf "$TMP"
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/dohard b/usr/lib/portage/bin/ebuild-helpers/dohard
new file mode 100755
index 0000000..e0a44fa
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dohard
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_dohard; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
+if [[ $# -ne 2 ]] ; then
+ echo "$0: two arguments needed" 1>&2
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+destdir=${2%/*}
+[[ ! -d ${ED}${destdir} ]] && dodir "${destdir}"
+
+exec ln -f "${ED}$1" "${ED}$2"
diff --git a/usr/lib/portage/bin/ebuild-helpers/doheader b/usr/lib/portage/bin/ebuild-helpers/doheader
new file mode 100755
index 0000000..3795365
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doheader
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_doheader; then
+ die "${0##*/} is not supported in EAPI ${EAPI}"
+fi
+
+if [[ $# -lt 1 ]] || [[ $1 == -r && $# -lt 2 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/usr/include/" \
+doins "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/dohtml b/usr/lib/portage/bin/ebuild-helpers/dohtml
new file mode 100755
index 0000000..be134ad
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dohtml
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2009-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/pym}
+# Use safe cwd, avoiding unsafe import for bug #469338.
+export __PORTAGE_HELPER_CWD=${PWD}
+cd "${PORTAGE_PYM_PATH}"
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/dohtml.py" "$@"
+
+ret=$?
+# Restore cwd for display by __helpers_die
+cd "${__PORTAGE_HELPER_CWD}"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/doinfo b/usr/lib/portage/bin/ebuild-helpers/doinfo
new file mode 100755
index 0000000..a62eaed
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doinfo
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ ! -d ${ED}usr/share/info ]] ; then
+ install -d "${ED}usr/share/info" || { __helpers_die "${0##*/}: failed to install ${ED}usr/share/info"; exit 1; }
+fi
+
+install -m0644 "$@" "${ED}usr/share/info"
+rval=$?
+if [ $rval -ne 0 ] ; then
+ for x in "$@" ; do
+ [ -e "$x" ] || echo "!!! ${0##*/}: $x does not exist" 1>&2
+ done
+ __helpers_die "${0##*/} failed"
+fi
+exit $rval
diff --git a/usr/lib/portage/bin/ebuild-helpers/doinitd b/usr/lib/portage/bin/ebuild-helpers/doinitd
new file mode 100755
index 0000000..15859e7
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doinitd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+_E_EXEDESTTREE_="/etc/init.d/" \
+doexe "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/doins b/usr/lib/portage/bin/ebuild-helpers/doins
new file mode 100755
index 0000000..b76b256
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doins
@@ -0,0 +1,168 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${__PORTAGE_HELPER:-${0##*/}}
+
+if [[ ${helper} == dodoc ]] ; then
+ if [ $# -eq 0 ] ; then
+ # default_src_install may call dodoc with no arguments
+ # when DOC is defined but empty, so simply return
+ # successfully in this case.
+ eqawarn "QA Notice: dodoc called with no arguments"
+ exit 0
+ fi
+ export INSOPTIONS=-m0644
+ export INSDESTTREE=usr/share/doc/${PF}/${_E_DOCDESTTREE_}
+fi
+
+if [ $# -lt 1 ] ; then
+ __helpers_die "${helper}: at least one argument needed"
+ exit 1
+fi
+
+if [[ "$1" == "-r" ]] ; then
+ DOINSRECUR=y
+ shift
+else
+ DOINSRECUR=n
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ export ED="${D}"
+fi
+
+if [[ ${INSDESTTREE#${ED}} != "${INSDESTTREE}" ]]; then
+ __vecho "-------------------------------------------------------" 1>&2
+ __vecho "You should not use \${D} or \${ED} with helpers." 1>&2
+ __vecho " --> ${INSDESTTREE}" 1>&2
+ __vecho "-------------------------------------------------------" 1>&2
+ __helpers_die "${helper} used with \${D} or \${ED}"
+ exit 1
+fi
+# PREFIX LOCAL: check for usage with EPREFIX
+if [[ ${INSDESTTREE#${EPREFIX}} != "${INSDESTTREE}" ]]; then
+ __vecho "-------------------------------------------------------" 1>&2
+ __vecho "You should not use \${EPREFIX} with helpers." 1>&2
+ __vecho " --> ${INSDESTTREE}" 1>&2
+ __vecho "-------------------------------------------------------" 1>&2
+ exit 1
+fi
+# END PREFIX LOCAL
+
+if ___eapi_doins_and_newins_preserve_symlinks; then
+ PRESERVE_SYMLINKS=y
+else
+ PRESERVE_SYMLINKS=n
+fi
+
+export TMP=$(mktemp -d "${T}/.doins_tmp_XXXXXX")
+# Use separate directories to avoid potential name collisions.
+mkdir -p "$TMP"/{1,2}
+
+[[ ! -d ${ED}${INSDESTTREE} ]] && dodir "${INSDESTTREE}"
+
+_doins() {
+ local mysrc="$1" mydir="$2" cleanup="" rval
+
+ if [ -L "$mysrc" ] ; then
+ # Our fake $DISTDIR contains symlinks that should
+ # not be reproduced inside $D. In order to ensure
+ # that things like dodoc "$DISTDIR"/foo.pdf work
+ # as expected, we dereference symlinked files that
+ # refer to absolute paths inside
+ # $PORTAGE_ACTUAL_DISTDIR/.
+ if [ $PRESERVE_SYMLINKS = y ] && \
+ ! [[ $(readlink "$mysrc") == "$PORTAGE_ACTUAL_DISTDIR"/* ]] ; then
+ rm -rf "${ED}$INSDESTTREE/$mydir/${mysrc##*/}" || return $?
+ cp -P "$mysrc" "${ED}$INSDESTTREE/$mydir/${mysrc##*/}"
+ return $?
+ else
+ cp "$mysrc" "$TMP/2/${mysrc##*/}" || return $?
+ mysrc="$TMP/2/${mysrc##*/}"
+ cleanup=$mysrc
+ fi
+ fi
+
+ install ${INSOPTIONS} "${mysrc}" "${ED}${INSDESTTREE}/${mydir}"
+ rval=$?
+ [[ -n ${cleanup} ]] && rm -f "${cleanup}"
+ [ $rval -ne 0 ] && echo "!!! ${helper}: $mysrc does not exist" 1>&2
+ return $rval
+}
+
+_xdoins() {
+ local -i failed=0
+ while read -r -d $'\0' x ; do
+ _doins "$x" "${x%/*}"
+ ((failed|=$?))
+ done
+ return $failed
+}
+
+success=0
+failed=0
+
+for x in "$@" ; do
+ if [[ $PRESERVE_SYMLINKS = n && -d $x ]] || \
+ [[ $PRESERVE_SYMLINKS = y && -d $x && ! -L $x ]] ; then
+ if [ "${DOINSRECUR}" == "n" ] ; then
+ if [[ ${helper} == dodoc ]] ; then
+ echo "!!! ${helper}: $x is a directory" 1>&2
+ ((failed|=1))
+ fi
+ continue
+ fi
+
+ while [ "$x" != "${x%/}" ] ; do
+ x=${x%/}
+ done
+ if [ "$x" = "${x%/*}" ] ; then
+ pushd "$PWD" >/dev/null
+ else
+ pushd "${x%/*}" >/dev/null
+ fi
+ x=${x##*/}
+ x_orig=$x
+ # Follow any symlinks recursively until we've got
+ # a normal directory for 'find' to traverse. The
+ # name of the symlink will be used for the name
+ # of the installed directory, as discussed in
+ # bug #239529.
+ while [ -L "$x" ] ; do
+ pushd "$(readlink "$x")" >/dev/null
+ x=${PWD##*/}
+ pushd "${PWD%/*}" >/dev/null
+ done
+ if [[ $x != $x_orig ]] ; then
+ mv "$x" "$TMP/1/$x_orig"
+ pushd "$TMP/1" >/dev/null
+ fi
+ find "$x_orig" -type d -exec dodir "${INSDESTTREE}/{}" \;
+ find "$x_orig" \( -type f -or -type l \) -print0 | _xdoins
+ if [[ ${PIPESTATUS[1]} -eq 0 ]] ; then
+ # NOTE: Even if only an empty directory is installed here, it
+ # still counts as success, since an empty directory given as
+ # an argument to doins -r should not trigger failure.
+ ((success|=1))
+ else
+ ((failed|=1))
+ fi
+ if [[ $x != $x_orig ]] ; then
+ popd >/dev/null
+ mv "$TMP/1/$x_orig" "$x"
+ fi
+ while popd >/dev/null 2>&1 ; do true ; done
+ else
+ _doins "${x}"
+ if [[ $? -eq 0 ]] ; then
+ ((success|=1))
+ else
+ ((failed|=1))
+ fi
+ fi
+done
+rm -rf "$TMP"
+[[ $failed -ne 0 || $success -eq 0 ]] && { __helpers_die "${helper} failed"; exit 1; } || exit 0
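+
+# A sketch of typical usage (paths are illustrative): insinto selects
+# INSDESTTREE and insopts sets INSOPTIONS before doins runs:
+#   insinto /etc/myapp
+#   insopts -m0640
+#   doins myapp.conf
+#   doins -r conf.d/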
diff --git a/usr/lib/portage/bin/ebuild-helpers/dolib b/usr/lib/portage/bin/ebuild-helpers/dolib
new file mode 100755
index 0000000..cd74cd3
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dolib
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+# Setup ABI cruft
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+ CONF_LIBDIR=${!LIBDIR_VAR}
+fi
+unset LIBDIR_VAR
+# we need this to default to lib so that things don't break
+CONF_LIBDIR=${CONF_LIBDIR:-lib}
+libdir="${ED}${DESTTREE}/${CONF_LIBDIR}"
+
+
+if [[ $# -lt 1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+if [[ ! -d ${libdir} ]] ; then
+ install -d "${libdir}" || { __helpers_die "${0##*/}: failed to install ${libdir}"; exit 1; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ if [[ ! -L ${x} ]] ; then
+ install ${LIBOPTIONS} "${x}" "${libdir}"
+ else
+ ln -s "$(readlink "${x}")" "${libdir}/${x##*/}"
+ fi
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
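+
+# A sketch of typical usage (names are illustrative): dolib is normally
+# invoked via the dolib.a/dolib.so wrappers, which preset LIBOPTIONS:
+#   dolib.so libfoo.so.1.0
+#   dolib.a libfoo.a
+# Symlinks among the arguments are re-created in ${libdir} rather than
+# copied.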
diff --git a/usr/lib/portage/bin/ebuild-helpers/dolib.a b/usr/lib/portage/bin/ebuild-helpers/dolib.a
new file mode 100755
index 0000000..d2279dc
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dolib.a
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0644" \
+ dolib "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/dolib.so b/usr/lib/portage/bin/ebuild-helpers/dolib.so
new file mode 100755
index 0000000..4bdbfab
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dolib.so
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0755" \
+ dolib "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/doman b/usr/lib/portage/bin/ebuild-helpers/doman
new file mode 100755
index 0000000..635f2ee
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/doman
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+i18n=""
+
+ret=0
+
+for x in "$@" ; do
+ if [[ ${x:0:6} == "-i18n=" ]] ; then
+ i18n=${x:6}/
+ continue
+ fi
+ if [[ ${x:0:6} == ".keep_" ]] ; then
+ continue
+ fi
+
+ suffix=${x##*.}
+
+ # These will be automatically decompressed by ecompressdir.
+ if has ${suffix} Z gz bz2 ; then
+ realname=${x%.*}
+ suffix=${realname##*.}
+ fi
+
+ if has "${EAPI:-0}" 2 3 || [[ -z ${i18n} ]] \
+ && ! has "${EAPI:-0}" 0 1 \
+ && [[ $x =~ (.*)\.([a-z][a-z](_[A-Z][A-Z])?)\.(.*) ]]
+ then
+ name=${BASH_REMATCH[1]##*/}.${BASH_REMATCH[4]}
+ mandir=${BASH_REMATCH[2]}/man${suffix:0:1}
+ else
+ name=${x##*/}
+ mandir=${i18n#/}man${suffix:0:1}
+ fi
+
+
+ if [[ ${mandir} == *man[0-9n] ]] ; then
+ if [[ -s ${x} ]] ; then
+ if [[ ! -d ${ED}/usr/share/man/${mandir} ]] ; then
+ install -d "${ED}/usr/share/man/${mandir}"
+ fi
+
+ install -m0644 "${x}" "${ED}/usr/share/man/${mandir}/${name}"
+ ((ret|=$?))
+ elif [[ ! -e ${x} ]] ; then
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ ((ret|=1))
+ fi
+ else
+ __vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
+ ((ret|=1))
+ fi
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
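+
+# A sketch of typical usage (names are illustrative): the language
+# subdirectory comes from an -i18n= option or a .<lang>. infix:
+#   doman foo.1 foo.de.1      # -> man1/foo.1 and de/man1/foo.1
+#   doman -i18n=de de/foo.1   # -> de/man1/foo.1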
diff --git a/usr/lib/portage/bin/ebuild-helpers/domo b/usr/lib/portage/bin/ebuild-helpers/domo
new file mode 100755
index 0000000..300358c
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/domo
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+mynum=${#}
+if [ ${mynum} -lt 1 ] ; then
+ __helpers_die "${0}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [ ! -d "${ED}${DESTTREE}/share/locale" ] ; then
+ install -d "${ED}${DESTTREE}/share/locale/"
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [ -e "${x}" ] ; then
+ mytiny="${x##*/}"
+ mydir="${ED}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
+ if [ ! -d "${mydir}" ] ; then
+ install -d "${mydir}"
+ fi
+ install -m0644 "${x}" "${mydir}/${MOPREFIX}.mo"
+ else
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
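+
+# A sketch of typical usage (names are illustrative): each .mo file is
+# installed as ${MOPREFIX}.mo under its locale, e.g. with MOPREFIX=myapp:
+#   domo po/de.mo   # -> .../share/locale/de/LC_MESSAGES/myapp.mo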
diff --git a/usr/lib/portage/bin/ebuild-helpers/dosbin b/usr/lib/portage/bin/ebuild-helpers/dosbin
new file mode 100755
index 0000000..33415a6
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dosbin
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ ! -d ${ED}${DESTTREE}/sbin ]] ; then
+ install -d "${ED}${DESTTREE}/sbin" || { __helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/sbin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${ED}${DESTTREE}/sbin"
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/usr/lib/portage/bin/ebuild-helpers/dosed b/usr/lib/portage/bin/ebuild-helpers/dosed
new file mode 100755
index 0000000..7db0629
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dosed
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_dosed; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
+if [[ $# -lt 1 ]] ; then
+ echo "!!! ${0##*/}: at least one argument needed" >&2
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+ret=0
+file_found=0
+mysed="s:${ED}::g"
+
+for x in "$@" ; do
+ y=$ED${x#/}
+ if [ -e "${y}" ] ; then
+ if [ -f "${y}" ] ; then
+ file_found=1
+ sed -i -e "${mysed}" "${y}"
+ else
+ echo "${y} is not a regular file!" >&2
+ false
+ fi
+ ((ret|=$?))
+ else
+ mysed="${x}"
+ fi
+done
+
+if [ $file_found = 0 ] ; then
+ echo "!!! ${0##*/}: $y does not exist" 1>&2
+ ((ret|=1))
+fi
+
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/dosym b/usr/lib/portage/bin/ebuild-helpers/dosym
new file mode 100755
index 0000000..638f19b
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/dosym
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -ne 2 ]] ; then
+ __helpers_die "${0##*/}: two arguments needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ ${2} == */ ]] || \
+ [[ -d ${ED}${2} && ! -L ${ED}${2} ]] ; then
+ # implicit basename not allowed by PMS (bug #379899)
+ eqawarn "QA Notice: dosym target omits basename: '${2}'"
+fi
+
+destdir=${2%/*}
+[[ ! -d ${ED}${destdir} ]] && dodir "${destdir}"
+# when absolute, prefix with offset for Gentoo Prefix
+target="${1}"
+[[ ${target:0:1} == "/" ]] && target="${EPREFIX}${target}"
+ln -snf "${target}" "${ED}${2}"
+
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
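+
+# A sketch of typical usage (paths are illustrative): absolute targets
+# are prefixed with ${EPREFIX} automatically, so ebuilds write plain
+# paths:
+#   dosym ../lib/foo/bin/tool /usr/bin/tool   # relative, kept as-is
+#   dosym /usr/bin/python3 /usr/bin/python    # absolute, EPREFIX-offset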
diff --git a/usr/lib/portage/bin/ebuild-helpers/ecompress b/usr/lib/portage/bin/ebuild-helpers/ecompress
new file mode 100755
index 0000000..b439ab2
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/ecompress
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+# setup compression stuff
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS-bzip2}
+[[ -z ${PORTAGE_COMPRESS} ]] && exit 0
+
+if [[ ${PORTAGE_COMPRESS_FLAGS+set} != "set" ]] ; then
+ case ${PORTAGE_COMPRESS} in
+ bzip2|gzip) PORTAGE_COMPRESS_FLAGS="-9";;
+ esac
+fi
+
+# decompress_args(suffix, binary)
+# - suffix: the compression suffix to work with
+# - binary: the program to execute that'll compress/decompress
+# new_args: global array used to return revised arguments
+decompress_args() {
+ local suffix=$1 binary=$2
+ shift 2
+
+ # Initialize the global new_args array.
+ new_args=()
+ declare -a decompress_args=()
+ local x i=0 decompress_count=0
+ for x in "$@" ; do
+ if [[ ${x%$suffix} = $x ]] ; then
+ new_args[$i]=$x
+ else
+ new_args[$i]=${x%$suffix}
+ decompress_args[$decompress_count]=$x
+ ((decompress_count++))
+ fi
+ ((i++))
+ done
+
+ if [ $decompress_count -gt 0 ] ; then
+ ${binary} "${decompress_args[@]}"
+ if [ $? -ne 0 ] ; then
+ # Apparently decompression failed for one or more files, so
+ # drop those since we don't want to compress them twice.
+ new_args=()
+ local x i=0
+ for x in "$@" ; do
+ if [[ ${x%$suffix} = $x ]] ; then
+ new_args[$i]=$x
+ ((i++))
+ elif [[ -f ${x%$suffix} ]] ; then
+ new_args[$i]=${x%$suffix}
+ ((i++))
+ else
+ # Apparently decompression failed for this one, so drop
+ # it since we don't want to compress it twice.
+ true
+ fi
+ done
+ fi
+ fi
+}
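+
+# For example (arguments are illustrative): given
+#   decompress_args ".gz" "gunzip -f" a.txt b.txt.gz
+# the function runs `gunzip -f b.txt.gz` and sets
+# new_args=(a.txt b.txt), so every entry can be re-compressed uniformly.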
+
+case $1 in
+ --suffix)
+ [[ -n $2 ]] && __vecho "${0##*/}: --suffix takes no additional arguments" 1>&2
+
+ if [[ ! -e ${T}/.ecompress.suffix ]] ; then
+ set -e
+ tmpdir="${T}"/.ecompress$$.${RANDOM}
+ mkdir "${tmpdir}"
+ cd "${tmpdir}"
+ # we have to fill the file enough so that there is something
+ # to compress, as some programs refuse to compress a file
+ # they cannot actually shrink
+ echo {0..1000} > compressme
+ ${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS} compressme > /dev/null
+ # If PORTAGE_COMPRESS_FLAGS contains -k then we need to avoid
+ # having our glob match the uncompressed file here.
+ suffix=$(echo compressme.*)
+ [[ -z $suffix || "$suffix" == "compressme.*" ]] && \
+ suffix=$(echo compressme*)
+ suffix=${suffix#compressme}
+ cd /
+ rm -rf "${tmpdir}"
+ echo "${suffix}" > "${T}/.ecompress.suffix"
+ fi
+ cat "${T}/.ecompress.suffix"
+ ;;
+ --bin)
+ [[ -n $2 ]] && __vecho "${0##*/}: --bin takes no additional arguments" 1>&2
+
+ echo "${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+ ;;
+ --queue)
+ shift
+ ret=0
+ for x in "${@/%/.ecompress.file}" ; do
+ >> "$x"
+ ((ret|=$?))
+ done
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+ --dequeue)
+ [[ -n $2 ]] && __vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ find "${D}" -name '*.ecompress.file' -print0 \
+ | sed -e 's:\.ecompress\.file::g' \
+ | ${XARGS} -0 ecompress
+ find "${D}" -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+ ;;
+ --*)
+ __helpers_die "${0##*/}: unknown arguments '$*'"
+ exit 1
+ ;;
+ *)
+ # Since dodoc calls ecompress on files that are already compressed,
+ # perform decompression here (similar to ecompressdir behavior).
+ decompress_args ".Z" "gunzip -f" "$@"
+ set -- "${new_args[@]}"
+ decompress_args ".gz" "gunzip -f" "$@"
+ set -- "${new_args[@]}"
+ decompress_args ".bz2" "bunzip2 -f" "$@"
+ set -- "${new_args[@]}"
+
+ mask_ext_re=""
+ set -f
+ for x in $PORTAGE_COMPRESS_EXCLUDE_SUFFIXES ; do
+ mask_ext_re+="|$x"
+ done
+ set +f
+ mask_ext_re="^(${mask_ext_re:1})\$"
+ declare -a filtered_args=()
+ i=0
+ for x in "$@" ; do
+ [[ ${x##*.} =~ $mask_ext_re ]] && continue
+ [[ -s ${x} ]] || continue
+ filtered_args[$i]=$x
+ ((i++))
+ done
+ [ $i -eq 0 ] && exit 0
+ set -- "${filtered_args[@]}"
+
+ # If a compressed version of the file already exists, simply
+ # delete it so that the compressor doesn't whine (bzip2 will
+ # complain and skip, gzip will prompt for input)
+ suffix=$(ecompress --suffix)
+ [[ -n ${suffix} ]] && echo -n "${@/%/${suffix}$'\001'}" | \
+ tr '\001' '\000' | ${XARGS} -0 rm -f
+ # Finally, let's actually do some real work
+ "${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} "$@"
+ ret=$?
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+esac
diff --git a/usr/lib/portage/bin/ebuild-helpers/ecompressdir b/usr/lib/portage/bin/ebuild-helpers/ecompressdir
new file mode 100755
index 0000000..fa68bee
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/ecompressdir
@@ -0,0 +1,225 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/helper-functions.sh
+
+if [[ -z $1 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D} EPREFIX=
+fi
+
+SIZE_LIMIT=''
+while [[ $# -gt 0 ]] ; do
+ case $1 in
+ --ignore)
+ shift
+ for skip in "$@" ; do
+ [[ -d ${ED}${skip} || -f ${ED}${skip} ]] \
+ && >> "${ED}${skip}.ecompress.skip"
+ done
+ exit 0
+ ;;
+ --queue)
+ shift
+ set -- "${@/%/.ecompress.dir}"
+ set -- "${@/#/${ED}}"
+ ret=0
+ for x in "$@" ; do
+ # Stash the limit in the .dir file so we can reload it later.
+ printf "${SIZE_LIMIT}" > "${x}"
+ ((ret|=$?))
+ done
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+ --dequeue)
+ [[ -n $2 ]] && __vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ find "${ED}" -name '*.ecompress.dir' -print0 \
+ | sed -e 's:\.ecompress\.dir::g' -e "s:${ED}:/:g" \
+ | ${XARGS} -0 ecompressdir
+ find "${ED}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
+ exit 0
+ ;;
+ --limit)
+ SIZE_LIMIT=$2
+ shift
+ ;;
+ --*)
+ __helpers_die "${0##*/}: unknown arguments '$*'"
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+# figure out the new suffix
+suffix=$(ecompress --suffix)
+
+# funk_up_dir(action, suffix, binary, [size_limit])
+# - action: compress or decompress
+# - suffix: the compression suffix to work with
+# - binary: the program to execute that'll compress/decompress
+# - size_limit: if compressing, skip files smaller than this
+# The directory we act on is implied in the ${dir} variable
+funk_up_dir() {
+ local act=$1 suffix=$2 binary=$3 size_limit=$4
+
+ local negate=""
+ [[ ${act} == "compress" ]] && negate="!"
+
+ local ret=0
+ # first we act on all the files
+ local args=(
+ -type f
+ ${negate} -iname "*${suffix}"
+ )
+ [[ -n ${size_limit} ]] && args+=( -size "+${size_limit}c" )
+ find "${dir}" "${args[@]}" -print0 | ${XARGS} -0 ${binary}
+ ((ret|=$?))
+
+ # Repeat until nothing changes, in order to handle multiple
+ # levels of indirection (see bug #470916).
+ local -i indirection=0
+ while true ; do
+ local something_changed=
+ while read -r -d $'\0' brokenlink ; do
+ [[ -e ${brokenlink} ]] && continue
+ olddest=$(readlink "${brokenlink}")
+ # Ignore temporarily broken symlinks due to
+ # _relocate_skip_dirs (bug #399595), and handle
+ # absolute symlinks to files that aren't merged
+ # yet (bug #405327).
+ if [[ ${olddest} == /* ]] ; then
+ [ -e "${D}${olddest}" ] && continue
+ skip_dir_dest=${T}/ecompress-skip/${olddest#${EPREFIX}}
+ else
+ skip_dir_dest=${T}/ecompress-skip/${actual_dir#${ED}}/${brokenlink%/*}/${olddest}
+ fi
+ [[ -e ${skip_dir_dest} ]] && continue
+ if [[ ${act} == "compress" ]] ; then
+ newdest=${olddest}${suffix}
+ else
+ [[ ${olddest} == *${suffix} ]] || continue
+ newdest=${olddest%${suffix}}
+ fi
+ if [[ "${newdest}" == /* ]] ; then
+ [[ -f "${D}${newdest}" ]] || continue
+ else
+ [[ -f "${dir}/${brokenlink%/*}/${newdest}" ]] || continue
+ fi
+ something_changed=${brokenlink}
+ rm -f "${brokenlink}"
+ [[ ${act} == "compress" ]] \
+ && ln -snf "${newdest}" "${brokenlink}${suffix}" \
+ || ln -snf "${newdest}" "${brokenlink%${suffix}}"
+ ((ret|=$?))
+ done < <(find "${dir}" -type l -print0)
+ [[ -n ${something_changed} ]] || break
+ (( indirection++ ))
+ if (( indirection >= 100 )) ; then
+ # Protect against possibility of a bug triggering an endless loop.
+ eerror "ecompressdir: too many levels of indirection for" \
+ "'${actual_dir#${ED}}/${something_changed#./}'"
+ break
+ fi
+ done
+ return ${ret}
+}
+
+# _relocate_skip_dirs(srctree, dsttree)
+# Move all files and directories we want to skip running compression
+# on from srctree to dsttree.
+_relocate_skip_dirs() {
+ local srctree="$1" dsttree="$2"
+
+ [[ -d ${srctree} ]] || return 0
+
+ find "${srctree}" -name '*.ecompress.skip' -print0 | \
+ while read -r -d $'\0' src ; do
+ src=${src%.ecompress.skip}
+ dst="${dsttree}${src#${srctree}}"
+ parent=${dst%/*}
+ mkdir -p "${parent}"
+ mv "${src}" "${dst}"
+ mv "${src}.ecompress.skip" "${dst}.ecompress.skip"
+ done
+}
+hide_skip_dirs() { _relocate_skip_dirs "${ED}" "${T}"/ecompress-skip/ ; }
+restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${ED}" ; }
+
+ret=0
+
+rm -rf "${T}"/ecompress-skip
+
+decompressors=(
+ ".Z" "gunzip -f"
+ ".gz" "gunzip -f"
+ ".bz2" "bunzip2 -f"
+ ".xz" "unxz -f"
+ ".lzma" "unxz -f"
+)
+
+__multijob_init
+
+for dir in "$@" ; do
+ dir=${dir#/}
+ dir="${ED}${dir}"
+ if [[ ! -d ${dir} ]] ; then
+ __vecho "${0##*/}: /${dir#${ED}} does not exist!"
+ continue
+ fi
+ cd "${dir}"
+ actual_dir=${dir}
+ dir=. # use relative path to avoid 'Argument list too long' errors
+
+ # hide all the stuff we want to skip
+ hide_skip_dirs "${dir}"
+
+ # since we've been requested to compress the whole dir,
+ # delete any individual queued requests
+ size_limit=${SIZE_LIMIT:-$(<"${actual_dir}.ecompress.dir")}
+ rm -f "${actual_dir}.ecompress.dir"
+ find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+
+ # not uncommon for packages to compress doc files themselves
+ for (( i = 0; i < ${#decompressors[@]}; i += 2 )) ; do
+ # It's faster to parallelize at this stage than to try to
+ # parallelize the compressors. This is because the find|xargs
+ # ends up launching fewer compressors overall, so the overhead
+ # of forking children ends up dominating.
+ (
+ __multijob_child_init
+ funk_up_dir "decompress" "${decompressors[i]}" "${decompressors[i+1]}"
+ ) &
+ __multijob_post_fork
+ : $(( ret |= $? ))
+ done
+
+ __multijob_finish
+ : $(( ret |= $? ))
+
+ # forcibly break all hard links as some compressors whine about it
+ find "${dir}" -type f -links +1 -exec env file="{}" sh -c \
+ 'cp -p "${file}" "${file}.ecompress.break" ; mv -f "${file}.ecompress.break" "${file}"' \;
+
+ # now let's do our work
+ if [[ -n ${suffix} ]] ; then
+ __vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${ED}}"
+ funk_up_dir "compress" "${suffix}" "ecompress" "${size_limit}"
+ : $(( ret |= $? ))
+ fi
+
+ # finally, restore the skipped stuff
+ restore_skip_dirs
+done
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
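+
+# A sketch of typical usage (illustrative): other helpers queue
+# directories for batch compression, e.g. prepman runs
+#   ecompressdir --limit 128 --queue /usr/share/man
+# and a later dequeue pass performs the actual work:
+#   ecompressdir --dequeue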
diff --git a/usr/lib/portage/bin/ebuild-helpers/eerror b/usr/lib/portage/bin/ebuild-helpers/eerror
new file mode 100755
index 0000000..a93295e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/eerror
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/einfo b/usr/lib/portage/bin/ebuild-helpers/einfo
new file mode 100755
index 0000000..a93295e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/einfo
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/elog b/usr/lib/portage/bin/ebuild-helpers/elog
new file mode 100755
index 0000000..a93295e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/elog
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/emake b/usr/lib/portage/bin/ebuild-helpers/emake
new file mode 100755
index 0000000..b69042e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/emake
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# emake: Supplies some default parameters to GNU make. At the moment the
+# only parameter supplied is -jN, where N is the number of
+# parallel processes that should be ideal for the running host
+# (e.g. on a single-CPU machine, N=2). The MAKEOPTS variable
+# is set in make.globals. We don't source make.globals
+# here because emake is only called from an ebuild.
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $PORTAGE_QUIET != 1 ]] ; then
+ (
+ for arg in ${MAKE:-make} $MAKEOPTS "$@" $EXTRA_EMAKE ; do
+ [[ ${arg} == *" "* ]] \
+ && printf "'%s' " "${arg}" \
+ || printf "%s " "${arg}"
+ done
+ printf "\n"
+ ) >&2
+fi
+
+${MAKE:-make} SHELL="${BASH:-/bin/bash}" ${MAKEOPTS} "$@" ${EXTRA_EMAKE}
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
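+
+# Typical usage (targets are illustrative):
+#   emake                          # parallel build honoring ${MAKEOPTS}
+#   emake DESTDIR="${D}" install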
diff --git a/usr/lib/portage/bin/ebuild-helpers/eqawarn b/usr/lib/portage/bin/ebuild-helpers/eqawarn
new file mode 100755
index 0000000..a93295e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/eqawarn
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/ewarn b/usr/lib/portage/bin/ebuild-helpers/ewarn
new file mode 100755
index 0000000..a93295e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/ewarn
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/usr/lib/portage/bin/ebuild-helpers/fowners b/usr/lib/portage/bin/ebuild-helpers/fowners
new file mode 100755
index 0000000..a1e978a
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/fowners
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+
+if ! ___eapi_has_prefix_variables; then
+ EPREFIX= ED=${D}
+fi
+
+# we can't prefix all arguments because
+# chown takes arbitrary options
+slash="/"
+chown "${@/#${slash}/${ED}${slash}}"
+ret=$?
+
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
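+
+# A sketch of typical usage (path is illustrative): arguments with a
+# leading slash are rewritten to live under ${ED}:
+#   fowners root:root /usr/bin/foo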
diff --git a/usr/lib/portage/bin/ebuild-helpers/fperms b/usr/lib/portage/bin/ebuild-helpers/fperms
new file mode 100755
index 0000000..695f912
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/fperms
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+# we can't prefix all arguments because
+# chmod takes arbitrary options
+slash="/"
+chmod "${@/#${slash}/${ED}${slash}}"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit $ret
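+
+# A sketch of typical usage (path is illustrative):
+#   fperms 4755 /usr/bin/foo   # chmod 4755 runs against ${ED}/usr/bin/foo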
diff --git a/usr/lib/portage/bin/ebuild-helpers/keepdir b/usr/lib/portage/bin/ebuild-helpers/keepdir
new file mode 100755
index 0000000..bec2feb
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/keepdir
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+dodir "$@"
+ret=$?
+
+for x in "$@"; do
+ >> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT%/*}" || \
+ { echo "!!! ${0##*/}: cannot write .keep in ${ED}${x}" 1>&2; ret=1; }
+done
+
+[[ ${ret} -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
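+
+# A sketch of typical usage (path is illustrative): preserve an
+# otherwise-empty directory across merges via a hidden .keep file:
+#   keepdir /var/lib/myapp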
diff --git a/usr/lib/portage/bin/ebuild-helpers/newbin b/usr/lib/portage/bin/ebuild-helpers/newbin
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newbin
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
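+
+# A sketch of typical usage (names are illustrative): the final
+# do${helper#new} call dispatches to the matching installer, so newbin
+# hands the renamed copy to dobin:
+#   newbin build/tool mytool
+# In EAPIs that allow it, "-" reads the content from stdin:
+#   some_generator | newbin - mytool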
diff --git a/usr/lib/portage/bin/ebuild-helpers/newconfd b/usr/lib/portage/bin/ebuild-helpers/newconfd
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newconfd
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newdoc b/usr/lib/portage/bin/ebuild-helpers/newdoc
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newdoc
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newenvd b/usr/lib/portage/bin/ebuild-helpers/newenvd
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newenvd
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newexe b/usr/lib/portage/bin/ebuild-helpers/newexe
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newexe
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newheader b/usr/lib/portage/bin/ebuild-helpers/newheader
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newheader
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newinitd b/usr/lib/portage/bin/ebuild-helpers/newinitd
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newinitd
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newins b/usr/lib/portage/bin/ebuild-helpers/newins
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newins
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newlib.a b/usr/lib/portage/bin/ebuild-helpers/newlib.a
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newlib.a
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newlib.so b/usr/lib/portage/bin/ebuild-helpers/newlib.so
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newlib.so
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newman b/usr/lib/portage/bin/ebuild-helpers/newman
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newman
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/newsbin b/usr/lib/portage/bin/ebuild-helpers/newsbin
new file mode 100755
index 0000000..282e7a5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/newsbin
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+helper=${0##*/}
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ __helpers_die "${helper}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
+fi
+
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
+ fi
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
+ret=$?
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
+exit $ret
diff --git a/usr/lib/portage/bin/ebuild-helpers/portageq b/usr/lib/portage/bin/ebuild-helpers/portageq
new file mode 100755
index 0000000..c678dd0
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/portageq
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2009-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/pym}
+# Use safe cwd, avoiding unsafe import for bug #469338.
+cd "${PORTAGE_PYM_PATH}"
+
+IFS=':'
+set -f # in case ${PATH} contains any shell glob characters
+
+for path in ${PATH}; do
+ [[ -x ${path}/${scriptname} ]] || continue
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" \
+ "${path}/${scriptname}" "$@"
+done
+
+unset IFS
+echo "${scriptname}: command not found" 1>&2
+exit 127
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepall b/usr/lib/portage/bin/ebuild-helpers/prepall
new file mode 100755
index 0000000..d3f32e5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepall
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+[[ -d ${ED} ]] || exit 0
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if has chflags $FEATURES ; then
+ # Save all the file flags for restoration at the end of prepall.
+ mtree -c -p "${ED}" -k flags > "${T}/bsdflags.mtree"
+ # Remove all the file flags so that prepall can do anything necessary.
+ chflags -R noschg,nouchg,nosappnd,nouappnd "${ED}"
+ chflags -R nosunlnk,nouunlnk "${ED}" 2>/dev/null
+fi
+
+prepallman
+prepallinfo
+
+prepallstrip
+
+if has chflags $FEATURES ; then
+ # Restore all the file flags that were saved at the beginning of prepall.
+ mtree -U -e -p "${ED}" -k flags < "${T}/bsdflags.mtree" &> /dev/null
+fi
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepalldocs b/usr/lib/portage/bin/ebuild-helpers/prepalldocs
new file mode 100755
index 0000000..a49ff11
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepalldocs
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ___eapi_has_docompress; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
+if [[ -n $1 ]] ; then
+ __vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
+fi
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+[[ -d ${ED}usr/share/doc ]] || exit 0
+
+ecompressdir --ignore /usr/share/doc/${PF}/html
+ecompressdir --queue /usr/share/doc
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepallinfo b/usr/lib/portage/bin/ebuild-helpers/prepallinfo
new file mode 100755
index 0000000..8578a9f
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepallinfo
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+[[ -d ${ED}usr/share/info ]] || exit 0
+
+exec prepinfo
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepallman b/usr/lib/portage/bin/ebuild-helpers/prepallman
new file mode 100755
index 0000000..8cff5ee
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepallman
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+# replaced by controllable compression in EAPI 4
+___eapi_has_docompress && exit 0
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+ret=0
+
+# PREFIX LOCAL: ED need not exist, whereas D does
+[[ -d ${ED} ]] || exit ${ret}
+# END PREFIX LOCAL
+
+while IFS= read -r -d '' mandir ; do
+ mandir=${mandir#${ED}}
+ prepman "${mandir%/man}"
+ ((ret|=$?))
+done < <(find "${ED}" -type d -name man -print0)
+
+exit ${ret}
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepallstrip b/usr/lib/portage/bin/ebuild-helpers/prepallstrip
new file mode 100755
index 0000000..4d5453c
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepallstrip
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+exec prepstrip "${ED}"
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepinfo b/usr/lib/portage/bin/ebuild-helpers/prepinfo
new file mode 100755
index 0000000..74a56bf
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepinfo
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ -z $1 ]] ; then
+ infodir="/usr/share/info"
+else
+ if [[ -d ${ED}$1/share/info ]] ; then
+ infodir="$1/share/info"
+ else
+ infodir="$1/info"
+ fi
+fi
+
+if [[ ! -d ${ED}${infodir} ]] ; then
+ if [[ -n $1 ]] ; then
+ __vecho "${0##*/}: '${infodir}' does not exist!"
+ exit 1
+ else
+ exit 0
+ fi
+fi
+
+find "${ED}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
+ for f in "${x}"/.keepinfodir*; do
+ [[ -e ${f} ]] && continue 2
+ done
+ rm -f "${x}"/dir{,.info}{,.gz,.bz2}
+done
+
+___eapi_has_docompress && exit 0
+exec ecompressdir --queue "${infodir}"
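
The loop above uses a sentinel-file idiom: the inner for loop exists only to test whether any .keepinfodir* file is present, and continue 2 jumps to the next iteration of the *outer* loop when one is found. A stand-alone sketch of the same control flow:

    #!/bin/bash
    # Skip any directory that contains a .keep* sentinel file.
    while IFS= read -r -d '' dir ; do
        for sentinel in "${dir}"/.keep*; do
            # continue 2 resumes the outer while loop.
            [[ -e ${sentinel} ]] && continue 2
        done
        echo "would clean: ${dir}"
    done < <(find "${1:-.}" -type d -print0)
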
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepman b/usr/lib/portage/bin/ebuild-helpers/prepman
new file mode 100755
index 0000000..e5e482e
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepman
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Do not compress man pages which are smaller than this (in bytes). #169260
+SIZE_LIMIT='128'
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+if [[ -z $1 ]] ; then
+ mandir="${ED}usr/share/man"
+else
+ mandir="${ED}$1/man"
+fi
+
+if [[ ! -d ${mandir} ]] ; then
+ eqawarn "QA Notice: prepman called with non-existent dir '${mandir#${ED}}'"
+ exit 0
+fi
+
+# replaced by controllable compression in EAPI 4
+___eapi_has_docompress && exit 0
+
+shopt -s nullglob
+
+really_is_mandir=0
+
+# use some heuristics to test if this is a real mandir
+for subdir in "${mandir}"/man* "${mandir}"/*/man* ; do
+ [[ -d ${subdir} ]] && really_is_mandir=1 && break
+done
+
+[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --limit ${SIZE_LIMIT} --queue "${mandir#${ED}}"
+
+exit 0
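
The heuristic above leans on nullglob: with that option set, a glob that matches nothing expands to zero words, so the for loop body simply never runs for a directory without man* children. A minimal sketch of the same test as a reusable function:

    #!/bin/bash
    # Return 0 if $1 plausibly is a man page tree (contains man*
    # subdirs, possibly one locale level down), else return 1.
    shopt -s nullglob
    looks_like_mandir() {
        local subdir
        for subdir in "$1"/man* "$1"/*/man* ; do
            [[ -d ${subdir} ]] && return 0
        done
        return 1
    }

    if looks_like_mandir "${1:-/usr/share/man}" ; then
        echo "real mandir"
    else
        echo "not a mandir"
    fi
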
diff --git a/usr/lib/portage/bin/ebuild-helpers/prepstrip b/usr/lib/portage/bin/ebuild-helpers/prepstrip
new file mode 100755
index 0000000..f7311c5
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/prepstrip
@@ -0,0 +1,390 @@
+#!/bin/bash
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/helper-functions.sh
+
+# Avoid multiple calls to `has`. This creates variables like:
+# FEATURES_foo=false
+# when "foo" is not in $FEATURES
+tf() { "$@" && echo true || echo false ; }
+exp_tf() {
+ local flag var=$1
+ shift
+ for flag in "$@" ; do
+ eval ${var}_${flag}=$(tf has ${flag} ${!var})
+ done
+}
+exp_tf FEATURES compressdebug installsources nostrip splitdebug xattr
+exp_tf RESTRICT binchecks installsources splitdebug strip
+
+if ! ___eapi_has_prefix_variables; then
+ EPREFIX= ED=${D}
+fi
+
+banner=false
+SKIP_STRIP=false
+if ${RESTRICT_strip} || ${FEATURES_nostrip} ; then
+ SKIP_STRIP=true
+ banner=true
+ ${FEATURES_installsources} || exit 0
+fi
+
+PRESERVE_XATTR=false
+if [[ ${KERNEL} == linux ]] && ${FEATURES_xattr} ; then
+ PRESERVE_XATTR=true
+ if type -P getfattr >/dev/null && type -P setfattr >/dev/null ; then
+ dump_xattrs() {
+ getfattr -d --absolute-names "$1"
+ }
+ restore_xattrs() {
+ setfattr --restore=-
+ }
+ else
+ dump_xattrs() {
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" \
+ "${PORTAGE_BIN_PATH}/xattr-helper.py" --dump < <(echo -n "$1")
+ }
+ restore_xattrs() {
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" \
+ "${PORTAGE_BIN_PATH}/xattr-helper.py" --restore
+ }
+ fi
+fi
+
+# look up the tools we might be using
+for t in STRIP:strip OBJCOPY:objcopy READELF:readelf ; do
+ v=${t%:*} # STRIP
+ t=${t#*:} # strip
+ eval ${v}=\"${!v:-${CHOST}-${t}}\"
+ type -P -- ${!v} >/dev/null || eval ${v}=${t}
+done
+
+# Figure out what tool set we're using to strip stuff
+unset SAFE_STRIP_FLAGS DEF_STRIP_FLAGS SPLIT_STRIP_FLAGS
+case $(${STRIP} --version 2>/dev/null) in
+*elfutils*) # dev-libs/elfutils
+ # elfutils default behavior is always safe, so we don't need to
+ # specify any flags at all
+ SAFE_STRIP_FLAGS=""
+ DEF_STRIP_FLAGS="--remove-comment"
+ SPLIT_STRIP_FLAGS="-f"
+ ;;
+*GNU*) # sys-devel/binutils
+ # We'll leave out -R .note for now until we can check out the relevance
+ # of the section when it has the ALLOC flag set on it ...
+ SAFE_STRIP_FLAGS="--strip-unneeded"
+ DEF_STRIP_FLAGS="-R .comment -R .GCC.command.line -R .note.gnu.gold-version"
+ SPLIT_STRIP_FLAGS=
+ ;;
+esac
+: ${PORTAGE_STRIP_FLAGS=${SAFE_STRIP_FLAGS} ${DEF_STRIP_FLAGS}}
+
+prepstrip_sources_dir=${EPREFIX}/usr/src/debug/${CATEGORY}/${PF}
+
+type -P debugedit >/dev/null && debugedit_found=true || debugedit_found=false
+debugedit_warned=false
+
+__multijob_init
+
+# Set up the $T filesystem layout that we care about.
+tmpdir="${T}/prepstrip"
+rm -rf "${tmpdir}"
+mkdir -p "${tmpdir}"/{inodes,splitdebug,sources}
+
+# Usage: save_elf_sources <elf>
+save_elf_sources() {
+ ${FEATURES_installsources} || return 0
+ ${RESTRICT_installsources} && return 0
+ if ! ${debugedit_found} ; then
+ if ! ${debugedit_warned} ; then
+ debugedit_warned=true
+ ewarn "FEATURES=installsources is enabled but the debugedit binary could not"
+ ewarn "be found. This feature will not work unless debugedit is installed!"
+ fi
+ return 0
+ fi
+
+ local x=$1
+
+ # Since we're editing the ELF here, we should recompute the build-id
+ # (the -i flag below). Save that output so we don't need to recompute
+ # it later on in the save_elf_debug step.
+ buildid=$(debugedit -i \
+ -b "${WORKDIR}" \
+ -d "${prepstrip_sources_dir}" \
+ -l "${tmpdir}/sources/${x##*/}.${BASHPID:-$(__bashpid)}" \
+ "${x}")
+}
+
+# Usage: save_elf_debug <elf> [splitdebug file]
+save_elf_debug() {
+ ${FEATURES_splitdebug} || return 0
+ ${RESTRICT_splitdebug} && return 0
+
+ # NOTE: Debug files must be installed in
+ # ${EPREFIX}/usr/lib/debug/${EPREFIX} (note that ${EPREFIX} occurs
+ # twice in this path) in order for gdb's debug-file-directory
+ # lookup to work correctly.
+ local x=$1
+ local inode_debug=$2
+ local splitdebug=$3
+ local y=${ED}usr/lib/debug/${x:${#D}}.debug
+
+ # don't save debug info twice
+ [[ ${x} == *".debug" ]] && return 0
+
+ mkdir -p "${y%/*}"
+
+ if [ -f "${inode_debug}" ] ; then
+ ln "${inode_debug}" "${y}" || die "ln failed unexpectedly"
+ else
+ if [[ -n ${splitdebug} ]] ; then
+ mv "${splitdebug}" "${y}"
+ else
+ local objcopy_flags="--only-keep-debug"
+ ${FEATURES_compressdebug} && objcopy_flags+=" --compress-debug-sections"
+ ${OBJCOPY} ${objcopy_flags} "${x}" "${y}"
+ ${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
+ fi
+ # Only do the following if the debug file was
+ # successfully created (see bug #446774).
+ if [ $? -eq 0 ] ; then
+ local args="a-x,o-w"
+ [[ -g ${x} || -u ${x} ]] && args+=",go-r"
+ chmod ${args} "${y}"
+ ln "${y}" "${inode_debug}" || die "ln failed unexpectedly"
+ fi
+ fi
+
+ # if we don't already have build-id from debugedit, look it up
+ if [[ -z ${buildid} ]] ; then
+ # convert the readelf output to something useful
+ buildid=$(${READELF} -n "${x}" 2>/dev/null | awk '/Build ID:/{ print $NF; exit }')
+ fi
+ if [[ -n ${buildid} ]] ; then
+ local buildid_dir="${ED}usr/lib/debug/.build-id/${buildid:0:2}"
+ local buildid_file="${buildid_dir}/${buildid:2}"
+ mkdir -p "${buildid_dir}"
+ [ -L "${buildid_file}".debug ] || ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
+ [ -L "${buildid_file}" ] || ln -s "/${x:${#D}}" "${buildid_file}"
+ fi
+}
+
+# Usage: process_elf <elf>
+process_elf() {
+ local x=$1 inode_link=$2 strip_flags=${*:3}
+ local already_stripped lockfile xt_data
+
+ __vecho " ${x:${#ED}}"
+
+ # If two processes try to debugedit or strip the same hardlink at the
+ # same time, it may corrupt files or cause loss of splitdebug info.
+ # So, use a lockfile to prevent interference (easily observed with
+ # dev-vcs/git which creates ~111 hardlinks to one file in
+ # /usr/libexec/git-core).
+ lockfile=${inode_link}_lockfile
+ if ! ln "${inode_link}" "${lockfile}" 2>/dev/null ; then
+ while [[ -f ${lockfile} ]] ; do
+ sleep 1
+ done
+ unset lockfile
+ fi
+
+ [ -f "${inode_link}_stripped" ] && already_stripped=true || already_stripped=false
+
+ if ! ${already_stripped} ; then
+ if ${PRESERVE_XATTR} ; then
+ xt_data=$(dump_xattrs "${x}")
+ fi
+ save_elf_sources "${x}"
+ fi
+
+ if ${strip_this} ; then
+
+ # see if we can split & strip at the same time
+ if [[ -n ${SPLIT_STRIP_FLAGS} ]] ; then
+ local shortname="${x##*/}.debug"
+ local splitdebug="${tmpdir}/splitdebug/${shortname}.${BASHPID:-$(__bashpid)}"
+ ${already_stripped} || \
+ ${STRIP} ${strip_flags} \
+ -f "${splitdebug}" \
+ -F "${shortname}" \
+ "${x}"
+ save_elf_debug "${x}" "${inode_link}_debug" "${splitdebug}"
+ else
+ save_elf_debug "${x}" "${inode_link}_debug"
+ ${already_stripped} || \
+ ${STRIP} ${strip_flags} "${x}"
+ fi
+ fi
+
+ if ${already_stripped} ; then
+ rm -f "${x}" || die "rm failed unexpectedly"
+ ln "${inode_link}_stripped" "${x}" || die "ln failed unexpectedly"
+ else
+ ln "${x}" "${inode_link}_stripped" || die "ln failed unexpectedly"
+ if [[ ${xt_data} ]] ; then
+ restore_xattrs <<< "${xt_data}"
+ fi
+ fi
+
+ [[ -n ${lockfile} ]] && rm -f "${lockfile}"
+}
+
+# The absence of the .symtab section tells us that a binary has been stripped.
+# We want to log already stripped binaries, as this may be a QA violation.
+# They prevent us from getting the splitdebug data.
+if ! ${RESTRICT_binchecks} && ! ${RESTRICT_strip} ; then
+ # We need to do the non-stripped scan serially first before we turn around
+ # and start stripping the files ourselves. The log parsing can be done in
+ # parallel though.
+ log=${tmpdir}/scanelf-already-stripped.log
+ scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^${ED}##" > "${log}"
+ (
+ __multijob_child_init
+ qa_var="QA_PRESTRIPPED_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_PRESTRIPPED="${!qa_var}"
+ if [[ -n ${QA_PRESTRIPPED} && -s ${log} && \
+ ${QA_STRICT_PRESTRIPPED-unset} = unset ]] ; then
+ shopts=$-
+ set -o noglob
+ for x in ${QA_PRESTRIPPED} ; do
+ sed -e "s#^${x#/}\$##" -i "${log}"
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ sed -e "/^\$/d" -e "s#^#/#" -i "${log}"
+ if [[ -s ${log} ]] ; then
+ __vecho -e "\n"
+ eqawarn "QA Notice: Pre-stripped files found:"
+ eqawarn "$(<"${log}")"
+ else
+ rm -f "${log}"
+ fi
+ ) &
+ __multijob_post_fork
+fi
+
+# Since strip creates a new inode, we need to know the initial set of
+# inodes in advance, so that we can avoid interference due to trying
+# to strip the same (hardlinked) file multiple times in parallel.
+# See bug #421099.
+if [[ ${USERLAND} == BSD ]] ; then
+ get_inode_number() { stat -f '%i' "$1"; }
+else
+ get_inode_number() { stat -c '%i' "$1"; }
+fi
+cd "${tmpdir}/inodes" || die "cd failed unexpectedly"
+while read -r x ; do
+ inode_link=$(get_inode_number "${x}") || die "stat failed unexpectedly"
+ echo "${x}" >> "${inode_link}" || die "echo failed unexpectedly"
+done < <(
+ # Use sort -u to eliminate duplicates for bug #445336.
+ (
+ scanelf -yqRBF '#k%F' -k '.symtab' "$@"
+ find "$@" -type f ! -type l -name '*.a'
+ ) | LC_ALL=C sort -u
+)
+
+# Now we look for unstripped binaries.
+for inode_link in $(shopt -s nullglob; echo *) ; do
+while read -r x
+do
+
+ if ! ${banner} ; then
+ __vecho "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
+ banner=true
+ fi
+
+ (
+ __multijob_child_init
+ f=$(file "${x}") || exit 0
+ [[ -z ${f} ]] && exit 0
+
+ if ! ${SKIP_STRIP} ; then
+ # The noglob toggling is to support STRIP_MASK="/*/booga" and to keep
+ # the for loop from expanding the globs.
+ # The eval echo is to support STRIP_MASK="/*/{booga,bar}" brace syntax.
+ set -o noglob
+ strip_this=true
+ for m in $(eval echo ${STRIP_MASK}) ; do
+ [[ /${x#${ED}} == ${m} ]] && strip_this=false && break
+ done
+ set +o noglob
+ else
+ strip_this=false
+ fi
+
+ # In Prefix we are usually an unprivileged user, so we can't strip
+ # unwritable objects. Make them temporarily writable for the
+ # stripping.
+ was_not_writable=false
+ if [[ ! -w ${x} ]] ; then
+ was_not_writable=true
+ chmod u+w "${x}"
+ fi
+
+ # Only split debug info for finally linked objects or kernel
+ # modules, as debuginfo for intermediary files (think crt*.o
+ # from gcc/glibc) is useless and actually causes problems.
+ # Install sources for all ELF types, though, since that data
+ # is always useful.
+
+ buildid=
+ if [[ ${f} == *"current ar archive"* ]] ; then
+ __vecho " ${x:${#ED}}"
+ if ${strip_this} ; then
+ # If we have split debug enabled, then do not strip this.
+ # There is no concept of splitdebug for objects not yet
+ # linked in (only for finally linked ELFs), so we have to
+ # retain the debug info in the archive itself.
+ if ! ${FEATURES_splitdebug} || ${RESTRICT_splitdebug} ; then
+ ${STRIP} -g "${x}"
+ fi
+ fi
+ elif [[ ${f} == *"SB executable"* || ${f} == *"SB shared object"* ]] ; then
+ process_elf "${x}" "${inode_link}" ${PORTAGE_STRIP_FLAGS}
+ elif [[ ${f} == *"SB relocatable"* ]] ; then
+ process_elf "${x}" "${inode_link}" ${SAFE_STRIP_FLAGS}
+ fi
+
+ if ${was_not_writable} ; then
+ chmod u-w "${x}"
+ fi
+ ) &
+ __multijob_post_fork
+
+done < "${inode_link}"
+done
+
+# With a bit more work, we could run the rsync processes below in
+# parallel, but it's not clear that would be an overall improvement.
+__multijob_finish
+
+cd "${tmpdir}"/sources/ && cat * > "${tmpdir}/debug.sources" 2>/dev/null
+if [[ -s ${tmpdir}/debug.sources ]] && \
+ ${FEATURES_installsources} && \
+ ! ${RESTRICT_installsources} && \
+ ${debugedit_found}
+then
+ __vecho "installsources: rsyncing source files"
+ [[ -d ${D}${prepstrip_sources_dir} ]] || mkdir -p "${D}${prepstrip_sources_dir}"
+ grep -zv '/<[^/>]*>$' "${tmpdir}"/debug.sources | \
+ (cd "${WORKDIR}"; LANG=C sort -z -u | \
+ rsync -tL0 --chmod=ugo-st,a+r,go-w,Da+x,Fa-x --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
+
+ # Preserve directory structure.
+ # Needed after running save_elf_sources.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=444310
+ while read -r -d $'\0' emptydir
+ do
+ >> "${emptydir}"/.keepdir
+ done < <(find "${D}${prepstrip_sources_dir}/" -type d -empty -print0)
+fi
+
+cd "${T}"
+rm -rf "${tmpdir}"
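
The lockfile trick in process_elf deserves a closer look: ln(1) fails atomically when the link target already exists, which turns hardlink creation into a mutex that works across concurrent subshells with no extra tooling. A minimal sketch, assuming $1 names an existing regular file:

    #!/bin/bash
    # Hardlink-based mutex: whoever creates the link first wins.
    lock=$1_lockfile
    if ln "$1" "${lock}" 2>/dev/null ; then
        echo "lock acquired; stripping $1"
        # ... critical section (strip/debugedit) would run here ...
        rm -f "${lock}"
    else
        # A peer holds the lock; poll until it releases.
        while [[ -f ${lock} ]] ; do
            sleep 1
        done
        echo "peer already handled $1"
    fi
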
diff --git a/usr/lib/portage/bin/ebuild-helpers/unprivileged/chgrp b/usr/lib/portage/bin/ebuild-helpers/unprivileged/chgrp
new file mode 100755
index 0000000..2af9b8c
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/unprivileged/chgrp
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+IFS=':'
+
+for path in ${PATH}; do
+ [[ -x ${path}/${scriptname} ]] || continue
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ IFS=$' \t\n'
+ output=$("${path}/${scriptname}" "$@" 2>&1)
+ if [[ $? -ne 0 ]] ; then
+
+ # Avoid an extreme performance problem when the
+ # output is very long (bug #470992).
+ if [[ $(wc -l <<< "${output}") -gt 100 ]]; then
+ output=$(head -n100 <<< "${output}")
+ output="${output}\n ... (further messages truncated)"
+ fi
+
+ source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+ if ! ___eapi_has_prefix_variables; then
+ EPREFIX=
+ fi
+ msg="${scriptname} failure ignored with unprivileged user:\n ${scriptname} $*\n ${output}"
+ # Reverse expansion of ${D} and ${EPREFIX}, for readability.
+ msg=${msg//${D}/'${D}'}
+ if [[ -n ${EPREFIX} ]] ; then
+ msg=${msg//${EPREFIX}/'${EPREFIX}'}
+ msg=${msg//${EPREFIX#/}/'${EPREFIX}'}
+ fi
+ ewarn "${msg}"
+ fi
+ exit 0
+done
+
+exit 1
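
The wrapper above shadows the real chgrp: it walks PATH, skips itself via the -ef (same inode) test, runs the first real binary of the same name, and downgrades a failure to an ewarn so unprivileged Prefix builds can proceed. A hypothetical demonstration of how it takes effect inside an ebuild environment, where ${T} and ewarn are available (paths are illustrative, not Portage defaults):

    # With the helper directory first in PATH, a chgrp that would
    # normally abort an unprivileged install is logged and ignored.
    export PATH="/usr/lib/portage/bin/ebuild-helpers/unprivileged:${PATH}"
    touch "${T}/somefile"
    chgrp root "${T}/somefile"   # fails for normal users; wrapper warns
    echo "exit status: $?"       # prints 0, the failure was swallowed
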
diff --git a/usr/lib/portage/bin/ebuild-helpers/unprivileged/chown b/usr/lib/portage/bin/ebuild-helpers/unprivileged/chown
new file mode 100755
index 0000000..2af9b8c
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/unprivileged/chown
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+IFS=':'
+
+for path in ${PATH}; do
+ [[ -x ${path}/${scriptname} ]] || continue
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ IFS=$' \t\n'
+ output=$("${path}/${scriptname}" "$@" 2>&1)
+ if [[ $? -ne 0 ]] ; then
+
+ # Avoid an extreme performance problem when the
+ # output is very long (bug #470992).
+ if [[ $(wc -l <<< "${output}") -gt 100 ]]; then
+ output=$(head -n100 <<< "${output}")
+ output="${output}\n ... (further messages truncated)"
+ fi
+
+ source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"/isolated-functions.sh
+
+ if ! ___eapi_has_prefix_variables; then
+ EPREFIX=
+ fi
+ msg="${scriptname} failure ignored with unprivileged user:\n ${scriptname} $*\n ${output}"
+ # Reverse expansion of ${D} and ${EPREFIX}, for readability.
+ msg=${msg//${D}/'${D}'}
+ if [[ -n ${EPREFIX} ]] ; then
+ msg=${msg//${EPREFIX}/'${EPREFIX}'}
+ msg=${msg//${EPREFIX#/}/'${EPREFIX}'}
+ fi
+ ewarn "${msg}"
+ fi
+ exit 0
+done
+
+exit 1
diff --git a/usr/lib/portage/bin/ebuild-helpers/xattr/install b/usr/lib/portage/bin/ebuild-helpers/xattr/install
new file mode 100755
index 0000000..d572fe6
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-helpers/xattr/install
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+INSTALL_XATTR=${EPREFIX}/usr/bin/install-xattr
+# Use safe cwd, avoiding unsafe import for bug #469338.
+export __PORTAGE_HELPER_CWD=${PWD}
+cd "${PORTAGE_PYM_PATH}"
+export __PORTAGE_HELPER_PATH=${BASH_SOURCE[0]}
+
+if [[ ${PORTAGE_INSTALL_XATTR_IMPLEMENTATION} == "c" ]]; then
+ implementation="c"
+elif [[ ${PORTAGE_INSTALL_XATTR_IMPLEMENTATION} == "python" ]]; then
+ implementation="python"
+else
+ # If PORTAGE_INSTALL_XATTR_IMPLEMENTATION is unset or not set to "c" or "python"
+ # then we'll autodetect, preferring "c" but falling back on "python"
+ if [[ -x "${INSTALL_XATTR}" ]]; then
+ implementation="c"
+ else
+ implementation="python"
+ fi
+fi
+
+if [[ "${implementation}" == "c" ]]; then
+ exec "${INSTALL_XATTR}" "$@"
+elif [[ "${implementation}" == "python" ]]; then
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/install.py" "$@"
+else
+ echo "Unknown implementation for PORTAGE_INSTALL_XATTR_IMPLEMENTATION"
+ exit -1
+fi
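
Selection is driven entirely by PORTAGE_INSTALL_XATTR_IMPLEMENTATION: an explicit "c" or "python" wins, and anything else falls back to autodetection (prefer the compiled install-xattr helper, else the Python fallback). For example, to pin a single run to the Python implementation (the package name is a placeholder):

    PORTAGE_INSTALL_XATTR_IMPLEMENTATION=python emerge --oneshot app-misc/foo
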
diff --git a/usr/lib/portage/bin/ebuild-ipc b/usr/lib/portage/bin/ebuild-ipc
new file mode 100755
index 0000000..820005f
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-ipc
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+# Use safe cwd, avoiding unsafe import for bug #469338.
+cd "${PORTAGE_PYM_PATH}"
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"
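
The cd before exec is deliberate: Python puts the current directory on sys.path in several invocation modes, so moving into the trusted pym directory first ensures no module can be hijacked from a writable build directory (bug #469338). A minimal sketch of the same pattern, assuming the default install locations:

    #!/bin/bash
    # Enter a trusted directory before exec'ing the interpreter so any
    # implicit current-directory module lookup resolves there.
    cd "${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}" || exit 1
    exec "${PORTAGE_PYTHON:-/usr/bin/python}" -c 'import os; print(os.getcwd())'
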
diff --git a/usr/lib/portage/bin/ebuild-ipc.py b/usr/lib/portage/bin/ebuild-ipc.py
new file mode 100755
index 0000000..b47ee23
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild-ipc.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This is a helper which ebuild processes can use
+# to communicate with portage's main python process.
+
+import errno
+import logging
+import os
+import pickle
+import platform
+import signal
+import sys
+import time
+
+def debug_signal(signum, frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
+
+if os.path.isfile(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), ".portage_not_installed")):
+ pym_paths = [os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym")]
+ sys.path.insert(0, pym_paths[0])
+else:
+ import distutils.sysconfig
+ pym_paths = [os.path.join(distutils.sysconfig.get_python_lib(), x) for x in ("_emerge", "portage")]
+# Avoid sandbox violations after Python upgrade.
+if os.environ.get("SANDBOX_ON") == "1":
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ for pym_path in pym_paths:
+ if pym_path not in sandbox_write:
+ sandbox_write.append(pym_path)
+ os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
+ del pym_path, sandbox_write
+del pym_paths
+
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.PipeReader import PipeReader
+
+RETURNCODE_WRITE_FAILED = 2
+
+class FifoWriter(AbstractPollTask):
+
+ __slots__ = ('buf', 'fifo', '_fd', '_reg_id',)
+
+ def _start(self):
+ try:
+ self._fd = os.open(self.fifo, os.O_WRONLY|os.O_NONBLOCK)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the daemon has been killed.
+ self.returncode = RETURNCODE_WRITE_FAILED
+ self._unregister()
+ self._async_wait()
+ return
+ else:
+ raise
+ self._reg_id = self.scheduler.io_add_watch(
+ self._fd,
+ self.scheduler.IO_OUT | self.scheduler.IO_HUP | \
+ self._exceptional_events, self._output_handler)
+ self._registered = True
+
+ def _output_handler(self, fd, event):
+ if event & self.scheduler.IO_OUT:
+ # The whole buf should be able to fit in the fifo with
+ # a single write call, so there's no valid reason for
+ # os.write to raise EAGAIN here.
+ buf = self.buf
+ while buf:
+ buf = buf[os.write(fd, buf):]
+ self.returncode = os.EX_OK
+ self._unregister()
+ self.wait()
+ return False
+ else:
+ self._unregister_if_appropriate(event)
+ if not self._registered:
+ self.returncode = RETURNCODE_WRITE_FAILED
+ self.wait()
+ return False
+ return True
+
+ def _cancel(self):
+ self.returncode = self._cancelled_returncode
+ self._unregister()
+
+ def _unregister(self):
+ self._registered = False
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+ if self._fd is not None:
+ os.close(self._fd)
+ self._fd = None
+
+class EbuildIpc(object):
+
+ # Timeout for each individual communication attempt (we retry
+ # as long as the daemon process appears to be alive).
+ _COMMUNICATE_RETRY_TIMEOUT_MS = 15000
+
+ def __init__(self):
+ self.fifo_dir = os.environ['PORTAGE_BUILDDIR']
+ self.ipc_in_fifo = os.path.join(self.fifo_dir, '.ipc_in')
+ self.ipc_out_fifo = os.path.join(self.fifo_dir, '.ipc_out')
+ self.ipc_lock_file = os.path.join(self.fifo_dir, '.ipc_lock')
+
+ def _daemon_is_alive(self):
+ try:
+ builddir_lock = portage.locks.lockfile(self.fifo_dir,
+ wantnewlockfile=True, flags=os.O_NONBLOCK)
+ except portage.exception.TryAgain:
+ return True
+ else:
+ portage.locks.unlockfile(builddir_lock)
+ return False
+
+ def communicate(self, args):
+
+ # Make locks quiet since unintended locking messages displayed on
+ # stdout could corrupt the intended output of this program.
+ portage.locks._quiet = True
+ lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)
+
+ try:
+ return self._communicate(args)
+ finally:
+ portage.locks.unlockfile(lock_obj)
+
+ def _timeout_retry_msg(self, start_time, when):
+ time_elapsed = time.time() - start_time
+ portage.util.writemsg_level(
+ portage.localization._(
+ 'ebuild-ipc timed out %s after %d seconds,' + \
+ ' retrying...\n') % (when, time_elapsed),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _no_daemon_msg(self):
+ portage.util.writemsg_level(
+ portage.localization._(
+ 'ebuild-ipc: daemon process not detected\n'),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _run_writer(self, fifo_writer, msg):
+ """
+ Wait on the fifo writer and return an appropriate exit code. This
+ may return unsuccessfully due to timeout if the daemon
+ process does not appear to be alive.
+ """
+
+ start_time = time.time()
+
+ fifo_writer.start()
+ eof = fifo_writer.poll() is not None
+
+ while not eof:
+ fifo_writer._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT_MS)
+
+ eof = fifo_writer.poll() is not None
+ if eof:
+ break
+ elif self._daemon_is_alive():
+ self._timeout_retry_msg(start_time, msg)
+ else:
+ fifo_writer.cancel()
+ self._no_daemon_msg()
+ fifo_writer.wait()
+ return 2
+
+ return fifo_writer.wait()
+
+ def _receive_reply(self, input_fd):
+
+ start_time = time.time()
+
+ pipe_reader = PipeReader(input_files={"input_fd":input_fd},
+ scheduler=global_event_loop())
+ pipe_reader.start()
+
+ eof = pipe_reader.poll() is not None
+
+ while not eof:
+ pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT_MS)
+ eof = pipe_reader.poll() is not None
+ if not eof:
+ if self._daemon_is_alive():
+ self._timeout_retry_msg(start_time,
+ portage.localization._('during read'))
+ else:
+ pipe_reader.cancel()
+ self._no_daemon_msg()
+ return 2
+
+ buf = pipe_reader.getvalue()
+
+ retval = 2
+
+ if not buf:
+
+ portage.util.writemsg_level(
+ "ebuild-ipc: %s\n" % \
+ (portage.localization._('read failed'),),
+ level=logging.ERROR, noiselevel=-1)
+
+ else:
+
+ try:
+ reply = pickle.loads(buf)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ portage.util.writemsg_level(
+ "ebuild-ipc: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ else:
+
+ (out, err, retval) = reply
+
+ if out:
+ portage.util.writemsg_stdout(out, noiselevel=-1)
+
+ if err:
+ portage.util.writemsg(err, noiselevel=-1)
+
+ return retval
+
+ def _communicate(self, args):
+
+ if not self._daemon_is_alive():
+ self._no_daemon_msg()
+ return 2
+
+ # Open the input fifo before the output fifo, in order to make it
+ # possible for the daemon to send a reply without blocking. This
+ # improves performance, and also makes it possible for the daemon
+ # to do a non-blocking write without a race condition.
+ input_fd = os.open(self.ipc_out_fifo,
+ os.O_RDONLY|os.O_NONBLOCK)
+
+ # Use forks so that the child process can handle blocking IO
+ # un-interrupted, while the parent handles all timeout
+ # considerations. This helps to avoid possible race conditions
+ # from interference between timeouts and blocking IO operations.
+ msg = portage.localization._('during write')
+ retval = self._run_writer(FifoWriter(buf=pickle.dumps(args),
+ fifo=self.ipc_in_fifo, scheduler=global_event_loop()), msg)
+
+ if retval != os.EX_OK:
+ portage.util.writemsg_level(
+ "ebuild-ipc: %s: %s\n" % (msg,
+ portage.localization._('subprocess failure: %s') % \
+ retval), level=logging.ERROR, noiselevel=-1)
+ return retval
+
+ if not self._daemon_is_alive():
+ self._no_daemon_msg()
+ return 2
+
+ return self._receive_reply(input_fd)
+
+def ebuild_ipc_main(args):
+ ebuild_ipc = EbuildIpc()
+ return ebuild_ipc.communicate(args)
+
+if __name__ == '__main__':
+ sys.exit(ebuild_ipc_main(sys.argv[1:]))
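
End to end, the helper pickles its argument list, writes it into the daemon's input FIFO, and unpickles an (out, err, retval) triple from the reply FIFO, relaying out/err and exiting with retval. A hypothetical invocation from inside an ebuild phase, where PORTAGE_BUILDDIR and the daemon's FIFOs already exist (the command name and atom are illustrative):

    # Ask the main portage process a question instead of re-running
    # portageq inside the build sandbox.
    "${PORTAGE_BIN_PATH}/ebuild-ipc" has_version "${ROOT}" "dev-lang/python"
    echo "daemon replied with status $?"
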
diff --git a/usr/lib/portage/bin/ebuild.sh b/usr/lib/portage/bin/ebuild.sh
new file mode 100755
index 0000000..7623049
--- /dev/null
+++ b/usr/lib/portage/bin/ebuild.sh
@@ -0,0 +1,727 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH="${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}"
+PORTAGE_PYM_PATH="${PORTAGE_PYM_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/pym}"
+
+# Prevent aliases from causing portage to act inappropriately.
+# Make sure this runs before everything else so we don't mangle aliases that follow.
+unalias -a
+
+source "${PORTAGE_BIN_PATH}/isolated-functions.sh" || exit 1
+
+if [[ $EBUILD_PHASE != depend ]] ; then
+ source "${PORTAGE_BIN_PATH}/phase-functions.sh" || die
+ source "${PORTAGE_BIN_PATH}/save-ebuild-env.sh" || die
+ source "${PORTAGE_BIN_PATH}/phase-helpers.sh" || die
+ source "${PORTAGE_BIN_PATH}/bashrc-functions.sh" || die
+else
+ # These dummy functions are for things that are likely to be called
+ # in global scope, even though they are completely useless during
+ # the "depend" phase.
+ for x in diropts docompress exeopts get_KV insopts \
+ KV_major KV_micro KV_minor KV_to_int \
+ libopts register_die_hook register_success_hook \
+ __strip_duplicate_slashes \
+ use_with use_enable ; do
+ eval "${x}() {
+ if ___eapi_disallows_helpers_in_global_scope; then
+ die \"\${FUNCNAME}() calls are not allowed in global scope\"
+ fi
+ }"
+ done
+ # These dummy functions return false in non-strict EAPIs, in order to ensure that
+ # `use multislot` is false for the "depend" phase.
+ funcs="use useq usev"
+ ___eapi_has_usex && funcs+=" usex"
+ for x in ${funcs} ; do
+ eval "${x}() {
+ if ___eapi_disallows_helpers_in_global_scope; then
+ die \"\${FUNCNAME}() calls are not allowed in global scope\"
+ else
+ return 1
+ fi
+ }"
+ done
+ # These functions die because calls to them during the "depend" phase
+ # are considered to be severe QA violations.
+ funcs="best_version has_version portageq"
+ ___eapi_has_master_repositories && funcs+=" master_repositories"
+ ___eapi_has_repository_path && funcs+=" repository_path"
+ ___eapi_has_available_eclasses && funcs+=" available_eclasses"
+ ___eapi_has_eclass_path && funcs+=" eclass_path"
+ ___eapi_has_license_path && funcs+=" license_path"
+ for x in ${funcs} ; do
+ eval "${x}() { die \"\${FUNCNAME}() calls are not allowed in global scope\"; }"
+ done
+ unset funcs x
+fi
+
+# Don't use sandbox's BASH_ENV for new shells because it does
+# 'source /etc/profile' which can interfere with the build
+# environment by modifying our PATH.
+unset BASH_ENV
+
+# This is just a temporary workaround for portage-9999 users since
+# earlier portage versions do not detect a version change in this case
+# (9999 to 9999) and therefore try to execute an incompatible version of
+# ebuild.sh during the upgrade.
+export PORTAGE_BZIP2_COMMAND=${PORTAGE_BZIP2_COMMAND:-bzip2}
+
+# These two functions wrap sourcing and calling respectively. At present they
+# perform a QA check to make sure eclasses, ebuilds, and profiles don't mess
+# with shell options (shopts). Ebuilds/eclasses changing shopts should reset
+# them when they are done.
+
+__qa_source() {
+ local shopts=$(shopt) OLDIFS="$IFS"
+ local retval
+ source "$@"
+ retval=$?
+ set +e
+ [[ $shopts != $(shopt) ]] &&
+ eqawarn "QA Notice: Global shell options changed and were not restored while sourcing '$*'"
+ [[ "$IFS" != "$OLDIFS" ]] &&
+ eqawarn "QA Notice: Global IFS changed and was not restored while sourcing '$*'"
+ return $retval
+}
+
+__qa_call() {
+ local shopts=$(shopt) OLDIFS="$IFS"
+ local retval
+ "$@"
+ retval=$?
+ set +e
+ [[ $shopts != $(shopt) ]] &&
+ eqawarn "QA Notice: Global shell options changed and were not restored while calling '$*'"
+ [[ "$IFS" != "$OLDIFS" ]] &&
+ eqawarn "QA Notice: Global IFS changed and was not restored while calling '$*'"
+ return $retval
+}
+
+EBUILD_SH_ARGS="$*"
+
+shift $#
+
+# Unset some variables that break things.
+unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOBIGNORE
+
+[[ $PORTAGE_QUIET != "" ]] && export PORTAGE_QUIET
+
+# Sandbox support functions; defined prior to profile.bashrc sourcing, since
+# the profile might need to add a default exception (e.g. /usr/lib64/conftest)
+__sb_append_var() {
+ local _v=$1 ; shift
+ local var="SANDBOX_${_v}"
+ [[ -z $1 || -n $2 ]] && die "Usage: add$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${_v}") <colon-delimited list of paths>"
+ export ${var}="${!var:+${!var}:}$1"
+}
+# bash-4 version:
+# local var="SANDBOX_${1^^}"
+# addread() { __sb_append_var ${0#add} "$@" ; }
+addread() { __sb_append_var READ "$@" ; }
+addwrite() { __sb_append_var WRITE "$@" ; }
+adddeny() { __sb_append_var DENY "$@" ; }
+addpredict() { __sb_append_var PREDICT "$@" ; }
+
+addwrite "${PORTAGE_TMPDIR}"
+addread "/:${PORTAGE_TMPDIR}"
+[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"
+
+# Avoid sandbox violations in temporary directories.
+if [[ -w $T ]] ; then
+ export TEMP=$T
+ export TMP=$T
+ export TMPDIR=$T
+elif [[ $SANDBOX_ON = 1 ]] ; then
+ for x in TEMP TMP TMPDIR ; do
+ [[ -n ${!x} ]] && addwrite "${!x}"
+ done
+ unset x
+fi
+
+# the sandbox is disabled by default except when overridden in the relevant stages
+export SANDBOX_ON=0
+
+# Ensure that $PWD is sane whenever possible, to protect against
+# exploitation of insecure search path for python -c in ebuilds.
+# See bug #239560 and bug #469338.
+cd "${PORTAGE_PYM_PATH}" || \
+ die "PORTAGE_PYM_PATH does not exist: '${PORTAGE_PYM_PATH}'"
+
+# If no perms are specified, dirs/files will have decent defaults
+# (not secretive, but not stupid).
+umask 022
+
+# debug-print() gets called from many places with verbose status information useful
+# for tracking down problems. The output is in $T/eclass-debug.log.
+# You can set ECLASS_DEBUG_OUTPUT to redirect the output somewhere else as well.
+# The special "on" setting echoes the information, mixing it with the rest of the
+# emerge output.
+# You can override the setting by exporting a new one from the console, or you can
+# set a new default in make.*. Here the default is "" or unset.
+
+# In the future this might use e* from /etc/init.d/functions.sh.
+debug-print() {
+ # if $T isn't defined, we're in dep calculation mode and
+ # shouldn't do anything
+ [[ $EBUILD_PHASE = depend || ! -d ${T} || ${#} -eq 0 ]] && return 0
+
+ if [[ ${ECLASS_DEBUG_OUTPUT} == on ]]; then
+ printf 'debug: %s\n' "${@}" >&2
+ elif [[ -n ${ECLASS_DEBUG_OUTPUT} ]]; then
+ printf 'debug: %s\n' "${@}" >> "${ECLASS_DEBUG_OUTPUT}"
+ fi
+
+ if [[ -w $T ]] ; then
+ # default target
+ printf '%s\n' "${@}" >> "${T}/eclass-debug.log"
+ # let the portage user own/write to this file
+ chgrp "${PORTAGE_GRPNAME:-${PORTAGE_GROUP}}" "${T}/eclass-debug.log"
+ chmod g+w "${T}/eclass-debug.log"
+ fi
+}
+
+# The following 2 functions are debug-print() wrappers
+
+debug-print-function() {
+ debug-print "${1}: entering function, parameters: ${*:2}"
+}
+
+debug-print-section() {
+ debug-print "now in section ${*}"
+}
+
+# Source all eclasses given as parameters.
+declare -ix ECLASS_DEPTH=0
+inherit() {
+ ECLASS_DEPTH=$(($ECLASS_DEPTH + 1))
+ if [[ ${ECLASS_DEPTH} > 1 ]]; then
+ debug-print "*** Multiple Inheritence (Level: ${ECLASS_DEPTH})"
+ fi
+
+ if [[ -n $ECLASS && -n ${!__export_funcs_var} ]] ; then
+ echo "QA Notice: EXPORT_FUNCTIONS is called before inherit in" \
+ "$ECLASS.eclass. For compatibility with <=portage-2.1.6.7," \
+ "only call EXPORT_FUNCTIONS after inherit(s)." \
+ | fmt -w 75 | while read -r ; do eqawarn "$REPLY" ; done
+ fi
+
+ local repo_location
+ local location
+ local potential_location
+ local x
+
+ # These variables must be restored before returning.
+ local PECLASS=$ECLASS
+ local prev_export_funcs_var=$__export_funcs_var
+
+ local B_IUSE
+ local B_REQUIRED_USE
+ local B_DEPEND
+ local B_RDEPEND
+ local B_PDEPEND
+ local B_HDEPEND
+ while [ "$1" ]; do
+ location=""
+ potential_location=""
+
+ export ECLASS="$1"
+ __export_funcs_var=__export_functions_$ECLASS_DEPTH
+ unset $__export_funcs_var
+
+ if [[ ${EBUILD_PHASE} != depend && ${EBUILD_PHASE} != nofetch && \
+ ${EBUILD_PHASE} != *rm && ${EMERGE_FROM} != "binary" && \
+ -z ${_IN_INSTALL_QA_CHECK} ]]
+ then
+ # This is disabled in the *rm phases because they frequently give
+ # false alarms due to INHERITED in /var/db/pkg being outdated
+ # in comparison to the eclasses from the portage tree. It's
+ # disabled for nofetch, since that can be called by repoman and
+ # that triggers bug #407449 due to repoman not exporting
+ # non-essential variables such as INHERITED.
+ if ! has $ECLASS $INHERITED $__INHERITED_QA_CACHE ; then
+ eqawarn "QA Notice: ECLASS '$ECLASS' inherited illegally in $CATEGORY/$PF $EBUILD_PHASE"
+ fi
+ fi
+
+ for repo_location in "${PORTAGE_ECLASS_LOCATIONS[@]}"; do
+ potential_location="${repo_location}/eclass/${1}.eclass"
+ if [[ -f ${potential_location} ]]; then
+ location="${potential_location}"
+ debug-print " eclass exists: ${location}"
+ break
+ fi
+ done
+ debug-print "inherit: $1 -> $location"
+ [[ -z ${location} ]] && die "${1}.eclass could not be found by inherit()"
+
+ # inherits in QA checks can't handle metadata assignments
+ if [[ -z ${_IN_INSTALL_QA_CHECK} ]]; then
+ # We need to back up the values of *DEPEND to B_*DEPEND
+ # (if set) and then restore them after the inherit call.
+
+ #turn off glob expansion
+ set -f
+
+ # Retain the old data and restore it later.
+ unset B_IUSE B_REQUIRED_USE B_DEPEND B_RDEPEND B_PDEPEND B_HDEPEND
+ [ "${IUSE+set}" = set ] && B_IUSE="${IUSE}"
+ [ "${REQUIRED_USE+set}" = set ] && B_REQUIRED_USE="${REQUIRED_USE}"
+ [ "${DEPEND+set}" = set ] && B_DEPEND="${DEPEND}"
+ [ "${RDEPEND+set}" = set ] && B_RDEPEND="${RDEPEND}"
+ [ "${PDEPEND+set}" = set ] && B_PDEPEND="${PDEPEND}"
+ [ "${HDEPEND+set}" = set ] && B_HDEPEND="${HDEPEND}"
+ unset IUSE REQUIRED_USE DEPEND RDEPEND PDEPEND HDEPEND
+ #turn on glob expansion
+ set +f
+ fi
+
+ __qa_source "$location" || die "died sourcing $location in inherit()"
+
+ if [[ -z ${_IN_INSTALL_QA_CHECK} ]]; then
+ #turn off glob expansion
+ set -f
+
+ # If each var has a value, append it to the global variable E_* to
+ # be applied after everything is finished. New incremental behavior.
+ [ "${IUSE+set}" = set ] && E_IUSE+="${E_IUSE:+ }${IUSE}"
+ [ "${REQUIRED_USE+set}" = set ] && E_REQUIRED_USE+="${E_REQUIRED_USE:+ }${REQUIRED_USE}"
+ [ "${DEPEND+set}" = set ] && E_DEPEND+="${E_DEPEND:+ }${DEPEND}"
+ [ "${RDEPEND+set}" = set ] && E_RDEPEND+="${E_RDEPEND:+ }${RDEPEND}"
+ [ "${PDEPEND+set}" = set ] && E_PDEPEND+="${E_PDEPEND:+ }${PDEPEND}"
+ [ "${HDEPEND+set}" = set ] && E_HDEPEND+="${E_HDEPEND:+ }${HDEPEND}"
+
+ [ "${B_IUSE+set}" = set ] && IUSE="${B_IUSE}"
+ [ "${B_IUSE+set}" = set ] || unset IUSE
+
+ [ "${B_REQUIRED_USE+set}" = set ] && REQUIRED_USE="${B_REQUIRED_USE}"
+ [ "${B_REQUIRED_USE+set}" = set ] || unset REQUIRED_USE
+
+ [ "${B_DEPEND+set}" = set ] && DEPEND="${B_DEPEND}"
+ [ "${B_DEPEND+set}" = set ] || unset DEPEND
+
+ [ "${B_RDEPEND+set}" = set ] && RDEPEND="${B_RDEPEND}"
+ [ "${B_RDEPEND+set}" = set ] || unset RDEPEND
+
+ [ "${B_PDEPEND+set}" = set ] && PDEPEND="${B_PDEPEND}"
+ [ "${B_PDEPEND+set}" = set ] || unset PDEPEND
+
+ [ "${B_HDEPEND+set}" = set ] && HDEPEND="${B_HDEPEND}"
+ [ "${B_HDEPEND+set}" = set ] || unset HDEPEND
+
+ #turn on glob expansion
+ set +f
+
+ if [[ -n ${!__export_funcs_var} ]] ; then
+ for x in ${!__export_funcs_var} ; do
+ debug-print "EXPORT_FUNCTIONS: $x -> ${ECLASS}_$x"
+ declare -F "${ECLASS}_$x" >/dev/null || \
+ die "EXPORT_FUNCTIONS: ${ECLASS}_$x is not defined"
+ eval "$x() { ${ECLASS}_$x \"\$@\" ; }" > /dev/null
+ done
+ fi
+ unset $__export_funcs_var
+
+ has $1 $INHERITED || export INHERITED="$INHERITED $1"
+ fi
+
+ shift
+ done
+ ((--ECLASS_DEPTH)) # Returns 1 when ECLASS_DEPTH reaches 0.
+ if (( ECLASS_DEPTH > 0 )) ; then
+ export ECLASS=$PECLASS
+ __export_funcs_var=$prev_export_funcs_var
+ else
+ unset ECLASS __export_funcs_var
+ fi
+ return 0
+}
+
+# Exports stub functions that call the eclass's functions, thereby making them default.
+# For example, if ECLASS="base" and you call "EXPORT_FUNCTIONS src_unpack", the following
+# code will be eval'd:
+# src_unpack() { base_src_unpack; }
+EXPORT_FUNCTIONS() {
+ if [ -z "$ECLASS" ]; then
+ die "EXPORT_FUNCTIONS without a defined ECLASS"
+ fi
+ eval $__export_funcs_var+=\" $*\"
+}
+
+PORTAGE_BASHRCS_SOURCED=0
+
+# @FUNCTION: __source_all_bashrcs
+# @DESCRIPTION:
+# Source all relevant bashrc files and perform other miscellaneous
+# environment initialization when appropriate.
+#
+# If EAPI is set then define functions provided by the current EAPI:
+#
+# * default_* aliases for the current EAPI phase functions
+# * A "default" function which is an alias for the default phase
+# function for the current phase.
+#
+__source_all_bashrcs() {
+ [[ $PORTAGE_BASHRCS_SOURCED = 1 ]] && return 0
+ PORTAGE_BASHRCS_SOURCED=1
+ local x
+
+ local OCC="${CC}" OCXX="${CXX}"
+
+ if [[ $EBUILD_PHASE != depend ]] ; then
+ # source the existing profile.bashrcs.
+ save_IFS
+ IFS=$'\n'
+ local path_array=($PROFILE_PATHS)
+ restore_IFS
+ for x in "${path_array[@]}" ; do
+ [ -f "$x/profile.bashrc" ] && __qa_source "$x/profile.bashrc"
+ done
+ fi
+
+ if [ -r "${PORTAGE_BASHRC}" ] ; then
+ if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+ source "${PORTAGE_BASHRC}"
+ else
+ set -x
+ source "${PORTAGE_BASHRC}"
+ set +x
+ fi
+ fi
+
+ if [[ $EBUILD_PHASE != depend ]] ; then
+ # The user's bashrc is the ONLY non-portage bit of code that can
+ # change shopts without a QA violation.
+ for x in "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT%/*},${P},${PF}}; do
+ if [ -r "${x}" ]; then
+ # If $- contains x, then tracing has already been enabled
+ # elsewhere for some reason. We preserve its state so as
+ # not to interfere.
+ if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+ source "${x}"
+ else
+ set -x
+ source "${x}"
+ set +x
+ fi
+ fi
+ done
+ fi
+
+ [ ! -z "${OCC}" ] && export CC="${OCC}"
+ [ ! -z "${OCXX}" ] && export CXX="${OCXX}"
+}
+
+# === === === === === === === === === === === === === === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === === === === === === === === === === === === === ===
+
+export SANDBOX_ON="1"
+export S=${WORKDIR}/${P}
+
+# Turn off extended glob matching so that g++ doesn't get incorrectly matched.
+shopt -u extglob
+
+if [[ ${EBUILD_PHASE} == depend ]] ; then
+ QA_INTERCEPTORS="awk bash cc egrep equery fgrep g++
+ gawk gcc grep javac java-config nawk perl
+ pkg-config python python-config sed"
+elif [[ ${EBUILD_PHASE} == clean* ]] ; then
+ unset QA_INTERCEPTORS
+else
+ QA_INTERCEPTORS="autoconf automake aclocal libtoolize"
+fi
+# Install the QA interceptor functions.
+if [[ -n ${QA_INTERCEPTORS} ]] ; then
+ for BIN in ${QA_INTERCEPTORS}; do
+ BIN_PATH=$(type -Pf ${BIN})
+ if [ "$?" != "0" ]; then
+ BODY="echo \"*** missing command: ${BIN}\" >&2; return 127"
+ else
+ BODY="${BIN_PATH} \"\$@\"; return \$?"
+ fi
+ if [[ ${EBUILD_PHASE} == depend ]] ; then
+ FUNC_SRC="${BIN}() {
+ if [ \$ECLASS_DEPTH -gt 0 ]; then
+ eqawarn \"QA Notice: '${BIN}' called in global scope: eclass \${ECLASS}\"
+ else
+ eqawarn \"QA Notice: '${BIN}' called in global scope: \${CATEGORY}/\${PF}\"
+ fi
+ ${BODY}
+ }"
+ elif has ${BIN} autoconf automake aclocal libtoolize ; then
+ FUNC_SRC="${BIN}() {
+ if ! has \${FUNCNAME[1]} eautoreconf eaclocal _elibtoolize \\
+ eautoheader eautoconf eautomake autotools_run_tool \\
+ autotools_check_macro autotools_get_subdirs \\
+ autotools_get_auxdir ; then
+ eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+ eqawarn \"Use autotools.eclass instead of calling '${BIN}' directly.\"
+ fi
+ ${BODY}
+ }"
+ else
+ FUNC_SRC="${BIN}() {
+ eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+ ${BODY}
+ }"
+ fi
+ eval "$FUNC_SRC" || echo "error creating QA interceptor ${BIN}" >&2
+ done
+ unset BIN_PATH BIN BODY FUNC_SRC
+fi
+
+# Subshell/helper die support (must export for the die helper).
+export EBUILD_MASTER_PID=${BASHPID:-$(__bashpid)}
+trap 'exit 1' SIGTERM
+
+if ! has "$EBUILD_PHASE" clean cleanrm depend && \
+ [ -f "${T}"/environment ] ; then
+ # The environment may have been extracted from environment.bz2 or
+ # may have come from another version of ebuild.sh or something.
+ # In any case, preprocess it to prevent any potential interference.
+ # NOTE: export ${FOO}=... requires quoting, unlike normal exports
+ __preprocess_ebuild_env || \
+ die "error processing environment"
+ # Colon separated SANDBOX_* variables need to be cumulative.
+ for x in SANDBOX_DENY SANDBOX_READ SANDBOX_PREDICT SANDBOX_WRITE ; do
+ export PORTAGE_${x}="${!x}"
+ done
+ PORTAGE_SANDBOX_ON=${SANDBOX_ON}
+ export SANDBOX_ON=1
+ source "${T}"/environment || \
+ die "error sourcing environment"
+ # We have to temporarily disable sandbox since the
+ # SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+ # may be unusable (triggering spurious sandbox violations)
+ # until we've merged them with our current values.
+ export SANDBOX_ON=0
+ for x in SANDBOX_DENY SANDBOX_PREDICT SANDBOX_READ SANDBOX_WRITE ; do
+ y="PORTAGE_${x}"
+ if [ -z "${!x}" ] ; then
+ export ${x}="${!y}"
+ elif [ -n "${!y}" ] && [ "${!y}" != "${!x}" ] ; then
+ # filter out dupes
+ export ${x}="$(printf "${!y}:${!x}" | tr ":" "\0" | \
+ sort -z -u | tr "\0" ":")"
+ fi
+ export ${x}="${!x%:}"
+ unset PORTAGE_${x}
+ done
+ unset x y
+ export SANDBOX_ON=${PORTAGE_SANDBOX_ON}
+ unset PORTAGE_SANDBOX_ON
+ [[ -n $EAPI ]] || EAPI=0
+fi
+
+if ___eapi_enables_globstar; then
+ shopt -s globstar
+fi
+
+# Convert quoted paths to array.
+eval "PORTAGE_ECLASS_LOCATIONS=(${PORTAGE_ECLASS_LOCATIONS})"
+
+# Source the ebuild every time for FEATURES=noauto, so that ebuild
+# modifications take effect immediately.
+if ! has "$EBUILD_PHASE" clean cleanrm ; then
+ if [[ $EBUILD_PHASE = depend || ! -f $T/environment || \
+ -f $PORTAGE_BUILDDIR/.ebuild_changed || \
+ " ${FEATURES} " == *" noauto "* ]] ; then
+ # The bashrcs get an opportunity here to set aliases that will be expanded
+ # during sourcing of ebuilds and eclasses.
+ __source_all_bashrcs
+
+ # When EBUILD_PHASE != depend, INHERITED comes pre-initialized
+ # from cache. In order to make INHERITED content independent of
+ # EBUILD_PHASE during inherit() calls, we unset INHERITED after
+ # we make a backup copy for QA checks.
+ __INHERITED_QA_CACHE=$INHERITED
+
+ # *DEPEND and IUSE will be set during the sourcing of the ebuild.
+ # In order to ensure correct interaction between ebuilds and
+ # eclasses, they need to be unset before this process of
+ # interaction begins.
+ unset EAPI DEPEND RDEPEND PDEPEND HDEPEND INHERITED IUSE REQUIRED_USE \
+ ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
+ E_HDEPEND
+
+ if [[ $PORTAGE_DEBUG != 1 || ${-/x/} != $- ]] ; then
+ source "$EBUILD" || die "error sourcing ebuild"
+ else
+ set -x
+ source "$EBUILD" || die "error sourcing ebuild"
+ set +x
+ fi
+
+ if [[ "${EBUILD_PHASE}" != "depend" ]] ; then
+ RESTRICT=${PORTAGE_RESTRICT}
+ [[ -e $PORTAGE_BUILDDIR/.ebuild_changed ]] && \
+ rm "$PORTAGE_BUILDDIR/.ebuild_changed"
+ fi
+
+ [ "${EAPI+set}" = set ] || EAPI=0
+
+ # export EAPI for helpers (especially since we unset it above)
+ export EAPI
+
+ if ___eapi_has_RDEPEND_DEPEND_fallback; then
+ export RDEPEND=${RDEPEND-${DEPEND}}
+ debug-print "RDEPEND: not set... Setting to: ${DEPEND}"
+ fi
+
+ # add in dependency info from eclasses
+ IUSE+="${IUSE:+ }${E_IUSE}"
+ DEPEND+="${DEPEND:+ }${E_DEPEND}"
+ RDEPEND+="${RDEPEND:+ }${E_RDEPEND}"
+ PDEPEND+="${PDEPEND:+ }${E_PDEPEND}"
+ HDEPEND+="${HDEPEND:+ }${E_HDEPEND}"
+ REQUIRED_USE+="${REQUIRED_USE:+ }${E_REQUIRED_USE}"
+
+ unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND E_HDEPEND \
+ __INHERITED_QA_CACHE
+
+ # alphabetically ordered by $EBUILD_PHASE value
+ case ${EAPI} in
+ 0|1)
+ _valid_phases="src_compile pkg_config pkg_info src_install
+ pkg_nofetch pkg_postinst pkg_postrm pkg_preinst pkg_prerm
+ pkg_setup src_test src_unpack"
+ ;;
+ 2|3)
+ _valid_phases="src_compile pkg_config src_configure pkg_info
+ src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+ src_prepare pkg_prerm pkg_setup src_test src_unpack"
+ ;;
+ *)
+ _valid_phases="src_compile pkg_config src_configure pkg_info
+ src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+ src_prepare pkg_prerm pkg_pretend pkg_setup src_test src_unpack"
+ ;;
+ esac
+
+ DEFINED_PHASES=
+ for _f in $_valid_phases ; do
+ if declare -F $_f >/dev/null ; then
+ _f=${_f#pkg_}
+ DEFINED_PHASES+=" ${_f#src_}"
+ fi
+ done
+ [[ -n $DEFINED_PHASES ]] || DEFINED_PHASES=-
+
+ unset _f _valid_phases
+
+ if [[ $EBUILD_PHASE != depend ]] ; then
+
+ if has distcc $FEATURES ; then
+ [[ -n $DISTCC_LOG ]] && addwrite "${DISTCC_LOG%/*}"
+ fi
+
+ if has ccache $FEATURES ; then
+
+ if [[ -n $CCACHE_DIR ]] ; then
+ addread "$CCACHE_DIR"
+ addwrite "$CCACHE_DIR"
+ fi
+
+ [[ -n $CCACHE_SIZE ]] && ccache -M $CCACHE_SIZE &> /dev/null
+ fi
+
+ if [[ -n $QA_PREBUILT ]] ; then
+
+ # these ones support fnmatch patterns
+ QA_EXECSTACK+=" $QA_PREBUILT"
+ QA_TEXTRELS+=" $QA_PREBUILT"
+ QA_WX_LOAD+=" $QA_PREBUILT"
+
+ # these ones support regular expressions, so translate
+ # fnmatch patterns to regular expressions
+ for x in QA_DT_NEEDED QA_FLAGS_IGNORED QA_PRESTRIPPED QA_SONAME ; do
+ if [[ $(declare -p $x 2>/dev/null) = declare\ -a* ]] ; then
+ eval "$x=(\"\${$x[@]}\" ${QA_PREBUILT//\*/.*})"
+ else
+ eval "$x+=\" ${QA_PREBUILT//\*/.*}\""
+ fi
+ done
+
+ unset x
+ fi
+
+ # This needs to be exported since prepstrip is a separate shell script.
+ [[ -n $QA_PRESTRIPPED ]] && export QA_PRESTRIPPED
+ eval "[[ -n \$QA_PRESTRIPPED_${ARCH/-/_} ]] && \
+ export QA_PRESTRIPPED_${ARCH/-/_}"
+ fi
+ fi
+fi
+
+# unset USE_EXPAND variables that contain only the special "*" token
+for x in ${USE_EXPAND} ; do
+ [ "${!x}" == "*" ] && unset ${x}
+done
+unset x
+
+if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+then
+ export DEBUGBUILD=1
+fi
+
+if [[ $EBUILD_PHASE = depend ]] ; then
+ export SANDBOX_ON="0"
+ set -f
+
+ if [ -n "${dbkey}" ] ; then
+ if [ ! -d "${dbkey%/*}" ]; then
+ install -d -g ${PORTAGE_GID} -m2775 "${dbkey%/*}"
+ fi
+ # Make it group writable. 666&~002==664
+ umask 002
+ fi
+
+ auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
+ DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
+ PROPERTIES DEFINED_PHASES HDEPEND UNUSED_04
+ UNUSED_03 UNUSED_02 UNUSED_01"
+
+ if ! ___eapi_has_HDEPEND; then
+ unset HDEPEND
+ fi
+
+ # The extra $(echo) commands remove newlines.
+ if [ -n "${dbkey}" ] ; then
+ > "${dbkey}"
+ for f in ${auxdbkeys} ; do
+ echo $(echo ${!f}) >> "${dbkey}" || exit $?
+ done
+ else
+ for f in ${auxdbkeys} ; do
+ eval "echo \$(echo \${!f}) 1>&${PORTAGE_PIPE_FD}" || exit $?
+ done
+ eval "exec ${PORTAGE_PIPE_FD}>&-"
+ fi
+ set +f
+else
+ # Note: readonly variables interfere with __preprocess_ebuild_env(), so
+ # declare them only after it has already run.
+ declare -r $PORTAGE_READONLY_METADATA $PORTAGE_READONLY_VARS
+ if ___eapi_has_prefix_variables; then
+ declare -r ED EPREFIX EROOT
+ fi
+
+ if [[ -n $EBUILD_SH_ARGS ]] ; then
+ (
+ # Don't allow subprocesses to inherit the pipe which
+ # emerge uses to monitor ebuild.sh.
+ if [[ -n ${PORTAGE_PIPE_FD} ]] ; then
+ eval "exec ${PORTAGE_PIPE_FD}>&-"
+ unset PORTAGE_PIPE_FD
+ fi
+ __ebuild_main ${EBUILD_SH_ARGS}
+ exit 0
+ )
+ exit $?
+ fi
+fi
+
+# Do not exit when ebuild.sh is sourced by other scripts.
+true
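
To make the inherit()/EXPORT_FUNCTIONS interplay above concrete: an eclass defines ${ECLASS}_<phase> functions and registers the phase names, and inherit() then evals one forwarding stub per name into the ebuild's namespace. A minimal sketch with a hypothetical eclass name:

    #!/bin/bash
    # What inherit() effectively generates after a hypothetical
    # "myeclass" runs EXPORT_FUNCTIONS src_unpack:
    myeclass_src_unpack() { echo "unpacking via myeclass" ; }
    eval "src_unpack() { myeclass_src_unpack \"\$@\" ; }"
    src_unpack   # prints: unpacking via myeclass
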
diff --git a/usr/lib/portage/bin/egencache b/usr/lib/portage/bin/egencache
new file mode 100755
index 0000000..0ea0a93
--- /dev/null
+++ b/usr/lib/portage/bin/egencache
@@ -0,0 +1,1088 @@
+#!/usr/bin/python -b
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# unicode_literals for compat with TextIOWrapper in Python 2
+from __future__ import print_function, unicode_literals
+
+import platform
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+ def exithandler(signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+ sys.exit(128 + signal.SIGINT)
+
+def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
+
+import io
+import logging
+import subprocess
+import time
+import textwrap
+import re
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os, _encodings, _unicode_encode, _unicode_decode
+from _emerge.MetadataRegen import MetadataRegen
+from portage.cache.cache_errors import CacheError, StatCollision
+from portage.const import TIMESTAMP_FORMAT
+from portage.manifest import guessManifestFileType
+from portage.package.ebuild._parallel_manifest.ManifestScheduler import ManifestScheduler
+from portage.util import cmp_sort_key, writemsg_level
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage import cpv_getkey
+from portage.dep import Atom, isjustname
+from portage.versions import pkgsplit, vercmp
+from portage.const import EPREFIX
+
+try:
+ from xml.etree import ElementTree
+except ImportError:
+ pass
+else:
+ try:
+ from xml.parsers.expat import ExpatError
+ except ImportError:
+ pass
+ else:
+ from repoman.utilities import parse_metadata_use
+
+from repoman.utilities import FindVCS
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+def parse_args(args):
+ usage = "egencache [options] <action> ... [atom] ..."
+ parser = ArgumentParser(usage=usage)
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--update",
+ action="store_true",
+ help="update metadata/md5-cache/ (generate as necessary)")
+ actions.add_argument("--update-use-local-desc",
+ action="store_true",
+ help="update the use.local.desc file from metadata.xml")
+ actions.add_argument("--update-changelogs",
+ action="store_true",
+ help="update the ChangeLog files from SCM logs")
+ actions.add_argument("--update-manifests",
+ action="store_true",
+ help="update manifests")
+
+ common = parser.add_argument_group('Common options')
+ common.add_argument("--repo",
+ action="store",
+ help="name of repo to operate on")
+ common.add_argument("--config-root",
+ help="location of portage config files",
+ dest="portage_configroot")
+ common.add_argument("--gpg-dir",
+ help="override the PORTAGE_GPG_DIR variable",
+ dest="gpg_dir")
+ common.add_argument("--gpg-key",
+ help="override the PORTAGE_GPG_KEY variable",
+ dest="gpg_key")
+ common.add_argument("--portdir",
+ help="override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
+ dest="portdir")
+ common.add_argument("--portdir-overlay",
+ help="override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)",
+ dest="portdir_overlay")
+ common.add_argument("--repositories-configuration",
+ help="override configuration of repositories (in format of repos.conf)",
+ dest="repositories_configuration")
+ common.add_argument("--sign-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override layout.conf sign-manifests setting")
+ common.add_argument("--strict-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override \"strict\" FEATURES setting")
+ common.add_argument("--thin-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override layout.conf thin-manifests setting")
+ common.add_argument("--tolerant",
+ action="store_true",
+ help="exit successfully if only minor errors occurred")
+ common.add_argument("--ignore-default-opts",
+ action="store_true",
+ help="do not use the EGENCACHE_DEFAULT_OPTS environment variable")
+ common.add_argument("--write-timestamp",
+ action="store_true",
+ help="write metadata/timestamp.chk as required for rsync repositories")
+
+ update = parser.add_argument_group('--update options')
+ update.add_argument("--cache-dir",
+ help="location of the metadata cache",
+ dest="cache_dir")
+ update.add_argument("-j", "--jobs",
+ type=int,
+ action="store",
+ help="max ebuild processes to spawn")
+ update.add_argument("--load-average",
+ type=float,
+ action="store",
+ help="max load allowed when spawning multiple jobs",
+ dest="load_average")
+ update.add_argument("--rsync",
+ action="store_true",
+ help="enable rsync stat collision workaround " + \
+ "for bug 139134 (use with --update)")
+
+ uld = parser.add_argument_group('--update-use-local-desc options')
+ uld.add_argument("--preserve-comments",
+ action="store_true",
+ help="preserve the comments from the existing use.local.desc file")
+ uld.add_argument("--use-local-desc-output",
+ help="output file for use.local.desc data (or '-' for stdout)",
+ dest="uld_output")
+
+ options, args = parser.parse_known_args(args)
+
+ if options.jobs:
+ jobs = None
+ try:
+ jobs = int(options.jobs)
+ except ValueError:
+ jobs = -1
+
+ if jobs < 1:
+ parser.error("Invalid: --jobs='%s'" % \
+ (options.jobs,))
+
+ options.jobs = jobs
+
+ else:
+ options.jobs = None
+
+ if options.load_average:
+ try:
+ load_average = float(options.load_average)
+ except ValueError:
+ load_average = 0.0
+
+ if load_average <= 0.0:
+ parser.error("Invalid: --load-average='%s'" % \
+ (options.load_average,))
+
+ options.load_average = load_average
+
+ else:
+ options.load_average = None
+
+ options.config_root = options.portage_configroot
+ if options.config_root is not None and \
+ not os.path.isdir(options.config_root):
+ parser.error("Not a directory: --config-root='%s'" % \
+ (options.config_root,))
+
+ if options.cache_dir is not None:
+ if not os.path.isdir(options.cache_dir):
+ parser.error("Not a directory: --cache-dir='%s'" % \
+ (options.cache_dir,))
+ if not os.access(options.cache_dir, os.W_OK):
+ parser.error("Write access denied: --cache-dir='%s'" % \
+ (options.cache_dir,))
+
+ if options.portdir is not None:
+ writemsg_level("egencache: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+ if options.portdir_overlay is not None:
+ writemsg_level("egencache: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ for atom in args:
+ try:
+ atom = portage.dep.Atom(atom)
+ except portage.exception.InvalidAtom:
+ parser.error('Invalid atom: %s' % (atom,))
+
+ if not isjustname(atom):
+ parser.error('Atom is too specific: %s' % (atom,))
+
+ if options.update_use_local_desc:
+ try:
+ ElementTree
+ ExpatError
+ except NameError:
+ parser.error('--update-use-local-desc requires python with USE=xml!')
+
+ if options.uld_output == '-' and options.preserve_comments:
+ parser.error('--preserve-comments cannot be used when outputting to stdout')
+
+ return parser, options, args
+
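+# Usage sketch (illustrative, not part of the original script): the parser
+# above accepts invocations along these lines, where the repo name and the
+# job/load values are example inputs:
+#
+#   egencache --update --repo gentoo --jobs 4 --load-average 3.0
+#   egencache --update-use-local-desc --preserve-comments
+#
+# Any trailing arguments are parsed as category/package atoms that restrict
+# the operation to the named packages.
+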
+class GenCache(object):
+ def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None,
+ rsync=False):
+ # The caller must set portdb.porttrees in order to constrain
+ # findname, cp_list, and cpv_list to the desired tree.
+ tree = portdb.porttrees[0]
+ self._portdb = portdb
+ self._eclass_db = portdb.repositories.get_repo_for_location(tree).eclass_db
+ self._auxdbkeys = portdb._known_keys
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = cp_iter is None
+ if cp_iter is not None:
+ self._cp_set = set(cp_iter)
+ cp_iter = iter(self._cp_set)
+ self._cp_missing = self._cp_set.copy()
+ else:
+ self._cp_set = None
+ self._cp_missing = set()
+ write_auxdb = "metadata-transfer" in portdb.settings.features
+ self._regen = MetadataRegen(portdb, cp_iter=cp_iter,
+ consumer=self._metadata_callback,
+ max_jobs=max_jobs, max_load=max_load,
+ write_auxdb=write_auxdb, main=True)
+ self.returncode = os.EX_OK
+ conf = portdb.repositories.get_repo_for_location(tree)
+ self._trg_caches = tuple(conf.iter_pregenerated_caches(
+ self._auxdbkeys, force=True, readonly=False))
+ if not self._trg_caches:
+ raise Exception("cache formats '%s' aren't supported" %
+ (" ".join(conf.cache_formats),))
+
+ if rsync:
+ for trg_cache in self._trg_caches:
+ if hasattr(trg_cache, 'raise_stat_collision'):
+ trg_cache.raise_stat_collision = True
+ # Make _metadata_callback write this cache first, in case
+ # it raises a StatCollision and triggers mtime
+ # modification.
+ self._trg_caches = tuple([trg_cache] +
+ [x for x in self._trg_caches if x is not trg_cache])
+
+ self._existing_nodes = set()
+
+ def _metadata_callback(self, cpv, repo_path, metadata,
+ ebuild_hash, eapi_supported):
+ self._existing_nodes.add(cpv)
+ self._cp_missing.discard(cpv_getkey(cpv))
+
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we don't write cache
+ # entries for unsupported EAPIs.
+ if metadata is not None and eapi_supported:
+ if metadata.get('EAPI') == '0':
+ del metadata['EAPI']
+ for trg_cache in self._trg_caches:
+ self._write_cache(trg_cache,
+ cpv, repo_path, metadata, ebuild_hash)
+
+ def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
+
+ if not hasattr(trg_cache, 'raise_stat_collision'):
+ # This cache does not avoid redundant writes automatically,
+ # so check for an identical existing entry before writing.
+ # This prevents unnecessary disk writes and can also prevent
+ # unnecessary rsync transfers.
+ try:
+ dest = trg_cache[cpv]
+ except (KeyError, CacheError):
+ pass
+ else:
+ if trg_cache.validate_entry(dest,
+ ebuild_hash, self._eclass_db):
+ identical = True
+ for k in self._auxdbkeys:
+ if dest.get(k, '') != metadata.get(k, ''):
+ identical = False
+ break
+ if identical:
+ return
+
+ try:
+ chf = trg_cache.validation_chf
+ metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
+ try:
+ trg_cache[cpv] = metadata
+ except StatCollision as sc:
+ # If the content of a cache entry changes and neither the
+ # file mtime nor size changes, it will prevent rsync from
+ # detecting changes. Cache backends may raise this
+ # exception from _setitem() if they detect this type of stat
+ # collision. These exceptions are handled by bumping the
+ # mtime on the ebuild (and the corresponding cache entry).
+ # See bug #139134. It is convenient to include checks for
+ # redundant writes along with the internal StatCollision
+ # detection code, so for caches with the
+ # raise_stat_collision attribute, we do not need to
+ # explicitly check for redundant writes like we do for the
+ # other cache types above.
+ max_mtime = sc.mtime
+ for _ec, ec_hash in metadata['_eclasses_'].items():
+ if max_mtime < ec_hash.mtime:
+ max_mtime = ec_hash.mtime
+ if max_mtime == sc.mtime:
+ max_mtime += 1
+ max_mtime = long(max_mtime)
+ try:
+ os.utime(ebuild_hash.location, (max_mtime, max_mtime))
+ except OSError as e:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, e),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ ebuild_hash.mtime = max_mtime
+ metadata['_mtime_'] = max_mtime
+ trg_cache[cpv] = metadata
+ self._portdb.auxdb[repo_path][cpv] = metadata
+
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, ce),
+ level=logging.ERROR, noiselevel=-1)
+
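+ # Editorial note (not original code): a StatCollision means the new
+ # cache entry would have the same size and mtime as the old one, so
+ # rsync's default quick-check would skip it. Bumping the ebuild mtime
+ # by at least one second, as _write_cache() does above, changes the
+ # recorded _mtime_ and makes the update visible to rsync.
+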
+ def run(self):
+ signum = run_main_scheduler(self._regen)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ self.returncode |= self._regen.returncode
+
+ for trg_cache in self._trg_caches:
+ self._cleanse_cache(trg_cache)
+
+ def _cleanse_cache(self, trg_cache):
+ cp_missing = self._cp_missing
+ dead_nodes = set()
+ if self._global_cleanse:
+ try:
+ for cpv in trg_cache:
+ cp = cpv_getkey(cpv)
+ if cp is None:
+ self.returncode |= 1
+ writemsg_level(
+ "Unable to parse cp for '%s'\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ dead_nodes.add(cpv)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % \
+ (trg_cache.location, ce),
+ level=logging.ERROR, noiselevel=-1)
+
+ else:
+ cp_set = self._cp_set
+ try:
+ for cpv in trg_cache:
+ cp = cpv_getkey(cpv)
+ if cp is None:
+ self.returncode |= 1
+ writemsg_level(
+ "Unable to parse cp for '%s'\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ cp_missing.discard(cp)
+ if cp in cp_set:
+ dead_nodes.add(cpv)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % \
+ (trg_cache.location, ce),
+ level=logging.ERROR, noiselevel=-1)
+
+ if cp_missing:
+ self.returncode |= 1
+ for cp in sorted(cp_missing):
+ writemsg_level(
+ "No ebuilds or cache entries found for '%s'\n" % (cp,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if dead_nodes:
+ dead_nodes.difference_update(self._existing_nodes)
+ for k in dead_nodes:
+ try:
+ del trg_cache[k]
+ except KeyError:
+ pass
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "%s deleting stale cache: %s\n" % (k, ce),
+ level=logging.ERROR, noiselevel=-1)
+
+ if not trg_cache.autocommits:
+ try:
+ trg_cache.commit()
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "committing target: %s\n" % (ce,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if hasattr(trg_cache, '_prune_empty_dirs'):
+ trg_cache._prune_empty_dirs()
+
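+# Minimal driver sketch (hypothetical; egencache_main() below is the real
+# entry point). GenCache regenerates the pregenerated cache(s) for a
+# single repository:
+#
+#   portdb.porttrees = [repo_path]   # constrain to one tree, as required
+#   gen_cache = GenCache(portdb, max_jobs=4, max_load=2.0)
+#   gen_cache.run()
+#   sys.exit(gen_cache.returncode)
+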
+class GenUseLocalDesc(object):
+ def __init__(self, portdb, output=None,
+ preserve_comments=False):
+ self.returncode = os.EX_OK
+ self._portdb = portdb
+ self._output = output
+ self._preserve_comments = preserve_comments
+
+ def run(self):
+ repo_path = self._portdb.porttrees[0]
+ ops = {'<':0, '<=':1, '=':2, '>=':3, '>':4}
+
+ if self._output is None or self._output != '-':
+ if self._output is None:
+ prof_path = os.path.join(repo_path, 'profiles')
+ desc_path = os.path.join(prof_path, 'use.local.desc')
+ try:
+ os.mkdir(prof_path)
+ except OSError:
+ pass
+ else:
+ desc_path = self._output
+
+ try:
+ if self._preserve_comments:
+ # Probe in binary mode, in order to avoid
+ # potential character encoding issues.
+ output = open(_unicode_encode(desc_path,
+ encoding=_encodings['fs'], errors='strict'), 'r+b')
+ else:
+ output = io.open(_unicode_encode(desc_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except IOError as e:
+ if not self._preserve_comments or \
+ os.path.isfile(desc_path):
+ writemsg_level(
+ "ERROR: failed to open output file %s: %s\n" \
+ % (desc_path, e), level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 2
+ return
+
+ # Open in r+b mode failed because the file doesn't
+ # exist yet. We can probably recover if we disable
+ # preserve_comments mode now.
+ writemsg_level(
+ "WARNING: --preserve-comments enabled, but " + \
+ "output file not found: %s\n" % (desc_path,),
+ level=logging.WARNING, noiselevel=-1)
+ self._preserve_comments = False
+ try:
+ output = io.open(_unicode_encode(desc_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except IOError as e:
+ writemsg_level(
+ "ERROR: failed to open output file %s: %s\n" \
+ % (desc_path, e), level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 2
+ return
+ else:
+ output = sys.stdout
+
+ if self._preserve_comments:
+ while True:
+ pos = output.tell()
+ if not output.readline().startswith(b'#'):
+ break
+ output.seek(pos)
+ output.truncate()
+ output.close()
+
+ # Finished probing comments in binary mode, now append
+ # in text mode.
+ output = io.open(_unicode_encode(desc_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ output.write('\n')
+ else:
+ output.write(textwrap.dedent('''\
+ # This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
+ # your descriptions to your package's metadata.xml ONLY.
+ # * generated automatically using egencache *
+
+ '''))
+
+ # The cmp function no longer exists in python3, so we implement
+ # our own here under a slightly different name to avoid any
+ # confusion with the builtin, which we never want to rely on.
+ def cmp_func(a, b):
+ if a is None or b is None:
+ # None can't be compared with other types in python3.
+ if a is None and b is None:
+ return 0
+ elif a is None:
+ return -1
+ else:
+ return 1
+ return (a > b) - (a < b)
+
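+ # For example (illustrative): cmp_func(1, 2) == -1, cmp_func(2, 2) == 0,
+ # and cmp_func('a', None) == 1, mirroring python2's cmp() while also
+ # tolerating None operands.
+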
+ class _MetadataTreeBuilder(ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings
+ since Python >=2.7
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+ for cp in self._portdb.cp_all():
+ metadata_path = os.path.join(repo_path, cp, 'metadata.xml')
+ try:
+ metadata = ElementTree.parse(_unicode_encode(metadata_path,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=ElementTree.XMLParser(
+ target=_MetadataTreeBuilder()))
+ except IOError:
+ pass
+ except (ExpatError, EnvironmentError) as e:
+ writemsg_level(
+ "ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 1
+ else:
+ try:
+ usedict = parse_metadata_use(metadata)
+ except portage.exception.ParseError as e:
+ writemsg_level(
+ "ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 1
+ else:
+ for flag in sorted(usedict):
+ def atomcmp(atoma, atomb):
+ # None is better than an atom; that's why we reverse the args
+ if atoma is None or atomb is None:
+ return cmp_func(atomb, atoma)
+ # Same for plain PNs (.operator is None then)
+ elif atoma.operator is None or atomb.operator is None:
+ return cmp_func(atomb.operator, atoma.operator)
+ # Version matching
+ elif atoma.cpv != atomb.cpv:
+ return vercmp(atoma.version, atomb.version)
+ # Versions match, let's fallback to operator matching
+ else:
+ return cmp_func(ops.get(atoma.operator, -1),
+ ops.get(atomb.operator, -1))
+
+ def _Atom(key):
+ if key is not None:
+ return Atom(key)
+ return None
+
+ resdict = usedict[flag]
+ if len(resdict) == 1:
+ resdesc = next(iter(resdict.items()))[1]
+ else:
+ try:
+ reskeys = dict((_Atom(k), k) for k in resdict)
+ except portage.exception.InvalidAtom as e:
+ writemsg_level(
+ "ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 1
+ resdesc = next(iter(resdict.items()))[1]
+ else:
+ resatoms = sorted(reskeys, key=cmp_sort_key(atomcmp))
+ resdesc = resdict[reskeys[resatoms[-1]]]
+
+ output.write('%s:%s - %s\n' % (cp, flag, resdesc))
+
+ output.close()
+
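+# Each emitted line follows the use.local.desc format, e.g. (hypothetical
+# package and flag) "app-misc/foo:bar - description of the bar flag"; when
+# several atoms in metadata.xml describe the same flag, the description of
+# the highest-sorting atom wins.
+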
+if sys.hexversion < 0x3000000:
+ _filename_base = unicode
+else:
+ _filename_base = str
+
+class _special_filename(_filename_base):
+ """
+ Helps to sort file names by file type and other criteria.
+ """
+ def __new__(cls, status_change, file_name):
+ return _filename_base.__new__(cls, status_change + file_name)
+
+ def __init__(self, status_change, file_name):
+ _filename_base.__init__(self)
+ self.status_change = status_change
+ self.file_name = file_name
+ self.file_type = guessManifestFileType(file_name)
+
+ @staticmethod
+ def file_type_lt(a, b):
+ """
+ Defines an ordering between file types.
+ """
+ first = a.file_type
+ second = b.file_type
+ if first == second:
+ return False
+
+ if first == "EBUILD":
+ return True
+ elif first == "MISC":
+ return second in ("EBUILD",)
+ elif first == "AUX":
+ return second in ("EBUILD", "MISC")
+ elif first == "DIST":
+ return second in ("EBUILD", "MISC", "AUX")
+ elif first is None:
+ return False
+ else:
+ raise ValueError("Unknown file type '%s'" % first)
+
+ def __lt__(self, other):
+ """
+ Compare different file names, first by file type and then
+ for ebuilds by version and lexicographically for others.
+ EBUILD < MISC < AUX < DIST < None
+ """
+ if self.__class__ != other.__class__:
+ raise NotImplementedError
+
+ # Sort by file type as defined by file_type_lt().
+ if self.file_type_lt(self, other):
+ return True
+ elif self.file_type_lt(other, self):
+ return False
+
+ # Files have the same type.
+ if self.file_type == "EBUILD":
+ # Sort by version. Lowest first.
+ ver = "-".join(pkgsplit(self.file_name[:-7])[1:3])
+ other_ver = "-".join(pkgsplit(other.file_name[:-7])[1:3])
+ return vercmp(ver, other_ver) < 0
+ else:
+ # Sort lexicographically.
+ return self.file_name < other.file_name
+
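+# Ordering sketch (file names are hypothetical): sorting Manifest entries
+# such as "foo-1.0.ebuild", "foo-1.10.ebuild" and "metadata.xml" with
+# _special_filename yields the ebuilds first, version-sorted (1.0 before
+# 1.10), followed by the MISC entry.
+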
+class GenChangeLogs(object):
+ def __init__(self, portdb):
+ self.returncode = os.EX_OK
+ self._portdb = portdb
+ self._wrapper = textwrap.TextWrapper(
+ width = 78,
+ initial_indent = ' ',
+ subsequent_indent = ' '
+ )
+
+ @staticmethod
+ def grab(cmd):
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ return _unicode_decode(p.communicate()[0],
+ encoding=_encodings['stdio'], errors='strict')
+
+ def generate_changelog(self, cp):
+ try:
+ output = io.open('ChangeLog',
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except IOError as e:
+ writemsg_level(
+ "ERROR: failed to open ChangeLog for %s: %s\n" % (cp,e,),
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 2
+ return
+
+ output.write(textwrap.dedent('''\
+ # ChangeLog for %s
+ # Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
+ # $Header: $
+
+ ''' % (cp, time.strftime('%Y'))))
+
+ # now grab all the commits
+ commits = self.grab(['git', 'rev-list', 'HEAD', '--', '.']).split()
+
+ for c in commits:
+ # Explaining the arguments:
+ # --name-status to get a list of added/removed files
+ # --no-renames to avoid getting more complex records on the list
+ # --format to get the timestamp, author and commit description
+ # --root to make it work fine even with the initial commit
+ # --relative to get paths relative to ebuilddir
+ # -r (recursive) to get per-file changes
+ # then the commit-id and path.
+
+ cinfo = self.grab(['git', 'diff-tree', '--name-status', '--no-renames',
+ '--format=%ct %cN <%cE>%n%B', '--root', '--relative', '-r',
+ c, '--', '.']).rstrip('\n').split('\n')
+
+ # Expected output:
+ # timestamp Author Name <author@email>
+ # commit message l1
+ # ...
+ # commit message ln
+ #
+ # status1 filename1
+ # ...
+ # statusn filenamen
+
+ changed = []
+ for n, l in enumerate(reversed(cinfo)):
+ if not l:
+ body = cinfo[1:-n-1]
+ break
+ else:
+ f = l.split()
+ if f[1] == 'Manifest':
+ pass # XXX: remanifest commits?
+ elif f[1] == 'ChangeLog':
+ pass
+ elif f[0].startswith('A'):
+ changed.append(_special_filename("+", f[1]))
+ elif f[0].startswith('D'):
+ changed.append(_special_filename("-", f[1]))
+ elif f[0].startswith('M'):
+ changed.append(_special_filename("", f[1]))
+ else:
+ writemsg_level(
+ "ERROR: unexpected git file status for %s: %s\n" % (cp,f,),
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode |= 1
+
+ if not changed:
+ continue
+
+ (ts, author) = cinfo[0].split(' ', 1)
+ date = time.strftime('%d %b %Y', time.gmtime(float(ts)))
+
+ changed = [str(x) for x in sorted(changed)]
+
+ wroteheader = False
+ # Reverse the sort order for headers.
+ for c in reversed(changed):
+ if c.startswith('+') and c.endswith('.ebuild'):
+ output.write('*%s (%s)\n' % (c[1:-7], date))
+ wroteheader = True
+ if wroteheader:
+ output.write('\n')
+
+ # strip '<cp>: ', '[<cp>] ', and similar
+ body[0] = re.sub(r'^\W*' + re.escape(cp) + r'\W+', '', body[0])
+ # strip trailing newline
+ if not body[-1]:
+ body = body[:-1]
+ # strip git-svn id
+ if body[-1].startswith('git-svn-id:') and not body[-2]:
+ body = body[:-2]
+ # strip the repoman version/manifest note
+ if body[-1] == ' (Signed Manifest commit)' or body[-1] == ' (Unsigned Manifest commit)':
+ body = body[:-1]
+ if body[-1].startswith('(Portage version:') and body[-1].endswith(')'):
+ body = body[:-1]
+ if not body[-1]:
+ body = body[:-1]
+
+ # don't break filenames on hyphens
+ self._wrapper.break_on_hyphens = False
+ output.write(self._wrapper.fill(
+ '%s; %s %s:' % (date, author, ', '.join(changed))))
+ # but feel free to break commit messages there
+ self._wrapper.break_on_hyphens = True
+ output.write(
+ '\n%s\n\n' % '\n'.join(self._wrapper.fill(x) for x in body))
+
+ output.close()
+
+ def run(self):
+ repo_path = self._portdb.porttrees[0]
+ os.chdir(repo_path)
+
+ if 'git' not in FindVCS():
+ writemsg_level(
+ "ERROR: --update-changelogs supported only in git repos\n",
+ level=logging.ERROR, noiselevel=-1)
+ self.returncode = 127
+ return
+
+ for cp in self._portdb.cp_all():
+ os.chdir(os.path.join(repo_path, cp))
+ # Determine whether ChangeLog is up-to-date by comparing
+ # the newest commit timestamp with the ChangeLog timestamp.
+ lmod = self.grab(['git', 'log', '--format=%ct', '-1', '.'])
+ if not lmod:
+ # This cp has not been added to the repo.
+ continue
+
+ try:
+ cmod = os.stat('ChangeLog').st_mtime
+ except OSError:
+ cmod = 0
+
+ if float(cmod) < float(lmod):
+ self.generate_changelog(cp)
+
+def egencache_main(args):
+
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
+
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, atoms = parse_args(args)
+
+ config_root = options.config_root
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+ elif options.portdir_overlay is not None:
+ env['PORTDIR_OVERLAY'] = options.portdir_overlay
+
+ if options.cache_dir is not None:
+ env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+ if options.portdir is not None:
+ env['PORTDIR'] = options.portdir
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = portage.util.shlex_split(
+ settings.get('EGENCACHE_DEFAULT_OPTS', ''))
+
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
+
+ if options.cache_dir is not None:
+ env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ if not (options.update or options.update_use_local_desc or
+ options.update_changelogs or options.update_manifests):
+ parser.error('No action specified')
+ return 1
+
+ if options.repo is None:
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+ return 1
+
+ repo_config = settings.repositories.get_repo_for_location(repo_path)
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
+ if options.update and 'metadata-transfer' not in settings.features:
+ # Forcibly enable metadata-transfer if portdbapi has a pregenerated
+ # cache that does not support eclass validation.
+ cache = repo_config.get_pregenerated_cache(
+ portage.dbapi.dbapi._known_keys, readonly=True)
+ if cache is not None and not cache.complete_eclass_entries:
+ settings.features.add('metadata-transfer')
+ cache = None
+
+ settings.lock()
+
+ portdb = portage.portdbapi(mysettings=settings)
+
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
+ if options.update:
+ if options.cache_dir is not None:
+ # already validated earlier
+ pass
+ else:
+ # We check write access after the portdbapi constructor
+ # has had an opportunity to create it. This ensures that
+ # we don't use the cache in the "volatile" mode which is
+ # undesirable for egencache.
+ if not os.access(settings["PORTAGE_DEPCACHEDIR"], os.W_OK):
+ writemsg_level("ecachegen: error: " + \
+ "write access denied: %s\n" % (settings["PORTAGE_DEPCACHEDIR"],),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if options.sign_manifests is not None:
+ repo_config.sign_manifest = options.sign_manifests == 'y'
+
+ if options.thin_manifests is not None:
+ repo_config.thin_manifest = options.thin_manifests == 'y'
+
+ gpg_cmd = None
+ gpg_vars = None
+ force_sign_key = None
+
+ if options.update_manifests:
+ if repo_config.sign_manifest:
+
+ sign_problem = False
+ gpg_dir = None
+ gpg_cmd = settings.get("PORTAGE_GPG_SIGNING_COMMAND")
+ if gpg_cmd is None:
+ writemsg_level("egencache: error: "
+ "PORTAGE_GPG_SIGNING_COMMAND is unset! "
+ "Is make.globals missing?\n",
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+ elif "${PORTAGE_GPG_KEY}" in gpg_cmd and \
+ options.gpg_key is None and \
+ "PORTAGE_GPG_KEY" not in settings:
+ writemsg_level("egencache: error: "
+ "PORTAGE_GPG_KEY is unset!\n",
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+ elif "${PORTAGE_GPG_DIR}" in gpg_cmd:
+ if options.gpg_dir is not None:
+ gpg_dir = options.gpg_dir
+ elif "PORTAGE_GPG_DIR" not in settings:
+ gpg_dir = os.path.expanduser("~/.gnupg")
+ else:
+ gpg_dir = os.path.expanduser(settings["PORTAGE_GPG_DIR"])
+ if not os.access(gpg_dir, os.X_OK):
+ writemsg_level(("egencache: error: "
+ "Unable to access directory: "
+ "PORTAGE_GPG_DIR='%s'\n") % gpg_dir,
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+
+ if sign_problem:
+ writemsg_level("egencache: You may disable manifest "
+ "signatures with --sign-manifests=n or by setting "
+ "\"sign-manifests = false\" in metadata/layout.conf\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ gpg_vars = {}
+ if gpg_dir is not None:
+ gpg_vars["PORTAGE_GPG_DIR"] = gpg_dir
+ gpg_var_names = []
+ if options.gpg_key is None:
+ gpg_var_names.append("PORTAGE_GPG_KEY")
+ else:
+ gpg_vars["PORTAGE_GPG_KEY"] = options.gpg_key
+
+ for k in gpg_var_names:
+ v = settings.get(k)
+ if v is not None:
+ gpg_vars[k] = v
+
+ force_sign_key = gpg_vars.get("PORTAGE_GPG_KEY")
+
+ ret = [os.EX_OK]
+
+ if options.update:
+ cp_iter = None
+ if atoms:
+ cp_iter = iter(atoms)
+
+ gen_cache = GenCache(portdb, cp_iter=cp_iter,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ rsync=options.rsync)
+ gen_cache.run()
+ if options.tolerant:
+ ret.append(os.EX_OK)
+ else:
+ ret.append(gen_cache.returncode)
+
+ if options.update_manifests:
+
+ cp_iter = None
+ if atoms:
+ cp_iter = iter(atoms)
+
+ event_loop = global_event_loop()
+ scheduler = ManifestScheduler(portdb, cp_iter=cp_iter,
+ gpg_cmd=gpg_cmd, gpg_vars=gpg_vars,
+ force_sign_key=force_sign_key,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ event_loop=event_loop)
+
+ signum = run_main_scheduler(scheduler)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ if options.tolerant:
+ ret.append(os.EX_OK)
+ else:
+ ret.append(scheduler.returncode)
+
+ if options.update_use_local_desc:
+ gen_desc = GenUseLocalDesc(portdb,
+ output=options.uld_output,
+ preserve_comments=options.preserve_comments)
+ gen_desc.run()
+ ret.append(gen_desc.returncode)
+
+ if options.update_changelogs:
+ gen_clogs = GenChangeLogs(portdb)
+ gen_clogs.run()
+ ret.append(gen_clogs.returncode)
+
+ if options.write_timestamp:
+ timestamp_path = os.path.join(repo_path, 'metadata', 'timestamp.chk')
+ try:
+ with open(timestamp_path, 'w') as f:
+ f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))
+ except IOError:
+ ret.append(os.EX_IOERR)
+ else:
+ ret.append(os.EX_OK)
+
+ return max(ret)
+
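+# Behavioral note (editorial): each requested action appends its return
+# code to `ret`, so the process exits with the worst (highest) of them;
+# --tolerant downgrades --update and --update-manifests failures to
+# os.EX_OK.
+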
+if __name__ == "__main__":
+ portage._disable_legacy_globals()
+ portage.util.noiselimit = -1
+ sys.exit(egencache_main(sys.argv[1:]))
diff --git a/usr/lib/portage/bin/emaint b/usr/lib/portage/bin/emaint
new file mode 100755
index 0000000..a634c0e
--- /dev/null
+++ b/usr/lib/portage/bin/emaint
@@ -0,0 +1,42 @@
+#!/usr/bin/python -bO
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""System health checks and maintenance utilities.
+"""
+
+from __future__ import print_function
+
+import sys
+import errno
+# This block ensures that ^C interrupts are handled quietly.
+try:
+ import signal
+
+ def exithandler(signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+ sys.exit(1)
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage.emaint.main import emaint_main
+
+try:
+ emaint_main(sys.argv[1:])
+except IOError as e:
+ if e.errno == errno.EACCES:
+ print("\nemaint: Need superuser access")
+ sys.exit(1)
+ else:
+ raise
diff --git a/usr/lib/portage/bin/emerge b/usr/lib/portage/bin/emerge
new file mode 100755
index 0000000..43cfdcd
--- /dev/null
+++ b/usr/lib/portage/bin/emerge
@@ -0,0 +1,85 @@
+#!/usr/bin/python -b
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import platform
+import signal
+import sys
+
+# This block ensures that ^C interrupts are handled quietly. We handle
+# KeyboardInterrupt instead of installing a SIGINT handler, since
+# exiting from signal handlers intermittently causes python to ignore
+# the SystemExit exception with a message like this:
+# Exception SystemExit: 130 in <function remove at 0x7fd2146c1320> ignored
+try:
+
+ def exithandler(signum, _frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, exithandler)
+ # Prevent "[Errno 32] Broken pipe" exceptions when
+ # writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
+
+ if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+ else:
+ debug_signum = signal.SIGUSR1
+
+ signal.signal(debug_signum, debug_signal)
+
+ from os import path as osp
+ if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+ portage._internal_caller = True
+ portage._disable_legacy_globals()
+ from _emerge.main import emerge_main
+
+ if __name__ == "__main__":
+ from portage.exception import IsADirectory, ParseError, \
+ PermissionDenied
+ try:
+ retval = emerge_main()
+ except PermissionDenied as e:
+ sys.stderr.write("Permission denied: '%s'\n" % str(e))
+ sys.exit(e.errno)
+ except IsADirectory as e:
+ sys.stderr.write("'%s' is a directory, but should be a file!\n"
+ "See portage man page for information on "
+ "which files may be directories.\n" %
+ str(e))
+ sys.exit(e.errno)
+ except ParseError as e:
+ sys.stderr.write("%s\n" % str(e))
+ sys.exit(1)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
+ # If an unexpected exception occurs then we don't want the
+ # mod_echo output to obscure the traceback, so dump the
+ # mod_echo output before showing the traceback.
+ import traceback
+ tb_str = traceback.format_exc()
+ try:
+ from portage.elog import mod_echo
+ except ImportError:
+ pass
+ else:
+ mod_echo.finalize()
+ sys.stderr.write(tb_str)
+ sys.exit(1)
+ sys.exit(retval)
+
+except KeyboardInterrupt:
+ sys.stderr.write("\n\nExiting on signal %(signal)s\n" %
+ {"signal": signal.SIGINT})
+ sys.stderr.flush()
+ sys.exit(128 + signal.SIGINT)
diff --git a/usr/lib/portage/bin/emerge-webrsync b/usr/lib/portage/bin/emerge-webrsync
new file mode 100755
index 0000000..6493a85
--- /dev/null
+++ b/usr/lib/portage/bin/emerge-webrsync
@@ -0,0 +1,532 @@
+#!/bin/bash
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author: Karl Trygve Kalleberg <karltk@gentoo.org>
+# Rewritten from the old, Perl-based emerge-webrsync script
+# Author: Alon Bar-Lev <alon.barlev@gmail.com>
+# Major rewrite from Karl's scripts.
+
+# TODO:
+# - all output should probably be converted to e* funcs
+# - add support for ROOT
+
+#
+# gpg key import
+# KEY_ID=0x96D8BF6D
+# gpg --homedir /etc/portage/gnupg --keyserver subkeys.pgp.net --recv-keys $KEY_ID
+# gpg --homedir /etc/portage/gnupg --edit-key $KEY_ID trust
+#
+
+# Only echo if in verbose mode
+vvecho() { [[ ${do_verbose} -eq 1 ]] && echo "$@" ; }
+# Only echo if not in verbose mode
+nvecho() { [[ ${do_verbose} -eq 0 ]] && echo "$@" ; }
+# warning echos
+wecho() { echo "${argv0##*/}: warning: $*" 1>&2 ; }
+# error echos
+eecho() { echo "${argv0##*/}: error: $*" 1>&2 ; }
+
+argv0=$0
+
+# Use portageq from the same directory/prefix as the current script, so
+# that we don't have to rely on PATH including the current EPREFIX.
+scriptpath=${BASH_SOURCE[0]}
+if [ -x "${scriptpath%/*}/portageq" ]; then
+ portageq=${scriptpath%/*}/portageq
+elif type -P portageq > /dev/null ; then
+ portageq=portageq
+else
+ eecho "could not find 'portageq'; aborting"
+ exit 1
+fi
+eval "$("${portageq}" envvar -v DISTDIR EPREFIX FEATURES \
+ FETCHCOMMAND GENTOO_MIRRORS \
+ PORTAGE_BIN_PATH PORTAGE_CONFIGROOT PORTAGE_GPG_DIR \
+ PORTAGE_NICENESS PORTAGE_REPOSITORIES PORTAGE_RSYNC_EXTRA_OPTS \
+ PORTAGE_RSYNC_OPTS PORTAGE_TMPDIR \
+ USERLAND http_proxy ftp_proxy \
+ PORTAGE_USER PORTAGE_GROUP)"
+export http_proxy ftp_proxy
+
+# PREFIX LOCAL: use Prefix servers, just because we want this and infra
+# can't support us yet
+GENTOO_MIRRORS="http://rsync.prefix.bitzolder.nl"
+# END PREFIX LOCAL
+
+source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
+
+repo_name=gentoo
+repo_location=$(__repo_attr "${repo_name}" location)
+if [[ -z ${repo_location} ]]; then
+ eecho "Repository '${repo_name}' not found"
+ exit 1
+fi
+repo_sync_type=$(__repo_attr "${repo_name}" sync-type)
+
+# If PORTAGE_NICENESS is overridden via the env then it will
+# still pass through the portageq call and override properly.
+if [ -n "${PORTAGE_NICENESS}" ]; then
+ renice $PORTAGE_NICENESS $$ > /dev/null
+fi
+
+do_verbose=0
+do_debug=0
+keep=false
+
+if has webrsync-gpg ${FEATURES} ; then
+ WEBSYNC_VERIFY_SIGNATURE=1
+else
+ WEBSYNC_VERIFY_SIGNATURE=0
+fi
+if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 -a -z "${PORTAGE_GPG_DIR}" ]; then
+ eecho "please set PORTAGE_GPG_DIR in make.conf"
+ exit 1
+fi
+
+do_tar() {
+ local file=$1; shift
+ local decompressor
+ case ${file} in
+ *.xz) decompressor="xzcat" ;;
+ *.bz2) decompressor="bzcat" ;;
+ *.gz) decompressor="zcat" ;;
+ *) decompressor="cat" ;;
+ esac
+ ${decompressor} "${file}" | tar "$@"
+ _pipestatus=${PIPESTATUS[*]}
+ [[ ${_pipestatus// /} -eq 0 ]]
+}
+
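+# Usage sketch (snapshot name is illustrative): list a snapshot's contents
+# with
+#   do_tar portage-20150530.tar.xz -tf - | head
+# The suffix selects the decompressor, and the final test reports failure
+# if any stage of the pipeline failed.
+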
+get_utc_date_in_seconds() {
+ date -u +"%s"
+}
+
+get_date_part() {
+ local utc_time_in_secs="$1"
+ local part="$2"
+
+ if [[ ${USERLAND} == BSD ]] ; then
+ date -r ${utc_time_in_secs} -u +"${part}"
+ else
+ date -d @${utc_time_in_secs} -u +"${part}"
+ fi
+}
+
+get_utc_second_from_string() {
+ local s="$1"
+ if [[ ${USERLAND} == BSD ]] ; then
+ # Specify zeros for the least significant digits, or else those
+ # digits are inherited from the current system clock time.
+ date -juf "%Y%m%d%H%M.%S" "${s}0000.00" +"%s"
+ else
+ date -d "${s:0:4}-${s:4:2}-${s:6:2}" -u +"%s"
+ fi
+}
+
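+# For example (illustrative): get_utc_second_from_string 20150530 prints
+# the epoch seconds for 2015-05-30 00:00:00 UTC on both GNU and BSD
+# userlands.
+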
+get_portage_timestamp() {
+ local portage_current_timestamp=0
+
+ if [ -f "${repo_location}/metadata/timestamp.x" ]; then
+ portage_current_timestamp=$(cut -f 1 -d " " "${repo_location}/metadata/timestamp.x" )
+ fi
+
+ echo "${portage_current_timestamp}"
+}
+
+fetch_file() {
+ local URI="$1"
+ local FILE="$2"
+ local opts
+
+ if [ "${FETCHCOMMAND/wget/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue $(nvecho -q)"
+ elif [ "${FETCHCOMMAND/curl/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue-at - $(nvecho -s -f)"
+ else
+ rm -f "${DISTDIR}/${FILE}"
+ fi
+
+ __vecho "Fetching file ${FILE} ..."
+ # already set DISTDIR=
+ eval "${FETCHCOMMAND} ${opts}"
+ if [[ $? -eq 0 && -s ${DISTDIR}/${FILE} ]] ; then
+ return 0
+ else
+ rm -f "${DISTDIR}/${FILE}"
+ return 1
+ fi
+}
+
+check_file_digest() {
+ local digest="$1"
+ local file="$2"
+ local r=1
+
+ __vecho "Checking digest ..."
+
+ if type -P md5sum > /dev/null; then
+ local md5sum_output=$(md5sum "${file}")
+ local digest_content=$(< "${digest}")
+ [ "${md5sum_output%%[[:space:]]*}" = "${digest_content%%[[:space:]]*}" ] && r=0
+ elif type -P md5 > /dev/null; then
+ [ "$(md5 -q "${file}")" == "$(cut -d ' ' -f 1 "${digest}")" ] && r=0
+ else
+ eecho "cannot check digest: no suitable md5/md5sum binaries found"
+ fi
+
+ return "${r}"
+}
+
+check_file_signature() {
+ local signature="$1"
+ local file="$2"
+ local r=1
+
+ if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 ]; then
+
+ __vecho "Checking signature ..."
+
+ if type -P gpg > /dev/null; then
+ gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
+ else
+ eecho "cannot check signature: gpg binary not found"
+ exit 1
+ fi
+ else
+ r=0
+ fi
+
+ return "${r}"
+}
+
+get_snapshot_timestamp() {
+ local file="$1"
+
+ do_tar "${file}" --to-stdout -xf - portage/metadata/timestamp.x | cut -f 1 -d " "
+}
+
+sync_local() {
+ local file="$1"
+
+ __vecho "Syncing local tree ..."
+
+ # PREFIX LOCAL: use PORTAGE_USER and PORTAGE_GROUP
+ local ownership="${PORTAGE_USER:-portage}:${PORTAGE_GROUP:-portage}"
+ # END PREFIX LOCAL
+ if has usersync ${FEATURES} ; then
+ case "${USERLAND}" in
+ BSD)
+ ownership=$(stat -f '%Su:%Sg' "${repo_location}")
+ ;;
+ *)
+ ownership=$(stat -c '%U:%G' "${repo_location}")
+ ;;
+ esac
+ fi
+
+ if type -P tarsync > /dev/null ; then
+ local chown_opts="-o ${ownership%:*} -g ${ownership#*:}"
+ chown ${ownership} "${repo_location}" > /dev/null 2>&1 || chown_opts=""
+ if ! tarsync $(vvecho -v) -s 1 ${chown_opts} \
+ -e /distfiles -e /packages -e /local "${file}" "${repo_location}"; then
+ eecho "tarsync failed; tarball is corrupt? (${file})"
+ return 1
+ fi
+ else
+ if ! do_tar "${file}" xf -; then
+ eecho "tar failed to extract the image. tarball is corrupt? (${file})"
+ rm -fr portage
+ return 1
+ fi
+
+ # Free disk space
+ ${keep} || rm -f "${file}"
+
+ local rsync_opts="${PORTAGE_RSYNC_OPTS} ${PORTAGE_RSYNC_EXTRA_OPTS}"
+ if chown ${ownership} portage > /dev/null 2>&1; then
+ chown -R ${ownership} portage
+ rsync_opts+=" --owner --group"
+ fi
+ cd portage
+ rsync ${rsync_opts} . "${repo_location%%/}"
+ cd ..
+
+ __vecho "Cleaning up ..."
+ rm -fr portage
+ fi
+
+ if has metadata-transfer ${FEATURES} ; then
+ __vecho "Updating cache ..."
+ emerge --metadata
+ fi
+ local post_sync=${PORTAGE_CONFIGROOT}etc/portage/bin/post_sync
+ [ -x "${post_sync}" ] && "${post_sync}"
+ # --quiet suppresses output if there are no relevant news items
+ has news ${FEATURES} && emerge --check-news --quiet
+ return 0
+}
+
+do_snapshot() {
+ local ignore_timestamp="$1"
+ local date="$2"
+
+ local r=1
+
+ local base_file="portage-${date}.tar"
+
+ local have_files=0
+ local mirror
+
+ local compressions=""
+ # xz is not supported in app-arch/tarsync, so use
+ # bz2 format if we have tarsync.
+ if ! type -P tarsync > /dev/null ; then
+ type -P xzcat > /dev/null && compressions="${compressions} xz"
+ fi
+ type -P bzcat > /dev/null && compressions="${compressions} bz2"
+ type -P zcat > /dev/null && compressions="${compressions} gz"
+ if [[ -z ${compressions} ]] ; then
+ eecho "unable to locate any decompressors (xzcat or bzcat or zcat)"
+ exit 1
+ fi
+
+ for mirror in ${GENTOO_MIRRORS} ; do
+
+ mirror=${mirror%/}
+ __vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
+
+ for compression in ${compressions} ; do
+ local file="portage-${date}.tar.${compression}"
+ local digest="${file}.md5sum"
+ local signature="${file}.gpgsig"
+
+ if [ -s "${DISTDIR}/${file}" -a -s "${DISTDIR}/${digest}" -a -s "${DISTDIR}/${signature}" ] ; then
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ if [ ${have_files} -eq 0 ] ; then
+ fetch_file "${mirror}/snapshots/${digest}" "${digest}" && \
+ fetch_file "${mirror}/snapshots/${signature}" "${signature}" && \
+ fetch_file "${mirror}/snapshots/${file}" "${file}" && \
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ #
+ # If timestamp is invalid
+ # we want to try and retrieve
+ # from a different mirror
+ #
+ if [ ${have_files} -eq 1 ]; then
+
+ __vecho "Getting snapshot timestamp ..."
+ local snapshot_timestamp=$(get_snapshot_timestamp "${DISTDIR}/${file}")
+
+ if [ ${ignore_timestamp} == 0 ]; then
+ if [ ${snapshot_timestamp} -lt $(get_portage_timestamp) ]; then
+ wecho "portage is newer than snapshot"
+ have_files=0
+ fi
+ else
+ local utc_seconds=$(get_utc_second_from_string "${date}")
+
+ #
+ # Check that this snapshot
+ # is what it claims to be ...
+ #
+ if [ ${snapshot_timestamp} -lt ${utc_seconds} ] || \
+ [ ${snapshot_timestamp} -gt $((${utc_seconds}+ 2*86400)) ]; then
+
+ wecho "snapshot timestamp is not in acceptable period"
+ have_files=0
+ fi
+ fi
+ fi
+
+ if [ ${have_files} -eq 1 ]; then
+ break
+ else
+ #
+ # Remove files and use a different mirror
+ #
+ rm -f "${DISTDIR}/${file}" "${DISTDIR}/${digest}" "${DISTDIR}/${signature}"
+ fi
+ done
+
+ [ ${have_files} -eq 1 ] && break
+ done
+
+ if [ ${have_files} -eq 1 ]; then
+ sync_local "${DISTDIR}/${file}" && r=0
+ else
+ __vecho "${date} snapshot was not found"
+ fi
+
+ ${keep} || rm -f "${DISTDIR}/${file}" "${DISTDIR}/${digest}" "${DISTDIR}/${signature}"
+ return "${r}"
+}
+
+do_latest_snapshot() {
+ local attempts=0
+ local r=1
+
+ __vecho "Fetching most recent snapshot ..."
+
+ # The snapshot for a given day is generated at 00:45 UTC on the following
+ # day, so the current day's snapshot (going by UTC time) hasn't been
+ # generated yet. Therefore, always start by looking for the previous day's
+ # snapshot (for attempts=1, subtract 1 day from the current UTC time).
+
+ # Timestamps that differ by less than 2 hours
+ # are considered to be approximately equal.
+ local min_time_diff=$(( 2 * 60 * 60 ))
+
+ local existing_timestamp=$(get_portage_timestamp)
+ local timestamp_difference
+ local timestamp_problem
+ local approx_snapshot_time
+ local start_time=$(get_utc_date_in_seconds)
+ local start_hour=$(get_date_part ${start_time} "%H")
+
+ # Daily snapshots are created at 00:45 and are not
+ # available until after 01:00. Don't waste time trying
+ # to fetch a snapshot before it's been created.
+ if [ ${start_hour} -lt 1 ] ; then
+ (( start_time -= 86400 ))
+ fi
+ local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
+ local snapshot_date_seconds=$(get_utc_second_from_string ${snapshot_date})
+
+ while (( ${attempts} < 40 )) ; do
+ (( attempts++ ))
+ (( snapshot_date_seconds -= 86400 ))
+ # snapshots are created at 00:45
+ (( approx_snapshot_time = snapshot_date_seconds + 86400 + 2700 ))
+ (( timestamp_difference = existing_timestamp - approx_snapshot_time ))
+ [ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
+ snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
+
+ timestamp_problem=""
+ if [ ${timestamp_difference} -eq 0 ]; then
+ timestamp_problem="is identical to"
+ elif [ ${timestamp_difference} -lt ${min_time_diff} ]; then
+ timestamp_problem="is possibly identical to"
+ elif [ ${approx_snapshot_time} -lt ${existing_timestamp} ] ; then
+ timestamp_problem="is newer than"
+ fi
+
+ if [ -n "${timestamp_problem}" ]; then
+ ewarn "Latest snapshot date: ${snapshot_date}"
+ ewarn
+ ewarn "Approximate snapshot timestamp: ${approx_snapshot_time}"
+ ewarn " Current local timestamp: ${existing_timestamp}"
+ ewarn
+ echo -e "The current local timestamp" \
+ "${timestamp_problem} the" \
+ "timestamp of the latest" \
+ "snapshot. In order to force sync," \
+ "use the --revert option or remove" \
+ "the timestamp file located at" \
+ "'${repo_location}/metadata/timestamp.x'." | fmt -w 70 | \
+ while read -r line ; do
+ ewarn "${line}"
+ done
+ r=0
+ break
+ fi
+
+ if do_snapshot 0 "${snapshot_date}"; then
+ r=0
+ break
+ fi
+ done
+
+ return "${r}"
+}
+
+usage() {
+ cat <<-EOF
+ Usage: $0 [options]
+
+ Options:
+ --revert=yyyymmdd Revert to snapshot
+ -k, --keep Keep snapshots in DISTDIR (don't delete)
+ -q, --quiet Only output errors
+ -v, --verbose Enable verbose output
+ -x, --debug Enable debug output
+ -h, --help This help screen (duh!)
+ EOF
+ if [[ -n $* ]] ; then
+ printf "\nError: %s\n" "$*" 1>&2
+ exit 1
+ else
+ exit 0
+ fi
+}
+
+main() {
+ local arg
+ local revert_date
+
+ for arg in "$@" ; do
+ local v=${arg#*=}
+ case ${arg} in
+ -h|--help) usage ;;
+ -k|--keep) keep=true ;;
+ -q|--quiet) PORTAGE_QUIET=1 ;;
+ -v|--verbose) do_verbose=1 ;;
+ -x|--debug) do_debug=1 ;;
+ --revert=*) revert_date=${v} ;;
+ *) usage "Invalid option '${arg}'" ;;
+ esac
+ done
+
+ [[ -d ${repo_location} ]] || mkdir -p "${repo_location}"
+ if [[ ! -w ${repo_location} ]] ; then
+ eecho "Repository '${repo_name}' is not writable: ${repo_location}"
+ exit 1
+ fi
+
+ [[ -d ${PORTAGE_TMPDIR}/portage ]] || mkdir -p "${PORTAGE_TMPDIR}/portage"
+ TMPDIR=$(mktemp -d "${PORTAGE_TMPDIR}/portage/webrsync-XXXXXX")
+ if [[ ! -w ${TMPDIR} ]] ; then
+ eecho "TMPDIR is not writable: ${TMPDIR}"
+ exit 1
+ fi
+ trap 'cd / ; rm -rf "${TMPDIR}"' EXIT
+ cd "${TMPDIR}" || exit 1
+
+ ${keep} || DISTDIR=${TMPDIR}
+ [ ! -d "${DISTDIR}" ] && mkdir -p "${DISTDIR}"
+
+ if ${keep} && [[ ! -w ${DISTDIR} ]] ; then
+ eecho "DISTDIR is not writable: ${DISTDIR}"
+ exit 1
+ fi
+
+ # This is a sanity check to help prevent people like funtoo users
+ # from accidentally wiping out their git tree.
+ if [[ -n ${repo_sync_type} && ${repo_sync_type} != rsync ]] ; then
+ echo "The current sync-type attribute of repository 'gentoo' is not set to 'rsync':" >&2
+ echo >&2
+ echo " sync-type=${repo_sync_type}" >&2
+ echo >&2
+ echo "If you intend to use emerge-webrsync then please" >&2
+ echo "adjust sync-type and sync-uri attributes to refer to rsync." >&2
+ echo "emerge-webrsync exiting due to abnormal sync-type setting." >&2
+ exit 1
+ fi
+
+ [[ ${do_debug} -eq 1 ]] && set -x
+
+ if [[ -n ${revert_date} ]] ; then
+ do_snapshot 1 "${revert_date}"
+ else
+ do_latest_snapshot
+ fi
+}
+
+main "$@"
diff --git a/usr/lib/portage/bin/emirrordist b/usr/lib/portage/bin/emirrordist
new file mode 100755
index 0000000..0368eee
--- /dev/null
+++ b/usr/lib/portage/bin/emirrordist
@@ -0,0 +1,13 @@
+#!/usr/bin/python -b
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+from portage._emirrordist.main import emirrordist_main
+
+if __name__ == "__main__":
+ sys.exit(emirrordist_main(sys.argv[1:]))
diff --git a/usr/lib/portage/bin/env-update b/usr/lib/portage/bin/env-update
new file mode 100755
index 0000000..c43459b
--- /dev/null
+++ b/usr/lib/portage/bin/env-update
@@ -0,0 +1,41 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import sys
+
+def usage(status):
+ print("Usage: env-update [--no-ldconfig]")
+ print("")
+ print("See the env-update(1) man page for more info")
+ sys.exit(status)
+
+if "-h" in sys.argv or "--help" in sys.argv:
+ usage(0)
+
+makelinks=1
+if "--no-ldconfig" in sys.argv:
+ makelinks=0
+ sys.argv.pop(sys.argv.index("--no-ldconfig"))
+
+if len(sys.argv) > 1:
+ print("!!! Invalid command line options!\n")
+ usage(1)
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+
+try:
+ portage.env_update(makelinks)
+except IOError as e:
+ if e.errno == errno.EACCES:
+ print("env-update: Need superuser access")
+ sys.exit(1)
+ else:
+ raise
diff --git a/usr/lib/portage/bin/etc-update b/usr/lib/portage/bin/etc-update
new file mode 100755
index 0000000..c27379b
--- /dev/null
+++ b/usr/lib/portage/bin/etc-update
@@ -0,0 +1,733 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Author Brandon Low <lostlogic@gentoo.org>
+# Mike Frysinger <vapier@gentoo.org>
+#
+# Previous version (from which I've borrowed a few bits) by:
+# Jochem Kossen <j.kossen@home.nl>
+# Leo Lipelis <aeoo@gentoo.org>
+# Karl Trygve Kalleberg <karltk@gentoo.org>
+
+cd /
+
+type -P gsed >/dev/null && sed() { gsed "$@"; }
+
+get_config() {
+ # the sed here does:
+ # - strip off comments
+ # - match lines that set item in question
+ # - delete the "item =" part
+ # - store the actual value into the hold space
+ # - on the last line, restore the hold space and print it
+ # If there's more than one of the same configuration item, then
+ # the store to the hold space clobbers previous value so the last
+ # setting takes precedence.
+ local match=$1
+ eval $(sed -n -r \
+ -e 's:[[:space:]]*#.*$::' \
+ -e "/^[[:space:]]*${match}[[:space:]]*=/{s:^([^=]*)=[[:space:]]*([\"']{0,1})(.*)\2:\1=\2\3\2:;H}" \
+ -e '${g;p}' \
+ "${PORTAGE_CONFIGROOT}"etc/etc-update.conf)
+}
+
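+# Illustrative call (the variable name is an example): get_config "mode"
+# evals an assignment such as mode="0", taken from the last uncommented
+# "mode = ..." line in etc-update.conf, so later settings win.
+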
+cmd_var_is_valid() {
+ # return true if the first whitespace-separated token contained
+ # in "${1}" is an executable file, false otherwise
+ [[ -x $(type -P ${1%%[[:space:]]*}) ]]
+}
+
+diff_command() {
+ local cmd=${diff_command//%file1/$1}
+ ${cmd//%file2/$2}
+}
+
+# Usage: do_mv_ln [options] <src> <dst>
+# The last two args have to be files (not directories) so we can
+# handle a symlinked target sanely.
+do_mv_ln() {
+ local opts=( ${@:1:$(( $# - 2 ))} )
+ local src=${@:$(( $# - 1 )):1}
+ local dst=${@:$(( $# - 0 )):1}
+
+ if [[ -L ${dst} ]] ; then #330221
+ local lfile=$(readlink "${dst}")
+ [[ ${lfile} == /* ]] || lfile="${dst%/*}/${lfile}"
+ echo " Target is a symlink; replacing ${lfile}"
+ dst=${lfile}
+ fi
+
+ mv "${opts[@]}" "${src}" "${dst}"
+}
+
+scan() {
+ ${QUIET} || echo "Scanning Configuration files..."
+ rm -rf "${TMP}"/files > /dev/null 2>&1
+ mkdir "${TMP}"/files || die "Failed mkdir command!"
+ count=0
+ input=0
+ local basename
+ local find_opts
+ local path
+
+ for path in ${SCAN_PATHS} ; do
+ path="${EROOT%/}${path}"
+
+ if [[ ! -d ${path} ]] ; then
+ [[ ! -f ${path} ]] && continue
+ basename="${path##*/}"
+ path="${path%/*}"
+ find_opts=( -maxdepth 1 )
+ else
+ basename=*
+ # Do not traverse hidden directories such as .svn or .git.
+ find_opts=( -name '.*' -type d -prune -o )
+ fi
+ ${case_insensitive} && \
+ find_opts+=( -iname ) || find_opts+=( -name )
+ find_opts+=( "._cfg????_${basename}" )
+ find_opts+=( ! -name '.*~' ! -iname '.*.bak' -print )
+
+ if [ ! -w "${path}" ] ; then
+ [ -e "${path}" ] || continue
+ die "Need write access to ${path}"
+ fi
+
+ local file ofile b=$'\001'
+ for file in $(find "${path}"/ "${find_opts[@]}" |
+ sed \
+ -e 's://*:/:g' \
+ -e "s:\(^.*/\)\(\._cfg[0-9]*_\)\(.*$\):\1\2\3$b\1$b\2$b\3:" |
+ sort -t"$b" -k2,2 -k4,4 -k3,3 |
+ LC_ALL=C cut -f1 -d"$b")
+ do
+ local rpath rfile cfg_file live_file
+ rpath=${file%/*}
+ rfile=${file##*/}
+ cfg_file="${rpath}/${rfile}"
+ live_file="${rpath}/${rfile:10}"
+
+ local mpath
+ for mpath in ${CONFIG_PROTECT_MASK}; do
+ mpath="${EROOT%/}${mpath}"
+ if [[ "${rpath}" == "${mpath}"* ]] ; then
+ ${QUIET} || echo "Updating masked file: ${live_file}"
+ mv "${cfg_file}" "${live_file}"
+ continue 2
+ fi
+ done
+ if [[ ! -f ${file} ]] ; then
+ ${QUIET} || echo "Skipping non-file ${file} ..."
+ continue
+ fi
+
+ if [[ "${ofile:10}" != "${rfile:10}" ]] ||
+ [[ ${opath} != ${rpath} ]]
+ then
+ MATCHES=0
+ if [[ ${eu_automerge} == "yes" ]] ; then
+ if [[ ! -e ${cfg_file} || ! -e ${live_file} ]] ; then
+ MATCHES=0
+ else
+ diff -Bbua "${cfg_file}" "${live_file}" | \
+ sed -n -r \
+ -e '/^[+-]/{/^([+-][\t ]*(#|$)|-{3} |\+{3} )/d;q1}'
+ : $(( MATCHES = ($? == 0) ))
+ fi
+
+ else
+ diff -Nbua "${cfg_file}" "${live_file}" |
+ sed -n \
+ -e '/# .Header:/d' \
+ -e '/^[+-][^+-]/q1'
+ : $(( MATCHES = ($? == 0) ))
+ fi
+
+ if [[ ${MATCHES} == 1 ]] ; then
+ ${QUIET} || echo "Automerging trivial changes in: ${live_file}"
+ do_mv_ln "${cfg_file}" "${live_file}"
+ continue
+ else
+ : $(( ++count ))
+ echo "${live_file}" > "${TMP}"/files/${count}
+ echo "${cfg_file}" >> "${TMP}"/files/${count}
+ ofile="${rfile}"
+ opath="${rpath}"
+ continue
+ fi
+ fi
+
+ if ! diff -Nbua "${cfg_file}" "${rpath}/${ofile}" |
+ sed -n \
+ -e '/# .Header:/d' \
+ -e '/^[+-][^+-]/q1'
+ then
+ echo "${cfg_file}" >> "${TMP}"/files/${count}
+ ofile="${rfile}"
+ opath="${rpath}"
+ else
+ mv "${cfg_file}" "${rpath}/${ofile}"
+ continue
+ fi
+ done
+ done
+}
+
+parse_automode_flag() {
+ case $1 in
+ -9)
+ local reply
+ read -p "Are you sure that you want to delete all updates (type YES): " reply
+ if [[ ${reply} != "YES" ]] ; then
+ echo "Did not get a 'YES', so ignoring request"
+ return 1
+ else
+ parse_automode_flag -7
+ export rm_opts=""
+ fi
+ ;;
+ -7)
+ input=0
+ export DELETE_ALL="yes"
+ ;;
+ -5)
+ parse_automode_flag -3
+ export mv_opts=" ${mv_opts} "
+ mv_opts="${mv_opts// -i / }"
+ NONINTERACTIVE_MV=true
+ ;;
+ -3)
+ input=0
+ export OVERWRITE_ALL="yes"
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+ return 0
+}
+
+sel_file() {
+ local -i isfirst=0
+ until [[ -f ${TMP}/files/${input} ]] || \
+ [[ ${input} == -1 ]] || \
+ [[ ${input} == -3 ]]
+ do
+ local allfiles=( $(cd "${TMP}"/files/ && printf '%s\n' * | sort -n) )
+ local isfirst=${allfiles[0]}
+
+ # Optimize: no point in building the whole file list if
+ # we're not actually going to talk to the user.
+ if [[ ${OVERWRITE_ALL} == "yes" || ${DELETE_ALL} == "yes" ]] ; then
+ input=0
+ else
+ local numfiles=${#allfiles[@]}
+ local numwidth=${#numfiles}
+ local file fullfile line
+ for file in "${allfiles[@]}" ; do
+ fullfile="${TMP}/files/${file}"
+ line=$(head -n1 "${fullfile}")
+ printf '%*i%s %s' ${numwidth} ${file} "${PAR}" "${line}"
+ if [[ ${mode} == 0 ]] ; then
+ local numupdates=$(( $(wc -l <"${fullfile}") - 1 ))
+ echo " (${numupdates})"
+ else
+ echo
+ fi
+ done > "${TMP}"/menuitems
+
+ clear
+
+ if [[ ${mode} == 0 ]] ; then
+ cat <<-EOF
+ The following is the list of files which need updating; each
+ configuration file is followed by a list of possible replacement files.
+ $(<"${TMP}"/menuitems)
+ Please select a file to edit by entering the corresponding number.
+ (don't use -3, -5, -7 or -9 if you're unsure what to do)
+ (-1 to exit) (${_3_HELP_TEXT})
+ (${_5_HELP_TEXT})
+ (${_7_HELP_TEXT})
+ EOF
+ printf " (${_9_HELP_TEXT}): "
+ input=$(read_int)
+ else
+ dialog \
+ --title "${title}" \
+ --menu "Please select a file to update" \
+ 0 0 0 $(<"${TMP}"/menuitems) \
+ 2> "${TMP}"/input \
+ || die "$(<"${TMP}"/input)\n\nUser termination!" 0
+ input=$(<"${TMP}"/input)
+ fi
+ : ${input:=0}
+
+ if [[ ${input} != 0 ]] ; then
+ parse_automode_flag ${input} || continue
+ fi
+ fi # -3 automerge
+ if [[ ${input} == 0 ]] ; then
+ input=${isfirst}
+ fi
+ done
+}
+
+user_special() {
+ local special="${PORTAGE_CONFIGROOT}etc/etc-update.special"
+
+ if [[ -r ${special} ]] ; then
+ if [[ -z $1 ]] ; then
+ error "user_special() called without arguments"
+ return 1
+ fi
+ local pat
+ while read -r pat ; do
+ echo "$1" | grep -q "${pat}" && return 0
+ done < "${special}"
+ fi
+ return 1
+}
+
+read_int() {
+ # Read an integer from stdin. Continuously loops until a valid integer is
+ # read. This is a workaround for odd behavior of bash when an attempt is
+ # made to store a value such as "1y" into an integer-only variable.
+ local my_input
+ while : ; do
+ read my_input
+ # failed integer conversions will break the loop unless they're enclosed
+ # in a subshell.
+ echo "${my_input}" | (declare -i x; read x) 2>/dev/null && break
+ printf 'Value "%s" is not valid. Please enter an integer value: ' "${my_input}" >&2
+ done
+ echo ${my_input}
+}
+
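+# Example behavior (illustrative): an input of "1y" is rejected and
+# reprompted, while "-3" passes the subshell integer check and is echoed
+# back to the caller.
+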
+do_file() {
+ interactive_echo() { [ "${OVERWRITE_ALL}" != "yes" ] && [ "${DELETE_ALL}" != "yes" ] && echo; }
+ interactive_echo
+ local -i my_input
+ local -i linecnt
+ local fullfile="${TMP}/files/${input}"
+ local ofile=$(head -n1 "${fullfile}")
+
+ # Walk through all the pending updates for this one file.
+ linecnt=$(wc -l <"${fullfile}")
+ while (( linecnt > 1 )) ; do
+ if (( linecnt == 2 )) ; then
+ # Only one update ... keeps things simple.
+ my_input=1
+ else
+ my_input=0
+ fi
+
+ # Optimize: no point in scanning the file list when we know
+ # we're just going to consume all the ones available.
+ if [[ ${OVERWRITE_ALL} == "yes" || ${DELETE_ALL} == "yes" ]] ; then
+ my_input=1
+ fi
+
+ # Figure out which file they wish to operate on.
+ while (( my_input <= 0 || my_input >= linecnt )) ; do
+ local fcount=0
+ for line in $(<"${fullfile}"); do
+ if (( fcount > 0 )); then
+ printf '%i%s %s\n' ${fcount} "${PAR}" "${line}"
+ fi
+ : $(( ++fcount ))
+ done > "${TMP}"/menuitems
+
+ if [[ ${mode} == 0 ]] ; then
+ echo "Below are the new config files for ${ofile}:"
+ cat "${TMP}"/menuitems
+ echo -n "Please select a file to process (-1 to exit this file): "
+ my_input=$(read_int)
+ else
+ dialog \
+ --title "${title}" \
+ --menu "Please select a file to process for ${ofile}" \
+ 0 0 0 $(<"${TMP}"/menuitems) \
+ 2> "${TMP}"/input \
+ || die "$(<"${TMP}"/input)\n\nUser termination!" 0
+ my_input=$(<"${TMP}"/input)
+ fi
+
+ if [[ ${my_input} == 0 ]] ; then
+ # Auto select the first file.
+ my_input=1
+ elif [[ ${my_input} == -1 ]] ; then
+ input=0
+ return
+ fi
+ done
+
+ # First line is the old file while the rest are the config files.
+ : $(( ++my_input ))
+ local file=$(sed -n -e "${my_input}p" "${fullfile}")
+ do_cfg "${file}" "${ofile}"
+
+ sed -i -e "${my_input}d" "${fullfile}"
+
+ : $(( --linecnt ))
+ done
+
+ interactive_echo
+ rm "${fullfile}"
+ : $(( --count ))
+}
+
+show_diff() {
+ clear
+ local file1=$1 file2=$2
+ if [[ ${using_editor} == 0 ]] ; then
+ (
+ echo "Showing differences between ${file1} and ${file2}"
+ diff_command "${file1}" "${file2}"
+ ) | ${pager}
+ else
+ echo "Beginning of differences between ${file1} and ${file2}"
+ diff_command "${file1}" "${file2}"
+ echo "End of differences between ${file1} and ${file2}"
+ fi
+}
+
+do_cfg() {
+ local file=$1
+ local ofile=$2
+ local -i my_input=0
+
+ until (( my_input == -1 )) || [ ! -f "${file}" ] ; do
+ if [[ "${OVERWRITE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+ my_input=1
+ elif [[ "${DELETE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+ my_input=2
+ else
+ show_diff "${ofile}" "${file}"
+ if [[ -L ${file} ]] ; then
+ cat <<-EOF
+
+ -------------------------------------------------------------
+ NOTE: File is a symlink to another file. REPLACE recommended.
+ The original file may simply have moved. Please review.
+ -------------------------------------------------------------
+
+ EOF
+ fi
+ cat <<-EOF
+
+ File: ${file}
+ 1) Replace original with update
+ 2) Delete update, keeping original as is
+ 3) Interactively merge original with update
+ 4) Show differences again
+ 5) Save update as example config
+ EOF
+ printf 'Please select from the menu above (-1 to ignore this update): '
+ my_input=$(read_int)
+ fi
+
+ case ${my_input} in
+ 1) echo "Replacing ${ofile} with ${file}"
+ do_mv_ln ${mv_opts} "${file}" "${ofile}"
+ [ -n "${OVERWRITE_ALL}" ] && my_input=-1
+ continue
+ ;;
+ 2) echo "Deleting ${file}"
+ rm ${rm_opts} "${file}"
+ [ -n "${DELETE_ALL}" ] && my_input=-1
+ continue
+ ;;
+ 3) do_merge "${file}" "${ofile}"
+ my_input=${?}
+# [ ${my_input} == 255 ] && my_input=-1
+ continue
+ ;;
+ 4) continue
+ ;;
+ 5) do_distconf "${file}" "${ofile}"
+ ;;
+ *) continue
+ ;;
+ esac
+ done
+}
+
+do_merge() {
+ # make sure we keep the merged file in the secure tempdir
+	# so we don't leak any information contained in said file
+	# (think of the case where the file has 0600 perms; during the
+ # merging process, the temp file gets umask perms!)
+
+ local file="${1}"
+ local ofile="${2}"
+ local mfile="${TMP}/${2}.merged"
+ local -i my_input=0
+ echo "${file} ${ofile} ${mfile}"
+
+ if [[ -e ${mfile} ]] ; then
+ echo "A previous version of the merged file exists, cleaning..."
+ rm ${rm_opts} "${mfile}"
+ fi
+
+ # since mfile will be like $TMP/path/to/original-file.merged, we
+ # need to make sure the full /path/to/ exists ahead of time
+ mkdir -p "${mfile%/*}"
+
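+	# merge_command comes from etc-update.conf and uses %merged/%orig/%new
+	# placeholders which are substituted below; the shipped default is
+	# typically "sdiff --suppress-common-lines --output=%merged %orig %new".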
+ until (( my_input == -1 )); do
+ echo "Merging ${file} and ${ofile}"
+ $(echo "${merge_command}" |
+ sed -e "s:%merged:${mfile}:g" \
+ -e "s:%orig:${ofile}:g" \
+ -e "s:%new:${file}:g")
+ until (( my_input == -1 )); do
+ cat <<-EOF
+ 1) Replace ${ofile} with merged file
+ 2) Show differences between merged file and original
+ 3) Remerge original with update
+ 4) Edit merged file
+ 5) Return to the previous menu
+ EOF
+ printf 'Please select from the menu above (-1 to exit, losing this merge): '
+ my_input=$(read_int)
+ case ${my_input} in
+ 1) echo "Replacing ${ofile} with ${mfile}"
+ if [[ ${USERLAND} == BSD ]] ; then
+ chown "$(stat -f %Su:%Sg "${ofile}")" "${mfile}"
+ chmod $(stat -f %Mp%Lp "${ofile}") "${mfile}"
+ else
+ chown --reference="${ofile}" "${mfile}"
+ chmod --reference="${ofile}" "${mfile}"
+ fi
+ do_mv_ln ${mv_opts} "${mfile}" "${ofile}"
+ rm ${rm_opts} "${file}"
+ return 255
+ ;;
+ 2) show_diff "${ofile}" "${mfile}"
+ continue
+ ;;
+ 3) break
+ ;;
+ 4) ${EDITOR:-nano -w} "${mfile}"
+ continue
+ ;;
+ 5) rm ${rm_opts} "${mfile}"
+ return 0
+ ;;
+ *) continue
+ ;;
+ esac
+ done
+ done
+ rm ${rm_opts} "${mfile}"
+ return 255
+}
+
+do_distconf() {
+ # search for any previously saved distribution config
+ # files and number the current one accordingly
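+	# (e.g. /etc/foo.conf is saved as /etc/foo.conf.dist_0000, the next
+	# copy as /etc/foo.conf.dist_0001, and so on)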
+
+ local file=$1 ofile=$2
+ local -i count
+ local suffix
+ local efile
+
+ for (( count = 0; count <= 9999; ++count )) ; do
+ suffix=$(printf ".dist_%04i" ${count})
+ efile="${ofile}${suffix}"
+ if [[ ! -f ${efile} ]] ; then
+ mv ${mv_opts} "${file}" "${efile}"
+ break
+ elif diff_command "${file}" "${efile}" &> /dev/null; then
+ # replace identical copy
+ mv "${file}" "${efile}"
+ break
+ fi
+ done
+}
+
+error() { echo "etc-update: ERROR: $*" 1>&2 ; return 1 ; }
+die() {
+ trap SIGTERM
+ trap SIGINT
+ local msg=$1 exitcode=${2:-1}
+
+ if [ ${exitcode} -eq 0 ] ; then
+ ${QUIET} || printf 'Exiting: %b\n' "${msg}"
+ scan > /dev/null
+ ! ${QUIET} && [ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
+ else
+ error "${msg}"
+ fi
+
+ rm -rf "${TMP}"
+ exit ${exitcode}
+}
+
+_3_HELP_TEXT="-3 to auto merge all files"
+_5_HELP_TEXT="-5 to auto-merge AND not use 'mv -i'"
+_7_HELP_TEXT="-7 to discard all updates"
+_9_HELP_TEXT="-9 to discard all updates AND not use 'rm -i'"
+usage() {
+ cat <<-EOF
+ etc-update: Handle configuration file updates
+
+ Usage: etc-update [options] [paths to scan]
+
+ If no paths are specified, then \${CONFIG_PROTECT} will be used.
+
+ Options:
+ -d, --debug Enable shell debugging
+ -h, --help Show help and run away
+ -p, --preen Automerge trivial changes only and quit
+ -q, --quiet Show only essential output
+ -v, --verbose Show settings and such along the way
+ -V, --version Show version and trundle away
+
+ --automode <mode>
+ ${_3_HELP_TEXT}
+ ${_5_HELP_TEXT}
+ ${_7_HELP_TEXT}
+ ${_9_HELP_TEXT}
+ EOF
+
+ [[ $# -gt 1 ]] && printf "\nError: %s\n" "${*:2}" 1>&2
+
+ exit ${1:-0}
+}
+
+#
+# Run the script
+#
+
+declare -i count=0
+declare input=0
+declare title="Gentoo's etc-update tool!"
+
+PREEN=false
+SET_X=false
+QUIET=false
+VERBOSE=false
+NONINTERACTIVE_MV=false
+while [[ -n $1 ]] ; do
+ case $1 in
+ -d|--debug) SET_X=true;;
+ -h|--help) usage;;
+ -p|--preen) PREEN=true;;
+ -q|--quiet) QUIET=true;;
+ -v|--verbose) VERBOSE=true;;
+ -V|--version) emerge --version; exit 0;;
+ --automode) parse_automode_flag $2 && shift || usage 1 "Invalid mode '$2'";;
+ -*) usage 1 "Invalid option '$1'";;
+ *) break;;
+ esac
+ shift
+done
+${SET_X} && set -x
+
+type -P portageq >/dev/null || die "missing portageq"
+portage_vars=(
+ CONFIG_PROTECT{,_MASK}
+ FEATURES
+ PORTAGE_CONFIGROOT
+ PORTAGE_INST_{G,U}ID
+ PORTAGE_TMPDIR
+ EROOT
+ USERLAND
+ NOCOLOR
+)
+eval $(${PORTAGE_PYTHON:+"${PORTAGE_PYTHON}"} "$(type -P portageq)" envvar -v ${portage_vars[@]})
+export PORTAGE_TMPDIR
+SCAN_PATHS=${*:-${CONFIG_PROTECT}}
+[[ " ${FEATURES} " == *" case-insensitive-fs "* ]] && \
+ case_insensitive=true || case_insensitive=false
+
+TMP="${PORTAGE_TMPDIR}/etc-update-$$"
+trap "die terminated" SIGTERM
+trap "die interrupted" SIGINT
+
+rm -rf "${TMP}" 2>/dev/null
+mkdir "${TMP}" || die "failed to create temp dir"
+# make sure we have a secure directory to work in
+chmod 0700 "${TMP}" || die "failed to set perms on temp dir"
+chown ${PORTAGE_INST_UID:-0}:${PORTAGE_INST_GID:-0} "${TMP}" || \
+ die "failed to set ownership on temp dir"
+
+# Get all the user settings from etc-update.conf
+cfg_vars=(
+ clear_term
+ eu_automerge
+ rm_opts
+ mv_opts
+ pager
+ diff_command
+ using_editor
+ merge_command
+ mode
+)
+# default them all to ""
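+# (the /%/= substitution appends "=" to every name, so this runs roughly
+# "eval clear_term= eu_automerge= rm_opts= mv_opts= pager= ... mode=")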
+eval ${cfg_vars[@]/%/=}
+# then extract them all from the conf in one shot
+# (ugly var at end is due to printf appending a '|' to last item)
+get_config "($(printf '%s|' "${cfg_vars[@]}")NOVARFOROLDMEN)"
+
+# finally setup any specific defaults
+: ${mode:="0"}
+if ! cmd_var_is_valid "${pager}" ; then
+ pager=${PAGER}
+ cmd_var_is_valid "${pager}" || pager=cat
+fi
+
+[[ ${clear_term} == "yes" ]] || clear() { :; }
+
+if [[ ${using_editor} == "0" ]] ; then
+ # Sanity check to make sure diff exists and works
+ echo > "${TMP}"/.diff-test-1
+ echo > "${TMP}"/.diff-test-2
+
+ if ! diff_command "${TMP}"/.diff-test-1 "${TMP}"/.diff-test-2 ; then
+ die "'${diff_command}' does not seem to work, aborting"
+ fi
+else
+ # NOTE: cmd_var_is_valid doesn't work with diff_command="eval emacs..."
+ # because it uses type -P.
+ if ! type ${diff_command%%[[:space:]]*} >/dev/null; then
+ die "'${diff_command}' does not seem to work, aborting"
+ fi
+fi
+
+if [[ ${mode} == "0" ]] ; then
+ PAR=")"
+else
+ PAR=""
+ if ! type dialog >/dev/null || ! dialog --help >/dev/null ; then
+ die "mode=1 and 'dialog' not found or not executable, aborting"
+ fi
+fi
+
+if ${NONINTERACTIVE_MV} ; then
+ export mv_opts=" ${mv_opts} "
+ mv_opts="${mv_opts// -i / }"
+fi
+
+if ${VERBOSE} ; then
+ for v in ${portage_vars[@]} ${cfg_vars[@]} TMP SCAN_PATHS ; do
+ echo "${v}=${!v}"
+ done
+fi
+
+scan
+
+${PREEN} && exit 0
+
+until (( input == -1 )); do
+ if (( count == 0 )); then
+ die "Nothing left to do; exiting. :)" 0
+ fi
+ sel_file
+ if (( input != -1 )); then
+ do_file
+ fi
+done
+
+die "User termination!" 0
diff --git a/usr/lib/portage/bin/filter-bash-environment.py b/usr/lib/portage/bin/filter-bash-environment.py
new file mode 100755
index 0000000..a4cdc54
--- /dev/null
+++ b/usr/lib/portage/bin/filter-bash-environment.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import codecs
+import io
+import os
+import re
+import sys
+
+here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
+func_start_re = re.compile(r'^[-\w]+\s*\(\)\s*$')
+func_end_re = re.compile(r'^\}$')
+
+var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?.*$')
+close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+readonly_re = re.compile(r'^declare\s+-(\S*)r(\S*)\s+')
+# declare without assignment
+var_declare_re = re.compile(r'^declare(\s+-\S+)?\s+([^=\s]+)\s*$')
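+# Illustrative lines matched by the patterns above (not exhaustive):
+#   var_assign_re:   FOO=bar / declare -x FOO="bar" / export FOO='bar'
+#   readonly_re:     declare -rx FOO="bar"
+#   var_declare_re:  declare FOO / declare -x FOO
+#   here_doc_re:     cat <<EOF / cat <<-EOF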
+
+def have_end_quote(quote, line):
+ """
+ Check if the line has an end quote (useful for handling multi-line
+ quotes). This handles escaped double quotes that may occur at the
+	end of a line. The POSIX spec does not allow escaping of single
+	quotes inside single quotes, so that case is not handled.
+ """
+ close_quote_match = close_quote_re.search(line)
+ return close_quote_match is not None and \
+ close_quote_match.group(1) == quote
+
+def filter_declare_readonly_opt(line):
+ readonly_match = readonly_re.match(line)
+ if readonly_match is not None:
+ declare_opts = ''
+ for i in (1, 2):
+ group = readonly_match.group(i)
+ if group is not None:
+ declare_opts += group
+ if declare_opts:
+ line = 'declare -%s %s' % \
+ (declare_opts, line[readonly_match.end():])
+ else:
+ line = 'declare ' + line[readonly_match.end():]
+ return line
+
+def filter_bash_environment(pattern, file_in, file_out):
+ # Filter out any instances of the \1 character from variable values
+ # since this character multiplies each time that the environment
+ # is saved (strange bash behavior). This can eventually result in
+ # mysterious 'Argument list too long' errors from programs that have
+ # huge strings of \1 characters in their environment. See bug #222091.
+ here_doc_delim = None
+ in_func = None
+ multi_line_quote = None
+ multi_line_quote_filter = None
+ for line in file_in:
+ if multi_line_quote is not None:
+ if not multi_line_quote_filter:
+ file_out.write(line.replace("\1", ""))
+ if have_end_quote(multi_line_quote, line):
+ multi_line_quote = None
+ multi_line_quote_filter = None
+ continue
+ if here_doc_delim is None and in_func is None:
+ var_assign_match = var_assign_re.match(line)
+ if var_assign_match is not None:
+ quote = var_assign_match.group(3)
+ filter_this = pattern.match(var_assign_match.group(2)) \
+ is not None
+ # Exclude the start quote when searching for the end quote,
+ # to ensure that the start quote is not misidentified as the
+ # end quote (happens if there is a newline immediately after
+ # the start quote).
+ if quote is not None and not \
+ have_end_quote(quote, line[var_assign_match.end(2)+2:]):
+ multi_line_quote = quote
+ multi_line_quote_filter = filter_this
+ if not filter_this:
+ line = filter_declare_readonly_opt(line)
+ file_out.write(line.replace("\1", ""))
+ continue
+ else:
+ declare_match = var_declare_re.match(line)
+ if declare_match is not None:
+ # declare without assignment
+ filter_this = pattern.match(declare_match.group(2)) \
+ is not None
+ if not filter_this:
+ line = filter_declare_readonly_opt(line)
+ file_out.write(line)
+ continue
+
+ if here_doc_delim is not None:
+ if here_doc_delim.match(line):
+ here_doc_delim = None
+ file_out.write(line)
+ continue
+ here_doc = here_doc_re.match(line)
+ if here_doc is not None:
+ here_doc_delim = re.compile("^%s$" % here_doc.group(1))
+ file_out.write(line)
+ continue
+ # Note: here-documents are handled before functions since otherwise
+ # it would be possible for the content of a here-document to be
+ # mistaken as the end of a function.
+ if in_func:
+ if func_end_re.match(line) is not None:
+ in_func = None
+ file_out.write(line)
+ continue
+ in_func = func_start_re.match(line)
+ if in_func is not None:
+ file_out.write(line)
+ continue
+ # This line is not recognized as part of a variable assignment,
+ # function definition, or here document, so just allow it to
+ # pass through.
+ file_out.write(line)
+
+if __name__ == "__main__":
+ description = "Filter out variable assignments for variable " + \
+ "names matching a given PATTERN " + \
+ "while leaving bash function definitions and here-documents " + \
+		"intact. The PATTERN is a space-separated list of variable names" + \
+		" and it supports Python regular expression syntax."
+ usage = "usage: %s PATTERN" % os.path.basename(sys.argv[0])
+ args = sys.argv[1:]
+
+ if '-h' in args or '--help' in args:
+ sys.stdout.write(usage + "\n")
+ sys.stdout.flush()
+ sys.exit(os.EX_OK)
+
+ if len(args) != 1:
+ sys.stderr.write(usage + "\n")
+ sys.stderr.write("Exactly one PATTERN argument required.\n")
+ sys.stderr.flush()
+ sys.exit(2)
+
+ file_in = sys.stdin
+ file_out = sys.stdout
+ if sys.hexversion >= 0x3000000:
+ file_in = codecs.iterdecode(sys.stdin.buffer.raw,
+ 'utf_8', errors='replace')
+ file_out = io.TextIOWrapper(sys.stdout.buffer,
+ 'utf_8', errors='backslashreplace')
+
+ var_pattern = args[0].split()
+
+ # Filter invalid variable names that are not supported by bash.
+ var_pattern.append(r'\d.*')
+ var_pattern.append(r'.*\W.*')
+
+ var_pattern = "^(%s)$" % "|".join(var_pattern)
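+	# e.g. a PATTERN of "PATH PWD" yields the regex ^(PATH|PWD|\d.*|.*\W.*)$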
+ filter_bash_environment(
+ re.compile(var_pattern), file_in, file_out)
+ file_out.flush()
diff --git a/usr/lib/portage/bin/fixpackages b/usr/lib/portage/bin/fixpackages
new file mode 100755
index 0000000..8a0c444
--- /dev/null
+++ b/usr/lib/portage/bin/fixpackages
@@ -0,0 +1,52 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import os
+import sys
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+from portage.output import EOutput
+from portage.util._argparse import ArgumentParser
+from textwrap import wrap
+from portage._global_updates import _global_updates
+mysettings = portage.settings
+mytrees = portage.db
+mtimedb = portage.mtimedb
+
+description = """The fixpackages program performs package move updates on
+ configuration files, installed packages, and binary packages."""
+description = " ".join(description.split())
+
+parser = ArgumentParser(description=description)
+parser.parse_args()
+
+if mysettings['ROOT'] != "/":
+ out = EOutput()
+ msg = "The fixpackages program is not intended for use with " + \
+ "ROOT != \"/\". Instead use `emaint --fix movebin` and/or " + \
+		"`emaint --fix moveinst`."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+ sys.exit(1)
+
+try:
+ os.nice(int(mysettings.get("PORTAGE_NICENESS", "0")))
+except (OSError, ValueError) as e:
+ portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
+ mysettings["PORTAGE_NICENESS"])
+ portage.writemsg("!!! %s\n" % str(e))
+ del e
+
+_global_updates(mytrees, mtimedb["updates"], if_mtime_changed=False)
+
+print()
+print("Done.")
+print()
diff --git a/usr/lib/portage/bin/glsa-check b/usr/lib/portage/bin/glsa-check
new file mode 100755
index 0000000..94dea73
--- /dev/null
+++ b/usr/lib/portage/bin/glsa-check
@@ -0,0 +1,335 @@
+#!/usr/bin/python -b
+# Copyright 2008-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import codecs
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+from portage.output import green, red, nocolor, white
+from portage.util._argparse import ArgumentParser
+
+__program__ = "glsa-check"
+__author__ = "Marius Mauch <genone@gentoo.org>"
+__version__ = "1.0"
+
+# option parsing
+epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
+ " filenames containing GLSAs or the special identifiers" \
+ " 'all', 'new' and 'affected'"
+parser = ArgumentParser(usage=__program__ + " <option> [glsa-list]",
+ epilog=epilog)
+
+modes = parser.add_argument_group("Modes")
+modes.add_argument("-l", "--list", action="store_const",
+ const="list", dest="mode",
+		help="List all unapplied GLSAs")
+modes.add_argument("-d", "--dump", action="store_const",
+ const="dump", dest="mode",
+ help="Show all information about the given GLSA")
+modes.add_argument("--print", action="store_const",
+ const="dump", dest="mode",
+ help="Alias for --dump")
+modes.add_argument("-t", "--test", action="store_const",
+ const="test", dest="mode",
+ help="Test if this system is affected by the given GLSA")
+modes.add_argument("-p", "--pretend", action="store_const",
+ const="pretend", dest="mode",
+ help="Show the necessary commands to apply this GLSA")
+modes.add_argument("-f", "--fix", action="store_const",
+ const="fix", dest="mode",
+ help="Try to auto-apply this GLSA (experimental)")
+modes.add_argument("-i", "--inject", action="store_const",
+ const="inject", dest="mode",
+ help="inject the given GLSA into the glsa_injected file")
+modes.add_argument("-m", "--mail", action="store_const",
+ const="mail", dest="mode",
+ help="Send a mail with the given GLSAs to the administrator")
+
+parser.add_argument("-V", "--version", action="store_true",
+ help="Some information about this tool")
+parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
+ help="Print more information")
+parser.add_argument("-n", "--nocolor", action="store_true",
+ help="Disable colors")
+parser.add_argument("-e", "--emergelike", action="store_false", dest="least_change",
+ help="Do not use a least-change algorithm")
+parser.add_argument("-c", "--cve", action="store_true", dest="list_cve",
+ help="Show CAN ids in listing mode")
+
+options, params = parser.parse_known_args()
+
+if options.nocolor:
+ nocolor()
+
+if options.version:
+ sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
+ sys.stderr.write("Author: " + __author__ + "\n")
+ sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
+ sys.exit(0)
+
+mode = options.mode
+least_change = options.least_change
+list_cve = options.list_cve
+verbose = options.verbose
+
+# Sanity checking
+if mode is None:
+ sys.stderr.write("No mode given: what should I do?\n")
+ parser.print_help()
+ sys.exit(1)
+elif mode != "list" and not params:
+	sys.stderr.write("\nno GLSA given, so we'll do nothing for now.\n")
+	sys.stderr.write("If you want to run on all GLSAs, please tell me so\n")
+	sys.stderr.write("(specify \"all\" as parameter)\n\n")
+ parser.print_help()
+ sys.exit(1)
+elif mode in ["fix", "inject"] and os.geteuid() != 0:
+ # we need root privileges for write access
+ sys.stderr.write("\nThis tool needs root access to "+options.mode+" this GLSA\n\n")
+ sys.exit(2)
+elif mode == "list" and not params:
+ params.append("new")
+
+# delay this for speed increase
+from portage.glsa import (Glsa, GlsaTypeException, GlsaFormatException,
+ get_applied_glsas, get_glsa_list)
+
+eroot = portage.settings['EROOT']
+vardb = portage.db[eroot]["vartree"].dbapi
+portdb = portage.db[eroot]["porttree"].dbapi
+
+# build glsa lists
+completelist = get_glsa_list(portage.settings)
+
+checklist = get_applied_glsas(portage.settings)
+todolist = [e for e in completelist if e not in checklist]
+
+glsalist = []
+if "new" in params:
+ glsalist = todolist
+ params.remove("new")
+
+if "all" in params:
+ glsalist = completelist
+ params.remove("all")
+if "affected" in params:
+ # replaced completelist with todolist on request of wschlich
+ for x in todolist:
+ try:
+ myglsa = Glsa(x, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (x, e)))
+ continue
+ if myglsa.isVulnerable():
+ glsalist.append(x)
+ params.remove("affected")
+
+# remove invalid parameters
+for p in params[:]:
+ if not (p in completelist or os.path.exists(p)):
+ sys.stderr.write(("(removing %s from parameter list as it isn't a valid GLSA specification)\n" % p))
+ params.remove(p)
+
+glsalist.extend([g for g in params if g not in glsalist])
+
+def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr, encoding="utf-8"):
+ # Get to the raw streams in py3k before wrapping them with an encoded writer
+ # to avoid writing bytes to a text stream (stdout/stderr are text streams
+ # by default in py3k)
+ if hasattr(fd1, "buffer"):
+ fd1 = fd1.buffer
+ if hasattr(fd2, "buffer"):
+ fd2 = fd2.buffer
+ fd1 = codecs.getwriter(encoding)(fd1)
+ fd2 = codecs.getwriter(encoding)(fd2)
+ fd2.write(white("[A]")+" means this GLSA was marked as applied (injected),\n")
+ fd2.write(green("[U]")+" means the system is not affected and\n")
+ fd2.write(red("[N]")+" indicates that the system might be affected.\n\n")
+
+ myglsalist.sort()
+ for myid in myglsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ fd2.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ if myglsa.isInjected():
+ status = "[A]"
+ color = white
+ elif myglsa.isVulnerable():
+ status = "[N]"
+ color = red
+ else:
+ status = "[U]"
+ color = green
+
+ if verbose:
+ access = ("[%-8s] " % myglsa.access)
+ else:
+ access=""
+
+ fd1.write(color(myglsa.nr) + " " + color(status) + " " + color(access) + myglsa.title + " (")
+ if not verbose:
+ for pkg in list(myglsa.packages)[:3]:
+ fd1.write(" " + pkg + " ")
+ if len(myglsa.packages) > 3:
+ fd1.write("... ")
+ else:
+ for pkg in myglsa.packages:
+ mylist = vardb.match(pkg)
+ if len(mylist) > 0:
+ pkg = color(" ".join(mylist))
+ fd1.write(" " + pkg + " ")
+
+ fd1.write(")")
+ if list_cve:
+ fd1.write(" "+(",".join([r[:13] for r in myglsa.references if r[:4] in ["CAN-", "CVE-"]])))
+ fd1.write("\n")
+ return 0
+
+if mode == "list":
+ sys.exit(summarylist(glsalist))
+
+# dump, fix, inject and pretend are nearly the same code; only the glsa method call differs
+if mode in ["dump", "fix", "inject", "pretend"]:
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ if mode == "dump":
+ myglsa.dump()
+ elif mode == "fix":
+ sys.stdout.write("Fixing GLSA "+myid+"\n")
+ if not myglsa.isVulnerable():
+ sys.stdout.write(">>> no vulnerable packages installed\n")
+ else:
+ mergelist = myglsa.getMergeList(least_change=least_change)
+ if mergelist == []:
+ sys.stdout.write(">>> cannot fix GLSA, no unaffected packages available\n")
+ sys.exit(2)
+ for pkg in mergelist:
+ sys.stdout.write(">>> merging "+pkg+"\n")
+ # using emerge for the actual merging as it contains the dependency
+ # code and we want to be consistent in behaviour. Also this functionality
+ # will be integrated in emerge later, so it shouldn't hurt much.
+				emergecmd = "emerge --oneshot =" + pkg
+ if verbose:
+ sys.stderr.write(emergecmd+"\n")
+ exitcode = os.system(emergecmd)
+				# system() returns the exitcode in the high byte of a 16-bit integer
+ if exitcode >= 1<<8:
+ exitcode >>= 8
+ if exitcode:
+ sys.exit(exitcode)
+ if len(mergelist):
+ sys.stdout.write("\n")
+ elif mode == "pretend":
+ sys.stdout.write("Checking GLSA "+myid+"\n")
+ if not myglsa.isVulnerable():
+ sys.stdout.write(">>> no vulnerable packages installed\n")
+ else:
+ mergedict = {}
+ for (vuln, update) in myglsa.getAffectionTable(least_change=least_change):
+ mergedict.setdefault(update, []).append(vuln)
+
+ sys.stdout.write(">>> The following updates will be performed for this GLSA:\n")
+ for pkg in mergedict:
+ if pkg != "":
+ sys.stdout.write(" " + pkg + " (vulnerable: " + ", ".join(mergedict[pkg]) + ")\n")
+ if "" in mergedict:
+ sys.stdout.write("\n>>> For the following packages, no upgrade path exists:\n")
+ sys.stdout.write(" " + ", ".join(mergedict[""]))
+ elif mode == "inject":
+ sys.stdout.write("injecting " + myid + "\n")
+ myglsa.inject()
+ sys.stdout.write("\n")
+ sys.exit(0)
+
+# test is a bit different as Glsa.test() produces no output
+if mode == "test":
+ outputlist = []
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ if myglsa.isVulnerable():
+ outputlist.append(str(myglsa.nr))
+ if len(outputlist) > 0:
+ sys.stderr.write("This system is affected by the following GLSAs:\n")
+ if verbose:
+ summarylist(outputlist)
+ else:
+ sys.stdout.write("\n".join(outputlist)+"\n")
+ else:
+ sys.stderr.write("This system is not affected by any of the listed GLSAs\n")
+ sys.exit(0)
+
+# mail mode as requested by solar
+if mode == "mail":
+ import portage.mail, socket
+ from io import BytesIO
+ from email.mime.text import MIMEText
+
+ # color doesn't make any sense for mail
+ nocolor()
+
+ if "PORTAGE_ELOG_MAILURI" in portage.settings:
+ myrecipient = portage.settings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ if "PORTAGE_ELOG_MAILFROM" in portage.settings:
+ myfrom = portage.settings["PORTAGE_ELOG_MAILFROM"]
+ else:
+ myfrom = "glsa-check"
+
+ mysubject = "[glsa-check] Summary for %s" % socket.getfqdn()
+
+ # need a file object for summarylist()
+ myfd = BytesIO()
+ line = "GLSA Summary report for host %s\n" % socket.getfqdn()
+ myfd.write(line.encode("utf-8"))
+ line = "(Command was: %s)\n\n" % " ".join(sys.argv)
+ myfd.write(line.encode("utf-8"))
+ summarylist(glsalist, fd1=myfd, fd2=myfd)
+ summary = myfd.getvalue().decode("utf-8")
+ myfd.close()
+
+ myattachments = []
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ myfd = BytesIO()
+ myglsa.dump(outstream=myfd)
+ attachment = myfd.getvalue().decode("utf-8")
+ myattachments.append(MIMEText(attachment, _charset="utf8"))
+ myfd.close()
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, summary, myattachments)
+ portage.mail.send_mail(portage.settings, mymessage)
+
+ sys.exit(0)
+
+# something wrong here, all valid paths are covered with sys.exit()
+sys.stderr.write("nothing more to do\n")
+sys.exit(2)
diff --git a/usr/lib/portage/bin/helper-functions.sh b/usr/lib/portage/bin/helper-functions.sh
new file mode 100755
index 0000000..b9bc74a
--- /dev/null
+++ b/usr/lib/portage/bin/helper-functions.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# For routines we want to use in ebuild-helpers/ but don't want to
+# expose to the general ebuild environment.
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+#
+# API functions for doing parallel processing
+#
+makeopts_jobs() {
+ # Copied from eutils.eclass:makeopts_jobs()
+ local jobs=$(echo " ${MAKEOPTS} " | \
+ sed -r -n 's:.*[[:space:]](-j|--jobs[=[:space:]])[[:space:]]*([0-9]+).*:\2:p')
+ echo ${jobs:-1}
+}
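+# e.g. with MAKEOPTS="-j4 --load-average=3" this prints "4"; when no
+# -j/--jobs flag is present, it falls back to "1".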
+
+__multijob_init() {
+ # Setup a pipe for children to write their pids to when they finish.
+ # We have to allocate two fd's because POSIX has undefined behavior
+ # when you open a FIFO for simultaneous read/write. #487056
+ local pipe=$(mktemp -t multijob.XXXXXX)
+ rm -f "${pipe}"
+ mkfifo -m 600 "${pipe}"
+ __redirect_alloc_fd mj_write_fd "${pipe}"
+ __redirect_alloc_fd mj_read_fd "${pipe}"
+ rm -f "${pipe}"
+
+ # See how many children we can fork based on the user's settings.
+ mj_max_jobs=$(makeopts_jobs "$@")
+ mj_num_jobs=0
+}
+
+__multijob_child_init() {
+ trap 'echo ${BASHPID:-$(__bashpid)} $? >&'${mj_write_fd} EXIT
+ trap 'exit 1' INT TERM
+}
+
+__multijob_finish_one() {
+ local pid ret
+ read -r -u ${mj_read_fd} pid ret
+ : $(( --mj_num_jobs ))
+ return ${ret}
+}
+
+__multijob_finish() {
+ local ret=0
+ while [[ ${mj_num_jobs} -gt 0 ]] ; do
+ __multijob_finish_one
+ : $(( ret |= $? ))
+ done
+ # Let bash clean up its internal child tracking state.
+ wait
+ return ${ret}
+}
+
+__multijob_post_fork() {
+ : $(( ++mj_num_jobs ))
+ if [[ ${mj_num_jobs} -ge ${mj_max_jobs} ]] ; then
+ __multijob_finish_one
+ fi
+ return $?
+}
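+# Illustrative use of the multijob API above (a sketch only; "my_worker"
+# is a hypothetical function, not something defined in this file):
+#
+#   __multijob_init
+#   for f in "${files[@]}" ; do
+#       (
+#           __multijob_child_init
+#           my_worker "${f}"
+#       ) &
+#       __multijob_post_fork || die "worker failed"
+#   done
+#   __multijob_finish || die "some workers failed"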
+
+# @FUNCTION: __redirect_alloc_fd
+# @USAGE: <var> <file> [redirection]
+# @DESCRIPTION:
+# Find a free fd and redirect the specified file via it. Store the new
+# fd in the specified variable. Useful for the cases where we don't care
+# about the exact fd #.
+__redirect_alloc_fd() {
+ local var=$1 file=$2 redir=${3:-"<>"}
+
+ if [[ $(( (BASH_VERSINFO[0] << 8) + BASH_VERSINFO[1] )) -ge $(( (4 << 8) + 1 )) ]] ; then
+		# bash 4.1 and newer can allocate a free fd itself via {var} redirections.
+ eval "exec {${var}}${redir}'${file}'"
+ else
+ # Need to provide the functionality ourselves.
+ local fd=10
+ local fddir=/dev/fd
+ # Prefer /proc/self/fd if available (/dev/fd
+ # doesn't work on solaris, see bug #474536).
+ [[ -d /proc/self/fd ]] && fddir=/proc/self/fd
+ while :; do
+ # Make sure the fd isn't open. It could be a char device,
+ # or a symlink (possibly broken) to something else.
+ if [[ ! -e ${fddir}/${fd} ]] && [[ ! -L ${fddir}/${fd} ]] ; then
+ eval "exec ${fd}${redir}'${file}'" && break
+ fi
+ [[ ${fd} -gt 1024 ]] && die 'could not locate a free temp fd !?'
+ : $(( ++fd ))
+ done
+ : $(( ${var} = fd ))
+ fi
+}
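+# e.g. `__redirect_alloc_fd logfd /tmp/out.log ">>"` (illustrative names)
+# opens /tmp/out.log for appending on some free fd and stores that fd
+# number in ${logfd}.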
diff --git a/usr/lib/portage/bin/install-qa-check.d/05double-D b/usr/lib/portage/bin/install-qa-check.d/05double-D
new file mode 100755
index 0000000..1634afd
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/05double-D
@@ -0,0 +1,17 @@
+# Check for accidental install into ${D}/${D}
+
+DD_check() {
+ if [[ -d ${D%/}${D} ]] ; then
+ local -i INSTALLTOD=0
+ while read -r -d $'\0' i ; do
+ eqawarn "QA Notice: /${i##${D%/}${D}} installed in \${D}/\${D}"
+ ((INSTALLTOD++))
+ done < <(find "${D%/}${D}" -print0)
+ die "Aborting due to QA concerns: ${INSTALLTOD} files installed in ${D%/}${D}"
+ fi
+}
+
+DD_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/05prefix b/usr/lib/portage/bin/install-qa-check.d/05prefix
new file mode 100755
index 0000000..32561e2
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/05prefix
@@ -0,0 +1,118 @@
+# Prefix specific QA checks
+
+install_qa_check_prefix() {
+ [[ ${ED} == ${D} ]] && return
+
+ if [[ -d ${ED}/${D} ]] ; then
+ find "${ED}/${D}" | \
+ while read i ; do
+ eqawarn "QA Notice: /${i##${ED}/${D}} installed in \${ED}/\${D}"
+ done
+ die "Aborting due to QA concerns: files installed in ${ED}/${D}"
+ fi
+
+ if [[ -d ${ED}/${EPREFIX} ]] ; then
+ find "${ED}/${EPREFIX}/" | \
+ while read i ; do
+ eqawarn "QA Notice: ${i#${D}} double prefix"
+ done
+ die "Aborting due to QA concerns: double prefix files installed"
+ fi
+
+ if [[ -d ${D} ]] ; then
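+	# list everything under ${D} that is neither inside ${ED} nor an
+	# ancestor directory of ${EPREFIX}; any match was installed outside
+	# the prefix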
+ INSTALLTOD=$(find ${D%/} | egrep -v "^${ED}" | sed -e "s|^${D%/}||" | awk '{if (length($0) <= length("'"${EPREFIX}"'")) { if (substr("'"${EPREFIX}"'", 1, length($0)) != $0) {print $0;} } else if (substr($0, 1, length("'"${EPREFIX}"'")) != "'"${EPREFIX}"'") {print $0;} }')
+ if [[ -n ${INSTALLTOD} ]] ; then
+ eqawarn "QA Notice: the following files are outside of the prefix:"
+ eqawarn "${INSTALLTOD}"
+ die "Aborting due to QA concerns: there are files installed outside the prefix"
+ fi
+ fi
+
+ # all further checks rely on ${ED} existing
+ [[ -d ${ED} ]] || return
+
+ # check shebangs, bug #282539
+ rm -f "${T}"/non-prefix-shebangs-errs
+ local WHITELIST=" /usr/bin/env "
+	# this is hellishly expensive, but how else?
+ find "${ED}" -executable \! -type d -print0 \
+ | xargs -0 grep -H -n -m1 "^#!" \
+ | while read f ;
+ do
+ local fn=${f%%:*}
+ local pos=${f#*:} ; pos=${pos%:*}
+ local line=${f##*:}
+ # shebang always appears on the first line ;)
+ [[ ${pos} != 1 ]] && continue
+ local oldIFS=${IFS}
+ IFS=$'\r'$'\n'$'\t'" "
+ line=( ${line#"#!"} )
+ IFS=${oldIFS}
+ [[ ${WHITELIST} == *" ${line[0]} "* ]] && continue
+ local fp=${fn#${D}} ; fp=/${fp%/*}
+ # line[0] can be an absolutised path, bug #342929
+ local eprefix=$(canonicalize ${EPREFIX})
+ local rf=${fn}
+ # in case we deal with a symlink, make sure we don't replace it
+ # with a real file (sed -i does that)
+ if [[ -L ${fn} ]] ; then
+ rf=$(readlink ${fn})
+ [[ ${rf} != /* ]] && rf=${fn%/*}/${rf}
+ # ignore symlinks pointing to outside prefix
+ # as seen in sys-devel/native-cctools
+ [[ $(canonicalize "/${rf#${D}}") != ${eprefix}/* ]] && continue
+ fi
+ # does the shebang start with ${EPREFIX}, and does it exist?
+ if [[ ${line[0]} == ${EPREFIX}/* || ${line[0]} == ${eprefix}/* ]] ; then
+ if [[ ! -e ${ROOT%/}${line[0]} && ! -e ${D%/}${line[0]} ]] ; then
+			# hmm, it refers explicitly to $EPREFIX but doesn't exist;
+			# if it's in PATH, that's wrong in any case
+ if [[ ":${PATH}:" == *":${fp}:"* ]] ; then
+ echo "${fn#${D}}:${line[0]} (explicit EPREFIX but target not found)" \
+ >> "${T}"/non-prefix-shebangs-errs
+ else
+ eqawarn "${fn#${D}} has explicit EPREFIX in shebang but target not found (${line[0]})"
+ fi
+ fi
+ continue
+ fi
+ # unprefixed shebang, is the script directly in $PATH or an init
+ # script?
+ if [[ ":${PATH}:${EPREFIX}/etc/init.d:" == *":${fp}:"* ]] ; then
+ if [[ -e ${EROOT}${line[0]} || -e ${ED}${line[0]} ]] ; then
+				# it's unprefixed, but we can just fix it because a
+				# prefixed variant exists
+ eqawarn "prefixing shebang of ${fn#${D}}"
+ # statement is made idempotent on purpose, because
+ # symlinks may point to the same target, and hence the
+ # same real file may be sedded multiple times since we
+ # read the shebangs in one go upfront for performance
+ # reasons
+ sed -i -e '1s:^#! \?'"${line[0]}"':#!'"${EPREFIX}"${line[0]}':' "${rf}"
+ continue
+ else
+ # this is definitely wrong: script in $PATH and invalid shebang
+ echo "${fn#${D}}:${line[0]} (script ${fn##*/} installed in PATH but interpreter ${line[0]} not found)" \
+ >> "${T}"/non-prefix-shebangs-errs
+ fi
+ else
+ # unprefixed/invalid shebang, but outside $PATH, this may be
+ # intended (e.g. config.guess) so remain silent by default
+ has stricter ${FEATURES} && \
+ eqawarn "invalid shebang in ${fn#${D}}: ${line[0]}"
+ fi
+ done
+ if [[ -e "${T}"/non-prefix-shebangs-errs ]] ; then
+		eqawarn "QA Notice: the following files use invalid (possibly non-prefixed) shebangs:"
+ while read line ; do
+ eqawarn " ${line}"
+ done < "${T}"/non-prefix-shebangs-errs
+ rm -f "${T}"/non-prefix-shebangs-errs
+ die "Aborting due to QA concerns: invalid shebangs found"
+ fi
+}
+
+install_qa_check_prefix
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/10executable-issues b/usr/lib/portage/bin/install-qa-check.d/10executable-issues
new file mode 100755
index 0000000..f765749
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/10executable-issues
@@ -0,0 +1,140 @@
+# Check for major issues with built executables: insecure RPATHs,
+# text relocations, executable stacks
+
+elf_check() {
+ if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT}; then
+ local insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
+ local f x
+
+ # display warnings when using stricter because we die afterwards
+ if has stricter ${FEATURES} ; then
+ local PORTAGE_QUIET
+ fi
+
+ # Make sure we disallow insecure RUNPATH/RPATHs.
+ # 1) References to PORTAGE_BUILDDIR are banned because it's a
+ # security risk. We don't want to load files from a
+ # temporary directory.
+ # 2) If ROOT != "/", references to ROOT are banned because
+ # that directory won't exist on the target system.
+ # 3) Null paths are banned because the loader will search $PWD when
+ # it finds null paths.
+ local forbidden_dirs="${PORTAGE_BUILDDIR}"
+ if [[ -n "${ROOT}" && "${ROOT}" != "/" ]]; then
+ forbidden_dirs+=" ${ROOT}"
+ fi
+ local dir l rpath_files=$(scanelf -F '%F:%r' -qBR "${ED}")
+ f=""
+ for dir in ${forbidden_dirs}; do
+ for l in $(echo "${rpath_files}" | grep -E ":${dir}|::|: "); do
+ f+=" ${l%%:*}\n"
+ if ! has stricter ${FEATURES}; then
+ __vecho "Auto fixing rpaths for ${l%%:*}"
+ TMPDIR="${dir}" scanelf -BXr "${l%%:*}" -o /dev/null
+ fi
+ done
+ done
+
+ # Reject set*id binaries with $ORIGIN in RPATH #260331
+ x=$(
+ find "${ED}" -type f \( -perm -u+s -o -perm -g+s \) -print0 | \
+ xargs -0 scanelf -qyRF '%r %p' | grep '$ORIGIN'
+ )
+
+ # Print QA notice.
+ if [[ -n ${f}${x} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain insecure RUNPATHs"
+ eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+ eqawarn " with the maintaining herd of the package."
+ eqawarn "${f}${f:+${x:+\n}}${x}"
+ __vecho -ne '\n'
+ if [[ -n ${x} ]] || has stricter ${FEATURES} ; then
+ insecure_rpath=1
+ fi
+ fi
+
+ # TEXTRELs are baaaaaaaad
+ # Allow devs to mark things as ignorable ... e.g. things that are
+ # binary-only and upstream isn't cooperating (nvidia-glx) ... we
+ # allow ebuild authors to set QA_TEXTRELS_arch and QA_TEXTRELS ...
+ # the former overrides the latter ... regexes allowed ! :)
+ local qa_var="QA_TEXTRELS_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_TEXTRELS=${!qa_var}
+ [[ -n ${QA_STRICT_TEXTRELS} ]] && QA_TEXTRELS=""
+ export QA_TEXTRELS="${QA_TEXTRELS} lib*/modules/*.ko"
+ f=$(scanelf -qyRF '%t %p' "${ED}" | grep -v 'usr/lib/debug/')
+ if [[ -n ${f} ]] ; then
+ scanelf -qyRAF '%T %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-textrel.log
+ __vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain runtime text relocations"
+ eqawarn " Text relocations force the dynamic linker to perform extra"
+ eqawarn " work at startup, waste system resources, and may pose a security"
+ eqawarn " risk. On some architectures, the code may not even function"
+ eqawarn " properly, if at all."
+ eqawarn " For more information, see http://hardened.gentoo.org/pic-fix-guide.xml"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ die_msg="${die_msg} textrels,"
+ sleep 1
+ fi
+
+ # Also, executable stacks only matter on linux (and just glibc atm ...)
+ f=""
+ case ${CTARGET:-${CHOST}} in
+ *-linux-gnu*)
+ # Check for files with executable stacks, but only on arches which
+ # are supported at the moment. Keep this list in sync with
+ # http://www.gentoo.org/proj/en/hardened/gnu-stack.xml (Arch Status)
+ case ${CTARGET:-${CHOST}} in
+ arm*|i?86*|ia64*|m68k*|s390*|sh*|x86_64*)
+ # Allow devs to mark things as ignorable ... e.g. things
+ # that are binary-only and upstream isn't cooperating ...
+ # we allow ebuild authors to set QA_EXECSTACK_arch and
+ # QA_EXECSTACK ... the former overrides the latter ...
+ # regexes allowed ! :)
+
+ qa_var="QA_EXECSTACK_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_EXECSTACK=${!qa_var}
+ [[ -n ${QA_STRICT_EXECSTACK} ]] && QA_EXECSTACK=""
+ qa_var="QA_WX_LOAD_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_WX_LOAD=${!qa_var}
+ [[ -n ${QA_STRICT_WX_LOAD} ]] && QA_WX_LOAD=""
+ export QA_EXECSTACK="${QA_EXECSTACK} lib*/modules/*.ko"
+ export QA_WX_LOAD="${QA_WX_LOAD} lib*/modules/*.ko"
+ f=$(scanelf -qyRAF '%e %p' "${ED}" | grep -v 'usr/lib/debug/')
+ ;;
+ esac
+ ;;
+ esac
+ if [[ -n ${f} ]] ; then
+ # One more pass to help devs track down the source
+ scanelf -qyRAF '%e %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-execstack.log
+ __vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain writable and executable sections"
+ eqawarn " Files with such sections will not work properly (or at all!) on some"
+ eqawarn " architectures/operating systems. A bug should be filed at"
+ eqawarn " http://bugs.gentoo.org/ to make sure the issue is fixed."
+ eqawarn " For more information, see http://hardened.gentoo.org/gnu-stack.xml"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn " Note: Bugs should be filed for the respective maintainers"
+ eqawarn " of the package in question and not hardened@g.o."
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ die_msg="${die_msg} execstacks"
+ sleep 1
+ fi
+
+ if [[ ${insecure_rpath} -eq 1 ]] ; then
+ die "Aborting due to serious QA concerns with RUNPATH/RPATH"
+ elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
+ die "Aborting due to QA concerns: ${die_msg}"
+ fi
+ fi
+}
+
+elf_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/10ignored-flags b/usr/lib/portage/bin/install-qa-check.d/10ignored-flags
new file mode 100755
index 0000000..7aa9eb6
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/10ignored-flags
@@ -0,0 +1,99 @@
+# QA checks for ignored *FLAGS.
+
+ignored_flag_check() {
+ type -P scanelf > /dev/null || return
+ has binchecks ${RESTRICT} && return
+
+ local qa_var="QA_FLAGS_IGNORED_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_FLAGS_IGNORED=(\"\${${qa_var}[@]}\")"
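+	# a one-element value may really be a whitespace-separated list of
+	# patterns; split it with globbing disabled so entries like lib*.so
+	# are not expanded by the shell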
+ if [[ ${#QA_FLAGS_IGNORED[@]} -eq 1 ]] ; then
+ local shopts=$-
+ set -o noglob
+ QA_FLAGS_IGNORED=(${QA_FLAGS_IGNORED})
+ set +o noglob
+ set -${shopts}
+ fi
+
+ local f x
+
+ # Check for files built without respecting *FLAGS. Note that
+ # -frecord-gcc-switches must be in all *FLAGS variables, in
+ # order to avoid false positive results here.
+ # NOTE: This check must execute before prepall/prepstrip, since
+ # prepstrip strips the .GCC.command.line sections.
+ if [[ "${CFLAGS}" == *-frecord-gcc-switches* ]] && \
+ [[ "${CXXFLAGS}" == *-frecord-gcc-switches* ]] && \
+ [[ "${FFLAGS}" == *-frecord-gcc-switches* ]] && \
+ [[ "${FCFLAGS}" == *-frecord-gcc-switches* ]] ; then
+ rm -f "${T}"/scanelf-ignored-CFLAGS.log
+ for x in $(scanelf -qyRF '#k%p' -k '!.GCC.command.line' "${ED}") ; do
+ # Separate out file types that are known to support
+ # .GCC.command.line sections, using the `file` command
+ # similar to how prepstrip uses it.
+ f=$(file "${x}") || continue
+ [[ -z ${f} ]] && continue
+ if [[ ${f} == *"SB executable"* ||
+ ${f} == *"SB shared object"* ]] ; then
+ echo "${x}" >> "${T}"/scanelf-ignored-CFLAGS.log
+ fi
+ done
+
+ if [[ -f "${T}"/scanelf-ignored-CFLAGS.log ]] ; then
+
+ if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] ; then
+ for x in "${QA_FLAGS_IGNORED[@]}" ; do
+ sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-CFLAGS.log
+ done
+ fi
+ # Filter anything under /usr/lib/debug/ in order to avoid
+ # duplicate warnings for splitdebug files.
+ sed -e "s#^usr/lib/debug/.*##" -e "/^\$/d" -e "s#^#/#" \
+ -i "${T}"/scanelf-ignored-CFLAGS.log
+ f=$(<"${T}"/scanelf-ignored-CFLAGS.log)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "${BAD}QA Notice: Files built without respecting CFLAGS have been detected${NORMAL}"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-ignored-CFLAGS.log
+ fi
+ fi
+ fi
+
+ # Check for files built without respecting LDFLAGS
+ if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && \
+ ! has binchecks ${RESTRICT} ; then
+ f=$(scanelf -qyRF '#k%p' -k .hash "${ED}")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-ignored-LDFLAGS.log
+ if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] ; then
+ for x in "${QA_FLAGS_IGNORED[@]}" ; do
+ sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+ done
+ fi
+ # Filter anything under /usr/lib/debug/ in order to avoid
+ # duplicate warnings for splitdebug files.
+ sed -e "s#^usr/lib/debug/.*##" -e "/^\$/d" -e "s#^#/#" \
+ -i "${T}"/scanelf-ignored-LDFLAGS.log
+ f=$(<"${T}"/scanelf-ignored-LDFLAGS.log)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "${BAD}QA Notice: Files built without respecting LDFLAGS have been detected${NORMAL}"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-ignored-LDFLAGS.log
+ fi
+ fi
+ fi
+}
+
+ignored_flag_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/20deprecated-directories b/usr/lib/portage/bin/install-qa-check.d/20deprecated-directories
new file mode 100755
index 0000000..fb82bfe
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/20deprecated-directories
@@ -0,0 +1,18 @@
+# Check for deprecated directories
+
+deprecated_dir_check() {
+ local x f=
+ for x in etc/app-defaults usr/man usr/info usr/X11R6 usr/doc usr/locale ; do
+ [[ -d ${ED}/$x ]] && f+=" $x\n"
+ done
+ if [[ -n $f ]] ; then
+ eqawarn "QA Notice: This ebuild installs into the following deprecated directories:"
+ eqawarn
+ eqawarn "$f"
+ fi
+}
+
+deprecated_dir_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/20runtime-directories b/usr/lib/portage/bin/install-qa-check.d/20runtime-directories
new file mode 100755
index 0000000..2e21d6d
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/20runtime-directories
@@ -0,0 +1,26 @@
+# Check for directories that need to be created at runtime
+
+runtime_dir_check() {
+	# It's ok to create these directories, but not to install into them. #493154
+ # TODO: We should add var/lib to this list.
+ local x f=
+ for x in var/cache var/lock var/run run ; do
+ if [[ ! -L ${ED}/${x} && -d ${ED}/${x} ]] ; then
+ if [[ -z $(find "${ED}/${x}" -prune -empty) ]] ; then
+ f+=$(cd "${ED}"; find "${x}" -printf ' %p\n')
+ fi
+ fi
+ done
+ if [[ -n ${f} ]] ; then
+ eqawarn "QA Notice: This ebuild installs into paths that should be created at runtime."
+ eqawarn " To fix, simply do not install into these directories. Instead, your package"
+ eqawarn " should create dirs on the fly at runtime as needed via init scripts/etc..."
+ eqawarn
+ eqawarn "${f}"
+ fi
+}
+
+runtime_dir_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60bash-completion b/usr/lib/portage/bin/install-qa-check.d/60bash-completion
new file mode 100755
index 0000000..c154761
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60bash-completion
@@ -0,0 +1,130 @@
+# QA checks for bash-completion files
+
+bashcomp_check() {
+ # Check for correct bash-completion install path.
+ local syscompdir=$(pkg-config --variable=completionsdir bash-completion 2>/dev/null)
+ : ${syscompdir:=${EPREFIX}/usr/share/bash-completion/completions}
+
+ local instcompdir
+ if [[ -d ${ED}/usr/share/bash-completion/completions ]]; then
+ instcompdir=${ED}/usr/share/bash-completion/completions
+ elif [[ -d ${ED}/usr/share/bash-completion ]]; then
+ if [[ ${syscompdir} != ${EPREFIX}/usr/share/bash-completion ]]; then
+			eqawarn "Bash completions were installed in a legacy location. Please update"
+ eqawarn "the ebuild to get the install paths using bash-completion-r1.eclass."
+ eqawarn
+ fi
+
+ instcompdir=${ED}/usr/share/bash-completion
+ fi
+
+ # Do a few QA tests on bash completions.
+ if [[ -n ${instcompdir} && -f ${EROOT}/usr/share/bash-completion/bash_completion ]]; then
+ _get_completions() {
+ # source the file
+ source "${1}" &>/dev/null
+
+ [[ ${USED_HAVE} == yes ]] && echo '__HAVE_USED__'
+
+ # print the completed commands
+ while read -a args; do
+ [[ ${args[0]} == complete ]] || continue
+ # command always comes last, one per line
+ echo "${args[$(( ${#args[@]} - 1))]}"
+ done < <(complete -p)
+ }
+
+ # load the global helpers
+ source "${EROOT}"/usr/share/bash-completion/bash_completion
+
+ # clean up predefined completions
+ complete -r
+
+ # force all completions on
+ _have() {
+ return 0
+ }
+
+ local USED_HAVE=no
+ # add a replacement for have()
+ have() {
+ USED_HAVE=yes
+
+ unset -v have
+ _have ${1} && have=yes
+ }
+
+ local f c completions
+ local all_compls=()
+ local all_files=()
+ local qa_warnings=()
+
+ for f in "${instcompdir}"/*; do
+ # ignore directories and other non-files
+ [[ ! -f ${f} ]] && continue
+
+ # skip the common code file
+ # (in case we're run in /usr/share/bash-completion)
+ [[ ${f##*/} == bash_completion ]] && continue
+
+ completions=( $(_get_completions "${f}") )
+
+ if [[ ${completions[0]} == __HAVE_USED__ ]]; then
+ qa_warnings+=(
+ "${f##*/}: 'have' command is deprecated and must not be used."
+ )
+ unset 'completions[0]'
+ fi
+
+ if [[ -z ${completions[@]} ]]; then
+ qa_warnings+=(
+ "${f##*/}: does not define any completions (failed to source?)."
+ )
+ continue
+ fi
+
+ for c in "${completions[@]}"; do
+ if [[ ${c} == /* ]]; then
+ qa_warnings+=(
+ "${f##*/}: absolute paths can not be used for completions (on '${c}')."
+ )
+ else
+ all_compls+=( "${c}" )
+ fi
+ done
+
+ if ! has "${f##*/}" "${all_compls[@]}"; then
+ qa_warnings+=(
+ "${f##*/}: incorrect name, no completions for '${f##*/}' command defined."
+ )
+ fi
+
+ all_files+=( "${f##*/}" )
+ done
+
+ for c in "${all_compls[@]}"; do
+ if ! has "${c}" "${all_files[@]}"; then
+ qa_warnings+=(
+ "${c}: missing alias (symlink) for completed command."
+ )
+ fi
+ done
+
+ if [[ -n ${qa_warnings[@]} ]]; then
+ eqawarn "Problems with installed bash completions were found:"
+ eqawarn
+ for c in "${qa_warnings[@]}"; do
+ eqawarn " ${c}"
+ done
+ eqawarn
+ eqawarn "For more details on installing bash-completions, please see:"
+ eqawarn "https://wiki.gentoo.org/wiki/Bash/Installing_completion_files"
+ eqawarn
+ fi
+ fi
+}
+
+bashcomp_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60openrc b/usr/lib/portage/bin/install-qa-check.d/60openrc
new file mode 100755
index 0000000..9b7fc6d
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60openrc
@@ -0,0 +1,41 @@
+# QA checks for OpenRC init.d files.
+
+openrc_check() {
+ # Sanity check syntax errors in init.d scripts
+	local d i f
+ for d in /etc/conf.d /etc/init.d ; do
+ [[ -d ${ED}/${d} ]] || continue
+ for i in "${ED}"/${d}/* ; do
+ [[ -L ${i} ]] && continue
+			# if an empty conf.d/init.d dir exists (baselayout), then ${i} will be the literal "/etc/conf.d/*" and not exist
+ [[ ! -e ${i} ]] && continue
+ if [[ ${d} == /etc/init.d && ${i} != *.sh ]] ; then
+ # skip non-shell-script for bug #451386
+ [[ $(head -n1 "${i}") =~ ^#!.*[[:space:]/](runscript|sh)$ ]] || continue
+ fi
+ bash -n "${i}" || die "The init.d file has syntax errors: ${i}"
+ done
+ done
+
+ local checkbashisms=$(type -P checkbashisms)
+ if [[ -n ${checkbashisms} ]] ; then
+ for d in /etc/init.d ; do
+ [[ -d ${ED}${d} ]] || continue
+ for i in "${ED}${d}"/* ; do
+ [[ -e ${i} ]] || continue
+ [[ -L ${i} ]] && continue
+ f=$("${checkbashisms}" -f "${i}" 2>&1)
+ [[ $? != 0 && -n ${f} ]] || continue
+ eqawarn "QA Notice: shell script appears to use non-POSIX feature(s):"
+ while read -r ;
+ do eqawarn " ${REPLY}"
+ done <<< "${f//${ED}}"
+ done
+ done
+ fi
+}
+
+openrc_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60pkgconfig b/usr/lib/portage/bin/install-qa-check.d/60pkgconfig
new file mode 100755
index 0000000..1b34c04
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60pkgconfig
@@ -0,0 +1,15 @@
+# Check for pkg-config file issues
+
+pkgconfig_check() {
+ # Look for leaking LDFLAGS into pkg-config files
+ local f=$(egrep -sH '^Libs.*-Wl,(-O[012]|--hash-style)' "${ED}"/usr/*/pkgconfig/*.pc)
+ if [[ -n ${f} ]] ; then
+ eqawarn "QA Notice: pkg-config files with wrong LDFLAGS detected:"
+ eqawarn "${f//${D}}"
+ fi
+}
+
+pkgconfig_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60pngfix b/usr/lib/portage/bin/install-qa-check.d/60pngfix
new file mode 100755
index 0000000..8d53040
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60pngfix
@@ -0,0 +1,35 @@
+# Check for issues with PNG files
+
+pngfix_check() {
+ local pngfix=$(type -P pngfix)
+ if [[ -n ${pngfix} ]] ; then
+ local pngout=()
+ local next
+
+ while read -r -a pngout ; do
+ local error=""
+
+ case "${pngout[1]}" in
+ CHK)
+ error='invalid checksum'
+ ;;
+ TFB)
+ error='broken IDAT window length'
+ ;;
+ esac
+
+ if [[ -n ${error} ]] ; then
+ if [[ -z ${next} ]] ; then
+ eqawarn "QA Notice: broken .png files found:"
+ next=1
+ fi
+ eqawarn " ${pngout[@]:7}: ${error}"
+ fi
+ done < <(find "${ED}" -type f -name '*.png' -exec "${pngfix}" {} +)
+ fi
+}
+
+pngfix_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60systemd b/usr/lib/portage/bin/install-qa-check.d/60systemd
new file mode 100755
index 0000000..f134a30
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60systemd
@@ -0,0 +1,25 @@
+# QA checks for systemd units.
+
+systemd_check() {
+ local systemddir f
+
+ # Common mistakes in systemd service files.
+ if type -P pkg-config >/dev/null && pkg-config --exists systemd; then
+ systemddir=$(pkg-config --variable=systemdsystemunitdir systemd)
+ else
+ systemddir=/usr/lib/systemd/system
+ fi
+ if [[ -d ${ED%/}${systemddir} ]]; then
+ f=$(grep -sH '^EnvironmentFile.*=.*/etc/conf\.d' "${ED%/}${systemddir}"/*.service)
+ if [[ -n ${f} ]] ; then
+ eqawarn "QA Notice: systemd units using /etc/conf.d detected:"
+ eqawarn "${f//${D}}"
+ eqawarn "See: https://wiki.gentoo.org/wiki/Project:Systemd/conf.d_files"
+ fi
+ fi
+}
+
+systemd_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/60udev b/usr/lib/portage/bin/install-qa-check.d/60udev
new file mode 100755
index 0000000..4327d06
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/60udev
@@ -0,0 +1,21 @@
+# Check udev rule installs
+
+udev_check() {
+ set +f
+ local x f=
+ for x in "${ED}etc/udev/rules.d/"* "${ED}lib"*"/udev/rules.d/"* ; do
+ [[ -e ${x} ]] || continue
+ [[ ${x} == ${ED}lib/udev/rules.d/* ]] && continue
+ f+=" ${x#${ED}}\n"
+ done
+ if [[ -n $f ]] ; then
+ eqawarn "QA Notice: udev rules should be installed in /lib/udev/rules.d:"
+ eqawarn
+ eqawarn "$f"
+ fi
+}
+
+udev_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/80libraries b/usr/lib/portage/bin/install-qa-check.d/80libraries
new file mode 100755
index 0000000..c83f278
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/80libraries
@@ -0,0 +1,167 @@
+# Check for issues with installed libraries
+
+lib_check() {
+ local f x i j
+
+ if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT}; then
+ # Check for shared libraries lacking SONAMEs
+ local qa_var="QA_SONAME_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_SONAME=(\"\${${qa_var}[@]}\")"
+ f=$(scanelf -ByF '%S %p' "${ED}"{,usr/}lib*/lib*.so* | awk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-missing-SONAME.log
+ if [[ "${QA_STRICT_SONAME-unset}" == unset ]] ; then
+ if [[ ${#QA_SONAME[@]} -gt 1 ]] ; then
+ for x in "${QA_SONAME[@]}" ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_SONAME} ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ fi
+ sed -e "/^\$/d" -i "${T}"/scanelf-missing-SONAME.log
+ f=$(<"${T}"/scanelf-missing-SONAME.log)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: The following shared libraries lack a SONAME"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-missing-SONAME.log
+ fi
+ fi
+
+ # Check for shared libraries lacking NEEDED entries
+ qa_var="QA_DT_NEEDED_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_DT_NEEDED=(\"\${${qa_var}[@]}\")"
+ f=$(scanelf -ByF '%n %p' "${ED}"{,usr/}lib*/lib*.so* | awk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-missing-NEEDED.log
+ if [[ "${QA_STRICT_DT_NEEDED-unset}" == unset ]] ; then
+ if [[ ${#QA_DT_NEEDED[@]} -gt 1 ]] ; then
+ for x in "${QA_DT_NEEDED[@]}" ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_DT_NEEDED} ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ fi
+ sed -e "/^\$/d" -i "${T}"/scanelf-missing-NEEDED.log
+ f=$(<"${T}"/scanelf-missing-NEEDED.log)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: The following shared libraries lack NEEDED entries"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-missing-NEEDED.log
+ fi
+ fi
+ fi
+
+ # this should help to ensure that all (most?) shared libraries are executable
+ # and that all libtool scripts / static libraries are not executable
+ for i in "${ED}"opt/*/lib* \
+ "${ED}"lib* \
+ "${ED}"usr/lib* ; do
+ [[ ! -d ${i} ]] && continue
+
+ for j in "${i}"/*.so.* "${i}"/*.so ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ -x ${j} ]] && continue
+ __vecho "making executable: ${j#${ED}}"
+ chmod +x "${j}"
+ done
+
+ for j in "${i}"/*.a "${i}"/*.la ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ ! -x ${j} ]] && continue
+ __vecho "removing executable bit: ${j#${ED}}"
+ chmod -x "${j}"
+ done
+
+ for j in "${i}"/*.{a,dll,dylib,sl,so}.* "${i}"/*.{a,dll,dylib,sl,so} ; do
+ [[ ! -e ${j} ]] && continue
+ [[ ! -L ${j} ]] && continue
+ linkdest=$(readlink "${j}")
+ if [[ ${linkdest} == /* ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: Found an absolute symlink in a library directory:"
+ eqawarn " ${j#${D}} -> ${linkdest}"
+ eqawarn " It should be a relative symlink if in the same directory"
+ eqawarn " or a linker script if it crosses the /usr boundary."
+ fi
+ done
+ done
+
+ # When installing static libraries into /usr/lib and shared libraries into
+ # /lib, we have to make sure we have a linker script in /usr/lib along side
+ # the static library, or gcc will utilize the static lib when linking :(.
+ # http://bugs.gentoo.org/4411
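+	# For example (illustrative): /usr/lib/libfoo.a alongside /lib/libfoo.so
+	# but with no /usr/lib/libfoo.so is exactly the case flagged below.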
+ local abort="no"
+ local a s
+ for a in "${ED}"usr/lib*/*.a ; do
+ # PREFIX LOCAL: support MachO objects
+ [[ ${CHOST} == *-darwin* ]] \
+ && s=${a%.a}.dylib \
+ || s=${a%.a}.so
+ # END PREFIX LOCAL
+ if [[ ! -e ${s} ]] ; then
+ s=${s%usr/*}${s##*/usr/}
+ if [[ -e ${s} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: Missing gen_usr_ldscript for ${s##*/}"
+ abort="yes"
+ fi
+ fi
+ done
+ [[ ${abort} == "yes" ]] && die "add those ldscripts"
+
+ # Make sure people don't store libtool files or static libs in /lib
+ # PREFIX LOCAL: on AIX, "dynamic libs" have extension .a, so don't
+ # get false positives
+ [[ ${CHOST} == *-aix* ]] \
+ && f=$(ls "${ED}"lib*/*.la 2>/dev/null || true) \
+ || f=$(ls "${ED}"lib*/*.{a,la} 2>/dev/null)
+ # END PREFIX LOCAL
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: Excessive files found in the / partition"
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ die "static archives (*.a) and libtool library files (*.la) belong in /usr/lib*, not /lib*"
+ fi
+
+ # Verify that the libtool files don't contain bogus $D entries.
+ local abort=no gentoo_bug=no always_overflow=no
+ for a in "${ED}"usr/lib*/*.la ; do
+ s=${a##*/}
+ if grep -qs "${ED}" "${a}" ; then
+ __vecho -ne '\n'
+ eqawarn "QA Notice: ${s} appears to contain PORTAGE_TMPDIR paths"
+ abort="yes"
+ fi
+ done
+ [[ ${abort} == "yes" ]] && die "soiled libtool library files found"
+}
+
+lib_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/80multilib-strict b/usr/lib/portage/bin/install-qa-check.d/80multilib-strict
new file mode 100755
index 0000000..436932e
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/80multilib-strict
@@ -0,0 +1,50 @@
+# Strict multilib directory checks
+multilib_strict_check() {
+ if has multilib-strict ${FEATURES} && \
+ [[ -x ${EPREFIX}/usr/bin/file && -x ${EPREFIX}/usr/bin/find ]] && \
+ [[ -n ${MULTILIB_STRICT_DIRS} && -n ${MULTILIB_STRICT_DENY} ]]
+ then
+ rm -f "${T}/multilib-strict.log"
+ local abort=no dir file
+ MULTILIB_STRICT_EXEMPT=$(echo ${MULTILIB_STRICT_EXEMPT} | sed -e 's:\([(|)]\):\\\1:g')
+ for dir in ${MULTILIB_STRICT_DIRS} ; do
+ [[ -d ${ED}/${dir} ]] || continue
+ for file in $(find ${ED}/${dir} -type f | grep -v "^${ED}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
+ if file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" ; then
+ echo "${file#${ED}//}" >> "${T}/multilib-strict.log"
+ fi
+ done
+ done
+
+ if [[ -s ${T}/multilib-strict.log ]] ; then
+ if [[ ${#QA_MULTILIB_PATHS[@]} -eq 1 ]] ; then
+ local shopts=$-
+ set -o noglob
+ QA_MULTILIB_PATHS=(${QA_MULTILIB_PATHS})
+ set +o noglob
+ set -${shopts}
+ fi
+ if [ "${QA_STRICT_MULTILIB_PATHS-unset}" = unset ] ; then
+ local x
+ for x in "${QA_MULTILIB_PATHS[@]}" ; do
+ sed -e "s#^${x#/}\$##" -i "${T}/multilib-strict.log"
+ done
+ sed -e "/^\$/d" -i "${T}/multilib-strict.log"
+ fi
+ if [[ -s ${T}/multilib-strict.log ]] ; then
+ abort=yes
+ echo "Files matching a file type that is not allowed:"
+ while read -r ; do
+ echo " ${REPLY}"
+ done < "${T}/multilib-strict.log"
+ fi
+ fi
+
+ [[ ${abort} == yes ]] && die "multilib-strict check failed!"
+ fi
+}
+
+multilib_strict_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/90gcc-warnings b/usr/lib/portage/bin/install-qa-check.d/90gcc-warnings
new file mode 100755
index 0000000..b18651e
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/90gcc-warnings
@@ -0,0 +1,168 @@
+# Check for important gcc warnings
+
+gcc_warn_check() {
+ local f
+
+ # Evaluate misc gcc warnings
+ if [[ -n ${PORTAGE_LOG_FILE} && -r ${PORTAGE_LOG_FILE} ]] ; then
+ # In debug mode, this variable definition and corresponding grep calls
+ # will produce false positives if they're shown in the trace.
+ local reset_debug=0
+ if [[ ${-/x/} != $- ]] ; then
+ set +x
+ reset_debug=1
+ fi
+ local m msgs=(
+ # only will and does, no might :)
+ 'warning: .*will.*\[-Wstrict-aliasing\]'
+ 'warning: .*does.*\[-Wstrict-aliasing\]'
+ # implicit declaration of function ‘...’
+ 'warning: .*\[-Wimplicit-function-declaration\]'
+		# with -Wall, this one comes paired with -Wimplicit-function-declaration,
+		# but without -Wall, we need to check for it on its own
+ 'warning: .*incompatible implicit declaration of built-in function'
+ # 'is used uninitialized in this function' and some more
+ 'warning: .*\[-Wuninitialized\]'
+ # comparisons like ‘X<=Y<=Z’ do not have their mathematical meaning
+		'warning: .*mathematical meaning.*\[-Wparentheses\]'
+ # null argument where non-null required
+ 'warning: .*\[-Wnonnull\]'
+ # array subscript is above/below/outside array bounds
+ 'warning: .*\[-Warray-bounds\]'
+ # attempt to free a non-heap object
+ 'warning: .*\[-Wfree-nonheap-object\]'
+ # those three do not have matching -W flags, it seems
+ 'warning: .*will always overflow destination buffer'
+ 'warning: .*assuming pointer wraparound does not occur'
+ 'warning: .*escape sequence out of range'
+ # left-hand operand of comma expression has no effect
+ 'warning: .*left.*comma.*\[-Wunused-value\]'
+ # converting to non-pointer type ... from NULL and likes
+ 'warning: .*\[-Wconversion-null\]'
+ # NULL used in arithmetic
+ 'warning: .*NULL.*\[-Wpointer-arith\]'
+ # pointer to a function used in arithmetic and likes
+ 'warning: .*function.*\[-Wpointer-arith\]'
+ # the address of ... will never be NULL and likes
+ # (uses of function refs & string constants in conditionals)
+ 'warning: .*\[-Waddress\]'
+ # outdated?
+ 'warning: .*too few arguments for format'
+ # format ... expects a matching ... argument
+ # (iow, too few arguments for format in new wording :))
+ 'warning: .*matching.*\[-Wformat=\]'
+ # function returns address of local variable
+ 'warning: .*\[-Wreturn-local-addr\]'
+ # argument to sizeof ... is the same expression as the source
+ 'warning: .*\[-Wsizeof-pointer-memaccess\]'
+ # iteration invokes undefined behavior
+ 'warning: .*\[-Waggressive-loop-optimizations\]'
+
+ # this may be valid code :/
+ #': warning: multi-character character constant'
+ # need to check these two ...
+ #': warning: assuming signed overflow does not occur when'
+ #': warning: comparison with string literal results in unspecified behav'
+ # yacc/lex likes to trigger this one
+ #': warning: extra tokens at end of .* directive'
+ # only gcc itself triggers this ?
+ #': warning: .*noreturn.* function does return'
+ # these throw false positives when 0 is used instead of NULL
+ #': warning: missing sentinel in function call'
+ #': warning: not enough variable arguments to fit a sentinel'
+ )
+
+ # join all messages into one grep-expression
+ local joined_msgs
+ printf -v joined_msgs '%s|' "${msgs[@]}"
+ joined_msgs=${joined_msgs%|}
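+		# e.g. (illustrative) msgs=('foo' 'bar') yields joined_msgs='foo|bar',
+		# a single alternation pattern for the one grep -E call below.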
+
+		local abort="no" always_overflow=no gentoo_bug=no
+ local grep_cmd=grep
+ [[ $PORTAGE_LOG_FILE = *.gz ]] && grep_cmd=zgrep
+
+ # force C locale to work around slow unicode locales #160234
+ f=$(LC_CTYPE=C LC_COLLATE=C "${grep_cmd}" -E "${joined_msgs}" "${PORTAGE_LOG_FILE}")
+ if [[ -n ${f} ]] ; then
+ abort="yes"
+ # for now, don't make this fatal (see bug #337031)
+ #if [[ ${f} == *'will always overflow destination buffer'* ]]; then
+ # always_overflow=yes
+ #fi
+ if [[ $always_overflow = yes ]] ; then
+ eerror
+ eerror "QA Notice: Package triggers severe warnings which indicate that it"
+ eerror " may exhibit random runtime failures."
+ eerror
+ eerror "${f}"
+ eerror
+ eerror " Please file a bug about this at http://bugs.gentoo.org/"
+ eerror " with the maintaining herd of the package."
+ eerror
+ else
+ __vecho -ne '\n'
+ eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
+ eqawarn " may exhibit random runtime failures."
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ fi
+ fi
+
+ local cat_cmd=cat
+ [[ $PORTAGE_LOG_FILE = *.gz ]] && cat_cmd=zcat
+ [[ $reset_debug = 1 ]] && set -x
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ f=$(cd "${PORTAGE_PYM_PATH}" ; $cat_cmd "${PORTAGE_LOG_FILE}" | \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/check-implicit-pointer-usage.py || die "check-implicit-pointer-usage.py failed")
+ if [[ -n ${f} ]] ; then
+
+ # In the future this will be a forced "die". In preparation,
+ # increase the log level from "qa" to "eerror" so that people
+ # are aware this is a problem that must be fixed asap.
+
+ # just warn on 32bit hosts but bail on 64bit hosts
+ case ${CHOST} in
+ alpha*|hppa64*|ia64*|powerpc64*|mips64*|sparc64*|sparcv9*|x86_64*) gentoo_bug=yes ;;
+ esac
+
+ abort=yes
+
+ if [[ $gentoo_bug = yes ]] ; then
+ eerror
+ eerror "QA Notice: Package triggers severe warnings which indicate that it"
+ eerror " will almost certainly crash on 64bit architectures."
+ eerror
+ eerror "${f}"
+ eerror
+ eerror " Please file a bug about this at http://bugs.gentoo.org/"
+ eerror " with the maintaining herd of the package."
+ eerror
+ else
+ __vecho -ne '\n'
+ eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
+ eqawarn " will almost certainly crash on 64bit architectures."
+ eqawarn "${f}"
+ __vecho -ne '\n'
+ fi
+
+ fi
+ if [[ ${abort} == "yes" ]] ; then
+ if [[ $gentoo_bug = yes || $always_overflow = yes ]] ; then
+ die "install aborted due to severe warnings shown above"
+ else
+ echo "Please do not file a Gentoo bug and instead" \
+ "report the above QA issues directly to the upstream" \
+ "developers of this software." | fmt -w 70 | \
+ while read -r line ; do eqawarn "${line}" ; done
+ eqawarn "Homepage: ${HOMEPAGE}"
+ has stricter ${FEATURES} && \
+ die "install aborted due to severe warnings shown above"
+ fi
+ fi
+ fi
+}
+
+gcc_warn_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install-qa-check.d/90world-writable b/usr/lib/portage/bin/install-qa-check.d/90world-writable
new file mode 100755
index 0000000..635612d
--- /dev/null
+++ b/usr/lib/portage/bin/install-qa-check.d/90world-writable
@@ -0,0 +1,27 @@
+# Check for world-writable files
+
+world_writable_check() {
+ # Now we look for all world writable files.
+ # PREFIX LOCAL: keep offset prefix in the reported files
+ local unsafe_files=$(find "${ED}" -type f -perm -2 | sed -e "s:^${D}:- :")
+ # END PREFIX LOCAL
+ if [[ -n ${unsafe_files} ]] ; then
+ __vecho "QA Security Notice: world writable file(s):"
+ __vecho "${unsafe_files}"
+		__vecho "- This may or may not be a security problem; most of the time it is one."
+		__vecho "- Please double check that $PF really needs a world writable bit and file bugs accordingly."
+ sleep 1
+ fi
+
+ local unsafe_files=$(find "${ED}" -type f '(' -perm -2002 -o -perm -4002 ')' | sed -e "s:^${D}:/:")
+ if [[ -n ${unsafe_files} ]] ; then
+ eqawarn "QA Notice: Unsafe files detected (set*id and world writable)"
+ eqawarn "${unsafe_files}"
+ die "Unsafe files found in \${D}. Portage will not install them."
+ fi
+}
+
+world_writable_check
+: # guarantee successful exit
+
+# vim:ft=sh
diff --git a/usr/lib/portage/bin/install.py b/usr/lib/portage/bin/install.py
new file mode 100755
index 0000000..3c5e0de
--- /dev/null
+++ b/usr/lib/portage/bin/install.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python -b
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import stat
+import sys
+import subprocess
+import traceback
+
+import portage
+from portage.util._argparse import ArgumentParser
+from portage.util.movefile import _copyxattr
+from portage.exception import OperationNotSupported
+
+# Change back to original cwd _after_ all imports (bug #469338).
+os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
+
+def parse_args(args):
+	"""
+	Parse the command line arguments using portage.util._argparse, which
+	falls back to optparse for Python 2.6 compatibility
+	Args:
+		args: a list of the whitespace-delimited command line arguments
+	Returns:
+		tuple of the Namespace of parsed options, and a list of ordered parameters
+	"""
+ parser = ArgumentParser(add_help=False)
+
+ parser.add_argument(
+ "-b",
+ action="store_true",
+ dest="shortopt_b"
+ )
+ parser.add_argument(
+ "--backup",
+ action="store",
+ dest="backup"
+ )
+ parser.add_argument(
+ "-c",
+ action="store_true",
+ dest="shortopt_c"
+ )
+ parser.add_argument(
+ "--compare",
+ "-C",
+ action="store_true",
+ dest="compare"
+ )
+ parser.add_argument(
+ "--directory",
+ "-d",
+ action="store_true",
+ dest="directory"
+ )
+ parser.add_argument(
+ "-D",
+ action="store_true",
+ dest="shortopt_D"
+ )
+ parser.add_argument(
+ "--owner",
+ "-o",
+ action="store",
+ dest="owner"
+ )
+ parser.add_argument(
+ "--group",
+ "-g",
+ action="store",
+ dest="group"
+ )
+ parser.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ dest="mode"
+ )
+ parser.add_argument(
+ "--preserve-timestamps",
+ "-p",
+ action="store_true",
+ dest="preserve_timestamps"
+ )
+ parser.add_argument(
+ "--strip",
+ "-s",
+ action="store_true",
+ dest="strip"
+ )
+ parser.add_argument(
+ "--strip-program",
+ action="store",
+ dest="strip_program"
+ )
+ parser.add_argument(
+ "--suffix",
+ "-S",
+ action="store",
+ dest="suffix"
+ )
+ parser.add_argument(
+ "--target-directory",
+ "-t",
+ action="store",
+ dest="target_directory"
+ )
+ parser.add_argument(
+ "--no-target-directory",
+ "-T",
+ action="store_true",
+ dest="no_target_directory"
+ )
+ parser.add_argument(
+ "--context",
+ "-Z",
+ action="store",
+ dest="context"
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="store_true",
+ dest="verbose"
+ )
+ parser.add_argument(
+ "--help",
+ action="store_true",
+ dest="help"
+ )
+ parser.add_argument(
+ "--version",
+ action="store_true",
+ dest="version"
+ )
+
+ # Use parse_known_args for maximum compatibility with
+ # getopt handling of non-option file arguments. Note
+ # that parser.add_argument("files", nargs='+') would
+ # be subtly incompatible because it requires that all
+ # of the file arguments be grouped sequentially. Also
+ # note that we have to explicitly call add_argument
+ # for known options in order for argparse to correctly
+ # separate option arguments from file arguments in all
+ # cases (it also allows for optparse compatibility).
+	parsed_args = parser.parse_known_args(args)
+
+ opts = parsed_args[0]
+ files = parsed_args[1]
+ files = [f for f in files if f != "--"] # filter out "--"
+
+ return (opts, files)
+
+
+def copy_xattrs(opts, files):
+ """
+ Copy the extended attributes using portage.util.movefile._copyxattr
+ Args:
+		opts: Namespace of the parsed command line options
+ files: list of ordered command line parameters which should be files/directories
+ Returns:
+ system exit code
+ """
+ if opts.directory or not files:
+ return os.EX_OK
+
+ if opts.target_directory is None:
+ source, target = files[:-1], files[-1]
+ target_is_directory = os.path.isdir(target)
+ else:
+ source, target = files, opts.target_directory
+ target_is_directory = True
+
+ exclude = os.environ.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl")
+
+ try:
+ if target_is_directory:
+ for s in source:
+ abs_path = os.path.join(target, os.path.basename(s))
+ _copyxattr(s, abs_path, exclude=exclude)
+ else:
+ _copyxattr(source[0], target, exclude=exclude)
+ return os.EX_OK
+
+ except OperationNotSupported:
+ traceback.print_exc()
+ return os.EX_OSERR
+
+
+def Which(filename, path=None, exclude=None):
+ """
+ Find the absolute path of 'filename' in a given search 'path'
+ Args:
+ filename: basename of the file
+ path: colon delimited search path
+ exclude: path of file to exclude
+ """
+ if path is None:
+ path = os.environ.get('PATH', '')
+
+ if exclude is not None:
+ st = os.stat(exclude)
+ exclude = (st.st_ino, st.st_dev)
+
+ for p in path.split(':'):
+ p = os.path.join(p, filename)
+ if os.access(p, os.X_OK):
+ try:
+ st = os.stat(p)
+ except OSError:
+ # file disappeared?
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode) and \
+ (exclude is None or exclude != (st.st_ino, st.st_dev)):
+ return p
+
+ return None
+
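+# For example (illustrative): Which('install', exclude='/path/to/helper')
+# returns the first executable regular 'install' on PATH whose inode/device
+# differ from the excluded helper, or None when nothing matches.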
+
+def main(args):
+ opts, files = parse_args(args)
+ install_binary = Which('install', exclude=os.environ["__PORTAGE_HELPER_PATH"])
+ if install_binary is None:
+ sys.stderr.write("install: command not found\n")
+ return 127
+
+ cmdline = [install_binary]
+ cmdline += args
+
+ if sys.hexversion >= 0x3000000:
+ # We can't trust that the filesystem encoding (locale dependent)
+ # correctly matches the arguments, so use surrogateescape to
+ # pass through the original argv bytes for Python 3.
+ fs_encoding = sys.getfilesystemencoding()
+ cmdline = [x.encode(fs_encoding, 'surrogateescape') for x in cmdline]
+ files = [x.encode(fs_encoding, 'surrogateescape') for x in files]
+ if opts.target_directory is not None:
+ opts.target_directory = \
+ opts.target_directory.encode(fs_encoding, 'surrogateescape')
+
+ returncode = subprocess.call(cmdline)
+ if returncode == os.EX_OK:
+ returncode = copy_xattrs(opts, files)
+ if returncode != os.EX_OK:
+ portage.util.writemsg("!!! install: copy_xattrs failed with the "
+ "following arguments: %s\n" %
+ " ".join(portage._shell_quote(x) for x in args), noiselevel=-1)
+ return returncode
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/usr/lib/portage/bin/isolated-functions.sh b/usr/lib/portage/bin/isolated-functions.sh
new file mode 100755
index 0000000..1b10b6d
--- /dev/null
+++ b/usr/lib/portage/bin/isolated-functions.sh
@@ -0,0 +1,491 @@
+#!/bin/bash
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}/eapi.sh"
+
+# We need this next line for "die" and "assert": it expands the aliases
+# defined below. It _must_ precede all the calls to die and assert.
+shopt -s expand_aliases
+alias save_IFS='[ "${IFS:-unset}" != "unset" ] && old_IFS="${IFS}"'
+alias restore_IFS='if [ "${old_IFS:-unset}" != "unset" ]; then IFS="${old_IFS}"; unset old_IFS; else unset IFS; fi'
+
+assert() {
+ local x pipestatus=${PIPESTATUS[*]}
+ for x in $pipestatus ; do
+ [[ $x -eq 0 ]] || die "$@"
+ done
+}
+
+__assert_sigpipe_ok() {
+ # When extracting a tar file like this:
+ #
+ # bzip2 -dc foo.tar.bz2 | tar xof -
+ #
+ # For some tar files (see bug #309001), tar will
+ # close its stdin pipe when the decompressor still has
+ # remaining data to be written to its stdout pipe. This
+ # causes the decompressor to be killed by SIGPIPE. In
+ # this case, we want to ignore pipe writers killed by
+ # SIGPIPE, and trust the exit status of tar. We refer
+ # to the bash manual section "3.7.5 Exit Status"
+ # which says, "When a command terminates on a fatal
+ # signal whose number is N, Bash uses the value 128+N
+ # as the exit status."
+
+ local x pipestatus=${PIPESTATUS[*]}
+ for x in $pipestatus ; do
+ # Allow SIGPIPE through (128 + 13)
+ [[ $x -ne 0 && $x -ne ${PORTAGE_SIGPIPE_STATUS:-141} ]] && die "$@"
+ done
+
+ # Require normal success for the last process (tar).
+ [[ $x -eq 0 ]] || die "$@"
+}
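+# Typical usage (illustrative): call it right after a pipeline whose
+# producer may legitimately die of SIGPIPE, e.g.
+#   bzip2 -dc foo.tar.bz2 | tar xof -
+#   __assert_sigpipe_ok "unpack failed"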
+
+shopt -s extdebug
+
+# __dump_trace([number of funcs on stack to skip],
+# [whitespacing for filenames],
+# [whitespacing for line numbers])
+__dump_trace() {
+ local funcname="" sourcefile="" lineno="" s="yes" n p
+ declare -i strip=${1:-1}
+ local filespacing=$2 linespacing=$3
+
+ # The __qa_call() function and anything before it are portage internals
+ # that the user will not be interested in. Therefore, the stack trace
+ # should only show calls that come after __qa_call().
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ (( p = ${#BASH_ARGV[@]} ))
+ while (( n > 0 )) ; do
+ [ "${FUNCNAME[${n}]}" == "__qa_call" ] && break
+ (( p -= ${BASH_ARGC[${n}]} ))
+ (( n-- ))
+ done
+ if (( n == 0 )) ; then
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ (( p = ${#BASH_ARGV[@]} ))
+ fi
+
+ eerror "Call stack:"
+ while (( n > ${strip} )) ; do
+ funcname=${FUNCNAME[${n} - 1]}
+ sourcefile=$(basename "${BASH_SOURCE[${n}]}")
+ lineno=${BASH_LINENO[${n} - 1]}
+ # Display function arguments
+ args=
+ if [[ -n "${BASH_ARGV[@]}" ]]; then
+ for (( j = 1 ; j <= ${BASH_ARGC[${n} - 1]} ; ++j )); do
+ newarg=${BASH_ARGV[$(( p - j - 1 ))]}
+ args="${args:+${args} }'${newarg}'"
+ done
+ (( p -= ${BASH_ARGC[${n} - 1]} ))
+ fi
+ eerror " $(printf "%${filespacing}s" "${sourcefile}"), line $(printf "%${linespacing}s" "${lineno}"): Called ${funcname}${args:+ ${args}}"
+ (( n-- ))
+ done
+}
+
+nonfatal() {
+ if ! ___eapi_has_nonfatal; then
+ die "$FUNCNAME() not supported in this EAPI"
+ fi
+ if [[ $# -lt 1 ]]; then
+ die "$FUNCNAME(): Missing argument"
+ fi
+
+ PORTAGE_NONFATAL=1 "$@"
+}
+
+__bashpid() {
+ # The BASHPID variable is new to bash-4.0, so add a hack for older
+ # versions. This must be used like so:
+ # ${BASHPID:-$(__bashpid)}
+ sh -c 'echo ${PPID}'
+}
+
+__helpers_die() {
+ if ___eapi_helpers_can_die; then
+ die "$@"
+ else
+ echo -e "$@" >&2
+ fi
+}
+
+die() {
+ local IFS=$' \t\n'
+
+ if [[ $PORTAGE_NONFATAL -eq 1 ]]; then
+ echo -e " $WARN*$NORMAL ${FUNCNAME[1]}: WARNING: $@" >&2
+ return 1
+ fi
+
+ set +e
+ if [ -n "${QA_INTERCEPTORS}" ] ; then
+ # die was called from inside inherit. We need to clean up
+ # QA_INTERCEPTORS since sed is called below.
+ unset -f ${QA_INTERCEPTORS}
+ unset QA_INTERCEPTORS
+ fi
+ local n filespacing=0 linespacing=0
+ # setup spacing to make output easier to read
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ while (( n > 0 )) ; do
+ [ "${FUNCNAME[${n}]}" == "__qa_call" ] && break
+ (( n-- ))
+ done
+ (( n == 0 )) && (( n = ${#FUNCNAME[@]} - 1 ))
+ while (( n > 0 )); do
+ sourcefile=${BASH_SOURCE[${n}]} sourcefile=${sourcefile##*/}
+ lineno=${BASH_LINENO[${n}]}
+ ((filespacing < ${#sourcefile})) && filespacing=${#sourcefile}
+ ((linespacing < ${#lineno})) && linespacing=${#lineno}
+ (( n-- ))
+ done
+
+ # When a helper binary dies automatically in EAPI 4 and later, we don't
+ # get a stack trace, so at least report the phase that failed.
+ local phase_str=
+ [[ -n $EBUILD_PHASE ]] && phase_str=" ($EBUILD_PHASE phase)"
+ eerror "ERROR: ${CATEGORY}/${PF}::${PORTAGE_REPO_NAME} failed${phase_str}:"
+ eerror " ${*:-(no error message)}"
+ eerror
+ # __dump_trace is useless when the main script is a helper binary
+ local main_index
+ (( main_index = ${#BASH_SOURCE[@]} - 1 ))
+ if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh ; then
+ __dump_trace 2 ${filespacing} ${linespacing}
+ eerror " $(printf "%${filespacing}s" "${BASH_SOURCE[1]##*/}"), line $(printf "%${linespacing}s" "${BASH_LINENO[0]}"): Called die"
+ eerror "The specific snippet of code:"
+ # This scans the file that called die and prints out the logic that
+ # ended in the call to die. This really only handles lines that end
+ # with '|| die' and any preceding lines with line continuations (\).
+ # This tends to be the most common usage though, so let's do it.
+ # Due to the usage of appending to the hold space (even when empty),
+ # we always end up with the first line being a blank (thus the 2nd sed).
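+		# For example (illustrative), an ebuild containing
+		#   emake DESTDIR="${D}" \
+		#       install || die
+		# gets both lines printed as the failing snippet.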
+ sed -n \
+ -e "# When we get to the line that failed, append it to the
+ # hold space, move the hold space to the pattern space,
+ # then print out the pattern space and quit immediately
+ ${BASH_LINENO[0]}{H;g;p;q}" \
+ -e '# If this line ends with a line continuation, append it
+ # to the hold space
+ /\\$/H' \
+ -e '# If this line does not end with a line continuation,
+ # erase the line and set the hold buffer to it (thus
+ # erasing the hold buffer in the process)
+ /[^\]$/{s:^.*$::;h}' \
+ "${BASH_SOURCE[1]}" \
+ | sed -e '1d' -e 's:^:RETAIN-LEADING-SPACE:' \
+ | while read -r n ; do eerror " ${n#RETAIN-LEADING-SPACE}" ; done
+ eerror
+ fi
+ eerror "If you need support, post the output of \`emerge --info '=${CATEGORY}/${PF}::${PORTAGE_REPO_NAME}'\`,"
+ eerror "the complete build log and the output of \`emerge -pqv '=${CATEGORY}/${PF}::${PORTAGE_REPO_NAME}'\`."
+
+ # Only call die hooks here if we are executed via ebuild.sh or
+ # misc-functions.sh, since those are the only cases where the environment
+ # contains the hook functions. When necessary (like for __helpers_die), die
+ # hooks are automatically called later by a misc-functions.sh invocation.
+ if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh && \
+ [[ ${EBUILD_PHASE} != depend ]] ; then
+ local x
+ for x in $EBUILD_DEATH_HOOKS; do
+ ${x} "$@" >&2 1>&2
+ done
+ > "$PORTAGE_BUILDDIR/.die_hooks"
+ fi
+
+ if [[ -n ${PORTAGE_LOG_FILE} ]] ; then
+ eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
+ if [[ ${PORTAGE_LOG_FILE} != ${T}/* ]] && \
+ ! has fail-clean ${FEATURES} ; then
+ # Display path to symlink in ${T}, as requested in bug #412865.
+ local log_ext=log
+ [[ ${PORTAGE_LOG_FILE} != *.log ]] && log_ext+=.${PORTAGE_LOG_FILE##*.}
+ eerror "For convenience, a symlink to the build log is located at '${T}/build.${log_ext}'."
+ fi
+ fi
+ if [ -f "${T}/environment" ] ; then
+ eerror "The ebuild environment file is located at '${T}/environment'."
+ elif [ -d "${T}" ] ; then
+ {
+ set
+ export
+ } > "${T}/die.env"
+ eerror "The ebuild environment file is located at '${T}/die.env'."
+ fi
+ eerror "Working directory: '$(pwd)'"
+ eerror "S: '${S}'"
+
+ [[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+ [[ -n $PORTAGE_IPC_DAEMON ]] && "$PORTAGE_BIN_PATH"/ebuild-ipc exit 1
+
+ # subshell die support
+ [[ ${BASHPID:-$(__bashpid)} == ${EBUILD_MASTER_PID} ]] || kill -s SIGTERM ${EBUILD_MASTER_PID}
+ exit 1
+}
+
+__quiet_mode() {
+ [[ ${PORTAGE_QUIET} -eq 1 ]]
+}
+
+__vecho() {
+ __quiet_mode || echo "$@"
+}
+
+# Internal logging function, don't use this in ebuilds
+__elog_base() {
+ local messagetype
+ [ -z "${1}" -o -z "${T}" -o ! -d "${T}/logging" ] && return 1
+ case "${1}" in
+ INFO|WARN|ERROR|LOG|QA)
+ messagetype="${1}"
+ shift
+ ;;
+ *)
+ __vecho -e " ${BAD}*${NORMAL} Invalid use of internal function __elog_base(), next message will not be logged"
+ return 1
+ ;;
+ esac
+ echo -e "$@" | while read -r ; do
+ echo "$messagetype $REPLY" >> \
+ "${T}/logging/${EBUILD_PHASE:-other}"
+ done
+ return 0
+}
+
+eqawarn() {
+ __elog_base QA "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ __vecho " $WARN*$NORMAL $REPLY" >&2
+ done
+ LAST_E_CMD="eqawarn"
+ return 0
+}
+
+elog() {
+ __elog_base LOG "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $GOOD*$NORMAL $REPLY"
+ done
+ LAST_E_CMD="elog"
+ return 0
+}
+
+einfo() {
+ __elog_base INFO "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $GOOD*$NORMAL $REPLY"
+ done
+ LAST_E_CMD="einfo"
+ return 0
+}
+
+einfon() {
+ __elog_base INFO "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -ne " ${GOOD}*${NORMAL} $*"
+ LAST_E_CMD="einfon"
+ return 0
+}
+
+ewarn() {
+ __elog_base WARN "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $WARN*$NORMAL $RC_INDENTATION$REPLY" >&2
+ done
+ LAST_E_CMD="ewarn"
+ return 0
+}
+
+eerror() {
+ __elog_base ERROR "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $BAD*$NORMAL $RC_INDENTATION$REPLY" >&2
+ done
+ LAST_E_CMD="eerror"
+ return 0
+}
+
+ebegin() {
+ local msg="$*" dots spaces=${RC_DOT_PATTERN//?/ }
+ if [[ -n ${RC_DOT_PATTERN} ]] ; then
+ dots=$(printf "%$(( COLS - 3 - ${#RC_INDENTATION} - ${#msg} - 7 ))s" '')
+ dots=${dots//${spaces}/${RC_DOT_PATTERN}}
+ msg="${msg}${dots}"
+ else
+ msg="${msg} ..."
+ fi
+ einfon "${msg}"
+ [[ ${RC_ENDCOL} == "yes" ]] && echo
+ LAST_E_LEN=$(( 3 + ${#RC_INDENTATION} + ${#msg} ))
+ LAST_E_CMD="ebegin"
+ return 0
+}
+
+__eend() {
+ local retval=${1:-0} efunc=${2:-eerror} msg
+ shift 2
+
+ if [[ ${retval} == "0" ]] ; then
+ msg="${BRACKET}[ ${GOOD}ok${BRACKET} ]${NORMAL}"
+ else
+ if [[ -n $* ]] ; then
+ ${efunc} "$*"
+ fi
+ msg="${BRACKET}[ ${BAD}!!${BRACKET} ]${NORMAL}"
+ fi
+
+ if [[ ${RC_ENDCOL} == "yes" ]] ; then
+ echo -e "${ENDCOL} ${msg}"
+ else
+ [[ ${LAST_E_CMD} == ebegin ]] || LAST_E_LEN=0
+ printf "%$(( COLS - LAST_E_LEN - 7 ))s%b\n" '' "${msg}"
+ fi
+
+ return ${retval}
+}
+
+eend() {
+ local retval=${1:-0}
+ shift
+
+ __eend ${retval} eerror "$*"
+
+ LAST_E_CMD="eend"
+ return ${retval}
+}
+
+__unset_colors() {
+ COLS=80
+ ENDCOL=
+
+ GOOD=
+ WARN=
+ BAD=
+ NORMAL=
+ HILITE=
+ BRACKET=
+}
+
+__set_colors() {
+ COLS=${COLUMNS:-0} # bash's internal COLUMNS variable
+ # Avoid wasteful stty calls during the "depend" phases.
+ # If stdout is a pipe, the parent process can export COLUMNS
+ # if it's relevant. Use an extra subshell for stty calls, in
+ # order to redirect "/dev/tty: No such device or address"
+ # error from bash to /dev/null.
+ [[ $COLS == 0 && $EBUILD_PHASE != depend ]] && \
+ COLS=$(set -- $( ( stty size </dev/tty ) 2>/dev/null || echo 24 80 ) ; echo $2)
+ (( COLS > 0 )) || (( COLS = 80 ))
+
+ # Now, ${ENDCOL} will move us to the end of the
+	# column, regardless of character width
+ ENDCOL=$'\e[A\e['$(( COLS - 8 ))'C'
+ if [ -n "${PORTAGE_COLORMAP}" ] ; then
+ eval ${PORTAGE_COLORMAP}
+ else
+ GOOD=$'\e[32;01m'
+ WARN=$'\e[33;01m'
+ BAD=$'\e[31;01m'
+ HILITE=$'\e[36;01m'
+ BRACKET=$'\e[34;01m'
+ NORMAL=$'\e[0m'
+ fi
+}
+
+RC_ENDCOL="yes"
+RC_INDENTATION=''
+RC_DEFAULT_INDENT=2
+RC_DOT_PATTERN=''
+
+case "${NOCOLOR:-false}" in
+ yes|true)
+ __unset_colors
+ ;;
+ no|false)
+ __set_colors
+ ;;
+esac
+
+# In Prefix every platform has USERLAND=GNU, even FreeBSD. Since I
+# don't know how to reliably "figure out" we are in a Prefix instance of
+# portage here, I for now disable this check, and hardcode it to GNU.
+# Somehow it appears strange to me that this code is in this file;
+# non-ebuilds/eclasses should never rely on USERLAND and XARGS, should they?
+#if [[ -z ${USERLAND} ]] ; then
+# case $(uname -s) in
+# *BSD|DragonFly)
+# export USERLAND="BSD"
+# ;;
+# *)
+# export USERLAND="GNU"
+# ;;
+# esac
+#fi
+[[ -z ${USERLAND} ]] && USERLAND="GNU"
+
+if [[ -z ${XARGS} ]] ; then
+ case ${USERLAND} in
+ BSD)
+ export XARGS="xargs"
+ ;;
+ *)
+ export XARGS="xargs -r"
+ ;;
+ esac
+fi
+
+hasq() {
+ has $EBUILD_PHASE prerm postrm || eqawarn \
+ "QA Notice: The 'hasq' function is deprecated (replaced by 'has')"
+ has "$@"
+}
+
+hasv() {
+ if has "$@" ; then
+ echo "$1"
+ return 0
+ fi
+ return 1
+}
+
+has() {
+ local needle=$1
+ shift
+
+ local x
+ for x in "$@"; do
+ [ "${x}" = "${needle}" ] && return 0
+ done
+ return 1
+}
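+# For example (illustrative): 'has ssl ${USE}' returns 0 exactly when the
+# word "ssl" appears in ${USE}.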
+
+__repo_attr() {
+ local appropriate_section=0 exit_status=1 line saved_extglob_shopt=$(shopt -p extglob)
+ shopt -s extglob
+ while read line; do
+ [[ ${appropriate_section} == 0 && ${line} == "[$1]" ]] && appropriate_section=1 && continue
+ [[ ${appropriate_section} == 1 && ${line} == "["*"]" ]] && appropriate_section=0 && continue
+ # If a conditional expression like [[ ${line} == $2*( )=* ]] is used
+ # then bash-3.2 produces an error like the following when the file is
+ # sourced: syntax error in conditional expression: unexpected token `('
+ # Therefore, use a regular expression for compatibility.
+ if [[ ${appropriate_section} == 1 && ${line} =~ ^${2}[[:space:]]*= ]]; then
+ echo "${line##$2*( )=*( )}"
+ exit_status=0
+ break
+ fi
+ done <<< "${PORTAGE_REPOSITORIES}"
+ eval "${saved_extglob_shopt}"
+ return ${exit_status}
+}
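+# For example (illustrative): with PORTAGE_REPOSITORIES containing
+#   [gentoo]
+#   location = /usr/portage
+# '__repo_attr gentoo location' prints "/usr/portage" and returns 0.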
+
+true
diff --git a/usr/lib/portage/bin/lock-helper.py b/usr/lib/portage/bin/lock-helper.py
new file mode 100755
index 0000000..aa2dd60
--- /dev/null
+++ b/usr/lib/portage/bin/lock-helper.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+
+def main(args):
+
+ if args and isinstance(args[0], bytes):
+ for i, x in enumerate(args):
+ args[i] = portage._unicode_decode(x, errors='strict')
+
+ # Make locks quiet since unintended locking messages displayed on
+ # stdout would corrupt the intended output of this program.
+ portage.locks._quiet = True
+ lock_obj = portage.locks.lockfile(args[0], wantnewlockfile=True)
+ sys.stdout.write('\0')
+ sys.stdout.flush()
+ sys.stdin.read(1)
+ portage.locks.unlockfile(lock_obj)
+ return portage.os.EX_OK
+
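+# Handshake sketch (as implemented above): the parent reads the NUL byte
+# from this helper's stdout to learn that the lock is held, then writes a
+# single byte to its stdin when the helper should release the lock and exit.
+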
+if __name__ == "__main__":
+ rval = main(sys.argv[1:])
+ sys.exit(rval)
diff --git a/usr/lib/portage/bin/misc-functions.sh b/usr/lib/portage/bin/misc-functions.sh
new file mode 100755
index 0000000..b285865
--- /dev/null
+++ b/usr/lib/portage/bin/misc-functions.sh
@@ -0,0 +1,1201 @@
+#!/bin/bash
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# Miscellaneous shell functions that make use of the ebuild env but don't need
+# to be included directly in ebuild.sh.
+#
+# We're sourcing ebuild.sh here so that we inherit all of its goodness,
+# including bashrc trickery. This approach allows us to do our miscellaneous
+# shell work within the same env that ebuild.sh has, but without polluting
+# ebuild.sh itself with unneeded logic and shell code.
+#
+# XXX hack: clear the args so ebuild.sh doesn't see them
+MISC_FUNCTIONS_ARGS="$@"
+shift $#
+
+source "${PORTAGE_BIN_PATH:-/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage/bin}/ebuild.sh"
+
+install_symlink_html_docs() {
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ else
+		# PREFIX LOCAL: ED need not exist, whereas D does
+ [[ ! -d ${ED} && -d ${D} ]] && dodir /
+ # END PREFIX LOCAL
+ fi
+ cd "${ED}" || die "cd failed"
+ #symlink the html documentation (if DOC_SYMLINKS_DIR is set in make.conf)
+ if [ -n "${DOC_SYMLINKS_DIR}" ] ; then
+ local mydocdir docdir
+ for docdir in "${HTMLDOC_DIR:-does/not/exist}" "${PF}/html" "${PF}/HTML" "${P}/html" "${P}/HTML" ; do
+ if [ -d "usr/share/doc/${docdir}" ] ; then
+ mydocdir="/usr/share/doc/${docdir}"
+ fi
+ done
+ if [ -n "${mydocdir}" ] ; then
+ local mysympath
+ if [ -z "${SLOT}" -o "${SLOT%/*}" = "0" ] ; then
+ mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}"
+ else
+ mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}-${SLOT%/*}"
+ fi
+ einfo "Symlinking ${mysympath} to the HTML documentation"
+ dodir "${DOC_SYMLINKS_DIR}/${CATEGORY}"
+ dosym "${mydocdir}" "${mysympath}"
+ fi
+ fi
+}
+
+# replacement for "readlink -f" or "realpath"
+READLINK_F_WORKS=""
+canonicalize() {
+ if [[ -z ${READLINK_F_WORKS} ]] ; then
+ if [[ $(readlink -f -- /../ 2>/dev/null) == "/" ]] ; then
+ READLINK_F_WORKS=true
+ else
+ READLINK_F_WORKS=false
+ fi
+ fi
+ if ${READLINK_F_WORKS} ; then
+ readlink -f -- "$@"
+ return
+ fi
+
+ local f=$1 b n=10 wd=$(pwd)
+ while (( n-- > 0 )); do
+ while [[ ${f: -1} = / && ${#f} -gt 1 ]]; do
+ f=${f%/}
+ done
+ b=${f##*/}
+ cd "${f%"${b}"}" 2>/dev/null || break
+ if [[ ! -L ${b} ]]; then
+ f=$(pwd -P)
+ echo "${f%/}/${b}"
+ cd "${wd}"
+ return 0
+ fi
+ f=$(readlink "${b}")
+ done
+ cd "${wd}"
+ return 1
+}
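+# For example (illustrative): 'canonicalize /usr/../bin/sh' prints the
+# physical, symlink-free path of /bin/sh, like "readlink -f" would.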
+
+prepcompress() {
+ local -a include exclude incl_d incl_f
+ local f g i real_f real_d
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+
+ # Canonicalize path names and check for their existence.
+ real_d=$(canonicalize "${ED}")
+ for (( i = 0; i < ${#PORTAGE_DOCOMPRESS[@]}; i++ )); do
+ real_f=$(canonicalize "${ED}${PORTAGE_DOCOMPRESS[i]}")
+ f=${real_f#"${real_d}"}
+ if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+ then
+ include[${#include[@]}]=${f:-/}
+ elif [[ ${i} -ge 3 ]]; then
+ ewarn "prepcompress:" \
+ "ignoring nonexistent path '${PORTAGE_DOCOMPRESS[i]}'"
+ fi
+ done
+ for (( i = 0; i < ${#PORTAGE_DOCOMPRESS_SKIP[@]}; i++ )); do
+ real_f=$(canonicalize "${ED}${PORTAGE_DOCOMPRESS_SKIP[i]}")
+ f=${real_f#"${real_d}"}
+ if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+ then
+ exclude[${#exclude[@]}]=${f:-/}
+ elif [[ ${i} -ge 1 ]]; then
+ ewarn "prepcompress:" \
+ "ignoring nonexistent path '${PORTAGE_DOCOMPRESS_SKIP[i]}'"
+ fi
+ done
+
+ # Remove redundant entries from lists.
+ # For the include list, remove any entries that are:
+ # a) contained in a directory in the include or exclude lists, or
+ # b) identical with an entry in the exclude list.
+ for (( i = ${#include[@]} - 1; i >= 0; i-- )); do
+ f=${include[i]}
+ for g in "${include[@]}"; do
+ if [[ ${f} == "${g%/}"/* ]]; then
+ unset include[i]
+ continue 2
+ fi
+ done
+ for g in "${exclude[@]}"; do
+ if [[ ${f} = "${g}" || ${f} == "${g%/}"/* ]]; then
+ unset include[i]
+ continue 2
+ fi
+ done
+ done
+ # For the exclude list, remove any entries that are:
+ # a) contained in a directory in the exclude list, or
+ # b) _not_ contained in a directory in the include list.
+ for (( i = ${#exclude[@]} - 1; i >= 0; i-- )); do
+ f=${exclude[i]}
+ for g in "${exclude[@]}"; do
+ if [[ ${f} == "${g%/}"/* ]]; then
+ unset exclude[i]
+ continue 2
+ fi
+ done
+ for g in "${include[@]}"; do
+ [[ ${f} == "${g%/}"/* ]] && continue 2
+ done
+ unset exclude[i]
+ done
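+	# For example (illustrative): include=(/a /a/b) and exclude=(/a/b/c /d)
+	# reduce to include=(/a) and exclude=(/a/b/c) after this pruning.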
+
+ # Split the include list into directories and files
+ for f in "${include[@]}"; do
+ if [[ -d ${ED}${f} ]]; then
+ incl_d[${#incl_d[@]}]=${f}
+ else
+ incl_f[${#incl_f[@]}]=${f}
+ fi
+ done
+
+ # Queue up for compression.
+ # ecompress{,dir} doesn't like to be called with empty argument lists.
+ [[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --limit ${PORTAGE_DOCOMPRESS_SIZE_LIMIT:-0} --queue "${incl_d[@]}"
+ [[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${ED}}"
+ [[ ${#exclude[@]} -gt 0 ]] && ecompressdir --ignore "${exclude[@]}"
+ return 0
+}
+
+install_qa_check() {
+ local f i qa_var x
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX= ED=${D}
+ fi
+
+	# PREFIX LOCAL: ED need not exist, whereas D does
+ cd "${D}" || die "cd failed"
+ # END PREFIX LOCAL
+
+ # Run QA checks from install-qa-check.d.
+ # Note: checks need to be run *before* stripping.
+ local f
+ # TODO: handle nullglob-like
+ for f in "${PORTAGE_BIN_PATH}"/install-qa-check.d/*; do
+		# Run in a subshell to treat it like an external script,
+ # but use 'source' to pass all variables through.
+ (
+ source "${f}" || eerror "Post-install QA check ${f##*/} failed to run"
+ )
+ done
+
+ # Run QA checks from repositories
+ # (yes, PORTAGE_ECLASS_LOCATIONS contains repo paths...)
+ local repo_location
+ for repo_location in "${PORTAGE_ECLASS_LOCATIONS[@]}"; do
+ for f in "${repo_location}"/metadata/install-qa-check.d/*; do
+ if [[ -f ${f} ]]; then
+ (
+ # allow inheriting eclasses
+ _IN_INSTALL_QA_CHECK=1
+ source "${f}" || eerror "Post-install QA check ${f##*/} failed to run"
+ )
+ fi
+ done
+ done
+
+ export STRIP_MASK
+ prepall
+ ___eapi_has_docompress && prepcompress
+ ecompressdir --dequeue
+ ecompress --dequeue
+
+ # PREFIX LOCAL:
+	# anything outside the prefix should be caught by the Prefix QA
+	# check, so if there's nothing in ED, we skip searching for QA
+	# checks there; the specific QA funcs can hence rely on ED existing
+ if [[ -d ${ED} ]] ; then
+ case ${CHOST} in
+ *-darwin*)
+ # Mach-O platforms (NeXT, Darwin, OSX)
+ install_qa_check_macho
+ ;;
+ *-interix*|*-winnt*)
+ # PECOFF platforms (Windows/Interix)
+ install_qa_check_pecoff
+ ;;
+ *-aix*)
+ # XCOFF platforms (AIX)
+ install_qa_check_xcoff
+ ;;
+ *)
+ # because this is the majority: ELF platforms (Linux,
+ # Solaris, *BSD, IRIX, etc.)
+ install_qa_check_elf
+ ;;
+ esac
+ fi
+
+	# this is basically here so that the diff with trunk remains just
+	# offset and not out of order
+ install_qa_check_misc
+ # END PREFIX LOCAL
+}
+
+install_qa_check_elf() {
+ # Create NEEDED.ELF.2 regardless of RESTRICT=binchecks, since this info is
+ # too useful not to have (it's required for things like preserve-libs), and
+ # it's tempting for ebuild authors to set RESTRICT=binchecks for packages
+ # containing pre-built binaries.
+ if type -P scanelf > /dev/null ; then
+ # Save NEEDED information after removing self-contained providers
+ rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
+ scanelf -qyRF '%a;%p;%S;%r;%n' "${D}" | { while IFS= read -r l; do
+ arch=${l%%;*}; l=${l#*;}
+ obj="/${l%%;*}"; l=${l#*;}
+ soname=${l%%;*}; l=${l#*;}
+ rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = " - " ] && rpath=""
+ needed=${l%%;*}; l=${l#*;}
+ echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+ done }
+
+ [ -n "${QA_SONAME_NO_SYMLINK}" ] && \
+ echo "${QA_SONAME_NO_SYMLINK}" > \
+ "${PORTAGE_BUILDDIR}"/build-info/QA_SONAME_NO_SYMLINK
+
+ if has binchecks ${RESTRICT} && \
+ [ -s "${PORTAGE_BUILDDIR}/build-info/NEEDED.ELF.2" ] ; then
+ eqawarn "QA Notice: RESTRICT=binchecks prevented checks on these ELF files:"
+ eqawarn "$(while read -r x; do x=${x#*;} ; x=${x%%;*} ; echo "${x#${EPREFIX}}" ; done < "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2)"
+ fi
+ fi
+}
+
+install_qa_check_misc() {
+ # Portage regenerates this on the installed system.
+ rm -f "${ED}"/usr/share/info/dir{,.gz,.bz2} || die "rm failed!"
+}
+
+install_qa_check_macho() {
+ if ! has binchecks ${RESTRICT} ; then
+ # on Darwin, dynamic libraries are called .dylibs instead of
+ # .sos. In addition the version component is before the
+ # extension, not after it. Check for this, and *only* warn
+ # about it. Some packages do ship .so files on Darwin and make
+ # it work (ugly!).
+ rm -f "${T}/mach-o.check"
+ find ${ED%/} -name "*.so" -or -name "*.so.*" | \
+ while read i ; do
+ [[ $(file $i) == *"Mach-O"* ]] && \
+ echo "${i#${D}}" >> "${T}/mach-o.check"
+ done
+ if [[ -f ${T}/mach-o.check ]] ; then
+ f=$(< "${T}/mach-o.check")
+ __vecho -ne '\a\n'
+ eqawarn "QA Notice: Found .so dynamic libraries on Darwin:"
+ eqawarn " ${f//$'\n'/\n }"
+ fi
+ rm -f "${T}/mach-o.check"
+
+ # The naming for dynamic libraries is different on Darwin; the
+		# version component is before the extension, instead of after
+ # it, as with .sos. Again, make this a warning only.
+ rm -f "${T}/mach-o.check"
+ find ${ED%/} -name "*.dylib.*" | \
+ while read i ; do
+ echo "${i#${D}}" >> "${T}/mach-o.check"
+ done
+ if [[ -f "${T}/mach-o.check" ]] ; then
+ f=$(< "${T}/mach-o.check")
+ __vecho -ne '\a\n'
+ eqawarn "QA Notice: Found wrongly named dynamic libraries on Darwin:"
+			eqawarn "  ${f//$'\n'/\n  }"
+ fi
+ rm -f "${T}/mach-o.check"
+ fi
+
+ install_name_is_relative() {
+ case $1 in
+ "@executable_path/"*) return 0 ;;
+ "@loader_path"/*) return 0 ;;
+ "@rpath/"*) return 0 ;;
+ *) return 1 ;;
+ esac
+ }
+
+ # While we generate the NEEDED files, check that we don't get kernel
+ # traps at runtime because of broken install_names on Darwin.
+ rm -f "${T}"/.install_name_check_failed
+ scanmacho -qyRF '%a;%p;%S;%n' "${D}" | { while IFS= read l ; do
+ arch=${l%%;*}; l=${l#*;}
+ obj="/${l%%;*}"; l=${l#*;}
+ install_name=${l%%;*}; l=${l#*;}
+ needed=${l%%;*}; l=${l#*;}
+
+ ignore=
+ qa_var="QA_IGNORE_INSTALL_NAME_FILES_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] &&
+ QA_IGNORE_INSTALL_NAME_FILES=(\"\${${qa_var}[@]}\")"
+
+ if [[ ${#QA_IGNORE_INSTALL_NAME_FILES[@]} -gt 1 ]] ; then
+ for x in "${QA_IGNORE_INSTALL_NAME_FILES[@]}" ; do
+ [[ ${obj##*/} == ${x} ]] && \
+ ignore=true
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_IGNORE_INSTALL_NAME_FILES} ; do
+ [[ ${obj##*/} == ${x} ]] && \
+ ignore=true
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+
+ # See if the self-reference install_name points to an existing
+ # and to be installed file. This usually is a symlink for the
+ # major version.
+ if install_name_is_relative ${install_name} ; then
+ # try to locate the library in the installed image
+ local inpath=${install_name#@*/}
+ local libl
+ for libl in $(find "${ED}" -name "${inpath##*/}") ; do
+ if [[ ${libl} == */${inpath} ]] ; then
+ install_name=/${libl#${D}}
+ break
+ fi
+ done
+ fi
+ if [[ ! -e ${D}${install_name} ]] ; then
+ eqawarn "QA Notice: invalid self-reference install_name ${install_name} in ${obj}"
+ # remember we are in an implicit subshell, that's
+ # why we touch a file here ... ideally we should be
+ # able to die correctly/nicely here
+ [[ -z ${ignore} ]] && touch "${T}"/.install_name_check_failed
+ fi
+
+ # this is ugly, paths with spaces won't work
+ for lib in ${needed//,/ } ; do
+ if [[ ${lib} == ${D}* ]] ; then
+ eqawarn "QA Notice: install_name references \${D}: ${lib} in ${obj}"
+ [[ -z ${ignore} ]] && touch "${T}"/.install_name_check_failed
+ elif [[ ${lib} == ${S}* ]] ; then
+ eqawarn "QA Notice: install_name references \${S}: ${lib} in ${obj}"
+ [[ -z ${ignore} ]] && touch "${T}"/.install_name_check_failed
+ elif ! install_name_is_relative ${lib} && [[ ! -e ${lib} && ! -e ${D}${lib} ]] ; then
+ eqawarn "QA Notice: invalid reference to ${lib} in ${obj}"
+ [[ -z ${ignore} ]] && touch "${T}"/.install_name_check_failed
+ fi
+ done
+
+ # backwards compatibility
+ echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ # what we use
+ echo "${arch};${obj};${install_name};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.MACHO.3
+ done }
+ if [[ -f ${T}/.install_name_check_failed ]] ; then
+ # secret switch "allow_broken_install_names" to get
+ # around this and install broken crap (not a good idea)
+ has allow_broken_install_names ${FEATURES} || \
+ die "invalid install_name found, your application or library will crash at runtime"
+ fi
+}
+
+install_qa_check_pecoff() {
+ local _pfx_scan="readpecoff ${CHOST}"
+
+ # this one uses readpecoff, which supports multiple prefix platforms!
+	# this is absolutely _not_ optimized for speed, and there is plenty of
+	# room for improvement by introducing one cache or another!
+ if ! has binchecks ${RESTRICT}; then
+ # copied and adapted from the above scanelf code.
+ local qa_var insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
+ local f x
+
+ # display warnings when using stricter because we die afterwards
+ if has stricter ${FEATURES} ; then
+ unset PORTAGE_QUIET
+ fi
+
+ local _exec_find_opt="-executable"
+ [[ ${CHOST} == *-winnt* ]] && _exec_find_opt='-name *.dll -o -name *.exe'
+
+ # Make sure we disallow insecure RUNPATH/RPATH's
+ # Don't want paths that point to the tree where the package was built
+ # (older, broken libtools would do this). Also check for null paths
+ # because the loader will search $PWD when it finds null paths.
+
+ f=$(
+ find "${ED}" -type f '(' ${_exec_find_opt} ')' -print0 | xargs -0 ${_pfx_scan} | \
+ while IFS=";" read arch obj soname rpath needed ; do \
+ echo "${rpath}" | grep -E "(${PORTAGE_BUILDDIR}|: |::|^:|^ )" > /dev/null 2>&1 \
+ && echo "${obj}"; done;
+ )
+ # Reject set*id binaries with $ORIGIN in RPATH #260331
+ x=$(
+ find "${ED}" -type f '(' -perm -u+s -o -perm -g+s ')' -print0 | \
+ xargs -0 ${_pfx_scan} | while IFS=";" read arch obj soname rpath needed; do \
+ echo "${rpath}" | grep '$ORIGIN' > /dev/null 2>&1 && echo "${obj}"; done;
+ )
+ if [[ -n ${f}${x} ]] ; then
+ __vecho -ne '\a\n'
+			eqawarn "QA Notice: The following files contain insecure RUNPATHs"
+ eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+ eqawarn " with the maintaining herd of the package."
+ eqawarn "${f}${f:+${x:+\n}}${x}"
+ __vecho -ne '\a\n'
+ if [[ -n ${x} ]] || has stricter ${FEATURES} ; then
+ insecure_rpath=1
+ else
+ eqawarn "cannot automatically fix runpaths on interix platforms!"
+ fi
+ fi
+
+ rm -f "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ rm -f "${PORTAGE_BUILDDIR}"/build-info/NEEDED.PECOFF.1
+
+ # Save NEEDED information after removing self-contained providers
+ find "${ED}" -type f '(' ${_exec_find_opt} ')' -print0 | xargs -0 ${_pfx_scan} | { while IFS=';' read arch obj soname rpath needed; do
+ # need to strip image dir from object name.
+ obj="/${obj#${D}}"
+ if [ -z "${rpath}" -o -n "${rpath//*ORIGIN*}" ]; then
+ # object doesn't contain $ORIGIN in its runpath attribute
+ echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ echo "${arch};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.PECOFF.1
+ else
+ dir=${obj%/*}
+ # replace $ORIGIN with the dirname of the current object for the lookup
+ opath=$(echo :${rpath}: | sed -e "s#.*:\(.*\)\$ORIGIN\(.*\):.*#\1${dir}\2#")
+ sneeded=$(echo ${needed} | tr , ' ')
+ rneeded=""
+ for lib in ${sneeded}; do
+ found=0
+ for path in ${opath//:/ }; do
+ [ -e "${ED}/${path}/${lib}" ] && found=1 && break
+ done
+ [ "${found}" -eq 0 ] && rneeded="${rneeded},${lib}"
+ done
+ rneeded=${rneeded:1}
+ if [ -n "${rneeded}" ]; then
+ echo "${obj} ${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ echo "${arch};${obj};${soname};${rpath};${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.PECOFF.1
+ fi
+ fi
+ done }
+
+ if [[ ${insecure_rpath} -eq 1 ]] ; then
+ die "Aborting due to serious QA concerns with RUNPATH/RPATH"
+ elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
+ die "Aborting due to QA concerns: ${die_msg}"
+ fi
+
+ local _so_ext='.so*'
+
+ case "${CHOST}" in
+ *-winnt*) _so_ext=".dll" ;; # no "*" intentionally!
+ esac
+
+ # Run some sanity checks on shared libraries
+ for d in "${ED}"lib* "${ED}"usr/lib* ; do
+ [[ -d "${d}" ]] || continue
+ f=$(find "${d}" -name "lib*${_so_ext}" -print0 | \
+ xargs -0 ${_pfx_scan} | while IFS=";" read arch obj soname rpath needed; \
+ do [[ -z "${soname}" ]] && echo "${obj}"; done)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\a\n'
+ eqawarn "QA Notice: The following shared libraries lack a SONAME"
+ eqawarn "${f}"
+ __vecho -ne '\a\n'
+ sleep 1
+ fi
+
+ f=$(find "${d}" -name "lib*${_so_ext}" -print0 | \
+ xargs -0 ${_pfx_scan} | while IFS=";" read arch obj soname rpath needed; \
+ do [[ -z "${needed}" ]] && echo "${obj}"; done)
+ if [[ -n ${f} ]] ; then
+ __vecho -ne '\a\n'
+ eqawarn "QA Notice: The following shared libraries lack NEEDED entries"
+ eqawarn "${f}"
+ __vecho -ne '\a\n'
+ sleep 1
+ fi
+ done
+
+ PORTAGE_QUIET=${tmp_quiet}
+ fi
+}
+
+install_qa_check_xcoff() {
+ if ! has binchecks ${RESTRICT}; then
+ local tmp_quiet=${PORTAGE_QUIET}
+ local queryline deplib
+ local insecure_rpath_list= undefined_symbols_list=
+
+ # display warnings when using stricter because we die afterwards
+ if has stricter ${FEATURES} ; then
+ unset PORTAGE_QUIET
+ fi
+
+ rm -f "${PORTAGE_BUILDDIR}"/build-info/NEEDED.XCOFF.1
+
+ local neededfd
+ for neededfd in {3..1024} none; do ( : <&${neededfd} ) 2>/dev/null || break; done
+ [[ ${neededfd} != none ]] || die "cannot find free file descriptor handle"
+
+ eval "exec ${neededfd}>\"${PORTAGE_BUILDDIR}\"/build-info/NEEDED.XCOFF.1" || die "cannot open ${PORTAGE_BUILDDIR}/build-info/NEEDED.XCOFF.1"
+
+ ( # work around a problem in /usr/bin/dump (used by aixdll-query)
+ # dumping core when path names get too long.
+ cd "${ED}" >/dev/null &&
+ find . -not -type d -exec \
+ aixdll-query '{}' FILE MEMBER FLAGS FORMAT RUNPATH DEPLIBS ';'
+ ) > "${T}"/needed 2>/dev/null
+
+		# Symlinking shared archive libraries is not a good idea on AIX,
+		# as there is nothing like "soname" at the pure filesystem level.
+		# So we create a copy instead of the symlink.
+ local prev_FILE=
+ local FILE MEMBER FLAGS FORMAT RUNPATH DEPLIBS
+ while read queryline
+ do
+ FILE= MEMBER= FLAGS= FORMAT= RUNPATH= DEPLIBS=
+ eval ${queryline}
+ FILE=${FILE#./}
+
+ if [[ ${prev_FILE} != ${FILE} ]]; then
+ if [[ " ${FLAGS} " == *" SHROBJ "* && -h ${ED}${FILE} ]]; then
+ prev_FILE=${FILE}
+ local target=$(readlink "${ED}${FILE}")
+ if [[ ${target} == /* ]]; then
+ target=${D}${target}
+ else
+ target=${FILE%/*}/${target}
+ fi
+ rm -f "${ED}${FILE}" || die "cannot prune ${FILE}"
+ cp -f "${ED}${target}" "${ED}${FILE}" || die "cannot copy ${target} to ${FILE}"
+ fi
+ fi
+ done <"${T}"/needed
+
+ prev_FILE=
+ while read queryline
+ do
+ FILE= MEMBER= FLAGS= FORMAT= RUNPATH= DEPLIBS=
+ eval ${queryline}
+ FILE=${FILE#./}
+
+ if [[ -n ${MEMBER} && ${prev_FILE} != ${FILE} ]]; then
+ # Save NEEDED information for each archive library stub
+ # even if it is static only: the already installed archive
+ # may contain shared objects to be preserved.
+ echo "${FORMAT##* }${FORMAT%%-*};${EPREFIX}/${FILE};${FILE##*/};;" >&${neededfd}
+ fi
+ prev_FILE=${FILE}
+
+ # shared objects have both EXEC and SHROBJ flags,
+ # while executables have EXEC flag only.
+ [[ " ${FLAGS} " == *" EXEC "* ]] || continue
+
+ # Make sure we disallow insecure RUNPATH's
+ # Don't want paths that point to the tree where the package was built
+ # (older, broken libtools would do this). Also check for null paths
+ # because the loader will search $PWD when it finds null paths.
+ # And we really want absolute paths only.
+ if [[ -n $(echo ":${RUNPATH}:" | grep -E "(${PORTAGE_BUILDDIR}|::|:[^/])") ]]; then
+ insecure_rpath_list="${insecure_rpath_list}\n${FILE}${MEMBER:+[${MEMBER}]}"
+ fi
+
+ local needed=
+ [[ -n ${MEMBER} ]] && needed=${FILE##*/}
+ for deplib in ${DEPLIBS}; do
+ eval deplib=${deplib}
+ if [[ ${deplib} == '.' || ${deplib} == '..' ]]; then
+				# Although we do have runtime linking, we don't want undefined symbols.
+				# AIX indicates these by a dependency on either '.' or '..'
+ undefined_symbols_list="${undefined_symbols_list}\n${FILE}"
+ else
+ needed="${needed}${needed:+,}${deplib}"
+ fi
+ done
+
+ FILE=${EPREFIX}/${FILE}
+
+ [[ -n ${MEMBER} ]] && MEMBER="[${MEMBER}]"
+ # Save NEEDED information
+ echo "${FORMAT##* }${FORMAT%%-*};${FILE}${MEMBER};${FILE##*/}${MEMBER};${RUNPATH};${needed}" >&${neededfd}
+ done <"${T}"/needed
+
+ eval "exec ${neededfd}>&-" || die "cannot close handle to ${PORTAGE_BUILDDIR}/build-info/NEEDED.XCOFF.1"
+
+ if [[ -n ${undefined_symbols_list} ]]; then
+ __vecho -ne '\a\n'
+ eqawarn "QA Notice: The following files contain undefined symbols."
+ eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+ eqawarn " with 'prefix' as the maintaining herd of the package."
+ eqawarn "${undefined_symbols_list}"
+ __vecho -ne '\a\n'
+ fi
+
+ if [[ -n ${insecure_rpath_list} ]] ; then
+ __vecho -ne '\a\n'
+		eqawarn "QA Notice: The following files contain insecure RUNPATHs"
+ eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+ eqawarn " with 'prefix' as the maintaining herd of the package."
+ eqawarn "${insecure_rpath_list}"
+ __vecho -ne '\a\n'
+ if has stricter ${FEATURES} ; then
+ insecure_rpath=1
+ fi
+ fi
+
+ if [[ ${insecure_rpath} -eq 1 ]] ; then
+ die "Aborting due to serious QA concerns with RUNPATH/RPATH"
+ elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
+ die "Aborting due to QA concerns: ${die_msg}"
+ fi
+
+ PORTAGE_QUIET=${tmp_quiet}
+ fi
+}
+
+install_mask() {
+ local root="$1"
+ shift
+ local install_mask="$*"
+
+ # we don't want globbing for initial expansion, but afterwards, we do
+ local shopts=$-
+ set -o noglob
+ local no_inst
+ for no_inst in ${install_mask}; do
+ set +o noglob
+ __quiet_mode || einfo "Removing ${no_inst}"
+ # normal stuff
+ rm -Rf "${root}"/${no_inst} >&/dev/null
+
+ # we also need to handle globs (*.a, *.h, etc)
+ find "${root}" \( -path "${no_inst}" -or -name "${no_inst}" \) \
+ -exec rm -fR {} \; >/dev/null 2>&1
+ done
+ # set everything back the way we found it
+ set +o noglob
+ set -${shopts}
+}
+
+preinst_aix() {
+ if [[ ${CHOST} != *-aix* ]] || has binchecks ${RESTRICT}; then
+ return 0
+ fi
+ local ar strip
+ if type ${CHOST}-ar >/dev/null 2>&1 && type ${CHOST}-strip >/dev/null 2>&1; then
+ ar=${CHOST}-ar
+ strip=${CHOST}-strip
+ elif [[ ${CBUILD} == "${CHOST}" ]] && type ar >/dev/null 2>&1 && type strip >/dev/null 2>&1; then
+ ar=ar
+ strip=strip
+ elif [[ -x /usr/ccs/bin/ar && -x /usr/ccs/bin/strip ]]; then
+ ar=/usr/ccs/bin/ar
+ strip=/usr/ccs/bin/strip
+ else
+ die "cannot find where to use 'ar' and 'strip' from"
+ fi
+ local archives_members= archives=() helperfiles=()
+ local archive_member soname runpath needed archive contentmember
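+	# Each NEEDED.XCOFF.1 record (written by the scan above) is
+	# ';'-separated: <type>;<path>;<soname>;<runpath>;<needed>,
+	# with archive members recorded as path[member]. The parameter
+	# expansions below peel these fields off one by one.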
+ while read archive_member; do
+ archive_member=${archive_member#*;${EPREFIX}/} # drop "^type;EPREFIX/"
+ soname=${archive_member#*;}
+ runpath=${soname#*;}
+ needed=${runpath#*;}
+ soname=${soname%%;*}
+ runpath=${runpath%%;*}
+ archive_member=${archive_member%%;*} # drop ";soname;runpath;needed$"
+ archive=${archive_member%[*}
+ if [[ ${archive_member} != *'['*']' ]]; then
+ if [[ "${soname};${runpath};${needed}" == "${archive##*/};;" && -e ${EROOT}${archive} ]]; then
+			# most likely an archive stub that already exists; we may
+			# have to preserve members that are shared objects.
+ archives[${#archives[@]}]=${archive}
+ fi
+ continue
+ fi
+ archives_members="${archives_members}:(${archive_member}):"
+ contentmember="${archive%/*}/.${archive##*/}${archive_member#${archive}}"
+ # portage does os.lstat() on merged files every now
+ # and then, so keep stamp-files for archive members
+ # around to get the preserve-libs feature working.
+ helperfiles[${#helperfiles[@]}]=${ED}${contentmember}
+ done < "${PORTAGE_BUILDDIR}"/build-info/NEEDED.XCOFF.1
+	if [[ ${#helperfiles[@]} -gt 0 ]]; then
+ rm -f "${helperfiles[@]}" || die "cannot prune ${helperfiles[@]}"
+ local f prev=
+ for f in "${helperfiles[@]}"
+ do
+ if [[ -z ${prev} ]]; then
+ { echo "Please leave this file alone, it is an important helper"
+ echo "for portage to implement the 'preserve-libs' feature on AIX."
+ } > "${f}" || die "cannot create ${f}"
+ chmod 0400 "${f}" || die "cannot chmod ${f}"
+ prev=${f}
+ else
+ ln "${prev}" "${f}" || die "cannot create hardlink ${f}"
+ fi
+ done
+ fi
+
+ local preservemembers libmetadir prunedirs=()
+ local FILE MEMBER FLAGS
+ for archive in "${archives[@]}"; do
+ preservemembers=
+ while read line; do
+ [[ -n ${line} ]] || continue
+ FILE= MEMBER= FLAGS=
+ eval ${line}
+ [[ ${FILE} == ${EROOT}${archive} ]] ||
+ die "invalid result of aixdll-query for ${EROOT}${archive}"
+ [[ -n ${MEMBER} && " ${FLAGS} " == *" SHROBJ "* ]] || continue
+ [[ ${archives_members} == *":(${archive}[${MEMBER}]):"* ]] && continue
+ preservemembers="${preservemembers} ${MEMBER}"
+ done <<-EOF
+ $(aixdll-query "${EROOT}${archive}" FILE MEMBER FLAGS)
+ EOF
+ [[ -n ${preservemembers} ]] || continue
+ einfo "preserving (on spec) ${archive}[${preservemembers# }]"
+ libmetadir=${ED}${archive%/*}/.${archive##*/}
+ mkdir "${libmetadir}" || die "cannot create ${libmetadir}"
+ pushd "${libmetadir}" >/dev/null || die "cannot cd to ${libmetadir}"
+ ${ar} -X32_64 -x "${EROOT}${archive}" ${preservemembers} || die "cannot unpack ${EROOT}${archive}"
+ chmod u+w ${preservemembers} || die "cannot chmod${preservemembers}"
+ ${strip} -X32_64 -e ${preservemembers} || die "cannot strip${preservemembers}"
+ ${ar} -X32_64 -q "${ED}${archive}" ${preservemembers} || die "cannot update ${archive}"
+ eend $?
+ popd >/dev/null || die "cannot leave ${libmetadir}"
+ prunedirs[${#prunedirs[@]}]=${libmetadir}
+ done
+ [[ ${#prunedirs[@]} == 0 ]] ||
+ rm -rf "${prunedirs[@]}" || die "cannot prune ${prunedirs[@]}"
+ return 0
+}
+
+postinst_aix() {
+ if [[ ${CHOST} != *-aix* ]] || has binchecks ${RESTRICT}; then
+ return 0
+ fi
+ local MY_PR=${PR%r0}
+ local ar strip
+ if type ${CHOST}-ar >/dev/null 2>&1 && type ${CHOST}-strip >/dev/null 2>&1; then
+ ar=${CHOST}-ar
+ strip=${CHOST}-strip
+ elif [[ ${CBUILD} == "${CHOST}" ]] && type ar >/dev/null 2>&1 && type strip >/dev/null 2>&1; then
+ ar=ar
+ strip=strip
+ elif [[ -x /usr/ccs/bin/ar && -x /usr/ccs/bin/strip ]]; then
+ ar=/usr/ccs/bin/ar
+ strip=/usr/ccs/bin/strip
+ else
+ die "cannot find where to use 'ar' and 'strip' from"
+ fi
+ local archives_members= archives=() activearchives=
+ local archive_member soname runpath needed
+ while read archive_member; do
+ archive_member=${archive_member#*;${EPREFIX}/} # drop "^type;EPREFIX/"
+ soname=${archive_member#*;}
+ runpath=${soname#*;}
+ needed=${runpath#*;}
+ soname=${soname%%;*}
+ runpath=${runpath%%;*}
+ archive_member=${archive_member%%;*} # drop ";soname;runpath;needed$"
+ [[ ${archive_member} == *'['*']' ]] && continue
+ [[ "${soname};${runpath};${needed}" == "${archive_member##*/};;" ]] || continue
+		# most likely an archive stub; we might have to drop
+		# members that are preserved shared objects.
+ archives[${#archives[@]}]=${archive_member}
+ activearchives="${activearchives}:(${archive_member}):"
+ done < "${PORTAGE_BUILDDIR}"/build-info/NEEDED.XCOFF.1
+
+ local type allcontentmembers= oldarchives=()
+ local contentmember
+ while read type contentmember; do
+ [[ ${type} == 'obj' ]] || continue
+ contentmember=${contentmember% *} # drop " timestamp$"
+ contentmember=${contentmember% *} # drop " hash$"
+ [[ ${contentmember##*/} == *'['*']' ]] || continue
+ contentmember=${contentmember#${EPREFIX}/}
+ allcontentmembers="${allcontentmembers}:(${contentmember}):"
+ contentmember=${contentmember%[*}
+ contentmember=${contentmember%/.*}/${contentmember##*/.}
+ [[ ${activearchives} == *":(${contentmember}):"* ]] && continue
+ oldarchives[${#oldarchives[@]}]=${contentmember}
+ done < "${EPREFIX}/var/db/pkg/${CATEGORY}/${P}${MY_PR:+-}${MY_PR}/CONTENTS"
+
+ local archive line delmembers
+ local FILE MEMBER FLAGS
+ for archive in "${archives[@]}"; do
+ [[ -r ${EROOT}${archive} && -w ${EROOT}${archive} ]] ||
+ chmod a+r,u+w "${EROOT}${archive}" || die "cannot chmod ${EROOT}${archive}"
+ delmembers=
+ while read line; do
+ [[ -n ${line} ]] || continue
+ FILE= MEMBER= FLAGS=
+ eval ${line}
+ [[ ${FILE} == "${EROOT}${archive}" ]] ||
+ die "invalid result '${FILE}' of aixdll-query, expected '${EROOT}${archive}'"
+ [[ -n ${MEMBER} && " ${FLAGS} " == *" SHROBJ "* ]] || continue
+ [[ ${allcontentmembers} == *":(${archive%/*}/.${archive##*/}[${MEMBER}]):"* ]] && continue
+ delmembers="${delmembers} ${MEMBER}"
+ done <<-EOF
+ $(aixdll-query "${EROOT}${archive}" FILE MEMBER FLAGS)
+ EOF
+ [[ -n ${delmembers} ]] || continue
+ einfo "dropping ${archive}[${delmembers# }]"
+ rm -f "${EROOT}${archive}".new || die "cannot prune ${EROOT}${archive}.new"
+ cp "${EROOT}${archive}" "${EROOT}${archive}".new || die "cannot backup ${archive}"
+ ${ar} -X32_64 -z -o -d "${EROOT}${archive}".new ${delmembers} || die "cannot remove${delmembers} from ${archive}.new"
+ mv -f "${EROOT}${archive}".new "${EROOT}${archive}" || die "cannot put ${EROOT}${archive} in place"
+ eend $?
+ done
+ local libmetadir keepmembers prunedirs=()
+ for archive in "${oldarchives[@]}"; do
+ [[ -r ${EROOT}${archive} && -w ${EROOT}${archive} ]] ||
+ chmod a+r,u+w "${EROOT}${archive}" || die "cannot chmod ${EROOT}${archive}"
+ keepmembers=
+ while read line; do
+ FILE= MEMBER= FLAGS=
+ eval ${line}
+ [[ ${FILE} == "${EROOT}${archive}" ]] ||
+ die "invalid result of aixdll-query for ${EROOT}${archive}"
+ [[ -n ${MEMBER} && " ${FLAGS} " == *" SHROBJ "* ]] || continue
+ [[ ${allcontentmembers} == *":(${archive%/*}/.${archive##*/}[${MEMBER}]):"* ]] || continue
+ keepmembers="${keepmembers} ${MEMBER}"
+ done <<-EOF
+ $(aixdll-query "${EROOT}${archive}" FILE MEMBER FLAGS)
+ EOF
+
+ if [[ -n ${keepmembers} ]]; then
+ einfo "preserving (extra)${keepmembers}"
+ libmetadir=${EROOT}${archive%/*}/.${archive##*/}
+ [[ ! -e ${libmetadir} ]] || rm -rf "${libmetadir}" || die "cannot prune ${libmetadir}"
+ mkdir "${libmetadir}" || die "cannot create ${libmetadir}"
+ pushd "${libmetadir}" >/dev/null || die "cannot cd to ${libmetadir}"
+ ${ar} -X32_64 -x "${EROOT}${archive}" ${keepmembers} || die "cannot unpack ${archive}"
+ ${strip} -X32_64 -e ${keepmembers} || die "cannot strip ${keepmembers}"
+ rm -f "${EROOT}${archive}.new" || die "cannot prune ${EROOT}${archive}.new"
+ ${ar} -X32_64 -q "${EROOT}${archive}.new" ${keepmembers} || die "cannot create ${EROOT}${archive}.new"
+ mv -f "${EROOT}${archive}.new" "${EROOT}${archive}" || die "cannot put ${EROOT}${archive} in place"
+ popd > /dev/null || die "cannot leave ${libmetadir}"
+ prunedirs[${#prunedirs[@]}]=${libmetadir}
+ eend $?
+ fi
+ done
+ [[ ${#prunedirs[@]} == 0 ]] ||
+ rm -rf "${prunedirs[@]}" || die "cannot prune ${prunedirs[@]}"
+ return 0
+}
+
+preinst_mask() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+
+ # Make sure $PWD is not ${D} so that we don't leave gmon.out files
+ # in there in case any tools were built with -pg in CFLAGS.
+ cd "${T}"
+
+ # remove man pages, info pages, docs if requested
+ local f
+ for f in man info doc; do
+ if has no${f} $FEATURES; then
+ INSTALL_MASK="${INSTALL_MASK} ${EPREFIX}/usr/share/${f}"
+ fi
+ done
+
+ install_mask "${ED}" "${INSTALL_MASK}"
+
+	# remove share dir if unnecessary
+ if has nodoc $FEATURES || has noman $FEATURES || has noinfo $FEATURES; then
+ rmdir "${ED}usr/share" &> /dev/null
+ fi
+}
+
+preinst_sfperms() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+
+ # Smart FileSystem Permissions
+ if has sfperms $FEATURES; then
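+		# e.g. a 04755 setuid binary ends up 04711 (go-r), while a
+		# 02755 setgid-only binary ends up 02751 (o-r).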
+ local i
+ find "${ED}" -type f -perm -4000 -print0 | \
+ while read -r -d $'\0' i ; do
+ if [ -n "$(find "$i" -perm -2000)" ] ; then
+ ebegin ">>> SetUID and SetGID: [chmod o-r] /${i#${ED}}"
+ chmod o-r "$i"
+ eend $?
+ else
+ ebegin ">>> SetUID: [chmod go-r] /${i#${ED}}"
+ chmod go-r "$i"
+ eend $?
+ fi
+ done
+ find "${ED}" -type f -perm -2000 -print0 | \
+ while read -r -d $'\0' i ; do
+ if [ -n "$(find "$i" -perm -4000)" ] ; then
+ # This case is already handled
+ # by the SetUID check above.
+ true
+ else
+ ebegin ">>> SetGID: [chmod o-r] /${i#${ED}}"
+ chmod o-r "$i"
+ eend $?
+ fi
+ done
+ fi
+}
+
+preinst_suid_scan() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+
+ # total suid control.
+ if has suidctl $FEATURES; then
+ local i sfconf x
+ sfconf=${PORTAGE_CONFIGROOT}etc/portage/suidctl.conf
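+		# suidctl.conf lists one approved absolute path per line; any
+		# unregistered suid/sgid file has its bits stripped and a
+		# commented-out template entry appended below for review.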
+ # sandbox prevents us from writing directly
+ # to files outside of the sandbox, but this
+		# can easily be bypassed using the addwrite() function
+ addwrite "${sfconf}"
+ __vecho ">>> Performing suid scan in ${ED}"
+ for i in $(find "${ED}" -type f \( -perm -4000 -o -perm -2000 \) ); do
+ if [ -s "${sfconf}" ]; then
+ install_path=/${i#${ED}}
+ if grep -q "^${install_path}\$" "${sfconf}" ; then
+ __vecho "- ${install_path} is an approved suid file"
+ else
+ __vecho ">>> Removing sbit on non registered ${install_path}"
+ for x in 5 4 3 2 1 0; do sleep 0.25 ; done
+ ls_ret=$(ls -ldh "${i}")
+ chmod ugo-s "${i}"
+ grep "^#${install_path}$" "${sfconf}" > /dev/null || {
+ __vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
+ echo "## ${ls_ret%${ED}*}${install_path}" >> "${sfconf}"
+ echo "#${install_path}" >> "${sfconf}"
+ # no delwrite() eh?
+ # delwrite ${sconf}
+ }
+ fi
+ else
+ __vecho "suidctl feature set but you are lacking a ${sfconf}"
+ fi
+ done
+ fi
+}
+
+preinst_selinux_labels() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+ if has selinux ${FEATURES}; then
+ # SELinux file labeling (needs to execute after preinst)
+ # only attempt to label if setfiles is executable
+ # and 'context' is available on selinuxfs.
+ if [ -f /selinux/context -o -f /sys/fs/selinux/context ] && \
+ [ -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
+ __vecho ">>> Setting SELinux security labels"
+ (
+ eval "$(/usr/sbin/selinuxconfig)" || \
+ die "Failed to determine SELinux policy paths.";
+
+ addwrite /selinux/context
+ addwrite /sys/fs/selinux/context
+
+ /usr/sbin/setfiles "${file_contexts_path}" -r "${D}" "${D}"
+ ) || die "Failed to set SELinux security labels."
+ else
+ # nonfatal, since merging can happen outside a SE kernel
+ # like during a recovery situation
+ __vecho "!!! Unable to set SELinux security labels"
+ fi
+ fi
+}
+
+__dyn_package() {
+ local PROOT
+
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX= ED=${D}
+ fi
+
+ # Make sure $PWD is not ${D} so that we don't leave gmon.out files
+ # in there in case any tools were built with -pg in CFLAGS.
+
+ cd "${T}"
+
+ if [[ -n ${PKG_INSTALL_MASK} ]] ; then
+ PROOT=${T}/packaging/
+ # make a temporary copy of ${D} so that any modifications we do that
+ # are binpkg specific, do not influence the actual installed image.
+ rm -rf "${PROOT}" || die "failed removing stale package tree"
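+		# use hardlinks (cp -l) when this cp supports them, so the
+		# temporary packaging copy doesn't duplicate file data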
+ cp -pPR $(cp --help | grep -qs -e-l && echo -l) \
+ "${D}" "${PROOT}" \
+ || die "failed creating packaging tree"
+
+ install_mask "${PROOT%/}${EPREFIX}/" "${PKG_INSTALL_MASK}"
+ else
+ PROOT=${D}
+ fi
+
+ local tar_options=""
+ [[ $PORTAGE_VERBOSE = 1 ]] && tar_options+=" -v"
+ has xattr ${FEATURES} && [[ $(tar --help 2> /dev/null) == *--xattrs* ]] && tar_options+=" --xattrs"
+ # Sandbox is disabled in case the user wants to use a symlink
+ # for $PKGDIR and/or $PKGDIR/All.
+ export SANDBOX_ON="0"
+ [ -z "${PORTAGE_BINPKG_TMPFILE}" ] && \
+ die "PORTAGE_BINPKG_TMPFILE is unset"
+ mkdir -p "${PORTAGE_BINPKG_TMPFILE%/*}" || die "mkdir failed"
+ tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${PROOT}" . | \
+ $PORTAGE_BZIP2_COMMAND -c > "$PORTAGE_BINPKG_TMPFILE"
+ assert "failed to pack binary package: '$PORTAGE_BINPKG_TMPFILE'"
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/xpak-helper.py recompose \
+ "$PORTAGE_BINPKG_TMPFILE" "$PORTAGE_BUILDDIR/build-info"
+ if [ $? -ne 0 ]; then
+ rm -f "${PORTAGE_BINPKG_TMPFILE}"
+ die "Failed to append metadata to the tbz2 file"
+ fi
+ local md5_hash=""
+ if type md5sum &>/dev/null ; then
+ md5_hash=$(md5sum "${PORTAGE_BINPKG_TMPFILE}")
+ md5_hash=${md5_hash%% *}
+ elif type md5 &>/dev/null ; then
+ md5_hash=$(md5 "${PORTAGE_BINPKG_TMPFILE}")
+ md5_hash=${md5_hash##* }
+ fi
+ [ -n "${md5_hash}" ] && \
+ echo ${md5_hash} > "${PORTAGE_BUILDDIR}"/build-info/BINPKGMD5
+ __vecho ">>> Done."
+
+ # cleanup our temp tree
+ [[ -n ${PKG_INSTALL_MASK} ]] && rm -rf "${PROOT}"
+ cd "${PORTAGE_BUILDDIR}"
+ >> "$PORTAGE_BUILDDIR/.packaged" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.packaged"
+}
+
+__dyn_spec() {
+ local sources_dir=${T}/rpmbuild/SOURCES
+ mkdir -p "${sources_dir}"
+ declare -a tar_args=("${EBUILD}")
+ [[ -d ${FILESDIR} ]] && tar_args=("${EBUILD}" "${FILESDIR}")
+ tar czf "${sources_dir}/${PF}.tar.gz" \
+ "${tar_args[@]}" || \
+ die "Failed to create base rpm tarball."
+
+ cat <<__END1__ > ${PF}.spec
+Summary: ${DESCRIPTION}
+Name: ${PN}
+Version: ${PV}
+Release: ${PR}
+License: GPL
+Group: portage/${CATEGORY}
+Source: ${PF}.tar.gz
+%description
+${DESCRIPTION}
+
+${HOMEPAGE}
+
+%prep
+%setup -c
+
+%build
+
+%install
+
+%clean
+
+%files
+/
+__END1__
+
+}
+
+__dyn_rpm() {
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX=
+ fi
+
+ cd "${T}" || die "cd failed"
+ local machine_name=${CHOST%%-*}
+ local dest_dir=${T}/rpmbuild/RPMS/${machine_name}
+ addwrite "${RPMDIR}"
+ __dyn_spec
+ HOME=${T} \
+ rpmbuild -bb --clean --nodeps --rmsource "${PF}.spec" --buildroot "${D}" --target "${CHOST}" || die "Failed to integrate rpm spec file"
+ install -D "${dest_dir}/${PN}-${PV}-${PR}.${machine_name}.rpm" \
+ "${RPMDIR}/${CATEGORY}/${PN}-${PV}-${PR}.rpm" || \
+ die "Failed to move rpm"
+}
+
+die_hooks() {
+ [[ -f $PORTAGE_BUILDDIR/.die_hooks ]] && return
+ local x
+ for x in $EBUILD_DEATH_HOOKS ; do
+ $x >&2
+ done
+ > "$PORTAGE_BUILDDIR/.die_hooks"
+}
+
+success_hooks() {
+ local x
+ for x in $EBUILD_SUCCESS_HOOKS ; do
+ $x
+ done
+}
+
+install_hooks() {
+ local hooks_dir="${PORTAGE_CONFIGROOT}etc/portage/hooks/install"
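+	# Every executable found in this directory is run; the hooks' exit
+	# codes are OR-ed together to form this function's return value.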
+ local fp
+ local ret=0
+ shopt -s nullglob
+ for fp in "${hooks_dir}"/*; do
+ if [ -x "$fp" ]; then
+ "$fp"
+ ret=$(( $ret | $? ))
+ fi
+ done
+ shopt -u nullglob
+ return $ret
+}
+
+if [ -n "${MISC_FUNCTIONS_ARGS}" ]; then
+ __source_all_bashrcs
+ [ "$PORTAGE_DEBUG" == "1" ] && set -x
+ for x in ${MISC_FUNCTIONS_ARGS}; do
+ ${x}
+ done
+ unset x
+ [[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+ if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+ [[ ! -s $SANDBOX_LOG ]]
+ "$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+ fi
+fi
+
+:
diff --git a/usr/lib/portage/bin/phase-functions.sh b/usr/lib/portage/bin/phase-functions.sh
new file mode 100755
index 0000000..660320a
--- /dev/null
+++ b/usr/lib/portage/bin/phase-functions.sh
@@ -0,0 +1,1013 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Hardcoded bash lists are needed for backward compatibility with
+# <portage-2.1.4 since they assume that a newly installed version
+# of ebuild.sh will work for pkg_postinst, pkg_prerm, and pkg_postrm
+# when portage is upgrading itself.
+
+PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
+ EAPI HDEPEND HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
+ PDEPEND PROVIDE RDEPEND REPOSITORY RESTRICT SLOT SRC_URI"
+
+PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE EBUILD_PHASE_FUNC \
+ EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
+ PM_EBUILD_HOOK_DIR \
+ PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC \
+ PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
+ PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUILD_GROUP \
+ PORTAGE_BUILD_USER PORTAGE_BUNZIP2_COMMAND \
+ PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
+ PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
+ PORTAGE_ECLASS_LOCATIONS \
+ PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
+ PORTAGE_INTERNAL_CALLER PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
+ PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_OVERRIDE_EPREFIX \
+ PORTAGE_PYM_PATH PORTAGE_PYTHON PORTAGE_PYTHONPATH \
+ PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
+ PORTAGE_REPO_NAME PORTAGE_REPOSITORIES PORTAGE_RESTRICT \
+ PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
+ PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
+ PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTAGE_XATTR_EXCLUDE \
+ PORTDIR \
+ PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR \
+ __PORTAGE_HELPER __PORTAGE_TEST_HARDLINK_LOCKS ED EROOT"
+
+PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
+
+# Variables that portage sets but doesn't mark readonly.
+# In order to prevent changed values from causing unexpected
+# interference, they are filtered out of the environment when
+# it is saved or loaded (any mutations do not persist).
+PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
+
+# @FUNCTION: __filter_readonly_variables
+# @DESCRIPTION: [--filter-sandbox] [--allow-extra-vars]
+# Read an environment from stdin and echo to stdout while filtering variables
+# with names that are known to cause interference:
+#
+# * some specific variables for which bash does not allow assignment
+# * some specific variables that affect portage or sandbox behavior
+# * variable names that begin with a digit or that contain any
+#   non-alphanumeric characters that are not supported by bash
+#
+# --filter-sandbox causes all SANDBOX_* variables to be filtered, which
+# is only desired in certain cases, such as during preprocessing or when
+# saving environment.bz2 for a binary or installed package.
+#
+# --filter-features causes the special FEATURES variable to be filtered.
+# Generally, we want it to persist between phases since the user might
+# want to modify it via bashrc to enable things like splitdebug and
+# installsources for specific packages. They should be able to modify it
+# in pre_pkg_setup() and have it persist all the way through the install
+# phase. However, if FEATURES exists inside environment.bz2 then it
+# should be overridden by the current settings.
+#
+# --filter-locale causes locale related variables such as LANG and LC_*
+# variables to be filtered. These variables should persist between phases,
+# in case they are modified by the ebuild. However, the current user
+# settings should be used when loading the environment from a binary or
+# installed package.
+#
+# --filter-path causes the PATH variable to be filtered. This variable
+# should persist between phases, in case it is modified by the ebuild.
+# However, old settings should be overridden when loading the
+# environment from a binary or installed package.
+#
+# --allow-extra-vars causes some extra vars to be allowed through, such
+# as ${PORTAGE_SAVED_READONLY_VARS} and ${PORTAGE_MUTABLE_FILTERED_VARS}.
+# This is enabled automatically if EMERGE_FROM=binary, since it preserves
+# variables from when the package was originally built.
+#
+# In bash-3.2_p20+ an attempt to assign BASH_*, FUNCNAME, GROUPS or any
+# readonly variable causes the shell to exit while executing the "source"
+# builtin command. To avoid this problem, this function filters those
+# variables out and discards them. See bug #190128.
+__filter_readonly_variables() {
+ local x filtered_vars
+ local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
+ FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
+ local bash_misc_vars="BASH BASH_.* COLUMNS COMP_WORDBREAKS HISTCMD
+ HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
+ OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
+ SECONDS SHLVL _"
+ local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
+ SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
+ SANDBOX_LOG SANDBOX_ON"
+ # Untrusted due to possible application of package renames to binpkgs
+ local binpkg_untrusted_vars="CATEGORY P PF PN PR PV PVR"
+ local misc_garbage_vars="_portage_filter_opts"
+ filtered_vars="$readonly_bash_vars $bash_misc_vars
+ $PORTAGE_READONLY_VARS $misc_garbage_vars"
+
+ # Don't filter/interfere with prefix variables unless they are
+ # supported by the current EAPI.
+ if ___eapi_has_prefix_variables; then
+ filtered_vars+=" ED EPREFIX EROOT"
+ fi
+
+ if has --filter-sandbox $* ; then
+ filtered_vars="${filtered_vars} SANDBOX_.*"
+ else
+ filtered_vars="${filtered_vars} ${filtered_sandbox_vars}"
+ fi
+ if has --filter-features $* ; then
+ filtered_vars="${filtered_vars} FEATURES PORTAGE_FEATURES"
+ fi
+ if has --filter-path $* ; then
+ filtered_vars+=" PATH"
+ fi
+ if has --filter-locale $* ; then
+ filtered_vars+=" LANG LC_ALL LC_COLLATE
+ LC_CTYPE LC_MESSAGES LC_MONETARY
+ LC_NUMERIC LC_PAPER LC_TIME"
+ fi
+ if ! has --allow-extra-vars $* ; then
+ if [ "${EMERGE_FROM}" = binary ] ; then
+ # preserve additional variables from build time,
+ # while excluding untrusted variables
+ filtered_vars+=" ${binpkg_untrusted_vars}"
+ else
+ filtered_vars+=" ${PORTAGE_SAVED_READONLY_VARS}"
+ filtered_vars+=" ${PORTAGE_MUTABLE_FILTERED_VARS}"
+ fi
+ fi
+
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
+}
+
+# @FUNCTION: __preprocess_ebuild_env
+# @DESCRIPTION:
+# Filter any readonly variables from ${T}/environment, source it, and then
+# save it via __save_ebuild_env(). This process should be sufficient to prevent
+# any stale variables or functions from an arbitrary environment from
+# interfering with the current environment. This is useful when an existing
+# environment needs to be loaded from a binary or installed package.
+__preprocess_ebuild_env() {
+ local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
+
+ # If environment.raw is present, this is a signal from the python side,
+ # indicating that the environment may contain stale FEATURES and
+ # SANDBOX_{DENY,PREDICT,READ,WRITE} variables that should be filtered out.
+ # Otherwise, we don't need to filter the environment.
+ [ -f "${T}/environment.raw" ] || return 0
+
+ __filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
+ >> "$T/environment.filtered" || return $?
+ unset _portage_filter_opts
+ mv "${T}"/environment.filtered "${T}"/environment || return $?
+ rm -f "${T}/environment.success" || return $?
+ # WARNING: Code inside this subshell should avoid making assumptions
+ # about variables or functions after source "${T}"/environment has been
+ # called. Any variables that need to be relied upon should already be
+ # filtered out above.
+ (
+ export SANDBOX_ON=1
+ source "${T}/environment" || exit $?
+ # We have to temporarily disable sandbox since the
+ # SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+		# may be unusable (triggering spurious sandbox violations)
+ # until we've merged them with our current values.
+ export SANDBOX_ON=0
+
+ # It's remotely possible that __save_ebuild_env() has been overridden
+ # by the above source command. To protect ourselves, we override it
+ # here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
+ # because it's already filtered above.
+ source "${PORTAGE_BIN_PATH}/save-ebuild-env.sh" || exit $?
+
+ # Rely on __save_ebuild_env() to filter out any remaining variables
+ # and functions that could interfere with the current environment.
+ __save_ebuild_env || exit $?
+ >> "$T/environment.success" || exit $?
+ ) > "${T}/environment.filtered"
+ local retval
+ if [ -e "${T}/environment.success" ] ; then
+ __filter_readonly_variables --filter-features < \
+ "${T}/environment.filtered" > "${T}/environment"
+ retval=$?
+ else
+ retval=1
+ fi
+ rm -f "${T}"/environment.{filtered,raw,success}
+ return ${retval}
+}
+
+__ebuild_phase() {
+ declare -F "$1" >/dev/null && __qa_call $1
+}
+
+__ebuild_phase_with_hooks() {
+ local x phase_name=${1}
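+	# e.g. phase_name=src_compile runs, in order:
+	#   pre_src_compile src_compile post_src_compile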
+ for x in {pre_,,post_}${phase_name} ; do
+ __ebuild_phase ${x}
+ done
+}
+
+__dyn_pretend() {
+ if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
+ __vecho ">>> It appears that '$PF' is already pretended; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
+ return 0
+ fi
+ __ebuild_phase pre_pkg_pretend
+ __ebuild_phase pkg_pretend
+ >> "$PORTAGE_BUILDDIR/.pretended" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.pretended"
+ __ebuild_phase post_pkg_pretend
+}
+
+__dyn_setup() {
+ if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
+ __vecho ">>> It appears that '$PF' is already setup; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
+ return 0
+ fi
+ __ebuild_phase pre_pkg_setup
+ __ebuild_phase pkg_setup
+ >> "$PORTAGE_BUILDDIR/.setuped" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.setuped"
+ __ebuild_phase post_pkg_setup
+}
+
+__dyn_unpack() {
+ if [[ -f ${PORTAGE_BUILDDIR}/.unpacked ]] ; then
+ __vecho ">>> WORKDIR is up-to-date, keeping..."
+ return 0
+ fi
+ if [ ! -d "${WORKDIR}" ]; then
+ install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
+ fi
+ cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
+ __ebuild_phase pre_src_unpack
+ __vecho ">>> Unpacking source..."
+ __ebuild_phase src_unpack
+ >> "$PORTAGE_BUILDDIR/.unpacked" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
+ __vecho ">>> Source unpacked in ${WORKDIR}"
+ __ebuild_phase post_src_unpack
+}
+
+__dyn_clean() {
+ if [ -z "${PORTAGE_BUILDDIR}" ]; then
+ echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
+ return 1
+ elif [ ! -d "${PORTAGE_BUILDDIR}" ] ; then
+ return 0
+ fi
+ if has chflags $FEATURES ; then
+ chflags -R noschg,nouchg,nosappnd,nouappnd "${PORTAGE_BUILDDIR}"
+ chflags -R nosunlnk,nouunlnk "${PORTAGE_BUILDDIR}" 2>/dev/null
+ fi
+
+ rm -rf "${PORTAGE_BUILDDIR}/image" "${PORTAGE_BUILDDIR}/homedir"
+ rm -f "${PORTAGE_BUILDDIR}/.installed"
+
+ if [[ $EMERGE_FROM = binary ]] || \
+ ! has keeptemp $FEATURES && ! has keepwork $FEATURES ; then
+ rm -rf "${T}"
+ fi
+
+ if [[ $EMERGE_FROM = binary ]] || ! has keepwork $FEATURES; then
+ rm -f "$PORTAGE_BUILDDIR"/.{ebuild_changed,logid,pretended,setuped,unpacked,prepared} \
+ "$PORTAGE_BUILDDIR"/.{configured,compiled,tested,packaged} \
+ "$PORTAGE_BUILDDIR"/.die_hooks \
+ "$PORTAGE_BUILDDIR"/.ipc_{in,out,lock} \
+ "$PORTAGE_BUILDDIR"/.exit_status
+
+ rm -rf "${PORTAGE_BUILDDIR}/build-info"
+ rm -rf "${WORKDIR}"
+ fi
+
+ if [ -f "${PORTAGE_BUILDDIR}/.unpacked" ]; then
+ find "${PORTAGE_BUILDDIR}" -type d ! -regex "^${WORKDIR}" | sort -r | tr "\n" "\0" | $XARGS -0 rmdir &>/dev/null
+ fi
+
+	# Do not bind this to the doebuild-defined DISTDIR; don't trust
+	# doebuild, and if mistakes are made it'll end up wiping the user's
+	# distfiles directory (bad).
+ rm -rf "${PORTAGE_BUILDDIR}/distdir"
+
+ # Some kernels, such as Solaris, return EINVAL when an attempt
+ # is made to remove the current working directory.
+ cd "$PORTAGE_BUILDDIR"/../..
+ rmdir "$PORTAGE_BUILDDIR" 2>/dev/null
+
+ true
+}
+
+__abort_handler() {
+ local msg
+ if [ "$2" != "fail" ]; then
+ msg="${EBUILD}: ${1} aborted; exiting."
+ else
+ msg="${EBUILD}: ${1} failed; exiting."
+ fi
+ echo
+ echo "$msg"
+ echo
+ eval ${3}
+ #unset signal handler
+ trap - SIGINT SIGQUIT
+}
+
+__abort_prepare() {
+ __abort_handler src_prepare $1
+ rm -f "$PORTAGE_BUILDDIR/.prepared"
+ exit 1
+}
+
+__abort_configure() {
+ __abort_handler src_configure $1
+ rm -f "$PORTAGE_BUILDDIR/.configured"
+ exit 1
+}
+
+__abort_compile() {
+ __abort_handler "src_compile" $1
+ rm -f "${PORTAGE_BUILDDIR}/.compiled"
+ exit 1
+}
+
+__abort_test() {
+ __abort_handler "__dyn_test" $1
+ rm -f "${PORTAGE_BUILDDIR}/.tested"
+ exit 1
+}
+
+__abort_install() {
+ __abort_handler "src_install" $1
+ rm -rf "${PORTAGE_BUILDDIR}/image"
+ exit 1
+}
+
+__has_phase_defined_up_to() {
+ local phase
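+	# e.g. with DEFINED_PHASES="compile install" and $1=prepare this
+	# returns 1 (neither unpack nor prepare is defined), allowing the
+	# callers' WORKDIR fallback.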
+ for phase in unpack prepare configure compile install; do
+ has ${phase} ${DEFINED_PHASES} && return 0
+ [[ ${phase} == $1 ]] && return 1
+ done
+ # We shouldn't actually get here
+ return 1
+}
+
+__dyn_prepare() {
+
+ if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
+ __vecho ">>> It appears that '$PF' is already prepared; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
+ return 0
+ fi
+
+ if [[ -d $S ]] ; then
+ cd "${S}"
+ elif ___eapi_has_S_WORKDIR_fallback; then
+ cd "${WORKDIR}"
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to prepare; then
+ cd "${WORKDIR}"
+ else
+ die "The source directory '${S}' doesn't exist"
+ fi
+
+ trap __abort_prepare SIGINT SIGQUIT
+
+ __ebuild_phase pre_src_prepare
+ __vecho ">>> Preparing source in $PWD ..."
+ __ebuild_phase src_prepare
+ >> "$PORTAGE_BUILDDIR/.prepared" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.prepared"
+ __vecho ">>> Source prepared."
+ __ebuild_phase post_src_prepare
+
+ trap - SIGINT SIGQUIT
+}
+
+# @FUNCTION: __start_distcc
+# @DESCRIPTION:
+# Start distcc-pump if necessary.
+__start_distcc() {
+ if has distcc $FEATURES && has distcc-pump $FEATURES ; then
+ if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
+ # adding distcc to PATH repeatedly results in fatal distcc recursion :)
+ eval $(pump --startup | grep -v PATH)
+ trap "pump --shutdown >/dev/null" EXIT
+ fi
+ fi
+}
+
+__dyn_configure() {
+
+ if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
+ __vecho ">>> It appears that '$PF' is already configured; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
+ return 0
+ fi
+
+ if [[ -d $S ]] ; then
+ cd "${S}"
+ elif ___eapi_has_S_WORKDIR_fallback; then
+ cd "${WORKDIR}"
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to configure; then
+ cd "${WORKDIR}"
+ else
+ die "The source directory '${S}' doesn't exist"
+ fi
+
+ trap __abort_configure SIGINT SIGQUIT
+ __start_distcc
+
+ __ebuild_phase pre_src_configure
+
+ __vecho ">>> Configuring source in $PWD ..."
+ __ebuild_phase src_configure
+ >> "$PORTAGE_BUILDDIR/.configured" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.configured"
+ __vecho ">>> Source configured."
+
+ __ebuild_phase post_src_configure
+
+ trap - SIGINT SIGQUIT
+}
+
+__dyn_compile() {
+
+ if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
+ __vecho ">>> It appears that '${PF}' is already compiled; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
+ return 0
+ fi
+
+ if [[ -d $S ]] ; then
+ cd "${S}"
+ elif ___eapi_has_S_WORKDIR_fallback; then
+ cd "${WORKDIR}"
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to compile; then
+ cd "${WORKDIR}"
+ else
+ die "The source directory '${S}' doesn't exist"
+ fi
+
+ trap __abort_compile SIGINT SIGQUIT
+ __start_distcc
+
+ __ebuild_phase pre_src_compile
+
+ __vecho ">>> Compiling source in $PWD ..."
+ __ebuild_phase src_compile
+ >> "$PORTAGE_BUILDDIR/.compiled" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.compiled"
+ __vecho ">>> Source compiled."
+
+ __ebuild_phase post_src_compile
+
+ trap - SIGINT SIGQUIT
+}
+
+__dyn_test() {
+
+ if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
+ __vecho ">>> It appears that ${PN} has already been tested; skipping."
+ __vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
+ return
+ fi
+
+ trap "__abort_test" SIGINT SIGQUIT
+ __start_distcc
+
+ if [ -d "${S}" ]; then
+ cd "${S}"
+ else
+ cd "${WORKDIR}"
+ fi
+
+ if has test ${RESTRICT} ; then
+ einfo "Skipping make test/check due to ebuild restriction."
+ __vecho ">>> Test phase [disabled because of RESTRICT=test]: ${CATEGORY}/${PF}"
+
+ # If ${EBUILD_FORCE_TEST} == 1 and FEATURES came from ${T}/environment
+ # then it might not have FEATURES=test like it's supposed to here.
+ elif [[ ${EBUILD_FORCE_TEST} != 1 ]] && ! has test ${FEATURES} ; then
+ __vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
+ else
+ # If ${EBUILD_FORCE_TEST} == 1 and USE came from ${T}/environment
+ # then it might not have USE=test like it's supposed to here.
+ if [[ ${EBUILD_FORCE_TEST} == 1 && test =~ ${PORTAGE_IUSE} ]] && \
+ ! has test ${USE} ; then
+ export USE="${USE} test"
+ fi
+
+ local save_sp=${SANDBOX_PREDICT}
+ addpredict /
+ __ebuild_phase pre_src_test
+
+ __vecho ">>> Test phase: ${CATEGORY}/${PF}"
+ __ebuild_phase src_test
+ __vecho ">>> Completed testing ${CATEGORY}/${PF}"
+
+ >> "$PORTAGE_BUILDDIR/.tested" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.tested"
+ __ebuild_phase post_src_test
+ SANDBOX_PREDICT=${save_sp}
+ fi
+
+ trap - SIGINT SIGQUIT
+}
+
+__dyn_install() {
+ [ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
+ if has noauto $FEATURES ; then
+ rm -f "${PORTAGE_BUILDDIR}/.installed"
+ elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
+ __vecho ">>> It appears that '${PF}' is already installed; skipping."
+ __vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
+ return 0
+ fi
+ trap "__abort_install" SIGINT SIGQUIT
+ __start_distcc
+
+ __ebuild_phase pre_src_install
+
+ if ___eapi_has_prefix_variables; then
+ _x=${ED}
+ else
+ _x=${D}
+ fi
+ rm -rf "${D}"
+ mkdir -p "${_x}"
+ unset _x
+
+ if [[ -d $S ]] ; then
+ cd "${S}"
+ elif ___eapi_has_S_WORKDIR_fallback; then
+ cd "${WORKDIR}"
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to install; then
+ cd "${WORKDIR}"
+ else
+ die "The source directory '${S}' doesn't exist"
+ fi
+
+ __vecho
+ __vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
+ #our custom version of libtool uses $S and $D to fix
+ #invalid paths in .la files
+ export S D
+
+ # Reset exeinto(), docinto(), insinto(), and into() state variables
+ # in case the user is running the install phase multiple times
+ # consecutively via the ebuild command.
+ export DESTTREE=/usr
+ export INSDESTTREE=""
+ export _E_EXEDESTTREE_=""
+ export _E_DOCDESTTREE_=""
+
+ __ebuild_phase src_install
+ >> "$PORTAGE_BUILDDIR/.installed" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.installed"
+ __vecho ">>> Completed installing ${PF} into ${D}"
+ __vecho
+ __ebuild_phase post_src_install
+
+ cd "${PORTAGE_BUILDDIR}"/build-info
+ set -f
+ local f x
+ IFS=$' \t\n\r'
+ for f in CATEGORY DEFINED_PHASES FEATURES INHERITED IUSE \
+ PF PKGUSE SLOT KEYWORDS HOMEPAGE DESCRIPTION ; do
+ x=$(echo -n ${!f})
+ [[ -n $x ]] && echo "$x" > $f
+ done
+ if [[ $CATEGORY != virtual ]] ; then
+ for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
+ CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
+ LDFLAGS LIBCFLAGS LIBCXXFLAGS QA_CONFIGURE_OPTIONS \
+ QA_DESKTOP_FILE ; do
+ x=$(echo -n ${!f})
+ [[ -n $x ]] && echo "$x" > $f
+ done
+ # whitespace preserved
+ for f in QA_AM_MAINTAINER_MODE ; do
+ [[ -n ${!f} ]] && echo "${!f}" > $f
+ done
+ fi
+ echo "${USE}" > USE
+ echo "${EAPI:-0}" > EAPI
+
+ # Save EPREFIX, since it makes it easy to use chpathtool to
+ # adjust the content of a binary package so that it will
+	# work in a different EPREFIX from the one it was built for.
+ if ___eapi_has_prefix_variables && [[ -n ${EPREFIX} ]]; then
+ echo "${EPREFIX}" > EPREFIX
+ fi
+
+ set +f
+
+ # local variables can leak into the saved environment.
+ unset f
+
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env --exclude-init-phases | __filter_readonly_variables \
+ --filter-path --filter-sandbox --allow-extra-vars > \
+ "${PORTAGE_BUILDDIR}"/build-info/environment
+ assert "__save_ebuild_env failed"
+ cd "${PORTAGE_BUILDDIR}"/build-info || die
+
+ ${PORTAGE_BZIP2_COMMAND} -f9 environment
+
+ cp "${EBUILD}" "${PF}.ebuild"
+ [ -n "${PORTAGE_REPO_NAME}" ] && echo "${PORTAGE_REPO_NAME}" > repository
+ if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+ then
+ >> DEBUGBUILD
+ fi
+ trap - SIGINT SIGQUIT
+}
+
+__dyn_help() {
+ echo
+ echo "Portage"
+ echo "Copyright 1999-2010 Gentoo Foundation"
+ echo
+ echo "How to use the ebuild command:"
+ echo
+ echo "The first argument to ebuild should be an existing .ebuild file."
+ echo
+ echo "One or more of the following options can then be specified. If more"
+ echo "than one option is specified, each will be executed in order."
+ echo
+ echo " help : show this help screen"
+ echo " pretend : execute package specific pretend actions"
+ echo " setup : execute package specific setup actions"
+ echo " fetch : download source archive(s) and patches"
+ echo " nofetch : display special fetch instructions"
+ echo " digest : create a manifest file for the package"
+ echo " manifest : create a manifest file for the package"
+ echo " unpack : unpack sources (auto-dependencies if needed)"
+ echo " prepare : prepare sources (auto-dependencies if needed)"
+ echo " configure : configure sources (auto-fetch/unpack if needed)"
+ echo " compile : compile sources (auto-fetch/unpack/configure if needed)"
+ echo " test : test package (auto-fetch/unpack/configure/compile if needed)"
+ echo " preinst : execute pre-install instructions"
+ echo " postinst : execute post-install instructions"
+ echo " install : install the package to the temporary install directory"
+ echo " qmerge : merge image into live filesystem, recording files in db"
+ echo " merge : do fetch, unpack, compile, install and qmerge"
+ echo " prerm : execute pre-removal instructions"
+ echo " postrm : execute post-removal instructions"
+ echo " unmerge : remove package from live filesystem"
+ echo " config : execute package specific configuration actions"
+ echo " package : create a tarball package in ${PKGDIR}/All"
+ echo " rpm : build a RedHat RPM package"
+ echo " clean : clean up all source and temporary files"
+ echo
+ echo "The following settings will be used for the ebuild process:"
+ echo
+ echo " package : ${PF}"
+ echo " slot : ${SLOT}"
+ echo " category : ${CATEGORY}"
+ echo " description : ${DESCRIPTION}"
+ echo " system : ${CHOST}"
+ echo " c flags : ${CFLAGS}"
+ echo " c++ flags : ${CXXFLAGS}"
+ echo " make flags : ${MAKEOPTS}"
+ echo -n " build mode : "
+ if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT} ;
+ then
+ echo "debug (large)"
+ else
+ echo "production (stripped)"
+ fi
+ echo " merge to : ${ROOT}"
+ echo " offset : ${EPREFIX}"
+ echo
+ if [ -n "$USE" ]; then
+ echo "Additionally, support for the following optional features will be enabled:"
+ echo
+ echo " ${USE}"
+ fi
+ echo
+}
+
+# @FUNCTION: __ebuild_arg_to_phase
+# @DESCRIPTION:
+# Translate a known ebuild(1) argument into the precise
+# name of its corresponding ebuild phase.
+__ebuild_arg_to_phase() {
+ [ $# -ne 1 ] && die "expected exactly 1 arg, got $#: $*"
+ local arg=$1
+ local phase_func=""
+
+ case "$arg" in
+ pretend)
+ ___eapi_has_pkg_pretend && \
+ phase_func=pkg_pretend
+ ;;
+ setup)
+ phase_func=pkg_setup
+ ;;
+ nofetch)
+ phase_func=pkg_nofetch
+ ;;
+ unpack)
+ phase_func=src_unpack
+ ;;
+ prepare)
+ ___eapi_has_src_prepare && \
+ phase_func=src_prepare
+ ;;
+ configure)
+ ___eapi_has_src_configure && \
+ phase_func=src_configure
+ ;;
+ compile)
+ phase_func=src_compile
+ ;;
+ test)
+ phase_func=src_test
+ ;;
+ install)
+ phase_func=src_install
+ ;;
+ preinst)
+ phase_func=pkg_preinst
+ ;;
+ postinst)
+ phase_func=pkg_postinst
+ ;;
+ prerm)
+ phase_func=pkg_prerm
+ ;;
+ postrm)
+ phase_func=pkg_postrm
+ ;;
+ esac
+
+ [[ -z $phase_func ]] && return 1
+ echo "$phase_func"
+ return 0
+}
+
+__ebuild_phase_funcs() {
+ [ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+ local eapi=$1
+ local phase_func=$2
+ local all_phases="src_compile pkg_config src_configure pkg_info
+ src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+ src_prepare pkg_prerm pkg_pretend pkg_setup src_test src_unpack"
+ local x
+
+ # First, set up the error handlers for default*
+ for x in ${all_phases} ; do
+ eval "default_${x}() {
+ die \"default_${x}() is not supported in EAPI='${eapi}' in phase ${phase_func}\"
+ }"
+ done
+
+ # We can just call the specific handler -- it will either error out
+ # on invalid phase or run it.
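+	# e.g. with phase_func=src_compile, an ebuild's call to `default`
+	# runs default_src_compile, which is either an EAPI default bound
+	# below or the error handler defined above.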
+ eval "default() {
+ default_${phase_func}
+ }"
+
+ case "$eapi" in
+ 0|1) # EAPIs not supporting 'default'
+
+ for x in pkg_nofetch src_unpack src_test ; do
+ declare -F $x >/dev/null || \
+ eval "$x() { __eapi0_$x; }"
+ done
+
+ if ! declare -F src_compile >/dev/null ; then
+ case "$eapi" in
+ 0)
+ src_compile() { __eapi0_src_compile; }
+ ;;
+ *)
+ src_compile() { __eapi1_src_compile; }
+ ;;
+ esac
+ fi
+ ;;
+
+ *) # EAPIs supporting 'default'
+
+ # defaults starting with EAPI 0
+ [[ ${phase_func} == pkg_nofetch ]] && \
+ default_pkg_nofetch() { __eapi0_pkg_nofetch; }
+ [[ ${phase_func} == src_unpack ]] && \
+ default_src_unpack() { __eapi0_src_unpack; }
+ [[ ${phase_func} == src_test ]] && \
+ default_src_test() { __eapi0_src_test; }
+
+ # defaults starting with EAPI 2
+ [[ ${phase_func} == src_prepare ]] && \
+ default_src_prepare() { __eapi2_src_prepare; }
+ [[ ${phase_func} == src_configure ]] && \
+ default_src_configure() { __eapi2_src_configure; }
+ [[ ${phase_func} == src_compile ]] && \
+ default_src_compile() { __eapi2_src_compile; }
+
+ # bind supported phases to the defaults
+ declare -F pkg_nofetch >/dev/null || \
+ pkg_nofetch() { default; }
+ declare -F src_unpack >/dev/null || \
+ src_unpack() { default; }
+ declare -F src_prepare >/dev/null || \
+ src_prepare() { default; }
+ declare -F src_configure >/dev/null || \
+ src_configure() { default; }
+ declare -F src_compile >/dev/null || \
+ src_compile() { default; }
+ declare -F src_test >/dev/null || \
+ src_test() { default; }
+
+ # defaults starting with EAPI 4
+ if ! has ${eapi} 2 3; then
+ [[ ${phase_func} == src_install ]] && \
+ default_src_install() { __eapi4_src_install; }
+
+ declare -F src_install >/dev/null || \
+ src_install() { default; }
+ fi
+ ;;
+ esac
+}
+
+__ebuild_main() {
+
+ # Subshell/helper die support (must export for the die helper).
+ # Since this function is typically executed in a subshell,
+	# set up EBUILD_MASTER_PID to refer to the current $BASHPID,
+ # which seems to give the best results when further
+ # nested subshells call die.
+ export EBUILD_MASTER_PID=${BASHPID:-$(__bashpid)}
+ trap 'exit 1' SIGTERM
+
+ #a reasonable default for $S
+ [[ -z ${S} ]] && export S=${WORKDIR}/${P}
+
+ if [[ -s $SANDBOX_LOG ]] ; then
+ # We use SANDBOX_LOG to check for sandbox violations,
+ # so we ensure that there can't be a stale log to
+ # interfere with our logic.
+ local x=
+		if [[ -n ${SANDBOX_ON} ]] ; then
+ x=$SANDBOX_ON
+ export SANDBOX_ON=0
+ fi
+
+ rm -f "$SANDBOX_LOG" || \
+ die "failed to remove stale sandbox log: '$SANDBOX_LOG'"
+
+ if [[ -n $x ]] ; then
+ export SANDBOX_ON=$x
+ fi
+ unset x
+ fi
+
+ # Force configure scripts that automatically detect ccache to
+ # respect FEATURES="-ccache".
+ has ccache $FEATURES || export CCACHE_DISABLE=1
+
+ local phase_func=$(__ebuild_arg_to_phase "$EBUILD_PHASE")
+ [[ -n $phase_func ]] && __ebuild_phase_funcs "$EAPI" "$phase_func"
+ unset phase_func
+
+ __source_all_bashrcs
+
+ case ${1} in
+ nofetch)
+ __ebuild_phase_with_hooks pkg_nofetch
+ ;;
+ prerm|postrm|preinst|postinst|config|info)
+ if has "${1}" config info && \
+ ! declare -F "pkg_${1}" >/dev/null ; then
+ ewarn "pkg_${1}() is not defined: '${EBUILD##*/}'"
+ fi
+ export SANDBOX_ON="0"
+ if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+ __ebuild_phase_with_hooks pkg_${1}
+ else
+ set -x
+ __ebuild_phase_with_hooks pkg_${1}
+ set +x
+ fi
+ if [[ -n $PORTAGE_UPDATE_ENV ]] ; then
+ # Update environment.bz2 in case installation phases
+ # need to pass some variables to uninstallation phases.
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env --exclude-init-phases | \
+ __filter_readonly_variables --filter-path \
+ --filter-sandbox --allow-extra-vars \
+ | ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
+ assert "__save_ebuild_env failed"
+ fi
+ ;;
+ unpack|prepare|configure|compile|test|clean|install)
+ if [[ ${SANDBOX_DISABLED:-0} = 0 ]] ; then
+ export SANDBOX_ON="1"
+ else
+ export SANDBOX_ON="0"
+ fi
+
+ case "${1}" in
+ configure|compile)
+
+ local x
+ for x in ASFLAGS CCACHE_DIR CCACHE_SIZE \
+ CFLAGS CXXFLAGS LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+ [[ ${!x+set} = set ]] && export $x
+ done
+ unset x
+
+ has distcc $FEATURES && [[ -n $DISTCC_DIR ]] && \
+ [[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
+ addwrite "$DISTCC_DIR"
+
+ x=LIBDIR_$ABI
+ [ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
+ export PKG_CONFIG_PATH=${EPREFIX}/usr/${!x}/pkgconfig
+
+ if has noauto $FEATURES && \
+ [[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
+ echo
+ echo "!!! We apparently haven't unpacked..." \
+ "This is probably not what you"
+ echo "!!! want to be doing... You are using" \
+ "FEATURES=noauto so I'll assume"
+ echo "!!! that you know what you are doing..." \
+ "You have 5 seconds to abort..."
+ echo
+
+ local x
+ for x in 1 2 3 4 5 6 7 8; do
+ LC_ALL=C sleep 0.25
+ done
+
+ sleep 3
+ fi
+
+ cd "$PORTAGE_BUILDDIR"
+ if [ ! -d build-info ] ; then
+ mkdir build-info
+ cp "$EBUILD" "build-info/$PF.ebuild"
+ fi
+
+ #our custom version of libtool uses $S and $D to fix
+ #invalid paths in .la files
+ export S D
+
+ ;;
+ esac
+
+ if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+ __dyn_${1}
+ else
+ set -x
+ __dyn_${1}
+ set +x
+ fi
+ export SANDBOX_ON="0"
+ ;;
+ help|pretend|setup)
+ #pkg_setup needs to be out of the sandbox for tmp file creation;
+ #for example, awking and piping a file in /tmp requires a temp file to be created
+ #in /etc. If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
+ export SANDBOX_ON="0"
+ if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+ __dyn_${1}
+ else
+ set -x
+ __dyn_${1}
+ set +x
+ fi
+ ;;
+ _internal_test)
+ ;;
+ *)
+ export SANDBOX_ON="1"
+ echo "Unrecognized arg '${1}'"
+ echo
+ __dyn_help
+ exit 1
+ ;;
+ esac
+
+ # Save the env only for relevant phases.
+ if ! has "${1}" clean help info nofetch ; then
+ umask 002
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env | __filter_readonly_variables \
+ --filter-features > "$T/environment"
+ assert "__save_ebuild_env failed"
+ chgrp "${PORTAGE_GRPNAME:-${PORTAGE_GROUP}}" "$T/environment"
+ chmod g+w "$T/environment"
+ fi
+ [[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+ if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+ [[ ! -s $SANDBOX_LOG ]]
+ "$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+ fi
+}
diff --git a/usr/lib/portage/bin/phase-helpers.sh b/usr/lib/portage/bin/phase-helpers.sh
new file mode 100755
index 0000000..5f7c809
--- /dev/null
+++ b/usr/lib/portage/bin/phase-helpers.sh
@@ -0,0 +1,1001 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+export DESTTREE=/usr
+export INSDESTTREE=""
+export _E_EXEDESTTREE_=""
+export _E_DOCDESTTREE_=""
+export INSOPTIONS="-m0644"
+export EXEOPTIONS="-m0755"
+export LIBOPTIONS="-m0644"
+export DIROPTIONS="-m0755"
+export MOPREFIX=${PN}
+# Do not compress files which are smaller than this (in bytes). #169260
+export PORTAGE_DOCOMPRESS_SIZE_LIMIT="128"
+declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
+declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
+
+into() {
+ if [ "$1" == "/" ]; then
+ export DESTTREE=""
+ else
+ export DESTTREE=$1
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+ if [ ! -d "${ED}${DESTTREE}" ]; then
+ install -d "${ED}${DESTTREE}"
+ local ret=$?
+ if [[ $ret -ne 0 ]] ; then
+ __helpers_die "${FUNCNAME[0]} failed"
+ return $ret
+ fi
+ fi
+ fi
+}
+
+insinto() {
+ if [ "$1" == "/" ]; then
+ export INSDESTTREE=""
+ else
+ export INSDESTTREE=$1
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+ if [ ! -d "${ED}${INSDESTTREE}" ]; then
+ install -d "${ED}${INSDESTTREE}"
+ local ret=$?
+ if [[ $ret -ne 0 ]] ; then
+ __helpers_die "${FUNCNAME[0]} failed"
+ return $ret
+ fi
+ fi
+ fi
+}
+
+exeinto() {
+ if [ "$1" == "/" ]; then
+ export _E_EXEDESTTREE_=""
+ else
+ export _E_EXEDESTTREE_="$1"
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+ if [ ! -d "${ED}${_E_EXEDESTTREE_}" ]; then
+ install -d "${ED}${_E_EXEDESTTREE_}"
+ local ret=$?
+ if [[ $ret -ne 0 ]] ; then
+ __helpers_die "${FUNCNAME[0]} failed"
+ return $ret
+ fi
+ fi
+ fi
+}
+
+docinto() {
+ if [ "$1" == "/" ]; then
+ export _E_DOCDESTTREE_=""
+ else
+ export _E_DOCDESTTREE_="$1"
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+ if [ ! -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
+ install -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+ local ret=$?
+ if [[ $ret -ne 0 ]] ; then
+ __helpers_die "${FUNCNAME[0]} failed"
+ return $ret
+ fi
+ fi
+ fi
+}
+
+insopts() {
+ export INSOPTIONS="$@"
+
+ # `install` should never be called with '-s' ...
+ has -s ${INSOPTIONS} && die "Never call insopts() with -s"
+}
+
+diropts() {
+ export DIROPTIONS="$@"
+}
+
+exeopts() {
+ export EXEOPTIONS="$@"
+
+ # `install` should never be called with '-s' ...
+ has -s ${EXEOPTIONS} && die "Never call exeopts() with -s"
+}
+
+libopts() {
+ export LIBOPTIONS="$@"
+
+ # `install` should never be called with '-s' ...
+ has -s ${LIBOPTIONS} && die "Never call libopts() with -s"
+}
+
+docompress() {
+ ___eapi_has_docompress || die "'docompress' not supported in this EAPI"
+
+ local f g
+ if [[ $1 = "-x" ]]; then
+ shift
+ for f; do
+ f=$(__strip_duplicate_slashes "${f}"); f=${f%/}
+ [[ ${f:0:1} = / ]] || f="/${f}"
+ for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
+ [[ ${f} = "${g}" ]] && continue 2
+ done
+ PORTAGE_DOCOMPRESS_SKIP[${#PORTAGE_DOCOMPRESS_SKIP[@]}]=${f}
+ done
+ else
+ for f; do
+ f=$(__strip_duplicate_slashes "${f}"); f=${f%/}
+ [[ ${f:0:1} = / ]] || f="/${f}"
+ for g in "${PORTAGE_DOCOMPRESS[@]}"; do
+ [[ ${f} = "${g}" ]] && continue 2
+ done
+ PORTAGE_DOCOMPRESS[${#PORTAGE_DOCOMPRESS[@]}]=${f}
+ done
+ fi
+}
+
+useq() {
+ has $EBUILD_PHASE prerm postrm || eqawarn \
+ "QA Notice: The 'useq' function is deprecated (replaced by 'use')"
+ use ${1}
+}
+
+usev() {
+ if use ${1}; then
+ echo "${1#!}"
+ return 0
+ fi
+ return 1
+}
+
+if ___eapi_has_usex; then
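+	# e.g. `usex debug` prints "yes" or "no"; `usex ssl --with-ssl
+	# --without-ssl` prints whichever flag matches the USE state.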
+ usex() {
+ if use "$1"; then
+ echo "${2-yes}$4"
+ else
+ echo "${3-no}$5"
+ fi
+ return 0
+ }
+fi
+
+use() {
+ local u=$1
+ local found=0
+
+ # if we got something like '!flag', then invert the return value
+ if [[ ${u:0:1} == "!" ]] ; then
+ u=${u:1}
+ found=1
+ fi
+
+ if [[ $EBUILD_PHASE = depend ]] ; then
+ # TODO: Add a registration interface for eclasses to register
+ # any number of phase hooks, so that global scope eclass
+		# initialization can be migrated to phase hooks in new EAPIs.
+ # Example: add_phase_hook before pkg_setup $ECLASS_pre_pkg_setup
+ #if [[ -n $EAPI ]] && ! has "$EAPI" 0 1 2 3 ; then
+ # die "use() called during invalid phase: $EBUILD_PHASE"
+ #fi
+ true
+
+ # Make sure we have this USE flag in IUSE, but exempt binary
+ # packages for API consumers like Entropy which do not require
+ # a full profile with IUSE_IMPLICIT and stuff (see bug #456830).
+ elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE &&
+ -n $PORTAGE_INTERNAL_CALLER ]] ; then
+ if [[ ! $u =~ $PORTAGE_IUSE ]] ; then
+ if [[ ! ${EAPI} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]] ; then
+ # This is only strict starting with EAPI 5, since implicit IUSE
+ # is not well defined for earlier EAPIs (see bug #449708).
+ die "USE Flag '${u}' not in IUSE for ${CATEGORY}/${PF}"
+ fi
+ eqawarn "QA Notice: USE Flag '${u}' not" \
+ "in IUSE for ${CATEGORY}/${PF}"
+ fi
+ fi
+
+ local IFS=$' \t\n' prev_shopts=$- ret
+ set -f
+ if has ${u} ${USE} ; then
+ ret=${found}
+ else
+ ret=$((!found))
+ fi
+ [[ ${prev_shopts} == *f* ]] || set +f
+ return ${ret}
+}
+
+use_with() {
+ if [ -z "$1" ]; then
+ echo "!!! use_with() called without a parameter." >&2
+ echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
+ return 1
+ fi
+
+ if ___eapi_use_enable_and_use_with_support_empty_third_argument; then
+ local UW_SUFFIX=${3+=$3}
+ else
+ local UW_SUFFIX=${3:+=$3}
+ fi
+ local UWORD=${2:-$1}
+
+ if use $1; then
+ echo "--with-${UWORD}${UW_SUFFIX}"
+ else
+ echo "--without-${UWORD}"
+ fi
+ return 0
+}
+
+use_enable() {
+ if [ -z "$1" ]; then
+ echo "!!! use_enable() called without a parameter." >&2
+ echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
+ return 1
+ fi
+
+ if ___eapi_use_enable_and_use_with_support_empty_third_argument; then
+ local UE_SUFFIX=${3+=$3}
+ else
+ local UE_SUFFIX=${3:+=$3}
+ fi
+ local UWORD=${2:-$1}
+
+ if use $1; then
+ echo "--enable-${UWORD}${UE_SUFFIX}"
+ else
+ echo "--disable-${UWORD}"
+ fi
+ return 0
+}
+
+unpack() {
+ local srcdir
+ local x
+ local y y_insensitive
+ local suffix suffix_insensitive
+ local myfail
+ local eapi=${EAPI:-0}
+ [ -z "$*" ] && die "Nothing passed to the 'unpack' command"
+
+ for x in "$@"; do
+ __vecho ">>> Unpacking ${x} to ${PWD}"
+ suffix=${x##*.}
+ suffix_insensitive=$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${suffix}")
+ y=${x%.*}
+ y=${y##*.}
+ y_insensitive=$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${y}")
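+		# e.g. x=foo.tar.bz2 gives suffix=bz2 and y=tar, letting the
+		# handlers below tell compressed tarballs from plain files.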
+
+ if [[ ${x} == "./"* ]] ; then
+ srcdir=""
+ elif [[ ${x} == ${DISTDIR%/}/* ]] ; then
+ die "Arguments to unpack() cannot begin with \${DISTDIR}."
+ elif [[ ${x} == "/"* ]] ; then
+ die "Arguments to unpack() cannot be absolute"
+ else
+ srcdir="${DISTDIR}/"
+ fi
+ [[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"
+
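+		# Helper: $1 is a decompressor command line (e.g. "gzip -d").
+		# When the inner suffix is 'tar' the stream is piped into tar;
+		# otherwise the decompressed output is written into $PWD.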
+ __unpack_tar() {
+ if [[ ${y_insensitive} == tar ]] ; then
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tar != ${y} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "secondary suffix '${y}' which is unofficially" \
+ "supported with EAPI '${EAPI}'. Instead use 'tar'."
+ fi
+ $1 -c -- "$srcdir$x" | tar xof -
+ __assert_sigpipe_ok "$myfail"
+ else
+ local cwd_dest=${x##*/}
+ cwd_dest=${cwd_dest%.*}
+ $1 -c -- "${srcdir}${x}" > "${cwd_dest}" || die "$myfail"
+ fi
+ }
+
+ myfail="failure unpacking ${x}"
+ case "${suffix_insensitive}" in
+ tar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tar != ${suffix} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tar'."
+ fi
+ tar xof "$srcdir$x" || die "$myfail"
+ ;;
+ tgz)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tgz != ${suffix} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tgz'."
+ fi
+ tar xozf "$srcdir$x" || die "$myfail"
+ ;;
+ tbz|tbz2)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " tbz tbz2 " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tbz' or 'tbz2'."
+ fi
+ ${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
+ __assert_sigpipe_ok "$myfail"
+ ;;
+ zip|jar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " ZIP zip jar " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'." \
+ "Instead use 'ZIP', 'zip', or 'jar'."
+ fi
+ # unzip will interactively prompt under some error conditions,
+ # as reported in bug #336285
+ ( set +x ; while true ; do echo n || break ; done ) | \
+ unzip -qo "${srcdir}${x}" || die "$myfail"
+ ;;
+ gz|z)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " gz z Z " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'gz', 'z', or 'Z'."
+ fi
+ __unpack_tar "gzip -d"
+ ;;
+ bz2|bz)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " bz bz2 " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'bz' or 'bz2'."
+ fi
+ __unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
+ ;;
+ 7z)
+ local my_output
+ my_output="$(7z x -y "${srcdir}${x}")"
+ if [ $? -ne 0 ]; then
+ echo "${my_output}" >&2
+ die "$myfail"
+ fi
+ ;;
+ rar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " rar RAR " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'rar' or 'RAR'."
+ fi
+ unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
+ ;;
+ lha|lzh)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " LHA LHa lha lzh " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'." \
+ "Instead use 'LHA', 'LHa', 'lha', or 'lzh'."
+ fi
+ lha xfq "${srcdir}${x}" || die "$myfail"
+ ;;
+ a)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " a " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'a'."
+ fi
+ ar x "${srcdir}${x}" || die "$myfail"
+ ;;
+ deb)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " deb " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'deb'."
+ fi
+			# Unpacking .deb archives cannot always be done with
+			# `ar`; on AIX, for instance, it does not work. If
+			# `deb2targz` is installed, prefer it over `ar` for
+			# that reason (on AIX we make sure `deb2targz` is
+			# installed).
+ if type -P deb2targz > /dev/null; then
+ y=${x##*/}
+ local created_symlink=0
+ if [ ! "$srcdir$x" -ef "$y" ] ; then
+ # deb2targz always extracts into the same directory as
+ # the source file, so create a symlink in the current
+ # working directory if necessary.
+ ln -sf "$srcdir$x" "$y" || die "$myfail"
+ created_symlink=1
+ fi
+ deb2targz "$y" || die "$myfail"
+ if [ $created_symlink = 1 ] ; then
+ # Clean up the symlink so the ebuild
+ # doesn't inadvertently install it.
+ rm -f "$y"
+ fi
+ mv -f "${y%.deb}".tar.gz data.tar.gz || die "$myfail"
+ else
+ ar x "$srcdir$x" || die "$myfail"
+ fi
+ ;;
+ lzma)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " lzma " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'lzma'."
+ fi
+ __unpack_tar "lzma -d"
+ ;;
+ xz)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " xz " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'xz'."
+ fi
+ if ___eapi_unpack_supports_xz; then
+ __unpack_tar "xz -d"
+ else
+ __vecho "unpack ${x}: file format not recognized. Ignoring."
+ fi
+ ;;
+ *)
+ __vecho "unpack ${x}: file format not recognized. Ignoring."
+ ;;
+ esac
+ done
+ # Do not chmod '.' since it's probably ${WORKDIR} and PORTAGE_WORKDIR_MODE
+ # should be preserved.
+ find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
+ ${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
+}
+
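+# econf [<configure argument>...]
+# Runs ${ECONF_SOURCE}/configure with the standard prefix/host/directory
+# options set up below, followed by any EAPI-conditional flags, the
+# caller's arguments, and EXTRA_ECONF. Sketch (hypothetical flag):
+#   econf --enable-foo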
+econf() {
+ local x
+ local pid=${BASHPID:-$(__bashpid)}
+
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX=
+ fi
+
+ __hasg() {
+ local x s=$1
+ shift
+ for x ; do [[ ${x} == ${s} ]] && echo "${x}" && return 0 ; done
+ return 1
+ }
+
+ __hasgq() { __hasg "$@" >/dev/null ; }
+
+ local phase_func=$(__ebuild_arg_to_phase "$EBUILD_PHASE")
+ if [[ -n $phase_func ]] ; then
+ if ! ___eapi_has_src_configure; then
+ [[ $phase_func != src_compile ]] && \
+ eqawarn "QA Notice: econf called in" \
+ "$phase_func instead of src_compile"
+ else
+ [[ $phase_func != src_configure ]] && \
+ eqawarn "QA Notice: econf called in" \
+ "$phase_func instead of src_configure"
+ fi
+ fi
+
+ : ${ECONF_SOURCE:=.}
+ if [ -x "${ECONF_SOURCE}/configure" ]; then
+ if [[ -n $CONFIG_SHELL && \
+ "$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
+ # preserve timestamp, see bug #440304
+ touch -r "${ECONF_SOURCE}/configure" "${ECONF_SOURCE}/configure._portage_tmp_.${pid}" || die
+ sed -i \
+ -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" \
+ "${ECONF_SOURCE}/configure" \
+			|| die "Substitution of shebang in '${ECONF_SOURCE}/configure' failed"
+ touch -r "${ECONF_SOURCE}/configure._portage_tmp_.${pid}" "${ECONF_SOURCE}/configure" || die
+ rm -f "${ECONF_SOURCE}/configure._portage_tmp_.${pid}"
+ fi
+ if [ -e "${EPREFIX}"/usr/share/gnuconfig/ ]; then
+ find "${WORKDIR}" -type f '(' \
+ -name config.guess -o -name config.sub ')' -print0 | \
+ while read -r -d $'\0' x ; do
+ __vecho " * econf: updating ${x/${WORKDIR}\/} with ${EPREFIX}/usr/share/gnuconfig/${x##*/}"
+				# Make sure we do this atomically in case we're run in parallel. #487478
+ cp -f "${EPREFIX}"/usr/share/gnuconfig/"${x##*/}" "${x}.${pid}"
+ mv -f "${x}.${pid}" "${x}"
+ done
+ fi
+
+ local conf_args=()
+ if ___eapi_econf_passes_--disable-dependency-tracking || ___eapi_econf_passes_--disable-silent-rules; then
+ local conf_help=$("${ECONF_SOURCE}/configure" --help 2>/dev/null)
+
+ if ___eapi_econf_passes_--disable-dependency-tracking; then
+ if [[ ${conf_help} == *--disable-dependency-tracking* ]]; then
+ conf_args+=( --disable-dependency-tracking )
+ fi
+ fi
+
+ if ___eapi_econf_passes_--disable-silent-rules; then
+ if [[ ${conf_help} == *--disable-silent-rules* ]]; then
+ conf_args+=( --disable-silent-rules )
+ fi
+ fi
+ fi
+
+		# If the profile defines a non-default location to install libs to, pass it on.
+		# If the ebuild passes in --libdir, it is responsible for the conf_libdir fun.
+ local CONF_LIBDIR LIBDIR_VAR="LIBDIR_${ABI}"
+ if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+ CONF_LIBDIR=${!LIBDIR_VAR}
+ fi
+ if [[ -n ${CONF_LIBDIR} ]] && ! __hasgq --libdir=\* "$@" ; then
+ export CONF_PREFIX=$(__hasg --exec-prefix=\* "$@")
+ [[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(__hasg --prefix=\* "$@")
+ : ${CONF_PREFIX:=${EPREFIX}/usr}
+ CONF_PREFIX=${CONF_PREFIX#*=}
+ [[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
+ [[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
+ conf_args+=(
+ --libdir="$(__strip_duplicate_slashes "${CONF_PREFIX}${CONF_LIBDIR}")"
+ )
+ fi
+
+ # Handle arguments containing quoted whitespace (see bug #457136).
+ eval "local -a EXTRA_ECONF=(${EXTRA_ECONF})"
+
+ set -- \
+ --prefix="${EPREFIX}"/usr \
+ ${CBUILD:+--build=${CBUILD}} \
+ --host=${CHOST} \
+ ${CTARGET:+--target=${CTARGET}} \
+ --mandir="${EPREFIX}"/usr/share/man \
+ --infodir="${EPREFIX}"/usr/share/info \
+ --datadir="${EPREFIX}"/usr/share \
+ --sysconfdir="${EPREFIX}"/etc \
+ --localstatedir="${EPREFIX}"/var/lib \
+ "${conf_args[@]}" \
+ "$@" \
+ "${EXTRA_ECONF[@]}"
+ __vecho "${ECONF_SOURCE}/configure" "$@"
+
+ if ! "${ECONF_SOURCE}/configure" "$@" ; then
+
+ if [ -s config.log ]; then
+ echo
+ echo "!!! Please attach the following file when seeking support:"
+ echo "!!! ${PWD}/config.log"
+ fi
+ die "econf failed"
+ fi
+ elif [ -f "${ECONF_SOURCE}/configure" ]; then
+ die "configure is not executable"
+ else
+ die "no configure script found"
+ fi
+}
+
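+# einstall [<make argument>...]
+# Runs "make install" with prefix/datadir/mandir/etc. redirected into
+# ${ED}, for build systems whose install target does not honor DESTDIR.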
+einstall() {
+	# CONF_PREFIX is only set if the ebuild did not pass --libdir to econf above.
+ local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
+ LIBDIR_VAR="LIBDIR_${ABI}"
+ if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+ CONF_LIBDIR="${!LIBDIR_VAR}"
+ fi
+ unset LIBDIR_VAR
+ if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
+ EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
+ EI_DESTLIBDIR="$(__strip_duplicate_slashes "${EI_DESTLIBDIR}")"
+ LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
+ unset EI_DESTLIBDIR
+ fi
+
+ if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
+ if [ "${PORTAGE_DEBUG}" == "1" ]; then
+ ${MAKE:-make} -n prefix="${ED}usr" \
+ datadir="${ED}usr/share" \
+ infodir="${ED}usr/share/info" \
+ localstatedir="${ED}var/lib" \
+ mandir="${ED}usr/share/man" \
+ sysconfdir="${ED}etc" \
+ ${LOCAL_EXTRA_EINSTALL} \
+ ${MAKEOPTS} -j1 \
+ "$@" ${EXTRA_EMAKE} install
+ fi
+ ${MAKE:-make} prefix="${ED}usr" \
+ datadir="${ED}usr/share" \
+ infodir="${ED}usr/share/info" \
+ localstatedir="${ED}var/lib" \
+ mandir="${ED}usr/share/man" \
+ sysconfdir="${ED}etc" \
+ ${LOCAL_EXTRA_EINSTALL} \
+ ${MAKEOPTS} -j1 \
+ "$@" ${EXTRA_EMAKE} install || die "einstall failed"
+ else
+ die "no Makefile found"
+ fi
+}
+
+__eapi0_pkg_nofetch() {
+ [ -z "${SRC_URI}" ] && return
+
+ elog "The following are listed in SRC_URI for ${PN}:"
+ local x
+ for x in $(echo ${SRC_URI}); do
+ elog " ${x}"
+ done
+}
+
+__eapi0_src_unpack() {
+ [[ -n ${A} ]] && unpack ${A}
+}
+
+__eapi0_src_compile() {
+ if [ -x ./configure ] ; then
+ econf
+ fi
+ __eapi2_src_compile
+}
+
+__eapi0_src_test() {
+ # Since we don't want emake's automatic die
+ # support (EAPI 4 and later), and we also don't
+ # want the warning messages that it produces if
+ # we call it in 'nonfatal' mode, we use emake_cmd
+ # to emulate the desired parts of emake behavior.
+ local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
+ local internal_opts=
+ if ___eapi_default_src_test_disables_parallel_jobs; then
+ internal_opts+=" -j1"
+ fi
+ if $emake_cmd ${internal_opts} check -n &> /dev/null; then
+ __vecho "${emake_cmd} ${internal_opts} check" >&2
+ $emake_cmd ${internal_opts} check || \
+ die "Make check failed. See above for details."
+ elif $emake_cmd ${internal_opts} test -n &> /dev/null; then
+ __vecho "${emake_cmd} ${internal_opts} test" >&2
+ $emake_cmd ${internal_opts} test || \
+ die "Make test failed. See above for details."
+ fi
+}
+
+__eapi1_src_compile() {
+ __eapi2_src_configure
+ __eapi2_src_compile
+}
+
+__eapi2_src_prepare() {
+ :
+}
+
+__eapi2_src_configure() {
+ if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
+ econf
+ fi
+}
+
+__eapi2_src_compile() {
+ if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
+ emake || die "emake failed"
+ fi
+}
+
+__eapi4_src_install() {
+ if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
+ emake DESTDIR="${D}" install
+ fi
+
+ if ! declare -p DOCS &>/dev/null ; then
+ local d
+ for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
+ THANKS BUGS FAQ CREDITS CHANGELOG ; do
+ [[ -s "${d}" ]] && dodoc "${d}"
+ done
+ elif [[ $(declare -p DOCS) == "declare -a "* ]] ; then
+ dodoc "${DOCS[@]}"
+ else
+ dodoc ${DOCS}
+ fi
+}
+
+# @FUNCTION: has_version
+# @USAGE: [--host-root] <DEPEND ATOM>
+# @DESCRIPTION:
+# Return true if the given package is installed, and false otherwise.
+# Callers may override the ROOT variable in order to match packages from an
+# alternative ROOT.
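+# Example (hypothetical atom): has_version ">=dev-lang/python-2.7"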
+has_version() {
+
+ local atom eroot host_root=false root=${ROOT}
+ if [[ $1 == --host-root ]] ; then
+ host_root=true
+ shift
+ fi
+ atom=$1
+ shift
+ [ $# -gt 0 ] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if ${host_root} ; then
+ if ! ___eapi_best_version_and_has_version_support_--host-root; then
+ die "${FUNCNAME[0]}: option --host-root is not supported with EAPI ${EAPI}"
+ fi
+ root=/
+ fi
+
+ if ___eapi_has_prefix_variables; then
+ # [[ ${root} == / ]] would be ambiguous here,
+ # since both prefixes can share root=/ while
+ # having different EPREFIX offsets.
+ if ${host_root} ; then
+ eroot=${root%/}${PORTAGE_OVERRIDE_EPREFIX}/
+ else
+ eroot=${root%/}${EPREFIX}/
+ fi
+ else
+ eroot=${root}
+ fi
+ if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+ "$PORTAGE_BIN_PATH"/ebuild-ipc has_version "${eroot}" "${atom}"
+ else
+ "${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" has_version "${eroot}" "${atom}"
+ fi
+ local retval=$?
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid atom: ${atom}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+}
+
+# @FUNCTION: best_version
+# @USAGE: [--host-root] <DEPEND ATOM>
+# @DESCRIPTION:
+# Returns the best/most-current match.
+# Callers may override the ROOT variable in order to match packages from an
+# alternative ROOT.
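+# Example (hypothetical atom): best_version "dev-lang/python"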
+best_version() {
+
+ local atom eroot host_root=false root=${ROOT}
+ if [[ $1 == --host-root ]] ; then
+ host_root=true
+ shift
+ fi
+ atom=$1
+ shift
+ [ $# -gt 0 ] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if ${host_root} ; then
+ if ! ___eapi_best_version_and_has_version_support_--host-root; then
+ die "${FUNCNAME[0]}: option --host-root is not supported with EAPI ${EAPI}"
+ fi
+ root=/
+ fi
+
+ if ___eapi_has_prefix_variables; then
+ # [[ ${root} == / ]] would be ambiguous here,
+ # since both prefixes can share root=/ while
+ # having different EPREFIX offsets.
+ if ${host_root} ; then
+ eroot=${root%/}${PORTAGE_OVERRIDE_EPREFIX}/
+ else
+ eroot=${root%/}${EPREFIX}/
+ fi
+ else
+ eroot=${root}
+ fi
+ if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+ "$PORTAGE_BIN_PATH"/ebuild-ipc best_version "${eroot}" "${atom}"
+ else
+ "${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" best_version "${eroot}" "${atom}"
+ fi
+ local retval=$?
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid atom: ${atom}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+}
+
+if ___eapi_has_master_repositories; then
+ master_repositories() {
+ local output repository=$1 retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" master_repositories "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" master_repositories "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_repository_path; then
+ repository_path() {
+ local output repository=$1 retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" repository_path "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" get_repo_path "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_available_eclasses; then
+ available_eclasses() {
+ local output repository=${PORTAGE_REPO_NAME} retval
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" available_eclasses "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" available_eclasses "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_eclass_path; then
+ eclass_path() {
+ local eclass=$1 output repository=${PORTAGE_REPO_NAME} retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" eclass_path "${EROOT}" "${repository}" "${eclass}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" eclass_path "${EROOT}" "${repository}" "${eclass}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_license_path; then
+ license_path() {
+ local license=$1 output repository=${PORTAGE_REPO_NAME} retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" license_path "${EROOT}" "${repository}" "${license}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" license_path "${EROOT}" "${repository}" "${license}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_package_manager_build_user; then
+ package_manager_build_user() {
+ echo "${PORTAGE_BUILD_USER}"
+ }
+fi
+
+if ___eapi_has_package_manager_build_group; then
+ package_manager_build_group() {
+ echo "${PORTAGE_BUILD_GROUP}"
+ }
+fi
diff --git a/usr/lib/portage/bin/portageq b/usr/lib/portage/bin/portageq
new file mode 100755
index 0000000..37d22db
--- /dev/null
+++ b/usr/lib/portage/bin/portageq
@@ -0,0 +1,1442 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+ def exithandler(signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+ sys.exit(128 + signal.SIGINT)
+
+import os
+import types
+
+# For an explanation of this logic, see pym/_emerge/__init__.py.
+# This differs from master; we need to revisit it when we can install
+# using distutils, like master does.
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+ pym_paths = [ os.environ["PORTAGE_PYTHONPATH"] ]
+else:
+ pym_paths = [ os.path.join(os.path.dirname(
+ os.path.dirname(os.path.realpath(__file__))), "pym") ]
+# Avoid sandbox violations after Python upgrade.
+if os.environ.get("SANDBOX_ON") == "1":
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ for pym_path in pym_paths:
+ if pym_path not in sandbox_write:
+ sandbox_write.append(pym_path)
+ os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
+ del pym_path, sandbox_write
+del pym_paths
+
+import portage
+portage._internal_caller = True
+from portage import os
+from portage.eapi import eapi_has_repo_deps
+from portage.util import writemsg, writemsg_stdout
+from portage.util._argparse import ArgumentParser
+portage.proxy.lazyimport.lazyimport(globals(),
+ 're',
+ 'subprocess',
+ '_emerge.Package:Package',
+ '_emerge.RootConfig:RootConfig',
+ '_emerge.is_valid_package_atom:insert_category_into_atom',
+ 'portage.dbapi._expand_new_virt:expand_new_virt',
+ 'portage._sets.base:InternalPackageSet',
+ 'portage.xml.metadata:MetaDataXML'
+)
+
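+# Evaluate USE-conditional parts of an atom against $USE from the calling
+# environment, if it is set.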
+def eval_atom_use(atom):
+ if 'USE' in os.environ:
+ use = frozenset(os.environ['USE'].split())
+ atom = atom.evaluate_conditionals(use)
+ return atom
+
+def uses_eroot(function):
+ function.uses_eroot = True
+ return function
+
+# Global dict holding all function docstrings, used for argparse help.
+# This avoids trouble with Python compilation level 2 optimization.
+docstrings = {}
+
+#-----------------------------------------------------------------------------
+#
+# To add functionality to this tool, add a function below.
+#
+# The format for functions is:
+#
+# def function(argv):
+# <code>
+#
+# docstrings['function'] = """<list of options for this function>
+# <description of the function>
+# """
+# function.__doc__ = docstrings['function']
+#
+# "argv" is an array of the command line parameters provided after the command.
+#
+# Make sure you document the function in the right format. The documentation
+# is used to display help on the function.
+#
+# You do not need to add the function to any lists; this tool is introspective,
+# and will automatically add a command by the same name as the function!
+#
+
+@uses_eroot
+def has_version(argv):
+ if (len(argv) < 2):
+ print("ERROR: insufficient parameters!")
+ return 3
+
+ warnings = []
+
+ allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
+ except portage.exception.InvalidAtom:
+ if atom_validate_strict:
+ portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+ noiselevel=-1)
+ return 2
+ else:
+ atom = argv[1]
+ else:
+ if atom_validate_strict:
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
+ except portage.exception.InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % ('has_version', e))
+ atom = eval_atom_use(atom)
+
+ if warnings:
+ elog('eqawarn', warnings)
+
+ try:
+ mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+ if mylist:
+ return 0
+ else:
+ return 1
+ except KeyError:
+ return 1
+ except portage.exception.InvalidAtom:
+ portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+ noiselevel=-1)
+ return 2
+
+docstrings['has_version'] = """<eroot> <category/package>
+ Return code 0 if it's available, 1 otherwise.
+ """
+has_version.__doc__ = docstrings['has_version']
+
+
+@uses_eroot
+def best_version(argv):
+ if (len(argv) < 2):
+ print("ERROR: insufficient parameters!")
+ return 3
+
+ warnings = []
+
+ allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
+ except portage.exception.InvalidAtom:
+ if atom_validate_strict:
+ portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+ noiselevel=-1)
+ return 2
+ else:
+ atom = argv[1]
+ else:
+ if atom_validate_strict:
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
+ except portage.exception.InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % ('best_version', e))
+ atom = eval_atom_use(atom)
+
+ if warnings:
+ elog('eqawarn', warnings)
+
+ try:
+ mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+ print(portage.best(mylist))
+ except KeyError:
+ return 1
+
+docstrings['best_version'] = """<eroot> <category/package>
+ Returns category/package-version (without .ebuild).
+ """
+best_version.__doc__ = docstrings['best_version']
+
+
+@uses_eroot
+def mass_best_version(argv):
+ if (len(argv) < 2):
+ print("ERROR: insufficient parameters!")
+ return 2
+ try:
+ for pack in argv[1:]:
+ mylist = portage.db[argv[0]]['vartree'].dbapi.match(pack)
+ print('%s:%s' % (pack, portage.best(mylist)))
+ except KeyError:
+ return 1
+
+docstrings['mass_best_version'] = """<eroot> [<category/package>]+
+ Returns category/package-version (without .ebuild).
+ """
+mass_best_version.__doc__ = docstrings['mass_best_version']
+
+
+@uses_eroot
+def metadata(argv):
+ if (len(argv) < 4):
+ print('ERROR: insufficient parameters!', file=sys.stderr)
+ return 2
+
+ eroot, pkgtype, pkgspec = argv[0:3]
+ metakeys = argv[3:]
+ type_map = {
+ 'ebuild': 'porttree',
+ 'binary': 'bintree',
+ 'installed': 'vartree'
+ }
+ if pkgtype not in type_map:
+ print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
+ return 1
+ trees = portage.db
+ repo = portage.dep.dep_getrepo(pkgspec)
+ pkgspec = portage.dep.remove_slot(pkgspec)
+ try:
+ values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
+ pkgspec, metakeys, myrepo=repo)
+ writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
+ except KeyError:
+ print("Package not found: '%s'" % pkgspec, file=sys.stderr)
+ return 1
+
+docstrings['metadata'] = """
+<eroot> <pkgtype> <category/package> [<key>]+
+Returns metadata values for the specified package.
+Available keys: %s
+""" % ','.join(sorted(x for x in portage.auxdbkeys \
+if not x.startswith('UNUSED_')))
+metadata.__doc__ = docstrings['metadata']
+
+
+@uses_eroot
+def contents(argv):
+ if len(argv) != 2:
+ print("ERROR: expected 2 parameters, got %d!" % len(argv))
+ return 2
+
+ root, cpv = argv
+ vartree = portage.db[root]["vartree"]
+ if not vartree.dbapi.cpv_exists(cpv):
+ sys.stderr.write("Package not found: '%s'\n" % cpv)
+ return 1
+ cat, pkg = portage.catsplit(cpv)
+ db = portage.dblink(cat, pkg, root, vartree.settings,
+ treetype="vartree", vartree=vartree)
+ writemsg_stdout(''.join('%s\n' % x for x in sorted(db.getcontents())),
+ noiselevel=-1)
+
+docstrings['contents'] = """<eroot> <category/package>
+ List the files that are installed for a given package, with
+ one file listed on each line. All file names will begin with
+ <eroot>.
+ """
+contents.__doc__ = docstrings['contents']
+
+
+@uses_eroot
+def owners(argv):
+ if len(argv) < 2:
+ sys.stderr.write("ERROR: insufficient parameters!\n")
+ sys.stderr.flush()
+ return 2
+
+ eroot = argv[0]
+ vardb = portage.db[eroot]["vartree"].dbapi
+ root = portage.settings['ROOT']
+
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
+
+ files = []
+ orphan_abs_paths = set()
+ orphan_basenames = set()
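+	# Classify each argument as either a bare basename or an absolute path
+	# under <eroot>, tracking unmatched entries so they can be reported as
+	# orphans below.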
+ for f in argv[1:]:
+ f = portage.normalize_path(f)
+ is_basename = os.sep not in f
+ if not is_basename and f[:1] != os.sep:
+ if cwd is None:
+ sys.stderr.write("ERROR: cwd does not exist!\n")
+ sys.stderr.flush()
+ return 2
+ f = os.path.join(cwd, f)
+ f = portage.normalize_path(f)
+ if not is_basename and not f.startswith(eroot):
+ sys.stderr.write("ERROR: file paths must begin with <eroot>!\n")
+ sys.stderr.flush()
+ return 2
+ if is_basename:
+ files.append(f)
+ orphan_basenames.add(f)
+ else:
+ files.append(f[len(root)-1:])
+ orphan_abs_paths.add(f)
+
+ owners = vardb._owners.get_owners(files)
+
+ msg = []
+ for pkg, owned_files in owners.items():
+ cpv = pkg.mycpv
+ msg.append("%s\n" % cpv)
+ for f in sorted(owned_files):
+ f_abs = os.path.join(root, f.lstrip(os.path.sep))
+ msg.append("\t%s\n" % (f_abs,))
+ orphan_abs_paths.discard(f_abs)
+ if orphan_basenames:
+ orphan_basenames.discard(os.path.basename(f_abs))
+
+ writemsg_stdout(''.join(msg), noiselevel=-1)
+
+ if orphan_abs_paths or orphan_basenames:
+ orphans = []
+ orphans.extend(orphan_abs_paths)
+ orphans.extend(orphan_basenames)
+ orphans.sort()
+ msg = []
+ msg.append("None of the installed packages claim these files:\n")
+ for f in orphans:
+ msg.append("\t%s\n" % (f,))
+ sys.stderr.write("".join(msg))
+ sys.stderr.flush()
+
+ if owners:
+ return 0
+ return 1
+
+docstrings['owners'] = """<eroot> [<filename>]+
+ Given a list of files, print the packages that own the files and which
+ files belong to each package. Files owned by a package are listed on
+ the lines below it, indented by a single tab character (\\t). All file
+ paths must either start with <eroot> or be a basename alone.
+ Returns 1 if no owners could be found, and 0 otherwise.
+ """
+owners.__doc__ = docstrings['owners']
+
+
+@uses_eroot
+def is_protected(argv):
+ if len(argv) != 2:
+ sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
+ sys.stderr.flush()
+ return 2
+
+ root, filename = argv
+
+ err = sys.stderr
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
+
+ f = portage.normalize_path(filename)
+ if not f.startswith(os.path.sep):
+ if cwd is None:
+ err.write("ERROR: cwd does not exist!\n")
+ err.flush()
+ return 2
+ f = os.path.join(cwd, f)
+ f = portage.normalize_path(f)
+
+ if not f.startswith(root):
+ err.write("ERROR: file paths must begin with <eroot>!\n")
+ err.flush()
+ return 2
+
+ from portage.util import ConfigProtect
+
+ settings = portage.settings
+ protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+ protect_mask = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT_MASK", ""))
+ protect_obj = ConfigProtect(root, protect, protect_mask,
+ case_insensitive = ("case-insensitive-fs" in settings.features))
+ if protect_obj.isprotected(f):
+ return 0
+ return 1
+
+docstrings['is_protected'] = """<eroot> <filename>
+ Given a single filename, return code 0 if it's protected, 1 otherwise.
+ The filename must begin with <eroot>.
+ """
+is_protected.__doc__ = docstrings['is_protected']
+
+
+@uses_eroot
+def filter_protected(argv):
+ if len(argv) != 1:
+ sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
+ sys.stderr.flush()
+ return 2
+
+ root, = argv
+ out = sys.stdout
+ err = sys.stderr
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
+
+ from portage.util import ConfigProtect
+
+ settings = portage.settings
+ protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+ protect_mask = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT_MASK", ""))
+ protect_obj = ConfigProtect(root, protect, protect_mask,
+ case_insensitive = ("case-insensitive-fs" in settings.features))
+
+ errors = 0
+
+ for line in sys.stdin:
+ filename = line.rstrip("\n")
+ f = portage.normalize_path(filename)
+ if not f.startswith(os.path.sep):
+ if cwd is None:
+ err.write("ERROR: cwd does not exist!\n")
+ err.flush()
+ errors += 1
+ continue
+ f = os.path.join(cwd, f)
+ f = portage.normalize_path(f)
+
+ if not f.startswith(root):
+ err.write("ERROR: file paths must begin with <eroot>!\n")
+ err.flush()
+ errors += 1
+ continue
+
+ if protect_obj.isprotected(f):
+ out.write("%s\n" % filename)
+ out.flush()
+
+ if errors:
+ return 2
+
+ return 0
+
+docstrings['filter_protected'] = """<eroot>
+ Read filenames from stdin and write them to stdout if they are protected.
+ All filenames are delimited by \\n and must begin with <eroot>.
+ """
+filter_protected.__doc__ = docstrings['filter_protected']
+
+
+@uses_eroot
+def best_visible(argv):
+ if (len(argv) < 2):
+ writemsg("ERROR: insufficient parameters!\n", noiselevel=-1)
+ return 2
+
+ pkgtype = "ebuild"
+ if len(argv) > 2:
+ pkgtype = argv[1]
+ atom = argv[2]
+ else:
+ atom = argv[1]
+
+ type_map = {
+ "ebuild":"porttree",
+ "binary":"bintree",
+ "installed":"vartree"}
+
+ if pkgtype not in type_map:
+ writemsg("Unrecognized package type: '%s'\n" % pkgtype,
+ noiselevel=-1)
+ return 2
+
+ eroot = argv[0]
+ db = portage.db[eroot][type_map[pkgtype]].dbapi
+
+ try:
+ atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+ noiselevel=-1)
+ return 2
+
+ root_config = RootConfig(portage.settings, portage.db[eroot], None)
+
+ if hasattr(db, "xmatch"):
+ cpv_list = db.xmatch("match-all-cpv-only", atom)
+ else:
+ cpv_list = db.match(atom)
+
+ if cpv_list:
+ # reversed, for descending order
+ cpv_list.reverse()
+ # verify match, since the atom may match the package
+ # for a given cpv from one repo but not another, and
+ # we can use match-all-cpv-only to avoid redundant
+ # metadata access.
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
+
+ for cpv in cpv_list:
+ for repo in repo_list:
+ try:
+ metadata = dict(zip(Package.metadata_keys,
+ db.aux_get(cpv, Package.metadata_keys, myrepo=repo)))
+ except KeyError:
+ continue
+ pkg = Package(built=(pkgtype != "ebuild"), cpv=cpv,
+ installed=(pkgtype=="installed"), metadata=metadata,
+ root_config=root_config, type_name=pkgtype)
+ if not atom_set.findAtomForPackage(pkg):
+ continue
+
+ if pkg.visible:
+ writemsg_stdout("%s\n" % (pkg.cpv,), noiselevel=-1)
+ return os.EX_OK
+
+ # No package found, write out an empty line.
+ writemsg_stdout("\n", noiselevel=-1)
+
+ return 1
+
+docstrings['best_visible'] = """<eroot> [pkgtype] <atom>
+ Returns category/package-version (without .ebuild).
+ The pkgtype argument defaults to "ebuild" if unspecified,
+ otherwise it must be one of ebuild, binary, or installed.
+ """
+best_visible.__doc__ = docstrings['best_visible']
+
+
+@uses_eroot
+def mass_best_visible(argv):
+ type_map = {
+ "ebuild":"porttree",
+ "binary":"bintree",
+ "installed":"vartree"}
+
+ if (len(argv) < 2):
+ print("ERROR: insufficient parameters!")
+ return 2
+ try:
+ root = argv.pop(0)
+ pkgtype = "ebuild"
+ if argv[0] in type_map:
+ pkgtype = argv.pop(0)
+ for pack in argv:
+ writemsg_stdout("%s:" % pack, noiselevel=-1)
+ best_visible([root, pkgtype, pack])
+ except KeyError:
+ return 1
+
+docstrings['mass_best_visible'] = """<eroot> [<type>] [<category/package>]+
+ Returns category/package-version (without .ebuild).
+ The pkgtype argument defaults to "ebuild" if unspecified,
+ otherwise it must be one of ebuild, binary, or installed.
+ """
+mass_best_visible.__doc__ = docstrings['mass_best_visible']
+
+
+@uses_eroot
+def all_best_visible(argv):
+ if len(argv) < 1:
+ sys.stderr.write("ERROR: insufficient parameters!\n")
+ sys.stderr.flush()
+ return 2
+
+ #print portage.db[argv[0]]["porttree"].dbapi.cp_all()
+ for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
+ mybest=portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
+ if mybest:
+ print(mybest)
+
+docstrings['all_best_visible'] = """<eroot>
+ Returns all best_visible packages (without .ebuild).
+ """
+all_best_visible.__doc__ = docstrings['all_best_visible']
+
+
+@uses_eroot
+def match(argv):
+ if len(argv) != 2:
+ print("ERROR: expected 2 parameters, got %d!" % len(argv))
+ return 2
+ root, atom = argv
+ if not atom:
+ atom = "*/*"
+
+ vardb = portage.db[root]["vartree"].dbapi
+ try:
+ atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ # maybe it's valid but missing category
+ atom = portage.dep_expand(atom, mydb=vardb, settings=vardb.settings)
+
+ if atom.extended_syntax:
+ if atom == "*/*":
+ results = vardb.cpv_all()
+ else:
+ results = []
+ require_metadata = atom.slot or atom.repo
+ for cpv in vardb.cpv_all():
+
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, atom.repo)
+ except (KeyError, portage.exception.InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ results.append(cpv)
+
+ results.sort()
+ else:
+ results = vardb.match(atom)
+ for cpv in results:
+ print(cpv)
+
+docstrings['match'] = """<eroot> <atom>
+ Returns a \\n separated list of category/package-version.
+ When given an empty string, all installed packages will
+ be listed.
+ """
+match.__doc__ = docstrings['match']
+
+
+@uses_eroot
+def expand_virtual(argv):
+ if len(argv) != 2:
+ writemsg("ERROR: expected 2 parameters, got %d!\n" % len(argv),
+ noiselevel=-1)
+ return 2
+
+ root, atom = argv
+
+ try:
+ results = list(expand_new_virt(
+ portage.db[root]["vartree"].dbapi, atom))
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+ noiselevel=-1)
+ return 2
+
+ results.sort()
+ for x in results:
+ if not x.blocker:
+ writemsg_stdout("%s\n" % (x,))
+
+ return os.EX_OK
+
+docstrings['expand_virtual'] = """<eroot> <atom>
+ Returns a \\n separated list of atoms expanded from a
+ given virtual atom (GLEP 37 virtuals only),
+ excluding blocker atoms. Satisfied
+ virtual atoms are not included in the output, since
+ they are expanded to real atoms which are displayed.
+ Unsatisfied virtual atoms are displayed without
+ any expansion. The "match" command can be used to
+ resolve the returned atoms to specific installed
+ packages.
+ """
+expand_virtual.__doc__ = docstrings['expand_virtual']
+
+
+def vdb_path(_argv):
+ out = sys.stdout
+ out.write(os.path.join(portage.settings["EROOT"], portage.VDB_PATH) + "\n")
+ out.flush()
+ return os.EX_OK
+
+docstrings['vdb_path'] = """
+	Returns the path used for the var (installed) package database under
+	the current environment/configuration options.
+ """
+vdb_path.__doc__ = docstrings['vdb_path']
+
+
+def gentoo_mirrors(_argv):
+ print(portage.settings["GENTOO_MIRRORS"])
+
+docstrings['gentoo_mirrors'] = """
+ Returns the mirrors set to use in the portage configuration.
+ """
+gentoo_mirrors.__doc__ = docstrings['gentoo_mirrors']
+
+
+@uses_eroot
+def repositories_configuration(argv):
+ if len(argv) < 1:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ sys.stdout.write(portage.db[argv[0]]["vartree"].settings.repositories.config_string())
+ sys.stdout.flush()
+
+docstrings['repositories_configuration'] = """<eroot>
+ Returns the configuration of repositories.
+ """
+repositories_configuration.__doc__ = docstrings['repositories_configuration']
+
+
+@uses_eroot
+def repos_config(argv):
+ return repositories_configuration(argv)
+
+docstrings['repos_config'] = """<eroot>
+	This is an alias for the repositories_configuration command.
+	"""
+repos_config.__doc__ = docstrings['repos_config']
+
+
+def portdir(_argv):
+ print("WARNING: 'portageq portdir' is deprecated. Use 'portageq repositories_configuration' instead.", file=sys.stderr)
+ print(portage.settings["PORTDIR"])
+
+docstrings['portdir'] = """
+ Returns the PORTDIR path.
+ Deprecated in favor of repositories_configuration command.
+ """
+portdir.__doc__ = docstrings['portdir']
+
+
+def config_protect(_argv):
+ print(portage.settings["CONFIG_PROTECT"])
+
+docstrings['config_protect'] = """
+ Returns the CONFIG_PROTECT paths.
+ """
+config_protect.__doc__ = docstrings['config_protect']
+
+
+def config_protect_mask(_argv):
+ print(portage.settings["CONFIG_PROTECT_MASK"])
+
+docstrings['config_protect_mask'] = """
+ Returns the CONFIG_PROTECT_MASK paths.
+ """
+config_protect_mask.__doc__ = docstrings['config_protect_mask']
+
+def portdir_overlay(_argv):
+ print("WARNING: 'portageq portdir_overlay' is deprecated. Use 'portageq repositories_configuration' instead.", file=sys.stderr)
+ print(portage.settings["PORTDIR_OVERLAY"])
+
+docstrings['portdir_overlay'] = """
+ Returns the PORTDIR_OVERLAY path.
+ Deprecated in favor of repositories_configuration command.
+ """
+portdir_overlay.__doc__ = docstrings['portdir_overlay']
+
+
+def pkgdir(_argv):
+ print(portage.settings["PKGDIR"])
+
+docstrings['pkgdir'] = """
+ Returns the PKGDIR path.
+ """
+pkgdir.__doc__ = docstrings['pkgdir']
+
+
+def distdir(_argv):
+ print(portage.settings["DISTDIR"])
+
+docstrings['distdir'] = """
+ Returns the DISTDIR path.
+ """
+distdir.__doc__ = docstrings['distdir']
+
+
+def colormap(_argv):
+ print(portage.output.colormap())
+
+docstrings['colormap'] = """
+ Display the color.map as environment variables.
+ """
+colormap.__doc__ = docstrings['colormap']
+
+
+def envvar(argv):
+ verbose = "-v" in argv
+ if verbose:
+ argv.pop(argv.index("-v"))
+
+ if len(argv) == 0:
+ print("ERROR: insufficient parameters!")
+ return 2
+
+ for arg in argv:
+ if arg in ("PORTDIR", "PORTDIR_OVERLAY", "SYNC"):
+ print("WARNING: 'portageq envvar %s' is deprecated. Use 'portageq repositories_configuration' instead." % arg, file=sys.stderr)
+ if verbose:
+ print(arg + "=" + portage._shell_quote(portage.settings[arg]))
+ else:
+ print(portage.settings[arg])
+
+docstrings['envvar'] = """<variable>+
+	Returns a specific environment variable as it exists prior to ebuild.sh.
+ Similar to: emerge --verbose --info | egrep '^<variable>='
+ """
+envvar.__doc__ = docstrings['envvar']
+
+
+@uses_eroot
+def get_repos(argv):
+ if len(argv) < 1:
+ print("ERROR: insufficient parameters!")
+ return 2
+ print(" ".join(reversed(portage.db[argv[0]]["vartree"].settings.repositories.prepos_order)))
+
+docstrings['get_repos'] = """<eroot>
+	Returns all repositories that have names (a repo_name file);
+	argv[0] = $EROOT.
+ """
+get_repos.__doc__ = docstrings['get_repos']
+
+
+@uses_eroot
+def master_repositories(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(x.name for x in repo.masters))
+
+docstrings['master_repositories'] = """<eroot> <repo_id>+
+ Returns space-separated list of master repositories for specified repository.
+ """
+master_repositories.__doc__ = docstrings['master_repositories']
+
+
+@uses_eroot
+def master_repos(argv):
+ return master_repositories(argv)
+
+docstrings['master_repos'] = """<eroot> <repo_id>+
+ This is an alias for the master_repositories command.
+ """
+master_repos.__doc__ = docstrings['master_repos']
+
+
+@uses_eroot
+def get_repo_path(argv):
+
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ path = portage.db[argv[0]]["vartree"].settings.repositories.treemap.get(arg)
+ if path is None:
+ print("")
+ return 1
+ print(path)
+
+docstrings['get_repo_path'] = """<eroot> <repo_id>+
+	Returns the path to the repository named argv[1]; argv[0] = $EROOT.
+ """
+get_repo_path.__doc__ = docstrings['get_repo_path']
+
+
+@uses_eroot
+def available_eclasses(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(sorted(repo.eclass_db.eclasses)))
+
+docstrings['available_eclasses'] = """<eroot> <repo_id>+
+ Returns space-separated list of available eclasses for specified repository.
+ """
+available_eclasses.__doc__ = docstrings['available_eclasses']
+
+
+@uses_eroot
+def eclass_path(argv):
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ retval = 0
+ for arg in argv[2:]:
+ try:
+ eclass = repo.eclass_db.eclasses[arg]
+ except KeyError:
+ print("")
+ retval = 1
+ else:
+ print(eclass.location)
+ return retval
+
+docstrings['eclass_path'] = """<eroot> <repo_id> <eclass>+
+ Returns the path to specified eclass for specified repository.
+ """
+eclass_path.__doc__ = docstrings['eclass_path']
+
+
+@uses_eroot
+def license_path(argv):
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ retval = 0
+ for arg in argv[2:]:
+			license_file = ""
+			paths = reversed([os.path.join(x.location, 'licenses', arg) for x in list(repo.masters) + [repo]])
+			for path in paths:
+				if os.path.exists(path):
+					license_file = path
+					break
+			if license_file == "":
+				retval = 1
+			print(license_file)
+ return retval
+
+docstrings['license_path'] = """<eroot> <repo_id> <license>+
+ Returns the path to specified license for specified repository.
+ """
+license_path.__doc__ = docstrings['license_path']
+
+
+@uses_eroot
+def list_preserved_libs(argv):
+ if len(argv) != 1:
+ print("ERROR: wrong number of arguments")
+ return 2
+ mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
+ rValue = 1
+ msg = []
+ for cpv in sorted(mylibs):
+ msg.append(cpv)
+ for path in mylibs[cpv]:
+ msg.append(' ' + path)
+ rValue = 0
+ msg.append('\n')
+ writemsg_stdout(''.join(msg), noiselevel=-1)
+ return rValue
+
+docstrings['list_preserved_libs'] = """<eroot>
+ Print a list of libraries preserved during a package update in the form
+ package: path. Returns 1 if no preserved libraries could be found,
+ 0 otherwise.
+ """
+list_preserved_libs.__doc__ = docstrings['list_preserved_libs']
+
+
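+# Callable matchers used by pquery: each is invoked with a MetaDataXML
+# instance and returns True when the package's metadata.xml matches.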
+class MaintainerEmailMatcher(object):
+ def __init__(self, maintainer_emails):
+ self._re = re.compile("^(%s)$" % "|".join(maintainer_emails))
+
+ def __call__(self, metadata_xml):
+ match = False
+ matcher = self._re.match
+ for x in metadata_xml.maintainers():
+ if x.email is not None and matcher(x.email) is not None:
+ match = True
+ break
+ return match
+
+class HerdMatcher(object):
+ def __init__(self, herds):
+ self._herds = frozenset(herds)
+
+ def __call__(self, metadata_xml):
+ herds = self._herds
+ return any(x in herds for x in metadata_xml.herds())
+
+
+def pquery(parser, opts, args):
+ portdb = portage.db[portage.root]['porttree'].dbapi
+ root_config = RootConfig(portdb.settings,
+ portage.db[portage.root], None)
+
+ def _pkg(cpv, repo_name):
+ try:
+ metadata = dict(zip(
+ Package.metadata_keys,
+ portdb.aux_get(cpv,
+ Package.metadata_keys,
+ myrepo=repo_name)))
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+ return Package(built=False, cpv=cpv,
+ installed=False, metadata=metadata,
+ root_config=root_config,
+ type_name="ebuild")
+
+ need_metadata = False
+ atoms = []
+ for arg in args:
+ if "/" not in arg.split(":")[0]:
+ atom = insert_category_into_atom(arg, '*')
+ if atom is None:
+ writemsg("ERROR: Invalid atom: '%s'\n" % arg,
+ noiselevel=-1)
+ return 2
+ else:
+ atom = arg
+
+ try:
+ atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % arg,
+ noiselevel=-1)
+ return 2
+
+ if atom.slot is not None:
+ need_metadata = True
+
+ atoms.append(atom)
+
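+	# A bare */* matches everything, so drop the atom list entirely and
+	# skip the per-atom metadata checks.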
+ if "*/*" in atoms:
+ del atoms[:]
+ need_metadata = False
+
+ if not opts.no_filters:
+ need_metadata = True
+
+ xml_matchers = []
+ if opts.maintainer_email:
+ maintainer_emails = []
+ for x in opts.maintainer_email:
+ maintainer_emails.extend(x.split(","))
+ xml_matchers.append(MaintainerEmailMatcher(maintainer_emails))
+ if opts.herd is not None:
+ herds = []
+ for x in opts.herd:
+ herds.extend(x.split(","))
+ xml_matchers.append(HerdMatcher(herds))
+
+ repos = []
+ if opts.all_repos:
+ repos.extend(portdb.repositories.get_repo_for_location(location)
+ for location in portdb.porttrees)
+ elif opts.repo is not None:
+ repos.append(portdb.repositories[opts.repo])
+ else:
+ repos.append(portdb.repositories.mainRepo())
+
+ if not atoms:
+ names = None
+ categories = list(portdb.categories)
+ else:
+ category_wildcard = False
+ name_wildcard = False
+ categories = []
+ names = []
+ for atom in atoms:
+ category, name = portage.catsplit(atom.cp)
+ categories.append(category)
+ names.append(name)
+ if "*" in category:
+ category_wildcard = True
+ if "*" in name:
+ name_wildcard = True
+
+ if category_wildcard:
+ categories = list(portdb.categories)
+ else:
+ categories = list(set(categories))
+
+ if name_wildcard:
+ names = None
+ else:
+ names = sorted(set(names))
+
+ no_version = opts.no_version
+ categories.sort()
+
+ for category in categories:
+ if names is None:
+ cp_list = portdb.cp_all(categories=(category,))
+ else:
+ cp_list = [category + "/" + name for name in names]
+ for cp in cp_list:
+ matches = []
+ for repo in repos:
+ match = True
+ if xml_matchers:
+ metadata_xml_path = os.path.join(
+ repo.location, cp, 'metadata.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, None)
+ except (EnvironmentError, SyntaxError):
+ match = False
+ else:
+ for matcher in xml_matchers:
+ if not matcher(metadata_xml):
+ match = False
+ break
+ if not match:
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ if atoms:
+ for cpv in cpv_list:
+ pkg = None
+ for atom in atoms:
+ if atom.repo is not None and \
+ atom.repo != repo.name:
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+ if need_metadata:
+ if pkg is None:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+
+ if not (opts.no_filters or pkg.visible):
+ continue
+ if not portage.match_from_list(atom, [pkg]):
+ continue
+ matches.append(cpv)
+ break
+ if no_version and matches:
+ break
+ elif opts.no_filters:
+ matches.extend(cpv_list)
+ else:
+ for cpv in cpv_list:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+ else:
+ if pkg.visible:
+ matches.append(cpv)
+ if no_version:
+ break
+
+ if no_version and matches:
+ break
+
+ if not matches:
+ continue
+
+ if no_version:
+ writemsg_stdout("%s\n" % (cp,), noiselevel=-1)
+ else:
+ matches = list(set(matches))
+ portdb._cpv_sort_ascending(matches)
+ for cpv in matches:
+ writemsg_stdout("%s\n" % (cpv,), noiselevel=-1)
+
+ return os.EX_OK
+
+docstrings['pquery'] = """[options] [atom]+
+ Emulates a subset of Pkgcore's pquery tool.
+ """
+pquery.__doc__ = docstrings['pquery']
+
+
+#-----------------------------------------------------------------------------
+#
+# DO NOT CHANGE CODE BEYOND THIS POINT - IT'S NOT NEEDED!
+#
+
+non_commands = frozenset(['elog', 'eval_atom_use', 'exithandler', 'main', 'usage', 'uses_eroot'])
+commands = sorted(k for k, v in globals().items() \
+ if k not in non_commands and isinstance(v, types.FunctionType) and v.__module__ == "__main__")
+
+
+def add_pquery_arguments(parser):
+ pquery_option_groups = (
+ (
+ 'Repository matching options',
+ (
+ {
+ "longopt": "--no-filters",
+ "action": "store_true",
+ "help": "no visibility filters (ACCEPT_KEYWORDS, package masking, etc)"
+ },
+ {
+ "longopt": "--repo",
+ "help": "repo to use (default is PORTDIR if omitted)"
+ },
+ {
+ "longopt": "--all-repos",
+ "help": "search all repos"
+ }
+ )
+ ),
+ (
+ 'Package matching options',
+ (
+ {
+ "longopt": "--herd",
+ "action": "append",
+ "help": "exact match on a herd"
+ },
+ {
+ "longopt": "--maintainer-email",
+ "action": "append",
+ "help": "comma-separated list of maintainer email regexes to search for"
+ }
+ )
+ ),
+ (
+ 'Output formatting',
+ (
+ {
+ "shortopt": "-n",
+ "longopt": "--no-version",
+ "action": "store_true",
+ "help": "collapse multiple matching versions together"
+ },
+ )
+ ),
+ )
+
+ for group_title, opt_data in pquery_option_groups:
+ arg_group = parser.add_argument_group(group_title)
+ for opt_info in opt_data:
+ pargs = []
+ try:
+ pargs.append(opt_info["shortopt"])
+ except KeyError:
+ pass
+ try:
+ pargs.append(opt_info["longopt"])
+ except KeyError:
+ pass
+
+ kwargs = {}
+ try:
+ kwargs["action"] = opt_info["action"]
+ except KeyError:
+ pass
+ try:
+ kwargs["help"] = opt_info["help"]
+ except KeyError:
+ pass
+ arg_group.add_argument(*pargs, **portage._native_kwargs(kwargs))
+
+
+def usage(argv):
+ print(">>> Portage information query tool")
+ print(">>> %s" % portage.VERSION)
+ print(">>> Usage: portageq <command> [<option> ...]")
+ print("")
+ print("Available commands:")
+
+ #
+ # Show our commands -- we do this by scanning the functions in this
+	# file, and formatting each function's documentation.
+ #
+ help_mode = '--help' in argv
+ for name in commands:
+ doc = docstrings.get(name)
+		if doc is None:
+ print(" " + name)
+ print(" MISSING DOCUMENTATION!")
+ print("")
+ continue
+
+ lines = doc.lstrip("\n").split("\n")
+ print(" " + name + " " + lines[0].strip())
+ if len(argv) > 1:
+			if not help_mode:
+ lines = lines[:-1]
+ for line in lines[1:]:
+ print(" " + line.strip())
+
+ print()
+ print('Pkgcore pquery compatible options:')
+ print()
+ parser = ArgumentParser(add_help=False,
+ usage='portageq pquery [options] [atom ...]')
+ add_pquery_arguments(parser)
+ parser.print_help()
+
+ if len(argv) == 1:
+ print("\nRun portageq with --help for info")
+
+atom_validate_strict = "EBUILD_PHASE" in os.environ
+eapi = None
+if atom_validate_strict:
+ eapi = os.environ.get('EAPI')
+
+ def elog(elog_funcname, lines):
+ cmd = "source '%s/isolated-functions.sh' ; " % \
+ os.environ["PORTAGE_BIN_PATH"]
+ for line in lines:
+ cmd += "%s %s ; " % (elog_funcname, portage._shell_quote(line))
+ subprocess.call([portage.const.BASH_BINARY, "-c", cmd])
+
+else:
+ def elog(elog_funcname, lines):
+ pass
+
+def main(argv):
+
+ argv = portage._decode_argv(argv)
+
+ nocolor = os.environ.get('NOCOLOR')
+ if nocolor in ('yes', 'true'):
+ portage.output.nocolor()
+
+ parser = ArgumentParser(add_help=False)
+
+ # used by envvar
+ parser.add_argument("-v", dest="verbose", action="store_true")
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("-h", "--help", action="store_true")
+ actions.add_argument("--version", action="store_true")
+
+ add_pquery_arguments(parser)
+
+ opts, args = parser.parse_known_args(argv[1:])
+
+ if opts.help:
+ usage(argv)
+ return os.EX_OK
+ elif opts.version:
+ print("Portage", portage.VERSION)
+ return os.EX_OK
+
+ cmd = None
+ if args and args[0] in commands:
+ cmd = args[0]
+
+ if cmd == 'pquery':
+ cmd = None
+ args = args[1:]
+
+ if cmd is None:
+ return pquery(parser, opts, args)
+
+ if opts.verbose:
+ # used by envvar
+ args.append("-v")
+
+ argv = argv[:1] + args
+
+ if len(argv) < 2:
+ usage(argv)
+ sys.exit(os.EX_USAGE)
+
+ function = globals()[cmd]
+ uses_eroot = getattr(function, "uses_eroot", False) and len(argv) > 2
+ if uses_eroot:
+ if not os.path.isdir(argv[2]):
+ sys.stderr.write("Not a directory: '%s'\n" % argv[2])
+ sys.stderr.write("Run portageq with --help for info\n")
+ sys.stderr.flush()
+ sys.exit(os.EX_USAGE)
+ eprefix = portage.settings["EPREFIX"]
+ eroot = portage.util.normalize_path(argv[2])
+
+ if eprefix:
+ if not eroot.endswith(eprefix):
+ sys.stderr.write("ERROR: This version of portageq"
+ " only supports <eroot>s ending in"
+ " '%s'. The provided <eroot>, '%s',"
+ " doesn't.\n" % (eprefix, eroot))
+ sys.stderr.flush()
+ sys.exit(os.EX_USAGE)
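+			# eroot is known to end with eprefix here; strip the prefix
+			# (keeping the slash that precedes it) to recover ROOT.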
+ root = eroot[:1 - len(eprefix)]
+ else:
+ root = eroot
+
+ os.environ["ROOT"] = root
+
+ args = argv[2:]
+
+ try:
+ if uses_eroot:
+ args[0] = portage.settings['EROOT']
+ retval = function(args)
+ if retval:
+ sys.exit(retval)
+ except portage.exception.PermissionDenied as e:
+ sys.stderr.write("Permission denied: '%s'\n" % str(e))
+ sys.exit(e.errno)
+ except portage.exception.ParseError as e:
+ sys.stderr.write("%s\n" % str(e))
+ sys.exit(1)
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ # An error has occurred so we writemsg to stderr and exit nonzero.
+ portage.writemsg("You specified an unqualified atom that matched multiple packages:\n", noiselevel=-1)
+ for pkg in pkgs:
+ portage.writemsg("* %s\n" % pkg, noiselevel=-1)
+ portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
+ sys.exit(1)
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
+
+#-----------------------------------------------------------------------------
diff --git a/usr/lib/portage/bin/quickpkg b/usr/lib/portage/bin/quickpkg
new file mode 100755
index 0000000..4d6bc87
--- /dev/null
+++ b/usr/lib/portage/bin/quickpkg
@@ -0,0 +1,333 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function
+
+import errno
+import math
+import signal
+import sys
+import tarfile
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+from portage import xpak
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import Atom, use_reduce
+from portage.exception import (AmbiguousPackageName, InvalidAtom, InvalidData,
+ InvalidDependString, PackageSetNotFound, PermissionDenied)
+from portage.util import ConfigProtect, ensure_dirs, shlex_split
+from portage.dbapi.vartree import dblink, tar_contents
+from portage.checksum import perform_md5
+from portage._sets import load_default_config, SETPREFIX
+from portage.util._argparse import ArgumentParser
+
+def quickpkg_atom(options, infos, arg, eout):
+ settings = portage.settings
+ root = portage.settings['ROOT']
+ eroot = portage.settings['EROOT']
+ trees = portage.db[eroot]
+ vartree = trees["vartree"]
+ vardb = vartree.dbapi
+ bintree = trees["bintree"]
+
+ include_config = options.include_config == "y"
+ include_unmodified_config = options.include_unmodified_config == "y"
+ fix_metadata_keys = ["PF", "CATEGORY"]
+
+ try:
+ atom = dep_expand(arg, mydb=vardb, settings=vartree.settings)
+ except AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ eout.eerror("Please use a more specific atom: %s" % \
+ " ".join(e.args[0]))
+ del e
+ infos["missing"].append(arg)
+ return
+ except (InvalidAtom, InvalidData):
+ eout.eerror("Invalid atom: %s" % (arg,))
+ infos["missing"].append(arg)
+ return
+ if atom[:1] == '=' and arg[:1] != '=':
+ # dep_expand() allows missing '=' but it's really invalid
+ eout.eerror("Invalid atom: %s" % (arg,))
+ infos["missing"].append(arg)
+ return
+
+ matches = vardb.match(atom)
+ pkgs_for_arg = 0
+ for cpv in matches:
+ excluded_config_files = []
+ bintree.prevent_collision(cpv)
+ dblnk = vardb._dblink(cpv)
+ have_lock = False
+
+ if "__PORTAGE_INHERIT_VARDB_LOCK" not in settings:
+ try:
+ dblnk.lockdb()
+ have_lock = True
+ except PermissionDenied:
+ pass
+
+ try:
+ if not dblnk.exists():
+ # unmerged by a concurrent process
+ continue
+ iuse, use, restrict = vardb.aux_get(cpv,
+ ["IUSE","USE","RESTRICT"])
+ iuse = [ x.lstrip("+-") for x in iuse.split() ]
+ use = use.split()
+ try:
+ restrict = use_reduce(restrict, uselist=use, flat=True)
+ except InvalidDependString as e:
+ eout.eerror("Invalid RESTRICT metadata " + \
+ "for '%s': %s; skipping" % (cpv, str(e)))
+ del e
+ continue
+ if "bindist" in iuse and "bindist" not in use:
+ eout.ewarn("%s: package was emerged with USE=-bindist!" % cpv)
+ eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+ elif "bindist" in restrict:
+ eout.ewarn("%s: package has RESTRICT=bindist!" % cpv)
+ eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+ eout.ebegin("Building package for %s" % cpv)
+ pkgs_for_arg += 1
+ contents = dblnk.getcontents()
+ protect = None
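+ # When include_config is "n", protect(filename) -> True flags a
+ # CONFIG_PROTECT'ed file so that tar_contents() below keeps its
+ # contents out of the binary package; excluded paths are reported
+ # via eout.ewarn() afterwards.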
+ if not include_config:
+ confprot = ConfigProtect(eroot,
+ shlex_split(settings.get("CONFIG_PROTECT", "")),
+ shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
+ case_insensitive = ("case-insensitive-fs"
+ in settings.features))
+ def protect(filename):
+ if not confprot.isprotected(filename):
+ return False
+ if include_unmodified_config:
+ file_data = contents[filename]
+ if file_data[0] == "obj":
+ orig_md5 = file_data[2].lower()
+ cur_md5 = perform_md5(filename, calc_prelink=1)
+ if orig_md5 == cur_md5:
+ return False
+ excluded_config_files.append(filename)
+ return True
+ existing_metadata = dict(zip(fix_metadata_keys,
+ vardb.aux_get(cpv, fix_metadata_keys)))
+ category, pf = portage.catsplit(cpv)
+ required_metadata = {}
+ required_metadata["CATEGORY"] = category
+ required_metadata["PF"] = pf
+ update_metadata = {}
+ for k, v in required_metadata.items():
+ if v != existing_metadata[k]:
+ update_metadata[k] = v
+ if update_metadata:
+ vardb.aux_update(cpv, update_metadata)
+ xpdata = xpak.xpak(dblnk.dbdir)
+ binpkg_tmpfile = os.path.join(bintree.pkgdir,
+ cpv + ".tbz2." + str(os.getpid()))
+ ensure_dirs(os.path.dirname(binpkg_tmpfile))
+ tar = tarfile.open(binpkg_tmpfile, "w:bz2")
+ tar_contents(contents, root, tar, protect=protect)
+ tar.close()
+ xpak.tbz2(binpkg_tmpfile).recompose_mem(xpdata)
+ finally:
+ if have_lock:
+ dblnk.unlockdb()
+ bintree.inject(cpv, filename=binpkg_tmpfile)
+ binpkg_path = bintree.getname(cpv)
+ try:
+ s = os.stat(binpkg_path)
+ except OSError as e:
+ # Sanity check, shouldn't happen normally.
+ eout.eend(1)
+ eout.eerror(str(e))
+ del e
+ eout.eerror("Failed to create package: '%s'" % binpkg_path)
+ else:
+ eout.eend(0)
+ infos["successes"].append((cpv, s.st_size))
+ infos["config_files_excluded"] += len(excluded_config_files)
+ for filename in excluded_config_files:
+ eout.ewarn("Excluded config: '%s'" % filename)
+ if not pkgs_for_arg:
+ eout.eerror("Could not find anything " + \
+ "to match '%s'; skipping" % arg)
+ infos["missing"].append(arg)
+
+def quickpkg_set(options, infos, arg, eout):
+ eroot = portage.settings['EROOT']
+ trees = portage.db[eroot]
+ vartree = trees["vartree"]
+
+ settings = vartree.settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, trees)
+ sets = setconfig.getSets()
+
+ set_name = arg[1:]
+ if set_name not in sets:
+ eout.eerror("Package set not found: '%s'; skipping" % (arg,))
+ infos["missing"].append(arg)
+ return
+
+ try:
+ atoms = setconfig.getSetAtoms(set_name)
+ except PackageSetNotFound as e:
+ eout.eerror("Failed to process package set '%s' because " % set +
+ "it contains the non-existent package set '%s'; skipping" % e)
+ infos["missing"].append(arg)
+ return
+
+ for atom in atoms:
+ quickpkg_atom(options, infos, atom, eout)
+
+
+def quickpkg_extended_atom(options, infos, atom, eout):
+ eroot = portage.settings['EROOT']
+ trees = portage.db[eroot]
+ vartree = trees["vartree"]
+ vardb = vartree.dbapi
+
+ require_metadata = atom.slot or atom.repo
+ atoms = []
+ for cpv in vardb.cpv_all():
+ cpv_atom = Atom("=%s" % cpv)
+
+ if atom == "*/*":
+ atoms.append(cpv_atom)
+ continue
+
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ atoms.append(cpv_atom)
+
+ for atom in atoms:
+ quickpkg_atom(options, infos, atom, eout)
+
+
+def quickpkg_main(options, args, eout):
+ eroot = portage.settings['EROOT']
+ trees = portage.db[eroot]
+ bintree = trees["bintree"]
+
+ try:
+ ensure_dirs(bintree.pkgdir)
+ except portage.exception.PortageException:
+ pass
+ if not os.access(bintree.pkgdir, os.W_OK):
+ eout.eerror("No write access to '%s'" % bintree.pkgdir)
+ return errno.EACCES
+
+ infos = {}
+ infos["successes"] = []
+ infos["missing"] = []
+ infos["config_files_excluded"] = 0
+ for arg in args:
+ if arg[0] == SETPREFIX:
+ quickpkg_set(options, infos, arg, eout)
+ continue
+ try:
+ atom = Atom(arg, allow_wildcard=True, allow_repo=True)
+ except (InvalidAtom, InvalidData):
+ # maybe it's valid but missing category (requires dep_expand)
+ quickpkg_atom(options, infos, arg, eout)
+ else:
+ if atom.extended_syntax:
+ quickpkg_extended_atom(options, infos, atom, eout)
+ else:
+ quickpkg_atom(options, infos, atom, eout)
+
+ if not infos["successes"]:
+ eout.eerror("No packages found")
+ return 1
+ print()
+ eout.einfo("Packages now in '%s':" % bintree.pkgdir)
+ units = {10:'K', 20:'M', 30:'G', 40:'T',
+ 50:'P', 60:'E', 70:'Z', 80:'Y'}
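+ # e.g. a 5000000-byte package: log2(5000000) ~= 22.25, so power_of_2
+ # is 20 and the size prints as "4.8M" (binary units, as in `du -h`).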
+ for cpv, size in infos["successes"]:
+ if not size:
+ # avoid ValueError from math.log(0)
+ size_str = "0"
+ else:
+ power_of_2 = math.log(size, 2)
+ power_of_2 = 10*(power_of_2//10)
+ unit = units.get(power_of_2)
+ if unit:
+ size = float(size)/(2**power_of_2)
+ size_str = "%.1f" % size
+ if len(size_str) > 4:
+ # emulate `du -h`, don't show too many sig figs
+ size_str = str(int(size))
+ size_str += unit
+ else:
+ size_str = str(size)
+ eout.einfo("%s: %s" % (cpv, size_str))
+ if infos["config_files_excluded"]:
+ print()
+ eout.ewarn("Excluded config files: %d" % infos["config_files_excluded"])
+ eout.ewarn("See --help if you would like to include config files.")
+ if infos["missing"]:
+ print()
+ eout.ewarn("The following packages could not be found:")
+ eout.ewarn(" ".join(infos["missing"]))
+ return 2
+ return os.EX_OK
+
+if __name__ == "__main__":
+ usage = "quickpkg [options] <list of package atoms or package sets>"
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument("--umask",
+ default="0077",
+ help="umask used during package creation (default is 0077)")
+ parser.add_argument("--ignore-default-opts",
+ action="store_true",
+ help="do not use the QUICKPKG_DEFAULT_OPTS environment variable")
+ parser.add_argument("--include-config",
+ choices=["y","n"],
+ default="n",
+ metavar="<y|n>",
+ help="include all files protected by CONFIG_PROTECT (as a security precaution, default is 'n')")
+ parser.add_argument("--include-unmodified-config",
+ choices=["y","n"],
+ default="n",
+ metavar="<y|n>",
+ help="include files protected by CONFIG_PROTECT that have not been modified since installation (as a security precaution, default is 'n')")
+ options, args = parser.parse_known_args(sys.argv[1:])
+ if not options.ignore_default_opts:
+ default_opts = shlex_split(
+ portage.settings.get("QUICKPKG_DEFAULT_OPTS", ""))
+ options, args = parser.parse_known_args(default_opts + sys.argv[1:])
+ if not args:
+ parser.error("no packages atoms given")
+ try:
+ umask = int(options.umask, 8)
+ except ValueError:
+ parser.error("invalid umask: %s" % options.umask)
+ # We need to ensure a sane umask for the packages that will be created.
+ old_umask = os.umask(umask)
+ eout = portage.output.EOutput()
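+ # Re-read the terminal size on SIGWINCH so EOutput keeps wrapping
+ # its status lines to the current window width.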
+ def sigwinch_handler(signum, frame):
+ lines, eout.term_columns = portage.output.get_term_size()
+ signal.signal(signal.SIGWINCH, sigwinch_handler)
+ try:
+ retval = quickpkg_main(options, args, eout)
+ finally:
+ os.umask(old_umask)
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+ sys.exit(retval)
diff --git a/usr/lib/portage/bin/readpecoff b/usr/lib/portage/bin/readpecoff
new file mode 100755
index 0000000..b198cc5
--- /dev/null
+++ b/usr/lib/portage/bin/readpecoff
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+###################################################################
+# This script does the following: for implemented platforms, #
+# it echoes for each given path a line with the following format: #
+# #
+# <arch>;<obj>;<soname>;<rpath1:rpathN>;<needed1,neededN> #
+# #
+# arch may be any string, e.g. "PE32". obj is the full (!) path #
+# to the file itself. soname, rpath and needed should be self #
+# explaining - rpath is ":" separated, needed is "," separated. #
+# #
+# WARNING: Depends on CHOST argument to decide what to do! #
+# #
+# WARNING: The Script does _never_ fail! If required binaries #
+# are missing, or information gathering fails, the #
+# script will SILENTLY (!) exit, to not disturb the #
+# normal merging process. #
+# #
+# WARNING: The _first_ argument needs to be a valid CHOST!!! #
+# #
+###################################################################
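+#
+# Example (hypothetical invocation and output):
+#   readpecoff i586-pc-interix3.5 /bin/foo.exe
+# could print: 386;/bin/foo.exe;foo.dll;/usr/lib;msvcrt.dll,kernel32.dll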
+
+
+# Interix: Uses native objdump, since that's the only facility that
+# knows about the native shared library information data.
+# objdump is there in all interix installations where the GNU SDK
+# is installed, which is a prerequisite for prefix anyway.
+
+scanbin_interix() {
+ local _itx_objdump="/opt/gcc.3.3/bin/objdump"
+ [[ -x ${_itx_objdump} ]] || _itx_objdump="/opt/gcc.4.2/bin/objdump"
+ [[ -x ${_itx_objdump} ]] || exit 0
+
+ # objdump is there, so now gather the information
+ _itx_full_info() {
+ local obj="$(cd "$(dirname "$1")"; pwd)/${1##*/}"
+ local so=
+ local rp=
+ local ne=
+
+ { file -L "${obj}" | grep "PE" > /dev/null 2>&1; } || return
+
+ _itx_gather() {
+ ${_itx_objdump} -p "$1" | while IFS= read -r line; do
+ [[ ${line} == *RPATH* || ${line} == *NEEDED* || ${line} == *SONAME* ]] || continue
+
+ eval "$(echo "${line}" | sed -e 's,[[:space:]]*\([A-Z]*\)[[:space:]]*\(.*\)$,key=\1;value="\2",g')"
+
+ case "${key}" in
+ RPATH) echo "rp=\"${value}\"" ;;
+ NEEDED) echo "test -n \"\${ne}\" && ne=\"\${ne},${value}\"; test -z \"\${ne}\" && ne=\"${value}\"" ;;
+ SONAME) echo "so=\"${value}\"" ;;
+ esac
+ done
+ }
+
+ eval "$(_itx_gather ${obj})"
+ echo "386;${obj};${so};${rp};${ne}"
+ }
+
+ for x in "$@"; do
+ _itx_full_info "${x}"
+ done
+
+ exit 0
+}
+
+
+# Native Windows: Uses the winnt compiler ("parity") to gather
+# information. parity is the only one knowing about the location
+# and format of the relevant data, and it is there always when
+# wanting to build native win32 executables.
+
+scanbin_winnt() {
+ local _winnt_inspector="$(type -P "parity.inspector")"
+ [[ -x ${_winnt_inspector} ]] || exit 0
+
+ _winnt_full_info () {
+ local obj="$(cd "$(dirname "$1")"; pwd)/${1##*/}"
+
+ { file -L "${obj}" | grep "PE" > /dev/null 2>&1; } || exit 0
+
+ # parity.inspector in --raw mode has exactly the format we
+ # want - wonder, wonder, i implemented that switch :)
+
+ local info="$(${_winnt_inspector} --raw "${obj}")"
+ echo "386;${obj};${info}"
+ }
+
+ for x in "$@"; do
+ _winnt_full_info "${x}"
+ done
+}
+
+# CHOST is the first argument!
+_chost=$1
+
+# verify CHOST...
+[[ -z ${_chost} ]] && { echo "CHOST not set!!"; exit 1; }
+[[ ${_chost} == *-*-* ]] || { echo "invalid CHOST!!"; exit 1; }
+shift
+
+case "${_chost}" in
+*-interix*) scanbin_interix "$@" ;;
+*-winnt*) scanbin_winnt "$@" ;;
+esac
+
diff --git a/usr/lib/portage/bin/regenworld b/usr/lib/portage/bin/regenworld
new file mode 100755
index 0000000..5e1a0c9
--- /dev/null
+++ b/usr/lib/portage/bin/regenworld
@@ -0,0 +1,139 @@
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+from portage import os
+import re
+import tempfile
+import textwrap
+from portage._sets.files import StaticFileSet, WorldSelectedSet
+
+__candidatematcher__ = re.compile("^[0-9]+: \\*\\*\\* emerge ")
+__noncandidatematcher__ = re.compile(" sync( |$)| clean( |$)| search( |$)|--oneshot|--fetchonly| unmerge( |$)")
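+# e.g. an emerge.log line like "1234567890: *** emerge app-editors/vim"
+# (illustrative) is a candidate, while sync/clean/search/unmerge and
+# --oneshot/--fetchonly invocations are rejected.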
+
+def issyspkg(pkgline):
+ return (pkgline[0] == "*")
+
+def iscandidate(logline):
+ return (__candidatematcher__.match(logline) \
+ and not __noncandidatematcher__.search(logline))
+
+def getpkginfo(logline):
+ logline = re.sub("^[0-9]+: \\*\\*\\* emerge ", "", logline)
+ logline = logline.strip()
+ logline = re.sub("(\\S+\\.(ebuild|tbz2))|(--\\S+)|inject ", "", logline)
+ return logline.strip()
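+# e.g. getpkginfo("1234567890: *** emerge app-editors/vim") returns
+# "app-editors/vim" (illustrative); .ebuild/.tbz2 arguments, option
+# flags, and "inject " are stripped first.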
+
+__uniqlist__ = []
+def isunwanted(pkgline):
+ if pkgline in ["world", "system", "depclean", "info", "regen", ""]:
+ return False
+ elif pkgline in __uniqlist__:
+ return False
+ elif not re.search("^[a-zA-Z<>=~]", pkgline):
+ return False
+ else:
+ __uniqlist__.append(pkgline)
+ return True
+
+eroot = portage.settings['EROOT']
+world_file = os.path.join(eroot, portage.WORLD_FILE)
+
+# show a little description if we have arguments
+if len(sys.argv) >= 2 and sys.argv[1] in ["-h", "--help"]:
+ print("This script regenerates the portage world file by checking the portage")
+ print("logfile for all actions that you've done in the past. It ignores any")
+ print("arguments except --help. It is recommended that you make a backup of")
+ print("your existing world file (%s) before using this tool." % world_file)
+ sys.exit(0)
+
+worldlist = portage.grabfile(world_file)
+syslist = [x for x in portage.settings.packages if issyspkg(x)]
+
+logfile = portage.grabfile(os.path.join(eroot, "var/log/emerge.log"))
+biglist = [getpkginfo(x) for x in logfile if iscandidate(x)]
+tmplist = []
+for l in biglist:
+ tmplist += l.split()
+biglist = [x for x in tmplist if isunwanted(x)]
+#for p in biglist:
+# print(p)
+#sys.exit(0)
+
+# resolving virtuals
+realsyslist = []
+for mykey in syslist:
+ # drop the asterisk
+ mykey = mykey[1:]
+ #print("candidate:",mykey)
+ mylist = portage.db[eroot]["vartree"].dbapi.match(mykey)
+ if mylist:
+ mykey=portage.cpv_getkey(mylist[0])
+ if mykey not in realsyslist:
+ realsyslist.append(mykey)
+
+for mykey in biglist:
+ #print("checking:",mykey)
+ # start clean so a broken log entry doesn't reuse the previous match
+ mylist = None
+ try:
+ mylist = portage.db[eroot]["vartree"].dbapi.match(mykey)
+ except (portage.exception.InvalidAtom, KeyError):
+ if "--debug" in sys.argv:
+ print("* ignoring broken log entry for %s (likely injected)" % mykey)
+ except ValueError as e:
+ try:
+ print("* %s is an ambiguous package name, candidates are:\n%s" % (mykey, e))
+ except AttributeError:
+ # FIXME: Find out what causes this (bug #344845).
+ print("* %s is an ambiguous package name" % (mykey,))
+ continue
+ if mylist:
+ #print "mylist:",mylist
+ myfavkey=portage.cpv_getkey(mylist[0])
+ if (myfavkey not in realsyslist) and (myfavkey not in worldlist):
+ print("add to world:",myfavkey)
+ worldlist.append(myfavkey)
+
+if worldlist:
+ existing_set = WorldSelectedSet(eroot)
+ existing_set.load()
+
+ if not existing_set:
+ existing_set.replace(worldlist)
+ else:
+ old_world = existing_set._filename
+ fd, tmp_filename = tempfile.mkstemp(suffix=".tmp",
+ prefix=os.path.basename(old_world) + ".",
+ dir=os.path.dirname(old_world))
+ os.close(fd)
+
+ new_set = StaticFileSet(tmp_filename)
+ new_set.update(worldlist)
+
+ if existing_set.getAtoms() == new_set.getAtoms():
+ os.unlink(tmp_filename)
+ else:
+ new_set.write()
+
+ msg = "Please review differences between old and new files, " + \
+ "and replace the old file if desired."
+
+ portage.util.writemsg_stdout("\n",
+ noiselevel=-1)
+ for line in textwrap.wrap(msg, 65):
+ portage.util.writemsg_stdout("%s\n" % line,
+ noiselevel=-1)
+ portage.util.writemsg_stdout("\n",
+ noiselevel=-1)
+ portage.util.writemsg_stdout(" old: %s\n\n" % old_world,
+ noiselevel=-1)
+ portage.util.writemsg_stdout(" new: %s\n\n" % tmp_filename,
+ noiselevel=-1)
diff --git a/usr/lib/portage/bin/repoman b/usr/lib/portage/bin/repoman
new file mode 100755
index 0000000..9f99949
--- /dev/null
+++ b/usr/lib/portage/bin/repoman
@@ -0,0 +1,3132 @@
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Next to do: dep syntax checking in mask files
+# Then, check to make sure deps are satisfiable (to avoid "can't find match for" problems)
+# That last one is tricky because multiple profiles need to be checked.
+
+from __future__ import print_function, unicode_literals
+
+import codecs
+import copy
+import errno
+import formatter
+import io
+import logging
+import re
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import platform
+from itertools import chain
+from stat import S_ISDIR
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+
+try:
+ import xml.etree.ElementTree
+ from xml.parsers.expat import ExpatError
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # http://bugs.python.org/issue14988
+ msg = ["Please enable python's \"xml\" USE flag in order to use repoman."]
+ from portage.output import EOutput
+ out = EOutput()
+ for line in msg:
+ out.eerror(line)
+ sys.exit(1)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import repoman.checks
+from repoman.checks import run_checks
+from repoman import utilities
+from repoman.herdbase import make_herd_base
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+from _emerge.UserQuery import UserQuery
+import portage.checksum
+import portage.const
+import portage.repository.config
+from portage import cvstree, normalize_path
+from portage import util
+from portage.exception import (FileNotFound, InvalidAtom, MissingParameter,
+ ParseError, PermissionDenied)
+from portage.dep import Atom
+from portage.process import find_binary, spawn
+from portage.output import bold, create_color_func, \
+ green, nocolor, red
+from portage.output import ConsoleStyleFile, StyleWriter
+from portage.util import writemsg_level
+from portage.util._argparse import ArgumentParser
+from portage.package.ebuild.digestgen import digestgen
+from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+util.initialize_logger()
+
+# 14 is the length of DESCRIPTION=""
+max_desc_len = 100
+allowed_filename_chars="a-zA-Z0-9._-+:"
+pv_toolong_re = re.compile(r'[0-9]{19,}')
+GPG_KEY_ID_REGEX = r'(0x)?([0-9a-fA-F]{8}|[0-9a-fA-F]{16}|[0-9a-fA-F]{24}|[0-9a-fA-F]{32}|[0-9a-fA-F]{40})!?'
+bad = create_color_func("BAD")
+
+# A sane umask is needed for files that portage creates.
+os.umask(0o22)
+# Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
+# behave incrementally.
+repoman_incrementals = tuple(x for x in \
+ portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')
+config_root = os.environ.get("PORTAGE_CONFIGROOT")
+repoman_settings = portage.config(config_root=config_root, local_config=False)
+
+if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
+ repoman_settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ nocolor()
+
+def warn(txt):
+ print("repoman: " + txt)
+
+def err(txt):
+ warn(txt)
+ sys.exit(1)
+
+def exithandler(signum=None, _frame=None):
+ logging.fatal("Interrupted; exiting...")
+ if signum is None:
+ sys.exit(1)
+ else:
+ sys.exit(128 + signum)
+
+signal.signal(signal.SIGINT, exithandler)
+
+def ParseArgs(argv, qahelp):
+ """This function uses a customized optionParser to parse command line arguments for repoman
+ Args:
+ argv - a sequence of command line arguments
+ qahelp - a dict of qa warning to help message
+ Returns:
+ (opts, args), just like a call to parser.parse_args()
+ """
+
+ argv = portage._decode_argv(argv)
+
+ modes = {
+ 'commit' : 'Run a scan then commit changes',
+ 'ci' : 'Run a scan then commit changes',
+ 'fix' : 'Fix simple QA issues (stray digests, missing digests)',
+ 'full' : 'Scan directory tree and print all issues (not a summary)',
+ 'help' : 'Show this screen',
+ 'manifest' : 'Generate a Manifest (fetches files if necessary)',
+ 'manifest-check' : 'Check Manifests for missing or incorrect digests',
+ 'scan' : 'Scan directory tree for QA issues'
+ }
+
+ output_choices = {
+ 'default' : 'The normal output format',
+ 'column' : 'Columnar output suitable for use with grep'
+ }
+
+ mode_keys = list(modes)
+ mode_keys.sort()
+
+ output_keys = sorted(output_choices)
+
+ parser = ArgumentParser(usage="repoman [options] [mode]",
+ description="Modes: %s" % " | ".join(mode_keys),
+ epilog="For more help consult the man page.")
+
+ parser.add_argument('-a', '--ask', dest='ask', action='store_true', default=False,
+ help='Request a confirmation before committing')
+
+ parser.add_argument('-m', '--commitmsg', dest='commitmsg',
+ help='specify a commit message on the command line')
+
+ parser.add_argument('-M', '--commitmsgfile', dest='commitmsgfile',
+ help='specify a path to a file that contains a commit message')
+
+ parser.add_argument('--digest',
+ choices=('y', 'n'), metavar='<y|n>',
+ help='Automatically update Manifest digests for modified files')
+
+ parser.add_argument('-p', '--pretend', dest='pretend', default=False,
+ action='store_true', help='don\'t commit or fix anything; just show what would be done')
+
+ parser.add_argument('-q', '--quiet', dest="quiet", action="count", default=0,
+ help='do not print unnecessary messages')
+
+ parser.add_argument(
+ '--echangelog', choices=('y', 'n', 'force'), metavar="<y|n|force>",
+ help='for commit mode, call echangelog if ChangeLog is unmodified (or '
+ 'regardless of modification if \'force\' is specified)')
+
+ parser.add_argument('--experimental-inherit', choices=('y', 'n'),
+ metavar="<y|n>", default='n',
+ help='Enable experimental inherit.missing checks which may misbehave'
+ ' when the internal eclass database becomes outdated')
+
+ parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
+ help='Commit with QA violations')
+
+ parser.add_argument('--vcs', dest='vcs',
+ help='Force using specific VCS instead of autodetection')
+
+ parser.add_argument('-v', '--verbose', dest="verbosity", action='count',
+ help='be very verbose in output', default=0)
+
+ parser.add_argument('-V', '--version', dest='version', action='store_true',
+ help='show version info')
+
+ parser.add_argument('-x', '--xmlparse', dest='xml_parse', action='store_true',
+ default=False, help='forces the metadata.xml parse check to be carried out')
+
+ parser.add_argument(
+ '--if-modified', choices=('y', 'n'), default='n',
+ metavar="<y|n>",
+ help='only check packages that have uncommitted modifications')
+
+ parser.add_argument('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
+ default=False, help='ignore arch-specific failures (where arch != host)')
+
+ parser.add_argument("--ignore-default-opts",
+ action="store_true",
+ help="do not use the REPOMAN_DEFAULT_OPTS environment variable")
+
+ parser.add_argument('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
+ default=False, help='ignore masked packages (not allowed with commit mode)')
+
+ parser.add_argument('--include-arches', dest='include_arches',
+ metavar='ARCHES', action='append',
+ help='A space separated list of arches used to '
+ 'filter the selection of profiles for dependency checks')
+
+ parser.add_argument('-d', '--include-dev', dest='include_dev', action='store_true',
+ default=False, help='include dev profiles in dependency checks')
+
+ parser.add_argument('-e', '--include-exp-profiles', choices=('y', 'n'),
+ default=False, help='include exp profiles in dependency checks',
+ metavar='<y|n>')
+
+ parser.add_argument('--unmatched-removal', dest='unmatched_removal', action='store_true',
+ default=False, help='enable strict checking of package.mask and package.unmask files for unmatched removal atoms')
+
+ parser.add_argument('--without-mask', dest='without_mask', action='store_true',
+ default=False, help='behave as if no package.mask entries exist (not allowed with commit mode)')
+
+ parser.add_argument('--output-style', dest='output_style', choices=output_keys,
+ help='select output type', default='default')
+
+ parser.add_argument('--mode', dest='mode', choices=mode_keys,
+ help='specify which mode repoman will run in (default=full)')
+
+ opts, args = parser.parse_known_args(argv[1:])
+
+ if not opts.ignore_default_opts:
+ default_opts = portage.util.shlex_split(
+ repoman_settings.get("REPOMAN_DEFAULT_OPTS", ""))
+ if default_opts:
+ opts, args = parser.parse_known_args(default_opts + sys.argv[1:])
+
+ if opts.mode == 'help':
+ parser.print_help(short=False)
+
+ for arg in args:
+ if arg in modes:
+ if not opts.mode:
+ opts.mode = arg
+ break
+ else:
+ parser.error("invalid mode: %s" % arg)
+
+ if not opts.mode:
+ opts.mode = 'full'
+
+ if opts.mode == 'ci':
+ opts.mode = 'commit' # backwards compat shortcut
+
+ # Use the verbosity and quiet options to fiddle with the loglevel appropriately
+ for val in range(opts.verbosity):
+ logger = logging.getLogger()
+ logger.setLevel(logger.getEffectiveLevel() - 10)
+
+ for val in range(opts.quiet):
+ logger = logging.getLogger()
+ logger.setLevel(logger.getEffectiveLevel() + 10)
+
+ if opts.mode == 'commit' and not (opts.force or opts.pretend):
+ if opts.ignore_masked:
+ opts.ignore_masked = False
+ logging.warn('Commit mode automatically disables --ignore-masked')
+ if opts.without_mask:
+ opts.without_mask = False
+ logging.warn('Commit mode automatically disables --without-mask')
+
+ return (opts, args)
+
+qahelp = {
+ "CVS/Entries.IO_error": "Attempting to commit, and an IO error was encountered access the Entries file",
+ "ebuild.invalidname": "Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)",
+ "ebuild.namenomatch": "Ebuild files that do not have the same name as their parent directory",
+ "changelog.ebuildadded": "An ebuild was added but the ChangeLog was not modified",
+ "changelog.missing": "Missing ChangeLog files",
+ "ebuild.notadded": "Ebuilds that exist but have not been added to cvs",
+ "ebuild.patches": "PATCHES variable should be a bash array to ensure white space safety",
+ "changelog.notadded": "ChangeLogs that exist but have not been added to cvs",
+ "dependency.bad": "User-visible ebuilds with unsatisfied dependencies (matched against *visible* ebuilds)",
+ "dependency.badmasked": "Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds)",
+ "dependency.badindev": "User-visible ebuilds with unsatisfied dependencies (matched against *visible* ebuilds) in developing arch",
+ "dependency.badmaskedindev": "Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds) in developing arch",
+ "dependency.badtilde": "Uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+ "dependency.perlcore": "This ebuild directly depends on a package in perl-core; it should use the corresponding virtual instead.",
+ "dependency.syntax": "Syntax error in dependency string (usually an extra/missing space/parenthesis)",
+ "dependency.unknown": "Ebuild has a dependency that refers to an unknown package (which may be valid if it is a blocker for a renamed/removed package, or is an alternative choice provided by an overlay)",
+ "file.executable": "Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit",
+ "file.size": "Files in the files directory must be under 20 KiB",
+ "file.size.fatal": "Files in the files directory must be under 60 KiB",
+ "file.name": "File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
+ "file.UTF8": "File is not UTF8 compliant",
+ "inherit.deprecated": "Ebuild inherits a deprecated eclass",
+ "inherit.missing": "Ebuild uses functions from an eclass but does not inherit it",
+ "inherit.unused": "Ebuild inherits an eclass but does not use it",
+ "java.eclassesnotused": "With virtual/jdk in DEPEND you must inherit a java eclass",
+ "wxwidgets.eclassnotused": "Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass",
+ "KEYWORDS.dropped": "Ebuilds that appear to have dropped KEYWORDS for some arch",
+ "KEYWORDS.missing": "Ebuilds that have a missing or empty KEYWORDS variable",
+ "KEYWORDS.stable": "Ebuilds that have been added directly with stable KEYWORDS",
+ "KEYWORDS.stupid": "Ebuilds that use KEYWORDS=-* instead of package.mask",
+ "LICENSE.missing": "Ebuilds that have a missing or empty LICENSE variable",
+ "LICENSE.virtual": "Virtuals that have a non-empty LICENSE variable",
+ "DESCRIPTION.missing": "Ebuilds that have a missing or empty DESCRIPTION variable",
+ "DESCRIPTION.toolong": "DESCRIPTION is over %d characters" % max_desc_len,
+ "EAPI.definition": "EAPI definition does not conform to PMS section 7.3.1 (first non-comment, non-blank line)",
+ "EAPI.deprecated": "Ebuilds that use features that are deprecated in the current EAPI",
+ "EAPI.incompatible": "Ebuilds that use features that are only available with a different EAPI",
+ "EAPI.unsupported": "Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
+ "SLOT.invalid": "Ebuilds that have a missing or invalid SLOT variable value",
+ "HOMEPAGE.missing": "Ebuilds that have a missing or empty HOMEPAGE variable",
+ "HOMEPAGE.virtual": "Virtuals that have a non-empty HOMEPAGE variable",
+ "PDEPEND.suspect": "PDEPEND contains a package that usually only belongs in DEPEND.",
+ "LICENSE.syntax": "Syntax error in LICENSE (usually an extra/missing space/parenthesis)",
+ "PROVIDE.syntax": "Syntax error in PROVIDE (usually an extra/missing space/parenthesis)",
+ "PROPERTIES.syntax": "Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)",
+ "RESTRICT.syntax": "Syntax error in RESTRICT (usually an extra/missing space/parenthesis)",
+ "REQUIRED_USE.syntax": "Syntax error in REQUIRED_USE (usually an extra/missing space/parenthesis)",
+ "SRC_URI.syntax": "Syntax error in SRC_URI (usually an extra/missing space/parenthesis)",
+ "SRC_URI.mirror": "A uri listed in profiles/thirdpartymirrors is found in SRC_URI",
+ "ebuild.syntax": "Error generating cache entry for ebuild; typically caused by ebuild syntax error or digest verification failure",
+ "ebuild.output": "A simple sourcing of the ebuild produces output; this breaks ebuild policy.",
+ "ebuild.nesteddie": "Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.",
+ "variable.invalidchar": "A variable contains an invalid character that is not part of the ASCII character set",
+ "variable.readonly": "Assigning a readonly variable",
+ "variable.usedwithhelpers": "Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers",
+ "LIVEVCS.stable": "This ebuild is a live checkout from a VCS but has stable keywords.",
+ "LIVEVCS.unmasked": "This ebuild is a live checkout from a VCS but has keywords and is not masked in the global package.mask.",
+ "IUSE.invalid": "This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file",
+ "IUSE.missing": "This ebuild has a USE conditional which references a flag that is not listed in IUSE",
+ "IUSE.rubydeprecated": "The ebuild has set a ruby interpreter in USE_RUBY, that is not available as a ruby target anymore",
+ "LICENSE.invalid": "This ebuild is listing a license that doesnt exist in portages license/ dir.",
+ "LICENSE.deprecated": "This ebuild is listing a deprecated license.",
+ "KEYWORDS.invalid": "This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found",
+ "RDEPEND.implicit": "RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4)",
+ "RDEPEND.suspect": "RDEPEND contains a package that usually only belongs in DEPEND.",
+ "RESTRICT.invalid": "This ebuild contains invalid RESTRICT values.",
+ "digest.assumed": "Existing digest must be assumed correct (Package level only)",
+ "digest.missing": "Some files listed in SRC_URI aren't referenced in the Manifest",
+ "digest.unused": "Some files listed in the Manifest aren't referenced in SRC_URI",
+ "ebuild.majorsyn": "This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
+ "ebuild.minorsyn": "This ebuild has a minor syntax error that contravenes gentoo coding style",
+ "ebuild.badheader": "This ebuild has a malformed header",
+ "manifest.bad": "Manifest has missing or incorrect digests",
+ "metadata.missing": "Missing metadata.xml files",
+ "metadata.bad": "Bad metadata.xml files",
+ "metadata.warning": "Warnings in metadata.xml files",
+ "portage.internal": "The ebuild uses an internal Portage function or variable",
+ "repo.eapi.banned": "The ebuild uses an EAPI which is banned by the repository's metadata/layout.conf settings",
+ "repo.eapi.deprecated": "The ebuild uses an EAPI which is deprecated by the repository's metadata/layout.conf settings",
+ "virtual.oldstyle": "The ebuild PROVIDEs an old-style virtual (see GLEP 37)",
+ "virtual.suspect": "Ebuild contains a package that usually should be pulled via virtual/, not directly.",
+ "usage.obsolete": "The ebuild makes use of an obsolete construct",
+ "upstream.workaround": "The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
+}
+
+qacats = list(qahelp)
+qacats.sort()
+
+qawarnings = set((
+"changelog.ebuildadded",
+"changelog.missing",
+"changelog.notadded",
+"dependency.unknown",
+"digest.assumed",
+"digest.unused",
+"ebuild.notadded",
+"ebuild.nesteddie",
+"dependency.badmasked",
+"dependency.badindev",
+"dependency.badmaskedindev",
+"dependency.badtilde",
+"dependency.perlcore",
+"DESCRIPTION.toolong",
+"EAPI.deprecated",
+"HOMEPAGE.virtual",
+"LICENSE.deprecated",
+"LICENSE.virtual",
+"KEYWORDS.dropped",
+"KEYWORDS.stupid",
+"KEYWORDS.missing",
+"IUSE.invalid",
+"PDEPEND.suspect",
+"RDEPEND.implicit",
+"RDEPEND.suspect",
+"virtual.suspect",
+"RESTRICT.invalid",
+"ebuild.minorsyn",
+"ebuild.badheader",
+"ebuild.patches",
+"file.size",
+"inherit.unused",
+"inherit.deprecated",
+"java.eclassesnotused",
+"wxwidgets.eclassnotused",
+"metadata.warning",
+"portage.internal",
+"repo.eapi.deprecated",
+"usage.obsolete",
+"upstream.workaround",
+"LIVEVCS.stable",
+"LIVEVCS.unmasked",
+"IUSE.rubydeprecated",
+))
+
+non_ascii_re = re.compile(r'[^\x00-\x7f]')
+
+missingvars = ["KEYWORDS", "LICENSE", "DESCRIPTION", "HOMEPAGE"]
+allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
+allvars.update(Package.metadata_keys)
+allvars = sorted(allvars)
+commitmessage = None
+for x in missingvars:
+ x += ".missing"
+ if x not in qacats:
+ logging.warn('* missingvars values need to be added to qahelp ("%s")' % x)
+ qacats.append(x)
+ qawarnings.add(x)
+
+valid_restrict = frozenset(["binchecks", "bindist",
+ "fetch", "installsources", "mirror", "preserve-libs",
+ "primaryuri", "splitdebug", "strip", "test", "userpriv"])
+
+live_eclasses = portage.const.LIVE_ECLASSES
+
+suspect_rdepend = frozenset([
+ "app-arch/cabextract",
+ "app-arch/rpm2targz",
+ "app-doc/doxygen",
+ "dev-lang/nasm",
+ "dev-lang/swig",
+ "dev-lang/yasm",
+ "dev-perl/extutils-pkgconfig",
+ "dev-util/byacc",
+ "dev-util/cmake",
+ "dev-util/ftjam",
+ "dev-util/gperf",
+ "dev-util/gtk-doc",
+ "dev-util/gtk-doc-am",
+ "dev-util/intltool",
+ "dev-util/jam",
+ "dev-util/pkg-config-lite",
+ "dev-util/pkgconf",
+ "dev-util/pkgconfig",
+ "dev-util/pkgconfig-openbsd",
+ "dev-util/scons",
+ "dev-util/unifdef",
+ "dev-util/yacc",
+ "media-gfx/ebdftopcf",
+ "sys-apps/help2man",
+ "sys-devel/autoconf",
+ "sys-devel/automake",
+ "sys-devel/bin86",
+ "sys-devel/bison",
+ "sys-devel/dev86",
+ "sys-devel/flex",
+ "sys-devel/m4",
+ "sys-devel/pmake",
+ "virtual/linux-sources",
+ "virtual/pkgconfig",
+ "x11-misc/bdftopcf",
+ "x11-misc/imake",
+])
+
+suspect_virtual = {
+ "dev-util/pkg-config-lite":"virtual/pkgconfig",
+ "dev-util/pkgconf":"virtual/pkgconfig",
+ "dev-util/pkgconfig":"virtual/pkgconfig",
+ "dev-util/pkgconfig-openbsd":"virtual/pkgconfig",
+ "dev-libs/libusb":"virtual/libusb",
+ "dev-libs/libusbx":"virtual/libusb",
+ "dev-libs/libusb-compat":"virtual/libusb",
+}
+
+ruby_deprecated = frozenset([
+ "ruby_targets_ree18",
+ "ruby_targets_ruby18",
+])
+
+metadata_xml_encoding = 'UTF-8'
+metadata_xml_declaration = '<?xml version="1.0" encoding="%s"?>' % \
+ (metadata_xml_encoding,)
+metadata_doctype_name = 'pkgmetadata'
+metadata_dtd_uri = 'http://www.gentoo.org/dtd/metadata.dtd'
+# force refetch if the local copy creation time is older than this
+metadata_dtd_ctime_interval = 60 * 60 * 24 * 7 # 7 days
+
+# file.executable
+no_exec = frozenset(["Manifest", "ChangeLog", "metadata.xml"])
+
+options, arguments = ParseArgs(sys.argv, qahelp)
+
+if options.version:
+ print("Portage", portage.VERSION)
+ sys.exit(0)
+
+if options.experimental_inherit == 'y':
+ # This is experimental, so it's non-fatal.
+ qawarnings.add("inherit.missing")
+ repoman.checks._init(experimental_inherit=True)
+
+# Set this to False when an extraordinary issue (generally
+# something other than a QA issue) makes it impossible to
+# commit (like if Manifest generation fails).
+can_force = True
+
+portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
+if portdir is None:
+ sys.exit(1)
+
+myreporoot = os.path.basename(portdir_overlay)
+myreporoot += mydir[len(portdir_overlay):]
+
+if options.vcs:
+ if options.vcs in ('cvs', 'svn', 'git', 'bzr', 'hg'):
+ vcs = options.vcs
+ else:
+ vcs = None
+else:
+ vcses = utilities.FindVCS()
+ if len(vcses) > 1:
+ print(red('*** Ambiguous workdir -- more than one VCS found at the same depth: %s.' % ', '.join(vcses)))
+ print(red('*** Please either clean up your workdir or specify --vcs option.'))
+ sys.exit(1)
+ elif vcses:
+ vcs = vcses[0]
+ else:
+ vcs = None
+
+if options.if_modified == "y" and vcs is None:
+ logging.info("Not in a version controlled repository; "
+ "disabling --if-modified.")
+ options.if_modified = "n"
+
+# Disable copyright/mtime check if vcs does not preserve mtime (bug #324075).
+vcs_preserves_mtime = vcs in ('cvs',)
+
+vcs_local_opts = repoman_settings.get("REPOMAN_VCS_LOCAL_OPTS", "").split()
+vcs_global_opts = repoman_settings.get("REPOMAN_VCS_GLOBAL_OPTS")
+if vcs_global_opts is None:
+ if vcs in ('cvs', 'svn'):
+ vcs_global_opts = "-q"
+ else:
+ vcs_global_opts = ""
+vcs_global_opts = vcs_global_opts.split()
+
+if options.mode == 'commit' and not options.pretend and not vcs:
+ logging.info("Not in a version controlled repository; enabling pretend mode.")
+ options.pretend = True
+
+# Ensure that current repository is in the list of enabled repositories.
+repodir = os.path.realpath(portdir_overlay)
+try:
+ repoman_settings.repositories.get_repo_for_location(repodir)
+except KeyError:
+ repo_name = portage.repository.config.RepoConfig._read_valid_repo_name(portdir_overlay)[0]
+ layout_conf_data = portage.repository.config.parse_layout_conf(portdir_overlay)[0]
+ if layout_conf_data['repo-name']:
+ repo_name = layout_conf_data['repo-name']
+ tmp_conf_file = io.StringIO(textwrap.dedent("""
+ [%s]
+ location = %s
+ """) % (repo_name, portdir_overlay))
+ # Ensure that the repository corresponding to $PWD overrides a
+ # repository of the same name referenced by the existing PORTDIR
+ # or PORTDIR_OVERLAY settings.
+ repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
+ (repoman_settings.get('PORTDIR_OVERLAY', ''),
+ portage._shell_quote(portdir_overlay))
+ repositories = portage.repository.config.load_repository_config(repoman_settings, extra_files=[tmp_conf_file])
+ # We have to call the config constructor again so that attributes
+ # dependent on config.repositories are initialized correctly.
+ repoman_settings = portage.config(config_root=config_root, local_config=False, repositories=repositories)
+
+root = repoman_settings['EROOT']
+trees = {
+ root : {'porttree' : portage.portagetree(settings=repoman_settings)}
+}
+portdb = trees[root]['porttree'].dbapi
+
+# Constrain dependency resolution to the master(s)
+# that are specified in layout.conf.
+repo_config = repoman_settings.repositories.get_repo_for_location(repodir)
+portdb.porttrees = list(repo_config.eclass_db.porttrees)
+portdir = portdb.porttrees[0]
+commit_env = os.environ.copy()
+# list() is for iteration on a copy.
+for repo in list(repoman_settings.repositories):
+ # all paths are canonical
+ if repo.location not in repo_config.eclass_db.porttrees:
+ del repoman_settings.repositories[repo.name]
+
+if repo_config.allow_provide_virtual:
+ qawarnings.add("virtual.oldstyle")
+
+if repo_config.sign_commit:
+ if vcs == 'git':
+ # NOTE: It's possible to use --gpg-sign=key_id to specify the key in
+ # the commit arguments. If key_id is unspecified, then it must be
+ # configured by `git config user.signingkey key_id`.
+ vcs_local_opts.append("--gpg-sign")
+ if repoman_settings.get("PORTAGE_GPG_DIR"):
+ # Pass GNUPGHOME to git for bug #462362.
+ commit_env["GNUPGHOME"] = repoman_settings["PORTAGE_GPG_DIR"]
+
+ # Pass GPG_TTY to git for bug #477728.
+ try:
+ commit_env["GPG_TTY"] = os.ttyname(sys.stdin.fileno())
+ except OSError:
+ pass
+
+# In order to disable manifest signatures, repos may set
+# "sign-manifests = false" in metadata/layout.conf. This
+# can be used to prevent merge conflicts like those that
+# thin-manifests is designed to prevent.
+sign_manifests = "sign" in repoman_settings.features and \
+ repo_config.sign_manifest
+
+if repo_config.sign_manifest and repo_config.name == "gentoo" and \
+ options.mode in ("commit",) and not sign_manifests:
+ msg = ("The '%s' repository has manifest signatures enabled, "
+ "but FEATURES=sign is currently disabled. In order to avoid this "
+ "warning, enable FEATURES=sign in make.conf. Alternatively, "
+ "repositories can disable manifest signatures by setting "
+ "'sign-manifests = false' in metadata/layout.conf.") % \
+ (repo_config.name,)
+ for line in textwrap.wrap(msg, 60):
+ logging.warn(line)
+
+if sign_manifests and options.mode in ("commit",) and \
+ repoman_settings.get("PORTAGE_GPG_KEY") and \
+ re.match(r'^%s$' % GPG_KEY_ID_REGEX,
+ repoman_settings["PORTAGE_GPG_KEY"]) is None:
+ logging.error("PORTAGE_GPG_KEY value is invalid: %s" %
+ repoman_settings["PORTAGE_GPG_KEY"])
+ sys.exit(1)
+
+manifest_hashes = repo_config.manifest_hashes
+if manifest_hashes is None:
+ manifest_hashes = portage.const.MANIFEST2_HASH_DEFAULTS
+
+if options.mode in ("commit", "fix", "manifest"):
+ if portage.const.MANIFEST2_REQUIRED_HASH not in manifest_hashes:
+ msg = ("The 'manifest-hashes' setting in the '%s' repository's "
+ "metadata/layout.conf does not contain the '%s' hash which "
+ "is required by this portage version. You will have to "
+ "upgrade portage if you want to generate valid manifests for "
+ "this repository.") % \
+ (repo_config.name, portage.const.MANIFEST2_REQUIRED_HASH)
+ for line in textwrap.wrap(msg, 70):
+ logging.error(line)
+ sys.exit(1)
+
+ unsupported_hashes = manifest_hashes.difference(
+ portage.const.MANIFEST2_HASH_FUNCTIONS)
+ if unsupported_hashes:
+ msg = ("The 'manifest-hashes' setting in the '%s' repository's "
+ "metadata/layout.conf contains one or more hash types '%s' "
+ "which are not supported by this portage version. You will "
+ "have to upgrade portage if you want to generate valid "
+ "manifests for this repository.") % \
+ (repo_config.name, " ".join(sorted(unsupported_hashes)))
+ for line in textwrap.wrap(msg, 70):
+ logging.error(line)
+ sys.exit(1)
+
+if options.echangelog is None and repo_config.update_changelog:
+ options.echangelog = 'y'
+
+if vcs is None:
+ options.echangelog = 'n'
+
+# The --echangelog option causes automatic ChangeLog generation,
+# which invalidates changelog.ebuildadded and changelog.missing
+# checks.
+# Note: some projects don't keep ChangeLogs in distributed SCMs;
+# the ChangeLog is generated server-side from the scm log before the
+# package moves to the rsync server, which avoids merge collisions.
+# Gentoo's Council decided to always use the ChangeLog file.
+# TODO: shouldn't this just be switched on the repo, instead of the VCS?
+check_changelog = options.echangelog not in ('y', 'force') and vcs in ('cvs', 'svn')
+
+if 'digest' in repoman_settings.features and options.digest != 'n':
+ options.digest = 'y'
+
+logging.debug("vcs: %s" % (vcs,))
+logging.debug("repo config: %s" % (repo_config,))
+logging.debug("options: %s" % (options,))
+
+# It's confusing if these warnings are displayed without the user
+# being told which profile they come from, so disable them.
+env = os.environ.copy()
+env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'
+
+categories = []
+for path in repo_config.eclass_db.porttrees:
+ categories.extend(portage.util.grabfile(
+ os.path.join(path, 'profiles', 'categories')))
+repoman_settings.categories = frozenset(
+ portage.util.stack_lists([categories], incremental=1))
+categories = repoman_settings.categories
+
+portdb.settings = repoman_settings
+root_config = RootConfig(repoman_settings, trees[root], None)
+# We really only need to cache the metadata that's necessary for visibility
+# filtering. Anything else can be discarded to reduce memory consumption.
+portdb._aux_cache_keys.clear()
+portdb._aux_cache_keys.update(["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])
+
+reposplit = myreporoot.split(os.path.sep)
+repolevel = len(reposplit)
+
+# Check if it's in $PORTDIR/$CATEGORY/$PN, otherwise bail if committing.
+# The reason for this is that if they're trying to commit from just $FILESDIR/*, the Manifest needs updating.
+# This check ensures that repoman knows where it is, and that the manifest recommit is at least possible.
+if options.mode == 'commit' and repolevel not in [1, 2, 3]:
+ print(red("***")+" Commit attempts *must* be from within a vcs co, category, or package directory.")
+ print(red("***")+" Attempting to commit from a packages files directory will be blocked for instance.")
+ print(red("***")+" This is intended behaviour, to ensure the manifest is recommitted for a package.")
+ print(red("***"))
+ err("Unable to identify level we're commiting from for %s" % '/'.join(reposplit))
+
+# Make startdir relative to the canonical repodir, so that we can pass
+# it to digestgen and it won't have to be canonicalized again.
+if repolevel == 1:
+ startdir = repodir
+else:
+ startdir = normalize_path(mydir)
+ startdir = os.path.join(repodir, *startdir.split(os.sep)[-2 - repolevel + 3:])
+
+def caterror(mycat):
+ err(mycat + " is not an official category. Skipping QA checks in this directory.\nPlease ensure that you add " + mycat + " to " + repodir + "/profiles/categories\nif it is a new category.")
+
+def repoman_getstatusoutput(cmd):
+ """
+ Implements an interface similar to getstatusoutput(), but with
+ customized unicode handling (see bug #310789) and without the shell.
+ """
+ args = portage.util.shlex_split(cmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ encoding = _encodings['fs']
+ args = [_unicode_encode(x,
+ encoding=encoding, errors='strict') for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0],
+ encoding=encoding, errors='strict')
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
+
+class repoman_popen(portage.proxy.objectproxy.ObjectProxy):
+ """
+ Implements an interface similar to os.popen(), but with customized
+ unicode handling (see bug #310789) and without the shell.
+ """
+
+ __slots__ = ('_proc', '_stdout')
+
+ def __init__(self, cmd):
+ args = portage.util.shlex_split(cmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ encoding = _encodings['fs']
+ args = [_unicode_encode(x,
+ encoding=encoding, errors='strict') for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ object.__setattr__(self, '_proc', proc)
+ object.__setattr__(self, '_stdout',
+ codecs.getreader(encoding)(proc.stdout, 'strict'))
+
+ def _get_target(self):
+ return object.__getattribute__(self, '_stdout')
+
+ __enter__ = _get_target
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ proc = object.__getattribute__(self, '_proc')
+ proc.wait()
+ proc.stdout.close()
+
+class ProfileDesc(object):
+ __slots__ = ('abs_path', 'arch', 'status', 'sub_path', 'tree_path',)
+ def __init__(self, arch, status, sub_path, tree_path):
+ self.arch = arch
+ self.status = status
+ if sub_path:
+ sub_path = normalize_path(sub_path.lstrip(os.sep))
+ self.sub_path = sub_path
+ self.tree_path = tree_path
+ if tree_path:
+ self.abs_path = os.path.join(tree_path, 'profiles', self.sub_path)
+ else:
+ self.abs_path = tree_path
+
+ def __str__(self):
+ if self.sub_path:
+ return self.sub_path
+ return 'empty profile'
+
+profile_list = []
+valid_profile_types = frozenset(['dev', 'exp', 'stable'])
+
+# get lists of valid keywords, licenses, and use
+kwlist = set()
+liclist = set()
+uselist = set()
+global_pmasklines = []
+
+for path in portdb.porttrees:
+ try:
+ liclist.update(os.listdir(os.path.join(path, "licenses")))
+ except OSError:
+ pass
+ kwlist.update(portage.grabfile(os.path.join(path,
+ "profiles", "arch.list")))
+
+ use_desc = portage.grabfile(os.path.join(path, 'profiles', 'use.desc'))
+ for x in use_desc:
+ x = x.split()
+ if x:
+ uselist.add(x[0])
+
+ expand_desc_dir = os.path.join(path, 'profiles', 'desc')
+ try:
+ expand_list = os.listdir(expand_desc_dir)
+ except OSError:
+ pass
+ else:
+ for fn in expand_list:
+ if not fn[-5:] == '.desc':
+ continue
+ use_prefix = fn[:-5].lower() + '_'
+ for x in portage.grabfile(os.path.join(expand_desc_dir, fn)):
+ x = x.split()
+ if x:
+ uselist.add(use_prefix + x[0])
+
+ global_pmasklines.append(portage.util.grabfile_package(
+ os.path.join(path, 'profiles', 'package.mask'), recursive=1, verify_eapi=True))
+
+ desc_path = os.path.join(path, 'profiles', 'profiles.desc')
+ try:
+ desc_file = io.open(_unicode_encode(desc_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ for i, x in enumerate(desc_file):
+ if x[0] == "#":
+ continue
+ arch = x.split()
+ if len(arch) == 0:
+ continue
+ if len(arch) != 3:
+ err("wrong format: \"" + bad(x.strip()) + "\" in " + \
+ desc_path + " line %d" % (i + 1, ))
+ elif arch[0] not in kwlist:
+ err("invalid arch: \"" + bad(arch[0]) + "\" in " + \
+ desc_path + " line %d" % (i + 1, ))
+ elif arch[2] not in valid_profile_types:
+ err("invalid profile type: \"" + bad(arch[2]) + "\" in " + \
+ desc_path + " line %d" % (i + 1, ))
+ profile_desc = ProfileDesc(arch[0], arch[2], arch[1], path)
+ if not os.path.isdir(profile_desc.abs_path):
+ logging.error(
+ "Invalid %s profile (%s) for arch %s in %s line %d",
+ arch[2], arch[1], arch[0], desc_path, i + 1)
+ continue
+ if os.path.exists(
+ os.path.join(profile_desc.abs_path, 'deprecated')):
+ continue
+ profile_list.append(profile_desc)
+ desc_file.close()
+
+repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
+repoman_settings.backup_changes('PORTAGE_ARCHLIST')
+
+global_pmasklines = portage.util.stack_lists(global_pmasklines, incremental=1)
+global_pmaskdict = {}
+for x in global_pmasklines:
+ global_pmaskdict.setdefault(x.cp, []).append(x)
+del global_pmasklines
+
+def has_global_mask(pkg):
+ mask_atoms = global_pmaskdict.get(pkg.cp)
+ if mask_atoms:
+ pkg_list = [pkg]
+ for x in mask_atoms:
+ if portage.dep.match_from_list(x, pkg_list):
+ return x
+ return None
+
+# Ensure that profile sub_path attributes are unique. Process in reverse order
+# so that profiles with duplicate sub_path from overlays will override
+# profiles with the same sub_path from parent repos.
+profiles = {}
+profile_list.reverse()
+profile_sub_paths = set()
+for prof in profile_list:
+ if prof.sub_path in profile_sub_paths:
+ continue
+ profile_sub_paths.add(prof.sub_path)
+ profiles.setdefault(prof.arch, []).append(prof)
+
+# Use an empty profile for checking dependencies of
+# packages that have empty KEYWORDS.
+prof = ProfileDesc('**', 'stable', '', '')
+profiles.setdefault(prof.arch, []).append(prof)
+
+for x in repoman_settings.archlist():
+ if x[0] == "~":
+ continue
+ if x not in profiles:
+ print(red("\"" + x + "\" doesn't have a valid profile listed in profiles.desc."))
+ print(red("You need to either \"cvs update\" your profiles dir or follow this"))
+ print(red("up with the " + x + " team."))
+ print()
+
+liclist_deprecated = set()
+if "DEPRECATED" in repoman_settings._license_manager._license_groups:
+ liclist_deprecated.update(
+ repoman_settings._license_manager.expandLicenseTokens(["@DEPRECATED"]))
+
+if not liclist:
+ logging.fatal("Couldn't find licenses?")
+ sys.exit(1)
+
+if not kwlist:
+ logging.fatal("Couldn't read KEYWORDS from arch.list")
+ sys.exit(1)
+
+if not uselist:
+ logging.fatal("Couldn't find use.desc?")
+ sys.exit(1)
+
+scanlist = []
+if repolevel == 2:
+ # we are inside a category directory
+ catdir = reposplit[-1]
+ if catdir not in categories:
+ caterror(catdir)
+ mydirlist = os.listdir(startdir)
+ for x in mydirlist:
+ if x == "CVS" or x.startswith("."):
+ continue
+ if os.path.isdir(startdir + "/" + x):
+ scanlist.append(catdir + "/" + x)
+ repo_subdir = catdir + os.sep
+elif repolevel == 1:
+ for x in categories:
+ if not os.path.isdir(startdir + "/" + x):
+ continue
+ for y in os.listdir(startdir + "/" + x):
+ if y == "CVS" or y.startswith("."):
+ continue
+ if os.path.isdir(startdir + "/" + x + "/" + y):
+ scanlist.append(x + "/" + y)
+ repo_subdir = ""
+elif repolevel == 3:
+ catdir = reposplit[-2]
+ if catdir not in categories:
+ caterror(catdir)
+ scanlist.append(catdir + "/" + reposplit[-1])
+ repo_subdir = scanlist[-1] + os.sep
+else:
+ msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
+ ' from the current working directory'
+ logging.critical(msg)
+ sys.exit(1)
+
+repo_subdir_len = len(repo_subdir)
+scanlist.sort()
+
+logging.debug("Found the following packages to scan:\n%s" % '\n'.join(scanlist))
+
+def vcs_files_to_cps(vcs_file_iter):
+ """
+ Iterate over the given modified file paths returned from the vcs,
+ and return a frozenset containing category/pn strings for each
+ modified package.
+ """
+
+ modified_cps = []
+
+ if repolevel == 3:
+ if reposplit[-2] in categories and \
+ next(vcs_file_iter, None) is not None:
+ modified_cps.append("/".join(reposplit[-2:]))
+
+ elif repolevel == 2:
+ category = reposplit[-1]
+ if category in categories:
+ for filename in vcs_file_iter:
+ f_split = filename.split(os.sep)
+ # ['.', pn, ...]
+ if len(f_split) > 2:
+ modified_cps.append(category + "/" + f_split[1])
+
+ else:
+ # repolevel == 1
+ for filename in vcs_file_iter:
+ f_split = filename.split(os.sep)
+ # ['.', category, pn, ...]
+ if len(f_split) > 3 and f_split[1] in categories:
+ modified_cps.append("/".join(f_split[1:3]))
+
+ return frozenset(modified_cps)
+
+def git_supports_gpg_sign():
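+ """Return True if the installed git is 1.7.9 or newer, the first version to support gpg-signed commits."""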
+ status, cmd_output = \
+ repoman_getstatusoutput("git --version")
+ cmd_output = cmd_output.split()
+ if cmd_output:
+ version = re.match(r'^(\d+)\.(\d+)\.(\d+)', cmd_output[-1])
+ if version is not None:
+ version = [int(x) for x in version.groups()]
+ if version[0] > 1 or \
+ (version[0] == 1 and version[1] > 7) or \
+ (version[0] == 1 and version[1] == 7 and version[2] >= 9):
+ return True
+ return False
+
+def dev_keywords(profiles):
+ """
+ Create a set of KEYWORDS values that exist in 'dev' profiles.
+ These are used to trigger a message notifying the user when
+ they might want to add the --include-dev option.
+ """
+ type_arch_map = {}
+ for arch, arch_profiles in profiles.items():
+ for prof in arch_profiles:
+ arch_set = type_arch_map.get(prof.status)
+ if arch_set is None:
+ arch_set = set()
+ type_arch_map[prof.status] = arch_set
+ arch_set.add(arch)
+
+ dev_keywords = type_arch_map.get('dev', set())
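+ # Also match the ~arch (testing) form of each dev arch.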
+ dev_keywords.update(['~' + arch for arch in dev_keywords])
+ return frozenset(dev_keywords)
+
+dev_keywords = dev_keywords(profiles)
+
+stats = {}
+fails = {}
+
+for x in qacats:
+ stats[x] = 0
+ fails[x] = []
+
+xmllint_capable = False
+metadata_dtd = os.path.join(repoman_settings["DISTDIR"], 'metadata.dtd')
+
+def fetch_metadata_dtd():
+ """
+ Fetch metadata.dtd if it doesn't exist or the ctime is older than
+ metadata_dtd_ctime_interval.
+ @rtype: bool
+ @return: True if successful, otherwise False
+ """
+
+ must_fetch = True
+ metadata_dtd_st = None
+ current_time = int(time.time())
+ try:
+ metadata_dtd_st = os.stat(metadata_dtd)
+ except EnvironmentError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ else:
+ # Trigger a fetch if the metadata.dtd ctime is old or the clock is wrong.
+ if abs(current_time - metadata_dtd_st.st_ctime) \
+ < metadata_dtd_ctime_interval:
+ must_fetch = False
+
+ if must_fetch:
+ print()
+ print(green("***") + " the local copy of metadata.dtd " + \
+ "needs to be refetched, doing that now")
+ print()
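+ # Prefer a scheme-specific FETCHCOMMAND (e.g. FETCHCOMMAND_HTTP), falling
+ # back to the generic FETCHCOMMAND.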
+ parsed_url = urlparse(metadata_dtd_uri)
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = repoman_settings.get(setting)
+ if not fcmd:
+ fcmd = repoman_settings.get('FETCHCOMMAND')
+ if not fcmd:
+ logging.error("FETCHCOMMAND is unset")
+ return False
+
+ destdir = repoman_settings["DISTDIR"]
+ fd, metadata_dtd_tmp = tempfile.mkstemp(
+ prefix='metadata.dtd.', dir=destdir)
+ os.close(fd)
+
+ try:
+ if not portage.getbinpkg.file_get(metadata_dtd_uri,
+ destdir, fcmd=fcmd,
+ filename=os.path.basename(metadata_dtd_tmp)):
+ logging.error("failed to fetch metadata.dtd from '%s'" %
+ metadata_dtd_uri)
+ return False
+
+ try:
+ portage.util.apply_secpass_permissions(metadata_dtd_tmp,
+ gid=portage.data.portage_gid, mode=0o664, mask=0o2)
+ except portage.exception.PortageException:
+ pass
+
+ os.rename(metadata_dtd_tmp, metadata_dtd)
+ finally:
+ try:
+ os.unlink(metadata_dtd_tmp)
+ except OSError:
+ pass
+
+ return True
+
+if options.mode == "manifest":
+ pass
+elif not find_binary('xmllint'):
+ print(red("!!! xmllint not found. Can't check metadata.xml.\n"))
+ if options.xml_parse or repolevel == 3:
+ print(red("!!!")+" sorry, xmllint is needed. failing\n")
+ sys.exit(1)
+else:
+ if not fetch_metadata_dtd():
+ sys.exit(1)
+ # this can be problematic if xmllint changes its output
+ xmllint_capable = True
+
+if options.mode == 'commit' and vcs:
+ utilities.detect_vcs_conflicts(options, vcs)
+
+if options.mode == "manifest":
+ pass
+elif options.pretend:
+ print(green("\nRepoMan does a once-over of the neighborhood..."))
+else:
+ print(green("\nRepoMan scours the neighborhood..."))
+
+new_ebuilds = set()
+modified_ebuilds = set()
+modified_changelogs = set()
+mychanged = []
+mynew = []
+myremoved = []
+
+if vcs == "cvs":
+ mycvstree = cvstree.getentries("./", recursive=1)
+ mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
+ mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
+ if options.if_modified == "y":
+ myremoved = cvstree.findremoved(mycvstree, recursive=1, basedir="./")
+
+elif vcs == "svn":
+ with repoman_popen("svn status") as f:
+ svnstatus = f.readlines()
+ mychanged = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR"]
+ mynew = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
+ if options.if_modified == "y":
+ myremoved = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+
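+# git diff-filter letters: M=modified, A=added, D=deleted.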
+elif vcs == "git":
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=M HEAD") as f:
+ mychanged = f.readlines()
+ mychanged = ["./" + elem[:-1] for elem in mychanged]
+
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=A HEAD") as f:
+ mynew = f.readlines()
+ mynew = ["./" + elem[:-1] for elem in mynew]
+ if options.if_modified == "y":
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=D HEAD") as f:
+ myremoved = f.readlines()
+ myremoved = ["./" + elem[:-1] for elem in myremoved]
+
+elif vcs == "bzr":
+ with repoman_popen("bzr status -S .") as f:
+ bzrstatus = f.readlines()
+ mychanged = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M"]
+ mynew = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] in "NK" or elem[0:1] == "R")]
+ if options.if_modified == "y":
+ myremoved = ["./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] == "K" or elem[0:1] == "R")]
+
+elif vcs == "hg":
+ with repoman_popen("hg status --no-status --modified .") as f:
+ mychanged = f.readlines()
+ mychanged = ["./" + elem.rstrip() for elem in mychanged]
+ with repoman_popen("hg status --no-status --added .") as f:
+ mynew = f.readlines()
+ mynew = ["./" + elem.rstrip() for elem in mynew]
+ if options.if_modified == "y":
+ with repoman_popen("hg status --no-status --removed .") as f:
+ myremoved = f.readlines()
+ myremoved = ["./" + elem.rstrip() for elem in myremoved]
+
+if vcs:
+ new_ebuilds.update(x for x in mynew if x.endswith(".ebuild"))
+ modified_ebuilds.update(x for x in mychanged if x.endswith(".ebuild"))
+ modified_changelogs.update(x for x in chain(mychanged, mynew) \
+ if os.path.basename(x) == "ChangeLog")
+
+def vcs_new_changed(relative_path):
+ for x in chain(mychanged, mynew):
+ if x == relative_path:
+ return True
+ return False
+
+have_pmasked = False
+have_dev_keywords = False
+dofail = 0
+
+# NOTE: match-all caches are not shared due to potential
+# differences between profiles in _get_implicit_iuse.
+arch_caches = {}
+arch_xmatch_caches = {}
+shared_xmatch_caches = {"cp-list":{}}
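+# cp-list results do not depend on the profile, so that cache can be shared
+# across all profiles.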
+
+include_arches = None
+if options.include_arches:
+ include_arches = set()
+ include_arches.update(*[x.split() for x in options.include_arches])
+
+# Disable the "ebuild.notadded" check when not in commit mode, since
+# running `svn status` in every package dir would be too expensive.
+
+check_ebuild_notadded = not \
+ (vcs == "svn" and repolevel < 3 and options.mode != "commit")
+
+# Map thirdpartymirrors URL prefixes to mirror aliases for the SRC_URI.mirror check.
+thirdpartymirrors = {}
+for k, mirrors in repoman_settings.thirdpartymirrors().items():
+ for v in mirrors:
+ if not v.endswith("/"):
+ v += "/"
+ thirdpartymirrors[v] = k
+
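+# Hooks the expat parser so the XML declaration and DOCTYPE are recorded in the
+# supplied dict for the metadata.xml checks below.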
+class _XMLParser(xml.etree.ElementTree.XMLParser):
+
+ def __init__(self, data, **kwargs):
+ xml.etree.ElementTree.XMLParser.__init__(self, **kwargs)
+ self._portage_data = data
+ if hasattr(self, 'parser'):
+ self._base_XmlDeclHandler = self.parser.XmlDeclHandler
+ self.parser.XmlDeclHandler = self._portage_XmlDeclHandler
+ self._base_StartDoctypeDeclHandler = \
+ self.parser.StartDoctypeDeclHandler
+ self.parser.StartDoctypeDeclHandler = \
+ self._portage_StartDoctypeDeclHandler
+
+ def _portage_XmlDeclHandler(self, version, encoding, standalone):
+ if self._base_XmlDeclHandler is not None:
+ self._base_XmlDeclHandler(version, encoding, standalone)
+ self._portage_data["XML_DECLARATION"] = (version, encoding, standalone)
+
+ def _portage_StartDoctypeDeclHandler(self, doctypeName, systemId, publicId,
+ has_internal_subset):
+ if self._base_StartDoctypeDeclHandler is not None:
+ self._base_StartDoctypeDeclHandler(doctypeName, systemId, publicId,
+ has_internal_subset)
+ self._portage_data["DOCTYPE"] = (doctypeName, systemId, publicId)
+
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings with
+ >=python-2.7.
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+try:
+ herd_base = make_herd_base(os.path.join(repoman_settings["PORTDIR"], "metadata/herds.xml"))
+except (EnvironmentError, ParseError, PermissionDenied) as e:
+ err(str(e))
+except FileNotFound:
+ # TODO: Download as we do for metadata.dtd, but add a way to
+ # disable for non-gentoo repoman users who may not have herds.
+ herd_base = None
+
+effective_scanlist = scanlist
+if options.if_modified == "y":
+ effective_scanlist = sorted(vcs_files_to_cps(
+ chain(mychanged, mynew, myremoved)))
+
+for x in effective_scanlist:
+ # Check each package, including whether its ebuilds and digests were added to the VCS.
+ logging.info("checking package %s" % x)
+ # save memory by discarding xmatch caches from previous package(s)
+ arch_xmatch_caches.clear()
+ eadded = []
+ catdir, pkgdir = x.split("/")
+ checkdir = repodir + "/" + x
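+ # Build the package path relative to the CWD, matching the "./"-prefixed
+ # paths reported by the VCS.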
+ checkdir_relative = ""
+ if repolevel < 3:
+ checkdir_relative = os.path.join(pkgdir, checkdir_relative)
+ if repolevel < 2:
+ checkdir_relative = os.path.join(catdir, checkdir_relative)
+ checkdir_relative = os.path.join(".", checkdir_relative)
+ generated_manifest = False
+
+ if options.mode == "manifest" or \
+ (options.mode != 'manifest-check' and options.digest == 'y') or \
+ options.mode in ('commit', 'fix') and not options.pretend:
+ auto_assumed = set()
+ fetchlist_dict = portage.FetchlistDict(checkdir,
+ repoman_settings, portdb)
+ if options.mode == 'manifest' and options.force:
+ portage._doebuild_manifest_exempt_depend += 1
+ try:
+ distdir = repoman_settings['DISTDIR']
+ mf = repoman_settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(checkdir)))
+ mf = mf.load_manifest(checkdir, distdir,
+ fetchlist_dict=fetchlist_dict)
+ mf.create(requiredDistfiles=None,
+ assumeDistHashesAlways=True)
+ for distfiles in fetchlist_dict.values():
+ for distfile in distfiles:
+ if os.path.isfile(os.path.join(distdir, distfile)):
+ mf.fhashdict['DIST'].pop(distfile, None)
+ else:
+ auto_assumed.add(distfile)
+ mf.write()
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
+
+ repoman_settings["O"] = checkdir
+ try:
+ generated_manifest = digestgen(
+ mysettings=repoman_settings, myportdb=portdb)
+ except portage.exception.PermissionDenied as e:
+ generated_manifest = False
+ writemsg_level("!!! Permission denied: '%s'\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if not generated_manifest:
+ print("Unable to generate manifest.")
+ dofail = 1
+
+ if options.mode == "manifest":
+ if not dofail and options.force and auto_assumed and \
+ 'assume-digests' in repoman_settings.features:
+ # Show which digests were assumed despite the --force option
+ # being given. This output will already have been shown by
+ # digestgen() if assume-digests is not enabled, so only show
+ # it here if assume-digests is enabled.
+ pkgs = list(fetchlist_dict)
+ pkgs.sort()
+ portage.writemsg_stdout(" digest.assumed" + \
+ portage.output.colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for cpv in pkgs:
+ fetchmap = fetchlist_dict[cpv]
+ pf = portage.catsplit(cpv)[1]
+ for distfile in sorted(fetchmap):
+ if distfile in auto_assumed:
+ portage.writemsg_stdout(
+ " %s::%s\n" % (pf, distfile))
+ continue
+ elif dofail:
+ sys.exit(1)
+
+ if not generated_manifest:
+ repoman_settings['O'] = checkdir
+ repoman_settings['PORTAGE_QUIET'] = '1'
+ if not portage.digestcheck([], repoman_settings, strict=1):
+ stats["manifest.bad"] += 1
+ fails["manifest.bad"].append(os.path.join(x, 'Manifest'))
+ repoman_settings.pop('PORTAGE_QUIET', None)
+
+ if options.mode == 'manifest-check':
+ continue
+
+ checkdirlist = os.listdir(checkdir)
+ ebuildlist = []
+ pkgs = {}
+ allvalid = True
+ for y in checkdirlist:
+ if (y in no_exec or y.endswith(".ebuild")) and \
+ stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
+ stats["file.executable"] += 1
+ fails["file.executable"].append(os.path.join(checkdir, y))
+ if y.endswith(".ebuild"):
+ pf = y[:-7]
+ ebuildlist.append(pf)
+ cpv = "%s/%s" % (catdir, pf)
+ try:
+ myaux = dict(zip(allvars, portdb.aux_get(cpv, allvars)))
+ except KeyError:
+ allvalid = False
+ stats["ebuild.syntax"] += 1
+ fails["ebuild.syntax"].append(os.path.join(x, y))
+ continue
+ except IOError:
+ allvalid = False
+ stats["ebuild.output"] += 1
+ fails["ebuild.output"].append(os.path.join(x, y))
+ continue
+ if not portage.eapi_is_supported(myaux["EAPI"]):
+ allvalid = False
+ stats["EAPI.unsupported"] += 1
+ fails["EAPI.unsupported"].append(os.path.join(x, y))
+ continue
+ pkgs[pf] = Package(cpv=cpv, metadata=myaux,
+ root_config=root_config, type_name="ebuild")
+
+ slot_keywords = {}
+
+ if len(pkgs) != len(ebuildlist):
+ # If we can't access all the metadata then it's totally unsafe to
+ # commit since there's no way to generate a correct Manifest.
+ # Do not try to do any more QA checks on this package since missing
+ # metadata leads to false positives for several checks, and false
+ # positives confuse users.
+ can_force = False
+ continue
+
+ # Sort ebuilds in ascending order for the KEYWORDS.dropped check.
+ ebuildlist = sorted(pkgs.values())
+ ebuildlist = [pkg.pf for pkg in ebuildlist]
+
+ for y in checkdirlist:
+ index = repo_config.find_invalid_path_char(y)
+ if index != -1:
+ y_relative = os.path.join(checkdir_relative, y)
+ if vcs is not None and not vcs_new_changed(y_relative):
+ # If the file isn't in the VCS new or changed set, then
+ # assume that it's an irrelevant temporary file (Manifest
+ # entries are not generated for file names containing
+ # prohibited characters). See bug #406877.
+ index = -1
+ if index != -1:
+ stats["file.name"] += 1
+ fails["file.name"].append("%s/%s: char '%s'" % \
+ (checkdir, y, y[index]))
+
+ if not (y in ("ChangeLog", "metadata.xml") or y.endswith(".ebuild")):
+ continue
+ f = None
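+ # Decode the whole file to verify that it is valid UTF-8, counting lines
+ # so a decoding error can be reported with its position.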
+ try:
+ line = 1
+ f = io.open(_unicode_encode(os.path.join(checkdir, y),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'])
+ for l in f:
+ line += 1
+ except UnicodeDecodeError as ue:
+ stats["file.UTF8"] += 1
+ s = ue.object[:ue.start]
+ l2 = s.count("\n")
+ line += l2
+ if l2 != 0:
+ s = s[s.rfind("\n") + 1:]
+ fails["file.UTF8"].append("%s/%s: line %i, just after: '%s'" % (checkdir, y, line, s))
+ finally:
+ if f is not None:
+ f.close()
+
+ if vcs in ("git", "hg") and check_ebuild_notadded:
+ if vcs == "git":
+ myf = repoman_popen("git ls-files --others %s" % \
+ (portage._shell_quote(checkdir_relative),))
+ if vcs == "hg":
+ myf = repoman_popen("hg status --no-status --unknown %s" % \
+ (portage._shell_quote(checkdir_relative),))
+ for l in myf:
+ if l[:-1][-7:] == ".ebuild":
+ stats["ebuild.notadded"] += 1
+ fails["ebuild.notadded"].append(
+ os.path.join(x, os.path.basename(l[:-1])))
+ myf.close()
+
+ if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded:
+ try:
+ if vcs == "cvs":
+ myf = open(checkdir + "/CVS/Entries", "r")
+ if vcs == "svn":
+ myf = repoman_popen("svn status --depth=files --verbose " +
+ portage._shell_quote(checkdir))
+ if vcs == "bzr":
+ myf = repoman_popen("bzr ls -v --kind=file " +
+ portage._shell_quote(checkdir))
+ myl = myf.readlines()
+ myf.close()
+ for l in myl:
+ if vcs == "cvs":
+ if l[0] != "/":
+ continue
+ splitl = l[1:].split("/")
+ if not len(splitl):
+ continue
+ if splitl[0][-7:] == ".ebuild":
+ eadded.append(splitl[0][:-7])
+ if vcs == "svn":
+ if l[:1] == "?":
+ continue
+ if l[:7] == '      >':
+ # tree conflict, new in subversion 1.6
+ continue
+ l = l.split()[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ if vcs == "bzr":
+ if l[1:2] == "?":
+ continue
+ l = l.split()[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ if vcs == "svn":
+ myf = repoman_popen("svn status " +
+ portage._shell_quote(checkdir))
+ myl = myf.readlines()
+ myf.close()
+ for l in myl:
+ if l[0] == "A":
+ l = l.rstrip().split(' ')[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ except IOError:
+ if vcs == "cvs":
+ stats["CVS/Entries.IO_error"] += 1
+ fails["CVS/Entries.IO_error"].append(checkdir + "/CVS/Entries")
+ else:
+ raise
+ continue
+
+ mf = repoman_settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(checkdir)))
+ mf = mf.load_manifest(checkdir, repoman_settings["DISTDIR"])
+ mydigests = mf.getTypeDigests("DIST")
+
+ fetchlist_dict = portage.FetchlistDict(checkdir, repoman_settings, portdb)
+ myfiles_all = []
+ src_uri_error = False
+ for mykey in fetchlist_dict:
+ try:
+ myfiles_all.extend(fetchlist_dict[mykey])
+ except portage.exception.InvalidDependString as e:
+ src_uri_error = True
+ try:
+ portdb.aux_get(mykey, ["SRC_URI"])
+ except KeyError:
+ # This will be reported as an "ebuild.syntax" error.
+ pass
+ else:
+ stats["SRC_URI.syntax"] += 1
+ fails["SRC_URI.syntax"].append(
+ "%s.ebuild SRC_URI: %s" % (mykey, e))
+ del fetchlist_dict
+ if not src_uri_error:
+ # This test can produce false positives if SRC_URI could not
+ # be parsed for one or more ebuilds. There's no point in
+ # producing a false error here since the root cause will
+ # produce a valid error elsewhere, such as "SRC_URI.syntax"
+ # or "ebuild.sytax".
+ myfiles_all = set(myfiles_all)
+ for entry in mydigests:
+ if entry not in myfiles_all:
+ stats["digest.unused"] += 1
+ fails["digest.unused"].append(checkdir + "::" + entry)
+ for entry in myfiles_all:
+ if entry not in mydigests:
+ stats["digest.missing"] += 1
+ fails["digest.missing"].append(checkdir + "::" + entry)
+ del myfiles_all
+
+ if os.path.exists(checkdir + "/files"):
+ filesdirlist = os.listdir(checkdir + "/files")
+
+ # recurse through files directory
+ # use filesdirlist as a stack, appending directories as needed, so people can't hide files larger than 20 KiB in a subdirectory.
+ while filesdirlist:
+ y = filesdirlist.pop(0)
+ relative_path = os.path.join(x, "files", y)
+ full_path = os.path.join(repodir, relative_path)
+ try:
+ mystat = os.stat(full_path)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ # Don't worry about it; the file was likely removed via the fix above.
+ continue
+ else:
+ raise
+ if S_ISDIR(mystat.st_mode):
+ # !!! VCS "portability" alert! Need some function isVcsDir() or alike !!!
+ if y == "CVS" or y == ".svn":
+ continue
+ for z in os.listdir(checkdir + "/files/" + y):
+ if z == "CVS" or z == ".svn":
+ continue
+ filesdirlist.append(y + "/" + z)
+ # Current policy is no files over 20 KiB; these are the checks. A file between
+ # 20 KiB and 60 KiB triggers a warning, while one over 60 KiB is an error.
+ elif mystat.st_size > 61440:
+ stats["file.size.fatal"] += 1
+ fails["file.size.fatal"].append("(" + str(mystat.st_size//1024) + " KiB) " + x + "/files/" + y)
+ elif mystat.st_size > 20480:
+ stats["file.size"] += 1
+ fails["file.size"].append("(" + str(mystat.st_size//1024) + " KiB) " + x + "/files/" + y)
+
+ index = repo_config.find_invalid_path_char(y)
+ if index != -1:
+ y_relative = os.path.join(checkdir_relative, "files", y)
+ if vcs is not None and not vcs_new_changed(y_relative):
+ # If the file isn't in the VCS new or changed set, then
+ # assume that it's an irrelevant temporary file (Manifest
+ # entries are not generated for file names containing
+ # prohibited characters). See bug #406877.
+ index = -1
+ if index != -1:
+ stats["file.name"] += 1
+ fails["file.name"].append("%s/files/%s: char '%s'" % \
+ (checkdir, y, y[index]))
+ del mydigests
+
+ if check_changelog and "ChangeLog" not in checkdirlist:
+ stats["changelog.missing"] += 1
+ fails["changelog.missing"].append(x + "/ChangeLog")
+
+ musedict = {}
+ # metadata.xml file check
+ if "metadata.xml" not in checkdirlist:
+ stats["metadata.missing"] += 1
+ fails["metadata.missing"].append(x + "/metadata.xml")
+ # metadata.xml parse check
+ else:
+ metadata_bad = False
+ xml_info = {}
+ xml_parser = _XMLParser(xml_info, target=_MetadataTreeBuilder())
+
+ # read metadata.xml into memory
+ try:
+ _metadata_xml = xml.etree.ElementTree.parse(
+ _unicode_encode(os.path.join(checkdir, "metadata.xml"),
+ encoding=_encodings['fs'], errors='strict'),
+ parser=xml_parser)
+ except (ExpatError, SyntaxError, EnvironmentError) as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ del e
+ else:
+ if not hasattr(xml_parser, 'parser') or \
+ sys.hexversion < 0x2070000 or \
+ (sys.hexversion > 0x3000000 and sys.hexversion < 0x3020000):
+ # doctype is not parsed with python 2.6 or 3.1
+ pass
+ else:
+ if "XML_DECLARATION" not in xml_info:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "xml declaration is missing on first line, "
+ "should be '%s'" % (x, metadata_xml_declaration))
+ else:
+ xml_version, xml_encoding, xml_standalone = \
+ xml_info["XML_DECLARATION"]
+ if xml_encoding is None or \
+ xml_encoding.upper() != metadata_xml_encoding:
+ stats["metadata.bad"] += 1
+ if xml_encoding is None:
+ encoding_problem = "but it is undefined"
+ else:
+ encoding_problem = "not '%s'" % xml_encoding
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "xml declaration encoding should be '%s', %s" %
+ (x, metadata_xml_encoding, encoding_problem))
+
+ if "DOCTYPE" not in xml_info:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x,
+ "DOCTYPE is missing"))
+ else:
+ doctype_name, doctype_system, doctype_pubid = \
+ xml_info["DOCTYPE"]
+ if doctype_system != metadata_dtd_uri:
+ stats["metadata.bad"] += 1
+ if doctype_system is None:
+ system_problem = "but it is undefined"
+ else:
+ system_problem = "not '%s'" % doctype_system
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "DOCTYPE: SYSTEM should refer to '%s', %s" %
+ (x, metadata_dtd_uri, system_problem))
+
+ if doctype_name != metadata_doctype_name:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "DOCTYPE: name should be '%s', not '%s'" %
+ (x, metadata_doctype_name, doctype_name))
+
+ # load USE flags from metadata.xml
+ try:
+ musedict = utilities.parse_metadata_use(_metadata_xml)
+ except portage.exception.ParseError as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ else:
+ for atom in chain(*musedict.values()):
+ if atom is None:
+ continue
+ try:
+ atom = Atom(atom)
+ except InvalidAtom as e:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(
+ "%s/metadata.xml: Invalid atom: %s" % (x, e))
+ else:
+ if atom.cp != x:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(
+ ("%s/metadata.xml: Atom contains "
+ "unexpected cat/pn: %s") % (x, atom))
+
+ # Run other metadata.xml checkers
+ try:
+ utilities.check_metadata(_metadata_xml, herd_base)
+ except (utilities.UnknownHerdsError, ) as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ del e
+
+ # Validate against the DTD only if xmllint is available and metadata.xml parsed cleanly.
+ if xmllint_capable and not metadata_bad:
+ # xmllint can produce garbage output even on success, so only dump
+ # the output when it fails.
+ st, out = repoman_getstatusoutput(
+ "xmllint --nonet --noout --dtdvalid %s %s" % \
+ (portage._shell_quote(metadata_dtd),
+ portage._shell_quote(os.path.join(checkdir, "metadata.xml"))))
+ if st != os.EX_OK:
+ print(red("!!!") + " metadata.xml is invalid:")
+ for z in out.splitlines():
+ print(red("!!! ") + z)
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(x + "/metadata.xml")
+
+ del metadata_bad
+ muselist = frozenset(musedict)
+
+ changelog_path = os.path.join(checkdir_relative, "ChangeLog")
+ changelog_modified = changelog_path in modified_changelogs
+
+ # detect unused local USE-descriptions
+ used_useflags = set()
+
+ for y in ebuildlist:
+ relative_path = os.path.join(x, y + ".ebuild")
+ full_path = os.path.join(repodir, relative_path)
+ ebuild_path = y + ".ebuild"
+ if repolevel < 3:
+ ebuild_path = os.path.join(pkgdir, ebuild_path)
+ if repolevel < 2:
+ ebuild_path = os.path.join(catdir, ebuild_path)
+ ebuild_path = os.path.join(".", ebuild_path)
+ if check_changelog and not changelog_modified \
+ and ebuild_path in new_ebuilds:
+ stats['changelog.ebuildadded'] += 1
+ fails['changelog.ebuildadded'].append(relative_path)
+
+ if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded and y not in eadded:
+ # ebuild not added to vcs
+ stats["ebuild.notadded"] += 1
+ fails["ebuild.notadded"].append(x + "/" + y + ".ebuild")
+ myesplit = portage.pkgsplit(y)
+ if myesplit is None or myesplit[0] != x.split("/")[-1] \
+ or pv_toolong_re.search(myesplit[1]) \
+ or pv_toolong_re.search(myesplit[2]):
+ stats["ebuild.invalidname"] += 1
+ fails["ebuild.invalidname"].append(x + "/" + y + ".ebuild")
+ continue
+ elif myesplit[0] != pkgdir:
+ print(pkgdir, myesplit[0])
+ stats["ebuild.namenomatch"] += 1
+ fails["ebuild.namenomatch"].append(x + "/" + y + ".ebuild")
+ continue
+
+ pkg = pkgs[y]
+
+ if pkg.invalid:
+ allvalid = False
+ for k, msgs in pkg.invalid.items():
+ for msg in msgs:
+ stats[k] += 1
+ fails[k].append("%s: %s" % (relative_path, msg))
+ continue
+
+ myaux = pkg._metadata
+ eapi = myaux["EAPI"]
+ inherited = pkg.inherited
+ live_ebuild = live_eclasses.intersection(inherited)
+
+ if repo_config.eapi_is_banned(eapi):
+ stats["repo.eapi.banned"] += 1
+ fails["repo.eapi.banned"].append(
+ "%s: %s" % (relative_path, eapi))
+
+ elif repo_config.eapi_is_deprecated(eapi):
+ stats["repo.eapi.deprecated"] += 1
+ fails["repo.eapi.deprecated"].append(
+ "%s: %s" % (relative_path, eapi))
+
+ for k, v in myaux.items():
+ if not isinstance(v, basestring):
+ continue
+ m = non_ascii_re.search(v)
+ if m is not None:
+ stats["variable.invalidchar"] += 1
+ fails["variable.invalidchar"].append(
+ ("%s: %s variable contains non-ASCII " + \
+ "character at position %s") % \
+ (relative_path, k, m.start() + 1))
+
+ if not src_uri_error:
+ # Check that URIs don't reference a server from thirdpartymirrors.
+ for uri in portage.dep.use_reduce( \
+ myaux["SRC_URI"], matchall=True, is_src_uri=True, eapi=eapi, flat=True):
+ contains_mirror = False
+ for mirror, mirror_alias in thirdpartymirrors.items():
+ if uri.startswith(mirror):
+ contains_mirror = True
+ break
+ if not contains_mirror:
+ continue
+
+ new_uri = "mirror://%s/%s" % (mirror_alias, uri[len(mirror):])
+ stats["SRC_URI.mirror"] += 1
+ fails["SRC_URI.mirror"].append(
+ "%s: '%s' found in thirdpartymirrors, use '%s'" % \
+ (relative_path, mirror, new_uri))
+
+ if myaux.get("PROVIDE"):
+ stats["virtual.oldstyle"] += 1
+ fails["virtual.oldstyle"].append(relative_path)
+
+ for pos, missing_var in enumerate(missingvars):
+ if not myaux.get(missing_var):
+ if catdir == "virtual" and \
+ missing_var in ("HOMEPAGE", "LICENSE"):
+ continue
+ if live_ebuild and missing_var == "KEYWORDS":
+ continue
+ myqakey = missingvars[pos] + ".missing"
+ stats[myqakey] += 1
+ fails[myqakey].append(x + "/" + y + ".ebuild")
+
+ if catdir == "virtual":
+ for var in ("HOMEPAGE", "LICENSE"):
+ if myaux.get(var):
+ myqakey = var + ".virtual"
+ stats[myqakey] += 1
+ fails[myqakey].append(relative_path)
+
+ # 14 is the length of DESCRIPTION=""
+ if len(myaux['DESCRIPTION']) > max_desc_len:
+ stats['DESCRIPTION.toolong'] += 1
+ fails['DESCRIPTION.toolong'].append(
+ "%s: DESCRIPTION is %d characters (max %d)" % \
+ (relative_path, len(myaux['DESCRIPTION']), max_desc_len))
+
+ keywords = myaux["KEYWORDS"].split()
+ stable_keywords = []
+ for keyword in keywords:
+ if not keyword.startswith("~") and \
+ not keyword.startswith("-"):
+ stable_keywords.append(keyword)
+ if stable_keywords:
+ if ebuild_path in new_ebuilds and catdir != "virtual":
+ stable_keywords.sort()
+ stats["KEYWORDS.stable"] += 1
+ fails["KEYWORDS.stable"].append(
+ x + "/" + y + ".ebuild added with stable keywords: %s" % \
+ " ".join(stable_keywords))
+
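+ # Collapse keywords to plain arches: strip the ~ testing prefix and drop -arch entries.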
+ ebuild_archs = set(kw.lstrip("~") for kw in keywords \
+ if not kw.startswith("-"))
+
+ previous_keywords = slot_keywords.get(pkg.slot)
+ if previous_keywords is None:
+ slot_keywords[pkg.slot] = set()
+ elif ebuild_archs and "*" not in ebuild_archs and not live_ebuild:
+ dropped_keywords = previous_keywords.difference(ebuild_archs)
+ if dropped_keywords:
+ stats["KEYWORDS.dropped"] += 1
+ fails["KEYWORDS.dropped"].append(
+ relative_path + ": %s" % \
+ " ".join(sorted(dropped_keywords)))
+
+ slot_keywords[pkg.slot].update(ebuild_archs)
+
+ # KEYWORDS="-*" is a stupid replacement for package.mask and screws general KEYWORDS semantics
+ if "-*" in keywords:
+ haskeyword = False
+ for kw in keywords:
+ if kw[0] == "~":
+ kw = kw[1:]
+ if kw in kwlist:
+ haskeyword = True
+ if not haskeyword:
+ stats["KEYWORDS.stupid"] += 1
+ fails["KEYWORDS.stupid"].append(x + "/" + y + ".ebuild")
+
+ """
+ Ebuilds that inherit a "Live" eclass (darcs,subversion,git,cvs,etc..) should
+ not be allowed to be marked stable
+ """
+ if live_ebuild and repo_config.name == "gentoo":
+ bad_stable_keywords = []
+ for keyword in keywords:
+ if not keyword.startswith("~") and \
+ not keyword.startswith("-"):
+ bad_stable_keywords.append(keyword)
+ del keyword
+ if bad_stable_keywords:
+ stats["LIVEVCS.stable"] += 1
+ fails["LIVEVCS.stable"].append(
+ x + "/" + y + ".ebuild with stable keywords:%s " % \
+ bad_stable_keywords)
+ del bad_stable_keywords
+
+ if keywords and not has_global_mask(pkg):
+ stats["LIVEVCS.unmasked"] += 1
+ fails["LIVEVCS.unmasked"].append(relative_path)
+
+ if options.ignore_arches:
+ arches = [[repoman_settings["ARCH"], repoman_settings["ARCH"],
+ repoman_settings["ACCEPT_KEYWORDS"].split()]]
+ else:
+ arches = set()
+ for keyword in keywords:
+ if keyword[0] == "-":
+ continue
+ elif keyword[0] == "~":
+ arch = keyword[1:]
+ if arch == "*":
+ for expanded_arch in profiles:
+ if expanded_arch == "**":
+ continue
+ arches.add((keyword, expanded_arch,
+ (expanded_arch, "~" + expanded_arch)))
+ else:
+ arches.add((keyword, arch, (arch, keyword)))
+ else:
+ if keyword == "*":
+ for expanded_arch in profiles:
+ if expanded_arch == "**":
+ continue
+ arches.add((keyword, expanded_arch,
+ (expanded_arch,)))
+ else:
+ arches.add((keyword, keyword, (keyword,)))
+ if not arches:
+ # Use an empty profile for checking dependencies of
+ # packages that have empty KEYWORDS.
+ arches.add(('**', '**', ('**',)))
+
+ unknown_pkgs = set()
+ baddepsyntax = False
+ badlicsyntax = False
+ badprovsyntax = False
+ catpkg = catdir + "/" + y
+
+ inherited_java_eclass = "java-pkg-2" in inherited or \
+ "java-pkg-opt-2" in inherited
+ inherited_wxwidgets_eclass = "wxwidgets" in inherited
+ operator_tokens = set(["||", "(", ")"])
+ type_list, badsyntax = [], []
+ for mytype in Package._dep_keys + ("LICENSE", "PROPERTIES", "PROVIDE"):
+ mydepstr = myaux[mytype]
+
+ buildtime = mytype in Package._buildtime_keys
+ runtime = mytype in Package._runtime_keys
+ token_class = None
+ if mytype.endswith("DEPEND"):
+ token_class = portage.dep.Atom
+
+ try:
+ atoms = portage.dep.use_reduce(mydepstr, matchall=1, flat=True, \
+ is_valid_flag=pkg.iuse.is_valid_flag, token_class=token_class)
+ except portage.exception.InvalidDependString as e:
+ atoms = None
+ badsyntax.append(str(e))
+
+ if atoms and mytype.endswith("DEPEND"):
+ if runtime and \
+ "test?" in mydepstr.split():
+ stats[mytype + '.suspect'] += 1
+ fails[mytype + '.suspect'].append(relative_path + \
+ ": 'test?' USE conditional in %s" % mytype)
+
+ for atom in atoms:
+ if atom == "||":
+ continue
+
+ is_blocker = atom.blocker
+
+ # Skip dependency.unknown for blockers, so that we
+ # don't encourage people to remove necessary blockers,
+ # as discussed in bug #382407.
+ if not is_blocker and \
+ not portdb.xmatch("match-all", atom) and \
+ not atom.cp.startswith("virtual/"):
+ unknown_pkgs.add((mytype, atom.unevaluated_atom))
+
+ if catdir != "virtual":
+ if not is_blocker and \
+ atom.cp in suspect_virtual:
+ stats['virtual.suspect'] += 1
+ fails['virtual.suspect'].append(
+ relative_path +
+ ": %s: consider using '%s' instead of '%s'" %
+ (mytype, suspect_virtual[atom.cp], atom))
+ if not is_blocker and \
+ atom.cp.startswith("perl-core/"):
+ stats['dependency.perlcore'] += 1
+ fails['dependency.perlcore'].append(
+ relative_path +
+ ": %s: please use '%s' instead of '%s'" %
+ (mytype, atom.replace("perl-core/","virtual/perl-"), atom))
+
+ if buildtime and \
+ not is_blocker and \
+ not inherited_java_eclass and \
+ atom.cp == "virtual/jdk":
+ stats['java.eclassesnotused'] += 1
+ fails['java.eclassesnotused'].append(relative_path)
+ elif buildtime and \
+ not is_blocker and \
+ not inherited_wxwidgets_eclass and \
+ atom.cp == "x11-libs/wxGTK":
+ stats['wxwidgets.eclassnotused'] += 1
+ fails['wxwidgets.eclassnotused'].append(
+ (relative_path + ": %ss on x11-libs/wxGTK"
+ " without inheriting wxwidgets.eclass") % mytype)
+ elif runtime:
+ if not is_blocker and \
+ atom.cp in suspect_rdepend:
+ stats[mytype + '.suspect'] += 1
+ fails[mytype + '.suspect'].append(
+ relative_path + ": '%s'" % atom)
+
+ if atom.operator == "~" and \
+ portage.versions.catpkgsplit(atom.cpv)[3] != "r0":
+ qacat = 'dependency.badtilde'
+ stats[qacat] += 1
+ fails[qacat].append(
+ (relative_path + ": %s uses the ~ operator"
+ " with a non-zero revision:" + \
+ " '%s'") % (mytype, atom))
+
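+ # Attribute any syntax errors accumulated this iteration to the current
+ # mytype by padding type_list to the same length as badsyntax.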
+ type_list.extend([mytype] * (len(badsyntax) - len(type_list)))
+
+ for m, b in zip(type_list, badsyntax):
+ if m.endswith("DEPEND"):
+ qacat = "dependency.syntax"
+ else:
+ qacat = m + ".syntax"
+ stats[qacat] += 1
+ fails[qacat].append("%s: %s: %s" % (relative_path, m, b))
+
+ badlicsyntax = len([z for z in type_list if z == "LICENSE"])
+ badprovsyntax = len([z for z in type_list if z == "PROVIDE"])
+ baddepsyntax = len(type_list) != badlicsyntax + badprovsyntax
+ badlicsyntax = badlicsyntax > 0
+ badprovsyntax = badprovsyntax > 0
+
+ # uselist checks - global
+ myuse = []
+ default_use = []
+ for myflag in myaux["IUSE"].split():
+ flag_name = myflag.lstrip("+-")
+ used_useflags.add(flag_name)
+ if myflag != flag_name:
+ default_use.append(myflag)
+ if flag_name not in uselist:
+ myuse.append(flag_name)
+
+ # uselist checks - metadata
+ for mypos in range(len(myuse)-1, -1, -1):
+ if myuse[mypos] and (myuse[mypos] in muselist):
+ del myuse[mypos]
+
+ if default_use and not eapi_has_iuse_defaults(eapi):
+ for myflag in default_use:
+ stats['EAPI.incompatible'] += 1
+ fails['EAPI.incompatible'].append(
+ (relative_path + ": IUSE defaults" + \
+ " not supported with EAPI='%s':" + \
+ " '%s'") % (eapi, myflag))
+
+ for mypos in range(len(myuse)):
+ stats["IUSE.invalid"] += 1
+ fails["IUSE.invalid"].append(x + "/" + y + ".ebuild: %s" % myuse[mypos])
+
+ # Check for outdated RUBY targets
+ if "ruby-ng" in inherited or "ruby-fakegem" in inherited or "ruby" in inherited:
+ ruby_intersection = pkg.iuse.all.intersection(ruby_deprecated)
+ if ruby_intersection:
+ for myruby in ruby_intersection:
+ stats["IUSE.rubydeprecated"] += 1
+ fails["IUSE.rubydeprecated"].append(
+ (relative_path + ": Deprecated ruby target: %s") % myruby)
+
+ # license checks
+ if not badlicsyntax:
+ # Parse the LICENSE variable, remove USE conditions and
+ # flatten it.
+ licenses = portage.dep.use_reduce(myaux["LICENSE"], matchall=1, flat=True)
+ # Check each entry to ensure that it exists in PORTDIR's
+ # license directory.
+ for lic in licenses:
+ # Need to check for "||" manually as no portage
+ # function will remove it without removing values.
+ if lic not in liclist and lic != "||":
+ stats["LICENSE.invalid"] += 1
+ fails["LICENSE.invalid"].append(x + "/" + y + ".ebuild: %s" % lic)
+ elif lic in liclist_deprecated:
+ stats["LICENSE.deprecated"] += 1
+ fails["LICENSE.deprecated"].append("%s: %s" % (relative_path, lic))
+
+ # keyword checks
+ myuse = myaux["KEYWORDS"].split()
+ for mykey in myuse:
+ if mykey not in ("-*", "*", "~*"):
+ myskey = mykey
+ if myskey[:1] == "-":
+ myskey = myskey[1:]
+ if myskey[:1] == "~":
+ myskey = myskey[1:]
+ if myskey not in kwlist:
+ stats["KEYWORDS.invalid"] += 1
+ fails["KEYWORDS.invalid"].append(x + "/" + y + ".ebuild: %s" % mykey)
+ elif myskey not in profiles:
+ stats["KEYWORDS.invalid"] += 1
+ fails["KEYWORDS.invalid"].append(x + "/" + y + ".ebuild: %s (profile invalid)" % mykey)
+
+ # restrict checks
+ myrestrict = None
+ try:
+ myrestrict = portage.dep.use_reduce(myaux["RESTRICT"], matchall=1, flat=True)
+ except portage.exception.InvalidDependString as e:
+ stats["RESTRICT.syntax"] += 1
+ fails["RESTRICT.syntax"].append(
+ "%s: RESTRICT: %s" % (relative_path, e))
+ del e
+ if myrestrict:
+ myrestrict = set(myrestrict)
+ mybadrestrict = myrestrict.difference(valid_restrict)
+ if mybadrestrict:
+ stats["RESTRICT.invalid"] += len(mybadrestrict)
+ for mybad in mybadrestrict:
+ fails["RESTRICT.invalid"].append(x + "/" + y + ".ebuild: %s" % mybad)
+ # REQUIRED_USE check
+ required_use = myaux["REQUIRED_USE"]
+ if required_use:
+ if not eapi_has_required_use(eapi):
+ stats['EAPI.incompatible'] += 1
+ fails['EAPI.incompatible'].append(
+ relative_path + ": REQUIRED_USE" + \
+ " not supported with EAPI='%s'" % (eapi,))
+ try:
+ portage.dep.check_required_use(required_use, (),
+ pkg.iuse.is_valid_flag, eapi=eapi)
+ except portage.exception.InvalidDependString as e:
+ stats["REQUIRED_USE.syntax"] += 1
+ fails["REQUIRED_USE.syntax"].append(
+ "%s: REQUIRED_USE: %s" % (relative_path, e))
+ del e
+
+ # Syntax Checks
+ relative_path = os.path.join(x, y + ".ebuild")
+ full_path = os.path.join(repodir, relative_path)
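+ # When the VCS does not preserve mtimes, the mtime is only trustworthy for
+ # ebuilds the VCS reports as new or modified.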
+ if not vcs_preserves_mtime:
+ if ebuild_path not in new_ebuilds and \
+ ebuild_path not in modified_ebuilds:
+ pkg.mtime = None
+ try:
+ # All ebuilds should have utf_8 encoding.
+ f = io.open(_unicode_encode(full_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'])
+ try:
+ for check_name, e in run_checks(f, pkg):
+ stats[check_name] += 1
+ fails[check_name].append(relative_path + ': %s' % e)
+ finally:
+ f.close()
+ except UnicodeDecodeError:
+ # A file.UTF8 failure will have already been recorded above.
+ pass
+
+ if options.force:
+ # The dep_check() calls are the most expensive QA test. If --force
+ # is enabled, there's no point in wasting time on these since the
+ # user is intent on forcing the commit anyway.
+ continue
+
+ relevant_profiles = []
+ for keyword, arch, groups in arches:
+ if arch not in profiles:
+ # A missing profile will create an error further down
+ # during the KEYWORDS verification.
+ continue
+
+ if include_arches is not None:
+ if arch not in include_arches:
+ continue
+
+ relevant_profiles.extend((keyword, groups, prof)
+ for prof in profiles[arch])
+
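+ # Sort by profile sub_path so results are reported in a stable order.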
+ def sort_key(item):
+ return item[2].sub_path
+
+ relevant_profiles.sort(key=sort_key)
+
+ for keyword, groups, prof in relevant_profiles:
+
+ if not (prof.status == "stable" or \
+ (prof.status == "dev" and options.include_dev) or \
+ (prof.status == "exp" and options.include_exp_profiles == 'y')):
+ continue
+
+ dep_settings = arch_caches.get(prof.sub_path)
+ if dep_settings is None:
+ dep_settings = portage.config(
+ config_profile_path=prof.abs_path,
+ config_incrementals=repoman_incrementals,
+ config_root=config_root,
+ local_config=False,
+ _unmatched_removal=options.unmatched_removal,
+ env=env, repositories=repoman_settings.repositories)
+ dep_settings.categories = repoman_settings.categories
+ if options.without_mask:
+ dep_settings._mask_manager_obj = \
+ copy.deepcopy(dep_settings._mask_manager)
+ dep_settings._mask_manager._pmaskdict.clear()
+ arch_caches[prof.sub_path] = dep_settings
+
+ xmatch_cache_key = (prof.sub_path, tuple(groups))
+ xcache = arch_xmatch_caches.get(xmatch_cache_key)
+ if xcache is None:
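+ # melt() discards the frozen match caches and freeze() starts fresh ones;
+ # seed the new xcache with the shared profile-independent entries.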
+ portdb.melt()
+ portdb.freeze()
+ xcache = portdb.xcache
+ xcache.update(shared_xmatch_caches)
+ arch_xmatch_caches[xmatch_cache_key] = xcache
+
+ trees[root]["porttree"].settings = dep_settings
+ portdb.settings = dep_settings
+ portdb.xcache = xcache
+
+ dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
+ # just in case, prevent config.reset() from nuking these.
+ dep_settings.backup_changes("ACCEPT_KEYWORDS")
+
+ # This attribute is used in dbapi._match_use() to apply
+ # use.stable.{mask,force} settings based on the stable
+ # status of the parent package. This is required in order
+ # for USE deps of unstable packages to be resolved correctly,
+ # since otherwise use.stable.{mask,force} settings of
+ # dependencies may conflict (see bug #456342).
+ dep_settings._parent_stable = dep_settings._isStable(pkg)
+
+ # Handle package.use*.{force,mask) calculation, for use
+ # in dep_check.
+ dep_settings.useforce = dep_settings._use_manager.getUseForce(
+ pkg, stable=dep_settings._parent_stable)
+ dep_settings.usemask = dep_settings._use_manager.getUseMask(
+ pkg, stable=dep_settings._parent_stable)
+
+ if not baddepsyntax:
+ ismasked = not ebuild_archs or \
+ pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
+ if ismasked:
+ if not have_pmasked:
+ have_pmasked = bool(dep_settings._getMaskAtom(
+ pkg.cpv, pkg._metadata))
+ if options.ignore_masked:
+ continue
+ # we are testing deps for a masked package; give it some leeway
+ suffix = "masked"
+ matchmode = "minimum-all"
+ else:
+ suffix = ""
+ matchmode = "minimum-visible"
+
+ if not have_dev_keywords:
+ have_dev_keywords = \
+ bool(dev_keywords.intersection(keywords))
+
+ if prof.status == "dev":
+ suffix = suffix + "indev"
+
+ for mytype in Package._dep_keys:
+
+ mykey = "dependency.bad" + suffix
+ myvalue = myaux[mytype]
+ if not myvalue:
+ continue
+
+ success, atoms = portage.dep_check(myvalue, portdb,
+ dep_settings, use="all", mode=matchmode,
+ trees=trees)
+
+ if success:
+ if atoms:
+
+ # Don't bother with dependency.unknown for
+ # cases in which *DEPEND.bad is triggered.
+ for atom in atoms:
+ # dep_check returns all blockers and they
+ # aren't counted for *DEPEND.bad, so we
+ # ignore them here.
+ if not atom.blocker:
+ unknown_pkgs.discard(
+ (mytype, atom.unevaluated_atom))
+
+ if not prof.sub_path:
+ # old-style virtuals currently aren't
+ # resolvable with empty profile, since
+ # 'virtuals' mappings are unavailable
+ # (it would be expensive to search
+ # for PROVIDE in all ebuilds)
+ atoms = [atom for atom in atoms if not \
+ (atom.cp.startswith('virtual/') and \
+ not portdb.cp_list(atom.cp))]
+
+ # we have some unsolvable deps
+ # remove ! deps, which always show up as unsatisfiable
+ atoms = [str(atom.unevaluated_atom) \
+ for atom in atoms if not atom.blocker]
+
+ # if we emptied out our list, continue:
+ if not atoms:
+ continue
+ stats[mykey] += 1
+ fails[mykey].append("%s: %s: %s(%s) %s" % \
+ (relative_path, mytype, keyword,
+ prof, repr(atoms)))
+ else:
+ stats[mykey] += 1
+ fails[mykey].append("%s: %s: %s(%s) %s" % \
+ (relative_path, mytype, keyword,
+ prof, repr(atoms)))
+
+ if not baddepsyntax and unknown_pkgs:
+ type_map = {}
+ for mytype, atom in unknown_pkgs:
+ type_map.setdefault(mytype, set()).add(atom)
+ for mytype, atoms in type_map.items():
+ stats["dependency.unknown"] += 1
+ fails["dependency.unknown"].append("%s: %s: %s" %
+ (relative_path, mytype, ", ".join(sorted(atoms))))
+
+ # check if there are unused local USE-descriptions in metadata.xml
+ # (unless there are any invalids, to avoid noise)
+ if allvalid:
+ for myflag in muselist.difference(used_useflags):
+ stats["metadata.warning"] += 1
+ fails["metadata.warning"].append(
+ "%s/metadata.xml: unused local USE-description: '%s'" % \
+ (x, myflag))
+
+if options.if_modified == "y" and len(effective_scanlist) < 1:
+ logging.warning("--if-modified is enabled, but no modified packages were found!")
+
+if options.mode == "manifest":
+ sys.exit(dofail)
+
+# dofail will be set to 1 if we have failed in at least one non-warning category
+dofail = 0
+# dowarn will be set to 1 if we tripped any warnings
+dowarn = 0
+# dofull will be set if we should print a "repoman full" informational message
+dofull = options.mode != 'full'
+
+for x in qacats:
+ if not stats[x]:
+ continue
+ dowarn = 1
+ if x not in qawarnings:
+ dofail = 1
+
+if dofail or \
+ (dowarn and not (options.quiet or options.mode == "scan")):
+ dofull = 0
+
+# Save QA output so that it can be conveniently displayed
+# in $EDITOR while the user creates a commit message.
+# Otherwise, the user would not be able to see this output
+# once the editor has taken over the screen.
+qa_output = io.StringIO()
+style_file = ConsoleStyleFile(sys.stdout)
+if options.mode == 'commit' and \
+ (not commitmessage or not commitmessage.strip()):
+ style_file.write_listener = qa_output
+console_writer = StyleWriter(file=style_file, maxcol=9999)
+console_writer.style_listener = style_file.new_styles
+
+f = formatter.AbstractFormatter(console_writer)
+
+format_outputs = {
+ 'column': utilities.format_qa_output_column,
+ 'default': utilities.format_qa_output
+}
+
+format_output = format_outputs.get(options.output_style,
+ format_outputs['default'])
+format_output(f, stats, fails, dofull, dofail, options, qawarnings)
+
+style_file.flush()
+del console_writer, f, style_file
+qa_output = qa_output.getvalue()
+qa_output = qa_output.splitlines(True)
+
+suggest_ignore_masked = False
+suggest_include_dev = False
+
+if have_pmasked and not (options.without_mask or options.ignore_masked):
+ suggest_ignore_masked = True
+if have_dev_keywords and not options.include_dev:
+ suggest_include_dev = True
+
+if suggest_ignore_masked or suggest_include_dev:
+ print()
+ if suggest_ignore_masked:
+ print(bold("Note: use --without-mask to check " + \
+ "KEYWORDS on dependencies of masked packages"))
+
+ if suggest_include_dev:
+ print(bold("Note: use --include-dev (-d) to check " + \
+ "dependencies for 'dev' profiles"))
+ print()
+
+if options.mode != 'commit':
+ if dofull:
+ print(bold("Note: type \"repoman full\" for a complete listing."))
+ if dowarn and not dofail:
+ print(green("RepoMan sez:"),"\"You're only giving me a partial QA payment?\n I'll take it this time, but I'm not happy.\"")
+ elif not dofail:
+ print(green("RepoMan sez:"),"\"If everyone were like you, I'd be out of business!\"")
+ elif dofail:
+ print(bad("Please fix these important QA issues first."))
+ print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
+ sys.exit(1)
+else:
+ if dofail and can_force and options.force and not options.pretend:
+ print(green("RepoMan sez:") + \
+ " \"You want to commit even with these QA issues?\n" + \
+ " I'll take it this time, but I'm not happy.\"\n")
+ elif dofail:
+ if options.force and not can_force:
+ print(bad("The --force option has been disabled due to extraordinary issues."))
+ print(bad("Please fix these important QA issues first."))
+ print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
+ sys.exit(1)
+
+ if options.pretend:
+ print(green("RepoMan sez:"), "\"So, you want to play it safe. Good call.\"\n")
+
+ myunadded = []
+ if vcs == "cvs":
+ try:
+ myvcstree = portage.cvstree.getentries("./", recursive=1)
+ myunadded = portage.cvstree.findunadded(myvcstree, recursive=1, basedir="./")
+ except SystemExit as e:
+ raise # TODO propagate this
+ except:
+ err("Error retrieving CVS tree; exiting.")
+ if vcs == "svn":
+ try:
+ with repoman_popen("svn status --no-ignore") as f:
+ svnstatus = f.readlines()
+ myunadded = ["./" + elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I")]
+ except SystemExit as e:
+ raise # TODO propagate this
+ except:
+ err("Error retrieving SVN info; exiting.")
+ if vcs == "git":
+ # get list of files not under version control or missing
+ myf = repoman_popen("git ls-files --others")
+ myunadded = ["./" + elem[:-1] for elem in myf]
+ myf.close()
+ if vcs == "bzr":
+ try:
+ with repoman_popen("bzr status -S .") as f:
+ bzrstatus = f.readlines()
+ myunadded = ["./" + elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D"]
+ except SystemExit as e:
+ raise # TODO propagate this
+ except:
+ err("Error retrieving bzr info; exiting.")
+ if vcs == "hg":
+ with repoman_popen("hg status --no-status --unknown .") as f:
+ myunadded = f.readlines()
+ myunadded = ["./" + elem.rstrip() for elem in myunadded]
+
+ # Mercurial doesn't handle manually deleted files as removed from
+ # the repository, so the user needs to remove them before committing,
+ # using "hg remove [FILES]"
+ with repoman_popen("hg status --no-status --deleted .") as f:
+ mydeleted = f.readlines()
+ mydeleted = ["./" + elem.rstrip() for elem in mydeleted]
+
+
+ myautoadd = []
+ if myunadded:
+ for x in range(len(myunadded)-1, -1, -1):
+ xs = myunadded[x].split("/")
+ if xs[-1] == "files":
+ print("!!! files dir is not added! Please correct this.")
+ sys.exit(-1)
+ elif xs[-1] == "Manifest":
+ # It's a manifest... auto add
+ myautoadd += [myunadded[x]]
+ del myunadded[x]
+
+ if myunadded:
+ print(red("!!! The following files are in your local tree but are not added to the master"))
+ print(red("!!! tree. Please remove them from the local tree or add them to the master tree."))
+ for x in myunadded:
+ print(" ", x)
+ print()
+ print()
+ sys.exit(1)
+
+ if vcs == "hg" and mydeleted:
+ print(red("!!! The following files are removed manually from your local tree but are not"))
+ print(red("!!! removed from the repository. Please remove them, using \"hg remove [FILES]\"."))
+ for x in mydeleted:
+ print(" ", x)
+ print()
+ print()
+ sys.exit(1)
+
+ if vcs == "cvs":
+ mycvstree = cvstree.getentries("./", recursive=1)
+ mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
+ mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
+ myremoved = portage.cvstree.findremoved(mycvstree, recursive=1, basedir="./")
+ bin_blob_pattern = re.compile("^-kb$")
+ no_expansion = set(portage.cvstree.findoption(mycvstree, bin_blob_pattern,
+ recursive=1, basedir="./"))
+
+ if vcs == "svn":
+ with repoman_popen("svn status") as f:
+ svnstatus = f.readlines()
+ mychanged = ["./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
+ mynew = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
+ myremoved = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+
+ # Subversion expands keywords specified in svn:keywords properties.
+ with repoman_popen("svn propget -R svn:keywords") as f:
+ props = f.readlines()
+ expansion = dict(("./" + prop.split(" - ")[0], prop.split(" - ")[1].split()) \
+ for prop in props if " - " in prop)
+
+ elif vcs == "git":
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=M HEAD") as f:
+ mychanged = f.readlines()
+ mychanged = ["./" + elem[:-1] for elem in mychanged]
+
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=A HEAD") as f:
+ mynew = f.readlines()
+ mynew = ["./" + elem[:-1] for elem in mynew]
+
+ with repoman_popen("git diff-index --name-only "
+ "--relative --diff-filter=D HEAD") as f:
+ myremoved = f.readlines()
+ myremoved = ["./" + elem[:-1] for elem in myremoved]
+
+ if vcs == "bzr":
+ with repoman_popen("bzr status -S .") as f:
+ bzrstatus = f.readlines()
+ mychanged = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M"]
+ mynew = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] in "NK" or elem[0:1] == "R")]
+ myremoved = ["./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] == "K" or elem[0:1] == "R")]
+ # Bazaar expands nothing.
+
+ if vcs == "hg":
+ with repoman_popen("hg status --no-status --modified .") as f:
+ mychanged = f.readlines()
+ mychanged = ["./" + elem.rstrip() for elem in mychanged]
+
+ with repoman_popen("hg status --no-status --added .") as f:
+ mynew = f.readlines()
+ mynew = ["./" + elem.rstrip() for elem in mynew]
+
+ with repoman_popen("hg status --no-status --removed .") as f:
+ myremoved = f.readlines()
+ myremoved = ["./" + elem.rstrip() for elem in myremoved]
+
+ if vcs:
+ if not (mychanged or mynew or myremoved or (vcs == "hg" and mydeleted)):
+ print(green("RepoMan sez:"), "\"Doing nothing is not always good for QA.\"")
+ print()
+ print("(Didn't find any changed files...)")
+ print()
+ sys.exit(1)
+
+ # Manifests need to be regenerated after all other commits, so don't commit
+ # them now even if they have changed.
+ mymanifests = set()
+ myupdates = set()
+ for f in mychanged + mynew:
+ if "Manifest" == os.path.basename(f):
+ mymanifests.add(f)
+ else:
+ myupdates.add(f)
+ myupdates.difference_update(myremoved)
+ myupdates = list(myupdates)
+ mymanifests = list(mymanifests)
+ myheaders = []
+ mydirty = []
+
+ commitmessage = options.commitmsg
+ if options.commitmsgfile:
+ try:
+ f = io.open(_unicode_encode(options.commitmsgfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ commitmessage = f.read()
+ f.close()
+ del f
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ portage.writemsg("!!! File Not Found: --commitmsgfile='%s'\n" % options.commitmsgfile)
+ else:
+ raise
+ # We've read the content so the file is no longer needed.
+ commitmessagefile = None
+ if not commitmessage or not commitmessage.strip():
+ try:
+ editor = os.environ.get("EDITOR")
+ if editor and utilities.editor_is_executable(editor):
+ commitmessage = utilities.get_commit_message_with_editor(
+ editor, message=qa_output)
+ else:
+ commitmessage = utilities.get_commit_message_with_stdin()
+ except KeyboardInterrupt:
+ exithandler()
+ if not commitmessage or not commitmessage.strip():
+ print("* no commit message? aborting commit.")
+ sys.exit(1)
+ commitmessage = commitmessage.rstrip()
+ changelog_msg = commitmessage
+ portage_version = getattr(portage, "VERSION", None)
+ gpg_key = repoman_settings.get("PORTAGE_GPG_KEY", "")
+ dco_sob = repoman_settings.get("DCO_SIGNED_OFF_BY", "")
+ if portage_version is None:
+ sys.stderr.write("Failed to insert portage version in message!\n")
+ sys.stderr.flush()
+ portage_version = "Unknown"
+
+ report_options = []
+ if options.force:
+ report_options.append("--force")
+ if options.ignore_arches:
+ report_options.append("--ignore-arches")
+ if include_arches is not None:
+ report_options.append("--include-arches=\"%s\"" %
+ " ".join(sorted(include_arches)))
+
+ if vcs == "git":
+ # Use new footer only for git (see bug #438364).
+ commit_footer = "\n\nPackage-Manager: portage-%s" % portage_version
+ if report_options:
+ commit_footer += "\nRepoMan-Options: " + " ".join(report_options)
+ if sign_manifests:
+ commit_footer += "\nManifest-Sign-Key: %s" % (gpg_key, )
+ if dco_sob:
+ commit_footer += "\nSigned-off-by: %s" % (dco_sob, )
+ else:
+ unameout = platform.system() + " "
+ if platform.system() in ["Darwin", "SunOS"]:
+ unameout += platform.processor()
+ else:
+ unameout += platform.machine()
+ commit_footer = "\n\n"
+ if dco_sob:
+ commit_footer += "Signed-off-by: %s\n" % (dco_sob, )
+ commit_footer += "(Portage version: %s/%s/%s" % \
+ (portage_version, vcs, unameout)
+ if report_options:
+ commit_footer += ", RepoMan options: " + " ".join(report_options)
+ if sign_manifests:
+ commit_footer += ", signed Manifest commit with key %s" % \
+ (gpg_key, )
+ else:
+ commit_footer += ", unsigned Manifest commit"
+ commit_footer += ")"
+
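+ # Illustrative examples of the assembled footer (version, VCS and
+ # platform values here are hypothetical):
+ #   git:    "\n\nPackage-Manager: portage-2.2.14\nRepoMan-Options: --force"
+ #   other:  "\n\n(Portage version: 2.2.14/cvs/Linux x86_64, unsigned Manifest commit)"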
+ commitmessage += commit_footer
+
+ broken_changelog_manifests = []
+ if options.echangelog in ('y', 'force'):
+ logging.info("checking for unmodified ChangeLog files")
+ committer_name = utilities.get_committer_name(env=repoman_settings)
+ for x in sorted(vcs_files_to_cps(
+ chain(myupdates, mymanifests, myremoved))):
+ catdir, pkgdir = x.split("/")
+ checkdir = repodir + "/" + x
+ checkdir_relative = ""
+ if repolevel < 3:
+ checkdir_relative = os.path.join(pkgdir, checkdir_relative)
+ if repolevel < 2:
+ checkdir_relative = os.path.join(catdir, checkdir_relative)
+ checkdir_relative = os.path.join(".", checkdir_relative)
+
+ changelog_path = os.path.join(checkdir_relative, "ChangeLog")
+ changelog_modified = changelog_path in modified_changelogs
+ if changelog_modified and options.echangelog != 'force':
+ continue
+
+ # get changes for this package
+ cdrlen = len(checkdir_relative)
+ clnew = [elem[cdrlen:] for elem in mynew if elem.startswith(checkdir_relative)]
+ clremoved = [elem[cdrlen:] for elem in myremoved if elem.startswith(checkdir_relative)]
+ clchanged = [elem[cdrlen:] for elem in mychanged if elem.startswith(checkdir_relative)]
+
+ # Skip ChangeLog generation if only the Manifest was modified,
+ # as discussed in bug #398009.
+ nontrivial_cl_files = set()
+ nontrivial_cl_files.update(clnew, clremoved, clchanged)
+ nontrivial_cl_files.difference_update(['Manifest'])
+ if not nontrivial_cl_files and options.echangelog != 'force':
+ continue
+
+ new_changelog = utilities.UpdateChangeLog(checkdir_relative,
+ committer_name, changelog_msg,
+ os.path.join(repodir, 'skel.ChangeLog'),
+ catdir, pkgdir,
+ new=clnew, removed=clremoved, changed=clchanged,
+ pretend=options.pretend)
+ if new_changelog is None:
+ writemsg_level("!!! Updating the ChangeLog failed\n", \
+ level=logging.ERROR, noiselevel=-1)
+ sys.exit(1)
+
+ # if the ChangeLog was just created, add it to vcs
+ if new_changelog:
+ myautoadd.append(changelog_path)
+ # myautoadd is appended to myupdates below
+ else:
+ myupdates.append(changelog_path)
+
+ if options.ask and not options.pretend:
+ # regenerate Manifest for modified ChangeLog (bug #420735)
+ repoman_settings["O"] = checkdir
+ digestgen(mysettings=repoman_settings, myportdb=portdb)
+ else:
+ broken_changelog_manifests.append(x)
+
+ if myautoadd:
+ print(">>> Auto-Adding missing Manifest/ChangeLog file(s)...")
+ add_cmd = [vcs, "add"]
+ add_cmd += myautoadd
+ if options.pretend:
+ portage.writemsg_stdout("(%s)\n" % " ".join(add_cmd),
+ noiselevel=-1)
+ else:
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(add_cmd[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(add_cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(add_cmd[0])
+ add_cmd[0] = fullname
+
+ add_cmd = [_unicode_encode(arg) for arg in add_cmd]
+ retcode = subprocess.call(add_cmd)
+ if retcode != os.EX_OK:
+ logging.error(
+ "Exiting on %s error code: %s\n" % (vcs, retcode))
+ sys.exit(retcode)
+
+ myupdates += myautoadd
+
+ print("* %s files being committed..." % green(str(len(myupdates))), end=' ')
+
+ if vcs not in ('cvs', 'svn'):
+ # With git, bzr and hg, there's never any keyword expansion, so
+ # there's no need to regenerate manifests and all files will be
+ # committed in one big commit at the end.
+ print()
+ elif not repo_config.thin_manifest:
+ if vcs == 'cvs':
+ headerstring = "'\$(Header|Id).*\$'"
+ elif vcs == "svn":
+ svn_keywords = dict((k.lower(), k) for k in [
+ "Rev",
+ "Revision",
+ "LastChangedRevision",
+ "Date",
+ "LastChangedDate",
+ "Author",
+ "LastChangedBy",
+ "URL",
+ "HeadURL",
+ "Id",
+ "Header",
+ ])
+
+ for myfile in myupdates:
+
+ # for CVS, no_expansion contains files that are excluded from expansion
+ if vcs == "cvs":
+ if myfile in no_expansion:
+ continue
+
+ # for SVN, expansion contains files that are included in expansion
+ elif vcs == "svn":
+ if myfile not in expansion:
+ continue
+
+ # Subversion keywords are case-insensitive in svn:keywords properties, but case-sensitive in contents of files.
+ enabled_keywords = []
+ for k in expansion[myfile]:
+ keyword = svn_keywords.get(k.lower())
+ if keyword is not None:
+ enabled_keywords.append(keyword)
+
+ headerstring = "'\$(%s).*\$'" % "|".join(enabled_keywords)
+
+ myout = repoman_getstatusoutput("egrep -q " + headerstring + " " +
+ portage._shell_quote(myfile))
+ if myout[0] == 0:
+ myheaders.append(myfile)
+
+ print("%s have headers that will change." % green(str(len(myheaders))))
+ print("* Files with headers will cause the manifests to be changed and committed separately.")
+
+ logging.info("myupdates: %s", myupdates)
+ logging.info("myheaders: %s", myheaders)
+
+ uq = UserQuery(options)
+ if options.ask and uq.query('Commit changes?', True) != 'Yes':
+ print("* aborting commit.")
+ sys.exit(128 + signal.SIGINT)
+
+ # Handle the case where committed files have keywords which
+ # will change and need a priming commit before the Manifest
+ # can be committed.
+ if (myupdates or myremoved) and myheaders:
+ myfiles = myupdates + myremoved
+ fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
+ mymsg = os.fdopen(fd, "wb")
+ mymsg.write(_unicode_encode(commitmessage))
+ mymsg.close()
+
+ print()
+ print(green("Using commit message:"))
+ print(green("------------------------------------------------------------------------------"))
+ print(commitmessage)
+ print(green("------------------------------------------------------------------------------"))
+ print()
+
+ # Having a leading ./ prefix on file paths can trigger a bug in
+ # the cvs server when committing files to multiple directories,
+ # so strip the prefix.
+ myfiles = [f.lstrip("./") for f in myfiles]
+
+ commit_cmd = [vcs]
+ commit_cmd.extend(vcs_global_opts)
+ commit_cmd.append("commit")
+ commit_cmd.extend(vcs_local_opts)
+ commit_cmd.extend(["-F", commitmessagefile])
+ commit_cmd.extend(myfiles)
+
+ try:
+ if options.pretend:
+ print("(%s)" % (" ".join(commit_cmd),))
+ else:
+ retval = spawn(commit_cmd, env=commit_env)
+ if retval != os.EX_OK:
+ writemsg_level(("!!! Exiting on %s (shell) " + \
+ "error code: %s\n") % (vcs, retval),
+ level=logging.ERROR, noiselevel=-1)
+ sys.exit(retval)
+ finally:
+ try:
+ os.unlink(commitmessagefile)
+ except OSError:
+ pass
+
+ # Setup the GPG commands
+ def gpgsign(filename):
+ gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
+ if gpgcmd is None:
+ raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!" + \
+ " Is make.globals missing?")
+ if "${PORTAGE_GPG_KEY}" in gpgcmd and \
+ "PORTAGE_GPG_KEY" not in repoman_settings:
+ raise MissingParameter("PORTAGE_GPG_KEY is unset!")
+ if "${PORTAGE_GPG_DIR}" in gpgcmd:
+ if "PORTAGE_GPG_DIR" not in repoman_settings:
+ repoman_settings["PORTAGE_GPG_DIR"] = \
+ os.path.expanduser("~/.gnupg")
+ logging.info("Automatically setting PORTAGE_GPG_DIR to '%s'" \
+ % repoman_settings["PORTAGE_GPG_DIR"])
+ else:
+ repoman_settings["PORTAGE_GPG_DIR"] = \
+ os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
+ if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
+ raise portage.exception.InvalidLocation(
+ "Unable to access directory: PORTAGE_GPG_DIR='%s'" % \
+ repoman_settings["PORTAGE_GPG_DIR"])
+ gpgvars = {"FILE": filename}
+ for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
+ v = repoman_settings.get(k)
+ if v is not None:
+ gpgvars[k] = v
+ gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
+ if options.pretend:
+ print("(" + gpgcmd + ")")
+ else:
+ # Encode unicode manually for bug #310789.
+ gpgcmd = portage.util.shlex_split(gpgcmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(gpgcmd[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(gpgcmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(gpgcmd[0])
+ gpgcmd[0] = fullname
+
+ gpgcmd = [_unicode_encode(arg,
+ encoding=_encodings['fs'], errors='strict') for arg in gpgcmd]
+ rValue = subprocess.call(gpgcmd)
+ if rValue == os.EX_OK:
+ os.rename(filename + ".asc", filename)
+ else:
+ raise portage.exception.PortageException("!!! gpg exited with '" + str(rValue) + "' status")
+
+ def need_signature(filename):
+ try:
+ with open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return b"BEGIN PGP SIGNED MESSAGE" not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
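+
+ # PORTAGE_GPG_SIGNING_COMMAND is a template expanded by varexpand()
+ # in gpgsign() above. A typical value (illustrative; not necessarily
+ # this build's make.globals default) looks like:
+ #   gpg --sign --digest-algo SHA256 --clearsign --yes \
+ #       --default-key "${PORTAGE_GPG_KEY}" \
+ #       --homedir "${PORTAGE_GPG_DIR}" "${FILE}"
+ # which writes "${FILE}.asc", renamed over the original on success.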
+
+ # When files are removed and re-added, the cvs server will put /Attic/
+ # inside the $Header path. This code detects the problem and corrects it
+ # so that the Manifest will generate correctly. See bug #169500.
+ # Use binary mode in order to avoid potential character encoding issues.
+ cvs_header_re = re.compile(br'^#\s*\$Header.*\$$')
+ attic_str = b'/Attic/'
+ attic_replace = b'/'
+ for x in myheaders:
+ f = open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb')
+ mylines = f.readlines()
+ f.close()
+ modified = False
+ for i, line in enumerate(mylines):
+ if cvs_header_re.match(line) is not None and \
+ attic_str in line:
+ mylines[i] = line.replace(attic_str, attic_replace)
+ modified = True
+ if modified:
+ portage.util.write_atomic(x, b''.join(mylines),
+ mode='wb')
+
+ if repolevel == 1:
+ print(green("RepoMan sez:"), "\"You're rather crazy... "
+ "doing the entire repository.\"\n")
+
+ if vcs in ('cvs', 'svn') and (myupdates or myremoved):
+
+ for x in sorted(vcs_files_to_cps(
+ chain(myupdates, myremoved, mymanifests))):
+ repoman_settings["O"] = os.path.join(repodir, x)
+ digestgen(mysettings=repoman_settings, myportdb=portdb)
+
+ elif broken_changelog_manifests:
+ for x in broken_changelog_manifests:
+ repoman_settings["O"] = os.path.join(repodir, x)
+ digestgen(mysettings=repoman_settings, myportdb=portdb)
+
+ signed = False
+ if sign_manifests:
+ signed = True
+ try:
+ for x in sorted(vcs_files_to_cps(
+ chain(myupdates, myremoved, mymanifests))):
+ repoman_settings["O"] = os.path.join(repodir, x)
+ manifest_path = os.path.join(repoman_settings["O"], "Manifest")
+ if not need_signature(manifest_path):
+ continue
+ gpgsign(manifest_path)
+ except portage.exception.PortageException as e:
+ portage.writemsg("!!! %s\n" % str(e))
+ portage.writemsg("!!! Disabled FEATURES='sign'\n")
+ signed = False
+
+ if vcs == 'git':
+ # It's not safe to use the git commit -a option since there might
+ # be some modified files elsewhere in the working tree that the
+ # user doesn't want to commit. Therefore, call git update-index
+ # in order to ensure that the index is updated with the latest
+ # versions of all new and modified files in the relevant portion
+ # of the working tree.
+ myfiles = mymanifests + myupdates
+ myfiles.sort()
+ update_index_cmd = ["git", "update-index"]
+ update_index_cmd.extend(f.lstrip("./") for f in myfiles)
+ if options.pretend:
+ print("(%s)" % (" ".join(update_index_cmd),))
+ else:
+ retval = spawn(update_index_cmd, env=os.environ)
+ if retval != os.EX_OK:
+ writemsg_level(("!!! Exiting on %s (shell) " + \
+ "error code: %s\n") % (vcs, retval),
+ level=logging.ERROR, noiselevel=-1)
+ sys.exit(retval)
+
+ if True:
+ myfiles = mymanifests[:]
+ # If there are no header (SVN/CVS keywords) changes in
+ # the files, this Manifest commit must include the
+ # other (yet uncommitted) files.
+ if not myheaders:
+ myfiles += myupdates
+ myfiles += myremoved
+ myfiles.sort()
+
+ fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
+ mymsg = os.fdopen(fd, "wb")
+ mymsg.write(_unicode_encode(commitmessage))
+ mymsg.close()
+
+ commit_cmd = []
+ if options.pretend and vcs is None:
+ # substitute a bogus value for pretend output
+ commit_cmd.append("cvs")
+ else:
+ commit_cmd.append(vcs)
+ commit_cmd.extend(vcs_global_opts)
+ commit_cmd.append("commit")
+ commit_cmd.extend(vcs_local_opts)
+ if vcs == "hg":
+ commit_cmd.extend(["--logfile", commitmessagefile])
+ commit_cmd.extend(myfiles)
+ else:
+ commit_cmd.extend(["-F", commitmessagefile])
+ commit_cmd.extend(f.lstrip("./") for f in myfiles)
+
+ try:
+ if options.pretend:
+ print("(%s)" % (" ".join(commit_cmd),))
+ else:
+ retval = spawn(commit_cmd, env=commit_env)
+ if retval != os.EX_OK:
+ if repo_config.sign_commit and vcs == 'git' and \
+ not git_supports_gpg_sign():
+ # Inform user that newer git is needed (bug #403323).
+ logging.error(
+ "Git >=1.7.9 is required for signed commits!")
+
+ writemsg_level(("!!! Exiting on %s (shell) " + \
+ "error code: %s\n") % (vcs, retval),
+ level=logging.ERROR, noiselevel=-1)
+ sys.exit(retval)
+ finally:
+ try:
+ os.unlink(commitmessagefile)
+ except OSError:
+ pass
+
+ print()
+ if vcs:
+ print("Commit complete.")
+ else:
+ print("repoman was too scared by not seeing any familiar version control file that he forgot to commit anything")
+ print(green("RepoMan sez:"), "\"If everyone were like you, I'd be out of business!\"\n")
+sys.exit(0)
diff --git a/usr/lib/portage/bin/save-ebuild-env.sh b/usr/lib/portage/bin/save-ebuild-env.sh
new file mode 100755
index 0000000..b790306
--- /dev/null
+++ b/usr/lib/portage/bin/save-ebuild-env.sh
@@ -0,0 +1,125 @@
+#!@PORTAGE_PREFIX_BASH@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# @FUNCTION: __save_ebuild_env
+# @DESCRIPTION:
+# echo the current environment to stdout, filtering out redundant info.
+#
+# --exclude-init-phases causes pkg_nofetch and src_* phase functions to
+# be excluded from the output. These function are not needed for installation
+# or removal of the packages, and can therefore be safely excluded.
+#
+__save_ebuild_env() {
+ (
+ if has --exclude-init-phases $* ; then
+ unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_ \
+ PORTAGE_DOCOMPRESS_SIZE_LIMIT PORTAGE_DOCOMPRESS \
+ PORTAGE_DOCOMPRESS_SKIP
+ if [[ -n $PYTHONPATH &&
+ ${PYTHONPATH%%:*} -ef $PORTAGE_PYM_PATH ]] ; then
+ if [[ $PYTHONPATH == *:* ]] ; then
+ export PYTHONPATH=${PYTHONPATH#*:}
+ else
+ unset PYTHONPATH
+ fi
+ fi
+ fi
+
+ # misc variables inherited from the calling environment
+ unset COLORTERM DISPLAY EDITOR LESS LESSOPEN LOGNAME LS_COLORS PAGER \
+ TERM TERMCAP USER ftp_proxy http_proxy no_proxy
+
+ # other variables inherited from the calling environment
+ unset CVS_RSH ECHANGELOG_USER GPG_AGENT_INFO \
+ SSH_AGENT_PID SSH_AUTH_SOCK STY WINDOW XAUTHORITY
+
+ # CCACHE and DISTCC config
+ unset ${!CCACHE_*} ${!DISTCC_*}
+
+ # There's no need to bloat environment.bz2 with internally defined
+ # functions and variables, so filter them out if possible.
+
+ for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
+ src_compile src_test src_install pkg_preinst pkg_postinst \
+ pkg_prerm pkg_postrm pkg_config pkg_info pkg_pretend ; do
+ unset -f default_$x __eapi{0,1,2,3,4}_$x
+ done
+ unset x
+
+ unset -f assert __assert_sigpipe_ok \
+ __dump_trace die \
+ __quiet_mode __vecho __elog_base eqawarn elog \
+ einfo einfon ewarn eerror ebegin __eend eend KV_major \
+ KV_minor KV_micro KV_to_int get_KV __1 __1 has \
+ __has_phase_defined_up_to \
+ hasv hasq __qa_source __qa_call \
+ addread addwrite adddeny addpredict __sb_append_var \
+ use usev useq has_version portageq \
+ best_version use_with use_enable register_die_hook \
+ unpack __strip_duplicate_slashes econf einstall \
+ __dyn_setup __dyn_unpack __dyn_clean \
+ into insinto exeinto docinto \
+ insopts diropts exeopts libopts docompress \
+ __abort_handler __abort_prepare __abort_configure __abort_compile \
+ __abort_test __abort_install __dyn_prepare __dyn_configure \
+ __dyn_compile __dyn_test __dyn_install \
+ __dyn_pretend __dyn_help \
+ debug-print debug-print-function \
+ debug-print-section __helpers_die inherit EXPORT_FUNCTIONS \
+ nonfatal register_success_hook \
+ __hasg __hasgq \
+ __save_ebuild_env __set_colors __filter_readonly_variables \
+ __preprocess_ebuild_env \
+ __repo_attr __source_all_bashrcs \
+ __ebuild_main __ebuild_phase __ebuild_phase_with_hooks \
+ __ebuild_arg_to_phase __ebuild_phase_funcs default \
+ __unpack_tar __unset_colors \
+ ${QA_INTERCEPTORS}
+
+ ___eapi_has_usex && unset -f usex
+ ___eapi_has_master_repositories && unset -f master_repositories
+ ___eapi_has_repository_path && unset -f repository_path
+ ___eapi_has_available_eclasses && unset -f available_eclasses
+ ___eapi_has_eclass_path && unset -f eclass_path
+ ___eapi_has_license_path && unset -f license_path
+ ___eapi_has_package_manager_build_user && unset -f package_manager_build_user
+ ___eapi_has_package_manager_build_group && unset -f package_manager_build_group
+
+ # PREFIX: compgen is not compiled in during bootstrap
+ type compgen >& /dev/null && unset -f $(compgen -A function ___eapi_)
+
+ # portage config variables and variables set directly by portage
+ unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
+ DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
+ EBUILD_FORCE_TEST EBUILD_MASTER_PID \
+ ECLASS_DEPTH ENDCOL FAKEROOTKEY \
+ GOOD HILITE HOME \
+ LAST_E_CMD LAST_E_LEN LD_PRELOAD MISC_FUNCTIONS_ARGS MOPREFIX \
+ NOCOLOR NORMAL PKGDIR PKGUSE PKG_LOGDIR PKG_TMPDIR \
+ PORTAGE_BASHRCS_SOURCED PORTAGE_COMPRESS \
+ PORTAGE_COMPRESS_EXCLUDE_SUFFIXES \
+ PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS \
+ PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES \
+ PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES \
+ PORTAGE_NONFATAL PORTAGE_QUIET \
+ PORTAGE_SANDBOX_DENY PORTAGE_SANDBOX_PREDICT \
+ PORTAGE_SANDBOX_READ PORTAGE_SANDBOX_WRITE PREROOTPATH \
+ QA_INTERCEPTORS \
+ RC_DEFAULT_INDENT RC_DOT_PATTERN RC_ENDCOL RC_INDENTATION \
+ ROOT ROOTPATH RPMDIR TEMP TMP TMPDIR USE_EXPAND \
+ WARN XARGS _RC_GET_KV_CACHE
+
+ # user config variables
+ unset DOC_SYMLINKS_DIR INSTALL_MASK PKG_INSTALL_MASK
+
+ # Prefix additions
+ unset DEFAULT_PATH EXTRA_PATH PORTAGE_GROUP PORTAGE_USER
+
+ declare -p
+ declare -fp
+ if [[ ${BASH_VERSINFO[0]} == 3 ]]; then
+ export
+ fi
+ )
+}
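+
+# Usage sketch (illustrative; the real call sites live elsewhere in
+# portage): the filtered environment is typically captured for reuse,
+# e.g.:
+#   __save_ebuild_env --exclude-init-phases > "${T}/environment"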
diff --git a/usr/lib/portage/bin/xattr-helper.py b/usr/lib/portage/bin/xattr-helper.py
new file mode 100755
index 0000000..3e9b81e
--- /dev/null
+++ b/usr/lib/portage/bin/xattr-helper.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python -b
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Dump and restore extended attributes.
+
+We use formats like that used by getfattr --dump. This is meant for shell
+helpers to save/restore. If you're looking for a python/portage API, see
+portage.util.movefile._copyxattr instead.
+
+https://en.wikipedia.org/wiki/Extended_file_attributes
+"""
+__doc__ = doc
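+
+# Usage sketch (illustrative): paths for --dump are read NUL-separated
+# from stdin when not given as arguments, while --restore reads a dump
+# from stdin:
+#   find . -print0 | xattr-helper.py --dump > xattrs.dump
+#   xattr-helper.py --restore < xattrs.dump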
+
+
+import array
+import os
+import re
+import sys
+
+from portage.util._argparse import ArgumentParser
+
+if hasattr(os, "getxattr"):
+
+ class xattr(object):
+ get = os.getxattr
+ set = os.setxattr
+ list = os.listxattr
+
+else:
+ import xattr
+
+
+_UNQUOTE_RE = re.compile(br'\\[0-7]{3}')
+_FS_ENCODING = sys.getfilesystemencoding()
+
+
+if sys.hexversion < 0x3000000:
+
+ def octal_quote_byte(b):
+ return b'\\%03o' % ord(b)
+
+ def unicode_encode(s):
+ if isinstance(s, unicode):
+ s = s.encode(_FS_ENCODING)
+ return s
+else:
+
+ def octal_quote_byte(b):
+ return ('\\%03o' % ord(b)).encode('ascii')
+
+ def unicode_encode(s):
+ if isinstance(s, str):
+ s = s.encode(_FS_ENCODING)
+ return s
+
+
+def quote(s, quote_chars):
+ """Convert all |quote_chars| in |s| to escape sequences
+
+ This is normally used to escape any embedded quotation marks.
+ """
+ quote_re = re.compile(b'[' + quote_chars + b']')
+ result = []
+ pos = 0
+ s_len = len(s)
+
+ while pos < s_len:
+ m = quote_re.search(s, pos=pos)
+ if m is None:
+ result.append(s[pos:])
+ pos = s_len
+ else:
+ start = m.start()
+ result.append(s[pos:start])
+ result.append(octal_quote_byte(s[start:start+1]))
+ pos = start + 1
+
+ return b''.join(result)
+
+
+def unquote(s):
+ """Process all escape sequences in |s|"""
+ result = []
+ pos = 0
+ s_len = len(s)
+
+ while pos < s_len:
+ m = _UNQUOTE_RE.search(s, pos=pos)
+ if m is None:
+ result.append(s[pos:])
+ pos = s_len
+ else:
+ start = m.start()
+ result.append(s[pos:start])
+ pos = start + 4
+ a = array.array('B')
+ a.append(int(s[start + 1:pos], 8))
+ try:
+ # Python >= 3.2
+ result.append(a.tobytes())
+ except AttributeError:
+ result.append(a.tostring())
+
+ return b''.join(result)
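+
+# Round-trip example (illustrative): a newline is escaped to its
+# three-digit octal form and restored by unquote():
+#   quote(b'a\nb', b'\n\r\\\\')  ->  b'a\\012b'
+#   unquote(b'a\\012b')          ->  b'a\nb'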
+
+
+def dump_xattrs(pathnames, file_out):
+ """Dump the xattr data for |pathnames| to |file_out|"""
+ # NOTE: Always quote backslashes, in order to ensure that they are
+ # not interpreted as quotes when they are processed by unquote.
+ quote_chars = b'\n\r\\\\'
+
+ for pathname in pathnames:
+ attrs = xattr.list(pathname)
+ if not attrs:
+ continue
+
+ file_out.write(b'# file: %s\n' % quote(pathname, quote_chars))
+ for attr in attrs:
+ attr = unicode_encode(attr)
+ value = xattr.get(pathname, attr)
+ file_out.write(b'%s="%s"\n' % (
+ quote(attr, b'=' + quote_chars),
+ quote(value, b'\0"' + quote_chars)))
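+
+# The emitted dump mirrors the `getfattr --dump` format (illustrative):
+#   # file: some/file
+#   user.foo="bar"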
+
+
+def restore_xattrs(file_in):
+ """Read |file_in| and restore xattrs content from it
+
+ This expects textual data in the format written by dump_xattrs.
+ """
+ pathname = None
+ for i, line in enumerate(file_in):
+ if line.startswith(b'# file: '):
+ pathname = unquote(line.rstrip(b'\n')[8:])
+ else:
+ parts = line.split(b'=', 1)
+ if len(parts) == 2:
+ if pathname is None:
+ raise ValueError('line %d: missing pathname' % (i + 1,))
+ attr = unquote(parts[0])
+ # strip trailing newline and quotes
+ value = unquote(parts[1].rstrip(b'\n')[1:-1])
+ xattr.set(pathname, attr, value)
+ elif line.strip():
+ raise ValueError('line %d: malformed entry' % (i + 1,))
+
+
+def main(argv):
+
+ parser = ArgumentParser(description=doc)
+ parser.add_argument('paths', nargs='*', default=[])
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument('--dump',
+ action='store_true',
+ help='Dump the values of all extended '
+ 'attributes associated with null-separated'
+ ' paths read from stdin.')
+ actions.add_argument('--restore',
+ action='store_true',
+ help='Restore extended attributes using'
+ ' a dump read from stdin.')
+
+ options = parser.parse_args(argv)
+
+ if sys.hexversion >= 0x3000000:
+ file_in = sys.stdin.buffer.raw
+ else:
+ file_in = sys.stdin
+ if not options.paths:
+ options.paths += [x for x in file_in.read().split(b'\0') if x]
+
+ if options.dump:
+ if sys.hexversion >= 0x3000000:
+ file_out = sys.stdout.buffer
+ else:
+ file_out = sys.stdout
+ dump_xattrs(options.paths, file_out)
+
+ elif options.restore:
+ restore_xattrs(file_in)
+
+ else:
+ parser.error('missing action!')
+
+ return os.EX_OK
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/usr/lib/portage/bin/xpak-helper.py b/usr/lib/portage/bin/xpak-helper.py
new file mode 100755
index 0000000..c4391cd
--- /dev/null
+++ b/usr/lib/portage/bin/xpak-helper.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python -b
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import portage
+portage._internal_caller = True
+from portage import os
+from portage.util._argparse import ArgumentParser
+
+def command_recompose(args):
+
+ usage = "usage: recompose <binpkg_path> <metadata_dir>\n"
+
+ if len(args) != 2:
+ sys.stderr.write(usage)
+ sys.stderr.write("2 arguments are required, got %s\n" % len(args))
+ return 1
+
+ binpkg_path, metadata_dir = args
+
+ if not os.path.isfile(binpkg_path):
+ sys.stderr.write(usage)
+ sys.stderr.write("Argument 1 is not a regular file: '%s'\n" % \
+ binpkg_path)
+ return 1
+
+ if not os.path.isdir(metadata_dir):
+ sys.stderr.write(usage)
+ sys.stderr.write("Argument 2 is not a directory: '%s'\n" % \
+ metadata_dir)
+ return 1
+
+ t = portage.xpak.tbz2(binpkg_path)
+ t.recompose(metadata_dir)
+ return os.EX_OK
+
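+# Usage sketch (illustrative): replace the xpak metadata inside a
+# binary package with the contents of a metadata directory:
+#   xpak-helper.py recompose /path/to/foo-1.0.tbz2 ./build-info
+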
+def main(argv):
+
+ if argv and isinstance(argv[0], bytes):
+ for i, x in enumerate(argv):
+ argv[i] = portage._unicode_decode(x, errors='strict')
+
+ valid_commands = ('recompose',)
+ description = "Perform metadata operations on a binary package."
+ usage = "usage: %s COMMAND [args]" % \
+ os.path.basename(argv[0])
+
+ parser = ArgumentParser(description=description, usage=usage)
+ options, args = parser.parse_known_args(argv[1:])
+
+ if not args:
+ parser.error("missing command argument")
+
+ command = args[0]
+
+ if command not in valid_commands:
+ parser.error("invalid command: '%s'" % command)
+
+ if command == 'recompose':
+ rval = command_recompose(args[1:])
+ else:
+ raise AssertionError("invalid command: '%s'" % command)
+
+ return rval
+
+if __name__ == "__main__":
+ rval = main(sys.argv[:])
+ sys.exit(rval)
diff --git a/usr/lib/portage/pym/_emerge/AbstractDepPriority.py b/usr/lib/portage/pym/_emerge/AbstractDepPriority.py
new file mode 100644
index 0000000..1fcd043
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AbstractDepPriority.py
@@ -0,0 +1,30 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.util.SlotObject import SlotObject
+
+class AbstractDepPriority(SlotObject):
+ __slots__ = ("buildtime", "buildtime_slot_op",
+ "runtime", "runtime_post", "runtime_slot_op")
+
+ def __lt__(self, other):
+ return self.__int__() < other
+
+ def __le__(self, other):
+ return self.__int__() <= other
+
+ def __eq__(self, other):
+ return self.__int__() == other
+
+ def __ne__(self, other):
+ return self.__int__() != other
+
+ def __gt__(self, other):
+ return self.__int__() > other
+
+ def __ge__(self, other):
+ return self.__int__() >= other
+
+ def copy(self):
+ return copy.copy(self)
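+
+# Minimal sketch (hypothetical subclass, not part of portage): a
+# subclass supplies __int__(), after which instances compare like ints
+# via the rich comparison methods above.
+#
+#   class _ExamplePriority(AbstractDepPriority):
+#       __slots__ = ()
+#       def __int__(self):
+#           return 1 if self.buildtime else 0
+#
+#   _ExamplePriority(buildtime=True) > 0  # True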
diff --git a/usr/lib/portage/pym/_emerge/AbstractEbuildProcess.py b/usr/lib/portage/pym/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 0000000..5be59e4
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,344 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import platform
+import stat
+import subprocess
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
+class AbstractEbuildProcess(SpawnProcess):
+
+ __slots__ = ('phase', 'settings',) + \
+ ('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
+
+ _phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+ _phases_interactive_whitelist = ('config',)
+ _phases_without_cgroup = ('preinst', 'postinst', 'prerm', 'postrm', 'config')
+
+ # Number of milliseconds to allow natural exit of the ebuild
+ # process after it has called the exit command via IPC. It
+ # doesn't hurt to be generous here since the scheduler
+ # continues to process events during this period, and it can
+ # return long before the timeout expires.
+ _exit_timeout = 10000 # 10 seconds
+
+ # The EbuildIpcDaemon support is well tested, but this variable
+ # is left so we can temporarily disable it if any issues arise.
+ _enable_ipc_daemon = False
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ if self.phase is None:
+ phase = self.settings.get("EBUILD_PHASE")
+ if not phase:
+ phase = 'other'
+ self.phase = phase
+
+ def _start(self):
+
+ need_builddir = self.phase not in self._phases_without_builddir
+
+ # This can happen if the pre-clean phase triggers
+ # die_hooks for some reason, and PORTAGE_BUILDDIR
+ # doesn't exist yet.
+ if need_builddir and \
+ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+ msg = _("The ebuild phase '%s' has been aborted "
+ "since PORTAGE_BUILDDIR does not exist: '%s'") % \
+ (self.phase, self.settings['PORTAGE_BUILDDIR'])
+ self._eerror(textwrap.wrap(msg, 72))
+ self._set_returncode((self.pid, 1 << 8))
+ self._async_wait()
+ return
+
+ # Check if the cgroup hierarchy is in place. If it's not, mount it.
+ if (os.geteuid() == 0 and platform.system() == 'Linux'
+ and 'cgroup' in self.settings.features
+ and self.phase not in self._phases_without_cgroup):
+ cgroup_root = '/sys/fs/cgroup'
+ cgroup_portage = os.path.join(cgroup_root, 'portage')
+ cgroup_path = os.path.join(cgroup_portage,
+ '%s:%s' % (self.settings["CATEGORY"],
+ self.settings["PF"]))
+ try:
+ # cgroup tmpfs
+ if not os.path.ismount(cgroup_root):
+ # we expect /sys/fs to be there already
+ if not os.path.isdir(cgroup_root):
+ os.mkdir(cgroup_root, 0o755)
+ subprocess.check_call(['mount', '-t', 'tmpfs',
+ '-o', 'rw,nosuid,nodev,noexec,mode=0755',
+ 'tmpfs', cgroup_root])
+
+ # portage subsystem
+ if not os.path.ismount(cgroup_portage):
+ if not os.path.isdir(cgroup_portage):
+ os.mkdir(cgroup_portage, 0o755)
+ subprocess.check_call(['mount', '-t', 'cgroup',
+ '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
+ 'tmpfs', cgroup_portage])
+
+ # the ebuild cgroup
+ if not os.path.isdir(cgroup_path):
+ os.mkdir(cgroup_path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ self.cgroup = cgroup_path
+
+ if self.background:
+ # Automatically prevent color codes from showing up in logs,
+ # since we're not displaying to a terminal anyway.
+ self.settings['NOCOLOR'] = 'true'
+
+ if self._enable_ipc_daemon:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+ if self.phase not in self._phases_without_builddir:
+ if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._build_dir.lock()
+ self.settings['PORTAGE_IPC_DAEMON'] = "1"
+ self._start_ipc_daemon()
+ else:
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ else:
+ # Since the IPC daemon is disabled, use a simple tempfile based
+ # approach to detect unexpected exit like in bug #190128.
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ if self.phase not in self._phases_without_builddir:
+ exit_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ '.exit_status')
+ self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
+ try:
+ os.unlink(exit_file)
+ except OSError:
+ if os.path.exists(exit_file):
+ # make sure it doesn't exist
+ raise
+ else:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ null_fd = None
+ if 0 not in self.fd_pipes and \
+ self.phase not in self._phases_interactive_whitelist and \
+ "interactive" not in self.settings.get("PROPERTIES", "").split():
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ self.fd_pipes[0] = null_fd
+
+ try:
+ SpawnProcess._start(self)
+ finally:
+ if null_fd is not None:
+ os.close(null_fd)
+
+ def _init_ipc_fifos(self):
+
+ input_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_out')
+
+ for p in (input_fifo, output_fifo):
+
+ st = None
+ try:
+ st = os.lstat(p)
+ except OSError:
+ os.mkfifo(p)
+ else:
+ if not stat.S_ISFIFO(st.st_mode):
+ st = None
+ try:
+ os.unlink(p)
+ except OSError:
+ pass
+ os.mkfifo(p)
+
+ apply_secpass_permissions(p,
+ uid=os.getuid(),
+ gid=portage.data.portage_gid,
+ mode=0o770, stat_cached=st)
+
+ return (input_fifo, output_fifo)
+
+ def _start_ipc_daemon(self):
+ self._exit_command = ExitCommand()
+ self._exit_command.reply_hook = self._exit_command_callback
+ query_command = QueryCommand(self.settings, self.phase)
+ commands = {
+ 'available_eclasses' : query_command,
+ 'best_version' : query_command,
+ 'eclass_path' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ 'license_path' : query_command,
+ 'master_repositories' : query_command,
+ 'repository_path' : query_command,
+ }
+ input_fifo, output_fifo = self._init_ipc_fifos()
+ self._ipc_daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=self.scheduler)
+ self._ipc_daemon.start()
+
+ def _exit_command_callback(self):
+ if self._registered:
+ # Let the process exit naturally, if possible.
+ self._exit_timeout_id = \
+ self.scheduler.timeout_add(self._exit_timeout,
+ self._exit_command_timeout_cb)
+
+ def _exit_command_timeout_cb(self):
+ if self._registered:
+ # If it doesn't exit naturally in a reasonable amount
+ # of time, kill it (solves bug #278895). We try to avoid
+ # this when possible since it makes sandbox complain about
+ # being killed by a signal.
+ self.cancel()
+ self._exit_timeout_id = \
+ self.scheduler.timeout_add(self._cancel_timeout,
+ self._cancel_timeout_cb)
+ else:
+ self._exit_timeout_id = None
+
+ return False # only run once
+
+ def _cancel_timeout_cb(self):
+ self._exit_timeout_id = None
+ self.wait()
+ return False # only run once
+
+ def _orphan_process_warn(self):
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' with pid %s appears "
+ "to have left an orphan process running in the "
+ "background.") % (phase, self.pid)
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _pipe(self, fd_pipes):
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _can_log(self, slave_fd):
+ # With sesandbox, logging works through a pty but not through a
+ # normal pipe. So, disable logging if ptys are broken.
+ # See Bug #162404.
+ # TODO: Add support for logging via named pipe (fifo) with
+ # sesandbox, since EbuildIpcDaemon uses a fifo and it's known
+ # to be compatible with sesandbox.
+ return not ('sesandbox' in self.settings.features \
+ and self.settings.selinux_enabled()) or os.isatty(slave_fd)
+
+ def _killed_by_signal(self, signum):
+ msg = _("The ebuild phase '%s' has been "
+ "killed by signal %s.") % (self.phase, signum)
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _unexpected_exit(self):
+
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' has exited "
+ "unexpectedly. This type of behavior "
+ "is known to be triggered "
+ "by things such as failed variable "
+ "assignments (bug #190128) or bad substitution "
+ "errors (bug #200313). Normally, before exiting, bash should "
+ "have displayed an error message above. If bash did not "
+ "produce an error message above, it's possible "
+ "that the ebuild has called `exit` when it "
+ "should have called `die` instead. This behavior may also "
+ "be triggered by a corrupt bash binary or a hardware "
+ "problem such as memory or cpu malfunction. If the problem is not "
+ "reproducible or it appears to occur randomly, then it is likely "
+ "to be triggered by a hardware problem. "
+ "If you suspect a hardware problem then you should "
+ "try some basic hardware diagnostics such as memtest. "
+ "Please do not report this as a bug unless it is consistently "
+ "reproducible and you are sure that your bash binary and hardware "
+ "are functioning properly.") % phase
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _eerror(self, lines):
+ self._elog('eerror', lines)
+
+ def _elog(self, elog_funcname, lines):
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path)
+
+ def _log_poll_exception(self, event):
+ self._elog("eerror",
+ ["%s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,)])
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+
+ if self._exit_timeout_id is not None:
+ self.scheduler.source_remove(self._exit_timeout_id)
+ self._exit_timeout_id = None
+
+ if self._ipc_daemon is not None:
+ self._ipc_daemon.cancel()
+ if self._exit_command.exitcode is not None:
+ self.returncode = self._exit_command.exitcode
+ else:
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
+ if self._build_dir is not None:
+ self._build_dir.unlock()
+ self._build_dir = None
+ elif not self.cancelled:
+ exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
+ if exit_file and not os.path.exists(exit_file):
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
diff --git a/usr/lib/portage/pym/_emerge/AbstractPollTask.py b/usr/lib/portage/pym/_emerge/AbstractPollTask.py
new file mode 100644
index 0000000..3f6dd6c
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AbstractPollTask.py
@@ -0,0 +1,154 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import errno
+import logging
+import os
+
+from portage.util import writemsg_level
+from _emerge.AsynchronousTask import AsynchronousTask
+
+class AbstractPollTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + \
+ ("_registered",)
+
+ _bufsize = 4096
+
+ @property
+ def _exceptional_events(self):
+ return self.scheduler.IO_ERR | self.scheduler.IO_NVAL
+
+ @property
+ def _registered_events(self):
+ return self.scheduler.IO_IN | self.scheduler.IO_HUP | \
+ self._exceptional_events
+
+ def isAlive(self):
+ return bool(self._registered)
+
+ def _read_array(self, f, event):
+ """
+ NOTE: array.fromfile() is used here only for testing purposes,
+ because it has bugs in all known versions of Python (including
+ Python 2.7 and Python 3.2). See PipeReaderArrayTestCase.
+
+ | POLLIN | RETURN
+ | BIT | VALUE
+ | ---------------------------------------------------
+ | 1 | Read self._bufsize into an instance of
+ | | array.array('B') and return it, handling
+ | | EOFError and IOError. An empty array
+ | | indicates EOF.
+ | ---------------------------------------------------
+ | 0 | None
+ """
+ buf = None
+ if event & self.scheduler.IO_IN:
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except EOFError:
+ pass
+ except TypeError:
+ # Python 3.2:
+ # TypeError: read() didn't return bytes
+ pass
+ except IOError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ pass
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
+ if buf is not None:
+ try:
+ # Python >=3.2
+ buf = buf.tobytes()
+ except AttributeError:
+ buf = buf.tostring()
+
+ return buf
+
+ def _read_buf(self, fd, event):
+ """
+ | POLLIN | RETURN
+ | BIT | VALUE
+ | ---------------------------------------------------
+ | 1 | Read self._bufsize into a string of bytes,
+ | | handling EAGAIN and EIO. An empty string
+ | | of bytes indicates EOF.
+ | ---------------------------------------------------
+ | 0 | None
+ """
+ # NOTE: array.fromfile() is no longer used here because it has
+ # bugs in all known versions of Python (including Python 2.7
+ # and Python 3.2).
+ buf = None
+ if event & self.scheduler.IO_IN:
+ try:
+ buf = os.read(fd, self._bufsize)
+ except OSError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ buf = b''
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
+ return buf
+
+ def _unregister(self):
+ raise NotImplementedError(self)
+
+ def _log_poll_exception(self, event):
+ writemsg_level(
+ "!!! %s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _unregister_if_appropriate(self, event):
+ if self._registered:
+ if event & self._exceptional_events:
+ self._log_poll_exception(event)
+ self._unregister()
+ self.cancel()
+ self.wait()
+ elif event & self.scheduler.IO_HUP:
+ self._unregister()
+ self.wait()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ return self.returncode
+
+ def _wait_loop(self, timeout=None):
+
+ if timeout is None:
+ while self._registered:
+ self.scheduler.iteration()
+ return
+
+ def timeout_cb():
+ timeout_cb.timed_out = True
+ return False
+ timeout_cb.timed_out = False
+ timeout_cb.timeout_id = self.scheduler.timeout_add(timeout, timeout_cb)
+
+ try:
+ while self._registered and not timeout_cb.timed_out:
+ self.scheduler.iteration()
+ finally:
+ self.scheduler.source_remove(timeout_cb.timeout_id)
diff --git a/usr/lib/portage/pym/_emerge/AsynchronousLock.py b/usr/lib/portage/pym/_emerge/AsynchronousLock.py
new file mode 100644
index 0000000..c0b9b26
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AsynchronousLock.py
@@ -0,0 +1,286 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import dummy_threading
+import fcntl
+import errno
+import logging
+import sys
+
+try:
+ import threading
+except ImportError:
+ threading = dummy_threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.SpawnProcess import SpawnProcess
+
+class AsynchronousLock(AsynchronousTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using either a thread (if available) or a subprocess.
+
+ The default behavior is to use a process instead of a thread, since
+ there is currently no way to interrupt a thread that is waiting for
+ a lock (notably, SIGINT doesn't work because python delivers all
+ signals to the main thread).
+ """
+
+ __slots__ = ('path', 'scheduler',) + \
+ ('_imp', '_force_async', '_force_dummy', '_force_process', \
+ '_force_thread')
+
+ _use_process_by_default = True
+
+ def _start(self):
+
+ if not self._force_async:
+ try:
+ self._imp = lockfile(self.path,
+ wantnewlockfile=True, flags=os.O_NONBLOCK)
+ except TryAgain:
+ pass
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if self._force_process or \
+ (not self._force_thread and \
+ (self._use_process_by_default or threading is dummy_threading)):
+ self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
+ else:
+ self._imp = _LockThread(path=self.path,
+ scheduler=self.scheduler,
+ _force_dummy=self._force_dummy)
+
+ self._imp.addExitListener(self._imp_exit)
+ self._imp.start()
+
+ def _imp_exit(self, imp):
+ # call exit listeners
+ self.wait()
+
+ def _cancel(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.cancel()
+
+ def _poll(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.poll()
+ return self.returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self.returncode = self._imp.wait()
+ return self.returncode
+
+ def unlock(self):
+ if self._imp is None:
+ raise AssertionError('not locked')
+ if isinstance(self._imp, (_LockProcess, _LockThread)):
+ self._imp.unlock()
+ else:
+ unlockfile(self._imp)
+ self._imp = None
+
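+# Usage sketch (illustrative; assumes an event-loop scheduler object):
+#   lock = AsynchronousLock(path='/some/dir.lock', scheduler=sched)
+#   lock.start()
+#   if lock.wait() == os.EX_OK:
+#       try:
+#           pass  # critical section
+#       finally:
+#           lock.unlock()
+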
+class _LockThread(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a background thread. After the lock is acquired, the thread
+ writes to a pipe in order to notify a poll loop running in the main
+ thread.
+
+ If the threading module is unavailable then the dummy_threading
+ module will be used, and the lock will be acquired synchronously
+ (before the start() method returns).
+ """
+
+ __slots__ = ('path',) + \
+ ('_force_dummy', '_lock_obj', '_thread',)
+
+ def _start(self):
+ self._registered = True
+ threading_mod = threading
+ if self._force_dummy:
+ threading_mod = dummy_threading
+ self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.daemon = True
+ self._thread.start()
+
+ def _run_lock(self):
+ self._lock_obj = lockfile(self.path, wantnewlockfile=True)
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._run_lock_cb)
+
+ def _run_lock_cb(self):
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
+
+ def _cancel(self):
+ # There's currently no way to force thread termination.
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ unlockfile(self._lock_obj)
+ self._lock_obj = None
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._thread is not None:
+ self._thread.join()
+ self._thread = None
+
+class _LockProcess(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a subprocess. After the lock is acquired, the process
+ writes to a pipe in order to notify a poll loop running in the main
+ process. The unlock() method notifies the subprocess to release the
+ lock and exit.
+ """
+
+ __slots__ = ('path',) + \
+ ('_acquired', '_kill_test', '_proc', '_files', '_reg_id', '_unlocked')
+
+ def _start(self):
+ in_pr, in_pw = os.pipe()
+ out_pr, out_pw = os.pipe()
+ self._files = {}
+ self._files['pipe_in'] = in_pr
+ self._files['pipe_out'] = out_pw
+
+ fcntl.fcntl(in_pr, fcntl.F_SETFL,
+ fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(in_pr, fcntl.F_SETFD,
+ fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(in_pr,
+ self.scheduler.IO_IN, self._output_handler)
+ self._registered = True
+ self._proc = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
+ env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
+ scheduler=self.scheduler)
+ self._proc.addExitListener(self._proc_exit)
+ self._proc.start()
+ os.close(out_pr)
+ os.close(in_pw)
+
+ def _proc_exit(self, proc):
+
+ if self._files is not None:
+ # Close pipe_out if it's still open, since it's useless
+ # after the process has exited. This helps to avoid
+ # "ResourceWarning: unclosed file" since Python 3.2.
+ try:
+ pipe_out = self._files.pop('pipe_out')
+ except KeyError:
+ pass
+ else:
+ os.close(pipe_out)
+
+ if proc.returncode != os.EX_OK:
+ # Typically, this will happen due to the
+ # process being killed by a signal.
+
+ if not self._acquired:
+ # If the lock hasn't been acquired yet, the
+ # caller can check the returncode and handle
+ # this failure appropriately.
+ if not (self.cancelled or self._kill_test):
+ writemsg_level("_LockProcess: %s\n" % \
+ _("failed to acquire lock on '%s'") % (self.path,),
+ level=logging.ERROR, noiselevel=-1)
+ self._unregister()
+ self.returncode = proc.returncode
+ self.wait()
+ return
+
+ if not self.cancelled and \
+ not self._unlocked:
+ # We don't want lost locks going unnoticed, so it's
+ # only safe to ignore if either the cancel() or
+ # unlock() methods have been previously called.
+ raise AssertionError("lock process failed with returncode %s" \
+ % (proc.returncode,))
+
+ def _cancel(self):
+ if self._proc is not None:
+ self._proc.cancel()
+
+ def _poll(self):
+ if self._proc is not None:
+ self._proc.poll()
+ return self.returncode
+
+ def _output_handler(self, f, event):
+ buf = None
+ if event & self.scheduler.IO_IN:
+ try:
+ buf = os.read(self._files['pipe_in'], self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN,):
+ raise
+ if buf:
+ self._acquired = True
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+
+ return True
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ try:
+ pipe_in = self._files.pop('pipe_in')
+ except KeyError:
+ pass
+ else:
+ os.close(pipe_in)
+
+ def unlock(self):
+ if self._proc is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ if self.returncode != os.EX_OK:
+ raise AssertionError("lock process failed with returncode %s" \
+ % (self.returncode,))
+ self._unlocked = True
+ os.write(self._files['pipe_out'], b'\0')
+ os.close(self._files['pipe_out'])
+ self._files = None
+ self._proc.wait()
+ self._proc = None
diff --git a/usr/lib/portage/pym/_emerge/AsynchronousTask.py b/usr/lib/portage/pym/_emerge/AsynchronousTask.py
new file mode 100644
index 0000000..da58261
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AsynchronousTask.py
@@ -0,0 +1,176 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+
+from portage import os
+from portage.util.SlotObject import SlotObject
+
+class AsynchronousTask(SlotObject):
+ """
+ Subclasses override _wait() and _poll() so that calls
+ to public methods can be wrapped for implementing
+ hooks such as exit listener notification.
+
+ Subclasses should call self.wait() to notify exit listeners after
+ the task is complete and self.returncode has been set.
+ """
+
+ __slots__ = ("background", "cancelled", "returncode") + \
+ ("_exit_listeners", "_exit_listener_stack", "_start_listeners",
+ "_waiting")
+
+ _cancelled_returncode = - signal.SIGINT
+
+ def start(self):
+ """
+ Start an asynchronous task and then return as soon as possible.
+ """
+ self._start_hook()
+ self._start()
+
+ def _start(self):
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def isAlive(self):
+ return self.returncode is None
+
+ def poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._poll()
+ self._wait_hook()
+ return self.returncode
+
+ def _poll(self):
+ return self.returncode
+
+ def wait(self):
+ if self.returncode is None:
+ if not self._waiting:
+ self._waiting = True
+ try:
+ self._wait()
+ finally:
+ self._waiting = False
+ self._wait_hook()
+ return self.returncode
+
+ def _wait(self):
+ return self.returncode
+
+ def _async_wait(self):
+ """
+ For cases where _start exits synchronously, this method is a
+ convenient way to trigger an asynchronous call to self.wait()
+ (in order to notify exit listeners), avoiding excessive event
+ loop recursion (or stack overflow) that synchronous calling of
+ exit listeners can cause. This method is thread-safe.
+ """
+ self.scheduler.idle_add(self._async_wait_cb)
+
+ def _async_wait_cb(self):
+ self.wait()
+ return False
+
+ def cancel(self):
+ """
+ Cancel the task, but do not wait for exit status. If asynchronous exit
+ notification is desired, then use addExitListener to add a listener
+ before calling this method.
+ NOTE: Synchronous waiting for status is not supported, since it would
+ be vulnerable to hitting the recursion limit when a large number of
+ tasks need to be terminated simultaneously, like in bug #402335.
+ """
+ if not self.cancelled:
+ self.cancelled = True
+ self._cancel()
+
+ def _cancel(self):
+ """
+ Subclasses should implement this, as a template method
+ to be called by AsynchronousTask.cancel().
+ """
+ pass
+
+ def _was_cancelled(self):
+ """
+ If cancelled, set returncode if necessary and return True.
+ Otherwise, return False.
+ """
+ if self.cancelled:
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ return True
+ return False
+
+ def addStartListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._start_listeners is None:
+ self._start_listeners = []
+ self._start_listeners.append(f)
+
+ def removeStartListener(self, f):
+ if self._start_listeners is None:
+ return
+ self._start_listeners.remove(f)
+
+ def _start_hook(self):
+ if self._start_listeners is not None:
+ start_listeners = self._start_listeners
+ self._start_listeners = None
+
+ for f in start_listeners:
+ f(self)
+
+ def addExitListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._exit_listeners is None:
+ self._exit_listeners = []
+ self._exit_listeners.append(f)
+
+ def removeExitListener(self, f):
+ if self._exit_listeners is None:
+ if self._exit_listener_stack is not None:
+ self._exit_listener_stack.remove(f)
+ return
+ self._exit_listeners.remove(f)
+
+ def _wait_hook(self):
+ """
+ Call this method after the task completes, just before returning
+ the returncode from wait() or poll(). This hook is
+ used to trigger exit listeners when the returncode first
+ becomes available.
+ """
+ if self.returncode is not None and \
+ self._exit_listeners is not None:
+
+ # This prevents recursion, in case one of the
+ # exit handlers triggers this method again by
+ # calling wait(). Use a stack that gives
+ # removeExitListener() an opportunity to consume
+ # listeners from the stack, before they can get
+ # called below. This is necessary because a call
+ # to one exit listener may result in a call to
+ # removeExitListener() for another listener on
+ # the stack. That listener needs to be removed
+ # from the stack since it would be inconsistent
+ # to call it after it has been passed into
+ # removeExitListener().
+ self._exit_listener_stack = self._exit_listeners
+ self._exit_listeners = None
+
+ # Execute exit listeners in reverse order, so that
+ # the last added listener is executed first. This
+ # allows SequentialTaskQueue to decrement its running
+ # task count as soon as one of its tasks exits, so that
+ # the value is accurate when other listeners execute.
+ while self._exit_listener_stack:
+ self._exit_listener_stack.pop()(self)
+
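+# Lifecycle sketch (illustrative): subclasses override _start()/_wait(),
+# and exit listeners fire once returncode becomes available:
+#
+#   task = SomeTaskSubclass(...)  # hypothetical subclass
+#   task.addExitListener(lambda t: print(t.returncode))
+#   task.start()
+#   task.wait()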
diff --git a/usr/lib/portage/pym/_emerge/AtomArg.py b/usr/lib/portage/pym/_emerge/AtomArg.py
new file mode 100644
index 0000000..343d7aa
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/AtomArg.py
@@ -0,0 +1,14 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage._sets.base import InternalPackageSet
+from _emerge.DependencyArg import DependencyArg
+
+class AtomArg(DependencyArg):
+
+ __slots__ = ('atom', 'pset')
+
+ def __init__(self, atom=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.atom = atom
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)
diff --git a/usr/lib/portage/pym/_emerge/Binpkg.py b/usr/lib/portage/pym/_emerge/Binpkg.py
new file mode 100644
index 0000000..ded6dfd
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Binpkg.py
@@ -0,0 +1,402 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import _emerge.emergelog
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.SpawnProcess import SpawnProcess
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import ensure_dirs, writemsg
+import portage
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+import textwrap
+from portage.output import colorize
+
+class Binpkg(CompositeTask):
+
+ __slots__ = ("find_blockers",
+ "ldpath_mtimes", "logger", "opts",
+ "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+ ("_bintree", "_build_dir", "_build_prefix",
+ "_ebuild_path", "_fetched_pkg",
+ "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+ self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+ settings.setcpv(pkg)
+ self._tree = "bintree"
+ self._bintree = self.pkg.root_config.trees[self._tree]
+ self._verify = not self.opts.pretend
+
+ # Use realpath like doebuild_environment() does, since we assert
+ # that this path is literally identical to PORTAGE_BUILDDIR.
+ dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", pkg.category, pkg.pf)
+ self._image_dir = os.path.join(dir_path, "image")
+ self._infloc = os.path.join(dir_path, "build-info")
+ self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+ settings["EBUILD"] = self._ebuild_path
+ portage.doebuild_environment(self._ebuild_path, 'setup',
+ settings=self.settings, db=self._bintree.dbapi)
+ if dir_path != self.settings['PORTAGE_BUILDDIR']:
+ raise AssertionError("'%s' != '%s'" % \
+ (dir_path, self.settings['PORTAGE_BUILDDIR']))
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if eapi_exports_replace_vars(settings["EAPI"]):
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(x) \
+ for x in vardb.match(self.pkg.slot_atom) + \
+ vardb.match('='+self.pkg.cpv)))
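+			# Illustrative (versions hypothetical): if foo-1.0 and foo-1.1
+			# both occupy this slot, REPLACING_VERSIONS becomes "1.0 1.1"
+			# (built from a set, so the order is unspecified).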
+
+ # The prefetcher has already completed or it
+ # could be running now. If it's running now,
+ # wait for it to complete since it holds
+ # a lock on the file being fetched. The
+ # portage.locks functions are only designed
+ # to work between separate processes. Since
+ # the lock is held by the current process,
+ # use the scheduler and fetcher methods to
+ # synchronize with the fetcher.
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = ("Fetching '%s' " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f %s` in another terminal.") \
+ % (prefetcher.pkg_path, os.path.join(
+ _emerge.emergelog._emerge_log_dir, "emerge-fetch.log"))
+ msg_prefix = colorize("GOOD", " * ")
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in textwrap.wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _prefetch_exit(self, prefetcher):
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ if not (self.opts.pretend or self.opts.fetchonly):
+ self._build_dir.lock()
+ # Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ # If necessary, discard old log so that we don't
+ # append to it.
+ self._build_dir.clean_log()
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+ pretend=self.opts.pretend, scheduler=self.scheduler)
+ pkg_path = fetcher.pkg_path
+ self._pkg_path = pkg_path
+ # This gives bashrc users an opportunity to do various things
+ # such as remove binary packages after they're installed.
+ self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+
+ if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+
+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Fetch" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetcher_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+ return
+
+ self._fetcher_exit(fetcher)
+
+ def _fetcher_exit(self, fetcher):
+
+ # The fetcher only has a returncode when
+ # --getbinpkg is enabled.
+ if fetcher.returncode is not None:
+ self._fetched_pkg = True
+ if self._default_exit(fetcher) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.pretend:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ verifier = None
+ if self._verify:
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=logfile, pkg=self.pkg, scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+ return
+
+ self._verifier_exit(verifier)
+
+ def _verifier_exit(self, verifier):
+ if verifier is not None and \
+ self._default_exit(verifier) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ pkg_path = self._pkg_path
+
+ if self._fetched_pkg:
+ self._bintree.inject(pkg.cpv, filename=pkg_path)
+
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ if logfile is not None and os.path.isfile(logfile):
+ # Remove fetch log after successful fetch.
+ try:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if self.opts.fetchonly:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ phase = "clean"
+ settings = self.settings
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=settings)
+
+ self._start_task(ebuild_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._default_exit(clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ dir_path = self.settings['PORTAGE_BUILDDIR']
+
+ infloc = self._infloc
+ pkg = self.pkg
+ pkg_path = self._pkg_path
+
+ dir_mode = 0o755
+ for mydir in (dir_path, self._image_dir, infloc):
+ portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+ gid=portage.data.portage_gid, mode=dir_mode)
+
+ # This initializes PORTAGE_LOG_FILE.
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ self._writemsg_level(">>> Extracting info\n")
+
+ pkg_xpak = portage.xpak.tbz2(self._pkg_path)
+ check_missing_metadata = ("CATEGORY", "PF")
+ missing_metadata = set()
+ for k in check_missing_metadata:
+ v = pkg_xpak.getfile(_unicode_encode(k,
+ encoding=_encodings['repo.content']))
+ if not v:
+ missing_metadata.add(k)
+
+ pkg_xpak.unpackinfo(infloc)
+ for k in missing_metadata:
+ if k == "CATEGORY":
+ v = pkg.category
+ elif k == "PF":
+ v = pkg.pf
+ else:
+ continue
+
+ f = io.open(_unicode_encode(os.path.join(infloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'],
+ errors='backslashreplace')
+ try:
+ f.write(_unicode_decode(v + "\n"))
+ finally:
+ f.close()
+
+ # Store the md5sum in the vdb.
+ f = io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='strict')
+ try:
+ f.write(_unicode_decode(
+ str(portage.checksum.perform_md5(pkg_path)) + "\n"))
+ finally:
+ f.close()
+
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(env_extractor, self._env_extractor_exit)
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=self.scheduler,
+ settings=self.settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ extractor = BinpkgExtractorAsync(background=self.background,
+ env=self.settings.environ(),
+ features=self.settings.features,
+ image_dir=self._image_dir,
+ pkg=self.pkg, pkg_path=self._pkg_path,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"),
+ scheduler=self.scheduler)
+ self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+ self._start_task(extractor, self._extractor_exit)
+
+ def _extractor_exit(self, extractor):
+ if self._default_exit(extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self._writemsg_level("!!! Error Extracting '%s'\n" % \
+ self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self.wait()
+ return
+
+ try:
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ self._build_prefix = f.read().rstrip('\n')
+ except IOError:
+ self._build_prefix = ""
+
+ if self._build_prefix == self.settings["EPREFIX"]:
+ ensure_dirs(self.settings["ED"])
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
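+		# Illustrative (paths hypothetical): this runs roughly
+		#   python .../portage/bin/chpathtool.py ${D} /old/prefix /new/prefix
+		# rewriting the binary package's embedded prefix in place.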
+ chpathtool = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
+ self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
+ background=self.background, env=env,
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+ self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
+ self._start_task(chpathtool, self._chpathtool_exit)
+
+ def _chpathtool_exit(self, chpathtool):
+ if self._final_exit(chpathtool) != os.EX_OK:
+ self._unlock_builddir()
+ self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
+ (self.settings["EPREFIX"],),
+ noiselevel=-1, level=logging.ERROR)
+ self.wait()
+ return
+
+ # We want to install in "our" prefix, not the binary one
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['repo.content'], errors='strict') as f:
+ f.write(self.settings["EPREFIX"] + "\n")
+
+ # Move the files to the correct location for merge.
+ image_tmp_dir = os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], "image_tmp")
+ build_d = os.path.join(self.settings["D"],
+ self._build_prefix.lstrip(os.sep))
+ if not os.path.isdir(build_d):
+ # Assume this is a virtual package or something.
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(self.settings["ED"])
+ else:
+ os.rename(build_d, image_tmp_dir)
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
+ os.rename(image_tmp_dir, self.settings["ED"])
+
+ self.wait()
+
+ def _unlock_builddir(self):
+ if self.opts.pretend or self.opts.fetchonly:
+ return
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def create_install_task(self):
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
+ ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+ pkg=self.pkg, pkg_count=self.pkg_count,
+ pkg_path=self._pkg_path, scheduler=self.scheduler,
+ settings=self.settings, tree=self._tree,
+ world_atom=self.world_atom)
+ return task
+
+ def _install_exit(self, task):
+ self.settings.pop("PORTAGE_BINPKG_FILE", None)
+ self._unlock_builddir()
+ if task.returncode == os.EX_OK and \
+ 'binpkg-logs' not in self.settings.features and \
+ self.settings.get("PORTAGE_LOG_FILE"):
+ try:
+ os.unlink(self.settings["PORTAGE_LOG_FILE"])
+ except OSError:
+ pass
diff --git a/usr/lib/portage/pym/_emerge/BinpkgEnvExtractor.py b/usr/lib/portage/pym/_emerge/BinpkgEnvExtractor.py
new file mode 100644
index 0000000..5ba1495
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BinpkgEnvExtractor.py
@@ -0,0 +1,66 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+from portage import os, _shell_quote, _unicode_encode
+from portage.const import BASH_BINARY
+
+class BinpkgEnvExtractor(CompositeTask):
+ """
+ Extract environment.bz2 for a binary or installed package.
+ """
+ __slots__ = ('settings',)
+
+ def saved_env_exists(self):
+ return os.path.exists(self._get_saved_env_path())
+
+ def dest_env_exists(self):
+ return os.path.exists(self._get_dest_env_path())
+
+ def _get_saved_env_path(self):
+ return os.path.join(os.path.dirname(self.settings['EBUILD']),
+ "environment.bz2")
+
+ def _get_dest_env_path(self):
+ return os.path.join(self.settings["T"], "environment")
+
+ def _start(self):
+ saved_env_path = self._get_saved_env_path()
+ dest_env_path = self._get_dest_env_path()
+ shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
+ (_shell_quote(saved_env_path),
+ _shell_quote(dest_env_path))
+ extractor_proc = SpawnProcess(
+ args=[BASH_BINARY, "-c", shell_cmd],
+ background=self.background,
+ env=self.settings.environ(),
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+
+ self._start_task(extractor_proc, self._extractor_exit)
+
+ def _remove_dest_env(self):
+ try:
+ os.unlink(self._get_dest_env_path())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def _extractor_exit(self, extractor_proc):
+
+ if self._default_exit(extractor_proc) != os.EX_OK:
+ self._remove_dest_env()
+ self.wait()
+ return
+
+ # This is a signal to ebuild.sh, so that it knows to filter
+ # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
+ # would be preserved between normal phases.
+ open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'wb').close()
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
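+
+# Illustrative driver (simplified from Binpkg._clean_exit() in this tree):
+#
+#     env_extractor = BinpkgEnvExtractor(background=False,
+#         scheduler=scheduler, settings=settings)
+#     env_extractor.start()
+#     env_extractor.wait()  # on success, ${T}/environment and the empty
+#                           # ${T}/environment.raw marker both exist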
diff --git a/usr/lib/portage/pym/_emerge/BinpkgExtractorAsync.py b/usr/lib/portage/pym/_emerge/BinpkgExtractorAsync.py
new file mode 100644
index 0000000..be74c2f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BinpkgExtractorAsync.py
@@ -0,0 +1,39 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SpawnProcess import SpawnProcess
+import portage
+import signal
+import subprocess
+
+class BinpkgExtractorAsync(SpawnProcess):
+
+ __slots__ = ("features", "image_dir", "pkg", "pkg_path")
+
+ _shell_binary = portage.const.BASH_BINARY
+
+ def _start(self):
+ tar_options = ""
+ if "xattr" in self.features:
+ process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = process.communicate()[0]
+ if b"--xattrs" in output:
+ tar_options = "--xattrs"
+
+ # Add -q to bzip2 opts, in order to avoid "trailing garbage after
+ # EOF ignored" warning messages due to xpak trailer.
+ # SIGPIPE handling (128 + SIGPIPE) should be compatible with
+ # assert_sigpipe_ok() that's used by the ebuild unpack() helper.
+ self.args = [self._shell_binary, "-c",
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp %s -C %s -f - ; " + \
+ "p=(${PIPESTATUS[@]}) ; " + \
+ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
+ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
+ "if [ ${p[1]} != 0 ] ; then " + \
+ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
+ "exit 0 ;") % \
+ (portage._shell_quote(self.pkg_path),
+ tar_options,
+ portage._shell_quote(self.image_dir))]
+
+ SpawnProcess._start(self)
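+
+# For reference (paths hypothetical), the command assembled above expands to
+# roughly:
+#
+#     bzip2 -d -cq -- /path/to/pkg.tbz2 | tar -xp --xattrs -C /image/dir -f -
+#
+# followed by the PIPESTATUS checks, so that bzip2 exiting via SIGPIPE
+# (128 + 13 on Linux) is tolerated while any other failure propagates.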
diff --git a/usr/lib/portage/pym/_emerge/BinpkgFetcher.py b/usr/lib/portage/pym/_emerge/BinpkgFetcher.py
new file mode 100644
index 0000000..543881e
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BinpkgFetcher.py
@@ -0,0 +1,184 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SpawnProcess import SpawnProcess
+try:
+ from urllib.parse import urlparse as urllib_parse_urlparse
+except ImportError:
+ from urlparse import urlparse as urllib_parse_urlparse
+import stat
+import sys
+import portage
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BinpkgFetcher(SpawnProcess):
+
+ __slots__ = ("pkg", "pretend",
+ "locked", "pkg_path", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ pkg = self.pkg
+ self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
+
+ def _start(self):
+
+ pkg = self.pkg
+ pretend = self.pretend
+ bintree = pkg.root_config.trees["bintree"]
+ settings = bintree.settings
+ use_locks = "distlocks" in settings.features
+ pkg_path = self.pkg_path
+
+ if not pretend:
+ portage.util.ensure_dirs(os.path.dirname(pkg_path))
+ if use_locks:
+ self.lock()
+ exists = os.path.exists(pkg_path)
+ resume = exists and os.path.basename(pkg_path) in bintree.invalids
+ if not (pretend or resume):
+ # Remove existing file or broken symlink.
+ try:
+ os.unlink(pkg_path)
+ except OSError:
+ pass
+
+ # urljoin doesn't work correctly with
+ # unrecognized protocols like sftp
+ if bintree._remote_has_index:
+ rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
+ if not rel_uri:
+ rel_uri = pkg.cpv + ".tbz2"
+ remote_base_uri = bintree._remotepkgs[pkg.cpv]["BASE_URI"]
+ uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+ else:
+ uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
+ "/" + pkg.pf + ".tbz2"
+
+ if pretend:
+ portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self._async_wait()
+ return
+
+ protocol = urllib_parse_urlparse(uri)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = settings.get(fcmd_prefix)
+
+ fcmd_vars = {
+ "DISTDIR" : os.path.dirname(pkg_path),
+ "URI" : uri,
+ "FILE" : os.path.basename(pkg_path)
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = settings[k]
+ except KeyError:
+ pass
+
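+		# Illustrative expansion (FETCHCOMMAND value hypothetical): with
+		# FETCHCOMMAND='wget -O "${DISTDIR}/${FILE}" "${URI}"', shlex_split()
+		# plus varexpand() below yield something like
+		# ['wget', '-O', '/pkgdir/foo-1.0.tbz2', 'http://host/foo-1.0.tbz2'].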
+ fetch_env = dict(settings.items())
+ fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
+ for x in portage.util.shlex_split(fcmd)]
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+		# can send its own message to stderr).
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stdout__.fileno())
+
+ self.args = fetch_args
+ self.env = fetch_env
+ if settings.selinux_enabled():
+ self._selinux_type = settings["PORTAGE_FETCH_T"]
+ SpawnProcess._start(self)
+
+ def _pipe(self, fd_pipes):
+		"""When appropriate, use a pty so that fetcher progress bars,
+		such as the one wget displays, work properly."""
+ if self.background or not sys.__stdout__.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+ if not self.pretend and self.returncode == os.EX_OK:
+ # If possible, update the mtime to match the remote package if
+ # the fetcher didn't already do it automatically.
+ bintree = self.pkg.root_config.trees["bintree"]
+ if bintree._remote_has_index:
+ remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
+ if remote_mtime is not None:
+ try:
+ remote_mtime = long(remote_mtime)
+ except ValueError:
+ pass
+ else:
+ try:
+ local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
+ except OSError:
+ pass
+ else:
+ if remote_mtime != local_mtime:
+ try:
+ os.utime(self.pkg_path,
+ (remote_mtime, remote_mtime))
+ except OSError:
+ pass
+
+ if self.locked:
+ self.unlock()
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ async_lock = AsynchronousLock(path=self.pkg_path,
+ scheduler=self.scheduler)
+ async_lock.start()
+
+ if async_lock.wait() != os.EX_OK:
+ # TODO: Use CompositeTask for better handling, like in EbuildPhase.
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ self._lock_obj = async_lock
+ self.locked = True
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+
diff --git a/usr/lib/portage/pym/_emerge/BinpkgPrefetcher.py b/usr/lib/portage/pym/_emerge/BinpkgPrefetcher.py
new file mode 100644
index 0000000..ffa4900
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BinpkgPrefetcher.py
@@ -0,0 +1,43 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from portage import os
+
+class BinpkgPrefetcher(CompositeTask):
+
+ __slots__ = ("pkg",) + \
+ ("pkg_path", "_bintree",)
+
+ def _start(self):
+ self._bintree = self.pkg.root_config.trees["bintree"]
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self.pkg_path = fetcher.pkg_path
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ if self._default_exit(fetcher) != os.EX_OK:
+ self.wait()
+ return
+
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+
+ def _verifier_exit(self, verifier):
+ if self._default_exit(verifier) != os.EX_OK:
+ self.wait()
+ return
+
+ self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+
diff --git a/usr/lib/portage/pym/_emerge/BinpkgVerifier.py b/usr/lib/portage/pym/_emerge/BinpkgVerifier.py
new file mode 100644
index 0000000..2c69792
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BinpkgVerifier.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.output import EOutput
+from portage.util._async.FileDigester import FileDigester
+from portage.package.ebuild.fetch import _checksum_failure_temp_file
+
+class BinpkgVerifier(CompositeTask):
+ __slots__ = ("logfile", "pkg", "_digests", "_pkg_path")
+
+ def _start(self):
+
+ bintree = self.pkg.root_config.trees["bintree"]
+ digests = bintree._get_digests(self.pkg)
+ if "size" not in digests:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ digests = _filter_unaccelarated_hashes(digests)
+ hash_filter = _hash_filter(
+ bintree.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+
+ self._digests = digests
+ self._pkg_path = bintree.getname(self.pkg.cpv)
+
+ try:
+ size = os.stat(self._pkg_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ self.scheduler.output(("!!! Fetching Binary failed "
+ "for '%s'\n") % self.pkg.cpv, log_path=self.logfile,
+ background=self.background)
+ self.returncode = 1
+ self._async_wait()
+ return
+ else:
+ if size != digests["size"]:
+ self._digest_exception("size", size, digests["size"])
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self._start_task(FileDigester(file_path=self._pkg_path,
+ hash_names=(k for k in digests if k != "size"),
+ background=self.background, logfile=self.logfile,
+ scheduler=self.scheduler),
+ self._digester_exit)
+
+ def _digester_exit(self, digester):
+
+ if self._default_exit(digester) != os.EX_OK:
+ self.wait()
+ return
+
+ for hash_name in digester.hash_names:
+ if digester.digests[hash_name] != self._digests[hash_name]:
+ self._digest_exception(hash_name,
+ digester.digests[hash_name], self._digests[hash_name])
+ self.returncode = 1
+ self.wait()
+ return
+
+ if self.pkg.root_config.settings.get("PORTAGE_QUIET") != "1":
+ self._display_success()
+
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _display_success(self):
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ eout = EOutput()
+ eout.ebegin("%s %s ;-)" % (os.path.basename(self._pkg_path),
+ " ".join(sorted(self._digests))))
+ eout.eend(0)
+
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ self.scheduler.output(out.getvalue(), log_path=self.logfile,
+ background=self.background)
+
+ def _digest_exception(self, name, value, expected):
+
+ head, tail = os.path.split(self._pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+
+ self.scheduler.output((
+ "\n!!! Digest verification failed:\n"
+ "!!! %s\n"
+ "!!! Reason: Failed on %s verification\n"
+ "!!! Got: %s\n"
+ "!!! Expected: %s\n"
+ "File renamed to '%s'\n") %
+ (self._pkg_path, name, value, expected, temp_filename),
+ log_path=self.logfile,
+ background=self.background)
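+
+# Flow summary: the size check runs synchronously in _start(); the remaining
+# hashes are computed by a FileDigester task and compared in
+# _digester_exit(). Any mismatch renames the file via
+# _checksum_failure_temp_file() so that a subsequent fetch starts clean.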
diff --git a/usr/lib/portage/pym/_emerge/Blocker.py b/usr/lib/portage/pym/_emerge/Blocker.py
new file mode 100644
index 0000000..9304606
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Blocker.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.Task import Task
+
+class Blocker(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("root", "atom", "cp", "eapi", "priority", "satisfied")
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+ self.cp = self.atom.cp
+ self._hash_key = ("blocks", self.root, self.atom, self.eapi)
+ self._hash_value = hash(self._hash_key)
diff --git a/usr/lib/portage/pym/_emerge/BlockerCache.py b/usr/lib/portage/pym/_emerge/BlockerCache.py
new file mode 100644
index 0000000..53342d6
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BlockerCache.py
@@ -0,0 +1,191 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import sys
+from portage.util import writemsg
+from portage.data import secpass
+import portage
+from portage import os
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class BlockerCache(portage.cache.mappings.MutableMapping):
+ """This caches blockers of installed packages so that dep_check does not
+ have to be done for every single installed package on every invocation of
+ emerge. The cache is invalidated whenever it is detected that something
+ has changed that might alter the results of dep_check() calls:
+ 1) the set of installed packages (including COUNTER) has changed
+ """
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _cache_threshold = 5
+
+ class BlockerData(object):
+
+ __slots__ = ("__weakref__", "atoms", "counter")
+
+ def __init__(self, counter, atoms):
+ self.counter = counter
+ self.atoms = atoms
+
+ def __init__(self, myroot, vardb):
+ """ myroot is ignored in favour of EROOT """
+ self._vardb = vardb
+ self._cache_filename = os.path.join(vardb.settings['EROOT'],
+ portage.CACHE_PATH, "vdb_blockers.pickle")
+ self._cache_version = "1"
+ self._cache_data = None
+ self._modified = set()
+ self._load()
+
+ def _load(self):
+ try:
+ f = open(self._cache_filename, mode='rb')
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ self._cache_data = mypickle.load()
+ f.close()
+ del f
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
+ if isinstance(e, EnvironmentError) and \
+ getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg("!!! Error loading '%s': %s\n" % \
+ (self._cache_filename, str(e)), noiselevel=-1)
+ del e
+
+ cache_valid = self._cache_data and \
+ isinstance(self._cache_data, dict) and \
+ self._cache_data.get("version") == self._cache_version and \
+ isinstance(self._cache_data.get("blockers"), dict)
+ if cache_valid:
+ # Validate all the atoms and counters so that
+ # corruption is detected as soon as possible.
+ invalid_items = set()
+ for k, v in self._cache_data["blockers"].items():
+ if not isinstance(k, basestring):
+ invalid_items.add(k)
+ continue
+ try:
+ if portage.catpkgsplit(k) is None:
+ invalid_items.add(k)
+ continue
+ except portage.exception.InvalidData:
+ invalid_items.add(k)
+ continue
+ if not isinstance(v, tuple) or \
+ len(v) != 2:
+ invalid_items.add(k)
+ continue
+ counter, atoms = v
+ if not isinstance(counter, (int, long)):
+ invalid_items.add(k)
+ continue
+ if not isinstance(atoms, (list, tuple)):
+ invalid_items.add(k)
+ continue
+ invalid_atom = False
+ for atom in atoms:
+ if not isinstance(atom, basestring):
+ invalid_atom = True
+ break
+ if atom[:1] != "!" or \
+ not portage.isvalidatom(
+ atom, allow_blockers=True):
+ invalid_atom = True
+ break
+ if invalid_atom:
+ invalid_items.add(k)
+ continue
+
+ for k in invalid_items:
+ del self._cache_data["blockers"][k]
+ if not self._cache_data["blockers"]:
+ cache_valid = False
+
+ if not cache_valid:
+ self._cache_data = {"version":self._cache_version}
+ self._cache_data["blockers"] = {}
+ self._modified.clear()
+
+ def flush(self):
+ """If the current user has permission and the internal blocker cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has processed blockers for all installed packages.
+ Currently, the cache is only written if the user has superuser
+ privileges (since that's required to obtain a lock), but all users
+ have read access and benefit from faster blocker lookups (as long as
+ the entire cache is still valid). The cache is stored as a pickled
+ dict object with the following format:
+
+ {
+ version : "1",
+ "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
+ }
+ """
+ if len(self._modified) >= self._cache_threshold and \
+ secpass >= 2:
+ try:
+ f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
+ pickle.dump(self._cache_data, f, protocol=2)
+ f.close()
+ portage.util.apply_secpass_permissions(
+ self._cache_filename, gid=portage.portage_gid, mode=0o644)
+ except (IOError, OSError):
+ pass
+ self._modified.clear()
+
+ def __setitem__(self, cpv, blocker_data):
+ """
+ Update the cache and mark it as modified for a future call to
+ self.flush().
+
+ @param cpv: Package for which to cache blockers.
+ @type cpv: String
+ @param blocker_data: An object with counter and atoms attributes.
+ @type blocker_data: BlockerData
+ """
+ self._cache_data["blockers"][_unicode(cpv)] = (blocker_data.counter,
+ tuple(_unicode(x) for x in blocker_data.atoms))
+ self._modified.add(cpv)
+
+ def __iter__(self):
+ if self._cache_data is None:
+ # triggered by python-trace
+ return iter([])
+ return iter(self._cache_data["blockers"])
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self._cache_data["blockers"])
+
+ def __delitem__(self, cpv):
+ del self._cache_data["blockers"][cpv]
+
+ def __getitem__(self, cpv):
+ """
+ @rtype: BlockerData
+ @return: An object with counter and atoms attributes.
+ """
+ return self.BlockerData(*self._cache_data["blockers"][cpv])
+
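+# Illustrative usage (cpv and atoms hypothetical):
+#
+#     cache = BlockerCache(None, vardb)
+#     cache["sys-apps/foo-1.0"] = BlockerCache.BlockerData(
+#         counter, ("!sys-apps/bar",))
+#     cache.flush()  # written only when >= _cache_threshold entries changed
+#                    # and the caller has superuser privileges (secpass >= 2)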
diff --git a/usr/lib/portage/pym/_emerge/BlockerDB.py b/usr/lib/portage/pym/_emerge/BlockerDB.py
new file mode 100644
index 0000000..8bb8f5f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BlockerDB.py
@@ -0,0 +1,125 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from portage import digraph
+from portage._sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.Package import Package
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BlockerDB(object):
+
+ def __init__(self, fake_vartree):
+ root_config = fake_vartree._root_config
+ self._root_config = root_config
+ self._vartree = root_config.trees["vartree"]
+ self._portdb = root_config.trees["porttree"].dbapi
+
+ self._dep_check_trees = None
+ self._fake_vartree = fake_vartree
+ self._dep_check_trees = {
+ self._vartree.settings["EROOT"] : {
+ "porttree" : fake_vartree,
+ "vartree" : fake_vartree,
+ }}
+
+ def findInstalledBlockers(self, new_pkg):
+ """
+ Search for installed run-time blockers in the root where
+ new_pkg is planned to be installed. This ignores build-time
+ blockers, since new_pkg is assumed to be built already.
+ """
+ blocker_cache = BlockerCache(None,
+ self._vartree.dbapi)
+ dep_keys = Package._runtime_keys
+ settings = self._vartree.settings
+ stale_cache = set(blocker_cache)
+ fake_vartree = self._fake_vartree
+ dep_check_trees = self._dep_check_trees
+ vardb = fake_vartree.dbapi
+ installed_pkgs = list(vardb)
+
+ for inst_pkg in installed_pkgs:
+ stale_cache.discard(inst_pkg.cpv)
+ cached_blockers = blocker_cache.get(inst_pkg.cpv)
+ if cached_blockers is not None and \
+ cached_blockers.counter != inst_pkg.counter:
+ cached_blockers = None
+ if cached_blockers is not None:
+ blocker_atoms = cached_blockers.atoms
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=inst_pkg.use.enabled,
+ trees=dep_check_trees, myroot=inst_pkg.root)
+ if not success:
+ pkg_location = os.path.join(inst_pkg.root,
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+ (pkg_location, atoms), noiselevel=-1)
+ continue
+
+ blocker_atoms = [atom for atom in atoms \
+ if atom.startswith("!")]
+ blocker_atoms.sort()
+ blocker_cache[inst_pkg.cpv] = \
+ blocker_cache.BlockerData(inst_pkg.counter, blocker_atoms)
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+
+ blocker_parents = digraph()
+ blocker_atoms = []
+ for pkg in installed_pkgs:
+ for blocker_atom in blocker_cache[pkg.cpv].atoms:
+ blocker_atom = blocker_atom.lstrip("!")
+ blocker_atoms.append(blocker_atom)
+ blocker_parents.add(blocker_atom, pkg)
+
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ blocking_pkgs = set()
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+ # Check for blockers in the other direction.
+ depstr = " ".join(new_pkg._metadata[k] for k in dep_keys)
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=new_pkg.use.enabled,
+ trees=dep_check_trees, myroot=new_pkg.root)
+ if not success:
+ # We should never get this far with invalid deps.
+ show_invalid_depstring_notice(new_pkg, depstr, atoms)
+ assert False
+
+ blocker_atoms = [atom.lstrip("!") for atom in atoms \
+ if atom[:1] == "!"]
+ if blocker_atoms:
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ for inst_pkg in installed_pkgs:
+ try:
+ next(blocker_atoms.iterAtomsForPackage(inst_pkg))
+ except (portage.exception.InvalidDependString, StopIteration):
+ continue
+ blocking_pkgs.add(inst_pkg)
+
+ return blocking_pkgs
+
+ def discardBlocker(self, pkg):
+ """Discard a package from the list of potential blockers.
+ This will match any package(s) with identical cpv or cp:slot."""
+ for cpv_match in self._fake_vartree.dbapi.match_pkgs("=%s" % (pkg.cpv,)):
+ if cpv_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(cpv_match)
+ for slot_match in self._fake_vartree.dbapi.match_pkgs(pkg.slot_atom):
+ if slot_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(slot_match)
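+
+# Illustrative flow ("new_pkg" hypothetical):
+#
+#     blockers = blocker_db.findInstalledBlockers(new_pkg)
+#
+# returns the installed packages that block, or are blocked by, new_pkg at
+# run time; build-time blockers are ignored since new_pkg is already built.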
diff --git a/usr/lib/portage/pym/_emerge/BlockerDepPriority.py b/usr/lib/portage/pym/_emerge/BlockerDepPriority.py
new file mode 100644
index 0000000..1004a37
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/BlockerDepPriority.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
+ def __str__(self):
+ return 'blocker'
+
+BlockerDepPriority.instance = BlockerDepPriority()
diff --git a/usr/lib/portage/pym/_emerge/CompositeTask.py b/usr/lib/portage/pym/_emerge/CompositeTask.py
new file mode 100644
index 0000000..40cf859
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/CompositeTask.py
@@ -0,0 +1,162 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage import os
+
+class CompositeTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + ("_current_task",)
+
+ _TASK_QUEUED = -1
+
+ def isAlive(self):
+ return self._current_task is not None
+
+ def _cancel(self):
+ if self._current_task is not None:
+ if self._current_task is self._TASK_QUEUED:
+ self.returncode = 1
+ self._current_task = None
+ else:
+ self._current_task.cancel()
+
+ def _poll(self):
+ """
+ This does a loop calling self._current_task.poll()
+ repeatedly as long as the value of self._current_task
+ keeps changing. It calls poll() a maximum of one time
+ for a given self._current_task instance. This is useful
+		since calling poll() on a task can trigger advance to
+		the next task, which could eventually lead to the
+		returncode being set in cases when polling only a
+		single task would not have the same effect.
+ """
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None or \
+ task is self._TASK_QUEUED or \
+ task is prev:
+ # don't poll the same task more than once
+ break
+ task.poll()
+ prev = task
+
+ return self.returncode
+
+ def _wait(self):
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None:
+ # don't wait for the same task more than once
+ break
+ if task is self._TASK_QUEUED:
+ if self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ while not self._task_queued_wait():
+ self.scheduler.iteration()
+ if self.returncode is not None:
+ break
+ elif self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ # try this again with new _current_task value
+ continue
+ if task is prev:
+ if self.returncode is not None:
+ # This is expected if we're being
+ # called from the task's exit listener
+ # after it's been cancelled.
+ break
+ # Before the task.wait() method returned, an exit
+ # listener should have set self._current_task to either
+ # a different task or None. Something is wrong.
+ raise AssertionError("self._current_task has not " + \
+ "changed since calling wait", self, task)
+ task.wait()
+ prev = task
+
+ return self.returncode
+
+ def _assert_current(self, task):
+ """
+ Raises an AssertionError if the given task is not the
+ same one as self._current_task. This can be useful
+ for detecting bugs.
+ """
+ if task is not self._current_task:
+ raise AssertionError("Unrecognized task: %s" % (task,))
+
+ def _default_exit(self, task):
+ """
+ Calls _assert_current() on the given task and then sets the
+ composite returncode attribute if task.returncode != os.EX_OK.
+ If the task failed then self._current_task will be set to None.
+ Subclasses can use this as a generic task exit callback.
+
+ @rtype: int
+ @return: The task.returncode attribute.
+ """
+ self._assert_current(task)
+ if task.returncode != os.EX_OK:
+ self.returncode = task.returncode
+ self._current_task = None
+ return task.returncode
+
+ def _final_exit(self, task):
+ """
+ Assumes that task is the final task of this composite task.
+ Calls _default_exit() and sets self.returncode to the task's
+ returncode and sets self._current_task to None.
+ """
+ self._default_exit(task)
+ self._current_task = None
+ self.returncode = task.returncode
+ return self.returncode
+
+ def _default_final_exit(self, task):
+ """
+ This calls _final_exit() and then wait().
+
+ Subclasses can use this as a generic final task exit callback.
+
+ """
+ self._final_exit(task)
+ return self.wait()
+
+ def _start_task(self, task, exit_handler):
+ """
+ Register exit handler for the given task, set it
+ as self._current_task, and call task.start().
+
+ Subclasses can use this as a generic way to start
+ a task.
+
+ """
+ try:
+ task.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ task.addExitListener(exit_handler)
+ self._current_task = task
+ task.start()
+
+ def _task_queued(self, task):
+ task.addStartListener(self._task_queued_start_handler)
+ self._current_task = self._TASK_QUEUED
+
+ def _task_queued_start_handler(self, task):
+ self._current_task = task
+
+ def _task_queued_wait(self):
+ return self._current_task is not self._TASK_QUEUED or \
+ self.cancelled or self.returncode is not None
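+
+# Typical subclass pattern (sketch with hypothetical names; Binpkg and
+# EbuildBuild in this directory follow it):
+#
+#     class MyTask(CompositeTask):
+#         def _start(self):
+#             self._start_task(step_one, self._step_one_exit)
+#
+#         def _step_one_exit(self, task):
+#             if self._default_exit(task) != os.EX_OK:
+#                 self.wait()
+#                 return
+#             self._start_task(step_two, self._default_final_exit)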
diff --git a/usr/lib/portage/pym/_emerge/DepPriority.py b/usr/lib/portage/pym/_emerge/DepPriority.py
new file mode 100644
index 0000000..34fdb48
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/DepPriority.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class DepPriority(AbstractDepPriority):
+
+ __slots__ = ("satisfied", "optional", "ignored")
+
+ def __int__(self):
+ """
+ Note: These priorities are only used for measuring hardness
+ in the circular dependency display via digraph.debug_print(),
+ and nothing more. For actual merge order calculations, the
+ measures defined by the DepPriorityNormalRange and
+ DepPrioritySatisfiedRange classes are used.
+
+ Attributes Hardness
+
+ buildtime_slot_op 0
+ buildtime -1
+ runtime -2
+ runtime_post -3
+ optional -4
+ (none of the above) -5
+
+ """
+
+ if self.optional:
+ return -4
+ if self.buildtime_slot_op:
+ return 0
+ if self.buildtime:
+ return -1
+ if self.runtime:
+ return -2
+ if self.runtime_post:
+ return -3
+ return -5
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.optional:
+ return "optional"
+ if self.buildtime_slot_op:
+ return "buildtime_slot_op"
+ if self.buildtime:
+ return "buildtime"
+ if self.runtime_slot_op:
+ return "runtime_slot_op"
+ if self.runtime:
+ return "runtime"
+ if self.runtime_post:
+ return "runtime_post"
+ return "soft"
+
diff --git a/usr/lib/portage/pym/_emerge/DepPriorityNormalRange.py b/usr/lib/portage/pym/_emerge/DepPriorityNormalRange.py
new file mode 100644
index 0000000..8639554
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/DepPriorityNormalRange.py
@@ -0,0 +1,47 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPriorityNormalRange(object):
+ """
+ DepPriority properties Index Category
+
+ buildtime HARD
+ runtime 3 MEDIUM
+ runtime_post 2 MEDIUM_SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 3
+ MEDIUM_SOFT = 2
+ SOFT = 1
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_optional
+
+DepPriorityNormalRange.ignore_priority = (
+ None,
+ DepPriorityNormalRange._ignore_optional,
+ DepPriorityNormalRange._ignore_runtime_post,
+ DepPriorityNormalRange._ignore_runtime
+)
diff --git a/usr/lib/portage/pym/_emerge/DepPrioritySatisfiedRange.py b/usr/lib/portage/pym/_emerge/DepPrioritySatisfiedRange.py
new file mode 100644
index 0000000..391f540
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -0,0 +1,97 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPrioritySatisfiedRange(object):
+ """
+ DepPriority Index Category
+
+ not satisfied and buildtime HARD
+ not satisfied and runtime 7 MEDIUM
+ not satisfied and runtime_post 6 MEDIUM_SOFT
+ satisfied and buildtime_slot_op 5 SOFT
+ satisfied and buildtime 4 SOFT
+ satisfied and runtime 3 SOFT
+ satisfied and runtime_post 2 SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 7
+ MEDIUM_SOFT = 6
+ SOFT = 5
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_satisfied_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return bool(priority.runtime_post)
+
+ @classmethod
+ def _ignore_satisfied_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return not priority.buildtime
+
+ @classmethod
+ def _ignore_satisfied_buildtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if priority.buildtime_slot_op:
+ return False
+ return bool(priority.satisfied)
+
+ @classmethod
+ def _ignore_satisfied_buildtime_slot_op(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied or \
+ priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.satisfied or \
+ priority.optional or \
+ not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_satisfied_buildtime
+
+
+DepPrioritySatisfiedRange.ignore_priority = (
+ None,
+ DepPrioritySatisfiedRange._ignore_optional,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime_slot_op,
+ DepPrioritySatisfiedRange._ignore_runtime_post,
+ DepPrioritySatisfiedRange._ignore_runtime
+)
diff --git a/usr/lib/portage/pym/_emerge/Dependency.py b/usr/lib/portage/pym/_emerge/Dependency.py
new file mode 100644
index 0000000..2ec860f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Dependency.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.SlotObject import SlotObject
+from _emerge.DepPriority import DepPriority
+
+class Dependency(SlotObject):
+ __slots__ = ("atom", "blocker", "child", "depth",
+ "parent", "onlydeps", "priority", "root", "want_update",
+ "collapsed_parent", "collapsed_priority")
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ if self.priority is None:
+ self.priority = DepPriority()
+ if self.depth is None:
+ self.depth = 0
+ if self.collapsed_parent is None:
+ self.collapsed_parent = self.parent
+ if self.collapsed_priority is None:
+ self.collapsed_priority = self.priority
+
diff --git a/usr/lib/portage/pym/_emerge/DependencyArg.py b/usr/lib/portage/pym/_emerge/DependencyArg.py
new file mode 100644
index 0000000..29a0072
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/DependencyArg.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+
+class DependencyArg(object):
+
+ __slots__ = ('arg', 'force_reinstall', 'internal', 'reset_depth', 'root_config')
+
+ def __init__(self, arg=None, force_reinstall=False, internal=False,
+ reset_depth=True, root_config=None):
+ """
+ Use reset_depth=False for special arguments that should not interact
+ with depth calculations (see the emerge --deep=DEPTH option).
+ """
+ self.arg = arg
+ self.force_reinstall = force_reinstall
+ self.internal = internal
+ self.reset_depth = reset_depth
+ self.root_config = root_config
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return self.arg == other.arg and \
+ self.root_config.root == other.root_config.root
+
+ def __hash__(self):
+ return hash((self.arg, self.root_config.root))
+
+ def __str__(self):
+ # Use unicode_literals format string for python-2.x safety,
+ # ensuring that self.arg.__unicode__() is used
+ # when necessary.
+ return "%s" % (self.arg,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(), encoding=_encodings['content'])
diff --git a/usr/lib/portage/pym/_emerge/EbuildBinpkg.py b/usr/lib/portage/pym/_emerge/EbuildBinpkg.py
new file mode 100644
index 0000000..34a6aef
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildBinpkg.py
@@ -0,0 +1,50 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+from portage import os
+
+class EbuildBinpkg(CompositeTask):
+ """
+ This assumes that src_install() has successfully completed.
+ """
+ __slots__ = ('pkg', 'settings') + \
+ ('_binpkg_tmpfile',)
+
+ def _start(self):
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ bintree.prevent_collision(pkg.cpv)
+ binpkg_tmpfile = os.path.join(bintree.pkgdir,
+ pkg.cpv + ".tbz2." + str(os.getpid()))
+ bintree._ensure_dir(os.path.dirname(binpkg_tmpfile))
+
+ self._binpkg_tmpfile = binpkg_tmpfile
+ self.settings["PORTAGE_BINPKG_TMPFILE"] = self._binpkg_tmpfile
+
+ package_phase = EbuildPhase(background=self.background,
+ phase='package', scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(package_phase, self._package_phase_exit)
+
+ def _package_phase_exit(self, package_phase):
+
+ self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
+ if self._default_exit(package_phase) != os.EX_OK:
+ try:
+ os.unlink(self._binpkg_tmpfile)
+ except OSError:
+ pass
+ self.wait()
+ return
+
+ pkg = self.pkg
+ bintree = pkg.root_config.trees["bintree"]
+ bintree.inject(pkg.cpv, filename=self._binpkg_tmpfile)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
diff --git a/usr/lib/portage/pym/_emerge/EbuildBuild.py b/usr/lib/portage/pym/_emerge/EbuildBuild.py
new file mode 100644
index 0000000..1351c79
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildBuild.py
@@ -0,0 +1,409 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import _emerge.emergelog
+from _emerge.EbuildExecuter import EbuildExecuter
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildBinpkg import EbuildBinpkg
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildFetchonly import EbuildFetchonly
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.TaskSequence import TaskSequence
+
+from portage.util import writemsg
+import portage
+from portage import os
+from portage.output import colorize
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.const import EPREFIX
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+
+class EbuildBuild(CompositeTask):
+
+ __slots__ = ("args_set", "config_pool", "find_blockers",
+ "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
+ "prefetcher", "settings", "world_atom") + \
+ ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+
+ if not self.opts.fetchonly:
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ self.returncode = rval
+ self._current_task = None
+ self._async_wait()
+ return
+
+ root_config = pkg.root_config
+ tree = "porttree"
+ self._tree = tree
+ portdb = root_config.trees[tree].dbapi
+ settings.setcpv(pkg)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self.opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ self._ebuild_path = ebuild_path
+ portage.doebuild_environment(ebuild_path, 'setup',
+ settings=self.settings, db=portdb)
+
+ # Check the manifest here since with --keep-going mode it's
+ # currently possible to get this far with a broken manifest.
+ if not self._check_manifest():
+ self.returncode = 1
+ self._current_task = None
+ self._async_wait()
+ return
+
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = "Fetching files " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f %s` in another terminal." \
+ % (os.path.join(_emerge.emergelog._emerge_log_dir, "emerge-fetch.log"))
+ msg_prefix = colorize("GOOD", " * ")
+ from textwrap import wrap
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _check_manifest(self):
+ success = True
+
+ settings = self.settings
+ if 'strict' in settings.features and \
+ 'digest' not in settings.features:
+ settings['O'] = os.path.dirname(self._ebuild_path)
+ quiet_setting = settings.get('PORTAGE_QUIET')
+ settings['PORTAGE_QUIET'] = '1'
+ try:
+ success = digestcheck([], settings, strict=True)
+ finally:
+ if quiet_setting:
+ settings['PORTAGE_QUIET'] = quiet_setting
+ else:
+ del settings['PORTAGE_QUIET']
+
+ return success
+
+ def _prefetch_exit(self, prefetcher):
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ opts = self.opts
+ pkg = self.pkg
+ settings = self.settings
+
+ if opts.fetchonly:
+ if opts.pretend:
+ fetcher = EbuildFetchonly(
+ fetch_all=opts.fetch_all_uri,
+ pkg=pkg, pretend=opts.pretend,
+ settings=settings)
+ retval = fetcher.execute()
+ self.returncode = retval
+ self.wait()
+ return
+ else:
+ fetcher = EbuildFetcher(
+ config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=False,
+ logfile=None,
+ pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(fetcher, self._fetchonly_exit)
+ return
+
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ self._build_dir.lock()
+
+ # Cleaning needs to happen before fetch, since the build dir
+ # is used for log handling.
+ msg = " === (%s of %s) Cleaning (%s::%s)" % \
+ (self.pkg_count.curval, self.pkg_count.maxval,
+ self.pkg.cpv, self._ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Clean" % \
+ (self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ pre_clean_phase = EbuildPhase(background=self.background,
+ phase='clean', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(pre_clean_phase, self._pre_clean_exit)
+
+ def _fetchonly_exit(self, fetcher):
+ self._final_exit(fetcher)
+ if self.returncode != os.EX_OK:
+ portdb = self.pkg.root_config.trees[self._tree].dbapi
+ spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ elif 'digest' in self.settings.features:
+ if not digestgen(mysettings=self.settings,
+ myportdb=self.pkg.root_config.trees[self._tree].dbapi):
+ self.returncode = 1
+ self.wait()
+
+ def _pre_clean_exit(self, pre_clean_phase):
+ if self._default_exit(pre_clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ # for log handling
+ portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
+
+ fetcher = EbuildFetcher(config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=self.background,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'),
+ pkg=self.pkg, scheduler=self.scheduler)
+
+ try:
+ already_fetched = fetcher.already_fetched(self.settings)
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ fetcher._eerror(msg_lines)
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self.returncode = 1
+ self._current_task = None
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if already_fetched:
+ # This case is optimized to skip the fetch queue.
+ fetcher = None
+ self._fetch_exit(fetcher)
+ return
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetch_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+
+ def _fetch_exit(self, fetcher):
+
+ if fetcher is not None and \
+ self._default_exit(fetcher) != os.EX_OK:
+ self._fetch_failed()
+ return
+
+ # discard successful fetch log
+ self._build_dir.clean_log()
+ pkg = self.pkg
+ logger = self.logger
+ opts = self.opts
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ features = settings.features
+ ebuild_path = self._ebuild_path
+ system_set = pkg.root_config.sets["system"]
+
+		# buildsyspkg: check if we need to _force_ binary package creation
+ self._issyspkg = "buildsyspkg" in features and \
+ system_set.findAtomForPackage(pkg) and \
+ "buildpkg" not in features and \
+ opts.buildpkg != 'n'
+
+ if ("buildpkg" in features or self._issyspkg) \
+ and not self.opts.buildpkg_exclude.findAtomForPackage(pkg):
+
+ self._buildpkg = True
+
+ msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ else:
+ msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ build = EbuildExecuter(background=self.background, pkg=pkg,
+ scheduler=scheduler, settings=settings)
+ self._start_task(build, self._build_exit)
+
+ def _fetch_failed(self):
+ # We only call the pkg_nofetch phase if either RESTRICT=fetch
+ # is set or the package has explicitly overridden the default
+ # pkg_nofetch implementation. This allows specialized messages
+ # to be displayed for problematic packages even though they do
+ # not set RESTRICT=fetch (bug #336499).
+
+ if 'fetch' not in self.pkg.restrict and \
+ 'nofetch' not in self.pkg.defined_phases:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ self.returncode = None
+ nofetch_phase = EbuildPhase(background=self.background,
+ phase='nofetch', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(nofetch_phase, self._nofetch_exit)
+
+ def _nofetch_exit(self, nofetch_phase):
+ self._final_exit(nofetch_phase)
+ self._unlock_builddir()
+ self.returncode = 1
+ self.wait()
+
+ def _unlock_builddir(self):
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def _build_exit(self, build):
+ if self._default_exit(build) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ buildpkg = self._buildpkg
+
+ if not buildpkg:
+ self._final_exit(build)
+ self.wait()
+ return
+
+ if self._issyspkg:
+ msg = ">>> This is a system package, " + \
+ "let's pack a rescue tarball.\n"
+ self.scheduler.output(msg,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ binpkg_tasks = TaskSequence()
+ requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
+ for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if pkg_fmt in requested_binpkg_formats:
+ if pkg_fmt == "rpm":
+ binpkg_tasks.add(EbuildPhase(background=self.background,
+ phase="rpm", scheduler=self.scheduler,
+ settings=self.settings))
+ else:
+ binpkg_tasks.add(EbuildBinpkg(background=self.background,
+ pkg=self.pkg, scheduler=self.scheduler,
+ settings=self.settings))
+
+ if binpkg_tasks:
+ self._start_task(binpkg_tasks, self._buildpkg_exit)
+ return
+
+ self._final_exit(build)
+ self.wait()
+
+ def _buildpkg_exit(self, packager):
+		"""
+		Release the build dir lock when there is a failure or
+		when in buildpkgonly mode. Otherwise, the lock will
+		be released when merge() is called.
+		"""
+
+ if self._default_exit(packager) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.buildpkgonly:
+ phase = 'success_hooks'
+ success_hooks = MiscFunctionsProcess(
+ background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(success_hooks,
+ self._buildpkgonly_success_hook_exit)
+ return
+
+ # Continue holding the builddir lock until
+ # after the package has been installed.
+ self._current_task = None
+ self.returncode = packager.returncode
+ self.wait()
+
+ def _buildpkgonly_success_hook_exit(self, success_hooks):
+ self._default_exit(success_hooks)
+ self.returncode = None
+ # Need to call "clean" phase for buildpkgonly mode
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ phase = 'clean'
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._final_exit(clean_phase) != os.EX_OK or \
+ self.opts.buildpkgonly:
+ self._unlock_builddir()
+ self.wait()
+
+ def create_install_task(self):
+ """
+ Install the package and then clean up and release locks.
+ Only call this after the build has completed successfully
+ and neither fetchonly nor buildpkgonly mode are enabled.
+ """
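+		# A minimal usage sketch (illustrative only; `build` stands for an
+		# EbuildBuild instance owned by the Scheduler, which is expected to
+		# obtain the merge task here and start it itself):
+		#
+		#     merge_task = build.create_install_task()
+		#     merge_task.start()  # builddir lock released via _install_exit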
+
+ ldpath_mtimes = self.ldpath_mtimes
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ settings = self.settings
+ world_atom = self.world_atom
+ ebuild_path = self._ebuild_path
+ tree = self._tree
+
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
+ pkg_count=pkg_count, pkg_path=ebuild_path,
+ scheduler=self.scheduler,
+ settings=settings, tree=tree, world_atom=world_atom)
+
+ msg = " === (%s of %s) Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval,
+ pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Merge" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ return task
+
+ def _install_exit(self, task):
+ self._unlock_builddir()
diff --git a/usr/lib/portage/pym/_emerge/EbuildBuildDir.py b/usr/lib/portage/pym/_emerge/EbuildBuildDir.py
new file mode 100644
index 0000000..b64b7aa
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildBuildDir.py
@@ -0,0 +1,108 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+
+import portage
+from portage import os
+import sys
+from portage.exception import PortageException
+from portage.util.SlotObject import SlotObject
+
+class EbuildBuildDir(SlotObject):
+
+ __slots__ = ("scheduler", "settings",
+ "locked", "_catdir", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self.locked = False
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
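+		# A minimal sketch of the intended locking discipline (illustrative
+		# only; it mirrors how EbuildBuild drives this class):
+		#
+		#     build_dir = EbuildBuildDir(scheduler=scheduler,
+		#         settings=settings)
+		#     build_dir.lock()
+		#     try:
+		#         ...  # run phases inside PORTAGE_BUILDDIR
+		#     finally:
+		#         build_dir.unlock()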
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ dir_path = self.settings.get('PORTAGE_BUILDDIR')
+ if not dir_path:
+ raise AssertionError('PORTAGE_BUILDDIR is unset')
+ catdir = os.path.dirname(dir_path)
+ self._catdir = catdir
+
+ try:
+ portage.util.ensure_dirs(os.path.dirname(catdir),
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(os.path.dirname(catdir)):
+ raise
+ catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ catdir_lock.wait()
+ self._assert_lock(catdir_lock)
+
+ try:
+ try:
+ portage.util.ensure_dirs(catdir,
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(catdir):
+ raise
+ builddir_lock = AsynchronousLock(path=dir_path,
+ scheduler=self.scheduler)
+ builddir_lock.start()
+ builddir_lock.wait()
+ self._assert_lock(builddir_lock)
+ self._lock_obj = builddir_lock
+ self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1'
+ finally:
+ self.locked = self._lock_obj is not None
+ catdir_lock.unlock()
+
+ def _assert_lock(self, async_lock):
+ if async_lock.returncode != os.EX_OK:
+ # TODO: create a better way to propagate this error to the caller
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ def clean_log(self):
+		"""Discard the existing log. The log is not discarded in
+		cases where that would not make sense, such as when
+		FEATURES=keepwork is enabled."""
+ settings = self.settings
+ if 'keepwork' in settings.features:
+ return
+ log_file = settings.get('PORTAGE_LOG_FILE')
+ if log_file is not None and os.path.isfile(log_file):
+ try:
+ os.unlink(log_file)
+ except OSError:
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+ self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
+ catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ if catdir_lock.wait() == os.EX_OK:
+ try:
+ os.rmdir(self._catdir)
+ except OSError:
+ pass
+ finally:
+ catdir_lock.unlock()
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
diff --git a/usr/lib/portage/pym/_emerge/EbuildExecuter.py b/usr/lib/portage/pym/_emerge/EbuildExecuter.py
new file mode 100644
index 0000000..5587d4e
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildExecuter.py
@@ -0,0 +1,88 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.TaskSequence import TaskSequence
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.eapi import eapi_has_src_prepare_and_src_configure, \
+ eapi_exports_replace_vars
+from portage.package.ebuild.doebuild import _prepare_fake_distdir
+
+class EbuildExecuter(CompositeTask):
+
+ __slots__ = ("pkg", "settings")
+
+ _phases = ("prepare", "configure", "compile", "test", "install")
+
+ _live_eclasses = portage.const.LIVE_ECLASSES
+
+ def _start(self):
+ pkg = self.pkg
+ scheduler = self.scheduler
+ settings = self.settings
+ cleanup = 0
+ portage.prepare_build_dirs(pkg.root, settings, cleanup)
+
+ alist = settings.configdict["pkg"].get("A", "").split()
+ _prepare_fake_distdir(settings, alist)
+
+ if eapi_exports_replace_vars(settings['EAPI']):
+ vardb = pkg.root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(pkg.slot_atom) + \
+ vardb.match('='+pkg.cpv)))
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=scheduler,
+ settings=settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ unpack_phase = EbuildPhase(background=self.background,
+ phase="unpack", scheduler=self.scheduler,
+ settings=self.settings)
+
+ if self._live_eclasses.intersection(self.pkg.inherited):
+			# Serialize $DISTDIR access for live ebuilds, since
+			# otherwise they can interfere with each other.
+
+ unpack_phase.addExitListener(self._unpack_exit)
+ self._task_queued(unpack_phase)
+ self.scheduler.scheduleUnpack(unpack_phase)
+
+ else:
+ self._start_task(unpack_phase, self._unpack_exit)
+
+ def _unpack_exit(self, unpack_phase):
+
+ if self._default_exit(unpack_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ ebuild_phases = TaskSequence(scheduler=self.scheduler)
+
+ pkg = self.pkg
+ phases = self._phases
+ eapi = pkg.eapi
+ if not eapi_has_src_prepare_and_src_configure(eapi):
+ # skip src_prepare and src_configure
+ phases = phases[2:]
+
+ for phase in phases:
+ ebuild_phases.add(EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=self.settings))
+
+ self._start_task(ebuild_phases, self._default_final_exit)
+
diff --git a/usr/lib/portage/pym/_emerge/EbuildFetcher.py b/usr/lib/portage/pym/_emerge/EbuildFetcher.py
new file mode 100644
index 0000000..d98d007
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildFetcher.py
@@ -0,0 +1,286 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import sys
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.checksum import _hash_filter
+from portage.elog.messages import eerror
+from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._pty import _create_pty_or_pipe
+
+class EbuildFetcher(ForkProcess):
+
+ __slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+ "pkg", "prefetch") + \
+ ("_digests", "_manifest", "_settings", "_uri_map")
+
+ def already_fetched(self, settings):
+ """
+		Returns True if all files already exist locally and have correct
+		digests; otherwise returns False. When returning True, appropriate
+ digest checking messages are produced for display and/or logging.
+ When returning False, no messages are produced, since we assume
+ that a fetcher process will later be executed in order to produce
+ such messages. This will raise InvalidDependString if SRC_URI is
+ invalid.
+ """
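+		# Sketch of the expected call pattern (illustrative; this mirrors
+		# EbuildBuild._pre_clean_exit rather than defining a new API):
+		#
+		#     if fetcher.already_fetched(settings):
+		#         ...  # skip the fetch queue entirely
+		#     else:
+		#         scheduler.fetch.schedule(fetcher)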
+
+ uri_map = self._get_uri_map()
+ if not uri_map:
+ return True
+
+ digests = self._get_digests()
+ distdir = settings["DISTDIR"]
+ allow_missing = self._get_manifest().allow_missing
+
+ for filename in uri_map:
+ # Use stat rather than lstat since fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ expected_size = digests.get(filename, {}).get('size')
+ if expected_size is None:
+ continue
+ if st.st_size != expected_size:
+ return False
+
+ hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ eout = portage.output.EOutput()
+ eout.quiet = settings.get("PORTAGE_QUIET") == "1"
+ success = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ for filename in uri_map:
+ mydigests = digests.get(filename)
+ if mydigests is None:
+ if not allow_missing:
+ success = False
+ break
+ continue
+ ok, st = _check_distfile(os.path.join(distdir, filename),
+ mydigests, eout, show_errors=False, hash_filter=hash_filter)
+ if not ok:
+ success = False
+ break
+ except portage.exception.FileNotFound:
+ # A file disappeared unexpectedly.
+ return False
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+		if success:
+			# Only show the captured output on success; in the failure
+			# case no messages are produced here, since a fetcher process
+			# will later be executed and will produce them itself.
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ return success
+
+ def _start(self):
+
+ root_config = self.pkg.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = self._get_ebuild_path()
+
+ try:
+ uri_map = self._get_uri_map()
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ self._eerror(msg_lines)
+ self._set_returncode((self.pid, 1 << 8))
+ self._async_wait()
+ return
+
+ if not uri_map:
+ # Nothing to fetch.
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self._async_wait()
+ return
+
+ settings = self.config_pool.allocate()
+ settings.setcpv(self.pkg)
+ portage.doebuild_environment(ebuild_path, 'fetch',
+ settings=settings, db=portdb)
+
+ if self.prefetch and \
+ self._prefetch_size_ok(uri_map, settings, ebuild_path):
+ self.config_pool.deallocate(settings)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self._async_wait()
+ return
+
+ nocolor = settings.get("NOCOLOR")
+
+ if self.prefetch:
+ settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
+
+ if self.background:
+ nocolor = "true"
+
+ if nocolor is not None:
+ settings["NOCOLOR"] = nocolor
+
+ self._settings = settings
+ ForkProcess._start(self)
+
+ # Free settings now since it's no longer needed in
+ # this process (the subprocess has a private copy).
+ self.config_pool.deallocate(settings)
+ settings = None
+ self._settings = None
+
+ def _run(self):
+ # Force consistent color output, in case we are capturing fetch
+ # output through a normal pipe due to unavailability of ptys.
+ portage.output.havecolor = self._settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ rval = 1
+ allow_missing = self._get_manifest().allow_missing or \
+ 'digest' in self._settings.features
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ return rval
+
+ def _get_ebuild_path(self):
+ if self.ebuild_path is not None:
+ return self.ebuild_path
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self.ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
+ if self.ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
+ return self.ebuild_path
+
+ def _get_manifest(self):
+ if self._manifest is None:
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ self._manifest = self.pkg.root_config.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(pkgdir, None)
+ return self._manifest
+
+ def _get_digests(self):
+ if self._digests is None:
+ self._digests = self._get_manifest().getTypeDigests("DIST")
+ return self._digests
+
+ def _get_uri_map(self):
+ """
+ This can raise InvalidDependString from portdbapi.getFetchMap().
+ """
+ if self._uri_map is not None:
+ return self._uri_map
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ mytree = os.path.dirname(os.path.dirname(pkgdir))
+ use = None
+ if not self.fetchall:
+ use = self.pkg.use.enabled
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self._uri_map = portdb.getFetchMap(self.pkg.cpv,
+ useflags=use, mytree=mytree)
+ return self._uri_map
+
+ def _prefetch_size_ok(self, uri_map, settings, ebuild_path):
+ distdir = settings["DISTDIR"]
+
+ sizes = {}
+ for filename in uri_map:
+ # Use stat rather than lstat since portage.fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ sizes[filename] = st.st_size
+
+ digests = self._get_digests()
+ for filename, actual_size in sizes.items():
+ size = digests.get(filename, {}).get('size')
+ if size is None:
+ continue
+ if size != actual_size:
+ return False
+
+ # All files are present and sizes are ok. In this case the normal
+ # fetch code will be skipped, so we need to generate equivalent
+ # output here.
+ if self.logfile is not None:
+ f = io.open(_unicode_encode(self.logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ for filename in uri_map:
+ f.write(_unicode_decode((' * %s size ;-) ...' % \
+ filename).ljust(73) + '[ ok ]\n'))
+ f.close()
+
+ return True
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _eerror(self, lines):
+ out = io.StringIO()
+ for line in lines:
+ eerror(line, phase="unpack", key=self.pkg.cpv, out=out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ def _set_returncode(self, wait_retval):
+ ForkProcess._set_returncode(self, wait_retval)
+ # Collect elog messages that might have been
+ # created by the pkg_nofetch phase.
+ # Skip elog messages for prefetch, in order to avoid duplicates.
+ if not self.prefetch and self.returncode != os.EX_OK:
+ msg_lines = []
+ msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
+ if self.logfile is not None:
+ msg += ", Log file:"
+ msg_lines.append(msg)
+ if self.logfile is not None:
+ msg_lines.append(" '%s'" % (self.logfile,))
+ self._eerror(msg_lines)
diff --git a/usr/lib/portage/pym/_emerge/EbuildFetchonly.py b/usr/lib/portage/pym/_emerge/EbuildFetchonly.py
new file mode 100644
index 0000000..f88ea96
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildFetchonly.py
@@ -0,0 +1,32 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.elog.messages import eerror
+from portage.util.SlotObject import SlotObject
+
+class EbuildFetchonly(SlotObject):
+
+ __slots__ = ("fetch_all", "pkg", "pretend", "settings")
+
+ def execute(self):
+ settings = self.settings
+ pkg = self.pkg
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ settings.setcpv(pkg)
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+
+ rval = portage.doebuild(ebuild_path, "fetch",
+ settings=settings, debug=debug,
+ listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
+ mydbapi=portdb, tree="porttree")
+
+ if rval != os.EX_OK:
+ msg = "Fetch failed for '%s'" % (pkg.cpv,)
+ eerror(msg, phase="unpack", key=pkg.cpv)
+
+ return rval
diff --git a/usr/lib/portage/pym/_emerge/EbuildIpcDaemon.py b/usr/lib/portage/pym/_emerge/EbuildIpcDaemon.py
new file mode 100644
index 0000000..8414d20
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildIpcDaemon.py
@@ -0,0 +1,133 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import pickle
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+
+class EbuildIpcDaemon(FifoIpcDaemon):
+ """
+ This class serves as an IPC daemon, which ebuild processes can use
+ to communicate with portage's main python process.
+
+ Here are a few possible uses:
+
+ 1) Robust subshell/subprocess die support. This allows the ebuild
+ environment to reliably die without having to rely on signal IPC.
+
+ 2) Delegation of portageq calls to the main python process, eliminating
+ performance and userpriv permission issues.
+
+ 3) Reliable ebuild termination in cases when the ebuild has accidentally
+ left orphan processes running in the background (as in bug #278895).
+
+ 4) Detect cases in which bash has exited unexpectedly (as in bug #190128).
+ """
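+	# Wire-format sketch (an assumption read off _input_handler and
+	# _send_reply below, not a documented protocol): each request is one
+	# atomically written pickle of a tuple whose first element names the
+	# command, e.g.
+	#
+	#     os.write(fifo_fd, pickle.dumps(('exit', ...)))
+	#
+	# and each reply is likewise a single pickled object.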
+
+ __slots__ = ('commands',)
+
+ def _input_handler(self, fd, event):
+ # Read the whole pickle in a single atomic read() call.
+ data = None
+ if event & self.scheduler.IO_IN:
+ # For maximum portability, use os.read() here since
+ # array.fromfile() and file.read() are both known to
+ # erroneously return an empty string from this
+ # non-blocking fifo stream on FreeBSD (bug #337465).
+ try:
+ data = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ # Assume that another event will be generated
+ # if there's any relevant data.
+
+ if data:
+
+ try:
+ obj = pickle.loads(data)
+ except SystemExit:
+ raise
+ except Exception:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ pass
+ else:
+
+ self._reopen_input()
+
+ cmd_key = obj[0]
+ cmd_handler = self.commands[cmd_key]
+ reply = cmd_handler(obj)
+ try:
+ self._send_reply(reply)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the client side has been killed.
+ pass
+ else:
+ raise
+
+				# Allow the command to execute hooks after its reply
+				# has been sent. The 'exit' command uses this hook to
+				# kill the ebuild process: for some reason, killing the
+				# ebuild process while the ebuild-ipc helper is still
+				# waiting for a reply causes it to hang.
+ reply_hook = getattr(cmd_handler,
+ 'reply_hook', None)
+ if reply_hook is not None:
+ reply_hook()
+
+ elif event & self.scheduler.IO_HUP:
+ # This can be triggered due to a race condition which happens when
+ # the previous _reopen_input() call occurs before the writer has
+ # closed the pipe (see bug #401919). It's not safe to re-open
+ # without a lock here, since it's possible that another writer will
+ # write something to the pipe just before we close it, and in that
+ # case the write will be lost. Therefore, try for a non-blocking
+ # lock, and only re-open the pipe if the lock is acquired.
+ lock_filename = os.path.join(
+ os.path.dirname(self.input_fifo), '.ipc_lock')
+ try:
+ lock_obj = lockfile(lock_filename, unlinkfile=True,
+ flags=os.O_NONBLOCK)
+ except TryAgain:
+ # We'll try again when another IO_HUP event arrives.
+ pass
+ else:
+ try:
+ self._reopen_input()
+ finally:
+ unlockfile(lock_obj)
+
+ return True
+
+ def _send_reply(self, reply):
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles. Use non-blocking mode so
+ # we don't hang if the client is killed before we can send
+ # the reply. We rely on the client opening the other side
+ # of this fifo before it sends its request, since otherwise
+ # we'd have a race condition with this open call raising
+ # ENXIO if the client hasn't opened the fifo yet.
+ try:
+ output_fd = os.open(self.output_fifo,
+ os.O_WRONLY | os.O_NONBLOCK)
+ try:
+ os.write(output_fd, pickle.dumps(reply))
+ finally:
+ os.close(output_fd)
+ except OSError as e:
+ # This probably means that the client has been killed,
+ # which causes open to fail with ENXIO.
+ writemsg_level(
+ "!!! EbuildIpcDaemon %s: %s\n" % \
+ (_('failed to send reply'), e),
+ level=logging.ERROR, noiselevel=-1)
diff --git a/usr/lib/portage/pym/_emerge/EbuildMerge.py b/usr/lib/portage/pym/_emerge/EbuildMerge.py
new file mode 100644
index 0000000..df0778c
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildMerge.py
@@ -0,0 +1,58 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+
+class EbuildMerge(CompositeTask):
+
+ __slots__ = ("exit_hook", "find_blockers", "logger", "ldpath_mtimes",
+ "pkg", "pkg_count", "pkg_path", "pretend",
+ "settings", "tree", "world_atom")
+
+ def _start(self):
+ root_config = self.pkg.root_config
+ settings = self.settings
+ mycat = settings["CATEGORY"]
+ mypkg = settings["PF"]
+ pkgloc = settings["D"]
+ infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+ myebuild = settings["EBUILD"]
+ mydbapi = root_config.trees[self.tree].dbapi
+ vartree = root_config.trees["vartree"]
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ logfile = settings.get('PORTAGE_LOG_FILE')
+
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
+ background=background, blockers=self.find_blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=self.ldpath_mtimes, logfile=logfile)
+
+ self._start_task(merge_task, self._merge_exit)
+
+ def _merge_exit(self, merge_task):
+ if self._final_exit(merge_task) != os.EX_OK:
+ self.exit_hook(self)
+ self.wait()
+ return
+
+ pkg = self.pkg
+ self.world_atom(pkg)
+ pkg_count = self.pkg_count
+ pkg_path = self.pkg_path
+ logger = self.logger
+ if "noclean" not in self.settings.features:
+ short_msg = "emerge: (%s of %s) %s Clean Post" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log((" === (%s of %s) " + \
+ "Post-Build Cleaning (%s::%s)") % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
+ short_msg=short_msg)
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ self.exit_hook(self)
+ self.wait()
diff --git a/usr/lib/portage/pym/_emerge/EbuildMetadataPhase.py b/usr/lib/portage/pym/_emerge/EbuildMetadataPhase.py
new file mode 100644
index 0000000..bbb1ca9
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildMetadataPhase.py
@@ -0,0 +1,221 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._metadata_invalid:eapi_invalid',
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.dep import extract_unpack_dependencies
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+
+import errno
+import fcntl
+import io
+
+class EbuildMetadataPhase(SubProcess):
+
+ """
+ Asynchronous interface for the ebuild "depend" phase which is
+ used to extract metadata from the ebuild.
+ """
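+	# Rough usage sketch (illustrative; argument names follow the
+	# __slots__ declared below, but the exact driver code is assumed):
+	#
+	#     phase = EbuildMetadataPhase(cpv=cpv, ebuild_hash=ebuild_hash,
+	#         portdb=portdb, repo_path=repo_path, settings=settings,
+	#         scheduler=scheduler)
+	#     phase.start()
+	#     # after exit, phase.metadata maps auxdb keys to values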
+
+ __slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
+ "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
+ ("_eapi", "_eapi_lineno", "_raw_metadata",)
+
+ _file_names = ("ebuild",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ ebuild_path = self.ebuild_hash.location
+
+ with io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)
+
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+
+ if not parsed_eapi:
+ # An empty EAPI setting is invalid.
+ self._eapi_invalid(None)
+ self._set_returncode((self.pid, 1 << 8))
+ self._async_wait()
+ return
+
+ self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
+ if not self.eapi_supported:
+ self.metadata = {"EAPI": parsed_eapi}
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self._async_wait()
+ return
+
+ settings = self.settings
+ settings.setcpv(self.cpv)
+ settings.configdict['pkg']['EAPI'] = parsed_eapi
+
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+ master_fd = None
+ slave_fd = None
+ fd_pipes = None
+ if self.fd_pipes is not None:
+ fd_pipes = self.fd_pipes.copy()
+ else:
+ fd_pipes = {}
+
+ null_input = open('/dev/null', 'rb')
+ fd_pipes.setdefault(0, null_input.fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
+
+ # flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = os.pipe()
+
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(master_fd, fcntl.F_SETFD,
+ fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ fd_pipes[slave_fd] = slave_fd
+ settings["PORTAGE_PIPE_FD"] = str(slave_fd)
+
+ self._raw_metadata = []
+ files.ebuild = master_fd
+ self._reg_id = self.scheduler.io_add_watch(files.ebuild,
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ retval = portage.doebuild(ebuild_path, "depend",
+ settings=settings, debug=debug,
+ mydbapi=self.portdb, tree="porttree",
+ fd_pipes=fd_pipes, returnpid=True)
+ settings.pop("PORTAGE_PIPE_FD", None)
+
+ os.close(slave_fd)
+ null_input.close()
+
+ if isinstance(retval, int):
+ # doebuild failed before spawning
+ self._unregister()
+ self._set_returncode((self.pid, retval << 8))
+ self._async_wait()
+ return
+
+ self.pid = retval[0]
+
+ def _output_handler(self, fd, event):
+
+ if event & self.scheduler.IO_IN:
+ while True:
+ try:
+ self._raw_metadata.append(
+ os.read(self._files.ebuild, self._bufsize))
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN,):
+ raise
+ break
+ else:
+ if not self._raw_metadata[-1]:
+ self._unregister()
+ self.wait()
+ break
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+ # self._raw_metadata is None when _start returns
+ # early due to an unsupported EAPI
+ if self.returncode == os.EX_OK and \
+ self._raw_metadata is not None:
+ metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
+ encoding=_encodings['repo.content'],
+ errors='replace').splitlines()
+ metadata_valid = True
+ if len(portage.auxdbkeys) != len(metadata_lines):
+ # Don't trust bash's returncode if the
+ # number of lines is incorrect.
+ metadata_valid = False
+ else:
+ metadata = dict(zip(portage.auxdbkeys, metadata_lines))
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+ self.eapi_supported = \
+ portage.eapi_is_supported(metadata["EAPI"])
+ if (not metadata["EAPI"] or self.eapi_supported) and \
+ metadata["EAPI"] != parsed_eapi:
+ self._eapi_invalid(metadata)
+ metadata_valid = False
+
+ if metadata_valid:
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we don't write cache
+ # entries for unsupported EAPIs.
+ if self.eapi_supported:
+
+ if metadata.get("INHERITED", False):
+ metadata["_eclasses_"] = \
+ self.portdb.repositories.get_repo_for_location(
+ self.repo_path).eclass_db.get_eclass_data(
+ metadata["INHERITED"].split())
+ else:
+ metadata["_eclasses_"] = {}
+ metadata.pop("INHERITED", None)
+
+ if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
+ repo = self.portdb.repositories.get_name_for_location(self.repo_path)
+ unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
+ unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
+ if unpack_dependencies:
+ metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies
+
+ # If called by egencache, this cache write is
+ # undesirable when metadata-transfer is disabled.
+ if self.write_auxdb is not False:
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
+ else:
+ metadata = {"EAPI": metadata["EAPI"]}
+ self.metadata = metadata
+ else:
+ self.returncode = 1
+
+ def _eapi_invalid(self, metadata):
+ repo_name = self.portdb.getRepositoryName(self.repo_path)
+ if metadata is not None:
+ eapi_var = metadata["EAPI"]
+ else:
+ eapi_var = None
+ eapi_invalid(self, self.cpv, repo_name, self.settings,
+ eapi_var, self._eapi, self._eapi_lineno)
diff --git a/usr/lib/portage/pym/_emerge/EbuildPhase.py b/usr/lib/portage/pym/_emerge/EbuildPhase.py
new file mode 100644
index 0000000..b1f7c21
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildPhase.py
@@ -0,0 +1,382 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import io
+import sys
+import tempfile
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.CompositeTask import CompositeTask
+from portage.package.ebuild.prepare_build_dirs import _prepare_workdir
+from portage.util import writemsg
+
+try:
+ from portage.xml.metadata import MetaDataXML
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # http://bugs.python.org/issue14988
+ MetaDataXML = None
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.elog:messages@elog_messages',
+ 'portage.package.ebuild.doebuild:_check_build_log,' + \
+ '_post_phase_cmds,_post_phase_userpriv_perms,' + \
+ '_post_src_install_soname_symlinks,' + \
+ '_post_src_install_uid_fix,_postinst_bsdflags,' + \
+ '_post_src_install_write_metadata,' + \
+ '_preinst_bsdflags'
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+class EbuildPhase(CompositeTask):
+
+ __slots__ = ("actionmap", "fd_pipes", "phase", "settings") + \
+ ("_ebuild_lock",)
+
+ # FEATURES displayed prior to setup phase
+ _features_display = (
+ "ccache", "compressdebug", "distcc", "distcc-pump", "fakeroot",
+ "installsources", "keeptemp", "keepwork", "nostrip",
+ "preserve-libs", "sandbox", "selinux", "sesandbox",
+ "splitdebug", "suidctl", "test", "userpriv",
+ "usersandbox"
+ )
+
+ # Locked phases
+ _locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
+ def _start(self):
+
+ need_builddir = self.phase not in EbuildProcess._phases_without_builddir
+
+ if need_builddir:
+ phase_completed_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ ".%sed" % self.phase.rstrip('e'))
+ if not os.path.exists(phase_completed_file):
+ # If the phase is really going to run then we want
+ # to eliminate any stale elog messages that may
+ # exist from a previous run.
+ try:
+ os.unlink(os.path.join(self.settings['T'],
+ 'logging', self.phase))
+ except OSError:
+ pass
+
+ if self.phase in ('nofetch', 'pretend', 'setup'):
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ maint_str = ""
+ upstr_str = ""
+ metadata_xml_path = os.path.join(os.path.dirname(self.settings['EBUILD']), "metadata.xml")
+ if MetaDataXML is not None and os.path.isfile(metadata_xml_path):
+ herds_path = os.path.join(self.settings['PORTDIR'],
+ 'metadata/herds.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, herds_path)
+ maint_str = metadata_xml.format_maintainer_string()
+ upstr_str = metadata_xml.format_upstream_string()
+ except SyntaxError:
+ maint_str = "<invalid metadata.xml>"
+
+ msg = []
+ msg.append("Package: %s" % self.settings.mycpv)
+ if self.settings.get('PORTAGE_REPO_NAME'):
+ msg.append("Repository: %s" % self.settings['PORTAGE_REPO_NAME'])
+ if maint_str:
+ msg.append("Maintainer: %s" % maint_str)
+ if upstr_str:
+ msg.append("Upstream: %s" % upstr_str)
+
+ msg.append("USE: %s" % use)
+ relevant_features = []
+ enabled_features = self.settings.features
+ for x in self._features_display:
+ if x in enabled_features:
+ relevant_features.append(x)
+ if relevant_features:
+ msg.append("FEATURES: %s" % " ".join(relevant_features))
+
+ # Force background=True for this header since it's intended
+ # for the log and it doesn't necessarily need to be visible
+ # elsewhere.
+ self._elog('einfo', msg, background=True)
+
+ if self.phase == 'package':
+ if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
+ self.settings['PORTAGE_BINPKG_TMPFILE'] = \
+ os.path.join(self.settings['PKGDIR'],
+ self.settings['CATEGORY'], self.settings['PF']) + '.tbz2'
+
+ if self.phase in ("pretend", "prerm"):
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+ if env_extractor.saved_env_exists():
+ self._start_task(env_extractor, self._env_extractor_exit)
+ return
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+
+ self._start_lock()
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self.wait()
+ return
+
+ self._start_lock()
+
+ def _start_lock(self):
+ if (self.phase in self._locked_phases and
+ "ebuild-locks" in self.settings.features):
+ eroot = self.settings["EROOT"]
+ lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
+ if os.access(os.path.dirname(lock_path), os.W_OK):
+ self._ebuild_lock = AsynchronousLock(path=lock_path,
+ scheduler=self.scheduler)
+ self._start_task(self._ebuild_lock, self._lock_exit)
+ return
+
+ self._start_ebuild()
+
+ def _lock_exit(self, ebuild_lock):
+ if self._default_exit(ebuild_lock) != os.EX_OK:
+ self.wait()
+ return
+ self._start_ebuild()
+
+ def _get_log_path(self):
+ # Don't open the log file during the clean phase since the
+ # open file can result in an nfs lock on $T/build.log which
+ # prevents the clean phase from removing $T.
+ logfile = None
+ if self.phase not in ("clean", "cleanrm") and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return logfile
+
+ def _start_ebuild(self):
+
+ fd_pipes = self.fd_pipes
+ if fd_pipes is None:
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.__stderr__.fileno()}
+
+ ebuild_process = EbuildProcess(actionmap=self.actionmap,
+ background=self.background, fd_pipes=fd_pipes,
+ logfile=self._get_log_path(), phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(ebuild_process, self._ebuild_exit)
+
+ def _ebuild_exit(self, ebuild_process):
+
+ if self._ebuild_lock is not None:
+ self._ebuild_lock.unlock()
+ self._ebuild_lock = None
+
+ fail = False
+ if self._default_exit(ebuild_process) != os.EX_OK:
+ if self.phase == "test" and \
+ "test-fail-continue" in self.settings.features:
+ # mark test phase as complete (bug #452030)
+ try:
+ open(_unicode_encode(os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], ".tested"),
+ encoding=_encodings['fs'], errors='strict'),
+ 'wb').close()
+ except OSError:
+ pass
+ else:
+ fail = True
+
+ if not fail:
+ self.returncode = None
+
+ logfile = self._get_log_path()
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _check_build_log(self.settings, out=out)
+ msg = out.getvalue()
+ self.scheduler.output(msg, log_path=logfile)
+
+ if fail:
+ self._die_hooks()
+ return
+
+ settings = self.settings
+ _post_phase_userpriv_perms(settings)
+
+ if self.phase == "unpack":
+ # Bump WORKDIR timestamp, in case tar gave it a timestamp
+ # that will interfere with distfiles / WORKDIR timestamp
+ # comparisons as reported in bug #332217. Also, fix
+ # ownership since tar can change that too.
+ os.utime(settings["WORKDIR"], None)
+ _prepare_workdir(settings)
+ elif self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_write_metadata(settings)
+ _post_src_install_uid_fix(settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=logfile)
+ elif self.phase == "preinst":
+ _preinst_bsdflags(settings)
+ elif self.phase == "postinst":
+ _postinst_bsdflags(settings)
+
+ post_phase_cmds = _post_phase_cmds.get(self.phase)
+ if post_phase_cmds is not None:
+ if logfile is not None and self.phase in ("install",):
+ # Log to a temporary file, since the code we are running
+ # reads PORTAGE_LOG_FILE for QA checks, and we want to
+ # avoid annoying "gzip: unexpected end of file" messages
+ # when FEATURES=compress-build-logs is enabled.
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ post_phase = MiscFunctionsProcess(background=self.background,
+ commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ logfile=logfile, phase=self.phase, scheduler=self.scheduler,
+ settings=settings)
+ self._start_task(post_phase, self._post_phase_exit)
+ return
+
+ # this point is not reachable if there was a failure and
+ # we returned for die_hooks above, so returncode must
+ # indicate success (especially if ebuild_process.returncode
+ # is unsuccessful and test-fail-continue came into play)
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _post_phase_exit(self, post_phase):
+
+ self._assert_current(post_phase)
+
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ if post_phase.logfile is not None and \
+ post_phase.logfile != log_path:
+ # We were logging to a temp file (see above), so append
+ # temp file to main log and remove temp file.
+ self._append_temp_log(post_phase.logfile, log_path)
+
+ if self._final_exit(post_phase) != os.EX_OK:
+ writemsg("!!! post %s failed; exiting.\n" % self.phase,
+ noiselevel=-1)
+ self._die_hooks()
+ return
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_soname_symlinks(self.settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=log_path)
+
+ self._current_task = None
+ self.wait()
+ return
+
+ def _append_temp_log(self, temp_log, log_path):
+
+ temp_file = open(_unicode_encode(temp_log,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+
+ log_file, log_file_real = self._open_log(log_path)
+
+ for line in temp_file:
+ log_file.write(line)
+
+ temp_file.close()
+ log_file.close()
+ if log_file_real is not log_file:
+ log_file_real.close()
+ os.unlink(temp_log)
+
+ def _open_log(self, log_path):
+
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+
+ if log_path.endswith('.gz'):
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ return (f, f_real)
+
+ def _die_hooks(self):
+ self.returncode = None
+ phase = 'die_hooks'
+ die_hooks = MiscFunctionsProcess(background=self.background,
+ commands=[phase], phase=phase, logfile=self._get_log_path(),
+ fd_pipes=self.fd_pipes, scheduler=self.scheduler,
+ settings=self.settings)
+ self._start_task(die_hooks, self._die_hooks_exit)
+
+ def _die_hooks_exit(self, die_hooks):
+ if self.phase != 'clean' and \
+ 'noclean' not in self.settings.features and \
+ 'fail-clean' in self.settings.features:
+ self._default_exit(die_hooks)
+ self._fail_clean()
+ return
+ self._final_exit(die_hooks)
+ self.returncode = 1
+ self.wait()
+
+ def _fail_clean(self):
+ self.returncode = None
+ portage.elog.elog_process(self.settings.mycpv, self.settings)
+ phase = "clean"
+ clean_phase = EbuildPhase(background=self.background,
+ fd_pipes=self.fd_pipes, phase=phase, scheduler=self.scheduler,
+ settings=self.settings)
+ self._start_task(clean_phase, self._fail_clean_exit)
+ return
+
+ def _fail_clean_exit(self, clean_phase):
+ self._final_exit(clean_phase)
+ self.returncode = 1
+ self.wait()
+
+ def _elog(self, elog_funcname, lines, background=None):
+ if background is None:
+ background = self.background
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path,
+ background=background)
diff --git a/usr/lib/portage/pym/_emerge/EbuildProcess.py b/usr/lib/portage/pym/_emerge/EbuildProcess.py
new file mode 100644
index 0000000..333ad7b
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildProcess.py
@@ -0,0 +1,27 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:_doebuild_spawn,_spawn_actionmap'
+)
+
+class EbuildProcess(AbstractEbuildProcess):
+
+ __slots__ = ('actionmap',)
+
+ def _spawn(self, args, **kwargs):
+
+ actionmap = self.actionmap
+ if actionmap is None:
+ actionmap = _spawn_actionmap(self.settings)
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ try:
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
+ finally:
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/usr/lib/portage/pym/_emerge/EbuildSpawnProcess.py b/usr/lib/portage/pym/_emerge/EbuildSpawnProcess.py
new file mode 100644
index 0000000..26d26fc
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/EbuildSpawnProcess.py
@@ -0,0 +1,22 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+
+class EbuildSpawnProcess(AbstractEbuildProcess):
+ """
+ Used by doebuild.spawn() to manage the spawned process.
+ """
+ _spawn_kwarg_names = AbstractEbuildProcess._spawn_kwarg_names + \
+ ('fakeroot_state',)
+
+ __slots__ = ('fakeroot_state', 'spawn_func')
+
+ def _spawn(self, args, **kwargs):
+
+ env = self.settings.environ()
+
+ if self._dummy_pipe_fd is not None:
+ env["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ return self.spawn_func(args, env=env, **kwargs)
diff --git a/usr/lib/portage/pym/_emerge/FakeVartree.py b/usr/lib/portage/pym/_emerge/FakeVartree.py
new file mode 100644
index 0000000..254f667
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/FakeVartree.py
@@ -0,0 +1,329 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+import warnings
+
+import portage
+from portage import os
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from portage.const import VDB_PATH
+from portage.dbapi.vartree import vartree
+from portage.dep._slot_operator import find_built_slot_operator_atoms
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData, InvalidDependString
+from portage.update import grab_updates, parse_updates, update_dbentries
+from portage.versions import _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class FakeVardbapi(PackageVirtualDbapi):
+ """
+ Implements the vardbapi.getpath() method which is used in error handling
+ code for the Package class and vartree.get_provide().
+ """
+ def getpath(self, cpv, filename=None):
+ path = os.path.join(self.settings['EROOT'], VDB_PATH, cpv)
+ if filename is not None:
+			path = os.path.join(path, filename)
+ return path
+
+class _DynamicDepsNotApplicable(Exception):
+ pass
+
+class FakeVartree(vartree):
+	"""This implements an in-memory copy of a vartree instance that provides
+	all the interfaces required for use by the depgraph. The vardb is locked
+	during the constructor call just long enough to read a copy of the
+	installed package information. This allows the depgraph to do its
+	dependency calculations without holding a lock on the vardb. It also
+	allows things like vardb global updates to be done in memory so that the
+	user doesn't necessarily need write access to the vardb in cases where
+	global updates are necessary (updates are performed when necessary if there
+	is not a matching ebuild in the tree). Instances of this class are not
+	populated until the sync() method is called."""
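+	# Minimal usage sketch (illustrative): instances are empty until
+	# sync() is called, so a depgraph-style consumer would do roughly:
+	#
+	#     fake_vartree = FakeVartree(root_config, pkg_cache=pkg_cache)
+	#     fake_vartree.sync()
+	#     installed = fake_vartree.dbapi.match(atom)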
+ def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
+ dynamic_deps=True, ignore_built_slot_operator_deps=False):
+ self._root_config = root_config
+ self._dynamic_deps = dynamic_deps
+ self._ignore_built_slot_operator_deps = ignore_built_slot_operator_deps
+ if pkg_root_config is None:
+ pkg_root_config = self._root_config
+ self._pkg_root_config = pkg_root_config
+ if pkg_cache is None:
+ pkg_cache = {}
+ real_vartree = root_config.trees["vartree"]
+ self._real_vardb = real_vartree.dbapi
+ portdb = root_config.trees["porttree"].dbapi
+ self.settings = real_vartree.settings
+ mykeys = list(real_vartree.dbapi._aux_cache_keys)
+ if "_mtime_" not in mykeys:
+ mykeys.append("_mtime_")
+ self._db_keys = mykeys
+ self._pkg_cache = pkg_cache
+ self.dbapi = FakeVardbapi(real_vartree.settings)
+ self.dbapi._aux_cache_keys = set(self._db_keys)
+
+ # Initialize variables needed for lazy cache pulls of the live ebuild
+ # metadata. This ensures that the vardb lock is released ASAP, without
+ # being delayed in case cache generation is triggered.
+ self._aux_get = self.dbapi.aux_get
+ self._match = self.dbapi.match
+ if dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
+ self.dbapi.match = self._match_wrapper
+ self._aux_get_history = set()
+ self._portdb_keys = Package._dep_keys + ("EAPI", "KEYWORDS")
+ self._portdb = portdb
+ self._global_updates = None
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "_emerge.FakeVartree.FakeVartree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def _match_wrapper(self, cpv, use_cache=1):
+ """
+ Make sure the metadata in Package instances gets updated for any
+ cpv that is returned from a match() call, since the metadata can
+ be accessed directly from the Package instance instead of via
+ aux_get().
+ """
+ matches = self._match(cpv, use_cache=use_cache)
+ for cpv in matches:
+ if cpv in self._aux_get_history:
+ continue
+ self._aux_get_wrapper(cpv, [])
+ return matches
+
+ def _aux_get_wrapper(self, cpv, wants, myrepo=None):
+ if cpv in self._aux_get_history:
+ return self._aux_get(cpv, wants)
+ self._aux_get_history.add(cpv)
+
+ # This raises a KeyError to the caller if appropriate.
+ pkg = self.dbapi._cpv_map[cpv]
+
+ try:
+ live_metadata = dict(zip(self._portdb_keys,
+ self._portdb.aux_get(cpv, self._portdb_keys,
+ myrepo=pkg.repo)))
+ except (KeyError, portage.exception.PortageException):
+ live_metadata = None
+
+ self._apply_dynamic_deps(pkg, live_metadata)
+
+ return self._aux_get(cpv, wants)
+
+ def _apply_dynamic_deps(self, pkg, live_metadata):
+
+ try:
+ if live_metadata is None:
+ raise _DynamicDepsNotApplicable()
+ # Use the metadata from the installed instance if the EAPI
+ # of either instance is unsupported, since if the installed
+ # instance has an unsupported or corrupt EAPI then we don't
+ # want to attempt to do complex operations such as execute
+ # pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
+ # are supported then go ahead and use the live_metadata, in
+ # order to respect dep updates without revision bump or EAPI
+ # bump, as in bug #368725.
+ if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
+ portage.eapi_is_supported(pkg.eapi)):
+ raise _DynamicDepsNotApplicable()
+
+ # preserve built slot/sub-slot := operator deps
+ built_slot_operator_atoms = None
+ if not self._ignore_built_slot_operator_deps and \
+ _get_eapi_attrs(pkg.eapi).slot_operator:
+ try:
+ built_slot_operator_atoms = \
+ find_built_slot_operator_atoms(pkg)
+ except InvalidDependString:
+ pass
+
+ if built_slot_operator_atoms:
+ live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
+ if not live_eapi_attrs.slot_operator:
+ raise _DynamicDepsNotApplicable()
+ for k, v in built_slot_operator_atoms.items():
+ live_metadata[k] += (" " +
+ " ".join(_unicode(atom) for atom in v))
+
+ self.dbapi.aux_update(pkg.cpv, live_metadata)
+ except _DynamicDepsNotApplicable:
+ if self._global_updates is None:
+ self._global_updates = \
+ grab_global_updates(self._portdb)
+
+ # Bypass _aux_get_wrapper, since calling that
+ # here would trigger infinite recursion.
+ aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
+ aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
+ perform_global_updates(
+ pkg.cpv, aux_dict, self.dbapi, self._global_updates)
+
+ def dynamic_deps_preload(self, pkg, metadata):
+ if metadata is not None:
+ metadata = dict((k, metadata.get(k, ''))
+ for k in self._portdb_keys)
+ self._apply_dynamic_deps(pkg, metadata)
+ self._aux_get_history.add(pkg.cpv)
+
+ def cpv_discard(self, pkg):
+ """
+ Discard a package from the fake vardb if it exists.
+ """
+ old_pkg = self.dbapi.get(pkg)
+ if old_pkg is not None:
+ self.dbapi.cpv_remove(old_pkg)
+ self._pkg_cache.pop(old_pkg, None)
+ self._aux_get_history.discard(old_pkg.cpv)
+
+ def sync(self, acquire_lock=1):
+ """
+ Call this method to synchronize state with the real vardb
+ after one or more packages may have been installed or
+ uninstalled.
+ """
+ locked = False
+ try:
+ if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
+ self._real_vardb.lock()
+ locked = True
+ self._sync()
+ finally:
+ if locked:
+ self._real_vardb.unlock()
+
+ # Populate the old-style virtuals using the cached values.
+ # Skip the aux_get wrapper here, to avoid unwanted
+ # cache generation.
+ try:
+ self.dbapi.aux_get = self._aux_get
+ self.settings._populate_treeVirtuals_if_needed(self)
+ finally:
+ if self._dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
+
+ def _sync(self):
+
+ real_vardb = self._root_config.trees["vartree"].dbapi
+ current_cpv_set = frozenset(real_vardb.cpv_all())
+ pkg_vardb = self.dbapi
+
+ # Remove any packages that have been uninstalled.
+ for pkg in list(pkg_vardb):
+ if pkg.cpv not in current_cpv_set:
+ self.cpv_discard(pkg)
+
+ # Validate counters and timestamps.
+ slot_counters = {}
+ root_config = self._pkg_root_config
+ validation_keys = ["COUNTER", "_mtime_"]
+ for cpv in current_cpv_set:
+
+ pkg_hash_key = Package._gen_hash_key(cpv=cpv,
+ installed=True, root_config=root_config,
+ type_name="installed")
+ pkg = pkg_vardb.get(pkg_hash_key)
+ if pkg is not None:
+ counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+ try:
+ counter = long(counter)
+ except ValueError:
+ counter = 0
+
+ if counter != pkg.counter or \
+ mtime != pkg.mtime:
+ self.cpv_discard(pkg)
+ pkg = None
+
+ if pkg is None:
+ pkg = self._pkg(cpv)
+
+ other_counter = slot_counters.get(pkg.slot_atom)
+ if other_counter is not None:
+ if other_counter > pkg.counter:
+ continue
+
+ slot_counters[pkg.slot_atom] = pkg.counter
+ pkg_vardb.cpv_inject(pkg)
+
+ real_vardb.flush_cache()
+
+ def _pkg(self, cpv):
+ """
+ The RootConfig instance that will become the Package.root_config
+ attribute can be overridden by the FakeVartree pkg_root_config
+ constructor argument, since we want to be consistent with the
+ depgraph._pkg() method which uses a specially optimized
+ RootConfig that has a FakeVartree instead of a real vartree.
+ """
+ pkg = Package(cpv=cpv, built=True, installed=True,
+ metadata=zip(self._db_keys,
+ self._real_vardb.aux_get(cpv, self._db_keys)),
+ root_config=self._pkg_root_config,
+ type_name="installed")
+
+ self._pkg_cache[pkg] = pkg
+ return pkg
+
+def grab_global_updates(portdb):
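+ # Returns a dict mapping repo name -> list of update commands parsed
+ # from profiles/updates, with the master repo's commands aliased under
+ # 'DEFAULT'. Illustrative shape (the exact command layout is whatever
+ # parse_updates() produces):
+ #
+ #     {'gentoo': [['move', Atom('x11-libs/qt'), Atom('dev-qt/qtcore')]],
+ #      'DEFAULT': [...]}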
+ retupdates = {}
+
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+ commands, errors = parse_updates(mycontent)
+ upd_commands.extend(commands)
+ retupdates[repo_name] = upd_commands
+
+ master_repo = portdb.repositories.mainRepo()
+ if master_repo is not None:
+ master_repo = master_repo.name
+ if master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[master_repo]
+
+ return retupdates
+
+def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
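+ # Illustrative flow (assumed data shapes): a 'move' command such as
+ # ['move', Atom('x11-libs/qt'), Atom('dev-qt/qtcore')] causes
+ # update_dbentries() to rewrite matching dep strings from aux_dict,
+ # and any resulting changes are written back via mydb.aux_update().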
+ try:
+ pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
+ except InvalidData:
+ return
+ aux_dict = dict((k, aux_dict[k]) for k in Package._dep_keys)
+ try:
+ mycommands = myupdates[pkg.repo]
+ except KeyError:
+ try:
+ mycommands = myupdates['DEFAULT']
+ except KeyError:
+ return
+
+ if not mycommands:
+ return
+
+ updates = update_dbentries(mycommands, aux_dict, parent=pkg)
+ if updates:
+ mydb.aux_update(mycpv, updates)
diff --git a/usr/lib/portage/pym/_emerge/FifoIpcDaemon.py b/usr/lib/portage/pym/_emerge/FifoIpcDaemon.py
new file mode 100644
index 0000000..7468de5
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/FifoIpcDaemon.py
@@ -0,0 +1,109 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+ __slots__ = ("input_fifo", "output_fifo",) + \
+ ("_files", "_reg_id",)
+
+ _file_names = ("pipe_in",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ self._files = self._files_dict()
+
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles.
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
+ self._files.pipe_in,
+ self._registered_events, self._input_handler)
+
+ self._registered = True
+
+ def _reopen_input(self):
+ """
+ Re-open the input stream, in order to suppress
+ POLLHUP events (bug #339976).
+ """
+ self.scheduler.source_remove(self._reg_id)
+ os.close(self._files.pipe_in)
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
+ self._files.pipe_in,
+ self._registered_events, self._input_handler)
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+ self._unregister()
+ # notify exit listeners
+ self.wait()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def _input_handler(self, fd, event):
+ raise NotImplementedError(self)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ os.close(f)
+ self._files = None
diff --git a/usr/lib/portage/pym/_emerge/JobStatusDisplay.py b/usr/lib/portage/pym/_emerge/JobStatusDisplay.py
new file mode 100644
index 0000000..9f6f09b
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/JobStatusDisplay.py
@@ -0,0 +1,303 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import formatter
+import io
+import sys
+import time
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.output import xtermTitle
+
+from _emerge.getloadavg import getloadavg
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class JobStatusDisplay(object):
+
+ _bound_properties = ("curval", "failed", "running")
+
+ # Don't update the display unless at least this much
+ # time has passed, in units of seconds.
+ _min_display_latency = 2
+
+ _default_term_codes = {
+ 'cr' : '\r',
+ 'el' : '\x1b[K',
+ 'nel' : '\n',
+ }
+
+ _termcap_name_map = {
+ 'carriage_return' : 'cr',
+ 'clr_eol' : 'el',
+ 'newline' : 'nel',
+ }
+
+ def __init__(self, quiet=False, xterm_titles=True):
+ object.__setattr__(self, "quiet", quiet)
+ object.__setattr__(self, "xterm_titles", xterm_titles)
+ object.__setattr__(self, "maxval", 0)
+ object.__setattr__(self, "merges", 0)
+ object.__setattr__(self, "_changed", False)
+ object.__setattr__(self, "_displayed", False)
+ object.__setattr__(self, "_last_display_time", 0)
+
+ self.reset()
+
+ isatty = os.environ.get('TERM') != 'dumb' and \
+ hasattr(self.out, 'isatty') and \
+ self.out.isatty()
+ object.__setattr__(self, "_isatty", isatty)
+ if not isatty or not self._init_term():
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ term_codes[k] = self._default_term_codes[capname]
+ object.__setattr__(self, "_term_codes", term_codes)
+ encoding = sys.getdefaultencoding()
+ for k, v in self._term_codes.items():
+ if not isinstance(v, basestring):
+ self._term_codes[k] = v.decode(encoding, 'replace')
+
+ if self._isatty:
+ width = portage.output.get_term_size()[1]
+ else:
+ width = 80
+ self._set_width(width)
+
+ def _set_width(self, width):
+ if width == getattr(self, 'width', None):
+ return
+ if width <= 0 or width > 80:
+ width = 80
+ object.__setattr__(self, "width", width)
+ object.__setattr__(self, "_jobs_column_width", width - 32)
+
+ @property
+ def out(self):
+ """Use a lazy reference to sys.stdout, in case the API consumer has
+ temporarily overridden stdout."""
+ return sys.stdout
+
+ def _write(self, s):
+ # avoid potential UnicodeEncodeError
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ out = self.out
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+ out.write(s)
+ out.flush()
+
+ def _init_term(self):
+ """
+ Initialize term control codes.
+ @rtype: bool
+ @return: True if term codes were successfully initialized,
+ False otherwise.
+ """
+
+ term_type = os.environ.get("TERM", "").strip()
+ if not term_type:
+ return False
+ tigetstr = None
+
+ try:
+ import curses
+ try:
+ curses.setupterm(term_type, self.out.fileno())
+ tigetstr = curses.tigetstr
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ if tigetstr is None:
+ return False
+
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ # Use _native_string for PyPy compat (bug #470258).
+ code = tigetstr(portage._native_string(capname))
+ if code is None:
+ code = self._default_term_codes[capname]
+ term_codes[k] = code
+ object.__setattr__(self, "_term_codes", term_codes)
+ return True
+
+ def _format_msg(self, msg):
+ return ">>> %s" % msg
+
+ def _erase(self):
+ self._write(
+ self._term_codes['carriage_return'] + \
+ self._term_codes['clr_eol'])
+ self._displayed = False
+
+ def _display(self, line):
+ self._write(line)
+ self._displayed = True
+
+ def _update(self, msg):
+
+ if not self._isatty:
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = True
+ return
+
+ if self._displayed:
+ self._erase()
+
+ self._display(self._format_msg(msg))
+
+ def displayMessage(self, msg):
+
+ was_displayed = self._displayed
+
+ if self._isatty and self._displayed:
+ self._erase()
+
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = False
+
+ if was_displayed:
+ self._changed = True
+ self.display()
+
+ def reset(self):
+ self.maxval = 0
+ self.merges = 0
+ for name in self._bound_properties:
+ object.__setattr__(self, name, 0)
+
+ if self._displayed:
+ self._write(self._term_codes['newline'])
+ self._displayed = False
+
+ def __setattr__(self, name, value):
+ old_value = getattr(self, name)
+ if value == old_value:
+ return
+ object.__setattr__(self, name, value)
+ if name in self._bound_properties:
+ self._property_change(name, old_value, value)
+
+ def _property_change(self, name, old_value, new_value):
+ self._changed = True
+ self.display()
+
+ def _load_avg_str(self):
+ try:
+ avg = getloadavg()
+ except OSError:
+ return 'unknown'
+
+ max_avg = max(avg)
+
+ if max_avg < 10:
+ digits = 2
+ elif max_avg < 100:
+ digits = 1
+ else:
+ digits = 0
+
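+ # e.g. (illustrative): (0.42, 1.5, 2.25) -> "0.42, 1.50, 2.25", while
+ # (25.0, 12.0, 8.0) -> "25.0, 12.0, 8.0" once the maximum reaches 10.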
+ return ", ".join(("%%.%df" % digits ) % x for x in avg)
+
+ def display(self):
+ """
+ Display status on stdout, but only if something has
+ changed since the last call. This always returns True,
+ for continuous scheduling via timeout_add.
+ """
+
+ if self.quiet:
+ return True
+
+ current_time = time.time()
+ time_delta = current_time - self._last_display_time
+ if self._displayed and \
+ not self._changed:
+ if not self._isatty:
+ return True
+ if time_delta < self._min_display_latency:
+ return True
+
+ self._last_display_time = current_time
+ self._changed = False
+ self._display_status()
+ return True
+
+ def _display_status(self):
+ # Don't use len(self._completed_tasks) here since that also
+ # can include uninstall tasks.
+ curval_str = "%s" % (self.curval,)
+ maxval_str = "%s" % (self.maxval,)
+ running_str = "%s" % (self.running,)
+ failed_str = "%s" % (self.failed,)
+ load_avg_str = self._load_avg_str()
+
+ color_output = io.StringIO()
+ plain_output = io.StringIO()
+ style_file = portage.output.ConsoleStyleFile(color_output)
+ style_file.write_listener = plain_output
+ style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
+ style_writer.style_listener = style_file.new_styles
+ f = formatter.AbstractFormatter(style_writer)
+
+ number_style = "INFORM"
+ f.add_literal_data("Jobs: ")
+ f.push_style(number_style)
+ f.add_literal_data(curval_str)
+ f.pop_style()
+ f.add_literal_data(" of ")
+ f.push_style(number_style)
+ f.add_literal_data(maxval_str)
+ f.pop_style()
+ f.add_literal_data(" complete")
+
+ if self.running:
+ f.add_literal_data(", ")
+ f.push_style(number_style)
+ f.add_literal_data(running_str)
+ f.pop_style()
+ f.add_literal_data(" running")
+
+ if self.failed:
+ f.add_literal_data(", ")
+ f.push_style(number_style)
+ f.add_literal_data(failed_str)
+ f.pop_style()
+ f.add_literal_data(" failed")
+
+ padding = self._jobs_column_width - len(plain_output.getvalue())
+ if padding > 0:
+ f.add_literal_data(padding * " ")
+
+ f.add_literal_data("Load avg: ")
+ f.add_literal_data(load_avg_str)
+
+ # Truncate to fit width, to avoid making the terminal scroll if the
+ # line overflows (happens when the load average is large).
+ plain_output = plain_output.getvalue()
+ if self._isatty and len(plain_output) > self.width:
+ # Use plain_output here since it's easier to truncate
+ # properly than the color output which contains console
+ # color codes.
+ self._update(plain_output[:self.width])
+ else:
+ self._update(color_output.getvalue())
+
+ if self.xterm_titles:
+ # If the HOSTNAME variable is exported, include it
+ # in the xterm title, just like emergelog() does.
+ # See bug #390699.
+ title_str = " ".join(plain_output.split())
+ hostname = os.environ.get("HOSTNAME")
+ if hostname is not None:
+ title_str = "%s: %s" % (hostname, title_str)
+ xtermTitle(title_str)
diff --git a/usr/lib/portage/pym/_emerge/MergeListItem.py b/usr/lib/portage/pym/_emerge/MergeListItem.py
new file mode 100644
index 0000000..938f801
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/MergeListItem.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.dep import _repo_separator
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+ """
+ TODO: For parallel scheduling, everything here needs asynchronous
+ execution support (start, poll, and wait methods).
+ """
+
+ __slots__ = ("args_set",
+ "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+ "find_blockers", "logger", "mtimedb", "pkg",
+ "pkg_count", "pkg_to_replace", "prefetcher",
+ "settings", "statusMessage", "world_atom") + \
+ ("_install_task",)
+
+ def _start(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+
+ if pkg.installed:
+ # uninstall, executed by self.merge()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ args_set = self.args_set
+ find_blockers = self.find_blockers
+ logger = self.logger
+ mtimedb = self.mtimedb
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ action_desc = "Emerging"
+ preposition = "for"
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
+ action_desc += " binary"
+
+ if build_opts.fetchonly:
+ action_desc = "Fetching"
+
+ msg = "%s (%s of %s) %s" % \
+ (action_desc,
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
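+ # Illustrative plain-text result (color codes omitted):
+ # "Emerging (1 of 5) sys-apps/sed-4.2.2::gentoo"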
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not build_opts.pretend:
+ self.statusMessage(msg)
+ logger.log(" >>> emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ if pkg.type_name == "ebuild":
+
+ build = EbuildBuild(args_set=args_set,
+ background=self.background,
+ config_pool=self.config_pool,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, scheduler=scheduler,
+ settings=settings, world_atom=world_atom)
+
+ self._install_task = build
+ self._start_task(build, self._default_final_exit)
+ return
+
+ elif pkg.type_name == "binary":
+
+ binpkg = Binpkg(background=self.background,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, settings=settings,
+ scheduler=scheduler, world_atom=world_atom)
+
+ self._install_task = binpkg
+ self._start_task(binpkg, self._default_final_exit)
+ return
+
+ def create_install_task(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+ mtimedb = self.mtimedb
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ if pkg.installed:
+ if not (build_opts.buildpkgonly or \
+ build_opts.fetchonly or build_opts.pretend):
+
+ task = PackageUninstall(background=self.background,
+ ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+ pkg=pkg, scheduler=scheduler, settings=settings,
+ world_atom=world_atom)
+
+ else:
+ task = AsynchronousTask()
+
+ elif build_opts.fetchonly or \
+ build_opts.buildpkgonly:
+ task = AsynchronousTask()
+ else:
+ task = self._install_task.create_install_task()
+
+ return task
diff --git a/usr/lib/portage/pym/_emerge/MetadataRegen.py b/usr/lib/portage/pym/_emerge/MetadataRegen.py
new file mode 100644
index 0000000..d92b6a0
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/MetadataRegen.py
@@ -0,0 +1,154 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from portage.cache.cache_errors import CacheError
+from portage.util._async.AsyncScheduler import AsyncScheduler
+
+class MetadataRegen(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None, consumer=None,
+ write_auxdb=True, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._portdb = portdb
+ self._write_auxdb = write_auxdb
+ self._global_cleanse = False
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = True
+ self._cp_iter = cp_iter
+ self._consumer = consumer
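+ # The consumer callback (if any) is invoked as
+ # consumer(cpv, repo_path, metadata, ebuild_hash, eapi_supported);
+ # metadata is None when processing failed (see _task_exit below).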
+
+ self._valid_pkgs = set()
+ self._cp_set = set()
+ self._process_iter = self._iter_metadata_processes()
+ self._running_tasks = set()
+
+ def _next_task(self):
+ return next(self._process_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding sooner and
+ # to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_metadata_processes(self):
+ portdb = self._portdb
+ valid_pkgs = self._valid_pkgs
+ cp_set = self._cp_set
+ consumer = self._consumer
+
+ portage.writemsg_stdout("Regenerating cache entries...\n")
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ cp_set.add(cp)
+ portage.writemsg_stdout("Processing %s\n" % cp)
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ repo = portdb.repositories.get_repo_for_location(mytree)
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ for cpv in cpv_list:
+ if self._terminated.is_set():
+ break
+ valid_pkgs.add(cpv)
+ ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s%s%s'" % (cpv, _repo_separator, repo.name))
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ if consumer is not None:
+ consumer(cpv, repo_path, metadata, ebuild_hash, True)
+ continue
+
+ yield EbuildMetadataPhase(cpv=cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings,
+ write_auxdb=self._write_auxdb)
+
+ def _wait(self):
+
+ AsyncScheduler._wait(self)
+
+ portdb = self._portdb
+ dead_nodes = {}
+
+ self._termination_check()
+ if self._terminated_tasks:
+ portdb.flush_cache()
+ self.returncode = self._cancelled_returncode
+ return self.returncode
+
+ if self._global_cleanse:
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(portdb.auxdb[mytree])
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+ else:
+ cp_set = self._cp_set
+ cpv_getkey = portage.cpv_getkey
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(cpv for cpv in \
+ portdb.auxdb[mytree] \
+ if cpv_getkey(cpv) in cp_set)
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+
+ if dead_nodes:
+ for y in self._valid_pkgs:
+ for mytree in portdb.porttrees:
+ if portdb.findname2(y, mytree=mytree)[0]:
+ dead_nodes[mytree].discard(y)
+
+ for mytree, nodes in dead_nodes.items():
+ auxdb = portdb.auxdb[mytree]
+ for y in nodes:
+ try:
+ del auxdb[y]
+ except (KeyError, CacheError):
+ pass
+
+ portdb.flush_cache()
+ return self.returncode
+
+ def _task_exit(self, metadata_process):
+
+ if metadata_process.returncode != os.EX_OK:
+ self._valid_pkgs.discard(metadata_process.cpv)
+ if not self._terminated_tasks:
+ portage.writemsg("Error processing %s, continuing...\n" % \
+ (metadata_process.cpv,), noiselevel=-1)
+
+ if self._consumer is not None:
+ # On failure, still notify the consumer (in this case the metadata
+ # argument is None).
+ self._consumer(metadata_process.cpv,
+ metadata_process.repo_path,
+ metadata_process.metadata,
+ metadata_process.ebuild_hash,
+ metadata_process.eapi_supported)
+
+ AsyncScheduler._task_exit(self, metadata_process)
diff --git a/usr/lib/portage/pym/_emerge/MiscFunctionsProcess.py b/usr/lib/portage/pym/_emerge/MiscFunctionsProcess.py
new file mode 100644
index 0000000..b7f5892
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/MiscFunctionsProcess.py
@@ -0,0 +1,48 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:spawn'
+)
+from portage import os
+
+class MiscFunctionsProcess(AbstractEbuildProcess):
+ """
+ Spawns misc-functions.sh with an existing ebuild environment.
+ """
+
+ __slots__ = ('commands',)
+
+ def _start(self):
+ settings = self.settings
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(portage.const.MISC_SH_BINARY))
+
+ self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
+ if self.logfile is None and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ self.logfile = settings.get("PORTAGE_LOG_FILE")
+
+ AbstractEbuildProcess._start(self)
+
+ def _spawn(self, args, **kwargs):
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ if "fakeroot" in self.settings.features:
+ kwargs["fakeroot"] = True
+
+ # Temporarily unset EBUILD_PHASE so that bashrc code doesn't
+ # think this is a real phase.
+ phase_backup = self.settings.pop("EBUILD_PHASE", None)
+ try:
+ return spawn(" ".join(args), self.settings,
+ **portage._native_kwargs(kwargs))
+ finally:
+ if phase_backup is not None:
+ self.settings["EBUILD_PHASE"] = phase_backup
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/usr/lib/portage/pym/_emerge/Package.py b/usr/lib/portage/pym/_emerge/Package.py
new file mode 100644
index 0000000..bdf3b23
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Package.py
@@ -0,0 +1,857 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+from itertools import chain
+import warnings
+
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EBUILD_PHASES
+from portage.dep import Atom, check_required_use, use_reduce, \
+ paren_enclose, _slot_separator, _repo_separator
+from portage.versions import _pkg_str, _unknown_repo
+from portage.eapi import _get_eapi_attrs, eapi_has_use_aliases
+from portage.exception import InvalidDependString
+from portage.localization import _
+from _emerge.Task import Task
+from portage.const import EPREFIX
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class Package(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "onlydeps", "operation",
+ "root_config", "type_name",
+ "category", "counter", "cp", "cpv_split",
+ "inherited", "iuse", "mtime",
+ "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
+ ("_invalid", "_masks", "_metadata", "_raw_metadata", "_use",
+ "_validated_atoms", "_visible")
+
+ metadata_keys = [
+ "BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
+ "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+ "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
+ "_mtime_", "DEFINED_PHASES", "REQUIRED_USE", "EPREFIX"]
+
+ _dep_keys = ('DEPEND', 'HDEPEND', 'PDEPEND', 'RDEPEND')
+ _buildtime_keys = ('DEPEND', 'HDEPEND')
+ _runtime_keys = ('PDEPEND', 'RDEPEND')
+ _use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
+ UNKNOWN_REPO = _unknown_repo
+
+ def __init__(self, **kwargs):
+ metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
+ Task.__init__(self, **kwargs)
+ # The SlotObject constructor assigns self.root_config from keyword args;
+ # it is an instance of the _emerge.RootConfig.RootConfig class.
+ self.root = self.root_config.root
+ self._raw_metadata = metadata
+ self._metadata = _PackageMetadataWrapper(self, metadata)
+ if not self.built:
+ self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ eapi_attrs = _get_eapi_attrs(self.eapi)
+ self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
+ settings=self.root_config.settings)
+ if hasattr(self.cpv, 'slot_invalid'):
+ self._invalid_metadata('SLOT.invalid',
+ "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
+ self.cpv_split = self.cpv.cpv_split
+ self.category, self.pf = portage.catsplit(self.cpv)
+ self.cp = self.cpv.cp
+ self.version = self.cpv.version
+ self.slot = self.cpv.slot
+ self.sub_slot = self.cpv.sub_slot
+ self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
+ # sync metadata with validated repo (may be UNKNOWN_REPO)
+ self._metadata['repository'] = self.cpv.repo
+
+ if eapi_attrs.iuse_effective:
+ implicit_match = self.root_config.settings._iuse_effective_match
+ else:
+ implicit_match = self.root_config.settings._iuse_implicit_match
+ usealiases = self.root_config.settings._use_manager.getUseAliases(self)
+ self.iuse = self._iuse(self, self._metadata["IUSE"].split(), implicit_match,
+ usealiases, self.eapi)
+
+ if (self.iuse.enabled or self.iuse.disabled) and \
+ not eapi_attrs.iuse_defaults:
+ if not self.installed:
+ self._invalid_metadata('EAPI.incompatible',
+ "IUSE contains defaults, but EAPI doesn't allow them")
+ if self.inherited is None:
+ self.inherited = frozenset()
+
+ if self.operation is None:
+ if self.onlydeps or self.installed:
+ self.operation = "nomerge"
+ else:
+ self.operation = "merge"
+
+ self._hash_key = Package._gen_hash_key(cpv=self.cpv,
+ installed=self.installed, onlydeps=self.onlydeps,
+ operation=self.operation, repo_name=self.cpv.repo,
+ root_config=self.root_config,
+ type_name=self.type_name)
+ self._hash_value = hash(self._hash_key)
+
+ @property
+ def eapi(self):
+ return self._metadata["EAPI"]
+
+ @property
+ def build_time(self):
+ if not self.built:
+ raise AttributeError('build_time')
+ try:
+ return long(self._metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ return 0
+
+ @property
+ def defined_phases(self):
+ return self._metadata.defined_phases
+
+ @property
+ def properties(self):
+ return self._metadata.properties
+
+ @property
+ def restrict(self):
+ return self._metadata.restrict
+
+ @property
+ def metadata(self):
+ warnings.warn("_emerge.Package.Package.metadata is deprecated",
+ DeprecationWarning, stacklevel=3)
+ return self._metadata
+
+ # These are calculated on-demand, so that they are calculated
+ # after FakeVartree applies its metadata tweaks.
+ @property
+ def invalid(self):
+ if self._invalid is None:
+ self._validate_deps()
+ if self._invalid is None:
+ self._invalid = False
+ return self._invalid
+
+ @property
+ def masks(self):
+ if self._masks is None:
+ self._masks = self._eval_masks()
+ return self._masks
+
+ @property
+ def visible(self):
+ if self._visible is None:
+ self._visible = self._eval_visibility(self.masks)
+ return self._visible
+
+ @property
+ def validated_atoms(self):
+ """
+ Returns *all* validated atoms from the deps, regardless
+ of USE conditionals, with USE conditionals inside
+ atoms left unevaluated.
+ """
+ if self._validated_atoms is None:
+ self._validate_deps()
+ return self._validated_atoms
+
+ @property
+ def stable(self):
+ return self.cpv.stable
+
+ @classmethod
+ def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
+ operation=None, repo_name=None, root_config=None,
+ type_name=None, **kwargs):
+
+ if operation is None:
+ if installed or onlydeps:
+ operation = "nomerge"
+ else:
+ operation = "merge"
+
+ root = None
+ if root_config is not None:
+ root = root_config.root
+ else:
+ raise TypeError("root_config argument is required")
+
+ if type_name is None:
+ raise TypeError("type_name argument is required")
+ elif type_name == "ebuild":
+ if repo_name is None:
+ raise AssertionError(
+ "Package._gen_hash_key() " + \
+ "called without 'repo_name' argument")
+ repo_key = repo_name
+ else:
+ # For installed (and binary) packages we don't care for the repo
+ # when it comes to hashing, because there can only be one cpv.
+ # So overwrite the repo_key with type_name.
+ repo_key = type_name
+
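+ # Illustrative keys: an installed package hashes as
+ # ("installed", "/", "sys-apps/sed-4.2.2", "nomerge", "installed"),
+ # while an ebuild selected for merge hashes as
+ # ("ebuild", "/", "sys-apps/sed-4.2.2", "merge", "gentoo").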
+ return (type_name, root, _unicode(cpv), operation, repo_key)
+
+ def _validate_deps(self):
+ """
+ Validate deps. This does not trigger USE calculation since that
+ is expensive for ebuilds and therefore we want to avoid doing
+ it unnecessarily (like for masked packages).
+ """
+ eapi = self.eapi
+ dep_eapi = eapi
+ dep_valid_flag = self.iuse.is_valid_flag
+ if self.installed:
+ # Ignore EAPI.incompatible and conditionals missing
+ # from IUSE for installed packages since these issues
+ # aren't relevant now (re-evaluate when new EAPIs are
+ # deployed).
+ dep_eapi = None
+ dep_valid_flag = None
+
+ validated_atoms = []
+ for k in self._dep_keys:
+ v = self._metadata.get(k)
+ if not v:
+ continue
+ try:
+ atoms = use_reduce(v, eapi=dep_eapi,
+ matchall=True, is_valid_flag=dep_valid_flag,
+ token_class=Atom, flat=True)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+ else:
+ validated_atoms.extend(atoms)
+ if not self.built:
+ for atom in atoms:
+ if not isinstance(atom, Atom):
+ continue
+ if atom.slot_operator_built:
+ e = InvalidDependString(
+ _("Improper context for slot-operator "
+ "\"built\" atom syntax: %s") %
+ (atom.unevaluated_atom,))
+ self._metadata_exception(k, e)
+
+ self._validated_atoms = tuple(set(atom for atom in
+ validated_atoms if isinstance(atom, Atom)))
+
+ k = 'PROVIDE'
+ v = self._metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag, token_class=Atom)
+ except InvalidDependString as e:
+ self._invalid_metadata("PROVIDE.syntax", "%s: %s" % (k, e))
+
+ for k in self._use_conditional_misc_keys:
+ v = self._metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'REQUIRED_USE'
+ v = self._metadata.get(k)
+ if v and not self.built:
+ if not _get_eapi_attrs(eapi).required_use:
+ self._invalid_metadata('EAPI.incompatible',
+ "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
+ else:
+ try:
+ check_required_use(v, (),
+ self.iuse.is_valid_flag, eapi=eapi)
+ except InvalidDependString as e:
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
+
+ k = 'SRC_URI'
+ v = self._metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
+ is_valid_flag=self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ if not self.installed:
+ self._metadata_exception(k, e)
+
+ def copy(self):
+ return Package(built=self.built, cpv=self.cpv, depth=self.depth,
+ installed=self.installed, metadata=self._raw_metadata,
+ onlydeps=self.onlydeps, operation=self.operation,
+ root_config=self.root_config, type_name=self.type_name)
+
+ def _eval_masks(self):
+ masks = {}
+ settings = self.root_config.settings
+
+ if self.invalid is not False:
+ masks['invalid'] = self.invalid
+
+ if not settings._accept_chost(self.cpv, self._metadata):
+ masks['CHOST'] = self._metadata['CHOST']
+
+ eapi = self.eapi
+ if not portage.eapi_is_supported(eapi):
+ masks['EAPI.unsupported'] = eapi
+ if portage._eapi_is_deprecated(eapi):
+ masks['EAPI.deprecated'] = eapi
+
+ missing_keywords = settings._getMissingKeywords(
+ self.cpv, self._metadata)
+ if missing_keywords:
+ masks['KEYWORDS'] = missing_keywords
+
+ if self.built and not self.installed:
+ # we can have an old binary which has no EPREFIX information
+ if "EPREFIX" not in self.metadata:
+ masks['EPREFIX.missing'] = ''
+ if len(self.metadata["EPREFIX"].strip()) < len(EPREFIX):
+ masks['EPREFIX.tooshort'] = self.metadata["EPREFIX"].strip()
+
+ try:
+ missing_properties = settings._getMissingProperties(
+ self.cpv, self._metadata)
+ if missing_properties:
+ masks['PROPERTIES'] = missing_properties
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ try:
+ missing_restricts = settings._getMissingRestrict(
+ self.cpv, self._metadata)
+ if missing_restricts:
+ masks['RESTRICT'] = missing_restricts
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
+ if mask_atom is not None:
+ masks['package.mask'] = mask_atom
+
+ try:
+ missing_licenses = settings._getMissingLicenses(
+ self.cpv, self._metadata)
+ if missing_licenses:
+ masks['LICENSE'] = missing_licenses
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ if not masks:
+ masks = False
+
+ return masks
+
+ def _eval_visibility(self, masks):
+
+ if masks is not False:
+
+ if 'EAPI.unsupported' in masks:
+ return False
+
+ if self.built and not self.installed and ( \
+ 'EPREFIX.missing' in masks or \
+ 'EPREFIX.tooshort' in masks) or \
+ 'invalid' in masks:
+ return False
+
+ if not self.installed and ( \
+ 'CHOST' in masks or \
+ 'EAPI.deprecated' in masks or \
+ 'KEYWORDS' in masks or \
+ 'PROPERTIES' in masks or \
+ 'RESTRICT' in masks):
+ return False
+
+ if 'package.mask' in masks or \
+ 'LICENSE' in masks:
+ return False
+
+ return True
+
+ def get_keyword_mask(self):
+ """returns None, 'missing', or 'unstable'."""
+
+ missing = self.root_config.settings._getRawMissingKeywords(
+ self.cpv, self._metadata)
+
+ if not missing:
+ return None
+
+ if '**' in missing:
+ return 'missing'
+
+ global_accept_keywords = frozenset(
+ self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
+
+ for keyword in missing:
+ if keyword.lstrip("~") in global_accept_keywords:
+ return 'unstable'
+
+ return 'missing'
+
+ def isHardMasked(self):
+ """returns a bool if the cpv is in the list of
+ expanded pmaskdict[cp] available ebuilds"""
+ pmask = self.root_config.settings._getRawMaskAtom(
+ self.cpv, self._metadata)
+ return pmask is not None
+
+ def _metadata_exception(self, k, e):
+
+ if k.endswith('DEPEND'):
+ qacat = 'dependency.syntax'
+ else:
+ qacat = k + ".syntax"
+
+ # For unicode safety with python-2.x we need to avoid
+ # using the string format operator with a non-unicode
+ # format string, since that will result in the
+ # PortageException.__str__() method being invoked,
+ # followed by unsafe decoding that may result in a
+ # UnicodeDecodeError. Therefore, use unicode_literals
+ # to ensure that format strings are unicode, so that
+ # PortageException.__unicode__() is used when necessary
+ # in python-2.x.
+ if not self.installed:
+ categorized_error = False
+ if e.errors:
+ for error in e.errors:
+ if getattr(error, 'category', None) is None:
+ continue
+ categorized_error = True
+ self._invalid_metadata(error.category,
+ "%s: %s" % (k, error))
+
+ if not categorized_error:
+ self._invalid_metadata(qacat, "%s: %s" % (k, e))
+ else:
+ # For installed packages, show the path of the file
+ # containing the invalid metadata, since the user may
+ # want to fix the deps by hand.
+ vardb = self.root_config.trees['vartree'].dbapi
+ path = vardb.getpath(self.cpv, filename=k)
+ self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
+
+ def _invalid_metadata(self, msg_type, msg):
+ if self._invalid is None:
+ self._invalid = {}
+ msgs = self._invalid.get(msg_type)
+ if msgs is None:
+ msgs = []
+ self._invalid[msg_type] = msgs
+ msgs.append(msg)
+
+ def __str__(self):
+ if self.operation == "merge":
+ if self.type_name == "binary":
+ cpv_color = "PKG_BINARY_MERGE"
+ else:
+ cpv_color = "PKG_MERGE"
+ elif self.operation == "uninstall":
+ cpv_color = "PKG_UNINSTALL"
+ else:
+ cpv_color = "PKG_NOMERGE"
+
+ s = "(%s, %s" \
+ % (portage.output.colorize(cpv_color, self.cpv + _slot_separator + \
+ self.slot + "/" + self.sub_slot + _repo_separator + self.repo) , self.type_name)
+
+ if self.type_name == "installed":
+ if self.root_config.settings['ROOT'] != "/":
+ s += " in '%s'" % self.root_config.settings['ROOT']
+ if self.operation == "uninstall":
+ s += " scheduled for uninstall"
+ else:
+ if self.operation == "merge":
+ s += " scheduled for merge"
+ if self.root_config.settings['ROOT'] != "/":
+ s += " to '%s'" % self.root_config.settings['ROOT']
+ s += ")"
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ class _use_class(object):
+
+ __slots__ = ("enabled", "_expand", "_expand_hidden",
+ "_force", "_pkg", "_mask")
+
+ # Share identical frozenset instances when available.
+ _frozensets = {}
+
+ def __init__(self, pkg, enabled_flags):
+ self._pkg = pkg
+ self._expand = None
+ self._expand_hidden = None
+ self._force = None
+ self._mask = None
+ if eapi_has_use_aliases(pkg.eapi):
+ for enabled_flag in enabled_flags:
+ enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
+ self.enabled = frozenset(enabled_flags)
+ if pkg.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
+ if missing_iuse:
+ self.enabled = self.enabled.difference(missing_iuse)
+
+ def _init_force_mask(self):
+ pkgsettings = self._pkg._get_pkgsettings()
+ frozensets = self._frozensets
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND", "").lower().split())
+ self._expand = frozensets.setdefault(s, s)
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
+ self._expand_hidden = frozensets.setdefault(s, s)
+ s = pkgsettings.useforce
+ self._force = frozensets.setdefault(s, s)
+ s = pkgsettings.usemask
+ self._mask = frozensets.setdefault(s, s)
+
+ @property
+ def expand(self):
+ if self._expand is None:
+ self._init_force_mask()
+ return self._expand
+
+ @property
+ def expand_hidden(self):
+ if self._expand_hidden is None:
+ self._init_force_mask()
+ return self._expand_hidden
+
+ @property
+ def force(self):
+ if self._force is None:
+ self._init_force_mask()
+ return self._force
+
+ @property
+ def mask(self):
+ if self._mask is None:
+ self._init_force_mask()
+ return self._mask
+
+ @property
+ def repo(self):
+ return self._metadata['repository']
+
+ @property
+ def repo_priority(self):
+ repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
+ if repo_info is None:
+ return None
+ return repo_info.priority
+
+ @property
+ def use(self):
+ if self._use is None:
+ self._init_use()
+ return self._use
+
+ def _get_pkgsettings(self):
+ pkgsettings = self.root_config.trees[
+ 'porttree'].dbapi.doebuild_settings
+ pkgsettings.setcpv(self)
+ return pkgsettings
+
+ def _init_use(self):
+ if self.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use_str = self._metadata['USE']
+ is_valid_flag = self.iuse.is_valid_flag
+ enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
+ use_str = " ".join(enabled_flags)
+ self._use = self._use_class(
+ self, enabled_flags)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(
+ self._metadata, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ self._use = self._use_class(
+ self, use_str.split())
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._use._init_force_mask()
+
+ _PackageMetadataWrapperBase.__setitem__(
+ self._metadata, 'USE', use_str)
+
+ return use_str
+
+ class _iuse(object):
+
+ __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
+ "all", "all_aliases", "enabled", "disabled", "tokens")
+
+ def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
+ self._pkg = pkg
+ self.tokens = tuple(tokens)
+ self._iuse_implicit_match = iuse_implicit_match
+ enabled = []
+ disabled = []
+ other = []
+ enabled_aliases = []
+ disabled_aliases = []
+ other_aliases = []
+ aliases_supported = eapi_has_use_aliases(eapi)
+ self.alias_mapping = {}
+ for x in tokens:
+ prefix = x[:1]
+ if prefix == "+":
+ enabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ enabled_aliases.extend(self.alias_mapping[x[1:]])
+ elif prefix == "-":
+ disabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ disabled_aliases.extend(self.alias_mapping[x[1:]])
+ else:
+ other.append(x)
+ if aliases_supported:
+ self.alias_mapping[x] = aliases.get(x, [])
+ other_aliases.extend(self.alias_mapping[x])
+ self.enabled = frozenset(chain(enabled, enabled_aliases))
+ self.disabled = frozenset(chain(disabled, disabled_aliases))
+ self.all = frozenset(chain(enabled, disabled, other))
+ self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
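+ # Example (illustrative): tokens ["+ssl", "-gtk", "doc"] yield
+ # enabled={"ssl"}, disabled={"gtk"} and all={"ssl", "gtk", "doc"},
+ # plus any alias expansions when the EAPI supports USE aliases.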
+
+ def is_valid_flag(self, flags):
+ """
+ @return: True if all flags are valid USE values which may
+ be specified in USE dependencies, False otherwise.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+
+ for flag in flags:
+ if flag not in self.all and flag not in self.all_aliases and \
+ not self._iuse_implicit_match(flag):
+ return False
+ return True
+
+ def get_missing_iuse(self, flags):
+ """
+ @return: A list of flags missing from IUSE.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+ missing_iuse = []
+ for flag in flags:
+ if flag not in self.all and flag not in self.all_aliases and \
+ not self._iuse_implicit_match(flag):
+ missing_iuse.append(flag)
+ return missing_iuse
+
+ def get_real_flag(self, flag):
+ """
+ Returns the flag's name within the scope of this package
+ (accounting for aliases), or None if the flag is unknown.
+ """
+ if flag in self.all:
+ return flag
+ elif flag in self.all_aliases:
+ for k, v in self.alias_mapping.items():
+ if flag in v:
+ return k
+
+ if self._iuse_implicit_match(flag):
+ return flag
+
+ return None
+
+ def __len__(self):
+ return 4
+
+ def __iter__(self):
+ """
+ This is used to generate mtimedb resume mergelist entries, so we
+ limit it to 4 items for backward compatibility.
+ """
+ return iter(self._hash_key[:4])
+
+ def __lt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.vercmp(self.version, other.version) < 0:
+ return True
+ return False
+
+ def __le__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.vercmp(self.version, other.version) <= 0:
+ return True
+ return False
+
+ def __gt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.vercmp(self.version, other.version) > 0:
+ return True
+ return False
+
+ def __ge__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.vercmp(self.version, other.version) >= 0:
+ return True
+ return False
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+ if not x.startswith("UNUSED_"))
+_all_metadata_keys.update(Package.metadata_keys)
+_all_metadata_keys = frozenset(_all_metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+ """
+ Detect metadata updates and synchronize Package attributes.
+ """
+
+ __slots__ = ("_pkg",)
+ _wrapped_keys = frozenset(
+ ["COUNTER", "INHERITED", "USE", "_mtime_"])
+ _use_conditional_keys = frozenset(
+ ['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
+
+ def __init__(self, pkg, metadata):
+ _PackageMetadataWrapperBase.__init__(self)
+ self._pkg = pkg
+ if not pkg.built:
+ # USE is lazy, but we want it to show up in self.keys().
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', '')
+
+ self.update(metadata)
+
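+ # Illustrative evaluation (assumed metadata): with USE="gtk" enabled,
+ # __getitem__('LICENSE') turns "gtk? ( GPL-2 ) !gtk? ( LGPL-2.1 )"
+ # into "GPL-2" and caches the evaluated form back into the wrapper.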
+ def __getitem__(self, k):
+ v = _PackageMetadataWrapperBase.__getitem__(self, k)
+ if k in self._use_conditional_keys:
+ if self._pkg.root_config.settings.local_config and '?' in v:
+ try:
+ v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
+ is_valid_flag=self._pkg.iuse.is_valid_flag))
+ except InvalidDependString:
+ # This error should already have been registered via
+ # self._pkg._invalid_metadata().
+ pass
+ else:
+ self[k] = v
+
+ elif k == 'USE' and not self._pkg.built:
+ if not v:
+ # This is lazy because it's expensive.
+ v = self._pkg._init_use()
+
+ return v
+
+ def __setitem__(self, k, v):
+ _PackageMetadataWrapperBase.__setitem__(self, k, v)
+ if k in self._wrapped_keys:
+ getattr(self, "_set_" + k.lower())(k, v)
+
+ def _set_inherited(self, k, v):
+ if isinstance(v, basestring):
+ v = frozenset(v.split())
+ self._pkg.inherited = v
+
+ def _set_counter(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.counter = v
+
+ def _set_use(self, k, v):
+ # Force regeneration of _use attribute
+ self._pkg._use = None
+ # Use raw metadata to restore USE conditional values
+ # to unevaluated state
+ raw_metadata = self._pkg._raw_metadata
+ for x in self._use_conditional_keys:
+ try:
+ self[x] = raw_metadata[x]
+ except KeyError:
+ pass
+
+ def _set__mtime_(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.mtime = v
+
+ @property
+ def properties(self):
+ return self['PROPERTIES'].split()
+
+ @property
+ def restrict(self):
+ return self['RESTRICT'].split()
+
+ @property
+ def defined_phases(self):
+ """
+ Returns tokens from DEFINED_PHASES metadata if it is defined,
+ otherwise returns a tuple containing all possible phases. This
+ makes it easy to do containment checks to see if it's safe to
+ skip execution of a given phase.
+ """
+ s = self['DEFINED_PHASES']
+ if s:
+ return s.split()
+ return EBUILD_PHASES
diff --git a/usr/lib/portage/pym/_emerge/PackageArg.py b/usr/lib/portage/pym/_emerge/PackageArg.py
new file mode 100644
index 0000000..ebfe4b2
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PackageArg.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from _emerge.Package import Package
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.dep import _repo_separator
+
+class PackageArg(DependencyArg):
+ def __init__(self, package=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.package = package
+ atom = "=" + package.cpv
+ if package.repo != Package.UNKNOWN_REPO:
+ atom += _repo_separator + package.repo
+ self.atom = portage.dep.Atom(atom, allow_repo=True)
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,),
+ allow_repo=True)
diff --git a/usr/lib/portage/pym/_emerge/PackageMerge.py b/usr/lib/portage/pym/_emerge/PackageMerge.py
new file mode 100644
index 0000000..fa2102f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PackageMerge.py
@@ -0,0 +1,44 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage.dep import _repo_separator
+from portage.output import colorize
+class PackageMerge(CompositeTask):
+ __slots__ = ("merge",)
+
+ def _start(self):
+
+ self.scheduler = self.merge.scheduler
+ pkg = self.merge.pkg
+ pkg_count = self.merge.pkg_count
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
+
+ if pkg.installed:
+ action_desc = "Uninstalling"
+ preposition = "from"
+ counter_str = ""
+ else:
+ action_desc = "Installing"
+ preposition = "to"
+ counter_str = "(%s of %s) " % \
+ (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
+
+ msg = "%s %s%s" % \
+ (action_desc,
+ counter_str,
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not self.merge.build_opts.fetchonly and \
+ not self.merge.build_opts.pretend and \
+ not self.merge.build_opts.buildpkgonly:
+ self.merge.statusMessage(msg)
+
+ task = self.merge.create_install_task()
+ self._start_task(task, self._default_final_exit)
diff --git a/usr/lib/portage/pym/_emerge/PackageUninstall.py b/usr/lib/portage/pym/_emerge/PackageUninstall.py
new file mode 100644
index 0000000..16c2f74
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PackageUninstall.py
@@ -0,0 +1,110 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import portage
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.exception import UnsupportedAPIException
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.emergelog import emergelog
+from _emerge.CompositeTask import CompositeTask
+from _emerge.unmerge import _unmerge_display
+
+class PackageUninstall(CompositeTask):
+ """
+ Uninstall a package asynchronously in a subprocess. When
+ both parallel-install and ebuild-locks FEATURES are enabled,
+ it is essential for the ebuild-locks code to execute in a
+ subprocess, since the portage.locks module does not behave
+ as desired if we try to lock the same file multiple times
+ concurrently from the same process for ebuild-locks phases
+ such as pkg_setup, pkg_prerm, and pkg_postrm.
+ """
+
+ __slots__ = ("world_atom", "ldpath_mtimes", "opts",
+ "pkg", "settings", "_builddir_lock")
+
+ def _start(self):
+
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ dbdir = vardb.getpath(self.pkg.cpv)
+ if not os.path.exists(dbdir):
+ # Apparently the package got uninstalled
+ # already, so we can safely return early.
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self.settings.setcpv(self.pkg)
+ cat, pf = portage.catsplit(self.pkg.cpv)
+ myebuildpath = os.path.join(dbdir, pf + ".ebuild")
+
+ try:
+ portage.doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=vardb)
+ except UnsupportedAPIException:
+ # This is safe to ignore since this function is
+ # guaranteed to set PORTAGE_BUILDDIR even though
+ # it raises UnsupportedAPIException. The error
+ # will be logged when it prevents the pkg_prerm
+ # and pkg_postrm phases from executing.
+ pass
+
+ self._builddir_lock = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._builddir_lock.lock()
+
+ portage.prepare_build_dirs(
+ settings=self.settings, cleanup=True)
+
+ # Output only gets logged if it comes after prepare_build_dirs()
+ # which initializes PORTAGE_LOG_FILE.
+ retval, pkgmap = _unmerge_display(self.pkg.root_config,
+ self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
+ writemsg_level=self._writemsg_level)
+
+ if retval != os.EX_OK:
+ self._builddir_lock.unlock()
+ self.returncode = retval
+ self._async_wait()
+ return
+
+ self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
+ noiselevel=-1)
+ self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))
+
+ unmerge_task = MergeProcess(
+ mycat=cat, mypkg=pf, settings=self.settings,
+ treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
+ scheduler=self.scheduler, background=self.background,
+ mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
+ prev_mtimes=self.ldpath_mtimes,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)
+
+ self._start_task(unmerge_task, self._unmerge_exit)
+
+ def _unmerge_exit(self, unmerge_task):
+ if self._final_exit(unmerge_task) != os.EX_OK:
+ self._emergelog(" !!! unmerge FAILURE: %s" % (self.pkg.cpv,))
+ else:
+ self._emergelog(" >>> unmerge success: %s" % (self.pkg.cpv,))
+ self.world_atom(self.pkg)
+ self._builddir_lock.unlock()
+ self.wait()
+
+ def _emergelog(self, msg):
+ emergelog("notitles" not in self.settings.features, msg)
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.background
+
+ if log_path is None:
+ if not (background and level < logging.WARNING):
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
+ else:
+ self.scheduler.output(msg, log_path=log_path,
+ level=level, noiselevel=noiselevel)
diff --git a/usr/lib/portage/pym/_emerge/PackageVirtualDbapi.py b/usr/lib/portage/pym/_emerge/PackageVirtualDbapi.py
new file mode 100644
index 0000000..56a5576
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PackageVirtualDbapi.py
@@ -0,0 +1,149 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+
+class PackageVirtualDbapi(dbapi):
+ """
+ A dbapi-like interface class that represents the state of the installed
+ package database as new packages are installed, replacing any packages
+ that previously existed in the same slot. The main difference between
+ this class and fakedbapi is that this one uses Package instances
+ internally (passed in via cpv_inject() and cpv_remove() calls).
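+
+ Usage sketch (pkg_a and pkg_b are hypothetical Package instances
+ that occupy the same slot, so the second injection displaces the
+ first):
+
+ vdb = PackageVirtualDbapi(settings)
+ vdb.cpv_inject(pkg_a)
+ vdb.cpv_inject(pkg_b)
+ matches = vdb.match("dev-lang/python")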
+ """
+ def __init__(self, settings):
+ dbapi.__init__(self)
+ self.settings = settings
+ self._match_cache = {}
+ self._cp_map = {}
+ self._cpv_map = {}
+
+ def clear(self):
+ """
+ Remove all packages.
+ """
+ if self._cpv_map:
+ self._clear_cache()
+ self._cp_map.clear()
+ self._cpv_map.clear()
+
+ def copy(self):
+ obj = PackageVirtualDbapi(self.settings)
+ obj._match_cache = self._match_cache.copy()
+ obj._cp_map = self._cp_map.copy()
+ for k, v in obj._cp_map.items():
+ obj._cp_map[k] = v[:]
+ obj._cpv_map = self._cpv_map.copy()
+ return obj
+
+ def __bool__(self):
+ return bool(self._cpv_map)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self._cpv_map.values())
+
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
+ def get(self, item, default=None):
+ cpv = getattr(item, "cpv", None)
+ if cpv is None:
+ if len(item) != 5:
+ return default
+ type_name, root, cpv, operation, repo_key = item
+
+ existing = self._cpv_map.get(cpv)
+ if existing is not None and \
+ existing == item:
+ return existing
+ return default
+
+ def match_pkgs(self, atom):
+ return [self._cpv_map[cpv] for cpv in self.match(atom)]
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ atom = dep_expand(origdep, mydb=self, settings=self.settings)
+ cache_key = (atom, atom.unevaluated_atom)
+ result = self._match_cache.get(cache_key)
+ if result is not None:
+ return result[:]
+ result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+ self._match_cache[cache_key] = result
+ return result[:]
+
+ def cpv_exists(self, cpv, myrepo=None):
+ return cpv in self._cpv_map
+
+ def cp_list(self, mycp, use_cache=1):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ cache_key = (mycp, mycp)
+ cachelist = self._match_cache.get(cache_key)
+ if cachelist is not None:
+ return cachelist[:]
+ cpv_list = self._cp_map.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ else:
+ cpv_list = [pkg.cpv for pkg in cpv_list]
+ self._cpv_sort_ascending(cpv_list)
+ self._match_cache[cache_key] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self._cp_map)
+
+ def cpv_all(self):
+ return list(self._cpv_map)
+
+ def cpv_inject(self, pkg):
+ cp_list = self._cp_map.get(pkg.cp)
+ if cp_list is None:
+ cp_list = []
+ self._cp_map[pkg.cp] = cp_list
+ e_pkg = self._cpv_map.get(pkg.cpv)
+ if e_pkg is not None:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ for e_pkg in cp_list:
+ if e_pkg.slot_atom == pkg.slot_atom:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ break
+ cp_list.append(pkg)
+ self._cpv_map[pkg.cpv] = pkg
+ self._clear_cache()
+
+ def cpv_remove(self, pkg):
+ old_pkg = self._cpv_map.get(pkg.cpv)
+ if old_pkg != pkg:
+ raise KeyError(pkg)
+ self._cp_map[pkg.cp].remove(pkg)
+ del self._cpv_map[pkg.cpv]
+ self._clear_cache()
+
+ def aux_get(self, cpv, wants, myrepo=None):
+ metadata = self._cpv_map[cpv]._metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._cpv_map[cpv]._metadata.update(values)
+ self._clear_cache()
+
diff --git a/usr/lib/portage/pym/_emerge/PipeReader.py b/usr/lib/portage/pym/_emerge/PipeReader.py
new file mode 100644
index 0000000..a8392c3
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PipeReader.py
@@ -0,0 +1,127 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import sys
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReader(AbstractPollTask):
+
+ """
+ Reads output from one or more files and saves it in memory,
+ for retrieval via the getvalue() method. This is driven by
+ the scheduler's poll() loop, so it runs entirely within the
+ current process.
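+
+ Usage sketch (reads one pipe to completion; global_event_loop()
+ from portage.util._eventloop.global_event_loop is one suitable
+ scheduler):
+
+ master, slave = os.pipe()
+ reader = PipeReader(input_files={"out": master},
+ scheduler=global_event_loop())
+ reader.start()
+ os.write(slave, b"data")
+ os.close(slave)
+ reader.wait()
+ output = reader.getvalue()
+ reader.close()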
+ """
+
+ __slots__ = ("input_files",) + \
+ ("_read_data", "_reg_ids", "_use_array")
+
+ def _start(self):
+ self._reg_ids = set()
+ self._read_data = []
+
+ if self._use_array:
+ output_handler = self._array_output_handler
+ else:
+ output_handler = self._output_handler
+
+ for f in self.input_files.values():
+ fd = f if isinstance(f, int) else f.fileno()
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_ids.add(self.scheduler.io_add_watch(fd,
+ self._registered_events, output_handler))
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ self._read_data = None
+
+ def _output_handler(self, fd, event):
+
+ while True:
+ data = self._read_buf(fd, event)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
+ else:
+ self._unregister()
+ self.wait()
+ break
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _array_output_handler(self, fd, event):
+
+ for f in self.input_files.values():
+ if f.fileno() == fd:
+ break
+
+ while True:
+ data = self._read_array(f, event)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
+ else:
+ self._unregister()
+ self.wait()
+ break
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_ids is not None:
+ for reg_id in self._reg_ids:
+ self.scheduler.source_remove(reg_id)
+ self._reg_ids = None
+
+ if self.input_files is not None:
+ for f in self.input_files.values():
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
+ self.input_files = None
+
diff --git a/usr/lib/portage/pym/_emerge/PollScheduler.py b/usr/lib/portage/pym/_emerge/PollScheduler.py
new file mode 100644
index 0000000..b118ac1
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/PollScheduler.py
@@ -0,0 +1,160 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+from _emerge.getloadavg import getloadavg
+
+class PollScheduler(object):
+
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = None
+
+ def __init__(self, main=False, event_loop=None):
+ """
+ @param main: If True then use global_event_loop(), otherwise use
+ a local EventLoop instance (default is False, for safe use in
+ a non-main thread)
+ @type main: bool
+ """
+ self._terminated = threading.Event()
+ self._terminated_tasks = False
+ self._max_jobs = 1
+ self._max_load = None
+ self._scheduling = False
+ self._background = False
+ if event_loop is not None:
+ self._event_loop = event_loop
+ elif main:
+ self._event_loop = global_event_loop()
+ else:
+ self._event_loop = (global_event_loop()
+ if portage._internal_caller else EventLoop(main=False))
+ self._sched_iface = SchedulerInterface(self._event_loop,
+ is_background=self._is_background)
+
+ def _is_background(self):
+ return self._background
+
+ def terminate(self):
+ """
+ Schedules asynchronous, graceful termination of the scheduler
+ at the earliest opportunity.
+
+ This method is thread-safe (and safe for signal handlers).
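+
+ A typical use is a signal handler, as in Scheduler.merge():
+
+ def sighandler(signum, frame):
+ scheduler.terminate()
+ signal.signal(signal.SIGTERM, sighandler)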
+ """
+ self._terminated.set()
+
+ def _termination_check(self):
+ """
+ Calls _terminate_tasks() if appropriate. It's guaranteed not to
+ call it while _schedule_tasks() is being called. The check should
+ be executed for each iteration of the event loop, so that
+ termination signals are handled at the earliest opportunity. It
+ always returns True, for continuous scheduling via idle_add.
+ """
+ if not self._scheduling and \
+ self._terminated.is_set() and \
+ not self._terminated_tasks:
+ self._scheduling = True
+ try:
+ self._terminated_tasks = True
+ self._terminate_tasks()
+ finally:
+ self._scheduling = False
+ return True
+
+ def _terminate_tasks(self):
+ """
+ Send signals to terminate all tasks. This is called once
+ from _keep_scheduling() or _is_work_scheduled() in the event
+ dispatching thread. It will not be called while the _schedule_tasks()
+ implementation is running, in order to avoid potential
+ interference. All tasks should be cleaned up at the earliest
+ opportunity, but not necessarily before this method returns.
+ Typically, this method will send kill signals and return without
+ waiting for exit status. This allows basic cleanup to occur, such as
+ flushing of buffered output to logs.
+ """
+ raise NotImplementedError()
+
+ def _keep_scheduling(self):
+ """
+ @rtype: bool
+ @return: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ return False
+
+ def _schedule_tasks(self):
+ """
+ This is called from inside the _schedule() method, which
+ guarantees the following:
+
+ 1) It will not be called recursively.
+ 2) _terminate_tasks() will not be called while it is running.
+ 3) The state of the boolean _terminated_tasks variable will
+ not change while it is running.
+
+ Unless this method is used to perform user interface updates,
+ or something like that, the first thing it should do is check
+ the state of _terminated_tasks and if that is True then it
+ should return immediately (since there's no need to
+ schedule anything after _terminate_tasks() has been called).
+ """
+ pass
+
+ def _schedule(self):
+ """
+ Calls _schedule_tasks() and automatically returns early from
+ any recursive calls to this method that the _schedule_tasks()
+ call might trigger. This makes _schedule() safe to call from
+ inside exit listeners. This method always returns True, so that
+ it may be scheduled continuously via EventLoop.timeout_add().
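+
+ For example, Scheduler._main_loop() keeps it running with:
+
+ self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)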
+ """
+ if self._scheduling:
+ return True
+ self._scheduling = True
+ try:
+ self._schedule_tasks()
+ finally:
+ self._scheduling = False
+ return True
+
+ def _is_work_scheduled(self):
+ return bool(self._running_job_count())
+
+ def _running_job_count(self):
+ raise NotImplementedError(self)
+
+ def _can_add_job(self):
+ if self._terminated_tasks:
+ return False
+
+ max_jobs = self._max_jobs
+ max_load = self._max_load
+
+ if max_jobs is not True and \
+ self._running_job_count() >= max_jobs:
+ return False
+
+ if max_load is not None and \
+ (max_jobs is True or max_jobs > 1) and \
+ self._running_job_count() >= 1:
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ if avg1 >= max_load:
+ return False
+
+ return True
diff --git a/usr/lib/portage/pym/_emerge/ProgressHandler.py b/usr/lib/portage/pym/_emerge/ProgressHandler.py
new file mode 100644
index 0000000..f5afe6d
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/ProgressHandler.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+class ProgressHandler(object):
+ def __init__(self):
+ self.curval = 0
+ self.maxval = 0
+ self._last_update = 0
+ self.min_latency = 0.2
+
+ def onProgress(self, maxval, curval):
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self._last_update >= self.min_latency:
+ self._last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
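+# Usage sketch: subclasses provide display(), and onProgress()
+# throttles redraws to at most one per min_latency seconds:
+#
+# class CountingProgress(ProgressHandler):
+# def display(self):
+# print("%d of %d" % (self.curval, self.maxval))
+#
+# handler = CountingProgress()
+# handler.onProgress(100, 1)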
diff --git a/usr/lib/portage/pym/_emerge/RootConfig.py b/usr/lib/portage/pym/_emerge/RootConfig.py
new file mode 100644
index 0000000..3648d01
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/RootConfig.py
@@ -0,0 +1,41 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class RootConfig(object):
+ """This is used internally by depgraph to track information about a
+ particular $ROOT."""
+ __slots__ = ("mtimedb", "root", "setconfig", "sets", "settings", "trees")
+
+ pkg_tree_map = {
+ "ebuild" : "porttree",
+ "binary" : "bintree",
+ "installed" : "vartree"
+ }
+
+ tree_pkg_map = {}
+ for k, v in pkg_tree_map.items():
+ tree_pkg_map[v] = k
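+ # For example, pkg_tree_map["binary"] is "bintree" and the
+ # inverse tree_pkg_map["bintree"] is "binary".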
+
+ def __init__(self, settings, trees, setconfig):
+ self.trees = trees
+ self.settings = settings
+ self.root = self.settings['EROOT']
+ self.setconfig = setconfig
+ if setconfig is None:
+ self.sets = {}
+ else:
+ self.sets = self.setconfig.getSets()
+
+ def update(self, other):
+ """
+ Shallow copy all attributes from another instance.
+ """
+ for k in self.__slots__:
+ try:
+ setattr(self, k, getattr(other, k))
+ except AttributeError:
+ # mtimedb is currently not a required attribute
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
diff --git a/usr/lib/portage/pym/_emerge/Scheduler.py b/usr/lib/portage/pym/_emerge/Scheduler.py
new file mode 100644
index 0000000..d6db311
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Scheduler.py
@@ -0,0 +1,2007 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+from collections import deque
+import gc
+import gzip
+import logging
+import signal
+import sys
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import (_check_temp_dir,
+ _prepare_self_update)
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class Scheduler(PollScheduler):
+
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = 30000
+
+ # max time between display status updates (milliseconds)
+ _max_display_latency = 3000
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_self_update = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ class _iface_class(SchedulerInterface):
+ __slots__ = ("fetch",
+ "scheduleSetup", "scheduleUnpack")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
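+ # Usage sketch (mirrors _create_prefetcher() below): a task
+ # holds the pool and allocates a config only when needed:
+ #
+ # pool = Scheduler._ConfigPool(pkg.root,
+ # self._allocate_config, self._deallocate_config)
+ # settings = pool.allocate()
+ # ...
+ # pool.deallocate(settings)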
+
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self, main=True)
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+ self._build_opts.buildpkg_exclude = InternalPackageSet( \
+ initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+ allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
+
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+ # Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
+ fetch=fetch_iface,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._jobs = 0
+ self._running_tasks = {}
+ self._completed_tasks = set()
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 5
+ self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+ # The load average takes some time to respond after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w').close()
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _handle_self_update(self):
+
+ if self._opts_no_self_update.intersection(self.myopts):
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+ if x.operation != "merge":
+ continue
+ if x.root != self._running_root.root:
+ continue
+ if not portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+ continue
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
+ break
+
+ return os.EX_OK
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ for task in list(self._running_tasks.values()):
+ task.cancel()
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+ Initialize structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+ must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @return: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or self.myopts.get("--quiet-build") == "y") and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+ parameter will do this, triggered by emerge --complete-graph option).
+ """
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+ for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocker_dblinks = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocker_dblinks.append(portage.dblink(
+ blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+ self.pkgsettings[blocking_pkg.root], treetype="vartree",
+ vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+ return blocker_dblinks
+
+ def _generate_digests(self):
+ """
+ Generate digests if necessary for --digests or FEATURES=digest.
+ In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ if '--fetchonly' in self.myopts:
+ return os.EX_OK
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+ def _env_sanity_check(self):
+ """
+ Verify a sane environment before trying to build anything from source.
+ """
+ have_src_pkg = False
+ for x in self._mergelist:
+ if isinstance(x, Package) and not x.built:
+ have_src_pkg = True
+ break
+
+ if not have_src_pkg:
+ return os.EX_OK
+
+ for settings in self.pkgsettings.values():
+ for var in ("ARCH", ):
+ value = settings.get(var)
+ if value and value.strip():
+ continue
+ msg = _("%(var)s is not set... "
+ "Are you missing the '%(configroot)s%(profile_path)s' symlink? "
+ "Is the symlink correct? "
+ "Is your portage tree complete?") % \
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"],
+ "profile_path": portage.const.PROFILE_PATH}
+
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 70):
+ out.eerror(line)
+ return 1
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ prefetchers = self._prefetchers
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ # This will start the first prefetcher immediately, so that
+ # self._task() won't discard it. This avoids a case where
+ # the first prefetcher is discarded, causing the second
+ # prefetcher to occupy the fetch queue before the first
+ # fetcher has an opportunity to execute.
+ prefetchers[pkg] = prefetcher
+ self._task_queues.fetch.add(prefetcher)
+
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+
+ # Use a local EventLoop instance here, since we don't
+ # want tasks here to trigger the usual Scheduler callbacks
+ # that handle job scheduling and status display.
+ sched_iface = SchedulerInterface(EventLoop(main=False))
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if x.eapi in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.defined_phases:
+ continue
+
+ out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+
+ # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+ # have to validate it for each package
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ build_dir_path = os.path.join(
+ os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", x.category, x.pf)
+ existing_builddir = os.path.isdir(build_dir_path)
+ settings["PORTAGE_BUILDDIR"] = build_dir_path
+ build_dir = EbuildBuildDir(scheduler=sched_iface,
+ settings=settings)
+ build_dir.lock()
+ current_task = None
+
+ try:
+
+ # Clean up the existing build dir, in case pkg_pretend
+ # checks for available space (bug #390711).
+ if existing_builddir:
+ if x.built:
+ tree = "bintree"
+ infloc = os.path.join(build_dir_path, "build-info")
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % x.cpv)
+ portage.package.ebuild.doebuild.doebuild_environment(
+ ebuild_path, "clean", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ current_task = clean_phase
+ clean_phase.start()
+ clean_phase.wait()
+
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface)
+ current_task = verifier
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+ tbz2_file = bintree.getname(x.cpv)
+ infloc = os.path.join(build_dir_path, "build-info")
+ ensure_dirs(infloc)
+ portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ current_task = pretend_phase
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
+ build_dir.unlock()
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return 1
+
+ rval = self._handle_self_update()
+ if rval != os.EX_OK:
+ return rval
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = "The directory specified in your " + \
+ "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
+ "does not exist. Please create this " + \
+ "directory or correct your PORTAGE_TMPDIR setting."
+ msg = textwrap.wrap(msg, 70)
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return 1
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ rval = self._env_sanity_check()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
+
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
+ # If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ log_file = None
+ log_file_real = None
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file_real = log_file
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ if log_file_real is not None:
+ log_file_real.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This keeps more important output, generated later, from being
+ # swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build or install:"
+ else:
+ msg = "The following package has " + \
+ "failed to build or install:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use unicode_literals to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = " %s" % (failed_pkg.pkg,)
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return 1
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root_config.settings["ROOT"] != "/":
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ if merge.returncode != os.EX_OK:
+ settings = merge.merge.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
+ return
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+ # When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ return
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.add(merge)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _main_loop(self):
+ term_check_id = self._event_loop.idle_add(self._termination_check)
+ loadavg_check_id = None
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+
+ try:
+ # Populate initial event sources. Unless we're scheduling
+ # based on load average, we only need to do this once
+ # here, since it can be called during the loop from within
+ # event handlers.
+ self._schedule()
+
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+ finally:
+ self._event_loop.source_remove(term_check_id)
+ if loadavg_check_id is not None:
+ self._event_loop.source_remove(loadavg_check_id)
+
+ def _merge(self):
+
+ if self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ self._add_prefetchers()
+ self._add_packages()
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+ display_timeout_id = None
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_timeout_id = self._event_loop.timeout_add(
+ self._max_display_latency, self._status_display.display)
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if display_timeout_id is not None:
+ self._event_loop.source_remove(display_timeout_id)
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+ Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @return: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() for
+ # performance reasons, call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings['EROOT']].append(settings)
+
+ def _keep_scheduling(self):
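+ # Keep scheduling while not terminated, packages remain queued,
+ # and nothing has failed (failures only stop scheduling when not
+ # in fetchonly mode).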
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ state_change = 0
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._merge_wait_scheduled.append(task)
+ self._task_queues.merge.add(task)
+ self._status_display.merges = len(self._task_queues.merge)
+ state_change += 1
+
+ if self._schedule_tasks_imp():
+ state_change += 1
+
+ self._status_display.display()
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ # Since this happens asynchronously, it doesn't count in
+ # state_change (counting it triggers an infinite loop).
+ self._task_queues.fetch.clear()
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @return: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (self._sigcont_delay - elapsed_seconds),
+ self._schedule_once)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before then.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ delay = self._job_delay_max * avg1 / self._max_load
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (delay - elapsed_seconds), self._schedule_once)
+ return True
+
+ return False
+
+ def _schedule_once(self):
+ self._schedule()
+ return False
+
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @return: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
+ if pkg.installed:
+ merge = PackageMerge(merge=task)
+ self._running_tasks[id(merge)] = merge
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.addFront(merge)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._extract_exit)
+ self._task_queues.jobs.add(task)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._build_exit)
+ self._task_queues.jobs.add(task)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
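+ # Build a MergeListItem for pkg: look up any installed package in
+ # the same slot to schedule as pkg_to_replace, and reuse a
+ # finished prefetcher when one is available.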
+
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ try:
+ prefetcher = self._prefetchers.pop(pkg, None)
+ except KeyError:
+ # KeyError observed with PyPy 1.8, despite None given as default.
+ # Note that PyPy 1.8 has the same WeakValueDictionary code as
+ # CPython 2.7, so it may be possible for CPython to raise KeyError
+ # here as well.
+ prefetcher = None
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+ This is called by tasks to provide feedback to the user. This
+ delegates the responsibility of generating \r and \n control characters
+ to guarantee that lines are created or erased when necessary and
+ appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests since it might
+ be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @return: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = {}
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task, atoms in dropped_tasks.items():
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " for %s" % (pkg.root,)
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+ Add or remove the package to the world file, but only if
+ it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ atom = None
+
+ if pkg.operation != "uninstall":
+ # Do this before acquiring the lock, since it queries the
+ # portdbapi which can call the global event loop, triggering
+ # a concurrent call to this method or something else that
+ # needs an exclusive (non-reentrant) lock on the world file.
+ atom = create_world_atom(pkg, args_set, root_config)
+
+ try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ if atom is not None:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/usr/lib/portage/pym/_emerge/SequentialTaskQueue.py b/usr/lib/portage/pym/_emerge/SequentialTaskQueue.py
new file mode 100644
index 0000000..8090893
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/SequentialTaskQueue.py
@@ -0,0 +1,81 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from collections import deque
+import sys
+
+from portage.util.SlotObject import SlotObject
+
+class SequentialTaskQueue(SlotObject):
+
+ __slots__ = ("max_jobs", "running_tasks") + \
+ ("_scheduling", "_task_queue")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._task_queue = deque()
+ self.running_tasks = set()
+ if self.max_jobs is None:
+ self.max_jobs = 1
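+ # Note: max_jobs may also be True, which schedule() treats as
+ # "no limit" on concurrently running tasks.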
+
+ def add(self, task):
+ self._task_queue.append(task)
+ self.schedule()
+
+ def addFront(self, task):
+ self._task_queue.appendleft(task)
+ self.schedule()
+
+ def schedule(self):
+
+ if self._scheduling:
+ # Ignore any recursive schedule() calls triggered via
+ # self._task_exit().
+ return
+
+ self._scheduling = True
+ try:
+ while self._task_queue and (self.max_jobs is True or
+ len(self.running_tasks) < self.max_jobs):
+ task = self._task_queue.popleft()
+ cancelled = getattr(task, "cancelled", None)
+ if not cancelled:
+ self.running_tasks.add(task)
+ task.addExitListener(self._task_exit)
+ task.start()
+ finally:
+ self._scheduling = False
+
+ def _task_exit(self, task):
+ """
+ Since we can always rely on exit listeners being called, the set of
+ running tasks is always pruned automatically and there is never any need
+ to actively prune it.
+ """
+ self.running_tasks.remove(task)
+ if self._task_queue:
+ self.schedule()
+
+ def clear(self):
+ """
+ Clear the task queue and asynchronously terminate any running tasks.
+ """
+ self._task_queue.clear()
+ for task in list(self.running_tasks):
+ task.cancel()
+
+ def wait(self):
+ """
+ Synchronously wait for all running tasks to exit.
+ """
+ while self.running_tasks:
+ next(iter(self.running_tasks)).wait()
+
+ def __bool__(self):
+ return bool(self._task_queue or self.running_tasks)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue) + len(self.running_tasks)
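+
+# Illustrative usage (a sketch, not part of the original module):
+# queue = SequentialTaskQueue(max_jobs=2)
+# queue.add(task_a); queue.add(task_b); queue.add(task_c)
+# The first two tasks start immediately; task_c starts when a
+# running task exits and its exit listener calls schedule() again.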
diff --git a/usr/lib/portage/pym/_emerge/SetArg.py b/usr/lib/portage/pym/_emerge/SetArg.py
new file mode 100644
index 0000000..5c82975
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/SetArg.py
@@ -0,0 +1,14 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from portage._sets import SETPREFIX
+class SetArg(DependencyArg):
+
+ __slots__ = ('name', 'pset')
+
+ def __init__(self, pset=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.pset = pset
+ self.name = self.arg[len(SETPREFIX):]
+
diff --git a/usr/lib/portage/pym/_emerge/SpawnProcess.py b/usr/lib/portage/pym/_emerge/SpawnProcess.py
new file mode 100644
index 0000000..15d3dc5
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/SpawnProcess.py
@@ -0,0 +1,217 @@
+# Copyright 2008-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+import errno
+import logging
+import signal
+import sys
+
+from _emerge.SubProcess import SubProcess
+import portage
+from portage import os
+from portage.const import BASH_BINARY
+from portage.util import writemsg_level
+from portage.util._async.PipeLogger import PipeLogger
+
+class SpawnProcess(SubProcess):
+
+ """
+ Constructor keyword args are passed into portage.process.spawn().
+ The required "args" keyword argument will be passed as the first
+ spawn() argument.
+ """
+
+ _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
+ "uid", "gid", "groups", "umask", "logfile",
+ "path_lookup", "pre_exec", "close_fds", "cgroup",
+ "unshare_ipc", "unshare_net")
+
+ __slots__ = ("args",) + \
+ _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)
+
+ def _start(self):
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ fd_pipes = self.fd_pipes
+
+ master_fd, slave_fd = self._pipe(fd_pipes)
+
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
+ else:
+ log_file_path = None
+
+ null_input = None
+ if not self.background or 0 in fd_pipes:
+ # Subclasses such as AbstractEbuildProcess may have already passed
+ # in a null file descriptor in fd_pipes, so use that when given.
+ pass
+ else:
+ # TODO: Use job control functions like tcsetpgrp() to control
+ # access to stdin. Until then, use /dev/null so that any
+ # attempts to read from stdin will immediately return EOF
+ # instead of blocking indefinitely.
+ null_input = os.open('/dev/null', os.O_RDWR)
+ fd_pipes[0] = null_input
+
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
+
+ # flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ fd_pipes_orig = fd_pipes.copy()
+
+ if log_file_path is not None or self.background:
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+
+ else:
+ # Create a dummy pipe that PipeLogger uses to efficiently
+ # monitor for process exit by listening for the EOF event.
+ # Re-use of the allocated fd number for the key in fd_pipes
+ # guarantees that the keys will not collide for similarly
+ # allocated pipes which are used by callers such as
+ # FileDigester and MergeProcess. See the _setup_pipes
+ # docstring for more benefits of this allocation approach.
+ self._dummy_pipe_fd = slave_fd
+ fd_pipes[slave_fd] = slave_fd
+
+ kwargs = {}
+ for k in self._spawn_kwarg_names:
+ v = getattr(self, k)
+ if v is not None:
+ kwargs[k] = v
+
+ kwargs["fd_pipes"] = fd_pipes
+ kwargs["returnpid"] = True
+ kwargs.pop("logfile", None)
+
+ retval = self._spawn(self.args, **kwargs)
+
+ os.close(slave_fd)
+ if null_input is not None:
+ os.close(null_input)
+
+ if isinstance(retval, int):
+ # spawn failed
+ self._unregister()
+ self._set_returncode((self.pid, retval))
+ self._async_wait()
+ return
+
+ self.pid = retval[0]
+
+ stdout_fd = None
+ if can_log and not self.background:
+ stdout_fd = os.dup(fd_pipes_orig[1])
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._pipe_logger = PipeLogger(background=self.background,
+ scheduler=self.scheduler, input_fd=master_fd,
+ log_file_path=log_file_path,
+ stdout_fd=stdout_fd)
+ self._pipe_logger.addExitListener(self._pipe_logger_exit)
+ self._pipe_logger.start()
+ self._registered = True
+
+ def _can_log(self, slave_fd):
+ return True
+
+ def _pipe(self, fd_pipes):
+ """
+ @type fd_pipes: dict
+ @param fd_pipes: pipes from which to copy terminal size if
+ desired (ignored by this base implementation).
+ """
+ return os.pipe()
+
+ def _spawn(self, args, **kwargs):
+ spawn_func = portage.process.spawn
+
+ if self._selinux_type is not None:
+ spawn_func = portage.selinux.spawn_wrapper(spawn_func,
+ self._selinux_type)
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
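+ # With `bash -c 'exec "$@"' argv0 cmd args...`, bash binds the
+ # word after the script to $0 and the rest to "$@", so the
+ # original command is exec'd unchanged under the wrapped bash
+ # entrypoint.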
+
+ return spawn_func(args, **kwargs)
+
+ def _pipe_logger_exit(self, pipe_logger):
+ self._pipe_logger = None
+ self._unregister()
+ self.wait()
+
+ def _waitpid_loop(self):
+ SubProcess._waitpid_loop(self)
+
+ pipe_logger = self._pipe_logger
+ if pipe_logger is not None:
+ self._pipe_logger = None
+ pipe_logger.removeExitListener(self._pipe_logger_exit)
+ pipe_logger.cancel()
+ pipe_logger.wait()
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+
+ if self.cgroup:
+ def get_pids(cgroup):
+ try:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
+ return [int(p) for p in f.read().split()]
+ except OSError:
+ # cgroup removed already?
+ return []
+
+ def kill_all(pids, sig):
+ for p in pids:
+ try:
+ os.kill(p, sig)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (p,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ # step 1: kill all orphans
+ pids = get_pids(self.cgroup)
+ if pids:
+ kill_all(pids, signal.SIGKILL)
+
+ # step 2: remove the cgroup
+ try:
+ os.rmdir(self.cgroup)
+ except OSError:
+ # it may be removed already, or busy
+ # we can't do anything good about it
+ pass
diff --git a/usr/lib/portage/pym/_emerge/SubProcess.py b/usr/lib/portage/pym/_emerge/SubProcess.py
new file mode 100644
index 0000000..13d9382
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/SubProcess.py
@@ -0,0 +1,156 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+import signal
+import errno
+
+class SubProcess(AbstractPollTask):
+
+ __slots__ = ("pid",) + \
+ ("_dummy_pipe_fd", "_files", "_reg_id")
+
+ # This is how much time we allow for waitpid to succeed after
+ # we've sent a kill signal to our subprocess.
+ _cancel_timeout = 1000 # 1 second
+
+ def _poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self.pid is None:
+ return self.returncode
+ if self._registered:
+ return self.returncode
+
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ retval = os.waitpid(self.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ retval = (self.pid, 1)
+
+ if retval[0] == 0:
+ return None
+ self._set_returncode(retval)
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ if self.isAlive():
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ def isAlive(self):
+ return self.pid is not None and \
+ self.returncode is None
+
+ def _wait(self):
+
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ if self.cancelled:
+ self._wait_loop(timeout=self._cancel_timeout)
+ if self._registered:
+ try:
+ os.kill(self.pid, signal.SIGKILL)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+ del e
+ self._wait_loop(timeout=self._cancel_timeout)
+ if self._registered:
+ self._orphan_process_warn()
+ else:
+ self._wait_loop()
+
+ if self.returncode is not None:
+ return self.returncode
+
+ if not isinstance(self.pid, int):
+ # Get debug info for bug #403697.
+ raise AssertionError(
+ "%s: pid is non-integer: %s" %
+ (self.__class__.__name__, repr(self.pid)))
+
+ self._waitpid_loop()
+
+ return self.returncode
+
+ def _waitpid_loop(self):
+ source_id = self.scheduler.child_watch_add(
+ self.pid, self._waitpid_cb)
+ try:
+ while self.returncode is None:
+ self.scheduler.iteration()
+ finally:
+ self.scheduler.source_remove(source_id)
+
+ def _waitpid_cb(self, pid, condition, user_data=None):
+ if pid != self.pid:
+ raise AssertionError("expected pid %s, got %s" % (self.pid, pid))
+ self._set_returncode((pid, condition))
+
+ def _orphan_process_warn(self):
+ pass
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
+ self._files = None
+
+ def _set_returncode(self, wait_retval):
+ """
+ Set the returncode in a manner compatible with
+ subprocess.Popen.returncode: A negative value -N indicates
+ that the child was terminated by signal N (Unix only).
+ """
+ self._unregister()
+
+ pid, status = wait_retval
+
+ if os.WIFSIGNALED(status):
+ retval = - os.WTERMSIG(status)
+ else:
+ retval = os.WEXITSTATUS(status)
+
+ self.returncode = retval
+
diff --git a/usr/lib/portage/pym/_emerge/Task.py b/usr/lib/portage/pym/_emerge/Task.py
new file mode 100644
index 0000000..250d458
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/Task.py
@@ -0,0 +1,50 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.SlotObject import SlotObject
+
+class Task(SlotObject):
+ __slots__ = ("_hash_key", "_hash_value")
+
+ def __eq__(self, other):
+ try:
+ return self._hash_key == other._hash_key
+ except AttributeError:
+ # depgraph._pkg() generates _hash_key
+ # for lookups here, so handle that
+ return self._hash_key == other
+
+ def __ne__(self, other):
+ try:
+ return self._hash_key != other._hash_key
+ except AttributeError:
+ return True
+
+ def __hash__(self):
+ return self._hash_value
+
+ def __len__(self):
+ return len(self._hash_key)
+
+ def __getitem__(self, key):
+ return self._hash_key[key]
+
+ def __iter__(self):
+ return iter(self._hash_key)
+
+ def __contains__(self, key):
+ return key in self._hash_key
+
+ def __str__(self):
+ """
+ Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
+ strings.
+ """
+ return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
+
+ def __repr__(self):
+ if self._hash_key is None:
+ # triggered by python-trace
+ return SlotObject.__repr__(self)
+ return "<%s (%s)>" % (self.__class__.__name__,
+ ", ".join(("'%s'" % x for x in self._hash_key)))
diff --git a/usr/lib/portage/pym/_emerge/TaskSequence.py b/usr/lib/portage/pym/_emerge/TaskSequence.py
new file mode 100644
index 0000000..1f2ba94
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/TaskSequence.py
@@ -0,0 +1,61 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from collections import deque
+
+from portage import os
+from _emerge.CompositeTask import CompositeTask
+from _emerge.AsynchronousTask import AsynchronousTask
+
+class TaskSequence(CompositeTask):
+ """
+ A collection of tasks that executes sequentially. Each task
+ must have an addExitListener() method that can be used as
+ a means to trigger movement from one task to the next.
+ """
+
+ __slots__ = ("_task_queue",)
+
+ def __init__(self, **kwargs):
+ AsynchronousTask.__init__(self, **kwargs)
+ self._task_queue = deque()
+
+ def add(self, task):
+ self._task_queue.append(task)
+
+ def _start(self):
+ self._start_next_task()
+
+ def _cancel(self):
+ self._task_queue.clear()
+ CompositeTask._cancel(self)
+
+ def _start_next_task(self):
+ try:
+ task = self._task_queue.popleft()
+ except IndexError:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_task(task, self._task_exit_handler)
+
+ def _task_exit_handler(self, task):
+ if self._default_exit(task) != os.EX_OK:
+ self.wait()
+ elif self._task_queue:
+ self._start_next_task()
+ else:
+ self._final_exit(task)
+ self.wait()
+
+ def __bool__(self):
+ return bool(self._task_queue)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue)
diff --git a/usr/lib/portage/pym/_emerge/UninstallFailure.py b/usr/lib/portage/pym/_emerge/UninstallFailure.py
new file mode 100644
index 0000000..e4f2834
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/UninstallFailure.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+class UninstallFailure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
diff --git a/usr/lib/portage/pym/_emerge/UnmergeDepPriority.py b/usr/lib/portage/pym/_emerge/UnmergeDepPriority.py
new file mode 100644
index 0000000..ec44a67
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/UnmergeDepPriority.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ("ignored", "optional", "satisfied",)
+ """
+ Combination of properties Priority Category
+
+ runtime_slot_op 0 HARD
+ runtime -1 HARD
+ runtime_post -2 HARD
+ buildtime -3 SOFT
+ (none of the above) -3 SOFT
+ """
+
+ MAX = 0
+ SOFT = -3
+ MIN = -3
+
+ def __init__(self, **kwargs):
+ AbstractDepPriority.__init__(self, **kwargs)
+ if self.buildtime:
+ self.optional = True
+
+ def __int__(self):
+ if self.runtime_slot_op:
+ return 0
+ if self.runtime:
+ return -1
+ if self.runtime_post:
+ return -2
+ if self.buildtime:
+ return -3
+ return -3
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.runtime_slot_op:
+ return "hard slot op"
+ myvalue = self.__int__()
+ if myvalue > self.SOFT:
+ return "hard"
+ return "soft"
+
diff --git a/usr/lib/portage/pym/_emerge/UseFlagDisplay.py b/usr/lib/portage/pym/_emerge/UseFlagDisplay.py
new file mode 100644
index 0000000..f460474
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/UseFlagDisplay.py
@@ -0,0 +1,124 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from itertools import chain
+import sys
+
+from portage import _encodings, _unicode_encode
+from portage.output import red
+from portage.util import cmp_sort_key
+from portage.output import blue
+
+class UseFlagDisplay(object):
+
+ __slots__ = ('name', 'enabled', 'forced')
+
+ def __init__(self, name, enabled, forced):
+ self.name = name
+ self.enabled = enabled
+ self.forced = forced
+
+ def __str__(self):
+ s = self.name
+ if self.enabled:
+ s = red(s)
+ else:
+ s = '-' + s
+ s = blue(s)
+ if self.forced:
+ s = '(%s)' % s
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ def _cmp_combined(a, b):
+ """
+ Sort by name, combining enabled and disabled flags.
+ """
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_combined = cmp_sort_key(_cmp_combined)
+ del _cmp_combined
+
+ def _cmp_separated(a, b):
+ """
+ Sort by name, separating enabled flags from disabled flags.
+ """
+ enabled_diff = b.enabled - a.enabled
+ if enabled_diff:
+ return enabled_diff
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_separated = cmp_sort_key(_cmp_separated)
+ del _cmp_separated
+
+def pkg_use_display(pkg, opts, modified_use=None):
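+ # Group the package's USE flags into USE_EXPAND variables (for
+ # example VIDEO_CARDS) plus a plain USE bucket, then render each
+ # group as NAME="flag -flag ..." with forced flags parenthesized.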
+ settings = pkg.root_config.settings
+ use_expand = pkg.use.expand
+ use_expand_hidden = pkg.use.expand_hidden
+ alphabetical_use = '--alphabetical' in opts
+ forced_flags = set(chain(pkg.use.force,
+ pkg.use.mask))
+ if modified_use is None:
+ use = set(pkg.use.enabled)
+ else:
+ use = set(modified_use)
+ use.discard(settings.get('ARCH'))
+ use_expand_flags = set()
+ use_enabled = {}
+ use_disabled = {}
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in use:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ use_enabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ for f in pkg.iuse.all:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ if f not in use:
+ use_disabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ var_order = set(use_enabled)
+ var_order.update(use_disabled)
+ var_order = sorted(var_order)
+ var_order.insert(0, 'USE')
+ use.difference_update(use_expand_flags)
+ use_enabled['USE'] = list(use)
+ use_disabled['USE'] = []
+
+ for f in pkg.iuse.all:
+ if f not in use and \
+ f not in use_expand_flags:
+ use_disabled['USE'].append(f)
+
+ flag_displays = []
+ for varname in var_order:
+ if varname.lower() in use_expand_hidden:
+ continue
+ flags = []
+ for f in use_enabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, True, f in forced_flags))
+ for f in use_disabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, False, f in forced_flags))
+ if alphabetical_use:
+ flags.sort(key=UseFlagDisplay.sort_combined)
+ else:
+ flags.sort(key=UseFlagDisplay.sort_separated)
+ # Use unicode_literals to force unicode format string so
+ # that UseFlagDisplay.__unicode__() is called in python2.
+ flag_displays.append('%s="%s"' % (varname,
+ ' '.join("%s" % (f,) for f in flags)))
+
+ return ' '.join(flag_displays)
diff --git a/usr/lib/portage/pym/_emerge/UserQuery.py b/usr/lib/portage/pym/_emerge/UserQuery.py
new file mode 100644
index 0000000..c866a0d
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/UserQuery.py
@@ -0,0 +1,71 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+
+from portage.output import bold, create_color_func
+
+
+class UserQuery(object):
+ """The UserQuery class is used to prompt the user with a set of responses,
+ as well as to accept and handle the responses."""
+
+ def __init__(self, myopts):
+ self.myopts = myopts
+
+ def query(self, prompt, enter_invalid, responses=None, colours=None):
+ """Display a prompt and a set of responses, then waits for user input
+ and check it against the responses. The first match is returned.
+
+ An empty response will match the first value in the list of responses,
+ unless enter_invalid is True. The input buffer is *not* cleared prior
+ to the prompt!
+
+ prompt: The String to display as a prompt.
+ responses: a List of Strings with the acceptable responses.
+ colours: a List of Functions taking and returning a String, used to
+ process the responses for display. Typically these will be functions
+ like red() but could be e.g. lambda x: "DisplayString".
+
+ If responses is omitted, it defaults to ["Yes", "No"], [green, red].
+ If only colours is omitted, it defaults to [bold, ...].
+
+ Returns a member of the List responses. (If called without optional
+ arguments, it returns "Yes" or "No".)
+
+ KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
+ printed."""
+ if responses is None:
+ responses = ["Yes", "No"]
+ colours = [
+ create_color_func("PROMPT_CHOICE_DEFAULT"),
+ create_color_func("PROMPT_CHOICE_OTHER")
+ ]
+ elif colours is None:
+ colours=[bold]
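+ # Repeat the colours list and truncate it so there is exactly
+ # one colour function per response.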
+ colours=(colours*len(responses))[:len(responses)]
+ if "--alert" in self.myopts:
+ prompt = '\a' + prompt
+ print(bold(prompt), end=' ')
+ try:
+ while True:
+ if sys.hexversion >= 0x3000000:
+ response=input("["+"/".join([colours[i](responses[i])
+ for i in range(len(responses))])+"] ")
+ else:
+ response=raw_input("["+"/".join([colours[i](responses[i])
+ for i in range(len(responses))])+"] ")
+ if response or not enter_invalid:
+ for key in responses:
+ # An empty response will match the
+ # first value in responses.
+ if response.upper()==key[:len(response)].upper():
+ return key
+ print("Sorry, response '%s' not understood." % response,
+ end=' ')
+ except (EOFError, KeyboardInterrupt):
+ print("Interrupted.")
+ sys.exit(128 + signal.SIGINT)
diff --git a/usr/lib/portage/pym/_emerge/__init__.py b/usr/lib/portage/pym/_emerge/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/_emerge/_find_deep_system_runtime_deps.py b/usr/lib/portage/pym/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644
index 0000000..ca09d83
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/_find_deep_system_runtime_deps.py
@@ -0,0 +1,38 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
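+ # Collect every package reachable from a member of the system set
+ # by following runtime (and runtime_post) dependency edges; the
+ # Scheduler later serializes merges of these packages.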
+ deep_system_deps = set()
+ node_stack = []
+ for node in graph:
+ if not isinstance(node, Package) or \
+ node.operation == 'uninstall':
+ continue
+ if node.root_config.sets['system'].findAtomForPackage(node):
+ node_stack.append(node)
+
+ def ignore_priority(priority):
+ """
+ Ignore non-runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ while node_stack:
+ node = node_stack.pop()
+ if node in deep_system_deps:
+ continue
+ deep_system_deps.add(node)
+ for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ node_stack.append(child)
+
+ return deep_system_deps
+
diff --git a/usr/lib/portage/pym/_emerge/_flush_elog_mod_echo.py b/usr/lib/portage/pym/_emerge/_flush_elog_mod_echo.py
new file mode 100644
index 0000000..9ac65b8
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/_flush_elog_mod_echo.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.elog import mod_echo
+
+def _flush_elog_mod_echo():
+ """
+ Dump the mod_echo output now so that our other
+ notifications are shown last.
+ @rtype: bool
+ @return: True if messages were shown, False otherwise.
+ """
+ messages_shown = bool(mod_echo._items)
+ mod_echo.finalize()
+ return messages_shown
diff --git a/usr/lib/portage/pym/_emerge/actions.py b/usr/lib/portage/pym/_emerge/actions.py
new file mode 100644
index 0000000..9ca071f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/actions.py
@@ -0,0 +1,4088 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+import errno
+import logging
+import operator
+import platform
+import pwd
+import random
+import re
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
+ 'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
+)
+
+from portage.localization import _
+from portage import os
+from portage import shutil
+from portage import eapi_is_supported, _encodings, _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import EPREFIX
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dep import Atom
+from portage.eclass_cache import hashed_path
+from portage.exception import InvalidAtom, InvalidData, ParseError
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+ red, xtermTitle, xtermTitleReset, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, varexpand, \
+ writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage._global_updates import _global_updates
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.UserQuery import UserQuery
+
+if sys.hexversion >= 0x3000000:
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+def action_build(settings, trees, mtimedb,
+ myopts, myaction, myfiles, spinner):
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+ # validate the state of the resume data
+ # so that we can make assumptions later.
+ for k in ("resume", "resume_backup"):
+ if k not in mtimedb:
+ continue
+ resume_data = mtimedb[k]
+ if not isinstance(resume_data, dict):
+ del mtimedb[k]
+ continue
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ del mtimedb[k]
+ continue
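+ # Each mergelist entry is expected to be a 4-item list of the
+ # form [pkg_type, pkg_root, pkg_key, pkg_action].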
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, pkg_root, pkg_key, pkg_action = x
+ if pkg_root not in trees:
+ # Current $ROOT setting differs,
+ # so the list must be stale.
+ mergelist = None
+ break
+ if not mergelist:
+ del mtimedb[k]
+ continue
+ resume_opts = resume_data.get("myopts")
+ if not isinstance(resume_opts, (dict, list)):
+ del mtimedb[k]
+ continue
+ favorites = resume_data.get("favorites")
+ if not isinstance(favorites, list):
+ del mtimedb[k]
+ continue
+
+ resume = False
+ if "--resume" in myopts and \
+ ("resume" in mtimedb or
+ "resume_backup" in mtimedb):
+ resume = True
+ if "resume" not in mtimedb:
+ mtimedb["resume"] = mtimedb["resume_backup"]
+ del mtimedb["resume_backup"]
+ mtimedb.commit()
+ # "myopts" is a list for backward compatibility.
+ resume_opts = mtimedb["resume"].get("myopts", [])
+ if isinstance(resume_opts, list):
+ resume_opts = dict((k,True) for k in resume_opts)
+ for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+ resume_opts.pop(opt, None)
+
+ # Current options always override resume_opts.
+ resume_opts.update(myopts)
+ myopts.clear()
+ myopts.update(resume_opts)
+
+ if "--debug" in myopts:
+ writemsg_level("myopts %s\n" % (myopts,))
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+ myparams = create_depgraph_params(myopts, myaction)
+ mergelist_shown = False
+
+ if pretend or fetchonly:
+ # make the mtimedb readonly
+ mtimedb.filename = None
+ if '--digest' in myopts or 'digest' in settings.features:
+ if '--digest' in myopts:
+ msg = "The --digest option"
+ else:
+ msg = "The FEATURES=digest setting"
+
+ msg += " can prevent corruption from being" + \
+ " noticed. The `repoman manifest` command is the preferred" + \
+ " way to generate manifests and it is capable of doing an" + \
+ " entire repository or category at once."
+ prefix = bad(" * ")
+ writemsg(prefix + "\n")
+ for line in textwrap.wrap(msg, 72):
+ writemsg("%s%s\n" % (prefix, line))
+ writemsg(prefix + "\n")
+
+ if resume:
+ favorites = mtimedb["resume"].get("favorites")
+ if not isinstance(favorites, list):
+ favorites = []
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data["mergelist"]
+ if mergelist and "--skipfirst" in myopts:
+ for i, task in enumerate(mergelist):
+ if isinstance(task, list) and \
+ task and task[-1] == "merge":
+ del mergelist[i]
+ break
+
+ success = False
+ mydepgraph = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ settings, trees, mtimedb, myopts, myparams, spinner)
+ except (portage.exception.PackageNotFound,
+ depgraph.UnsatisfiedResumeDep) as e:
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ mydepgraph = e.depgraph
+
+ from portage.output import EOutput
+ out = EOutput()
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+ if (mergelist and debug) or (verbose and not quiet):
+ out.eerror("Invalid resume list:")
+ out.eerror("")
+ indent = " "
+ for task in mergelist:
+ if isinstance(task, list):
+ out.eerror(indent + str(tuple(task)))
+ out.eerror("")
+
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ for dep in e.value:
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ elif isinstance(e, portage.exception.PackageNotFound):
+ out.eerror("An expected package is " + \
+ "not available: %s" % str(e))
+ out.eerror("")
+ msg = "The resume list contains one or more " + \
+ "packages that are no longer " + \
+ "available. Please restart/continue " + \
+ "the operation manually."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+
+ if success:
+ if dropped_tasks:
+ portage.writemsg("!!! One or more packages have been " + \
+ "dropped due to\n" + \
+ "!!! masking or unsatisfied dependencies:\n\n",
+ noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
+ portage.writemsg("\n", noiselevel=-1)
+ del dropped_tasks
+ else:
+ if mydepgraph is not None:
+ mydepgraph.display_problems()
+ if not (ask or pretend):
+ # delete the current list and also the backup
+ # since it's probably stale too.
+ for k in ("resume", "resume_backup"):
+ mtimedb.pop(k, None)
+ mtimedb.commit()
+
+ return 1
+ else:
+ if ("--resume" in myopts):
+ print(darkgreen("emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings['EROOT']]['root_config']
+ display_missing_pkg_set(root_config, e.value)
+ return 1
+
+ if not success:
+ mydepgraph.display_problems()
+ return 1
+
+ mergecount = None
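+ # Show the merge list up front only for interactive or verbose runs;
+ # --pretend output is handled separately below.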
+ if "--pretend" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts) and \
+ not ("--quiet" in myopts and "--ask" not in myopts):
+ if "--resume" in myopts:
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ prompt="Would you like to resume merging these packages?"
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ mergecount=0
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ mergecount += 1
+
+ prompt = None
+ if mergecount==0:
+ sets = trees[settings['EROOT']]['root_config'].sets
+ world_candidates = None
+ if "selective" in myparams and \
+ not oneshot and favorites:
+ # Sets that are not world candidates are filtered
+ # out here since the favorites list needs to be
+ # complete for depgraph.loadResumeCommand() to
+ # operate correctly.
+ world_candidates = [x for x in favorites \
+ if not (x.startswith(SETPREFIX) and \
+ not sets[x[1:]].world_candidate)]
+
+ if "selective" in myparams and \
+ not oneshot and world_candidates:
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
+ else:
+ print()
+ print("Nothing to merge; quitting.")
+ print()
+ return os.EX_OK
+ elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ prompt="Would you like to fetch the source files for these packages?"
+ else:
+ prompt="Would you like to merge these packages?"
+ print()
+ uq = UserQuery(myopts)
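+ # Declining the prompt exits with 128 + SIGINT, the conventional
+ # status for a process terminated by an interrupt signal.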
+ if prompt is not None and "--ask" in myopts and \
+ uq.query(prompt, enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ return 128 + signal.SIGINT
+ # Don't ask again (e.g. when auto-cleaning packages after merge)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
+
+ if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+ if ("--resume" in myopts):
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+
+ else:
+
+ if not mergelist_shown:
+ # If we haven't already shown the merge list above, at
+ # least show warnings about missed updates and such.
+ mydepgraph.display_problems()
+
+ if ("--resume" in myopts):
+ favorites=mtimedb["resume"]["favorites"]
+
+ else:
+ if "resume" in mtimedb and \
+ "mergelist" in mtimedb["resume"] and \
+ len(mtimedb["resume"]["mergelist"]) > 1:
+ mtimedb["resume_backup"] = mtimedb["resume"]
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ mydepgraph.saveNomergeFavorites()
+
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval
+
+def action_config(settings, trees, myopts, myfiles):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ uq = UserQuery(myopts)
+ if len(myfiles) != 1:
+ print(red("!!! config can only take a single package atom at this time\n"))
+ sys.exit(1)
+ if not is_valid_package_atom(myfiles[0], allow_repo=True):
+ portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ sys.exit(1)
+ print()
+ try:
+ pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ if len(pkgs) == 0:
+ print("No packages found.\n")
+ sys.exit(0)
+ elif len(pkgs) > 1:
+ if "--ask" in myopts:
+ options = []
+ print("Please select a package to configure:")
+ idx = 0
+ for pkg in pkgs:
+ idx += 1
+ options.append(str(idx))
+ print(options[-1]+") "+pkg)
+ print("X) Cancel")
+ options.append("X")
+ idx = uq.query("Selection?", enter_invalid, responses=options)
+ if idx == "X":
+ sys.exit(128 + signal.SIGINT)
+ pkg = pkgs[int(idx)-1]
+ else:
+ print("The following packages are available:")
+ for pkg in pkgs:
+ print("* "+pkg)
+ print("\nPlease use a specific atom or the --ask option.")
+ sys.exit(1)
+ else:
+ pkg = pkgs[0]
+
+ print()
+ if "--ask" in myopts:
+ if uq.query("Ready to configure %s?" % pkg, enter_invalid) == "No":
+ sys.exit(128 + signal.SIGINT)
+ else:
+ print("Configuring pkg...")
+ print()
+ ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
+ mysettings = portage.config(clone=settings)
+ vardb = trees[mysettings['EROOT']]['vartree'].dbapi
+ debug = mysettings.get("PORTAGE_DEBUG") == "1"
+ retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
+ debug=debug, cleanup=True,
+ mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
+ if retval == os.EX_OK:
+ portage.doebuild(ebuildpath, "clean", settings=mysettings,
+ debug=debug, mydbapi=vardb, tree="vartree")
+ print()
+
+def action_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, myfiles, spinner, scheduler=None):
+ # Kill packages that aren't explicitly merged or are required as a
+ # dependency of another package. World file is explicit.
+
+ # Global depclean or prune operations are not very safe when there are
+ # missing dependencies since it's unknown how badly incomplete
+ # the dependency graph is, and we might accidentally remove packages
+ # that should have been pulled into the graph. On the other hand, it's
+ # relatively safe to ignore missing deps when only asked to remove
+ # specific packages.
+
+ msg = []
+ if "preserve-libs" not in settings.features and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) == "n":
+ msg.append("Depclean may break link level dependencies. Thus, it is\n")
+ msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+ msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+ msg.append("\n")
+ msg.append("Always study the list of packages to be cleaned for any obvious\n")
+ msg.append("mistakes. Packages that are part of the world set will always\n")
+ msg.append("be kept. They can be manually added to this set with\n")
+ msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
+ msg.append("package.provided (see portage(5)) will be removed by\n")
+ msg.append("depclean, even if they are part of the world set.\n")
+ msg.append("\n")
+ msg.append("As a safety measure, depclean will not remove any packages\n")
+ msg.append("unless *all* required dependencies have been resolved. As a\n")
+ msg.append("consequence of this, it often becomes necessary to run\n")
+ msg.append("%s" % good("`emerge --update --newuse --deep @world`")
+ + " prior to depclean.\n")
+
+ if action == "depclean" and "--quiet" not in myopts and not myfiles:
+ portage.writemsg_stdout("\n")
+ for x in msg:
+ portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+ root_config = trees[settings['EROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+
+ args_set = InternalPackageSet(allow_repo=True)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ else:
+ writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), action),
+ level=logging.WARN, noiselevel=-1)
+ if not matched_packages:
+ writemsg_level(">>> No packages selected for removal by %s\n" % \
+ action)
+ return 0
+
+ # The calculation is done in a separate function so that depgraph
+ # references go out of scope and the corresponding memory
+ # is freed before we call unmerge().
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+
+ clear_caches(trees)
+
+ if rval != os.EX_OK:
+ return rval
+
+ if cleanlist:
+ rval = unmerge(root_config, myopts, "unmerge",
+ cleanlist, ldpath_mtimes, ordered=ordered,
+ scheduler=scheduler)
+
+ if action == "prune":
+ return rval
+
+ if not cleanlist and "--quiet" in myopts:
+ return rval
+
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
+ print("Required packages: "+str(req_pkg_count))
+ if "--pretend" in myopts:
+ print("Number to remove: "+str(len(cleanlist)))
+ else:
+ print("Number removed: "+str(len(cleanlist)))
+
+ return rval
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
+ allow_missing_deps = bool(args_set)
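+ # Missing dependencies are tolerated only when specific package
+ # arguments were given; a global depclean needs a complete graph.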
+
+ debug = '--debug' in myopts
+ xterm_titles = "notitles" not in settings.features
+ root_len = len(settings["ROOT"])
+ eroot = settings['EROOT']
+ root_config = trees[eroot]["root_config"]
+ psets = root_config.setconfig.psets
+ deselect = myopts.get('--deselect') != 'n'
+ required_sets = {}
+ required_sets['world'] = psets['world']
+
+ # When removing packages, a temporary version of the world 'selected'
+ # set may be used which excludes packages that are intended to be
+ # eligible for removal.
+ selected_set = psets['selected']
+ required_sets['selected'] = selected_set
+ protected_set = InternalPackageSet()
+ protected_set_name = '____depclean_protected_set____'
+ required_sets[protected_set_name] = protected_set
+ system_set = psets["system"]
+
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ if not set_atoms["system"] or not set_atoms["selected"]:
+
+ if not set_atoms["system"]:
+ writemsg_level("!!! You have no system list.\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ if not set_atoms["selected"]:
+ writemsg_level("!!! You have no world file.\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ writemsg_level("!!! Proceeding is likely to " + \
+ "break your installation.\n",
+ level=logging.WARNING, noiselevel=-1)
+ if "--pretend" not in myopts:
+ countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
+
+ if action == "depclean":
+ emergelog(xterm_titles, " >>> depclean")
+
+ writemsg_level("\nCalculating dependencies ")
+ resolver_params = create_depgraph_params(myopts, "remove")
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver._load_vdb()
+ vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = trees[eroot]["vartree"].dbapi
+
+ if action == "depclean":
+
+ if args_set:
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed but not matched
+ # by an argument atom since we don't want to clean any
+ # package if something depends on it.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg._metadata["PROVIDE"], _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ elif action == "prune":
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed since we don't want
+ # to prune a package if something depends on it.
+ protected_set.update(vardb.cp_all())
+
+ if not args_set:
+
+ # Try to prune everything that's slotted.
+ for cp in vardb.cp_all():
+ if len(vardb.cp_list(cp)) > 1:
+ args_set.add(cp)
+
+ # Remove atoms from world that match installed packages
+ # that are also matched by argument atoms, but do not remove
+ # them if they match the highest installed version.
+ for pkg in vardb:
+ if spinner is not None:
+ spinner.update()
+ pkgs_for_cp = vardb.match_pkgs(pkg.cp)
+ if not pkgs_for_cp or pkg not in pkgs_for_cp:
+ raise AssertionError("package expected in matches: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
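+ # match_pkgs() returns matches sorted ascending by version,
+ # so the last entry is the highest installed version.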
+ highest_version = pkgs_for_cp[-1]
+ if pkg == highest_version:
+ # pkg is the highest version
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if len(pkgs_for_cp) <= 1:
+ raise AssertionError("more packages expected: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg._metadata["PROVIDE"], _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if resolver._frozen_config.excluded_pkgs:
+ excluded_set = resolver._frozen_config.excluded_pkgs
+ required_sets['__excluded__'] = InternalPackageSet()
+
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if excluded_set.findAtomForPackage(pkg):
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg._metadata["PROVIDE"], _unicode(e))
+ del e
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+
+ success = resolver._complete_graph(required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+
+ resolver.display_problems()
+
+ if not success:
+ return 1, [], False, 0
+
+ def unresolved_deps():
+
+ unresolvable = set()
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package) and \
+ (dep.priority > UnmergeDepPriority.SOFT):
+ unresolvable.add((dep.atom, dep.parent.cpv))
+
+ if not unresolvable:
+ return False
+
+ if unresolvable and not allow_missing_deps:
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ resolver._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ prefix = bad(" * ")
+ msg = []
+ msg.append("Dependencies could not be completely resolved due to")
+ msg.append("the following required packages not being installed:")
+ msg.append("")
+ for atom, parent in unresolvable:
+ if atom != atom.unevaluated_atom and \
+ vardb.match(_unicode(atom)):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Have you forgotten to do a complete update prior " + \
+ "to depclean? The most comprehensive command for this " + \
+ "purpose is as follows:", 65
+ ))
+ msg.append("")
+ msg.append(" " + \
+ good("emerge --update --newuse --deep --with-bdeps=y @world"))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Note that the --with-bdeps=y option is not required in " + \
+ "many situations. Refer to the emerge manual page " + \
+ "(run `man emerge`) for more information about " + \
+ "--with-bdeps.", 65
+ ))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Also, note that it may be necessary to manually uninstall " + \
+ "packages that no longer exist in the portage tree, since " + \
+ "it may not be possible to satisfy their dependencies.", 65
+ ))
+ if action == "prune":
+ msg.append("")
+ msg.append("If you would like to ignore " + \
+ "dependencies then use %s." % good("--nodeps"))
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return True
+ return False
+
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+
+ def show_parents(child_node):
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
+ # With --prune, the highest version can be pulled in without any
+ # real parent since all installed packages are pulled in. In that
+ # case there's nothing to show here.
+ return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
+ parent_strs = []
+ for parent, atoms in parent_atom_dict.items():
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent), ", ".join(atoms)))
+ parent_strs.sort()
+ msg = []
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,))
+ for parent_str in parent_strs:
+ msg.append(" %s\n" % (parent_str,))
+ msg.append("\n")
+ portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
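+
+ # cmp-style comparator; wrapped with cmp_sort_key() below so the
+ # sort works the same way on Python 2 and Python 3.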
+ def cmp_pkg_cpv(pkg1, pkg2):
+ """Sort Package instances by cpv."""
+ if pkg1.cpv > pkg2.cpv:
+ return 1
+ elif pkg1.cpv == pkg2.cpv:
+ return 0
+ else:
+ return -1
+
+ def create_cleanlist():
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ graph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ pkgs_to_remove = []
+
+ if action == "depclean":
+ if args_set:
+
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ arg_atom = None
+ try:
+ arg_atom = args_set.findAtomForPackage(pkg)
+ except portage.exception.InvalidDependString:
+ # this error has already been displayed by now
+ continue
+
+ if arg_atom:
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ else:
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ elif action == "prune":
+
+ for atom in args_set:
+ for pkg in vardb.match_pkgs(atom):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ if not pkgs_to_remove:
+ writemsg_level(
+ ">>> No packages selected for removal by %s\n" % action)
+ if "--verbose" not in myopts:
+ writemsg_level(
+ ">>> To see reverse dependencies, use %s\n" % \
+ good("--verbose"))
+ if action == "prune":
+ writemsg_level(
+ ">>> To ignore dependencies, use %s\n" % \
+ good("--nodeps"))
+
+ return pkgs_to_remove
+
+ cleanlist = create_cleanlist()
+ clean_set = set(cleanlist)
+
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
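+ # The link-level check only matters when libraries will actually be
+ # removed: either preserve-libs is disabled entirely, or a package
+ # in the clean list restricts it.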
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
+
+ # Check if any of these packages are the sole providers of libraries
+ # with consumers that have not been selected for removal. If so, these
+ # packages and any dependencies need to be added to the graph.
+ linkmap = real_vardb._linkmap
+ consumer_cache = {}
+ provider_cache = {}
+ consumer_map = {}
+
+ writemsg_level(">>> Checking for lib consumers...\n")
+
+ for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
+ pkg_dblink = real_vardb._dblink(pkg.cpv)
+ consumers = {}
+
+ for lib in pkg_dblink.getcontents():
+ lib = lib[root_len:]
+ lib_key = linkmap._obj_key(lib)
+ lib_consumers = consumer_cache.get(lib_key)
+ if lib_consumers is None:
+ try:
+ lib_consumers = linkmap.findConsumers(lib_key)
+ except KeyError:
+ continue
+ consumer_cache[lib_key] = lib_consumers
+ if lib_consumers:
+ consumers[lib_key] = lib_consumers
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in list(consumers.items()):
+ for consumer_file in list(lib_consumers):
+ if pkg_dblink.isowner(consumer_file):
+ lib_consumers.remove(consumer_file)
+ if not lib_consumers:
+ del consumers[lib]
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in consumers.items():
+
+ soname = linkmap.getSoname(lib)
+
+ consumer_providers = []
+ for lib_consumer in lib_consumers:
+ providers = provider_cache.get(lib_consumer)
+ if providers is None:
+ providers = linkmap.findProviders(lib_consumer)
+ provider_cache[lib_consumer] = providers
+ if soname not in providers:
+ # Why does this happen?
+ continue
+ consumer_providers.append(
+ (lib_consumer, providers[soname]))
+
+ consumers[lib] = consumer_providers
+
+ consumer_map[pkg] = consumers
+
+ if consumer_map:
+
+ search_files = set()
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
+ for lib_consumer, providers in consumer_providers:
+ search_files.add(lib_consumer)
+ search_files.update(providers)
+
+ writemsg_level(">>> Assigning files to packages...\n")
+ file_owners = {}
+ for f in search_files:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = real_vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ file_owners[f] = owner_set
+
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
+ lib_consumers = set()
+
+ for lib_consumer, providers in consumer_providers:
+ owner_set = file_owners.get(lib_consumer)
+ provider_dblinks = set()
+ provider_pkgs = set()
+
+ if len(providers) > 1:
+ for provider in providers:
+ provider_set = file_owners.get(provider)
+ if provider_set is not None:
+ provider_dblinks.update(provider_set)
+
+ if len(provider_dblinks) > 1:
+ for provider_dblink in provider_dblinks:
+ provider_pkg = resolver._pkg(
+ provider_dblink.mycpv, "installed",
+ root_config, installed=True)
+ if provider_pkg not in clean_set:
+ provider_pkgs.add(provider_pkg)
+
+ if provider_pkgs:
+ continue
+
+ if owner_set is not None:
+ lib_consumers.update(owner_set)
+
+ for consumer_dblink in list(lib_consumers):
+ if resolver._pkg(consumer_dblink.mycpv, "installed",
+ root_config, installed=True) in clean_set:
+ lib_consumers.remove(consumer_dblink)
+ continue
+
+ if lib_consumers:
+ consumers[lib] = lib_consumers
+ else:
+ del consumers[lib]
+ if not consumers:
+ del consumer_map[pkg]
+
+ if consumer_map:
+ # TODO: Implement a package set for rebuilding consumer packages.
+
+ msg = "In order to avoid breakage of link level " + \
+ "dependencies, one or more packages will not be removed. " + \
+ "This can be solved by rebuilding " + \
+ "the packages that pulled them in."
+
+ prefix = bad(" * ")
+ writemsg_level("".join(prefix + "%s\n" % line for \
+ line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+ msg = []
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+ consumers = consumer_map[pkg]
+ consumer_libs = {}
+ for lib, lib_consumers in consumers.items():
+ for consumer in lib_consumers:
+ consumer_libs.setdefault(
+ consumer.mycpv, set()).add(linkmap.getSoname(lib))
+ unique_consumers = set(chain(*consumers.values()))
+ unique_consumers = sorted(consumer.mycpv \
+ for consumer in unique_consumers)
+ msg.append("")
+ msg.append(" %s pulled in by:" % (pkg.cpv,))
+ for consumer in unique_consumers:
+ libs = consumer_libs[consumer]
+ msg.append(" %s needs %s" % \
+ (consumer, ', '.join(sorted(libs))))
+ msg.append("")
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Add lib providers to the graph as children of lib consumers,
+ # and also add any dependencies pulled in by the provider.
+ writemsg_level(">>> Adding lib providers to graph...\n")
+
+ for pkg, consumers in consumer_map.items():
+ for consumer_dblink in set(chain(*consumers.values())):
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+ "installed", root_config, installed=True)
+ if not resolver._add_pkg(pkg,
+ Dependency(parent=consumer_pkg,
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
+ root=pkg.root)):
+ resolver.display_problems()
+ return 1, [], False, 0
+
+ writemsg_level("\nCalculating dependencies ")
+ success = resolver._complete_graph(
+ required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+ resolver.display_problems()
+ if not success:
+ return 1, [], False, 0
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+ cleanlist = create_cleanlist()
+ if not cleanlist:
+ return 0, [], False, required_pkgs_total
+ clean_set = set(cleanlist)
+
+ if clean_set:
+ writemsg_level(">>> Calculating removal order...\n")
+ # Use a topological sort to create an unmerge order such that
+ # each package is unmerged before its dependencies. This is
+ # necessary to avoid breaking things that may need to run
+ # during pkg_prerm or pkg_postrm phases.
+
+ # Create a new graph to account for dependencies between the
+ # packages being unmerged.
+ graph = digraph()
+ del cleanlist[:]
+
+ runtime = UnmergeDepPriority(runtime=True)
+ runtime_post = UnmergeDepPriority(runtime_post=True)
+ buildtime = UnmergeDepPriority(buildtime=True)
+ priority_map = {
+ "RDEPEND": runtime,
+ "PDEPEND": runtime_post,
+ "HDEPEND": buildtime,
+ "DEPEND": buildtime,
+ }
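+ # Build-time deps map to the weakest (SOFT) unmerge priority, so
+ # they are the first edges ignored when breaking cycles below.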
+
+ for node in clean_set:
+ graph.add(node, None)
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
+ if not depstr:
+ continue
+ priority = priority_map[dep_type]
+
+ if debug:
+ writemsg_level("\nParent: %s\n"
+ % (node,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Depstring: %s\n"
+ % (depstr,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Priority: %s\n"
+ % (priority,), noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ atoms = resolver._select_atoms(eroot, depstr,
+ myuse=node.use.enabled, parent=node,
+ priority=priority)[node]
+ except portage.exception.InvalidDependString:
+ # Ignore invalid deps of packages that will
+ # be uninstalled anyway.
+ continue
+
+ if debug:
+ writemsg_level("Candidates: [%s]\n" % \
+ ', '.join("'%s'" % (x,) for x in atoms),
+ noiselevel=-1, level=logging.DEBUG)
+
+ for atom in atoms:
+ if not isinstance(atom, portage.dep.Atom):
+ # Ignore invalid atoms returned from dep_check().
+ continue
+ if atom.blocker:
+ continue
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ continue
+ for child_node in matches:
+ if child_node in clean_set:
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
+
+ if debug:
+ writemsg_level("\nunmerge digraph:\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ graph.debug_print()
+ writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+ ordered = True
+ if len(graph.order) == len(graph.root_nodes()):
+ # If there are no dependencies between packages
+ # let unmerge() group them by cat/pn.
+ ordered = False
+ cleanlist = [pkg.cpv for pkg in graph.order]
+ else:
+ # Order nodes from lowest to highest overall reference count for
+ # optimal root node selection (this can help minimize issues
+ # with unaccounted implicit dependencies).
+ node_refcounts = {}
+ for node in graph.order:
+ node_refcounts[node] = len(graph.parent_nodes(node))
+ def cmp_reference_count(node1, node2):
+ return node_refcounts[node1] - node_refcounts[node2]
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count))
+
+ ignore_priority_range = [None]
+ ignore_priority_range.extend(
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
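+ # Peel off the current root nodes (those with no remaining
+ # parents); when circular deps leave no true roots, retry while
+ # ignoring successively stronger dependency priorities.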
+ while graph:
+ for ignore_priority in ignore_priority_range:
+ nodes = graph.root_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ break
+ if not nodes:
+ raise AssertionError("no root nodes")
+ if ignore_priority is not None:
+ # Some deps have been dropped due to circular dependencies,
+ # so only pop one node in order to minimize the number that
+ # are dropped.
+ del nodes[1:]
+ for node in nodes:
+ graph.remove(node)
+ cleanlist.append(node.cpv)
+
+ return 0, cleanlist, ordered, required_pkgs_total
+ return 0, [], False, required_pkgs_total
+
+def action_deselect(settings, trees, opts, atoms):
+ enter_invalid = '--ask-enter-invalid' in opts
+ root_config = trees[settings['EROOT']]['root_config']
+ world_set = root_config.sets['selected']
+ if not hasattr(world_set, 'update'):
+ writemsg_level("World @selected set does not appear to be mutable.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ pretend = '--pretend' in opts
+ locked = False
+ if not pretend and hasattr(world_set, 'lock'):
+ world_set.lock()
+ locked = True
+ try:
+ world_set.load()
+ world_atoms = world_set.getAtoms()
+ vardb = root_config.trees["vartree"].dbapi
+ expanded_atoms = set(atoms)
+
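+ # Expand the given atoms so matching world entries are found even
+ # when the category was omitted (null/) or when the world file
+ # records a slot atom.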
+ for atom in atoms:
+ if not atom.startswith(SETPREFIX):
+ if atom.cp.startswith("null/"):
+ # try to expand category from world set
+ null_cat, pn = portage.catsplit(atom.cp)
+ for world_atom in world_atoms:
+ cat, world_pn = portage.catsplit(world_atom.cp)
+ if pn == world_pn:
+ expanded_atoms.add(
+ Atom(atom.replace("null", cat, 1),
+ allow_repo=True, allow_wildcard=True))
+
+ for cpv in vardb.match(atom):
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
+
+ discard_atoms = set()
+ for atom in world_set:
+ for arg_atom in expanded_atoms:
+ if arg_atom.startswith(SETPREFIX):
+ if atom.startswith(SETPREFIX) and \
+ arg_atom == atom:
+ discard_atoms.add(atom)
+ break
+ else:
+ if not atom.startswith(SETPREFIX) and \
+ arg_atom.intersects(atom) and \
+ not (arg_atom.slot and not atom.slot) and \
+ not (arg_atom.repo and not atom.repo):
+ discard_atoms.add(atom)
+ break
+ if discard_atoms:
+ for atom in sorted(discard_atoms):
+
+ if pretend:
+ action_desc = "Would remove"
+ else:
+ action_desc = "Removing"
+
+ if atom.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+
+ writemsg_stdout(
+ ">>> %s %s from \"%s\" favorites file...\n" %
+ (action_desc, colorize("INFORM", _unicode(atom)),
+ filename), noiselevel=-1)
+
+ if '--ask' in opts:
+ prompt = "Would you like to remove these " + \
+ "packages from your world favorites?"
+ uq = UserQuery(opts)
+ if uq.query(prompt, enter_invalid) == 'No':
+ return 128 + signal.SIGINT
+
+ remaining = set(world_set)
+ remaining.difference_update(discard_atoms)
+ if not pretend:
+ world_set.replace(remaining)
+ else:
+ print(">>> No matching atoms found in \"world\" favorites file...")
+ finally:
+ if locked:
+ world_set.unlock()
+ return os.EX_OK
+
+class _info_pkgs_ver(object):
+ def __init__(self, ver, repo_suffix, provide_suffix):
+ self.ver = ver
+ self.repo_suffix = repo_suffix
+ self.provide_suffix = provide_suffix
+
+ def __lt__(self, other):
+ return portage.versions.vercmp(self.ver, other.ver) < 0
+
+ def toString(self):
+ """
+ This may return unicode if repo_name contains unicode.
+ Don't use __str__ and str() since unicode triggers compatibility
+ issues between python 2.x and 3.x.
+ """
+ return self.ver + self.repo_suffix + self.provide_suffix
+
+def action_info(settings, trees, myopts, myfiles):
+
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ dbs = [vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
+ output_buffer = []
+ append = output_buffer.append
+ root_config = trees[settings['EROOT']]['root_config']
+ chost = settings.get("CHOST")
+
+ append(getportageversion(settings["PORTDIR"], None,
+ settings.profile_path, settings["CHOST"],
+ trees[settings['EROOT']]["vartree"].dbapi))
+
+ header_width = 65
+ header_title = "System Settings"
+ if myfiles:
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("System uname: %s" % (platform.platform(aliased=1),))
+
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] // 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] // 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] // 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] // 1024,)
+ append(line)
+
+ lastSync = portage.grabfile(os.path.join(
+ settings["PORTDIR"], "metadata", "timestamp.chk"))
+ if lastSync:
+ lastSync = lastSync[0]
+ else:
+ lastSync = "Unknown"
+ append("Timestamp of tree: %s" % (lastSync,))
+
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
+ try:
+ proc = subprocess.Popen(["distcc", "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ distcc_str = output[1].split("\n", 1)[0]
+ if "distcc" in settings.features:
+ distcc_str += " [enabled]"
+ else:
+ distcc_str += " [disabled]"
+ append(distcc_str)
+
+ try:
+ proc = subprocess.Popen(["ccache", "-V"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ ccache_str = output[1].split("\n", 1)[0]
+ if "ccache" in settings.features:
+ ccache_str += " [enabled]"
+ else:
+ ccache_str += " [disabled]"
+ append(ccache_str)
+
+ myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+ "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
+ myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+ atoms = []
+ for x in myvars:
+ try:
+ x = Atom(x)
+ except InvalidAtom:
+ append("%-20s %s" % (x+":", "[NOT VALID]"))
+ else:
+ for atom in expand_new_virt(vardb, x):
+ if not atom.blocker:
+ atoms.append((x, atom))
+
+ myvars = sorted(set(atoms))
+
+ main_repo = portdb.repositories.mainRepo()
+ if main_repo is not None:
+ main_repo = main_repo.name
+ cp_map = {}
+ cp_max_len = 0
+
+ for orig_atom, x in myvars:
+ pkg_matches = vardb.match(x)
+
+ versions = []
+ for cpv in pkg_matches:
+ matched_cp = portage.versions.cpv_getkey(cpv)
+ ver = portage.versions.cpv_getversion(cpv)
+ ver_map = cp_map.setdefault(matched_cp, {})
+ prev_match = ver_map.get(ver)
+ if prev_match is not None:
+ if prev_match.provide_suffix:
+ # prefer duplicate matches that include
+ # additional virtual provider info
+ continue
+
+ if len(matched_cp) > cp_max_len:
+ cp_max_len = len(matched_cp)
+ repo = vardb.aux_get(cpv, ["repository"])[0]
+ if repo == main_repo:
+ repo_suffix = ""
+ elif not repo:
+ repo_suffix = "::<unknown repository>"
+ else:
+ repo_suffix = "::" + repo
+
+ if matched_cp == orig_atom.cp:
+ provide_suffix = ""
+ else:
+ provide_suffix = " (%s)" % (orig_atom,)
+
+ ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+ for cp in sorted(cp_map):
+ versions = sorted(cp_map[cp].values())
+ versions = ", ".join(ver.toString() for ver in versions)
+ append("%s %s" % \
+ ((cp + ":").ljust(cp_max_len + 1), versions))
+
+ repos = portdb.settings.repositories
+ if "--verbose" in myopts:
+ append("Repositories:\n")
+ for repo in repos:
+ append(repo.info_string())
+ else:
+ append("Repositories: %s" % \
+ " ".join(repo.name for repo in repos))
+
+ installed_sets = sorted(s for s in
+ root_config.sets['selected'].getNonAtoms() if s.startswith(SETPREFIX))
+ if installed_sets:
+ sets_line = "Installed sets: "
+ sets_line += ", ".join(installed_sets)
+ append(sets_line)
+
+ if "--verbose" in myopts:
+ myvars = list(settings)
+ else:
+ myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+ 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
+ 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
+ 'PORTAGE_BZIP2_COMMAND',
+ 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
+ 'EMERGE_DEFAULT_OPTS']
+
+ myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+ myvars_ignore_defaults = {
+ 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+ }
+
+ myvars = portage.util.unique_array(myvars)
+ use_expand = settings.get('USE_EXPAND', '').split()
+ use_expand.sort()
+ unset_vars = []
+ myvars.sort()
+ for k in myvars:
+ v = settings.get(k)
+ if v is not None:
+ if k != "USE":
+ default = myvars_ignore_defaults.get(k)
+ if default is not None and \
+ default == v:
+ continue
+ append('%s="%s"' % (k, v))
+ else:
+ use = set(v.split())
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in list(use):
+ if f.startswith(flag_prefix):
+ use.remove(f)
+ use = list(use)
+ use.sort()
+ use = ['USE="%s"' % " ".join(use)]
+ for varname in use_expand:
+ myval = settings.get(varname)
+ if myval:
+ use.append('%s="%s"' % (varname, myval))
+ append(" ".join(use))
+ else:
+ unset_vars.append(k)
+ if unset_vars:
+ append("Unset: "+", ".join(unset_vars))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ # If some packages were found...
+ if mypkgs:
+ # Get our global settings (we only print stuff if it varies from
+ # the current config)
+ mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
+ auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+ auxkeys.append('DEFINED_PHASES')
+ pkgsettings = portage.config(clone=settings)
+
+ # Loop through each package
+ # Only print settings if they differ from global settings
+ header_title = "Package Settings"
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
+ for mypkg in mypkgs:
+ cpv = mypkg[0]
+ pkg_type = mypkg[1]
+ # Get all package specific variables
+ if pkg_type == "installed":
+ metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "ebuild":
+ metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "binary":
+ metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+ pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+ (metadata.get(x, '') for x in Package.metadata_keys)),
+ root_config=root_config, type_name=pkg_type)
+
+ if pkg_type == "installed":
+ append("\n%s was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "ebuild":
+ append("\n%s would be built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "binary":
+ append("\n%s (non-installed binary) was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+
+ append('%s' % pkg_use_display(pkg, myopts))
+ if pkg_type == "installed":
+ for myvar in mydesiredvars:
+ if metadata[myvar].split() != settings.get(myvar, '').split():
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ if metadata['DEFINED_PHASES']:
+ if 'info' not in metadata['DEFINED_PHASES'].split():
+ continue
+
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
+
+ if pkg_type == "installed":
+ ebuildpath = vardb.findname(pkg.cpv)
+ elif pkg_type == "ebuild":
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ elif pkg_type == "binary":
+ tbz2_file = bindb.bintree.getname(pkg.cpv)
+ ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+ ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+ tmpdir = tempfile.mkdtemp()
+ ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+ with open(ebuildpath, 'w') as f:
+ f.write(ebuild_file_contents)
+
+ if not ebuildpath or not os.path.exists(ebuildpath):
+ out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+ continue
+
+ if pkg_type == "installed":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
+ tree="vartree")
+ elif pkg_type == "ebuild":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
+ tree="porttree")
+ elif pkg_type == "binary":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
+ tree="bintree")
+ shutil.rmtree(tmpdir)
+
+def action_metadata(settings, portdb, myopts, porttrees=None):
+ if porttrees is None:
+ porttrees = portdb.porttrees
+ portage.writemsg_stdout("\n>>> Updating Portage cache\n")
+ old_umask = os.umask(0o002)
+ cachedir = os.path.normpath(settings.depcachedir)
+ if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
+ "/lib", "/opt", "/proc", "/root", "/sbin",
+ "/sys", "/tmp", "/usr", "/var"]:
+ print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
+ "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
+ print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
+ sys.exit(73)
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+ auxdbkeys = portdb._known_keys
+
+ class TreeData(object):
+ __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
+ def __init__(self, dest_db, eclass_db, path, src_db):
+ self.dest_db = dest_db
+ self.eclass_db = eclass_db
+ self.path = path
+ self.src_db = src_db
+ self.valid_nodes = set()
+
+ porttrees_data = []
+ for path in porttrees:
+ src_db = portdb._pregen_auxdb.get(path)
+ if src_db is None:
+ # portdbapi does not populate _pregen_auxdb
+ # when FEATURES=metadata-transfer is enabled
+ src_db = portdb._create_pregen_cache(path)
+
+ if src_db is not None:
+ porttrees_data.append(TreeData(portdb.auxdb[path],
+ portdb.repositories.get_repo_for_location(path).eclass_db, path, src_db))
+
+ porttrees = [tree_data.path for tree_data in porttrees_data]
+
+ quiet = settings.get('TERM') == 'dumb' or \
+ '--quiet' in myopts or \
+ not sys.stdout.isatty()
+
+ onProgress = None
+ if not quiet:
+ progressBar = portage.output.TermProgressBar()
+ progressHandler = ProgressHandler()
+ onProgress = progressHandler.onProgress
+ def display():
+ progressBar.set(progressHandler.curval, progressHandler.maxval)
+ progressHandler.display = display
+ def sigwinch_handler(signum, frame):
+ lines, progressBar.term_columns = \
+ portage.output.get_term_size()
+ signal.signal(signal.SIGWINCH, sigwinch_handler)
+
+ # Temporarily override portdb.porttrees so portdb.cp_all()
+ # will only return the relevant subset.
+ portdb_porttrees = portdb.porttrees
+ portdb.porttrees = porttrees
+ try:
+ cp_all = portdb.cp_all()
+ finally:
+ portdb.porttrees = portdb_porttrees
+
+ curval = 0
+ maxval = len(cp_all)
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ # TODO: Display error messages, but do not interfere with the progress bar.
+ # Here's how:
+ # 1) erase the progress bar
+ # 2) show the error message
+ # 3) redraw the progress bar on a new line
+
+ for cp in cp_all:
+ for tree_data in porttrees_data:
+
+ src_chf = tree_data.src_db.validation_chf
+ dest_chf = tree_data.dest_db.validation_chf
+ dest_chf_key = '_%s_' % dest_chf
+ dest_chf_getter = operator.attrgetter(dest_chf)
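+ # The validation chf is the checksum method (e.g. mtime or md5)
+ # that each cache backend uses to validate its entries.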
+
+ for cpv in portdb.cp_list(cp, mytree=tree_data.path):
+ tree_data.valid_nodes.add(cpv)
+ try:
+ src = tree_data.src_db[cpv]
+ except (CacheError, KeyError):
+ continue
+
+ ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
+ if ebuild_location is None:
+ continue
+ ebuild_hash = hashed_path(ebuild_location)
+
+ try:
+ if not tree_data.src_db.validate_entry(src,
+ ebuild_hash, tree_data.eclass_db):
+ continue
+ except CacheError:
+ continue
+
+ eapi = src.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ continue
+
+ dest = None
+ try:
+ dest = tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
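+ # EAPI '' and '0' are equivalent, so drop the key from both sides
+ # to keep the src/dest comparison below stable.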
+ for d in (src, dest):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if src_chf != 'mtime':
+ # src may contain an irrelevant _mtime_ which corresponds
+ # to the time that the cache entry was written
+ src.pop('_mtime_', None)
+
+ if src_chf != dest_chf:
+ # populate src entry with dest_chf_key
+ # (the validity of the dest_chf that we generate from the
+ # ebuild here relies on the fact that we already used
+ # validate_entry to validate the ebuild with src_chf)
+ src[dest_chf_key] = dest_chf_getter(ebuild_hash)
+
+ if dest is not None:
+ if not (dest[dest_chf_key] == src[dest_chf_key] and \
+ tree_data.eclass_db.validate_and_rewrite_cache(
+ dest['_eclasses_'], tree_data.dest_db.validation_chf,
+ tree_data.dest_db.store_eclass_paths) is not None and \
+ set(dest['_eclasses_']) == set(src['_eclasses_'])):
+ dest = None
+ else:
+ # We don't want to skip the write unless we're really
+ # sure that the existing cache is identical, so don't
+ # trust _mtime_ and _eclasses_ alone.
+ for k in auxdbkeys:
+ if dest.get(k, '') != src.get(k, ''):
+ dest = None
+ break
+
+ if dest is not None:
+ # The existing data is valid and identical,
+ # so there's no need to overwrite it.
+ continue
+
+ try:
+ tree_data.dest_db[cpv] = src
+ except CacheError:
+ # ignore it; can't do anything about it.
+ pass
+
+ curval += 1
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ for tree_data in porttrees_data:
+ try:
+ dead_nodes = set(tree_data.dest_db)
+ except CacheError as e:
+ writemsg_level("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (tree_data.path, e),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ else:
+ dead_nodes.difference_update(tree_data.valid_nodes)
+ for cpv in dead_nodes:
+ try:
+ del tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ if not quiet:
+ # make sure the final progress is displayed
+ progressHandler.display()
+ print()
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+ portdb.flush_cache()
+ sys.stdout.flush()
+ os.umask(old_umask)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === regen")
+ #regenerate cache entries
+ sys.stdout.flush()
+
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
+
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ portage.writemsg_stdout("done!\n")
+ return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+ if not myfiles:
+ print("emerge: no search terms provided.")
+ else:
+ searchinstance = search(root_config,
+ spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts)
+ for mysearch in myfiles:
+ try:
+ searchinstance.execute(mysearch)
+ except re.error as comment:
+ print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+ sys.exit(1)
+ searchinstance.output()
+
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ emergelog(xterm_titles, " === sync")
+
+ selected_repos = []
+ unknown_repo_names = []
+ missing_sync_type = []
+ if emerge_config.args:
+ for repo_name in emerge_config.args:
+ try:
+ repo = emerge_config.target_config.settings.repositories[repo_name]
+ except KeyError:
+ unknown_repo_names.append(repo_name)
+ else:
+ selected_repos.append(repo)
+ if repo.sync_type is None:
+ missing_sync_type.append(repo)
+
+ if unknown_repo_names:
+ writemsg_level("!!! %s\n" % _("Unknown repo(s): %s") %
+ " ".join(unknown_repo_names),
+ level=logging.ERROR, noiselevel=-1)
+
+ if missing_sync_type:
+ writemsg_level("!!! %s\n" %
+ _("Missing sync-type for repo(s): %s") %
+ " ".join(repo.name for repo in missing_sync_type),
+ level=logging.ERROR, noiselevel=-1)
+
+ if unknown_repo_names or missing_sync_type:
+ return 1
+
+ else:
+ selected_repos.extend(emerge_config.target_config.settings.repositories)
+
+ for repo in selected_repos:
+ if repo.sync_type is not None:
+ returncode = _sync_repo(emerge_config, repo)
+ if returncode != os.EX_OK:
+ return returncode
+
+ # Reload the whole config from scratch.
+ portage._sync_mode = False
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ mybestpv = emerge_config.target_config.trees['porttree'].dbapi.xmatch(
+ "bestmatch-visible", portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ emerge_config.target_config.trees['vartree'].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(emerge_config.target_config.root,
+ portage.util.shlex_split(
+ emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+
+ if mybestpv != mypvs and "--quiet" not in emerge_config.opts:
+ print()
+ print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(warn(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ print()
+
+ display_news_notification(emerge_config.target_config, emerge_config.opts)
+ return os.EX_OK
+
+def _sync_repo(emerge_config, repo):
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ xterm_titles = "notitles" not in settings.features
+ msg = ">>> Synchronization of repository '%s' located in '%s'..." % (repo.name, repo.location)
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ out = portage.output.EOutput()
+ try:
+ st = os.stat(repo.location)
+ except OSError:
+ st = None
+ if st is None:
+ print(">>> '%s' not found, creating it." % repo.location)
+ portage.util.ensure_dirs(repo.location, mode=0o755)
+ st = os.stat(repo.location)
+
+ usersync_uid = None
+ spawn_kwargs = {}
+ spawn_kwargs["env"] = settings.environ()
+ if 'usersync' in settings.features and \
+ portage.data.secpass >= 2 and \
+ (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
+ st.st_gid != os.getgid() and st.st_mode & 0o070):
+ try:
+ homedir = pwd.getpwuid(st.st_uid).pw_dir
+ except KeyError:
+ pass
+ else:
+ # Drop privileges when syncing, in order to match
+ # existing uid/gid settings.
+ usersync_uid = st.st_uid
+ spawn_kwargs["uid"] = st.st_uid
+ spawn_kwargs["gid"] = st.st_gid
+ spawn_kwargs["groups"] = [st.st_gid]
+ spawn_kwargs["env"]["HOME"] = homedir
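+ # Start from a group-friendly umask, and mask group write as well
+ # when the repository directory itself is not group-writable.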
+ umask = 0o002
+ if not st.st_mode & 0o020:
+ umask = umask | 0o020
+ spawn_kwargs["umask"] = umask
+
+ if usersync_uid is not None:
+ # PORTAGE_TMPDIR is used below, so validate it and
+ # bail out if necessary.
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ syncuri = repo.sync_uri
+
+ vcs_dirs = frozenset(VCS_DIRS)
+ vcs_dirs = vcs_dirs.intersection(os.listdir(repo.location))
+
+ os.umask(0o022)
+ dosyncuri = syncuri
+ updatecache_flg = False
+ if repo.sync_type == "git":
+ # Update existing git repository, and ignore the syncuri. We are
+ # going to trust the user and assume that the user is in the branch
+ # that he/she wants updated. We'll let the user manage branches with
+ # git directly.
+ if portage.process.find_binary("git") is None:
+ msg = ["Command not found: git",
+ "Type \"emerge %s\" to enable git support." % portage.const.GIT_PACKAGE_ATOM]
+ for l in msg:
+ writemsg_level("!!! %s\n" % l,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ msg = ">>> Starting git pull in %s..." % repo.location
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
+ if exitcode != os.EX_OK:
+ msg = "!!! git pull error in %s." % repo.location
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return exitcode
+ msg = ">>> Git pull in %s successful" % repo.location
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ elif repo.sync_type == "rsync":
+ for vcs_dir in vcs_dirs:
+ writemsg_level(("!!! %s appears to be under revision " + \
+ "control (contains %s).\n!!! Aborting rsync sync.\n") % \
+ (repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ return 1
+ rsync_binary = portage.process.find_binary("rsync")
+ if rsync_binary is None:
+ print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
+ print("!!! Type \"emerge %s\" to enable rsync support." % portage.const.RSYNC_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
+ mytimeout=180
+
+ rsync_opts = []
+ if settings["PORTAGE_RSYNC_OPTS"] == "":
+ portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
+ rsync_opts.extend([
+ "--recursive", # Recurse directories
+ "--links", # Consider symlinks
+ "--safe-links", # Ignore links outside of tree
+ "--perms", # Preserve permissions
+ "--times", # Preserive mod times
+ "--omit-dir-times",
+ "--compress", # Compress the data transmitted
+ "--force", # Force deletion on non-empty dirs
+ "--whole-file", # Don't do block transfers, only entire files
+ "--delete", # Delete files that aren't in the master tree
+ "--stats", # Show final statistics about what was transfered
+ "--human-readable",
+ "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
+ "--exclude=/distfiles", # Exclude distfiles from consideration
+ "--exclude=/local", # Exclude local from consideration
+ "--exclude=/packages", # Exclude packages from consideration
+ ])
+
+ else:
+ # The below validation is not needed when using the above hardcoded
+ # defaults.
+
+ portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
+ rsync_opts.extend(portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_OPTS", "")))
+ for opt in ("--recursive", "--times"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ for exclude in ("distfiles", "local", "packages"):
+ opt = "--exclude=/%s" % exclude
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + \
+ " adding required option %s not included in " % opt + \
+ "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
+ rsync_opts.append(opt)
+
+ if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
+ def rsync_opt_startswith(opt_prefix):
+ for x in rsync_opts:
+ if x.startswith(opt_prefix):
+ return True
+ return False
+
+ if not rsync_opt_startswith("--timeout="):
+ rsync_opts.append("--timeout=%d" % mytimeout)
+
+ for opt in ("--compress", "--whole-file"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ if "--quiet" in myopts:
+ rsync_opts.append("--quiet") # Shut up a lot
+ else:
+ rsync_opts.append("--verbose") # Print filelist
+
+ if "--verbose" in myopts:
+ rsync_opts.append("--progress") # Progress meter for each file
+
+ if "--debug" in myopts:
+ rsync_opts.append("--checksum") # Force checksum on all files
+
+ # Real local timestamp file.
+ servertimestampfile = os.path.join(
+ repo.location, "metadata", "timestamp.chk")
+
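+ # The timestamp.chk file holds a single line in TIMESTAMP_FORMAT,
+ # typically of the form "Sat, 30 May 2015 08:45:01 +0000" (an
+ # illustrative value, not from the original source).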
+ content = portage.util.grabfile(servertimestampfile)
+ mytimestamp = 0
+ if content:
+ try:
+ mytimestamp = time.mktime(time.strptime(content[0],
+ TIMESTAMP_FORMAT))
+ except (OverflowError, ValueError):
+ pass
+ del content
+
+ try:
+ rsync_initial_timeout = \
+ int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+ except ValueError:
+ rsync_initial_timeout = 15
+
+ try:
+ maxretries = int(settings["PORTAGE_RSYNC_RETRIES"])
+ except (KeyError, ValueError):
+ # Unset or unparseable; fall back to the default
+ # number of retries.
+ maxretries = -1
+
+ retries=0
+ try:
+ proto, user_name, hostname, port = re.split(
+ r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
+ syncuri, maxsplit=4)[1:5]
+ except ValueError:
+ writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
+ noiselevel=-1, level=logging.ERROR)
+ return 1
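+ # Illustrative example (not from the original source): for
+ # syncuri = "rsync://user@[2001:db8::1]:873/gentoo-portage" the
+ # split above yields proto="rsync", user_name="user@",
+ # hostname="[2001:db8::1]" and port=":873"; the optional groups
+ # are None when absent.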
+
+ ssh_opts = settings.get("PORTAGE_SSH_OPTS")
+
+ if port is None:
+ port=""
+ if user_name is None:
+ user_name=""
+ if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
+ getaddrinfo_host = hostname
+ else:
+ # getaddrinfo needs the brackets stripped
+ getaddrinfo_host = hostname[1:-1]
+ updatecache_flg=True
+ all_rsync_opts = set(rsync_opts)
+ extra_rsync_opts = portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
+ all_rsync_opts.update(extra_rsync_opts)
+
+ family = socket.AF_UNSPEC
+ if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
+ family = socket.AF_INET
+ elif socket.has_ipv6 and \
+ ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
+ family = socket.AF_INET6
+
+ addrinfos = None
+ uris = []
+
+ try:
+ addrinfos = getaddrinfo_validate(
+ socket.getaddrinfo(getaddrinfo_host, None,
+ family, socket.SOCK_STREAM))
+ except socket.error as e:
+ writemsg_level(
+ "!!! getaddrinfo failed for '%s': %s\n" % (hostname,
+ _unicode_decode(e.strerror, encoding=_encodings['stdio'])),
+ noiselevel=-1, level=logging.ERROR)
+
+ if addrinfos:
+
+ AF_INET = socket.AF_INET
+ AF_INET6 = None
+ if socket.has_ipv6:
+ AF_INET6 = socket.AF_INET6
+
+ ips_v4 = []
+ ips_v6 = []
+
+ for addrinfo in addrinfos:
+ if addrinfo[0] == AF_INET:
+ ips_v4.append("%s" % addrinfo[4][0])
+ elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
+ # IPv6 addresses need to be enclosed in square brackets
+ ips_v6.append("[%s]" % addrinfo[4][0])
+
+ random.shuffle(ips_v4)
+ random.shuffle(ips_v6)
+
+ # Give priority to the address family that
+ # getaddrinfo() returned first.
+ if AF_INET6 is not None and addrinfos and \
+ addrinfos[0][0] == AF_INET6:
+ ips = ips_v6 + ips_v4
+ else:
+ ips = ips_v4 + ips_v6
+
+ for ip in ips:
+ uris.append(syncuri.replace(
+ "//" + user_name + hostname + port + "/",
+ "//" + user_name + ip + port + "/", 1))
+
+ if not uris:
+ # With some configurations we need to use the plain hostname
+ # rather than try to resolve the ip addresses (bug #340817).
+ uris.append(syncuri)
+
+ # reverse, for use with pop()
+ uris.reverse()
+
+ effective_maxretries = maxretries
+ if effective_maxretries < 0:
+ effective_maxretries = len(uris) - 1
+
+ SERVER_OUT_OF_DATE = -1
+ EXCEEDED_MAX_RETRIES = -2
+ while True:
+ if uris:
+ dosyncuri = uris.pop()
+ else:
+ writemsg("!!! Exhausted addresses for %s\n" % \
+ hostname, noiselevel=-1)
+ return 1
+
+ if (retries==0):
+ if "--ask" in myopts:
+ uq = UserQuery(myopts)
+ if uq.query("Do you want to sync your Portage tree " + \
+ "with the mirror at\n" + blue(dosyncuri) + bold("?"),
+ enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ sys.exit(128 + signal.SIGINT)
+ emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
+ if "--quiet" not in myopts:
+ print(">>> Starting rsync with "+dosyncuri+"...")
+ else:
+ emergelog(xterm_titles,
+ ">>> Starting retry %d of %d with %s" % \
+ (retries, effective_maxretries, dosyncuri))
+ writemsg_stdout(
+ "\n\n>>> Starting retry %d of %d with %s\n" % \
+ (retries, effective_maxretries, dosyncuri), noiselevel=-1)
+
+ if dosyncuri.startswith('ssh://'):
+ dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
+
+ if mytimestamp != 0 and "--quiet" not in myopts:
+ print(">>> Checking server timestamp ...")
+
+ rsynccommand = [rsync_binary] + rsync_opts + extra_rsync_opts
+
+ if proto == 'ssh' and ssh_opts:
+ rsynccommand.append("--rsh=ssh " + ssh_opts)
+
+ if "--debug" in myopts:
+ print(rsynccommand)
+
+ exitcode = os.EX_OK
+ servertimestamp = 0
+ # Even if there's no timestamp available locally, fetch the
+ # timestamp anyway as an initial probe to verify that the server is
+ # responsive. This protects us from hanging indefinitely on a
+ # connection attempt to an unresponsive server which rsync's
+ # --timeout option does not prevent.
+ if True:
+ # Temporary file for remote server timestamp comparison.
+ # NOTE: If FEATURES=usersync is enabled then the tempfile
+ # needs to be in a directory that's readable by the usersync
+ # user. We assume that PORTAGE_TMPDIR will satisfy this
+ # requirement, since that's not necessarily true for the
+ # default directory used by the tempfile module.
+ if usersync_uid is not None:
+ tmpdir = settings['PORTAGE_TMPDIR']
+ else:
+ # use default dir from tempfile module
+ tmpdir = None
+ fd, tmpservertimestampfile = \
+ tempfile.mkstemp(dir=tmpdir)
+ os.close(fd)
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=usersync_uid)
+ mycommand = rsynccommand[:]
+ mycommand.append(dosyncuri.rstrip("/") + \
+ "/metadata/timestamp.chk")
+ mycommand.append(tmpservertimestampfile)
+ content = None
+ mypids = []
+ try:
+ # Timeout here in case the server is unresponsive. The
+ # --timeout rsync option doesn't apply to the initial
+ # connection attempt.
+ try:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.register(
+ rsync_initial_timeout)
+
+ mypids.extend(portage.process.spawn(
+ mycommand, returnpid=True,
+ **portage._native_kwargs(spawn_kwargs)))
+ exitcode = os.waitpid(mypids[0], 0)[1]
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=os.getuid())
+ content = portage.grabfile(tmpservertimestampfile)
+ finally:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.unregister()
+ try:
+ os.unlink(tmpservertimestampfile)
+ except OSError:
+ pass
+ except portage.exception.AlarmSignal:
+ # timed out
+ print('timed out')
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
+ os.kill(mypids[0], signal.SIGTERM)
+ os.waitpid(mypids[0], 0)
+ # This is the same code rsync uses for timeout.
+ exitcode = 30
+ else:
+ if exitcode != os.EX_OK:
+ if exitcode & 0xff:
+ exitcode = (exitcode & 0xff) << 8
+ else:
+ exitcode = exitcode >> 8
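+ # os.waitpid() returns a 16-bit status word: the low byte holds
+ # the signal that terminated the process (plus core-dump flag)
+ # and the high byte the exit code. The branch above normalizes
+ # this to a plain exit code, or to signal << 8 so that death by
+ # signal still reads as a non-zero failure.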
+
+ if content:
+ try:
+ servertimestamp = time.mktime(time.strptime(
+ content[0], TIMESTAMP_FORMAT))
+ except (OverflowError, ValueError):
+ pass
+ del mycommand, mypids, content
+ if exitcode == os.EX_OK:
+ if (servertimestamp != 0) and (servertimestamp == mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Cancelling sync -- Already current.")
+ print()
+ print(">>>")
+ print(">>> Timestamps on the server and in the local repository are the same.")
+ print(">>> Cancelling all further sync action. You are already up to date.")
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ return os.EX_OK
+ elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Server out of date: %s" % dosyncuri)
+ print()
+ print(">>>")
+ print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ exitcode = SERVER_OUT_OF_DATE
+ elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
+ # actual sync
+ mycommand = rsynccommand + [dosyncuri+"/", repo.location]
+ exitcode = None
+ try:
+ exitcode = portage.process.spawn(mycommand,
+ **portage._native_kwargs(spawn_kwargs))
+ finally:
+ if exitcode is None:
+ # interrupted
+ exitcode = 128 + signal.SIGINT
+
+ # 0 Success
+ # 1 Syntax or usage error
+ # 2 Protocol incompatibility
+ # 5 Error starting client-server protocol
+ # 35 Timeout waiting for daemon connection
+ if exitcode not in (0, 1, 2, 5, 35):
+ # If the exit code is not among those listed above,
+ # then we may have a partial/inconsistent sync
+ # state, so our previously read timestamp as well
+ # as the corresponding file can no longer be
+ # trusted.
+ mytimestamp = 0
+ try:
+ os.unlink(servertimestampfile)
+ except OSError:
+ pass
+
+ if exitcode in [0, 1, 3, 4, 11, 14, 20, 21]:
+ # Success, or a failure that retrying will not fix.
+ break
+ else:
+ # Code 2 indicates protocol incompatibility, which is expected
+ # for servers with protocol < 29 that don't support
+ # --prune-empty-directories. Retry for a server that supports
+ # at least rsync protocol version 29 (>=rsync-2.6.4).
+ pass
+
+ retries=retries+1
+
+ if maxretries < 0 or retries <= maxretries:
+ print(">>> Retrying...")
+ else:
+ # over retries
+ # exit loop
+ updatecache_flg=False
+ exitcode = EXCEEDED_MAX_RETRIES
+ break
+
+ if (exitcode==0):
+ emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
+ elif exitcode == SERVER_OUT_OF_DATE:
+ return 1
+ elif exitcode == EXCEEDED_MAX_RETRIES:
+ sys.stderr.write(
+ ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
+ return 1
+ elif (exitcode>0):
+ msg = []
+ if exitcode==1:
+ msg.append("Rsync has reported that there is a syntax error. Please ensure")
+ msg.append("that sync-uri attribute for repository '%s' is proper." % repo.name)
+ msg.append("sync-uri: '%s'" % repo.sync_uri)
+ elif exitcode==11:
+ msg.append("Rsync has reported that there is a File IO error. Normally")
+ msg.append("this means your disk is full, but can be caused by corruption")
+ msg.append("on the filesystem that contains repository '%s'. Please investigate" % repo.name)
+ msg.append("and try again after the problem has been fixed.")
+ msg.append("Location of repository: '%s'" % repo.location)
+ elif exitcode==20:
+ msg.append("Rsync was killed before it finished.")
+ else:
+ msg.append("Rsync has not successfully finished. It is recommended that you keep")
+ msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
+ msg.append("to use rsync due to firewall or other restrictions. This should be a")
+ msg.append("temporary problem unless complications exist with your network")
+ msg.append("(and possibly your system's filesystem) configuration.")
+ for line in msg:
+ out.eerror(line)
+ return exitcode
+ elif repo.sync_type == "cvs":
+ if not os.path.exists(EPREFIX + "/usr/bin/cvs"):
+ print("!!! %s/usr/bin/cvs does not exist, so CVS support is disabled." % (EPREFIX))
+ print("!!! Type \"emerge %s\" to enable CVS support." % portage.const.CVS_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
+ cvs_root = syncuri
+ if cvs_root.startswith("cvs://"):
+ cvs_root = cvs_root[6:]
+ if not os.path.exists(os.path.join(repo.location, "CVS")):
+ #initial checkout
+ print(">>> Starting initial cvs checkout with "+syncuri+"...")
+ try:
+ os.rmdir(repo.location)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ sys.stderr.write(
+ "!!! existing '%s' directory; exiting.\n" % repo.location)
+ return 1
+ del e
+ if portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -d %s co -P -d %s %s" %
+ (portage._shell_quote(os.path.dirname(repo.location)), portage._shell_quote(cvs_root),
+ portage._shell_quote(os.path.basename(repo.location)), portage._shell_quote(repo.sync_cvs_repo)),
+ **portage._native_kwargs(spawn_kwargs)) != os.EX_OK:
+ print("!!! cvs checkout error; exiting.")
+ return 1
+ else:
+ #cvs update
+ print(">>> Starting cvs update with "+syncuri+"...")
+ retval = portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -q update -dP" % \
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
+ if retval != os.EX_OK:
+ writemsg_level("!!! cvs update error; exiting.\n",
+ noiselevel=-1, level=logging.ERROR)
+ return retval
+ dosyncuri = syncuri
+
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
+
+ if repo.sync_type == "git":
+ # NOTE: Do this after reloading the config, in case
+ # it did not exist prior to sync, so that the config
+ # and portdb properly account for its existence.
+ exitcode = git_sync_timestamps(portdb, repo.location)
+ if exitcode == os.EX_OK:
+ updatecache_flg = True
+
+ if updatecache_flg and "metadata-transfer" not in settings.features:
+ updatecache_flg = False
+
+ if updatecache_flg and \
+ os.path.exists(os.path.join(repo.location, 'metadata', 'cache')):
+
+ # Only update cache for repo.location since that's
+ # the only one that's been synced here.
+ action_metadata(settings, portdb, myopts, porttrees=[repo.location])
+
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"], portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn([postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(" %s spawn failed of %s\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
+
+ return os.EX_OK
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+ opts, action, files, spinner):
+ # For backward compat, some actions do not require leading '='.
+ ignore_missing_eq = action in ('clean', 'unmerge')
+ root = settings['ROOT']
+ eroot = settings['EROOT']
+ vardb = trees[settings['EROOT']]['vartree'].dbapi
+ valid_atoms = []
+ lookup_owners = []
+
+ # Ensure atoms are valid before calling unmerge().
+ # For backward compat, leading '=' is not required.
+ for x in files:
+ if is_valid_package_atom(x, allow_repo=True) or \
+ (ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+ try:
+ atom = dep_expand(x, mydb=vardb, settings=settings)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ else:
+ if atom.use and atom.use.conditional:
+ writemsg_level(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ "!!! Please check ebuild(5) for full details.\n",
+ level=logging.ERROR)
+ return 1
+ valid_atoms.append(atom)
+
+ elif x.startswith(os.sep):
+ if not x.startswith(eroot):
+ writemsg_level(("!!! '%s' does not start with" + \
+ " $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ return 1
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+
+ elif x.startswith(SETPREFIX) and action == "deselect":
+ valid_atoms.append(x)
+
+ elif "*" in x:
+ try:
+ ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+ except InvalidAtom:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ for cpv in vardb.cpv_all():
+ if portage.match_from_list(ext_atom, [cpv]):
+ require_metadata = False
+ atom = portage.cpv_getkey(cpv)
+ if ext_atom.operator == '=*':
+ atom = "=" + atom + "-" + \
+ portage.versions.cpv_getversion(cpv)
+ if ext_atom.slot:
+ atom += ":" + ext_atom.slot
+ require_metadata = True
+ if ext_atom.repo:
+ atom += "::" + ext_atom.repo
+ require_metadata = True
+
+ atom = Atom(atom, allow_repo=True)
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, ext_atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ valid_atoms.append(atom)
+
+ else:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
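+ # The len(root)-1 slice strips ROOT but keeps the
+ # leading os.sep, so iter_owners() below receives
+ # paths that still begin with "/".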
+
+ owners = set()
+ for pkg, relative_path in \
+ vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if owners:
+ for cpv in owners:
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
+ valid_atoms.append(portage.dep.Atom(atom))
+ else:
+ writemsg_level(("!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0],
+ level=logging.WARNING, noiselevel=-1)
+
+ if files and not valid_atoms:
+ return 1
+
+ if action == 'unmerge' and \
+ '--quiet' not in opts and \
+ '--quiet-unmerge-warn' not in opts:
+ msg = "This action can remove important packages! " + \
+ "In order to be safer, use " + \
+ "`emerge -pv --depclean <atom>` to check for " + \
+ "reverse dependencies before removing packages."
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+ if action == 'deselect':
+ return action_deselect(settings, trees, opts, valid_atoms)
+
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
+ max_jobs = opts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
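+ # Note: opts["--jobs"] is True when --jobs is given without a
+ # number, hence the "max_jobs is True" check above.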
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
+
+ if action in ('clean', 'unmerge') or \
+ (action == 'prune' and "--nodeps" in opts):
+ # When given a list of atoms, unmerge them in the order given.
+ ordered = action == 'unmerge'
+ rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
+ valid_atoms, ldpath_mtimes, ordered=ordered,
+ scheduler=sched_iface)
+ else:
+ rval = action_depclean(settings, trees, ldpath_mtimes,
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
+
+ return rval
+
+def adjust_configs(myopts, trees):
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+
+def adjust_config(myopts, settings):
+ """Make emerge specific adjustments to the config."""
+
+ # Kill noauto as it will break merges otherwise.
+ if "noauto" in settings.features:
+ settings.features.remove('noauto')
+
+ fail_clean = myopts.get('--fail-clean')
+ if fail_clean is not None:
+ if fail_clean is True and \
+ 'fail-clean' not in settings.features:
+ settings.features.add('fail-clean')
+ elif fail_clean == 'n' and \
+ 'fail-clean' in settings.features:
+ settings.features.remove('fail-clean')
+
+ CLEAN_DELAY = 5
+ try:
+ CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+ settings["CLEAN_DELAY"], noiselevel=-1)
+ settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+ settings.backup_changes("CLEAN_DELAY")
+
+ EMERGE_WARNING_DELAY = 10
+ try:
+ EMERGE_WARNING_DELAY = int(settings.get(
+ "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+ settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+ settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+ settings.backup_changes("EMERGE_WARNING_DELAY")
+
+ buildpkg = myopts.get("--buildpkg")
+ if buildpkg is True:
+ settings.features.add("buildpkg")
+ elif buildpkg == 'n':
+ settings.features.discard("buildpkg")
+
+ if "--quiet" in myopts:
+ settings["PORTAGE_QUIET"]="1"
+ settings.backup_changes("PORTAGE_QUIET")
+
+ if "--verbose" in myopts:
+ settings["PORTAGE_VERBOSE"] = "1"
+ settings.backup_changes("PORTAGE_VERBOSE")
+
+ # Set so that configs will be merged regardless of remembered status
+ if ("--noconfmem" in myopts):
+ settings["NOCONFMEM"]="1"
+ settings.backup_changes("NOCONFMEM")
+
+ # Set various debug markers... They should be merged somehow.
+ PORTAGE_DEBUG = 0
+ try:
+ PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+ if PORTAGE_DEBUG not in (0, 1):
+ portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+ PORTAGE_DEBUG, noiselevel=-1)
+ portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+ noiselevel=-1)
+ PORTAGE_DEBUG = 0
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+ settings["PORTAGE_DEBUG"], noiselevel=-1)
+ del e
+ if "--debug" in myopts:
+ PORTAGE_DEBUG = 1
+ settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+ settings.backup_changes("PORTAGE_DEBUG")
+
+ if settings.get("NOCOLOR") not in ("yes","true"):
+ portage.output.havecolor = 1
+
+ # The explicit --color < y | n > option overrides the NOCOLOR environment
+ # variable and stdout auto-detection.
+ if "--color" in myopts:
+ if "y" == myopts["--color"]:
+ portage.output.havecolor = 1
+ settings["NOCOLOR"] = "false"
+ else:
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+ elif settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
+def display_missing_pkg_set(root_config, set_name):
+
+ msg = []
+ msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+ "The following sets exist:") % \
+ colorize("INFORM", set_name))
+ msg.append("")
+
+ for s in sorted(root_config.sets):
+ msg.append(" %s" % s)
+ msg.append("")
+
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.ERROR, noiselevel=-1)
+
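+# Illustrative example (not from the original source): with
+# portdir="/usr/portage" and abs_profile
+# "/usr/portage/profiles/default/linux/amd64/13.0",
+# relative_profile_path() returns "default/linux/amd64/13.0";
+# paths outside profiles/ yield None.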
+def relative_profile_path(portdir, abs_profile):
+ realpath = os.path.realpath(abs_profile)
+ basepath = os.path.realpath(os.path.join(portdir, "profiles"))
+ if realpath.startswith(basepath):
+ profilever = realpath[1 + len(basepath):]
+ else:
+ profilever = None
+ return profilever
+
+def getportageversion(portdir, _unused, profile, chost, vardb):
+ pythonver = 'python %d.%d.%d-%s-%d' % sys.version_info[:]
+ profilever = None
+ repositories = vardb.settings.repositories
+ if profile:
+ profilever = relative_profile_path(portdir, profile)
+ if profilever is None:
+ try:
+ for parent in portage.grabfile(
+ os.path.join(profile, 'parent')):
+ profilever = relative_profile_path(portdir,
+ os.path.join(profile, parent))
+ if profilever is not None:
+ break
+ colon = parent.find(":")
+ if colon != -1:
+ p_repo_name = parent[:colon]
+ try:
+ p_repo_loc = \
+ repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ pass
+ else:
+ profilever = relative_profile_path(p_repo_loc,
+ os.path.join(p_repo_loc, 'profiles',
+ parent[colon+1:]))
+ if profilever is not None:
+ break
+ except portage.exception.PortageException:
+ pass
+
+ if profilever is None:
+ try:
+ profilever = "!" + os.readlink(profile)
+ except OSError:
+ pass
+
+ if profilever is None:
+ profilever = "unavailable"
+
+ libcver = []
+ libclist = set()
+ for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+ if not atom.blocker:
+ libclist.update(vardb.match(atom))
+ if libclist:
+ for cpv in sorted(libclist):
+ libc_split = portage.catpkgsplit(cpv)[1:]
+ if libc_split[-1] == "r0":
+ libc_split = libc_split[:-1]
+ libcver.append("-".join(libc_split))
+ else:
+ libcver = ["unavailable"]
+
+ gccver = getgccversion(chost)
+ unameout=platform.release()+" "+platform.machine()
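+ # Illustrative result (not from the original source):
+ # "Portage 2.2.14 (python 2.7.9-final-0,
+ # default/linux/amd64/13.0, gcc-4.9.2, glibc-2.20,
+ # 3.18.0 x86_64)"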
+
+ return "Portage %s (%s, %s, %s, %s, %s)" % \
+ (portage.VERSION, pythonver, profilever, gccver, ",".join(libcver), unameout)
+
+def git_sync_timestamps(portdb, portdir):
+ """
+ Since git doesn't preserve timestamps, synchronize timestamps between
+ entries and ebuilds/eclasses. Assume the cache has the correct timestamp
+ for a given file as long as the file in the working tree is not modified
+ (relative to HEAD).
+ """
+
+ cache_db = portdb._pregen_auxdb.get(portdir)
+
+ try:
+ if cache_db is None:
+ # portdbapi does not populate _pregen_auxdb
+ # when FEATURES=metadata-transfer is enabled
+ cache_db = portdb._create_pregen_cache(portdir)
+ except CacheError as e:
+ writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if cache_db is None:
+ return os.EX_OK
+
+ if cache_db.validation_chf != 'mtime':
+ # newer formats like md5-dict do not require mtime sync
+ return os.EX_OK
+
+ writemsg_level(">>> Synchronizing timestamps...\n")
+
+ ec_dir = os.path.join(portdir, "eclass")
+ try:
+ ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
+ if f.endswith(".eclass"))
+ except OSError as e:
+ writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ args = [portage.const.BASH_BINARY, "-c",
+ "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
+ portage._shell_quote(portdir)]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
+ rval = proc.wait()
+ proc.stdout.close()
+ if rval != os.EX_OK:
+ return rval
+
+ modified_eclasses = set(ec for ec in ec_names \
+ if os.path.join("eclass", ec + ".eclass") in modified_files)
+
+ updated_ec_mtimes = {}
+
+ for cpv in cache_db:
+ cpv_split = portage.catpkgsplit(cpv)
+ if cpv_split is None:
+ writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ cat, pn, ver, rev = cpv_split
+ cat, pf = portage.catsplit(cpv)
+ relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
+ if relative_eb_path in modified_files:
+ continue
+
+ try:
+ cache_entry = cache_db[cpv]
+ eb_mtime = cache_entry.get("_mtime_")
+ ec_mtimes = cache_entry.get("_eclasses_")
+ except KeyError:
+ writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ except CacheError as e:
+ writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
+ (cpv, e), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if eb_mtime is None:
+ writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ try:
+ eb_mtime = long(eb_mtime)
+ except ValueError:
+ writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
+ (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if ec_mtimes is None:
+ writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if modified_eclasses.intersection(ec_mtimes):
+ continue
+
+ missing_eclasses = set(ec_mtimes).difference(ec_names)
+ if missing_eclasses:
+ writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
+ (cpv, sorted(missing_eclasses)), level=logging.ERROR,
+ noiselevel=-1)
+ continue
+
+ eb_path = os.path.join(portdir, relative_eb_path)
+ try:
+ current_eb_mtime = os.stat(eb_path)[stat.ST_MTIME]
+ except OSError:
+ writemsg_level("!!! Missing ebuild: %s\n" % \
+ (cpv,), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ inconsistent = False
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ updated_mtime = updated_ec_mtimes.get(ec)
+ if updated_mtime is not None and updated_mtime != ec_mtime:
+ writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
+ (cpv, ec), level=logging.ERROR, noiselevel=-1)
+ inconsistent = True
+ break
+
+ if inconsistent:
+ continue
+
+ if current_eb_mtime != eb_mtime:
+ os.utime(eb_path, (eb_mtime, eb_mtime))
+
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ if ec in updated_ec_mtimes:
+ continue
+ ec_path = os.path.join(ec_dir, ec + ".eclass")
+ current_mtime = os.stat(ec_path)[stat.ST_MTIME]
+ if current_mtime != ec_mtime:
+ os.utime(ec_path, (ec_mtime, ec_mtime))
+ updated_ec_mtimes[ec] = ec_mtime
+
+ return os.EX_OK
+
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
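+# Backward-compat usage sketch: callers that predate _emerge_config
+# can still unpack it as a (settings, trees, mtimedb) tuple, e.g.
+# settings, trees, mtimedb = emerge_config
+# as _sync_repo() above does via __iter__().
+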
+def load_emerge_config(emerge_config=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("eprefix", "EPREFIX")):
+ v = os.environ.get(envvar, None)
+ if v and v.strip():
+ kwargs[k] = v
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **portage._native_kwargs(kwargs))
+
+ for root_trees in emerge_config.trees.values():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
+
+ return emerge_config
+
+def getgccversion(chost):
+ """
+ @rtype: C{str}
+ @return: the current in-use gcc version
+ """
+
+ gcc_ver_command = ['gcc', '-dumpversion']
+ gcc_ver_prefix = 'gcc-'
+
+ gcc_not_found_error = red(
+ "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+ "!!! to update the environment of this terminal and possibly\n" +
+ "!!! other terminals also.\n"
+ )
+
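+ # Illustrative results (not from the original source): typically
+ # "gcc-4.9.2" on success, or "[unavailable]" when no usable gcc
+ # is found via gcc-config, ${CHOST}-gcc, or plain gcc.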
+ try:
+ proc = subprocess.Popen(["gcc-config", "-c"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+ return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+ try:
+ proc = subprocess.Popen(
+ [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ try:
+ proc = subprocess.Popen(gcc_ver_command,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ portage.writemsg(gcc_not_found_error, noiselevel=-1)
+ return "[unavailable]"
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if settings["ROOT"] != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings["PORTAGE_NICENESS"])
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ setconfig = root_config.setconfig
+ setconfig._create_default_config()
+ setconfig._parse(update=True)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = myaction is None
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
+
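+ # Illustrative example (not from the original source): an argument
+ # like "@foo{bar=baz,qux}" (SETPREFIX being "@") updates the config
+ # of set "foo" with {"bar": "baz", "qux": "True"} and is then
+ # rewritten to plain "@foo" by the loop below.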
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ if s == "installed":
+ msg = ("The @installed set is deprecated and will soon be "
+ "removed. Please refer to bug #387059 for details.")
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 50):
+ out.ewarn(line)
+ setconfig.active.append(s)
+
+ if do_not_expand:
+ # Loading sets can be slow, so skip it here, in order
+ # to allow the depgraph to indicate progress with the
+ # spinner while sets are loading (bug #461412).
+ newargs.append(a)
+ continue
+
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ writemsg_level("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n",
+ level=logging.ERROR, noiselevel=-1)
+ retval = 1
+ elif not set_atoms:
+ writemsg_level("emerge: '%s' is an empty set\n" % s,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ newargs.extend(set_atoms)
+ for error_msg in sets[s].errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+
+ # Skip warnings about missing repo_name entries for
+ # /usr/local/portage (see bug #248603).
+ try:
+ missing_repo_names.remove('/usr/local/portage')
+ except KeyError:
+ pass
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/portage/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def run_action(emerge_config):
+
+ # skip global updates prior to sync, since it's called after sync
+ if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
+ emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in emerge_config.opts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ load_emerge_config(emerge_config=emerge_config)
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in emerge_config.opts:
+ emerge_config.opts["--buildpkg"] = True
+
+ if "getbinpkg" in emerge_config.target_config.settings.features:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkgonly"] = True
+
+ if "--getbinpkg" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--usepkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--buildpkgonly" in emerge_config.opts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ emerge_config.opts.pop(opt, None)
+
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ apply_priorities(emerge_config.target_config.settings)
+
+ for fmt in emerge_config.target_config.settings["PORTAGE_BINPKG_FORMAT"].split():
+ if fmt not in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if "--pkg-format" in emerge_config.opts:
+ problematic="--pkg-format"
+ else:
+ problematic="PORTAGE_BINPKG_FORMAT"
+
+ writemsg_level(("emerge: %s is not set correctly. Format " + \
+ "'%s' is not supported.\n") % (problematic, fmt),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if emerge_config.action == 'version':
+ writemsg_stdout(getportageversion(
+ emerge_config.target_config.settings["PORTDIR"],
+ None,
+ emerge_config.target_config.settings.profile_path,
+ emerge_config.target_config.settings["CHOST"],
+ emerge_config.target_config.trees['vartree'].dbapi) + '\n',
+ noiselevel=-1)
+ return 0
+ elif emerge_config.action == 'help':
+ emerge_help()
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in emerge_config.target_config.settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in emerge_config.opts:
+ portage.deprecated_profile_check(
+ settings=emerge_config.target_config.settings)
+ repo_name_check(emerge_config.trees)
+ repo_name_duplicate_check(emerge_config.trees)
+ config_protect_check(emerge_config.trees)
+ check_procfs()
+
+ for mytrees in emerge_config.trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ if emerge_config.action in ('search', None) and \
+ "--usepkg" in emerge_config.opts:
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before expand_set_arguments(), in case
+ # any sets use the bintree.
+ try:
+ mytrees["bintree"].populate(
+ getbinpkgs="--getbinpkg" in emerge_config.opts)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ del mytrees, mydb
+
+ for x in emerge_config.args:
+ if x.endswith((".ebuild", ".tbz2")) and \
+ os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken "
+ "and may not always work!!!\n"))
+ break
+
+ if emerge_config.action == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in
+ sorted(emerge_config.target_config.sets)))
+ return os.EX_OK
+ elif emerge_config.action == "check-news":
+ news_counts = count_unread_news(
+ emerge_config.target_config.trees["porttree"].dbapi,
+ emerge_config.target_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in emerge_config.opts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
+
+ ensure_required_sets(emerge_config.trees)
+
+ if emerge_config.action is None and \
+ "--resume" in emerge_config.opts and emerge_config.args:
+ writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
+ " ".join(emerge_config.args), noiselevel=-1)
+ return 1
+
+ # only expand sets for actions taking package arguments
+ oldargs = emerge_config.args[:]
+ if emerge_config.action in ("clean", "config", "depclean",
+ "info", "prune", "unmerge", None):
+ newargs, retval = expand_set_arguments(
+ emerge_config.args, emerge_config.action,
+ emerge_config.target_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not newargs:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ emerge_config.args = newargs
+
+ if "--tree" in emerge_config.opts and \
+ "--columns" in emerge_config.opts:
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in emerge_config.opts and \
+ '--noreplace' in emerge_config.opts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in emerge_config.opts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in emerge_config.opts:
+ emerge_config.opts["--fetchonly"] = True
+
+ if "--skipfirst" in emerge_config.opts and \
+ "--resume" not in emerge_config.opts:
+ emerge_config.opts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in emerge_config.opts:
+ emerge_config.opts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in emerge_config.target_config.settings.features:
+ portage.debug.set_trace(True)
+
+ if not ("--quiet" in emerge_config.opts):
+ if '--nospinner' in emerge_config.opts or \
+ emerge_config.target_config.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in emerge_config.opts:
+ print("myaction", emerge_config.action)
+ print("myopts", emerge_config.opts)
+
+ if not emerge_config.action and not emerge_config.args and \
+ "--resume" not in emerge_config.opts:
+ emerge_help()
+ return 1
+
+ pretend = "--pretend" in emerge_config.opts
+ fetchonly = "--fetchonly" in emerge_config.opts or \
+ "--fetch-all-uri" in emerge_config.opts
+ buildpkgonly = "--buildpkgonly" in emerge_config.opts
+
+ # check if root user is the current user for the actions where emerge needs this
+ if portage.data.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in emerge_config.opts and \
+ emerge_config.action not in ("search", "info"):
+ need_superuser = emerge_config.action in ('clean', 'depclean',
+ 'deselect', 'prune', 'unmerge') or not \
+ (fetchonly or \
+ (buildpkgonly and portage.data.secpass >= 1) or \
+ emerge_config.action in ("metadata", "regen", "sync"))
+ if portage.data.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ if "--ask" in emerge_config.opts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ uq = UserQuery(emerge_config.opts)
+ if uq.query("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in emerge_config.opts) == "No":
+ return 128 + signal.SIGINT
+ emerge_config.opts["--pretend"] = True
+ emerge_config.opts.pop("--ask")
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in emerge_config.opts:
+ disable_emergelog = True
+ break
+ if not disable_emergelog:
+ disable_emergelog = \
+ emerge_config.action in ("search", "info") or \
+ portage.data.secpass < 1
+
+ import _emerge.emergelog
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ emerge_log_dir = \
+ emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
+ if emerge_log_dir:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(emerge_log_dir)
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (emerge_log_dir, e),
+ noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+ else:
+ _emerge.emergelog._emerge_log_dir = emerge_log_dir
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ portage.const.EPREFIX.lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+
+ if not "--pretend" in emerge_config.opts:
+ time_fmt = "%b %d, %Y %H:%M:%S"
+ if sys.hexversion < 0x3000000:
+ time_fmt = portage._unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %b may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ emergelog(xterm_titles, "Started emerge on: %s" % time_str)
+ myelogstr=""
+ if emerge_config.opts:
+ opt_list = []
+ for opt, arg in emerge_config.opts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
+ if emerge_config.action:
+ myelogstr += " --" + emerge_config.action
+ if oldargs:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+
+ oldargs = None
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg(
+ "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets out final log message in before we quit."""
+ if "--pretend" not in emerge_config.opts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if emerge_config.action in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in emerge_config.opts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % emerge_config.action)
+ return 1
+
+ if "sync" == emerge_config.action:
+ return action_sync(emerge_config)
+ elif "metadata" == emerge_config.action:
+ action_metadata(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts)
+ elif emerge_config.action=="regen":
+ validate_ebuild_environment(emerge_config.trees)
+ return action_regen(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts.get("--jobs"),
+ emerge_config.opts.get("--load-average"))
+ # CONFIG action
+ elif "config" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_config(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, emerge_config.args)
+
+ # SEARCH action
+ elif "search" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_search(emerge_config.target_config,
+ emerge_config.opts, emerge_config.args, spinner)
+
+ elif emerge_config.action in \
+ ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
+ validate_ebuild_environment(emerge_config.trees)
+ rval = action_uninstall(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ if not (emerge_config.action == 'deselect' or
+ buildpkgonly or fetchonly or pretend):
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, rval)
+ return rval
+
+ elif emerge_config.action == 'info':
+
+ # Ensure atoms are valid before calling action_info().
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ bindb = emerge_config.target_config.trees['bintree'].dbapi
+ valid_atoms = []
+ for x in emerge_config.args:
+ if is_valid_package_atom(x, allow_repo=True):
+ try:
+ # Look at the installed files first; if there is no match,
+ # look at the ebuilds, since EAPI 4 allows running pkg_info
+ # on non-installed packages.
+ valid_atom = dep_expand(x, mydb=vardb)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb)
+
+ if valid_atom.cp.split("/")[0] == "null" and \
+ "--usepkg" in emerge_config.opts:
+ valid_atom = dep_expand(x, mydb=bindb)
+
+ valid_atoms.append(valid_atom)
+
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(emerge_config.trees)
+
+ for x in emerge_config.args:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" not in emerge_config.opts:
+ uq = UserQuery(emerge_config.opts)
+ if display_news_notification(emerge_config.target_config,
+ emerge_config.opts) \
+ and "--read-news" in emerge_config.opts \
+ and uq.query("Would you like to read the news items while " \
+ "calculating dependencies?",
+ '--ask-enter-invalid' in emerge_config.opts) == "Yes":
+ try:
+ subprocess.call(['eselect', 'news', 'read'])
+ # If eselect is not installed, Python <3.3 will throw an
+ # OSError. >=3.3 will throw a FileNotFoundError, which is a
+ # subclass of OSError.
+ except OSError:
+ writemsg("Please install eselect to use this feature.\n",
+ noiselevel=-1)
+ retval = action_build(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb,
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, retval)
+
+ return retval
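+
+# Rough dispatch summary (illustrative, not part of the original file):
+# "sync", "metadata", "regen" and "config" return early above, the
+# uninstall-style actions go through action_uninstall(), and everything
+# else falls through to action_build() followed by post_emerge().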
diff --git a/usr/lib/portage/pym/_emerge/chk_updated_cfg_files.py b/usr/lib/portage/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 0000000..9f2ab6f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level(
+ _("%d config files in '%s' need updating.\n") % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" " + yellow("*") + " See the " +
+ colorize("INFORM", _("CONFIGURATION FILES")) +
+ " " + _("section of the") + " " + bold("emerge"))
+ print(" " + yellow("*") + " " +
+ _("man page to learn how to update config files."))
diff --git a/usr/lib/portage/pym/_emerge/clear_caches.py b/usr/lib/portage/pym/_emerge/clear_caches.py
new file mode 100644
index 0000000..513df62
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/clear_caches.py
@@ -0,0 +1,17 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gc
+
+def clear_caches(trees):
+ for d in trees.values():
+ d["porttree"].dbapi.melt()
+ d["porttree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._clear_cache()
+ if d["vartree"].dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ d["vartree"].dbapi._linkmap._clear_cache()
+ gc.collect()
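+
+# Example usage (illustrative, not in the original file): after actions
+# that invalidate metadata, such as a sync, one might call:
+#
+#   clear_caches(emerge_config.trees)
+#
+# to drop the porttree/bintree aux caches and the linkmap cache.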
diff --git a/usr/lib/portage/pym/_emerge/countdown.py b/usr/lib/portage/pym/_emerge/countdown.py
new file mode 100644
index 0000000..62e3c8d
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/countdown.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+
+from portage.output import colorize
+
+
+def countdown(secs=5, doing='Starting'):
+ if secs:
+ print(
+ '>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in:' % (secs, doing), end='')
+ for sec in range(secs, 0, -1):
+ sys.stdout.write(colorize('UNMERGE_WARN', ' %i' % sec))
+ sys.stdout.flush()
+ time.sleep(1)
+ print()
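+
+# Example (illustrative, not in the original file):
+#
+#   countdown(3, 'Unmerging')
+#
+# prints ">>> Waiting 3 seconds before starting..." and then
+# "Unmerging in: 3 2 1", sleeping one second per step.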
diff --git a/usr/lib/portage/pym/_emerge/create_depgraph_params.py b/usr/lib/portage/pym/_emerge/create_depgraph_params.py
new file mode 100644
index 0000000..225b792
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/create_depgraph_params.py
@@ -0,0 +1,112 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+from portage.util import writemsg_level
+
+def create_depgraph_params(myopts, myaction):
+ # Configure emerge engine parameters.
+ #
+ # self: include _this_ package regardless of whether it is merged.
+ # selective: exclude the package if it is merged
+ # recurse: go into the dependencies
+ # deep: go into the dependencies of already merged packages
+ # empty: pretend nothing is merged
+ # complete: completely account for all known dependencies
+ # remove: build graph for use in removing packages
+ # rebuilt_binaries: replace installed packages with rebuilt binaries
+ # rebuild_if_new_slot: rebuild or reinstall packages when
+ # slot/sub-slot := operator dependencies can be satisfied by a newer
+ # slot/sub-slot, so that older packages' slots become eligible for
+ # removal by the --depclean action as soon as possible
+ # ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator
+ # parts of dependencies that were recorded when packages were built
+ myparams = {"recurse" : True}
+
+ bdeps = myopts.get("--with-bdeps")
+ if bdeps is not None:
+ myparams["bdeps"] = bdeps
+
+ ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
+ if ignore_built_slot_operator_deps is not None:
+ myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
+
+ dynamic_deps = myopts.get("--dynamic-deps")
+ if dynamic_deps is not None:
+ myparams["dynamic_deps"] = dynamic_deps
+
+ if myaction == "remove":
+ myparams["remove"] = True
+ myparams["complete"] = True
+ myparams["selective"] = True
+ return myparams
+
+ rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
+ if rebuild_if_new_slot is not None:
+ myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
+
+ if "--update" in myopts or \
+ "--newrepo" in myopts or \
+ "--newuse" in myopts or \
+ "--reinstall" in myopts or \
+ "--noreplace" in myopts or \
+ myopts.get("--selective", "n") != "n":
+ myparams["selective"] = True
+
+ deep = myopts.get("--deep")
+ if deep is not None and deep != 0:
+ myparams["deep"] = deep
+
+ complete_if_new_use = \
+ myopts.get("--complete-graph-if-new-use")
+ if complete_if_new_use is not None:
+ myparams["complete_if_new_use"] = complete_if_new_use
+
+ complete_if_new_ver = \
+ myopts.get("--complete-graph-if-new-ver")
+ if complete_if_new_ver is not None:
+ myparams["complete_if_new_ver"] = complete_if_new_ver
+
+ if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
+ "--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
+ myparams["complete"] = True
+ if "--emptytree" in myopts:
+ myparams["empty"] = True
+ myparams["deep"] = True
+ myparams.pop("selective", None)
+
+ if "--nodeps" in myopts:
+ myparams.pop("recurse", None)
+ myparams.pop("deep", None)
+ myparams.pop("complete", None)
+
+ rebuilt_binaries = myopts.get('--rebuilt-binaries')
+ if rebuilt_binaries is True or \
+ rebuilt_binaries != 'n' and \
+ '--usepkgonly' in myopts and \
+ myopts.get('--deep') is True and \
+ '--update' in myopts:
+ myparams['rebuilt_binaries'] = True
+
+ binpkg_respect_use = myopts.get('--binpkg-respect-use')
+ if binpkg_respect_use is not None:
+ myparams['binpkg_respect_use'] = binpkg_respect_use
+ elif '--usepkgonly' not in myopts:
+ # If --binpkg-respect-use is not explicitly specified, we enable
+ # the behavior automatically (like requested in bug #297549), as
+ # long as it doesn't strongly conflict with other options that
+ # have been specified.
+ myparams['binpkg_respect_use'] = 'auto'
+
+ if myopts.get("--selective") == "n":
+ # --selective=n can be used to remove selective
+ # behavior that may have been implied by some
+ # other option like --update.
+ myparams.pop("selective", None)
+
+ if '--debug' in myopts:
+ writemsg_level('\n\nmyparams %s\n\n' % myparams,
+ noiselevel=-1, level=logging.DEBUG)
+
+ return myparams
+
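+# Example (illustrative, not in the original file): for a typical
+# `emerge --update --deep --newuse @world` invocation this returns
+# roughly:
+#
+#   {"recurse": True, "selective": True, "deep": True,
+#    "binpkg_respect_use": "auto"}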
diff --git a/usr/lib/portage/pym/_emerge/create_world_atom.py b/usr/lib/portage/pym/_emerge/create_world_atom.py
new file mode 100644
index 0000000..ac994cc
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/create_world_atom.py
@@ -0,0 +1,126 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage.dep import _repo_separator
+from portage.exception import InvalidData
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+def create_world_atom(pkg, args_set, root_config):
+ """Create a new atom for the world file if one does not exist. If the
+ argument atom is precise enough to identify a specific slot then a slot
+ atom will be returned. Atoms that are in the system set may also be stored
+ in world since system atoms can only match one slot while world atoms can
+ be greedy with respect to slots. Unslotted system packages will not be
+ stored in world."""
+
+ arg_atom = args_set.findAtomForPackage(pkg)
+ if not arg_atom:
+ return None
+ cp = arg_atom.cp
+ new_world_atom = cp
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+ sets = root_config.sets
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+
+ if arg_atom.repo is not None:
+ repos = [arg_atom.repo]
+ else:
+ # Iterate over portdbapi.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in portdb.porttrees:
+ repos.append(portdb.repositories.get_name_for_location(tree))
+
+ available_slots = set()
+ for cpv in portdb.match(cp):
+ for repo in repos:
+ try:
+ available_slots.add(portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
+ pass
+
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if not slotted:
+ # check the vdb in case this is multislot
+ available_slots = set(vardb._pkg_str(cpv, None).slot \
+ for cpv in vardb.match(cp))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if slotted and arg_atom.without_repo != cp:
+ # If the user gave a specific atom, store it as a
+ # slot atom in the world file.
+ slot_atom = pkg.slot_atom
+
+ # For USE=multislot, there are a couple of cases to
+ # handle here:
+ #
+ # 1) SLOT="0", but the real SLOT spontaneously changed to some
+ # unknown value, so just record an unslotted atom.
+ #
+ # 2) SLOT comes from an installed package and there is no
+ # matching SLOT in the portage tree.
+ #
+ # Make sure that the slot atom is available in either the
+ # portdb or the vardb, since otherwise the user certainly
+ # doesn't want the SLOT atom recorded in the world file
+ # (case 1 above). If it's only available in the vardb,
+ # the user may be trying to prevent a USE=multislot
+ # package from being removed by --depclean (case 2 above).
+
+ mydb = portdb
+ if not portdb.match(slot_atom):
+ # SLOT seems to come from an installed multislot package
+ mydb = vardb
+ # If there is no installed package matching the SLOT atom,
+ # it probably changed SLOT spontaneously due to USE=multislot,
+ # so just record an unslotted atom.
+ if vardb.match(slot_atom):
+ # Now verify that the argument is precise
+ # enough to identify a specific slot.
+ matches = mydb.match(arg_atom)
+ matched_slots = set()
+ if mydb is vardb:
+ for cpv in matches:
+ matched_slots.add(mydb._pkg_str(cpv, None).slot)
+ else:
+ for cpv in matches:
+ for repo in repos:
+ try:
+ matched_slots.add(
+ portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
+ pass
+
+ if len(matched_slots) == 1:
+ new_world_atom = slot_atom
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+
+ if new_world_atom == sets["selected"].findAtomForPackage(pkg):
+ # Both atoms would be identical, so there's nothing to add.
+ return None
+ if not slotted and not arg_atom.repo:
+ # Unlike world atoms, system atoms are not greedy for slots, so they
+ # can't be safely excluded from world if they are slotted.
+ system_atom = sets["system"].findAtomForPackage(pkg)
+ if system_atom:
+ if not system_atom.cp.startswith("virtual/"):
+ return None
+ # System virtuals aren't safe to exclude from world since they can
+ # match multiple old-style virtuals but only one of them will be
+ # pulled in by update or depclean.
+ providers = portdb.settings.getvirtuals().get(system_atom.cp)
+ if providers and len(providers) == 1 and \
+ providers[0].cp == arg_atom.cp:
+ return None
+ return new_world_atom
+
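+# Example (illustrative, not in the original file): for an argument such
+# as "dev-lang/python:2.7" matching a multi-slot package, the returned
+# world entry would be the slot atom "dev-lang/python:2.7", whereas an
+# unslotted member of the system set yields None.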
diff --git a/usr/lib/portage/pym/_emerge/depgraph.py b/usr/lib/portage/pym/_emerge/depgraph.py
new file mode 100644
index 0000000..4f01a7b
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/depgraph.py
@@ -0,0 +1,8943 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+import collections
+import errno
+import io
+import logging
+import stat
+import sys
+import textwrap
+import warnings
+from collections import deque
+from itertools import chain
+
+import portage
+from portage import os, OrderedDict
+from portage import _unicode_decode, _unicode_encode, _encodings
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._similar_name_search import similar_name_search
+from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
+ check_required_use, human_readable_required_use, match_from_list, \
+ _repo_separator
+from portage.dep._slot_operator import ignore_built_slot_operator_deps
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
+ PackageNotFound, PortageException)
+from portage.output import colorize, create_color_func, \
+ darkgreen, green
+bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
+from portage.package.ebuild.getmaskingstatus import \
+ _getmaskingstatus, _MaskReason
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ConfigProtect, shlex_split, new_protect_filename
+from portage.util import cmp_sort_key, writemsg, writemsg_stdout
+from portage.util import ensure_dirs
+from portage.util import writemsg_level, write_atomic
+from portage.util.digraph import digraph
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.versions import catpkgsplit
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.is_valid_package_atom import insert_category_into_atom, \
+ is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.UserQuery import UserQuery
+
+from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+from _emerge.resolver.slot_collision import slot_conflict_handler
+from _emerge.resolver.circular_dependency import circular_dependency_handler
+from _emerge.resolver.output import Display, format_unmatched_atom
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class _scheduler_graph_config(object):
+ def __init__(self, trees, pkg_cache, graph, mergelist):
+ self.trees = trees
+ self.pkg_cache = pkg_cache
+ self.graph = graph
+ self.mergelist = mergelist
+
+def _wildcard_set(atoms):
+ pkgs = InternalPackageSet(allow_wildcard=True)
+ for x in atoms:
+ try:
+ x = Atom(x, allow_wildcard=True, allow_repo=False)
+ except portage.exception.InvalidAtom:
+ x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
+ pkgs.add(x)
+ return pkgs
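+
+# Example (illustrative, not in the original file):
+# _wildcard_set(["dev-libs/*", "openssl"]) keeps "dev-libs/*" as-is and
+# turns the bare name "openssl" into the wildcard atom "*/openssl",
+# since it is not a valid atom by itself.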
+
+class _frozen_depgraph_config(object):
+
+ def __init__(self, settings, trees, myopts, params, spinner):
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.myopts = myopts
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.spinner = spinner
+ self.requested_depth = params.get("deep", 0)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.pkgsettings = {}
+ self.trees = {}
+ self._trees_orig = trees
+ self.roots = {}
+ # All Package instances
+ self._pkg_cache = {}
+ self._highest_license_masked = {}
+ dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for myroot in trees:
+ self.trees[myroot] = {}
+ # Create a RootConfig instance that references
+ # the FakeVartree instead of the real one.
+ self.roots[myroot] = RootConfig(
+ trees[myroot]["vartree"].settings,
+ self.trees[myroot],
+ trees[myroot]["root_config"].setconfig)
+ for tree in ("porttree", "bintree"):
+ self.trees[myroot][tree] = trees[myroot][tree]
+ self.trees[myroot]["vartree"] = \
+ FakeVartree(trees[myroot]["root_config"],
+ pkg_cache=self._pkg_cache,
+ pkg_root_config=self.roots[myroot],
+ dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
+ self.pkgsettings[myroot] = portage.config(
+ clone=self.trees[myroot]["vartree"].settings)
+
+ self._required_set_names = set(["world"])
+
+ atoms = ' '.join(myopts.get("--exclude", [])).split()
+ self.excluded_pkgs = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
+ self.reinstall_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
+ self.usepkg_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
+ self.useoldpkg_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
+ self.rebuild_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
+ self.rebuild_ignore = _wildcard_set(atoms)
+
+ self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
+ self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
+ self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
+
+class _depgraph_sets(object):
+ def __init__(self):
+ # contains all sets added to the graph
+ self.sets = {}
+ # contains non-set atoms given as arguments
+ self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
+ # contains all atoms from all sets added to the graph, including
+ # atoms given as arguments
+ self.atoms = InternalPackageSet(allow_repo=True)
+ self.atom_arg_map = {}
+
+class _rebuild_config(object):
+ def __init__(self, frozen_config, backtrack_parameters):
+ self._graph = digraph()
+ self._frozen_config = frozen_config
+ self.rebuild_list = backtrack_parameters.rebuild_list.copy()
+ self.orig_rebuild_list = self.rebuild_list.copy()
+ self.reinstall_list = backtrack_parameters.reinstall_list.copy()
+ self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
+ self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
+ self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
+ self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
+ self.rebuild_if_unbuilt)
+
+ def add(self, dep_pkg, dep):
+ parent = dep.collapsed_parent
+ priority = dep.collapsed_priority
+ rebuild_exclude = self._frozen_config.rebuild_exclude
+ rebuild_ignore = self._frozen_config.rebuild_ignore
+ if (self.rebuild and isinstance(parent, Package) and
+ parent.built and priority.buildtime and
+ isinstance(dep_pkg, Package) and
+ not rebuild_exclude.findAtomForPackage(parent) and
+ not rebuild_ignore.findAtomForPackage(dep_pkg)):
+ self._graph.add(dep_pkg, parent, priority)
+
+ def _needs_rebuild(self, dep_pkg):
+ """Check whether packages that depend on dep_pkg need to be rebuilt."""
+ dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
+ if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
+ return False
+
+ if self.rebuild_if_unbuilt:
+ # dep_pkg is being installed from source, so binary
+ # packages for parents are invalid. Force rebuild
+ return True
+
+ trees = self._frozen_config.trees
+ vardb = trees[dep_pkg.root]["vartree"].dbapi
+ if self.rebuild_if_new_rev:
+ # Parent packages are valid if a package with the same
+ # cpv is already installed.
+ return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
+
+ # Otherwise, parent packages are valid if a package with the same
+ # version (excluding revision) is already installed.
+ assert self.rebuild_if_new_ver
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for inst_cpv in vardb.match(dep_pkg.slot_atom):
+ inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
+ if inst_cpv_norev == cpv_norev:
+ return False
+
+ return True
+
+ def _trigger_rebuild(self, parent, build_deps):
+ root_slot = (parent.root, parent.slot_atom)
+ if root_slot in self.rebuild_list:
+ return False
+ trees = self._frozen_config.trees
+ reinstall = False
+ for slot_atom, dep_pkg in build_deps.items():
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
+ self.rebuild_list.add(root_slot)
+ return True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != _unicode(parent.build_time):
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
+ if reinstall:
+ self.reinstall_list.add(root_slot)
+ return reinstall
+
+ def trigger_rebuilds(self):
+ """
+ Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
+ depends on pkgA at both build-time and run-time, pkgB needs to be
+ rebuilt.
+ """
+ need_restart = False
+ graph = self._graph
+ build_deps = {}
+
+ leaf_nodes = deque(graph.leaf_nodes())
+
+ # Trigger rebuilds bottom-up (starting with the leaves) so that parents
+ # will always know which children are being rebuilt.
+ while graph:
+ if not leaf_nodes:
+ # We'll have to drop an edge. This should be quite rare.
+ leaf_nodes.append(graph.order[-1])
+
+ node = leaf_nodes.popleft()
+ if node not in graph:
+ # This can be triggered by circular dependencies.
+ continue
+ slot_atom = node.slot_atom
+
+ # Remove our leaf node from the graph, keeping track of deps.
+ parents = graph.parent_nodes(node)
+ graph.remove(node)
+ node_build_deps = build_deps.get(node, {})
+ for parent in parents:
+ if parent == node:
+ # Ignore a direct cycle.
+ continue
+ parent_bdeps = build_deps.setdefault(parent, {})
+ parent_bdeps[slot_atom] = node
+ if not graph.child_nodes(parent):
+ leaf_nodes.append(parent)
+
+ # Trigger rebuilds for our leaf node. Because all of our children
+ # have been processed, the build_deps will be completely filled in,
+ # and self.rebuild_list / self.reinstall_list will tell us whether
+ # any of our children need to be rebuilt or reinstalled.
+ if self._trigger_rebuild(node, node_build_deps):
+ need_restart = True
+
+ return need_restart
+
+
+class _dynamic_depgraph_config(object):
+
+ def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
+ self.myparams = myparams.copy()
+ self._vdb_loaded = False
+ self._allow_backtracking = allow_backtracking
+ # Maps nodes to the reasons they were selected for reinstallation.
+ self._reinstall_nodes = {}
+ # Contains a filtered view of preferred packages that are selected
+ # from available repositories.
+ self._filtered_trees = {}
+ # Contains installed packages and new packages that have been added
+ # to the graph.
+ self._graph_trees = {}
+ # Caches visible packages returned from _select_package, for use in
+ # depgraph._iter_atoms_for_pkg() SLOT logic.
+ self._visible_pkgs = {}
+ # Contains the args created by select_files.
+ self._initial_arg_list = []
+ self.digraph = portage.digraph()
+ # manages sets added to the graph
+ self.sets = {}
+ # contains all nodes pulled in by self.sets
+ self._set_nodes = set()
+ # Contains only Blocker -> Uninstall edges
+ self._blocker_uninstalls = digraph()
+ # Contains only Package -> Blocker edges
+ self._blocker_parents = digraph()
+ # Contains only irrelevant Package -> Blocker edges
+ self._irrelevant_blockers = digraph()
+ # Contains only unsolvable Package -> Blocker edges
+ self._unsolvable_blockers = digraph()
+ # Contains all Blocker -> Blocked Package edges
+ self._blocked_pkgs = digraph()
+ # Contains world packages that have been protected from
+ # uninstallation but may not have been added to the graph
+ # if the graph is not complete yet.
+ self._blocked_world_pkgs = {}
+ # Contains packages whose dependencies have been traversed.
+ # This is used to check whether we have accounted for blockers
+ # relevant to a package.
+ self._traversed_pkg_deps = set()
+ self._parent_atoms = {}
+ self._slot_conflict_handler = None
+ self._circular_dependency_handler = None
+ self._serialized_tasks_cache = None
+ self._scheduler_graph = None
+ self._displayed_list = None
+ self._pprovided_args = []
+ self._missing_args = []
+ self._masked_installed = set()
+ self._masked_license_updates = set()
+ self._unsatisfied_deps_for_display = []
+ self._unsatisfied_blockers_for_display = None
+ self._circular_deps_for_display = None
+ self._dep_stack = []
+ self._dep_disjunctive_stack = []
+ self._unsatisfied_deps = []
+ self._initially_unsatisfied_deps = []
+ self._ignored_deps = []
+ self._highest_pkg_cache = {}
+
+ # Binary packages that have been rejected because their USE
+ # didn't match the user's config. It maps packages to a set
+ # of flags causing the rejection.
+ self.ignored_binaries = {}
+
+ self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
+ self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
+ self._needed_license_changes = backtrack_parameters.needed_license_changes
+ self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+ self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
+ self._prune_rebuilds = backtrack_parameters.prune_rebuilds
+ self._need_restart = False
+ # For conditions that always require user intervention, such as
+ # unsatisfied REQUIRED_USE (which currently has no autounmask support).
+ self._skip_restart = False
+ self._backtrack_infos = {}
+
+ self._buildpkgonly_deps_unsatisfied = False
+ self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
+ self._success_without_autounmask = False
+ self._required_use_unsatisfied = False
+ self._traverse_ignored_deps = False
+ self._complete_mode = False
+ self._slot_operator_deps = {}
+ self._package_tracker = PackageTracker()
+ # Track missed updates caused by solved conflicts.
+ self._conflict_missed_update = collections.defaultdict(dict)
+
+ for myroot in depgraph._frozen_config.trees:
+ self.sets[myroot] = _depgraph_sets()
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ # This dbapi instance will model the state that the vdb will
+ # have after new packages have been installed.
+ fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
+
+ def graph_tree():
+ pass
+ graph_tree.dbapi = fakedb
+ self._graph_trees[myroot] = {}
+ self._filtered_trees[myroot] = {}
+ # Substitute the graph tree for the vartree in dep_check() since we
+ # want atom selections to be consistent with package selections
+ # that have already been made.
+ self._graph_trees[myroot]["porttree"] = graph_tree
+ self._graph_trees[myroot]["vartree"] = graph_tree
+ self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._graph_trees[myroot]["graph"] = self.digraph
+ self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+ def filtered_tree():
+ pass
+ filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
+ self._filtered_trees[myroot]["porttree"] = filtered_tree
+ self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
+
+ # Passing in graph_tree as the vartree here could lead to better
+ # atom selections in some cases by causing atoms for packages that
+ # have been added to the graph to be preferred over other choices.
+ # However, it can trigger atom selections that result in
+ # unresolvable direct circular dependencies. For example, this
+ # happens with gwydion-dylan which depends on either itself or
+ # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+ # gwydion-dylan-bin needs to be selected in order to avoid
+ # an unresolvable direct circular dependency.
+ #
+ # To solve the problem described above, pass in "graph_db" so that
+ # packages that have been added to the graph are distinguishable
+ # from other available packages and installed packages. Also, pass
+ # the parent package into self._select_atoms() calls so that
+ # unresolvable direct circular dependencies can be detected and
+ # avoided when possible.
+ self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._filtered_trees[myroot]["graph"] = self.digraph
+ self._filtered_trees[myroot]["vartree"] = \
+ depgraph._frozen_config.trees[myroot]["vartree"]
+ self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+
+ dbs = []
+ # (db, pkg_type, built, installed, db_keys)
+ if "remove" in self.myparams:
+ # For removal operations, use _dep_check_composite_db
+ # for availability and visibility checks. This provides
+ # consistency with install operations, so we don't
+ # get install/uninstall cycles like in bug #332719.
+ self._graph_trees[myroot]["porttree"] = filtered_tree
+ else:
+ if "--usepkgonly" not in depgraph._frozen_config.myopts:
+ portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs.append((portdb, "ebuild", False, False, db_keys))
+
+ if "--usepkg" in depgraph._frozen_config.myopts:
+ bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
+ db_keys = list(bindb._aux_cache_keys)
+ dbs.append((bindb, "binary", True, False, db_keys))
+
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ db_keys = list(depgraph._frozen_config._trees_orig[myroot
+ ]["vartree"].dbapi._aux_cache_keys)
+ dbs.append((vardb, "installed", True, True, db_keys))
+ self._filtered_trees[myroot]["dbs"] = dbs
+
+class depgraph(object):
+
+ # Represents the depth of a node that is unreachable from explicit
+ # user arguments (or their deep dependencies). Such nodes are pulled
+ # in by the _complete_graph method.
+ _UNREACHABLE_DEPTH = object()
+
+ pkg_tree_map = RootConfig.pkg_tree_map
+
+ def __init__(self, settings, trees, myopts, myparams, spinner,
+ frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
+ if frozen_config is None:
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+ self._frozen_config = frozen_config
+ self._dynamic_config = _dynamic_depgraph_config(self, myparams,
+ allow_backtracking, backtrack_parameters)
+ self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
+
+ self._select_atoms = self._select_atoms_highest_available
+ self._select_package = self._select_pkg_highest_available
+
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+
+ self.query = UserQuery(myopts).query
+
+ def _load_vdb(self):
+ """
+ Load installed package metadata if appropriate. This used to be called
+ from the constructor, but that wasn't very nice since this procedure
+ is slow and it generates spinner output. So, now it's called on-demand
+ by various methods when necessary.
+ """
+
+ if self._dynamic_config._vdb_loaded:
+ return
+
+ for myroot in self._frozen_config.trees:
+
+ dynamic_deps = self._dynamic_config.myparams.get(
+ "dynamic_deps", "y") != "n"
+ preload_installed_pkgs = \
+ "--nodeps" not in self._frozen_config.myopts
+
+ fake_vartree = self._frozen_config.trees[myroot]["vartree"]
+ if not fake_vartree.dbapi:
+ # This needs to be called for the first depgraph, but not for
+ # backtracking depgraphs that share the same frozen_config.
+ fake_vartree.sync()
+
+ # FakeVartree.sync() populates virtuals, and we want
+ # self.pkgsettings to have them populated too.
+ self._frozen_config.pkgsettings[myroot] = \
+ portage.config(clone=fake_vartree.settings)
+
+ if preload_installed_pkgs:
+ vardb = fake_vartree.dbapi
+
+ if not dynamic_deps:
+ for pkg in vardb:
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ else:
+ max_jobs = self._frozen_config.myopts.get("--jobs")
+ max_load = self._frozen_config.myopts.get("--load-average")
+ scheduler = TaskScheduler(
+ self._dynamic_deps_preload(fake_vartree),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=fake_vartree._portdb._event_loop)
+ scheduler.start()
+ scheduler.wait()
+
+ self._dynamic_config._vdb_loaded = True
+
+ def _dynamic_deps_preload(self, fake_vartree):
+ portdb = fake_vartree._portdb
+ for pkg in fake_vartree.dbapi:
+ self._spinner_update()
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ ebuild_path, repo_path = \
+ portdb.findname2(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ fake_vartree.dynamic_deps_preload(pkg, None)
+ continue
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ pkg.cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ fake_vartree.dynamic_deps_preload(pkg, metadata)
+ else:
+ proc = EbuildMetadataPhase(cpv=pkg.cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+ proc.addExitListener(
+ self._dynamic_deps_proc_exit(pkg, fake_vartree))
+ yield proc
+
+ class _dynamic_deps_proc_exit(object):
+
+ __slots__ = ('_pkg', '_fake_vartree')
+
+ def __init__(self, pkg, fake_vartree):
+ self._pkg = pkg
+ self._fake_vartree = fake_vartree
+
+ def __call__(self, proc):
+ metadata = None
+ if proc.returncode == os.EX_OK:
+ metadata = proc.metadata
+ self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
+
+ def _spinner_update(self):
+ if self._frozen_config.spinner:
+ self._frozen_config.spinner.update()
+
+ def _compute_abi_rebuild_info(self):
+ """
+ Fill self._forced_rebuilds with packages that cause rebuilds.
+ """
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ # Get all atoms that might have caused a forced rebuild.
+ atoms = {}
+ for s in self._dynamic_config._initial_arg_list:
+ if s.force_reinstall:
+ root = s.root_config.root
+ atoms.setdefault(root, set()).update(s.pset)
+
+ if debug:
+ writemsg_level("forced reinstall atoms:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in atoms:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for atom in atoms[root]:
+ writemsg_level(" atom: %s\n" % atom,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Go through all slot operator deps and check if one of these deps
+ # has a parent that is matched by one of the atoms from above.
+ forced_rebuilds = {}
+
+ for root, rebuild_atoms in atoms.items():
+
+ for slot_atom in rebuild_atoms:
+
+ inst_pkg, reinst_pkg = \
+ self._select_pkg_from_installed(root, slot_atom)
+
+ if inst_pkg is reinst_pkg or reinst_pkg is None:
+ continue
+
+ # Generate pseudo-deps for any slot-operator deps of
+ # inst_pkg. Its deps aren't in _slot_operator_deps
+ # because it hasn't been added to the graph, but we
+ # are interested in any rebuilds that it triggered.
+ built_slot_op_atoms = []
+ if inst_pkg is not None:
+ selected_atoms = self._select_atoms_probe(
+ inst_pkg.root, inst_pkg)
+ for atom in selected_atoms:
+ if atom.slot_operator_built:
+ built_slot_op_atoms.append(atom)
+
+ if not built_slot_op_atoms:
+ continue
+
+ # Use a cloned list, since we may append to it below.
+ deps = self._dynamic_config._slot_operator_deps.get(
+ (root, slot_atom), [])[:]
+
+ if built_slot_op_atoms and reinst_pkg is not None:
+ for child in self._dynamic_config.digraph.child_nodes(
+ reinst_pkg):
+
+ if child.installed:
+ continue
+
+ for atom in built_slot_op_atoms:
+ # NOTE: Since atom comes from inst_pkg, and
+ # reinst_pkg is the replacement parent, there's
+ # no guarantee that atom will completely match
+ # child. So, simply use atom.cp and atom.slot
+ # for matching.
+ if atom.cp != child.cp:
+ continue
+ if atom.slot and atom.slot != child.slot:
+ continue
+ deps.append(Dependency(atom=atom, child=child,
+ root=child.root, parent=reinst_pkg))
+
+ for dep in deps:
+ if dep.child.installed:
+ # Find the replacement child.
+ child = next((pkg for pkg in
+ self._dynamic_config._package_tracker.match(
+ dep.root, dep.child.slot_atom)
+ if not pkg.installed), None)
+
+ if child is None:
+ continue
+
+ inst_child = dep.child.installed
+
+ else:
+ child = dep.child
+ inst_child = self._select_pkg_from_installed(
+ child.root, child.slot_atom)[0]
+
+ # Make sure the child's slot/subslot has changed. If it
+ # hasn't, then another child has forced this rebuild.
+ if inst_child and inst_child.slot == child.slot and \
+ inst_child.sub_slot == child.sub_slot:
+ continue
+
+ if dep.parent.installed:
+ # Find the replacement parent.
+ parent = next((pkg for pkg in
+ self._dynamic_config._package_tracker.match(
+ dep.parent.root, dep.parent.slot_atom)
+ if not pkg.installed), None)
+
+ if parent is None:
+ continue
+
+ else:
+ parent = dep.parent
+
+ # The child has forced a rebuild of the parent
+ forced_rebuilds.setdefault(root, {}
+ ).setdefault(child, set()).add(parent)
+
+ if debug:
+ writemsg_level("slot operator dependencies:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ writemsg_level(" (%s, %s)\n" % \
+ (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
+ for dep in deps:
+ writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
+
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+
+ writemsg_level("forced rebuilds:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in forced_rebuilds:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for child in forced_rebuilds[root]:
+ writemsg_level(" child: %s\n" % child,
+ level=logging.DEBUG, noiselevel=-1)
+ for parent in forced_rebuilds[root][child]:
+ writemsg_level(" parent: %s\n" % parent,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ self._forced_rebuilds = forced_rebuilds
+
+ def _show_abi_rebuild_info(self):
+
+ if not self._forced_rebuilds:
+ return
+
+ writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
+
+ for root in self._forced_rebuilds:
+ for child in self._forced_rebuilds[root]:
+ writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
+ for parent in self._forced_rebuilds[root][child]:
+ writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
+
+ def _show_ignored_binaries(self):
+ """
+ Show binaries that have been ignored because their USE didn't
+ match the user's config.
+ """
+ if not self._dynamic_config.ignored_binaries \
+ or '--quiet' in self._frozen_config.myopts \
+ or self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "n"):
+ return
+
+ for pkg in list(self._dynamic_config.ignored_binaries):
+
+ selected_pkg = list()
+
+ for selected_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
+
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == pkg.build_time:
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
+
+ if not self._dynamic_config.ignored_binaries:
+ return
+
+ self._show_merge_list()
+
+ writemsg("\n!!! The following binary packages have been ignored " + \
+ "due to non matching USE:\n\n", noiselevel=-1)
+
+ for pkg, flags in self._dynamic_config.ignored_binaries.items():
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ msg = [
+ "",
+ "NOTE: The --binpkg-respect-use=n option will prevent emerge",
+ " from ignoring these binary packages if possible.",
+ " Using --binpkg-respect-use=y will silence this warning."
+ ]
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ def _get_missed_updates(self):
+
+ # In order to minimize noise, show only the highest
+ # missed update from each SLOT.
+ missed_updates = {}
+ for pkg, mask_reasons in \
+ chain(self._dynamic_config._runtime_pkg_mask.items(),
+ self._dynamic_config._conflict_missed_update.items()):
+ if pkg.installed:
+ # Exclude installed here since we only
+ # want to show available updates.
+ continue
+ missed_update = True
+ any_selected = False
+ for chosen_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+ any_selected = True
+ if chosen_pkg > pkg or (not chosen_pkg.installed and \
+ chosen_pkg.version == pkg.version):
+ missed_update = False
+ break
+ if any_selected and missed_update:
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ return missed_updates
+
+ def _show_missed_update(self):
+
+ missed_updates = self._get_missed_updates()
+
+ if not missed_updates:
+ return
+
+ missed_update_types = {}
+ for pkg, mask_type, parent_atoms in missed_updates.values():
+ missed_update_types.setdefault(mask_type,
+ []).append((pkg, parent_atoms))
+
+ if '--quiet' in self._frozen_config.myopts and \
+ '--debug' not in self._frozen_config.myopts:
+ missed_update_types.pop("slot conflict", None)
+ missed_update_types.pop("missing dependency", None)
+
+ self._show_missed_update_slot_conflicts(
+ missed_update_types.get("slot conflict"))
+
+ self._show_missed_update_unsatisfied_dep(
+ missed_update_types.get("missing dependency"))
+
+ def _show_missed_update_unsatisfied_dep(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ self._show_merge_list()
+ backtrack_masked = []
+
+ for pkg, parent_atoms in missed_updates:
+
+ try:
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent,
+ check_backtrack=True)
+ except self._backtrack_mask:
+ # This is displayed below in abbreviated form.
+ backtrack_masked.append((pkg, parent_atoms))
+ continue
+
+ writemsg("\n!!! The following update has been skipped " + \
+ "due to unsatisfied dependencies:\n\n", noiselevel=-1)
+
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent)
+ writemsg("\n", noiselevel=-1)
+
+ if backtrack_masked:
+ # These are shown in abbreviated form, in order to avoid terminal
+ # flooding from mask messages as reported in bug #285832.
+ writemsg("\n!!! The following update(s) have been skipped " + \
+ "due to unsatisfied dependencies\n" + \
+ "!!! triggered by backtracking:\n\n", noiselevel=-1)
+ for pkg, parent_atoms in backtrack_masked:
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ def _show_missed_update_slot_conflicts(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ self._show_merge_list()
+ msg = []
+ msg.append("\nWARNING: One or more updates/rebuilds have been " + \
+ "skipped due to a dependency conflict:\n\n")
+
+ indent = " "
+ for pkg, parent_atoms in missed_updates:
+ msg.append(str(pkg.slot_atom))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg.append(" for %s" % (pkg.root,))
+ msg.append("\n\n")
+
+ msg.append(indent)
+ msg.append(str(pkg))
+ msg.append(" conflicts with\n")
+
+ for parent, atom in parent_atoms:
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(2*indent)
+ msg.append(str(parent))
+ msg.append("\n")
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ atom, marker = format_unmatched_atom(
+ pkg, atom, self._pkg_use_enabled)
+
+ msg.append(2*indent)
+ msg.append("%s required by %s\n" % (atom, parent))
+ msg.append(2*indent)
+ msg.append(marker)
+ msg.append("\n")
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ def _show_slot_collision_notice(self):
+ """Show an informational message advising the user to mask one of the
+ the packages. In some cases it may be possible to resolve this
+ automatically, but support for backtracking (removal nodes that have
+ already been selected) will be required in order to handle all possible
+ cases.
+ """
+
+ if not any(self._dynamic_config._package_tracker.slot_conflicts()):
+ return
+
+ self._show_merge_list()
+
+ self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+ handler = self._dynamic_config._slot_conflict_handler
+
+ conflict = handler.get_conflict()
+ writemsg(conflict, noiselevel=-1)
+
+ explanation = handler.get_explanation()
+ if explanation:
+ writemsg(explanation, noiselevel=-1)
+ return
+
+ if "--quiet" in self._frozen_config.myopts:
+ return
+
+ msg = []
+ msg.append("It may be possible to solve this problem ")
+ msg.append("by using package.mask to prevent one of ")
+ msg.append("those packages from being selected. ")
+ msg.append("However, it is also possible that conflicting ")
+ msg.append("dependencies exist such that they are impossible to ")
+ msg.append("satisfy simultaneously. If such a conflict exists in ")
+ msg.append("the dependencies of two different packages, then those ")
+ msg.append("packages can not be installed simultaneously.")
+ backtrack_opt = self._frozen_config.myopts.get('--backtrack')
+ if not self._dynamic_config._allow_backtracking and \
+ (backtrack_opt is None or \
+ (backtrack_opt > 0 and backtrack_opt < 30)):
+ msg.append(" You may want to try a larger value of the ")
+ msg.append("--backtrack option, such as --backtrack=30, ")
+ msg.append("in order to see if that will solve this conflict ")
+ msg.append("automatically.")
+
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ msg = []
+ msg.append("For more information, see MASKED PACKAGES ")
+ msg.append("section in the emerge man page or refer ")
+ msg.append("to the Gentoo Handbook.")
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ def _solve_non_slot_operator_slot_conflicts(self):
+ """
+ This function solves slot conflicts which can
+ be solved by simply choosing one of the conflicting
+ packages and removing all the other ones.
+ It is able to solve somewhat more complex cases where
+ conflicts can only be solved simultaneously.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+
+ # List all conflicts. Ignore those that involve slot operator rebuilds
+ # as the logic there needs special slot conflict behavior which isn't
+ # provided by this function.
+ conflicts = []
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ slot_key = conflict.root, conflict.atom
+ if slot_key not in self._dynamic_config._slot_operator_replace_installed:
+ conflicts.append(conflict)
+
+ if not conflicts:
+ return
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict handler started.\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Get a set of all conflicting packages.
+ conflict_pkgs = set()
+ for conflict in conflicts:
+ conflict_pkgs.update(conflict)
+
+ # Get the list of other packages which are only
+ # required by conflict packages.
+ indirect_conflict_candidates = set()
+ for pkg in conflict_pkgs:
+ indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
+ indirect_conflict_candidates.difference_update(conflict_pkgs)
+
+ indirect_conflict_pkgs = set()
+ while indirect_conflict_candidates:
+ pkg = indirect_conflict_candidates.pop()
+
+ only_conflict_parents = True
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
+ only_conflict_parents = False
+ break
+ if not only_conflict_parents:
+ continue
+
+ indirect_conflict_pkgs.add(pkg)
+ for child in self._dynamic_config.digraph.child_nodes(pkg):
+ if child in conflict_pkgs or child in indirect_conflict_pkgs:
+ continue
+ indirect_conflict_candidates.add(child)
+
+ # Create a graph containing the conflict packages
+ # and a special 'non_conflict_node' that represents
+ # all non-conflict packages.
+ conflict_graph = digraph()
+
+ non_conflict_node = "(non-conflict package)"
+ conflict_graph.add(non_conflict_node, None)
+
+ for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
+ conflict_graph.add(pkg, None)
+
+ # Add parent->child edges for each conflict package.
+ # Parents which aren't conflict packages are
+ # represented by 'non_conflict_node'.
+ # If several conflicting packages are matched, but not all,
+ # add a tuple with the matched packages to the graph.
+ class or_tuple(tuple):
+ """
+ Helper class for debug printing.
+ """
+ def __str__(self):
+ return "(%s)" % ",".join(str(pkg) for pkg in self)
+
+ non_matching_forced = set()
+ for conflict in conflicts:
+ if debug:
+ writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)
+
+ all_parent_atoms = set()
+ for pkg in conflict:
+ all_parent_atoms.update(
+ self._dynamic_config._parent_atoms.get(pkg, []))
+
+ for parent, atom in all_parent_atoms:
+ is_arg_parent = isinstance(parent, AtomArg)
+ is_non_conflict_parent = parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs
+
+ if debug:
+ writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)
+
+ if is_non_conflict_parent:
+ parent = non_conflict_node
+
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+
+ matched = []
+ for pkg in conflict:
+ if atom_set.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)) and \
+ not (is_arg_parent and pkg.installed):
+ matched.append(pkg)
+
+ if debug:
+ for match in matched:
+ writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)
+
+ if len(matched) > 1:
+ # Even if all packages match, this parent must still
+ # be added to the conflict_graph. Otherwise, we risk
+ # removing all of these packages from the depgraph,
+ # which could cause a missed update (bug #522084).
+ conflict_graph.add(or_tuple(matched), parent)
+ elif len(matched) == 1:
+ conflict_graph.add(matched[0], parent)
+ else:
+ # This typically means that autounmask broke a
+ # USE-dep, but it could also be due to the slot
+ # not matching due to multislot (bug #220341).
+ # Either way, don't try to solve this conflict.
+ # Instead, force them all into the graph so that
+ # they are protected from removal.
+ non_matching_forced.update(conflict)
+ if debug:
+ for pkg in conflict:
+ writemsg_level(" non-match: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ for pkg in indirect_conflict_pkgs:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+ conflict_graph.add(pkg, parent)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict graph:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ conflict_graph.debug_print()
+
+ # Now select required packages. Collect them in the
+ # 'forced' set.
+ forced = set([non_conflict_node])
+ forced.update(non_matching_forced)
+ unexplored = set([non_conflict_node])
+ # or_tuples get special handling. We first explore
+ # all packages, in the hope that one of the packages
+ # in each tuple has already been forced, so that we
+ # don't have to choose one ourselves.
+ unexplored_tuples = set()
+
+ while unexplored:
+ # Handle all unexplored packages.
+ while unexplored:
+ node = unexplored.pop()
+ for child in conflict_graph.child_nodes(node):
+ if child in forced:
+ continue
+ forced.add(child)
+ if isinstance(child, Package):
+ unexplored.add(child)
+ else:
+ unexplored_tuples.add(child)
+
+ # Now handle unexplored or_tuples. Go back to handling
+ # packages as soon as we have had to choose one.
+ while unexplored_tuples:
+ nodes = unexplored_tuples.pop()
+ if any(node in forced for node in nodes):
+ # At least one of the packages in the
+ # tuple is already forced, which means the
+ # dependency represented by this tuple
+ # is satisfied.
+ continue
+
+ # We now have to choose one of the packages in the tuple.
+ # In theory one could solve more conflicts by trying
+ # different choices here, but that has lots of other
+ # problems. For now choose the package that was
+ # pulled in first, as this should be the most desirable
+ # choice (otherwise it wouldn't have been the first one).
+ forced.add(nodes[0])
+ unexplored.add(nodes[0])
+ break
+
+ # Remove 'non_conflict_node' and or_tuples from 'forced'.
+ forced = set(pkg for pkg in forced if isinstance(pkg, Package))
+ non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict solution:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ for conflict in conflicts:
+ writemsg_level(
+ " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
+ level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ if pkg in forced:
+ writemsg_level(
+ " keep: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ writemsg_level(
+ " remove: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ broken_packages = set()
+ for pkg in non_forced:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if isinstance(parent, Package) and parent not in non_forced:
+ # Non-forcing set args are expected to be a parent of all
+ # packages in the conflict.
+ broken_packages.add(parent)
+ self._remove_pkg(pkg)
+
+ # Process the dependencies of chosen conflict packages
+ # again to properly account for blockers.
+ broken_packages.update(forced)
+
+ # Filter out broken packages which have been removed during
+ # recursive removal in self._remove_pkg.
+ broken_packages = list(pkg for pkg in broken_packages \
+ if self._dynamic_config._package_tracker.contains(pkg, installed=False))
+
+ self._dynamic_config._dep_stack.extend(broken_packages)
+
+ if broken_packages:
+ # Process dependencies. This cannot fail because we just ensured that
+ # the remaining packages satisfy all dependencies.
+ self._create_graph()
+
+ # Record missed updates.
+ for conflict in conflicts:
+ if not any(pkg in non_forced for pkg in conflict):
+ continue
+ for pkg in conflict:
+ if pkg not in non_forced:
+ continue
+
+ for other in conflict:
+ if other is pkg:
+ continue
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._conflict_missed_update[pkg].setdefault(
+ "slot conflict", set())
+ self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
+ (parent, atom))
+
+
+ def _process_slot_conflicts(self):
+ """
+ If there are any slot conflicts and backtracking is enabled,
+ _complete_graph should complete the graph before this method
+ is called, so that all relevant reverse dependencies are
+ available for use in backtracking decisions.
+ """
+
+ self._solve_non_slot_operator_slot_conflicts()
+
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ self._process_slot_conflict(conflict)
+
+ def _process_slot_conflict(self, conflict):
+ """
+ Process slot conflict data to identify specific atoms which
+ lead to conflict. These atoms only match a subset of the
+ packages that have been pulled into a given slot.
+ """
+ root = conflict.root
+ slot_atom = conflict.atom
+ slot_nodes = conflict.pkgs
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ slot_parent_atoms = set()
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ continue
+ slot_parent_atoms.update(parent_atoms)
+
+ conflict_pkgs = []
+ conflict_atoms = {}
+ for pkg in slot_nodes:
+
+ if self._dynamic_config._allow_backtracking and \
+ pkg in self._dynamic_config._runtime_pkg_mask:
+ if debug:
+ writemsg_level(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (pkg,
+ self._dynamic_config._runtime_pkg_mask[pkg]),
+ level=logging.DEBUG, noiselevel=-1)
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+
+ all_match = True
+ for parent_atom in slot_parent_atoms:
+ if parent_atom in parent_atoms:
+ continue
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ parent, atom = parent_atom
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ parent_atoms.add(parent_atom)
+ else:
+ all_match = False
+ conflict_atoms.setdefault(parent_atom, set()).add(pkg)
+
+ if not all_match:
+ conflict_pkgs.append(pkg)
+
+ if conflict_pkgs and \
+ self._dynamic_config._allow_backtracking and \
+ not self._accept_blocker_conflicts():
+ remaining = []
+ for pkg in conflict_pkgs:
+ if self._slot_conflict_backtrack_abi(pkg,
+ slot_nodes, conflict_atoms):
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config.setdefault("slot_conflict_abi", set()).add(pkg)
+ else:
+ remaining.append(pkg)
+ if remaining:
+ self._slot_conflict_backtrack(root, slot_atom,
+ slot_parent_atoms, remaining)
+
+ def _slot_conflict_backtrack(self, root, slot_atom,
+ all_parents, conflict_pkgs):
+
+ debug = "--debug" in self._frozen_config.myopts
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ root, slot_atom, installed=False))
+ # In order to avoid a missed update, first mask lower versions
+ # that conflict with higher versions (the backtracker visits
+ # these in reverse order).
+ conflict_pkgs.sort(reverse=True)
+ backtrack_data = []
+ for to_be_masked in conflict_pkgs:
+ # For missed update messages, find out which
+ # parent atoms did not match to_be_masked (and
+ # hence matched the package selected instead).
+ parent_atoms = \
+ self._dynamic_config._parent_atoms.get(to_be_masked, set())
+ conflict_atoms = set(parent_atom for parent_atom in all_parents \
+ if parent_atom not in parent_atoms)
+ backtrack_data.append((to_be_masked, conflict_atoms))
+
+ to_be_masked = backtrack_data[-1][0]
+
+ self._dynamic_config._backtrack_infos.setdefault(
+ "slot conflict", []).append(backtrack_data)
+ self._dynamic_config._need_restart = True
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot conflict:")
+ msg.append(" first package: %s" % existing_node)
+ msg.append(" package to mask: %s" % to_be_masked)
+ msg.append(" slot: %s" % slot_atom)
+ msg.append(" parents: %s" % ", ".join( \
+ "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
+ """
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
+ by rebuilding the parent package, then schedule the rebuild via
+ backtracking, and return True. Otherwise, return False.
+ """
+
+ found_update = False
+ for parent_atom, conflict_pkgs in conflict_atoms.items():
+ parent, atom = parent_atom
+
+ if not isinstance(parent, Package):
+ continue
+
+ if atom.slot_operator != "=" or not parent.built:
+ continue
+
+ if pkg not in conflict_pkgs:
+ continue
+
+ for other_pkg in slot_nodes:
+ if other_pkg in conflict_pkgs:
+ continue
+
+ dep = Dependency(atom=atom, child=other_pkg,
+ parent=parent, root=pkg.root)
+
+ new_dep = \
+ self._slot_operator_update_probe_slot_conflict(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_dep=new_dep)
+ found_update = True
+
+ return found_update
+
+ def _slot_change_probe(self, dep):
+ """
+ @rtype: Package or None
+ @return: the unbuilt ebuild instance if dep.child should be
+ rebuilt due to a change in sub-slot (without revbump,
+ as in bug #456208), otherwise None.
+ """
+ if not (isinstance(dep.parent, Package) and \
+ not dep.parent.built and dep.child.built):
+ return None
+
+ root_config = self._frozen_config.roots[dep.root]
+ matches = []
+ try:
+ matches.append(self._pkg(dep.child.cpv, "ebuild",
+ root_config, myrepo=dep.child.repo))
+ except PackageNotFound:
+ pass
+
+ for unbuilt_child in chain(matches,
+ self._iter_match_pkgs(root_config, "ebuild",
+ Atom("=%s" % (dep.child.cpv,)))):
+ if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ unbuilt_child,
+ modified_use=self._pkg_use_enabled(unbuilt_child)):
+ continue
+ if not self._pkg_visibility_check(unbuilt_child):
+ continue
+ break
+ else:
+ return None
+
+ if unbuilt_child.slot == dep.child.slot and \
+ unbuilt_child.sub_slot == dep.child.sub_slot:
+ return None
+
+ return unbuilt_child
+
+ def _slot_change_backtrack(self, dep, new_child_slot):
+ child = dep.child
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot/sub-slot change:")
+ msg.append(" child package: %s" % child)
+ msg.append(" child slot: %s/%s" %
+ (child.slot, child.sub_slot))
+ msg.append(" new child: %s" % new_child_slot)
+ msg.append(" new child slot: %s/%s" %
+ (new_child_slot.slot, new_child_slot.sub_slot))
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not child.installed:
+ masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ reinstalls.add((child.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
+ new_dep=None):
+ if new_child_slot is None:
+ child = dep.child
+ else:
+ child = new_child_slot
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to missed slot abi update:")
+ msg.append(" child package: %s" % child)
+ if new_child_slot is not None:
+ msg.append(" new child slot package: %s" % new_child_slot)
+ msg.append(" parent package: %s" % dep.parent)
+ if new_dep is not None:
+ msg.append(" new parent pkg: %s" % new_dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ abi_masks = {}
+ if new_child_slot is None:
+ if not child.installed:
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
+ if not dep.parent.installed:
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
+ if abi_masks:
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
+
+ # trigger replacement of installed packages if necessary
+ abi_reinstalls = set()
+ if dep.parent.installed:
+ if new_dep is not None:
+ replacement_atom = new_dep.parent.slot_atom
+ else:
+ replacement_atom = self._replace_installed_atom(dep.parent)
+ if replacement_atom is not None:
+ abi_reinstalls.add((dep.parent.root, replacement_atom))
+ if new_child_slot is None and child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ abi_reinstalls.add((child.root, replacement_atom))
+ if abi_reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(abi_reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_probe_slot_conflict(self, dep):
+ new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
+
+ if new_dep is not None:
+ return new_dep
+
+ if self._dynamic_config._autounmask is True:
+
+ for autounmask_level in self._autounmask_levels():
+
+ new_dep = self._slot_operator_update_probe(dep,
+ slot_conflict=True, autounmask_level=autounmask_level)
+
+ if new_dep is not None:
+ return new_dep
+
+ return None
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False,
+ slot_conflict=False, autounmask_level=None):
+ """
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
+ were built against. Detect this case so that we can schedule rebuilds
+ and reinstalls when appropriate.
+ NOTE: This function only searches for updates that involve upgrades
+ to higher versions, since the logic required to detect when a
+ downgrade would be desirable is not implemented.
+ """
+
+ if dep.child.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
+ modified_use=self._pkg_use_enabled(dep.child)):
+ return None
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return None
+
+ debug = "--debug" in self._frozen_config.myopts
+ selective = "selective" in self._dynamic_config.myparams
+ want_downgrade = None
+
+ def check_reverse_dependencies(existing_pkg, candidate_pkg,
+ replacement_parent=None):
+ """
+ Check if candidate_pkg satisfies all of existing_pkg's non-
+ slot operator parents.
+ """
+ built_slot_operator_parents = set()
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if atom.slot_operator_built:
+ built_slot_operator_parents.add(parent)
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if isinstance(parent, Package):
+ if parent in built_slot_operator_parents:
+ # This parent may need to be rebuilt, so its
+ # dependencies aren't necessarily relevant.
+ continue
+
+ if replacement_parent is not None and \
+ (replacement_parent.slot_atom == parent.slot_atom
+ or replacement_parent.cpv == parent.cpv):
+ # This parent is irrelevant because we intend to
+ # replace it with replacement_parent.
+ continue
+
+ if any(pkg is not parent and
+ (pkg.slot_atom == parent.slot_atom or
+ pkg.cpv == parent.cpv) for pkg in
+ self._dynamic_config._package_tracker.match(
+ parent.root, Atom(parent.cp))):
+ # This parent may need to be eliminated due to a
+ # slot conflict, so its dependencies aren't
+ # necessarily relevant.
+ continue
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not atom_set.findAtomForPackage(candidate_pkg,
+ modified_use=self._pkg_use_enabled(candidate_pkg)):
+ return False
+ return True
+
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom, autounmask_level=autounmask_level):
+
+ if not check_reverse_dependencies(dep.parent, replacement_parent):
+ continue
+
+ selected_atoms = None
+
+ atoms = set()
+ invalid_metadata = False
+ for dep_key in ("DEPEND", "HDEPEND", "RDEPEND", "PDEPEND"):
+ dep_string = replacement_parent._metadata[dep_key]
+ if not dep_string:
+ continue
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(replacement_parent),
+ is_valid_flag=replacement_parent.iuse.is_valid_flag,
+ flat=True, token_class=Atom,
+ eapi=replacement_parent.eapi)
+ except portage.exception.InvalidDependString:
+ invalid_metadata = True
+ break
+
+ atoms.update(token for token in dep_string if isinstance(token, Atom))
+
+ if invalid_metadata:
+ continue
+
+ # List of lists of (child, atom) pairs, one list per atom.
+ replacement_candidates = []
+ # Set of all packages all atoms can agree on.
+ all_candidate_pkgs = None
+
+ for atom in atoms:
+ if atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps; we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ unevaluated_atom = atom.unevaluated_atom
+ atom = atom.without_use
+
+ if replacement_parent.built and \
+ portage.dep._match_slot(atom, dep.child):
+ # Our selected replacement_parent appears to be built
+ # for the existing child selection. So, discard this
+ # parent and search for another.
+ break
+
+ candidate_pkg_atoms = []
+ candidate_pkgs = []
+ for pkg in self._iter_similar_available(
+ dep.child, atom):
+ if pkg.slot == dep.child.slot and \
+ pkg.sub_slot == dep.child.sub_slot:
+ # If slot/sub-slot is identical, then there's
+ # no point in updating.
+ continue
+ if new_child_slot:
+ if pkg.slot == dep.child.slot:
+ continue
+ if pkg < dep.child:
+ # the new slot only matters if the
+ # package version is higher
+ continue
+ else:
+ if pkg.slot != dep.child.slot:
+ continue
+ if pkg < dep.child:
+ if want_downgrade is None:
+ want_downgrade = self._downgrade_probe(dep.child)
+ # be careful not to trigger a rebuild when
+ # the only version available with a
+ # different slot_operator is an older version
+ if not want_downgrade:
+ continue
+ if pkg.version == dep.child.version and not dep.child.built:
+ continue
+
+ insignificant = False
+ if not slot_conflict and \
+ selective and \
+ dep.parent.installed and \
+ dep.child.installed and \
+ dep.parent >= replacement_parent and \
+ dep.child.cpv == pkg.cpv:
+ # This can happen if the child's sub-slot changed
+ # without a revision bump. The sub-slot change is
+ # considered insignificant until one of its parent
+ # packages needs to be rebuilt (which may trigger a
+ # slot conflict).
+ insignificant = True
+
+ if not insignificant:
+ # Evaluate USE conditionals and || deps, in order
+ # to see if this atom is really desirable, since
+ # otherwise we may trigger an undesirable rebuild
+ # as in bug #460304.
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ if unevaluated_atom not in selected_atoms:
+ continue
+
+ if not insignificant and \
+ check_reverse_dependencies(dep.child, pkg,
+ replacement_parent=replacement_parent):
+
+ candidate_pkg_atoms.append((pkg, unevaluated_atom))
+ candidate_pkgs.append(pkg)
+ replacement_candidates.append(candidate_pkg_atoms)
+ if all_candidate_pkgs is None:
+ all_candidate_pkgs = set(candidate_pkgs)
+ else:
+ all_candidate_pkgs.intersection_update(candidate_pkgs)
+
+ if not all_candidate_pkgs:
+ # If the atoms that connect parent and child can't agree on
+ # any replacement child, we can't do anything.
+ continue
+
+ # Now select one of the pkgs as replacement. This is as easy as
+ # selecting the highest version.
+ # The more complicated part is to choose an atom for the
+ # new Dependency object. Choose the one which ranked the
+ # selected package highest.
+ selected = None
+ for candidate_pkg_atoms in replacement_candidates:
+ for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
+ if pkg not in all_candidate_pkgs:
+ continue
+ if selected is None or \
+ selected[0] < pkg or \
+ (selected[0] is pkg and i < selected[2]):
+ selected = (pkg, atom, i)
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % selected[0])
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return Dependency(parent=replacement_parent,
+ child=selected[0], atom=selected[1])
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
+ def _slot_operator_unsatisfied_probe(self, dep):
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return False
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if atom.slot_operator != "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps; we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ pkg, existing_node = self._select_package(dep.root, atom,
+ onlydeps=dep.onlydeps)
+
+ if pkg is not None:
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_unsatisfied_probe:")
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" existing parent atom: %s" % dep.atom)
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append(" new child package: %s" % pkg)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return True
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_unsatisfied_probe:")
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" existing parent atom: %s" % dep.atom)
+ msg.append(" new parent package: %s" % None)
+ msg.append(" new child package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return False
+
+ def _slot_operator_unsatisfied_backtrack(self, dep):
+
+ parent = dep.parent
+
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied "
+ "built slot-operator dep:")
+ msg.append(" parent package: %s" % parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not parent.installed:
+ masks.setdefault(parent, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if parent.installed:
+ replacement_atom = self._replace_installed_atom(parent)
+ if replacement_atom is not None:
+ reinstalls.add((parent.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _downgrade_probe(self, pkg):
+ """
+ Detect cases where a downgrade of the given package is considered
+ desirable due to the current version being masked or unavailable.
+ """
+ available_pkg = None
+ for available_pkg in self._iter_similar_available(pkg,
+ pkg.slot_atom):
+ if available_pkg >= pkg:
+ # There's an available package of the same or higher
+ # version, so downgrade seems undesirable.
+ return False
+
+ return available_pkg is not None
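+ # Hypothetical example: if pkg is foo-2 and only foo-1 is still
+ # available in the same slot, the loop never sees a same-or-higher
+ # version, and True is returned since a lower candidate exists.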
+
+ def _select_atoms_probe(self, root, pkg):
+ selected_atoms = []
+ use = self._pkg_use_enabled(pkg)
+ for k in pkg._dep_keys:
+ v = pkg._metadata.get(k)
+ if not v:
+ continue
+ selected_atoms.extend(self._select_atoms(
+ root, v, myuse=use, parent=pkg)[pkg])
+ return frozenset(x.unevaluated_atom for
+ x in selected_atoms)
+
+ def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
+ """
+ Given a package that's in the graph, do a rough check to
+ see if a similar package is available to install. The given
+ graph_pkg itself may be yielded only if it's not installed.
+ """
+
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+
+ for pkg in self._iter_match_pkgs_any(
+ graph_pkg.root_config, atom):
+ if pkg.cp != graph_pkg.cp:
+ # discard old-style virtual match
+ continue
+ if pkg.installed:
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ if pkg.built:
+ if self._equiv_binary_installed(pkg):
+ continue
+ if not (not use_ebuild_visibility and
+ (usepkgonly or useoldpkg_atoms.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
+ not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
+ continue
+ if not self._pkg_visibility_check(pkg,
+ autounmask_level=autounmask_level):
+ continue
+ yield pkg
+
+ def _replace_installed_atom(self, inst_pkg):
+ """
+ Given an installed package, generate an atom suitable for
+ slot_operator_replace_installed backtracking info. The replacement
+ SLOT may differ from the installed SLOT, so first search by cpv.
+ """
+ built_pkgs = []
+ for pkg in self._iter_similar_available(inst_pkg,
+ Atom("=%s" % inst_pkg.cpv)):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ if built_pkgs:
+ best_version = None
+ for pkg in built_pkgs:
+ if best_version is None or pkg > best_version:
+ best_version = pkg
+ return best_version.slot_atom
+
+ return None
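+ # Summary of the search above: an unbuilt ebuild match immediately
+ # yields its slot_atom (the replacement SLOT may differ from the
+ # installed one, hence the =cpv search first); failing that, the
+ # highest-version built candidate's slot_atom is used.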
+
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
+ rebuilds if they can link to a newer slot that's in the graph.
+ """
+
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
+
+ for dep in slot_info:
+
+ atom = dep.atom
+ if atom.slot_operator is None:
+ continue
+
+ if not atom.slot_operator_built:
+ new_child_slot = self._slot_change_probe(dep)
+ if new_child_slot is not None:
+ self._slot_change_backtrack(dep, new_child_slot)
+ continue
+
+ if not (dep.parent and
+ isinstance(dep.parent, Package) and dep.parent.built):
+ continue
+
+ # Check for slot update first, since we don't want to
+ # trigger reinstall of the child package when a newer
+ # slot will be used instead.
+ if rebuild_if_new_slot:
+ new_dep = self._slot_operator_update_probe(dep,
+ new_child_slot=True)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_dep.child)
+
+ if dep.want_update:
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
+
+ def _reinstall_for_flags(self, pkg, forced_flags,
+ orig_use, orig_iuse, cur_use, cur_iuse):
+ """Return a set of flags that trigger reinstallation, or None if there
+ are no such flags."""
+
+ # binpkg_respect_use: Behave like newuse by default. If newuse is
+ # False and changed_use is True, then behave like changed_use.
+ binpkg_respect_use = (pkg.built and
+ self._dynamic_config.myparams.get("binpkg_respect_use")
+ in ("y", "auto"))
+ newuse = "--newuse" in self._frozen_config.myopts
+ changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.eapi))
+
+ if newuse or (binpkg_respect_use and not changed_use):
+ flags = set(orig_iuse.symmetric_difference(
+ cur_iuse).difference(forced_flags))
+ flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
+ if flags:
+ return flags
+
+ elif changed_use or binpkg_respect_use:
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
+ if flags:
+ return flags
+ return None
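+ # A small worked example of the set arithmetic above (toy flag
+ # sets, not real metadata): with orig_iuse={a,b}, orig_use={a},
+ # cur_iuse={a,b,c}, cur_use={a,c}, empty forced_flags and empty
+ # feature_flags, the --newuse branch computes:
+ #     orig_iuse ^ cur_iuse                          -> {c}
+ #     (orig_iuse & orig_use) ^ (cur_iuse & cur_use) -> {c}
+ # so {"c"} is returned and the package is reinstalled.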
+
+ def _create_graph(self, allow_unsatisfied=False):
+ dep_stack = self._dynamic_config._dep_stack
+ dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
+ while dep_stack or dep_disjunctive_stack:
+ self._spinner_update()
+ while dep_stack:
+ dep = dep_stack.pop()
+ if isinstance(dep, Package):
+ if not self._add_pkg_deps(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ continue
+ if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if dep_disjunctive_stack:
+ if not self._pop_disjunction(allow_unsatisfied):
+ return 0
+ return 1
+
+ def _expand_set_args(self, input_args, add_to_digraph=False):
+ """
+ Iterate over a list of DependencyArg instances and yield all
+ instances given in the input together with additional SetArg
+ instances that are generated from nested sets.
+ @param input_args: An iterable of DependencyArg instances
+ @type input_args: Iterable
+ @param add_to_digraph: If True then add SetArg instances
+ to the digraph, in order to record parent -> child
+ relationships from nested sets
+ @type add_to_digraph: Boolean
+ @rtype: Iterable
+ @return: All args given in the input together with additional
+ SetArg instances that are generated from nested sets
+ """
+
+ traversed_set_args = set()
+
+ for arg in input_args:
+ if not isinstance(arg, SetArg):
+ yield arg
+ continue
+
+ root_config = arg.root_config
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ arg_stack = [arg]
+ while arg_stack:
+ arg = arg_stack.pop()
+ if arg in traversed_set_args:
+ continue
+
+ # If a node with the same hash already exists in
+ # the digraph, preserve the existing instance which
+ # may have a different reset_depth attribute
+ # (distinguishes user arguments from sets added for
+ # another reason such as complete mode).
+ arg = self._dynamic_config.digraph.get(arg, arg)
+ traversed_set_args.add(arg)
+
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(arg, None,
+ priority=BlockerDepPriority.instance)
+
+ yield arg
+
+ # Traverse nested sets and add them to the stack
+ # if they're not already in the graph. Also, add graph
+ # edges between parent and nested sets.
+ for token in arg.pset.getNonAtoms():
+ if not token.startswith(SETPREFIX):
+ continue
+ s = token[len(SETPREFIX):]
+ nested_set = depgraph_sets.sets.get(s)
+ if nested_set is None:
+ nested_set = root_config.sets.get(s)
+ if nested_set is not None:
+ # Propagate the reset_depth attribute from
+ # parent set to nested set.
+ nested_arg = SetArg(arg=token, pset=nested_set,
+ reset_depth=arg.reset_depth,
+ root_config=root_config)
+
+ # Preserve instances already in the graph (same
+ # reason as for the "arg" variable above).
+ nested_arg = self._dynamic_config.digraph.get(
+ nested_arg, nested_arg)
+ arg_stack.append(nested_arg)
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(nested_arg, arg,
+ priority=BlockerDepPriority.instance)
+ depgraph_sets.sets[nested_arg.name] = nested_arg.pset
+
+ def _add_dep(self, dep, allow_unsatisfied=False):
+ debug = "--debug" in self._frozen_config.myopts
+ buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
+ nodeps = "--nodeps" in self._frozen_config.myopts
+ if dep.blocker:
+
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
+ self._dynamic_config._package_tracker.slot_conflicts())
+ if not buildpkgonly and \
+ not nodeps and \
+ not dep.collapsed_priority.ignored and \
+ not dep.collapsed_priority.optional and \
+ not is_slot_conflict_parent:
+ if dep.parent.onlydeps:
+ # It's safe to ignore blockers if the
+ # parent is an --onlydeps node.
+ return 1
+ # The blocker applies to the root where
+ # the parent is or will be installed.
+ blocker = Blocker(atom=dep.atom,
+ eapi=dep.parent.eapi,
+ priority=dep.priority, root=dep.parent.root)
+ self._dynamic_config._blocker_parents.add(blocker, dep.parent)
+ return 1
+
+ if dep.child is None:
+ dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+ onlydeps=dep.onlydeps)
+ else:
+ # The caller has selected a specific package
+ # via self._minimize_packages().
+ dep_pkg = dep.child
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ dep.root, dep_pkg.slot_atom, installed=False), None)
+
+ if not dep_pkg:
+ if (dep.collapsed_priority.optional or
+ dep.collapsed_priority.ignored):
+ # This is an unnecessary build-time dep.
+ return 1
+
+ # NOTE: For removal actions, allow_unsatisfied is always
+ # True since all existing removal actions traverse all
+ # installed deps deeply via the _complete_graph method,
+ # which calls _create_graph with allow_unsatisfied = True.
+ if allow_unsatisfied:
+ self._dynamic_config._unsatisfied_deps.append(dep)
+ return 1
+
+ # The following case occurs when
+ # _solve_non_slot_operator_slot_conflicts calls
+ # _create_graph. In this case, ignore unsatisfied deps for
+ # installed packages only if their depth is beyond the depth
+ # requested by the user and the dep was initially
+ # unsatisfied (not broken by a slot conflict in the current
+ # graph). See bug #520950.
+ # NOTE: The value of dep.parent.depth is guaranteed to be
+ # either an integer or _UNREACHABLE_DEPTH, where
+ # _UNREACHABLE_DEPTH indicates that the parent has been
+ # pulled in by the _complete_graph method (rather than by
+ # explicit arguments or their deep dependencies). These
+ # cases must be distinguished because depth is meaningless
+ # for packages that are not reachable as deep dependencies
+ # of arguments.
+ if (self._dynamic_config._complete_mode and
+ isinstance(dep.parent, Package) and
+ dep.parent.installed and
+ (dep.parent.depth is self._UNREACHABLE_DEPTH or
+ (self._frozen_config.requested_depth is not True and
+ dep.parent.depth >= self._frozen_config.requested_depth))):
+ inst_pkg, in_graph = \
+ self._select_pkg_from_installed(dep.root, dep.atom)
+ if inst_pkg is None:
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ return 1
+
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((dep.root, dep.atom), {"myparent":dep.parent}))
+
+ # The parent node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking:
+ if dep.parent in self._dynamic_config._runtime_pkg_mask:
+ if debug:
+ writemsg(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (dep.parent,
+ self._dynamic_config._runtime_pkg_mask[
+ dep.parent]), noiselevel=-1)
+ elif dep.atom.slot_operator_built and \
+ self._slot_operator_unsatisfied_probe(dep):
+ self._slot_operator_unsatisfied_backtrack(dep)
+ return 1
+ else:
+ # Do not backtrack if only USE flags have to be changed in
+ # order to satisfy the dependency. Note that when
+ # want_restart_for_use_change sets the need_restart
+ # flag, it causes _select_pkg_highest_available to
+ # return None, and eventually we come through here
+ # and skip the "missing dependency" backtracking path.
+ dep_pkg, existing_node = \
+ self._select_package(dep.root, dep.atom.without_use,
+ onlydeps=dep.onlydeps)
+ if dep_pkg is None:
+ self._dynamic_config._backtrack_infos["missing dependency"] = dep
+ self._dynamic_config._need_restart = True
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied dep:")
+ msg.append(" parent: %s" % dep.parent)
+ msg.append(" priority: %s" % dep.priority)
+ msg.append(" root: %s" % dep.root)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 0
+
+ self._rebuild.add(dep_pkg, dep)
+
+ ignore = dep.collapsed_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps
+ if not ignore and not self._add_pkg(dep_pkg, dep):
+ return 0
+ return 1
+
+ def _check_slot_conflict(self, pkg, atom):
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom, installed=False), None)
+
+ matches = None
+ if existing_node:
+ matches = pkg.cpv == existing_node.cpv
+ if pkg != existing_node and \
+ atom is not None:
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ matches = bool(InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True).findAtomForPackage(existing_node,
+ modified_use=self._pkg_use_enabled(existing_node)))
+
+ return (existing_node, matches)
+
+ def _add_pkg(self, pkg, dep):
+ """
+ Adds a package to the depgraph, queues dependencies, and handles
+ slot conflicts.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ myparent = None
+ priority = None
+ depth = 0
+ if dep is None:
+ dep = Dependency()
+ else:
+ myparent = dep.parent
+ priority = dep.priority
+ depth = dep.depth
+ if priority is None:
+ priority = DepPriority()
+
+ if debug:
+ writemsg_level(
+ "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
+ pkg_use_display(pkg, self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+ if isinstance(myparent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ writemsg_level(
+ "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ uneval = ""
+ if dep.atom and dep.atom.unevaluated_atom and \
+ dep.atom is not dep.atom.unevaluated_atom:
+ uneval = " (%s)" % (dep.atom.unevaluated_atom,)
+ writemsg_level(
+ "%s%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Ensure that the dependencies of the same package
+ # are never processed more than once.
+ previously_added = pkg in self._dynamic_config.digraph
+
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # NOTE: REQUIRED_USE checks are delayed until after
+ # package selection, since we want to prompt the user
+ # for USE adjustment rather than have REQUIRED_USE
+ # affect package selection and || dep choices.
+ if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
+ required_use_is_sat = check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi)
+ if not required_use_is_sat:
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+
+ atom = dep.atom
+ if atom is None:
+ atom = Atom("=" + pkg.cpv)
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, atom),
+ {"myparent" : dep.parent, "show_req_use" : pkg}))
+ self._dynamic_config._required_use_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return 0
+
+ if not pkg.onlydeps:
+
+ existing_node, existing_node_matches = \
+ self._check_slot_conflict(pkg, dep.atom)
+ if existing_node:
+ if existing_node_matches:
+ # The existing node can be reused.
+ if pkg != existing_node:
+ pkg = existing_node
+ previously_added = True
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before
+ # it was selected
+ raise
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Re-used Child:".ljust(15),
+ pkg, pkg_use_display(pkg,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ else:
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Slot Conflict:".ljust(15),
+ existing_node, pkg_use_display(existing_node,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(existing_node))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ if not previously_added:
+ self._dynamic_config._package_tracker.add_pkg(pkg)
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+ self._check_masks(pkg)
+
+ if not pkg.installed:
+ # Allow this package to satisfy old-style virtuals in case it
+ # doesn't already. Any pre-existing providers will be preferred
+ # over this one.
+ try:
+ pkgsettings.setinst(pkg.cpv, pkg._metadata)
+ # For consistency, also update the global virtuals.
+ settings = self._frozen_config.roots[pkg.root].settings
+ settings.unlock()
+ settings.setinst(pkg.cpv, pkg._metadata)
+ settings.lock()
+ except portage.exception.InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ if arg_atoms:
+ self._dynamic_config._set_nodes.add(pkg)
+
+ # Do this even for onlydeps, so that the
+ # parent/child relationship is always known in case
+ # self._show_slot_collision_notice() needs to be called later.
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if pkg != dep.parent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.add(pkg,
+ dep.parent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(pkg, parent, priority=priority)
+ self._add_parent_atom(pkg, parent_atom)
+
+ # This section determines whether we go deeper into dependencies or not.
+ # We want to go deeper on a few occasions:
+ # - When installing package A, we need to make sure package A's deps are met.
+ # - With emerge --deep <pkgspec>, we need to recursively check the dependencies of pkgspec.
+ # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
+ if arg_atoms and depth != 0:
+ for parent, atom in arg_atoms:
+ if parent.reset_depth:
+ depth = 0
+ break
+
+ if previously_added and depth != 0 and \
+ isinstance(pkg.depth, int):
+ # Use pkg.depth if it is less than depth.
+ if isinstance(depth, int):
+ depth = min(pkg.depth, depth)
+ else:
+ # depth is _UNREACHABLE_DEPTH and pkg.depth is
+ # an int, so use the int because it's considered
+ # to be less than _UNREACHABLE_DEPTH.
+ depth = pkg.depth
+
+ pkg.depth = depth
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ dep.want_update = (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
+ dep.child = pkg
+ if (not pkg.onlydeps and
+ dep.atom and dep.atom.slot_operator is not None):
+ self._add_slot_operator_dep(dep)
+
+ recurse = deep is True or depth + 1 <= deep
+ dep_stack = self._dynamic_config._dep_stack
+ if "recurse" not in self._dynamic_config.myparams:
+ return 1
+ elif pkg.installed and not recurse:
+ dep_stack = self._dynamic_config._ignored_deps
+
+ self._spinner_update()
+
+ if not previously_added:
+ dep_stack.append(pkg)
+ return 1
+
+
+ def _remove_pkg(self, pkg):
+ """
+ Remove a package, and any digraph children that are
+ thereby left parentless, from all depgraph data structures.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ if debug:
+ writemsg_level(
+ "Removing package: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ try:
+ children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
+ if child is not pkg]
+ self._dynamic_config.digraph.remove(pkg)
+ except KeyError:
+ children = []
+
+ self._dynamic_config._package_tracker.discard_pkg(pkg)
+
+ self._dynamic_config._parent_atoms.pop(pkg, None)
+ self._dynamic_config._set_nodes.discard(pkg)
+
+ for child in children:
+ try:
+ self._dynamic_config._parent_atoms[child] = set((parent, atom) \
+ for (parent, atom) in self._dynamic_config._parent_atoms[child] \
+ if parent is not pkg)
+ except KeyError:
+ pass
+
+ # Remove slot operator dependencies.
+ slot_key = (pkg.root, pkg.slot_atom)
+ if slot_key in self._dynamic_config._slot_operator_deps:
+ self._dynamic_config._slot_operator_deps[slot_key] = \
+ [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
+ if dep.child is not pkg]
+ if not self._dynamic_config._slot_operator_deps[slot_key]:
+ del self._dynamic_config._slot_operator_deps[slot_key]
+
+ # Remove blockers.
+ self._dynamic_config._blocker_parents.discard(pkg)
+ self._dynamic_config._irrelevant_blockers.discard(pkg)
+ self._dynamic_config._unsolvable_blockers.discard(pkg)
+ self._dynamic_config._blocked_pkgs.discard(pkg)
+ self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
+
+ for child in children:
+ if child in self._dynamic_config.digraph and \
+ not self._dynamic_config.digraph.parent_nodes(child):
+ self._remove_pkg(child)
+
+ # Clear caches.
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+
+
+ def _check_masks(self, pkg):
+
+ slot_key = (pkg.root, pkg.slot_atom)
+
+ # Check for upgrades in the same slot that are
+ # masked due to a LICENSE change in a newer
+ # version that is not masked for any other reason.
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is not None and pkg < other_pkg:
+ self._dynamic_config._masked_license_updates.add(other_pkg)
+
+ def _add_parent_atom(self, pkg, parent_atom):
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ parent_atoms.add(parent_atom)
+
+ def _add_slot_operator_dep(self, dep):
+ slot_key = (dep.root, dep.child.slot_atom)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
+ if slot_info is None:
+ slot_info = []
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
+ slot_info.append(dep)
+
+ def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
+
+ myroot = pkg.root
+ metadata = pkg._metadata
+ removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.eapi)
+
+ edepend={}
+ for k in Package._dep_keys:
+ edepend[k] = metadata[k]
+
+ if not pkg.built and \
+ "--buildpkgonly" in self._frozen_config.myopts and \
+ "deep" not in self._dynamic_config.myparams:
+ edepend["RDEPEND"] = ""
+ edepend["PDEPEND"] = ""
+
+ ignore_build_time_deps = False
+ if pkg.built and not removal_action:
+ if self._dynamic_config.myparams.get("bdeps", "n") == "y":
+ # Pull in build time deps as requested, but mark them as
+ # "optional" since they are not strictly required. This allows
+ # more freedom in the merge order calculation for solving
+ # circular dependencies. Don't convert to PDEPEND since that
+ # could make --with-bdeps=y less effective if it is used to
+ # adjust merge order to prevent built_with_use() calls from
+ # failing.
+ pass
+ else:
+ ignore_build_time_deps = True
+
+ if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
+ # Removal actions never traverse ignored buildtime
+ # dependencies, so it's safe to discard them early.
+ edepend["DEPEND"] = ""
+ edepend["HDEPEND"] = ""
+ ignore_build_time_deps = True
+
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
+ if removal_action:
+ depend_root = myroot
+ else:
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
+
+ # If rebuild mode is not enabled, it's safe to discard ignored
+ # build-time dependencies. If you want these deps to be traversed
+ # in "complete" mode then you need to specify --with-bdeps=y.
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
+
+ deps = (
+ (depend_root, edepend["DEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
+ (myroot, edepend["RDEPEND"],
+ self._priority(runtime=True)),
+ (myroot, edepend["PDEPEND"],
+ self._priority(runtime_post=True))
+ )
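+ # In summary (as constructed above): DEPEND is resolved against
+ # depend_root, HDEPEND always against the running root, and
+ # RDEPEND/PDEPEND against the package's own root, with build-time
+ # entries marked optional or ignored as computed earlier.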
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for dep_root, dep_string, dep_priority in deps:
+ if not dep_string:
+ continue
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg),
+ is_valid_flag=pkg.iuse.is_valid_flag,
+ opconvert=True, token_class=Atom,
+ eapi=pkg.eapi)
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # Try again, but omit the is_valid_flag argument, since
+ # invalid USE conditionals are a common problem and it's
+ # practical to ignore this issue for installed packages.
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg),
+ opconvert=True, token_class=Atom,
+ eapi=pkg.eapi)
+ except portage.exception.InvalidDependString as e:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ try:
+ dep_string = list(self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, dep_string))
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ # should have been masked before it was selected
+ raise
+
+ if not dep_string:
+ continue
+
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ return 0
+
+ self._dynamic_config._traversed_pkg_deps.add(pkg)
+ return 1
+
+ def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ _autounmask_backup = self._dynamic_config._autounmask
+ if dep_priority.optional or dep_priority.ignored:
+ # Temporarily disable autounmask for deps that
+ # don't necessarily need to be satisfied.
+ self._dynamic_config._autounmask = False
+ try:
+ return self._wrapped_add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied)
+ finally:
+ self._dynamic_config._autounmask = _autounmask_backup
+
+ def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
+ """
+ In some cases, dep_check will return deps that shouldn't
+ be processed any further, so they are identified and
+ discarded here. Try to discard as few as possible since
+ discarded dependencies reduce the amount of information
+ available for optimization of merge order.
+ Don't ignore dependencies if pkg has a slot operator dependency on the child
+ and the child has changed slot/sub_slot.
+ """
+ if not mypriority.satisfied:
+ return False
+ slot_operator_rebuild = False
+ if atom.slot_operator == '=' and \
+ (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
+ mypriority.satisfied is not child and \
+ mypriority.satisfied.installed and \
+ child and \
+ not child.installed and \
+ (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
+ slot_operator_rebuild = True
+
+ return not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ not any(self._dynamic_config._package_tracker.match(
+ dep.child.root, dep.child.slot_atom, installed=False)) and \
+ not slot_operator_rebuild
+
+ def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
+ dep_string, allow_unsatisfied):
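+		# Note: pkg.depth may also be a non-integer sentinel (None when
+		# probing, or _UNREACHABLE_DEPTH in complete mode, as noted in
+		# _select_atoms_highest_available below); propagate it unchanged.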
+ if isinstance(pkg.depth, int):
+ depth = pkg.depth + 1
+ else:
+ depth = pkg.depth
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
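+		# A "deep" value of True denotes unlimited depth; otherwise only
+		# recurse through satisfied deps up to the configured depth.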
+ recurse_satisfied = deep is True or depth <= deep
+ debug = "--debug" in self._frozen_config.myopts
+ strict = pkg.type_name != "installed"
+
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ dep_repr = portage.dep.paren_enclose(dep_string,
+ unevaluated_atom=True, opconvert=True)
+ writemsg_level("Depstring: %s\n" % (dep_repr,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ selected_atoms = self._select_atoms(dep_root,
+ dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
+ strict=strict, priority=dep_priority)
+ except portage.exception.InvalidDependString:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ return 1
+
+ # should have been masked before it was selected
+ raise
+
+ if debug:
+ writemsg_level("Candidates: %s\n" % \
+ ([str(x) for x in selected_atoms[pkg]],),
+ noiselevel=-1, level=logging.DEBUG)
+
+ root_config = self._frozen_config.roots[dep_root]
+ vardb = root_config.trees["vartree"].dbapi
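+		# Virtual packages whose deps were traversed; only these are
+		# expanded by the indirect virtual dep loop further below.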
+ traversed_virt_pkgs = set()
+
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ for atom, child in self._minimize_children(
+ pkg, dep_priority, root_config, selected_atoms[pkg]):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ if atom.blocker and \
+ (dep_priority.optional or dep_priority.ignored):
+ # For --with-bdeps, ignore build-time only blockers
+ # that originate from built packages.
+ continue
+
+ mypriority = dep_priority.copy()
+ if not atom.blocker:
+
+ if atom.slot_operator == "=":
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
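+				# vardb matches are in ascending version order, so iterate
+				# in reverse to consider the highest installed versions first.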
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=depth, parent=pkg,
+ priority=mypriority, root=dep_root)
+
+ # In some cases, dep_check will return deps that shouldn't
+ # be processed any further, so they are identified and
+ # discarded here. Try to discard as few as possible since
+ # discarded dependencies reduce the amount of information
+ # available for optimization of merge order.
+ ignored = False
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
+ myarg = None
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ # Existing child selection may not be valid unless
+ # it's added to the graph immediately, since "complete"
+ # mode may select a different child later.
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
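+		# Drop the direct deps entry; the remaining entries correspond to
+		# indirect virtual deps selected by dep_check.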
+ selected_atoms.pop(pkg)
+
+ # Add selected indirect virtual deps to the graph. This
+ # takes advantage of circular dependency avoidance that's done
+ # by dep_zapdeps. We preserve actual parent/child relationships
+ # here in order to avoid distorting the dependency graph like
+ # <=portage-2.1.6.x did.
+ for virt_dep, atoms in selected_atoms.items():
+
+ virt_pkg = virt_dep.child
+ if virt_pkg not in traversed_virt_pkgs:
+ continue
+
+ if debug:
+ writemsg_level("\nCandidates: %s: %s\n" % \
+ (virt_pkg.cpv, [str(x) for x in atoms]),
+ noiselevel=-1, level=logging.DEBUG)
+
+ if not dep_priority.ignored or \
+ self._dynamic_config._traverse_ignored_deps:
+
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(virt_dep.atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ virt_dep.priority.satisfied = inst_pkg
+ break
+ if not virt_dep.priority.satisfied:
+ # none visible, so use highest
+ virt_dep.priority.satisfied = inst_pkgs[0]
+
+ if not self._add_pkg(virt_pkg, virt_dep):
+ return 0
+
+ for atom, child in self._minimize_children(
+ pkg, self._priority(runtime=True), root_config, atoms):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ # This is a GLEP 37 virtual, so its deps are all runtime.
+ mypriority = self._priority(runtime=True)
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ # Dependencies of virtuals are considered to have the
+ # same depth as the virtual itself.
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=virt_dep.depth,
+ parent=virt_pkg, priority=mypriority, root=dep_root,
+ collapsed_parent=pkg, collapsed_priority=dep_priority)
+
+ ignored = False
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
+ myarg = None
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ if debug:
+ writemsg_level("\nExiting... %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 1
+
+ def _minimize_children(self, parent, priority, root_config, atoms):
+ """
+ Selects packages to satisfy the given atoms, and minimizes the
+ number of selected packages. This serves to identify and eliminate
+ redundant package selections when multiple atoms happen to specify
+ a version range.
+ """
+
+ atom_pkg_map = {}
+
+ for atom in atoms:
+ if atom.blocker:
+ yield (atom, None)
+ continue
+ dep_pkg, existing_node = self._select_package(
+ root_config.root, atom, parent=parent)
+ if dep_pkg is None:
+ yield (atom, None)
+ continue
+ atom_pkg_map[atom] = dep_pkg
+
+ if len(atom_pkg_map) < 2:
+ for item in atom_pkg_map.items():
+ yield item
+ return
+
+ cp_pkg_map = {}
+ pkg_atom_map = {}
+ for atom, pkg in atom_pkg_map.items():
+ pkg_atom_map.setdefault(pkg, set()).add(atom)
+ cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
+
+ for pkgs in cp_pkg_map.values():
+ if len(pkgs) < 2:
+ for pkg in pkgs:
+ for atom in pkg_atom_map[pkg]:
+ yield (atom, pkg)
+ continue
+
+ # Use a digraph to identify and eliminate any
+ # redundant package selections.
+ atom_pkg_graph = digraph()
+ cp_atoms = set()
+ for pkg1 in pkgs:
+ for atom in pkg_atom_map[pkg1]:
+ cp_atoms.add(atom)
+ atom_pkg_graph.add(pkg1, atom)
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ for pkg2 in pkgs:
+ if pkg2 is pkg1:
+ continue
+ if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
+ atom_pkg_graph.add(pkg2, atom)
+
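+			# A package is redundant when every atom that selected it is
+			# also satisfied by at least one other remaining package.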
+ for pkg in pkgs:
+ eliminate_pkg = True
+ for atom in atom_pkg_graph.parent_nodes(pkg):
+ if len(atom_pkg_graph.child_nodes(atom)) < 2:
+ eliminate_pkg = False
+ break
+ if eliminate_pkg:
+ atom_pkg_graph.remove(pkg)
+
+ # Yield ~, =*, < and <= atoms first, since those are more likely to
+ # cause slot conflicts, and we want those atoms to be displayed
+ # in the resulting slot conflict message (see bug #291142).
+ # Give similar treatment to slot/sub-slot atoms.
+ conflict_atoms = []
+ normal_atoms = []
+ abi_atoms = []
+ for atom in cp_atoms:
+ if atom.slot_operator_built:
+ abi_atoms.append(atom)
+ continue
+ conflict = False
+ for child_pkg in atom_pkg_graph.child_nodes(atom):
+ existing_node, matches = \
+ self._check_slot_conflict(child_pkg, atom)
+ if existing_node and not matches:
+ conflict = True
+ break
+ if conflict:
+ conflict_atoms.append(atom)
+ else:
+ normal_atoms.append(atom)
+
+ for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
+ child_pkgs = atom_pkg_graph.child_nodes(atom)
+ # if more than one child, yield highest version
+ if len(child_pkgs) > 1:
+ child_pkgs.sort()
+ yield (atom, child_pkgs[-1])
+
+ def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+ """
+ Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
+ Yields non-disjunctive deps. Raises InvalidDependString when
+ necessary.
+ """
+ for x in dep_struct:
+ if isinstance(x, list):
+ if x and x[0] == "||":
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
+ else:
+ for y in self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, x):
+ yield y
+ else:
+ # Note: Eventually this will check for PROPERTIES=virtual
+ # or whatever other metadata gets implemented for this
+ # purpose.
+ if x.cp.startswith('virtual/'):
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
+ else:
+ yield x
+
+ def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+ self._dynamic_config._dep_disjunctive_stack.append(
+ (pkg, dep_root, dep_priority, dep_struct))
+
+ def _pop_disjunction(self, allow_unsatisfied):
+ """
+ Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
+ populate self._dynamic_config._dep_stack.
+ """
+ pkg, dep_root, dep_priority, dep_struct = \
+ self._dynamic_config._dep_disjunctive_stack.pop()
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
+ return 0
+ return 1
+
+ def _priority(self, **kwargs):
+ if "remove" in self._dynamic_config.myparams:
+ priority_constructor = UnmergeDepPriority
+ else:
+ priority_constructor = DepPriority
+ return priority_constructor(**kwargs)
+
+ def _dep_expand(self, root_config, atom_without_category):
+ """
+ @param root_config: a root config instance
+ @type root_config: RootConfig
+ @param atom_without_category: an atom without a category component
+ @type atom_without_category: String
+ @rtype: list
+ @return: a list of atoms containing categories (possibly empty)
+ """
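+		# For example, "bash" might expand to [Atom("app-shells/bash")] if
+		# some configured db contains a matching package in that category.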
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ atom_without_category, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+ categories = set()
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for cat in db.categories:
+ if db.cp_list("%s/%s" % (cat, atom_pn)):
+ categories.add(cat)
+
+ deps = []
+ for cat in categories:
+ deps.append(Atom(insert_category_into_atom(
+ atom_without_category, cat), allow_repo=True))
+ return deps
+
+ def _have_new_virt(self, root, atom_cp):
+ ret = False
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root]["dbs"]:
+ if db.cp_list(atom_cp):
+ ret = True
+ break
+ return ret
+
+ def _iter_atoms_for_pkg(self, pkg):
+ depgraph_sets = self._dynamic_config.sets[pkg.root]
+ atom_arg_map = depgraph_sets.atom_arg_map
+ for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
+ if atom.cp != pkg.cp and \
+ self._have_new_virt(pkg.root, atom.cp):
+ continue
+ visible_pkgs = \
+ self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
+ visible_pkgs.reverse() # descending order
+ higher_slot = None
+ for visible_pkg in visible_pkgs:
+ if visible_pkg.cp != atom.cp:
+ continue
+ if pkg >= visible_pkg:
+				# This is descending order, and we're not
+				# interested in any versions <= the given pkg.
+ break
+ if pkg.slot_atom != visible_pkg.slot_atom:
+ higher_slot = visible_pkg
+ break
+ if higher_slot is not None:
+ continue
+ for arg in atom_arg_map[(atom, pkg.root)]:
+ if isinstance(arg, PackageArg) and \
+ arg.package != pkg:
+ continue
+ yield arg, atom
+
+ def select_files(self, args):
+ # Use the global event loop for spinner progress
+ # indication during file owner lookups (bug #461412).
+ spinner_id = None
+ try:
+ spinner = self._frozen_config.spinner
+ if spinner is not None and \
+ spinner.update is not spinner.update_quiet:
+ spinner_id = self._event_loop.idle_add(
+ self._frozen_config.spinner.update)
+ return self._select_files(args)
+ finally:
+ if spinner_id is not None:
+ self._event_loop.source_remove(spinner_id)
+
+ def _select_files(self, myfiles):
+		"""Given a list of .tbz2s, .ebuilds, sets, and deps, populate
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
+ appropriate depgraph and return a favorite list."""
+ self._load_vdb()
+ debug = "--debug" in self._frozen_config.myopts
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+		myfavorites = []
+ eroot = root_config.root
+ root = root_config.settings['ROOT']
+ vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[eroot]
+ args = []
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ lookup_owners = []
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+			if ext == ".tbz2":
+ if not os.path.exists(x):
+ if os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], "All", x)):
+ x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+ elif os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], x)):
+ x = os.path.join(pkgsettings["PKGDIR"], x)
+ else:
+ writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
+ writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
+ return 0, myfavorites
+				mytbz2 = portage.xpak.tbz2(x)
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ elif os.path.realpath(x) != \
+ os.path.realpath(bindb.bintree.getname(mykey)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+
+ pkg = self._pkg(mykey, "binary", root_config,
+ onlydeps=onlydeps)
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+			elif ext == ".ebuild":
+ ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+ pkgdir = os.path.dirname(ebuild_path)
+ tree_root = os.path.dirname(os.path.dirname(pkgdir))
+ cp = pkgdir[len(tree_root)+1:]
+ error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
+ "hierarchy or does not exist\n") % x
+ if not portage.isvalidatom(cp):
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ cat = portage.catsplit(cp)[0]
+ mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+ if not portage.isvalidatom("="+mykey):
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ ebuild_path = portdb.findname(mykey)
+ if ebuild_path:
+ if ebuild_path != os.path.join(os.path.realpath(tree_root),
+ cp, os.path.basename(ebuild_path)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ if mykey not in portdb.xmatch(
+ "match-visible", portage.cpv_getkey(mykey)):
+ writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
+ countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
+ "Continuing...")
+ else:
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ pkg = self._pkg(mykey, "ebuild", root_config,
+ onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif x.startswith(os.path.sep):
+ if not x.startswith(eroot):
+ portage.writemsg(("\n\n!!! '%s' does not start with" + \
+ " $EROOT.\n") % x, noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+ elif x.startswith("." + os.sep) or \
+ x.startswith(".." + os.sep):
+ f = os.path.abspath(x)
+ if not f.startswith(eroot):
+ portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
+ " $EROOT.\n") % (f, x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ lookup_owners.append(f)
+ else:
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ raise portage.exception.PackageSetNotFound(s)
+ if s in depgraph_sets.sets:
+ continue
+
+ try:
+ set_atoms = root_config.setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level("\n\n", level=logging.ERROR,
+ noiselevel=-1)
+ for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ writemsg_level(("emerge: the given set '%s' "
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR,
+ noiselevel=-1)
+ return False, myfavorites
+
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ continue
+ if not is_valid_package_atom(x, allow_repo=True):
+ portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ self._dynamic_config._skip_restart = True
+					return 0, []
+ # Don't expand categories or old-style virtuals here unless
+ # necessary. Expansion of old-style virtuals here causes at
+ # least the following problems:
+ # 1) It's more difficult to determine which set(s) an atom
+ # came from, if any.
+ # 2) It takes away freedom from the resolver to choose other
+ # possible expansions when necessary.
+ if "/" in x.split(":")[0]:
+ args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
+ root_config=root_config))
+ continue
+ expanded_atoms = self._dep_expand(root_config, x)
+ installed_cp_set = set()
+ for atom in expanded_atoms:
+ if vardb.cp_list(atom.cp):
+ installed_cp_set.add(atom.cp)
+
+ if len(installed_cp_set) > 1:
+ non_virtual_cps = set()
+ for atom_cp in installed_cp_set:
+ if not atom_cp.startswith("virtual/"):
+ non_virtual_cps.add(atom_cp)
+ if len(non_virtual_cps) == 1:
+ installed_cp_set = non_virtual_cps
+
+ if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+ installed_cp = next(iter(installed_cp_set))
+ for atom in expanded_atoms:
+ if atom.cp == installed_cp:
+ available = False
+ for pkg in self._iter_match_pkgs_any(
+ root_config, atom.without_use,
+ onlydeps=onlydeps):
+ if not pkg.installed:
+ available = True
+ break
+ if available:
+ expanded_atoms = [atom]
+ break
+
+ # If a non-virtual package and one or more virtual packages
+ # are in expanded_atoms, use the non-virtual package.
+ if len(expanded_atoms) > 1:
+ number_of_virtuals = 0
+ for expanded_atom in expanded_atoms:
+ if expanded_atom.cp.startswith("virtual/"):
+ number_of_virtuals += 1
+ else:
+ candidate = expanded_atom
+ if len(expanded_atoms) - number_of_virtuals == 1:
+						expanded_atoms = [candidate]
+
+ if len(expanded_atoms) > 1:
+ writemsg("\n\n", noiselevel=-1)
+ ambiguous_package_name(x, expanded_atoms, root_config,
+ self._frozen_config.spinner, self._frozen_config.myopts)
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+ if expanded_atoms:
+ atom = expanded_atoms[0]
+ else:
+ null_atom = Atom(insert_category_into_atom(x, "null"),
+ allow_repo=True)
+ cat, atom_pn = portage.catsplit(null_atom.cp)
+ virts_p = root_config.settings.get_virts_p().get(atom_pn)
+ if virts_p:
+ # Allow the depgraph to choose which virtual.
+ atom = Atom(null_atom.replace('null/', 'virtual/', 1),
+ allow_repo=True)
+ else:
+ atom = null_atom
+
+ if atom.use and atom.use.conditional:
+ writemsg(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,), noiselevel=-1)
+ writemsg("!!! Please check ebuild(5) for full details.\n")
+ self._dynamic_config._skip_restart = True
+					return 0, []
+
+ args.append(AtomArg(arg=x, atom=atom,
+ root_config=root_config))
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
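+				# Strip the ROOT prefix but keep the leading slash
+				# (ROOT always ends with a slash, hence len(root) - 1).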
+ relative_paths.append(x[len(root)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ real_vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if not owners:
+ portage.writemsg(("\n\n!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0], noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+
+ for cpv in owners:
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
+ args.append(AtomArg(arg=atom, atom=atom,
+ root_config=root_config))
+
+ if "--update" in self._frozen_config.myopts:
+ # In some cases, the greedy slots behavior can pull in a slot that
+ # the user would want to uninstall due to it being blocked by a
+ # newer version in a different slot. Therefore, it's necessary to
+ # detect and discard any that should be uninstalled. Each time
+ # that arguments are updated, package selections are repeated in
+ # order to ensure consistency with the current arguments:
+ #
+ # 1) Initialize args
+ # 2) Select packages and generate initial greedy atoms
+ # 3) Update args with greedy atoms
+ # 4) Select packages and generate greedy atoms again, while
+ # accounting for any blockers between selected packages
+ # 5) Update args with revised greedy atoms
+
+ self._set_args(args)
+ greedy_args = []
+ for arg in args:
+ greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom):
+ greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+
+ self._set_args(greedy_args)
+ del greedy_args
+
+ # Revise greedy atoms, accounting for any blockers
+ # between selected packages.
+ revised_greedy_args = []
+ for arg in args:
+ revised_greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom,
+ blocker_lookahead=True):
+ revised_greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+ args = revised_greedy_args
+ del revised_greedy_args
+
+ args.extend(self._gen_reinstall_sets())
+ self._set_args(args)
+
+ myfavorites = set(myfavorites)
+ for arg in args:
+ if isinstance(arg, (AtomArg, PackageArg)):
+ myfavorites.add(arg.atom)
+ elif isinstance(arg, SetArg):
+ if not arg.internal:
+ myfavorites.add(arg.arg)
+ myfavorites = list(myfavorites)
+
+ if debug:
+ portage.writemsg("\n", noiselevel=-1)
+ # Order needs to be preserved since a feature of --nodeps
+ # is to allow the user to force a specific merge order.
+ self._dynamic_config._initial_arg_list = args[:]
+
+ return self._resolve(myfavorites)
+
+ def _gen_reinstall_sets(self):
+
+ atom_list = []
+ for root, atom in self._rebuild.rebuild_list:
+ atom_list.append((root, '__auto_rebuild__', atom))
+ for root, atom in self._rebuild.reinstall_list:
+ atom_list.append((root, '__auto_reinstall__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
+
+ set_dict = {}
+ for root, set_name, atom in atom_list:
+ set_dict.setdefault((root, set_name), []).append(atom)
+
+ for (root, set_name), atoms in set_dict.items():
+ yield SetArg(arg=(SETPREFIX + set_name),
+ # Set reset_depth=False here, since we don't want these
+ # special sets to interact with depth calculations (see
+ # the emerge --deep=DEPTH option), though we want them
+ # to behave like normal arguments in most other respects.
+ pset=InternalPackageSet(initial_atoms=atoms),
+ force_reinstall=True,
+ internal=True,
+ reset_depth=False,
+ root_config=self._frozen_config.roots[root])
+
+ def _resolve(self, myfavorites):
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+		call self._create_graph to process their deps and return
+ a favorite list."""
+ debug = "--debug" in self._frozen_config.myopts
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ myroot = self._frozen_config.target_root
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ pprovideddict = pkgsettings.pprovideddict
+ virtuals = pkgsettings.getvirtuals()
+ args = self._dynamic_config._initial_arg_list[:]
+
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._spinner_update()
+ dep = Dependency(atom=atom, onlydeps=onlydeps,
+ root=myroot, parent=arg)
+ try:
+ pprovided = pprovideddict.get(atom.cp)
+ if pprovided and portage.match_from_list(atom, pprovided):
+ # A provided package has been specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if isinstance(arg, PackageArg):
+ if not self._add_pkg(arg.package, dep) or \
+ not self._create_graph():
+ if not self.need_restart():
+ writemsg(("\n\n!!! Problem " + \
+ "resolving dependencies for %s\n") % \
+ arg.arg, noiselevel=-1)
+ return 0, myfavorites
+ continue
+ if debug:
+ writemsg_level("\n Arg: %s\n Atom: %s\n" %
+ (arg, atom), noiselevel=-1, level=logging.DEBUG)
+ pkg, existing_node = self._select_package(
+ myroot, atom, onlydeps=onlydeps)
+ if not pkg:
+ pprovided_match = False
+ for virt_choice in virtuals.get(atom.cp, []):
+ expanded_atom = portage.dep.Atom(
+ atom.replace(atom.cp, virt_choice.cp, 1))
+ pprovided = pprovideddict.get(expanded_atom.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ pprovided_match = True
+ break
+ if pprovided_match:
+ continue
+
+ excluded = False
+ for any_match in self._iter_match_pkgs_any(
+ self._frozen_config.roots[myroot], atom):
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ any_match, modified_use=self._pkg_use_enabled(any_match)):
+ excluded = True
+ break
+ if excluded:
+ continue
+
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "system", "world")):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ return 0, myfavorites
+
+ self._dynamic_config._missing_args.append((arg, atom))
+ continue
+ if atom.cp != pkg.cp:
+ # For old-style virtuals, we need to repeat the
+ # package.provided check against the selected package.
+ expanded_atom = atom.replace(atom.cp, pkg.cp)
+ pprovided = pprovideddict.get(pkg.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if pkg.installed and \
+ "selective" not in self._dynamic_config.myparams and \
+ not self._frozen_config.excluded_pkgs.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ # Previous behavior was to bail out in this case, but
+ # since the dep is satisfied by the installed package,
+ # it's more friendly to continue building the graph
+ # and just show a warning message. Therefore, only bail
+ # out here if the atom is not from either the system or
+ # world set.
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "system", "world")):
+ return 0, myfavorites
+
+ # Add the selected package to the graph as soon as possible
+ # so that later dep_check() calls can use it as feedback
+ # for making more consistent atom selections.
+ if not self._add_pkg(pkg, dep):
+ if self.need_restart():
+ pass
+ elif isinstance(arg, SetArg):
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s from %s\n") % \
+ (atom, arg.arg), noiselevel=-1)
+ else:
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s\n") % \
+ (atom,), noiselevel=-1)
+ return 0, myfavorites
+
+				except SystemExit:
+					raise # re-raise so that the process can actually exit
+ except Exception as e:
+ writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
+ writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
+ raise
+
+ # Now that the root packages have been added to the graph,
+ # process the dependencies.
+ if not self._create_graph():
+ return 0, myfavorites
+
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False, myfavorites
+
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if (have_slot_conflict and
+ not self._accept_blocker_conflicts()) or \
+ (self._dynamic_config._allow_backtracking and
+ "slot conflict" in self._dynamic_config._backtrack_infos):
+ return False, myfavorites
+
+ if self._rebuild.trigger_rebuilds():
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["rebuild_list"] = self._rebuild.rebuild_list
+ config["reinstall_list"] = self._rebuild.reinstall_list
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if "config" in self._dynamic_config._backtrack_infos and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ self.need_restart():
+ return False, myfavorites
+
+ if not self._dynamic_config._prune_rebuilds and \
+ self._dynamic_config._slot_operator_replace_installed and \
+ self._get_missed_updates():
+ # When there are missed updates, we might have triggered
+ # some unnecessary rebuilds (see bug #439688). So, prune
+ # all the rebuilds and backtrack with the problematic
+ # updates masked. The next backtrack run should pull in
+ # any rebuilds that are really needed, and this
+ # prune_rebuilds path should never be entered more than
+ # once in a series of backtracking nodes (in order to
+ # avoid a backtracking loop).
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if self.need_restart():
+ # want_restart_for_use_change triggers this
+ return False, myfavorites
+
+ if "--fetchonly" not in self._frozen_config.myopts and \
+ "--buildpkgonly" in self._frozen_config.myopts:
+ graph_copy = self._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+			if not graph_copy.hasallzeros(
+				ignore_priority=DepPrioritySatisfiedRange.ignore_medium):
+ self._dynamic_config._buildpkgonly_deps_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+
+ # Any failures except those due to autounmask *alone* should return
+ # before this point, since the success_without_autounmask flag that's
+ # set below is reserved for cases where there are *zero* other
+ # problems. For reference, see backtrack_depgraph, where it skips the
+ # get_best_run() call when success_without_autounmask is True.
+
+ digraph_nodes = self._dynamic_config.digraph.nodes
+
+ if any(x in digraph_nodes for x in
+ self._dynamic_config._needed_unstable_keywords) or \
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_p_mask_changes) or \
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_use_config_changes) or \
+ any(x in digraph_nodes for x in
+			self._dynamic_config._needed_license_changes):
+			# We failed if the user needs to change the configuration.
+ self._dynamic_config._success_without_autounmask = True
+ return False, myfavorites
+
+		# We succeed at this point, unless we are missing binaries.
+ return (True, myfavorites)
+
+ def _set_args(self, args):
+ """
+ Create the "__non_set_args__" package set from atoms and packages given as
+ arguments. This method can be called multiple times if necessary.
+ The package selection cache is automatically invalidated, since
+ arguments influence package selections.
+ """
+
+ set_atoms = {}
+ non_set_atoms = {}
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.sets.setdefault('__non_set_args__',
+ InternalPackageSet(allow_repo=True)).clear()
+ depgraph_sets.atoms.clear()
+ depgraph_sets.atom_arg_map.clear()
+ set_atoms[root] = []
+ non_set_atoms[root] = []
+
+ # We don't add set args to the digraph here since that
+ # happens at a later stage and we don't want to make
+		# any state changes here that aren't reversed by
+		# another call to this method.
+ for arg in self._expand_set_args(args, add_to_digraph=False):
+ atom_arg_map = self._dynamic_config.sets[
+ arg.root_config.root].atom_arg_map
+ if isinstance(arg, SetArg):
+ atom_group = set_atoms[arg.root_config.root]
+ else:
+ atom_group = non_set_atoms[arg.root_config.root]
+
+ for atom in arg.pset.getAtoms():
+ atom_group.append(atom)
+ atom_key = (atom, arg.root_config.root)
+ refs = atom_arg_map.get(atom_key)
+ if refs is None:
+ refs = []
+ atom_arg_map[atom_key] = refs
+ if arg not in refs:
+ refs.append(arg)
+
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
+ non_set_atoms.get(root, [])))
+ depgraph_sets.sets['__non_set_args__'].update(
+ non_set_atoms.get(root, []))
+
+ # Invalidate the package selection cache, since
+ # arguments influence package selections.
+ self._dynamic_config._highest_pkg_cache.clear()
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
+ """
+ Return a list of slot atoms corresponding to installed slots that
+ differ from the slot of the highest visible match. When
+ blocker_lookahead is True, slot atoms that would trigger a blocker
+ conflict are automatically discarded, potentially allowing automatic
+ uninstallation of older slots when appropriate.
+ """
+ highest_pkg, in_graph = self._select_package(root_config.root, atom)
+ if highest_pkg is None:
+ return []
+ vardb = root_config.trees["vartree"].dbapi
+ slots = set()
+ for cpv in vardb.match(atom):
+ # don't mix new virtuals with old virtuals
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
+
+ slots.add(highest_pkg.slot)
+ if len(slots) == 1:
+ return []
+ greedy_pkgs = []
+ slots.remove(highest_pkg.slot)
+ while slots:
+ slot = slots.pop()
+ slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
+ pkg, in_graph = self._select_package(root_config.root, slot_atom)
+ if pkg is not None and \
+ pkg.cp == highest_pkg.cp and pkg < highest_pkg:
+ greedy_pkgs.append(pkg)
+ if not greedy_pkgs:
+ return []
+ if not blocker_lookahead:
+ return [pkg.slot_atom for pkg in greedy_pkgs]
+
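+		# Collect each candidate's blocker atoms from all dependency keys,
+		# so slots that would conflict with highest_pkg can be discarded.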
+ blockers = {}
+ blocker_dep_keys = Package._dep_keys
+ for pkg in greedy_pkgs + [highest_pkg]:
+ dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
+ try:
+ selected_atoms = self._select_atoms(
+ pkg.root, dep_str, self._pkg_use_enabled(pkg),
+ parent=pkg, strict=True)
+ except portage.exception.InvalidDependString:
+ continue
+ blocker_atoms = []
+ for atoms in selected_atoms.values():
+ blocker_atoms.extend(x for x in atoms if x.blocker)
+ blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
+
+ if highest_pkg not in blockers:
+ return []
+
+ # filter packages with invalid deps
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
+
+ # filter packages that conflict with highest_pkg
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
+ (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
+ blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
+
+ if not greedy_pkgs:
+ return []
+
+ # If two packages conflict, discard the lower version.
+ discard_pkgs = set()
+ greedy_pkgs.sort(reverse=True)
+ for i in range(len(greedy_pkgs) - 1):
+ pkg1 = greedy_pkgs[i]
+ if pkg1 in discard_pkgs:
+ continue
+ for j in range(i + 1, len(greedy_pkgs)):
+ pkg2 = greedy_pkgs[j]
+ if pkg2 in discard_pkgs:
+ continue
+ if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
+ blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
+ # pkg1 > pkg2
+ discard_pkgs.add(pkg2)
+
+ return [pkg.slot_atom for pkg in greedy_pkgs \
+ if pkg not in discard_pkgs]
+
+ def _select_atoms_from_graph(self, *pargs, **kwargs):
+ """
+ Prefer atoms matching packages that have already been
+ added to the graph or those that are installed and have
+ not been scheduled for replacement.
+ """
+ kwargs["trees"] = self._dynamic_config._graph_trees
+ return self._select_atoms_highest_available(*pargs,
+ **portage._native_kwargs(kwargs))
+
+ def _select_atoms_highest_available(self, root, depstring,
+ myuse=None, parent=None, strict=True, trees=None, priority=None):
+ """This will raise InvalidDependString if necessary. If trees is
+ None then self._dynamic_config._filtered_trees is used."""
+
+ if not isinstance(depstring, list):
+ eapi = None
+ is_valid_flag = None
+ if parent is not None:
+ eapi = parent.eapi
+ if not parent.installed:
+ is_valid_flag = parent.iuse.is_valid_flag
+ depstring = portage.dep.use_reduce(depstring,
+ uselist=myuse, opconvert=True, token_class=Atom,
+ is_valid_flag=is_valid_flag, eapi=eapi)
+
+ if (self._dynamic_config.myparams.get(
+ "ignore_built_slot_operator_deps", "n") == "y" and
+ parent and parent.built):
+ ignore_built_slot_operator_deps(depstring)
+
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ if trees is None:
+ trees = self._dynamic_config._filtered_trees
+ mytrees = trees[root]
+ atom_graph = digraph()
+		# Temporarily disable autounmask so that || preferences
+		# account for masking and USE settings.
+		_autounmask_backup = self._dynamic_config._autounmask
+		self._dynamic_config._autounmask = False
+		# backup state for restoration, in case of recursive
+		# calls to this method
+		backup_state = mytrees.copy()
+		try:
+			# clear state from previous call, in case this
+			# call is recursive (we have a backup, that we
+			# will use to restore it later)
+			mytrees.pop("pkg_use_enabled", None)
+			mytrees.pop("parent", None)
+			mytrees.pop("atom_graph", None)
+			mytrees.pop("priority", None)
+
+			mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+			if parent is not None:
+				mytrees["parent"] = parent
+				mytrees["atom_graph"] = atom_graph
+			if priority is not None:
+				mytrees["priority"] = priority
+
+			mycheck = portage.dep_check(depstring, None,
+				pkgsettings, myuse=myuse,
+				myroot=root, trees=trees)
+		finally:
+			# restore state
+			self._dynamic_config._autounmask = _autounmask_backup
+			mytrees.pop("pkg_use_enabled", None)
+			mytrees.pop("parent", None)
+			mytrees.pop("atom_graph", None)
+			mytrees.pop("priority", None)
+			mytrees.update(backup_state)
+ if not mycheck[0]:
+ raise portage.exception.InvalidDependString(mycheck[1])
+ if parent is None:
+ selected_atoms = mycheck[1]
+ elif parent not in atom_graph:
+ selected_atoms = {parent : mycheck[1]}
+ else:
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ if isinstance(parent.depth, int):
+ virt_depth = parent.depth + 1
+ else:
+ # The depth may be None when called via
+ # _select_atoms_probe, or it may be
+ # _UNREACHABLE_DEPTH for complete mode.
+ virt_depth = parent.depth
+
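+			# Walk atom_graph depth-first from the parent, keeping only the
+			# atoms dep_check chose, in order to rebuild the parent/child
+			# relationships of recursively traversed virtual deps.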
+ chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
+ selected_atoms = OrderedDict()
+ node_stack = [(parent, None, None)]
+ traversed_nodes = set()
+ while node_stack:
+ node, node_parent, parent_atom = node_stack.pop()
+ traversed_nodes.add(node)
+ if node is parent:
+ k = parent
+ else:
+ if node_parent is parent:
+ if priority is None:
+ node_priority = None
+ else:
+ node_priority = priority.copy()
+ else:
+ # virtuals only have runtime deps
+ node_priority = self._priority(runtime=True)
+
+ k = Dependency(atom=parent_atom,
+ blocker=parent_atom.blocker, child=node,
+ depth=virt_depth, parent=node_parent,
+ priority=node_priority, root=node.root)
+
+ child_atoms = []
+ selected_atoms[k] = child_atoms
+ for atom_node in atom_graph.child_nodes(node):
+ child_atom = atom_node[0]
+ if id(child_atom) not in chosen_atom_ids:
+ continue
+ child_atoms.append(child_atom)
+ for child_node in atom_graph.child_nodes(atom_node):
+ if child_node in traversed_nodes:
+ continue
+ if not portage.match_from_list(
+ child_atom, [child_node]):
+ # Typically this means that the atom
+ # specifies USE deps that are unsatisfied
+ # by the selected package. The caller will
+ # record this as an unsatisfied dependency
+ # when necessary.
+ continue
+ node_stack.append((child_node, node, child_atom))
+
+ return selected_atoms
+
+ def _expand_virt_from_graph(self, root, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ any_match = False
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ continue
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ any_match = True
+
+ if not any_match:
+ yield atom
+
+ def _virt_deps_visible(self, pkg, ignore_use=False):
+ """
+ Assumes pkg is a virtual package. Traverses virtual deps recursively
+ and returns True if all deps are visible, False otherwise. This is
+ useful for checking if it will be necessary to expand virtual slots,
+ for cases like bug #382557.
+ """
+ try:
+ rdepend = self._select_atoms(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, priority=self._priority(runtime=True))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ raise
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ return False
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if ignore_use:
+ atom = atom.without_use
+ pkg, existing = self._select_package(
+ pkg.root, atom)
+ if pkg is None or not self._pkg_visibility_check(pkg):
+ return False
+
+ return True
+
+ def _get_dep_chain(self, start_node, target_atom=None,
+ unsatisfied_dependency=False):
+ """
+ Returns a list of (atom, node_type) pairs that represent a dep chain.
+ If target_atom is None, the first package shown is pkg's parent.
+		If target_atom is not None, the first package shown is pkg.
+		If unsatisfied_dependency is True, the first parent selected is one
+		whose dependency is not satisfied by 'pkg'. This is needed for USE
+		changes. (Does not support target_atom.)
+ """
+ traversed_nodes = set()
+ dep_chain = []
+ node = start_node
+ child = None
+ all_parents = self._dynamic_config._parent_atoms
+ graph = self._dynamic_config.digraph
+ verbose_main_repo_display = "--verbose-main-repo-display" in \
+ self._frozen_config.myopts
+
+ def format_pkg(pkg):
+ pkg_name = "%s" % (pkg.cpv,)
+ if verbose_main_repo_display or pkg.repo != \
+ pkg.root_config.settings.repositories.mainRepo().name:
+ pkg_name += _repo_separator + pkg.repo
+ return pkg_name
+
+ if target_atom is not None and isinstance(node, Package):
+ affecting_use = set()
+ for dep_str in Package._dep_keys:
+ try:
+ affecting_use.update(extract_affecting_use(
+ node._metadata[dep_str], target_atom,
+ eapi=node.eapi))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+ affecting_use.difference_update(node.use.mask, node.use.force)
+ pkg_name = format_pkg(node)
+
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
+
+		# To build a dep chain for the given package we take
+		# "random" parents from the digraph, except for the
+		# first package, because we want a parent that forced
+		# the corresponding change (i.e. '>=foo-2' instead of 'foo').
+
+ traversed_nodes.add(start_node)
+
+ start_node_parent_atoms = {}
+ for ppkg, patom in all_parents.get(node, []):
+			# Get a list of suitable atoms. For use deps
+			# (i.e. when unsatisfied_dependency is set) we require
+			# that the start_node does not match the atom.
+ if not unsatisfied_dependency or \
+ not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
+ start_node_parent_atoms.setdefault(patom, []).append(ppkg)
+
+ if start_node_parent_atoms:
+ # If there are parents in all_parents then use one of them.
+ # If not, then this package got pulled in by an Arg and
+ # will be correctly handled by the code that handles later
+ # packages in the dep chain.
+ best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
+
+ child = node
+ for ppkg in start_node_parent_atoms[best_match]:
+ node = ppkg
+ if ppkg in self._dynamic_config._initial_arg_list:
+ # Stop if reached the top level of the dep chain.
+ break
+
+ while node is not None:
+ traversed_nodes.add(node)
+
+ if node not in graph:
+ # The parent is not in the graph due to backtracking.
+ break
+
+ elif isinstance(node, DependencyArg):
+ if graph.parent_nodes(node):
+ node_type = "set"
+ else:
+ node_type = "argument"
+ dep_chain.append(("%s" % (node,), node_type))
+
+ elif node is not start_node:
+ for ppkg, patom in all_parents[child]:
+ if ppkg == node:
+ if child is start_node and unsatisfied_dependency and \
+ InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
+ # This atom is satisfied by child, there must be another atom.
+ continue
+ atom = patom.unevaluated_atom
+ break
+
+ dep_strings = set()
+ priorities = graph.nodes[node][0].get(child)
+ if priorities is None:
+ # This edge comes from _parent_atoms and was not added to
+ # the graph, and _parent_atoms does not contain priorities.
+ for k in Package._dep_keys:
+ dep_strings.add(node._metadata[k])
+ else:
+ for priority in priorities:
+ if priority.buildtime:
+ for k in Package._buildtime_keys:
+ dep_strings.add(node._metadata[k])
+ if priority.runtime:
+ dep_strings.add(node._metadata["RDEPEND"])
+ if priority.runtime_post:
+ dep_strings.add(node._metadata["PDEPEND"])
+
+ affecting_use = set()
+ for dep_str in dep_strings:
+ try:
+ affecting_use.update(extract_affecting_use(
+ dep_str, atom, eapi=node.eapi))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+
+				# Don't show flags as 'affecting' if the user can't change them.
+ affecting_use.difference_update(node.use.mask, \
+ node.use.force)
+
+ pkg_name = format_pkg(node)
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
+ # When traversing to parents, prefer arguments over packages
+ # since arguments are root nodes. Never traverse the same
+ # package twice, in order to prevent an infinite loop.
+ child = node
+ selected_parent = None
+ parent_arg = None
+ parent_merge = None
+ parent_unsatisfied = None
+
+ for parent in self._dynamic_config.digraph.parent_nodes(node):
+ if parent in traversed_nodes:
+ continue
+ if isinstance(parent, DependencyArg):
+ parent_arg = parent
+ else:
+ if isinstance(parent, Package) and \
+ parent.operation == "merge":
+ parent_merge = parent
+ if unsatisfied_dependency and node is start_node:
+ # Make sure that pkg doesn't satisfy parent's dependency.
+ # This ensures that we select the correct parent for use
+ # flag changes.
+ for ppkg, atom in all_parents[start_node]:
+ if parent is ppkg:
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ if not atom_set.findAtomForPackage(start_node):
+ parent_unsatisfied = parent
+ break
+ else:
+ selected_parent = parent
+
+ if parent_unsatisfied is not None:
+ selected_parent = parent_unsatisfied
+ elif parent_merge is not None:
+ # Prefer parent in the merge list (bug #354747).
+ selected_parent = parent_merge
+ elif parent_arg is not None:
+ if self._dynamic_config.digraph.parent_nodes(parent_arg):
+ selected_parent = parent_arg
+ else:
+ dep_chain.append(("%s" % (parent_arg,), "argument"))
+ selected_parent = None
+
+ node = selected_parent
+ return dep_chain
+
+ def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
+ dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
+ display_list = []
+ for node, node_type in dep_chain:
+ if node_type == "argument":
+ display_list.append("required by %s (argument)" % node)
+ else:
+ display_list.append("required by %s" % node)
+
+ msg = "# " + "\n# ".join(display_list) + "\n"
+ return msg
+
+
+ def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
+ check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
+ """
+ When check_backtrack=True, no output is produced and
+ the method either returns or raises _backtrack_mask if
+ a matching package has been masked by backtracking.
+ """
+ backtrack_mask = False
+ autounmask_broke_use_dep = False
+ atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
+ allow_repo=True)
+ atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ xinfo = '"%s"' % atom.unevaluated_atom
+ if arg:
+			xinfo = '"%s"' % arg
+ if isinstance(myparent, AtomArg):
+ xinfo = '"%s"' % (myparent,)
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if root != self._frozen_config._running_root.root:
+ xinfo = "%s for %s" % (xinfo, root)
+ masked_packages = []
+ missing_use = []
+ missing_use_adjustable = set()
+ required_use_unsatisfied = []
+ masked_pkg_instances = set()
+ have_eapi_mask = False
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ root_config = self._frozen_config.roots[root]
+ portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if installed:
+ continue
+ if hasattr(db, "xmatch"):
+ cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
+ else:
+ cpv_list = db.match(atom.without_use)
+
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
+
+ # descending order
+ cpv_list.reverse()
+ for cpv in cpv_list:
+ for repo in repo_list:
+ if not db.cpv_exists(cpv, myrepo=repo):
+ continue
+
+ metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
+ built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
+ if metadata is not None and \
+ portage.eapi_is_supported(metadata["EAPI"]):
+ if not repo:
+ repo = metadata.get('repository')
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, myrepo=repo)
+ # pkg._metadata contains calculated USE for ebuilds,
+ # required later for getMissingLicenses.
+ metadata = pkg._metadata
+ if pkg.invalid:
+ # Avoid doing any operations with packages that
+ # have invalid metadata. It would be unsafe at
+ # least because it could trigger unhandled
+ # exceptions in places like check_required_use().
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+ continue
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ backtrack_reasons = \
+ self._dynamic_config._runtime_pkg_mask[pkg]
+ mreasons.append('backtracking: %s' % \
+ ', '.join(sorted(backtrack_reasons)))
+ backtrack_mask = True
+ if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ mreasons = ["exclude option"]
+ if mreasons:
+ masked_pkg_instances.add(pkg)
+ if atom.unevaluated_atom.use:
+ try:
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
+ or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
+ missing_use.append(pkg)
+ if atom_set_with_use.findAtomForPackage(pkg):
+ autounmask_broke_use_dep = True
+ if not mreasons:
+ continue
+ except InvalidAtom:
+ writemsg("violated_conditionals raised " + \
+ "InvalidAtom: '%s' parent: %s" % \
+ (atom, myparent), noiselevel=-1)
+ raise
+ if not mreasons and \
+ not pkg.built and \
+ pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
+ if not check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi):
+ required_use_unsatisfied.append(pkg)
+ continue
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ mreasons = ["need to rebuild from source"]
+ elif pkg.installed and root_slot in self._rebuild.reinstall_list:
+ mreasons = ["need to rebuild from source"]
+ elif pkg.built and not mreasons:
+ mreasons = ["use flag configuration mismatch"]
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+
+ if check_backtrack:
+ if backtrack_mask:
+ raise self._backtrack_mask()
+ else:
+ return
+
+ if check_autounmask_breakage:
+ if autounmask_broke_use_dep:
+ raise self._autounmask_breakage()
+ else:
+ return
+
+ missing_use_reasons = []
+ missing_iuse_reasons = []
+ for pkg in missing_use:
+ use = self._pkg_use_enabled(pkg)
+ missing_iuse = []
+ #Use the unevaluated atom here, because some flags might have
+ #been lost during evaluation.
+ required_flags = atom.unevaluated_atom.use.required
+ missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
+
+ mreasons = []
+ if missing_iuse:
+ mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
+ missing_iuse_reasons.append((pkg, mreasons))
+ else:
+ need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
+ need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
+
+ untouchable_flags = \
+ frozenset(chain(pkg.use.mask, pkg.use.force))
+ if any(x in untouchable_flags for x in
+ chain(need_enable, need_disable)):
+ continue
+
+ missing_use_adjustable.add(pkg)
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(pkg)
+ new_use = set(self._pkg_use_enabled(pkg))
+ for flag in need_enable:
+ new_use.add(flag)
+ for flag in need_disable:
+ new_use.discard(flag)
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
+
+ if need_enable or need_disable:
+ changes = []
+ changes.extend(colorize("red", "+" + x) \
+ for x in need_enable)
+ changes.extend(colorize("blue", "-" + x) \
+ for x in need_disable)
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ missing_use_reasons.append((pkg, mreasons))
+
+ if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
+ # Let's see if the violated use deps are conditional.
+ # If so, suggest to change them on the parent.
+
+ # If the child package is masked then a change to
+ # parent USE is not a valid solution (a normal mask
+ # message should be displayed instead).
+ if pkg in masked_pkg_instances:
+ continue
+
+ mreasons = []
+ violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
+ if not (violated_atom.use.enabled or violated_atom.use.disabled):
+ #all violated use deps are conditional
+ changes = []
+ conditional = violated_atom.use.conditional
+ involved_flags = set(chain(conditional.equal, conditional.not_equal, \
+ conditional.enabled, conditional.disabled))
+
+ untouchable_flags = \
+ frozenset(chain(myparent.use.mask, myparent.use.force))
+ if any(x in untouchable_flags for x in involved_flags):
+ continue
+
+ required_use = myparent._metadata.get("REQUIRED_USE")
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(myparent)
+ new_use = set(self._pkg_use_enabled(myparent))
+ for flag in involved_flags:
+ if flag in old_use:
+ new_use.discard(flag)
+ else:
+ new_use.add(flag)
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (myparent.cpv, \
+ human_readable_required_use(required_use))
+
+ for flag in involved_flags:
+ if flag in self._pkg_use_enabled(myparent):
+ changes.append(colorize("blue", "-" + flag))
+ else:
+ changes.append(colorize("red", "+" + flag))
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ if (myparent, mreasons) not in missing_use_reasons:
+ missing_use_reasons.append((myparent, mreasons))
+
+ unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_use_reasons if pkg not in masked_pkg_instances]
+
+ unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_iuse_reasons if pkg not in masked_pkg_instances]
+
+ show_missing_use = False
+ if unmasked_use_reasons:
+ # Only show the latest version.
+ show_missing_use = []
+ pkg_reason = None
+ parent_reason = None
+ for pkg, mreasons in unmasked_use_reasons:
+ if pkg is myparent:
+ if parent_reason is None:
+ #This happens if a use change on the parent
+ #leads to a satisfied conditional use dep.
+ parent_reason = (pkg, mreasons)
+ elif pkg_reason is None:
+ #Don't rely on the first pkg in unmasked_use_reasons
+ #being the highest version of the dependency.
+ pkg_reason = (pkg, mreasons)
+ if pkg_reason:
+ show_missing_use.append(pkg_reason)
+ if parent_reason:
+ show_missing_use.append(parent_reason)
+
+ elif unmasked_iuse_reasons:
+ masked_with_iuse = False
+ for pkg in masked_pkg_instances:
+ #Use atom.unevaluated_atom here, because some flags might have
+ #been lost during evaluation.
+ if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Package(s) with required IUSE are masked,
+ # so display a normal masking message.
+ masked_with_iuse = True
+ break
+ if not masked_with_iuse:
+ show_missing_use = unmasked_iuse_reasons
+
+ if required_use_unsatisfied:
+ # If there's a higher unmasked version in missing_use_adjustable
+ # then we want to show that instead.
+ for pkg in missing_use_adjustable:
+ if pkg not in masked_pkg_instances and \
+ pkg > required_use_unsatisfied[0]:
+ required_use_unsatisfied = False
+ break
+
+ mask_docs = False
+
+ if show_req_use is None and required_use_unsatisfied:
+ # We have an unmasked package that only requires USE adjustment
+ # in order to satisfy REQUIRED_USE, and nothing more. We assume
+ # that the user wants the latest version, so only the first
+ # instance is displayed.
+ show_req_use = required_use_unsatisfied[0]
+
+ if show_req_use is not None:
+
+ pkg = show_req_use
+ output_cpv = pkg.cpv + _repo_separator + pkg.repo
+ writemsg("\n!!! " + \
+ colorize("BAD", "The ebuild selected to satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " has unmet requirements.") + "\n",
+ noiselevel=-1)
+ use_display = pkg_use_display(pkg, self._frozen_config.myopts)
+ writemsg("- %s %s\n" % (output_cpv, use_display),
+ noiselevel=-1)
+ writemsg("\n The following REQUIRED_USE flag constraints " + \
+ "are unsatisfied:\n", noiselevel=-1)
+ reduced_noise = check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi).tounicode()
+ writemsg(" %s\n" % \
+ human_readable_required_use(reduced_noise),
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg("\n The above constraints " + \
+ "are a subset of the following complete expression:\n",
+ noiselevel=-1)
+ writemsg(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ elif show_missing_use:
+ writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ for pkg, mreasons in show_missing_use:
+ writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+
+ elif masked_packages:
+ writemsg("\n!!! " + \
+ colorize("BAD", "All ebuilds that could satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
+ writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ have_eapi_mask = show_masked_packages(masked_packages)
+ if have_eapi_mask:
+ writemsg("\n", noiselevel=-1)
+ msg = ("The current version of portage supports " + \
+ "EAPI '%s'. You must upgrade to a newer version" + \
+ " of portage before EAPI masked packages can" + \
+ " be installed.") % portage.const.EAPI
+ writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ mask_docs = True
+ else:
+ cp_exists = False
+ if not atom.cp.startswith("null/"):
+ for pkg in self._iter_match_pkgs_any(
+ root_config, Atom(atom.cp)):
+ cp_exists = True
+ break
+
+ writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ if isinstance(myparent, AtomArg) and \
+ not cp_exists and \
+ self._frozen_config.myopts.get(
+ "--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ dbs = [vardb]
+ if "--usepkgonly" not in self._frozen_config.myopts:
+ dbs.append(portdb)
+ if "--usepkg" in self._frozen_config.myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, atom)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+ msg = []
+ if not isinstance(myparent, AtomArg):
+ # It's redundant to show parent for AtomArg since
+ # it's the same as 'xinfo' displayed above.
+ dep_chain = self._get_dep_chain(myparent, atom)
+ for node, node_type in dep_chain:
+ msg.append('(dependency required by "%s" [%s])' % \
+ (colorize('INFORM', "%s" % (node)), node_type))
+
+ if msg:
+ writemsg("\n".join(msg), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ if mask_docs:
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+ for pkg in self._iter_match_pkgs(root_config,
+ pkg_type, atom, onlydeps=onlydeps):
+ yield pkg
+
+ def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
+ """
+ Iterate over Package instances of pkg_type matching the given atom.
+ This does not check visibility, and it does not match USE for
+ unbuilt ebuilds, since USE is lazily calculated after visibility
+ checks (to avoid the expense when possible).
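+
+ Illustrative example (hypothetical atom and versions): for
+ pkg_type "ebuild" and atom "dev-libs/foo", this yields a Package
+ for dev-libs/foo-2.0 before dev-libs/foo-1.0, since cp_list is
+ reversed into descending order, and yields once per repository
+ that provides a given cpv.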
+ """
+
+ db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
+ atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+ cp_list = db.cp_list(atom_exp.cp)
+ matched_something = False
+ installed = pkg_type == 'installed'
+
+ if cp_list:
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
+
+ # descending order
+ cp_list.reverse()
+ for cpv in cp_list:
+ # Call match_from_list on one cpv at a time, in order
+ # to avoid unnecessary match_from_list comparisons on
+ # versions that are never yielded from this method.
+ if not match_from_list(atom_exp, [cpv]):
+ continue
+ for repo in repo_list:
+
+ try:
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, onlydeps=onlydeps, myrepo=repo)
+ except portage.exception.PackageNotFound:
+ pass
+ else:
+ # A cpv can be returned from dbapi.match() as an
+ # old-style virtual match even in cases when the
+ # package does not actually PROVIDE the virtual.
+ # Filter out any such false matches here.
+
+ # Make sure that cpv from the current repo satisfies the atom.
+ # This might not be the case if there are several repos with
+ # the same cpv, but different metadata keys, like SLOT.
+ # Also, parts of the match that require metadata access
+ # are deferred until we have cached the metadata in a
+ # Package instance.
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ matched_something = True
+ yield pkg
+
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ if not matched_something and installed and \
+ atom.slot is not None and not atom.slot_operator_built:
+
+ if "remove" in self._dynamic_config.myparams:
+ # We need to search the portdbapi, which is not in our
+ # normal dbs list, in order to find the real SLOT.
+ portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs = [(portdb, "ebuild", False, False, db_keys)]
+ else:
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+ cp_list = db.cp_list(atom_exp.cp)
+ if cp_list:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom.without_slot,), allow_repo=True)
+ atom_exp_without_slot = atom_exp.without_slot
+ cp_list.reverse()
+ for cpv in cp_list:
+ if not match_from_list(atom_exp_without_slot, [cpv]):
+ continue
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in dbs:
+ try:
+ if portage.dep._match_slot(atom,
+ other_db._pkg_str(_unicode(cpv), None)):
+ slot_available = True
+ break
+ except (KeyError, InvalidData):
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo=atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if atom_set.findAtomForPackage(inst_pkg):
+ yield inst_pkg
+ return
+
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
+ cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
+ ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
+ if ret is not None:
+ return ret
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
+ self._dynamic_config._highest_pkg_cache[cache_key] = ret
+ pkg, existing = ret
+ if pkg is not None:
+ if self._pkg_visibility_check(pkg) and \
+ not (pkg.installed and pkg.masks):
+ self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
+ return ret
+
+ def _want_installed_pkg(self, pkg):
+ """
+ Given an installed package returned from select_pkg, return
+ True if the user has not explicitly requested that this package
+ be replaced (typically via an atom on the command line).
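+
+ Illustrative behavior (paraphrasing the code below): a package
+ matched only by excluded_pkgs returns True immediately; a package
+ matched by an argument whose force_reinstall flag is set returns
+ False; otherwise "selective" mode returns True, and failing that
+ the result is True only when no argument atom matched at all.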
+ """
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return True
+
+ arg = False
+ try:
+ for arg, atom in self._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ return False
+ except InvalidDependString:
+ pass
+
+ if "selective" in self._dynamic_config.myparams:
+ return True
+
+ return not arg
+
+ def _want_update_pkg(self, parent, pkg):
+
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return False
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ depth = parent.depth or 0
+ if isinstance(depth, int):
+ depth += 1
+
+ if arg_atoms:
+ for arg, atom in arg_atoms:
+ if arg.reset_depth:
+ depth = 0
+ break
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ return (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
+ def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
+ try:
+ pkg_eb = self._pkg(
+ pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ pkg_eb_visible = False
+ for pkg_eb in self._iter_match_pkgs(pkg.root_config,
+ "ebuild", Atom("=%s" % (pkg.cpv,))):
+ if self._pkg_visibility_check(pkg_eb, autounmask_level):
+ pkg_eb_visible = True
+ break
+ if not pkg_eb_visible:
+ return False
+ else:
+ if not self._pkg_visibility_check(pkg_eb, autounmask_level):
+ return False
+
+ return True
+
+ def _equiv_binary_installed(self, pkg):
+ build_time = pkg.build_time
+ if not build_time:
+ return False
+
+ try:
+ inst_pkg = self._pkg(pkg.cpv, "installed",
+ pkg.root_config, installed=True)
+ except PackageNotFound:
+ return False
+
+ return build_time == inst_pkg.build_time
+
+ class _AutounmaskLevel(object):
+ __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
+ "allow_missing_keywords", "allow_unmasks")
+
+ def __init__(self):
+ self.allow_use_changes = False
+ self.allow_license_changes = False
+ self.allow_unstable_keywords = False
+ self.allow_missing_keywords = False
+ self.allow_unmasks = False
+
+ def _autounmask_levels(self):
+ """
+ Iterate over the different allowed things to unmask.
+
+ 0. USE
+ 1. USE + license
+ 2. USE + ~arch + license
+ 3. USE + ~arch + license + missing keywords
+ 4. USE + license + masks
+ 5. USE + ~arch + license + masks
+ 6. USE + ~arch + license + missing keywords + masks
+
+ Some thoughts:
+ * Do the least invasive changes first.
+ * Try unmasking alone before unmasking + missing keywords
+ to avoid -9999 versions if possible
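+
+ Illustrative walk-through (hypothetical packages): a package
+ masked only by ~arch keywords becomes acceptable at level 2,
+ while one masked by both package.mask and missing keywords is
+ not acceptable before level 6.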
+ """
+
+ if self._dynamic_config._autounmask is not True:
+ return
+
+ autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
+ autounmask_level = self._AutounmaskLevel()
+
+ autounmask_level.allow_use_changes = True
+ yield autounmask_level
+
+ autounmask_level.allow_license_changes = True
+ yield autounmask_level
+
+ autounmask_level.allow_unstable_keywords = True
+ yield autounmask_level
+
+ if not autounmask_keep_masks:
+
+ autounmask_level.allow_missing_keywords = True
+ yield autounmask_level
+
+ # 4. USE + license + masks
+ # Try to respect keywords while discarding
+ # package.mask (see bug #463394).
+ autounmask_level.allow_unstable_keywords = False
+ autounmask_level.allow_missing_keywords = False
+ autounmask_level.allow_unmasks = True
+ yield autounmask_level
+
+ autounmask_level.allow_unstable_keywords = True
+
+ for missing_keyword, unmask in ((False, True), (True, True)):
+
+ autounmask_level.allow_missing_keywords = missing_keyword
+ autounmask_level.allow_unmasks = unmask
+
+ yield autounmask_level
+
+
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps, parent=parent)
+
+ default_selection = (pkg, existing)
+
+ if self._dynamic_config._autounmask is True:
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
+ # Temporarily reset _need_restart state, in order to
+ # avoid interference as reported in bug #459832.
+ earlier_need_restart = self._dynamic_config._need_restart
+ self._dynamic_config._need_restart = False
+ try:
+ for autounmask_level in self._autounmask_levels():
+ if pkg is not None:
+ break
+
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level, parent=parent)
+
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
+ if self._dynamic_config._need_restart:
+ return None, None
+ finally:
+ if earlier_need_restart:
+ self._dynamic_config._need_restart = True
+
+ if pkg is None:
+ # This ensures that we can fall back to an installed package
+ # that may have been rejected in the autounmask path above.
+ return default_selection
+
+ return pkg, existing
+
+ def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
+
+ if pkg.visible:
+ return True
+
+ if trust_graph and pkg in self._dynamic_config.digraph:
+ # Sometimes we need to temporarily disable
+ # dynamic_config._autounmask, but for overall
+ # consistency in dependency resolution, in most
+ # cases we want to treat packages in the graph
+ # as though they are visible.
+ return True
+
+ if not self._dynamic_config._autounmask or autounmask_level is None:
+ return False
+
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ root_config = self._frozen_config.roots[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+ missing_licenses = None
+ masked_by_something_else = False
+ masked_by_p_mask = False
+
+ for reason in mreasons:
+ hint = reason.unmask_hint
+
+ if hint is None:
+ masked_by_something_else = True
+ elif hint.key == "unstable keyword":
+ masked_by_unstable_keywords = True
+ if hint.value == "**":
+ masked_by_missing_keywords = True
+ elif hint.key == "p_mask":
+ masked_by_p_mask = True
+ elif hint.key == "license":
+ missing_licenses = hint.value
+ else:
+ masked_by_something_else = True
+
+ if masked_by_something_else:
+ return False
+
+ if pkg in self._dynamic_config._needed_unstable_keywords:
+ #If the package is already keyworded, remove the mask.
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+
+ if pkg in self._dynamic_config._needed_p_mask_changes:
+ #If a package.mask change is already planned for this package, remove the mask.
+ masked_by_p_mask = False
+
+ if missing_licenses:
+ #If the needed licenses are already unmasked, remove the mask.
+ missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
+
+ if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
+ #Package has already been unmasked.
+ return True
+
+ if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
+ (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
+ (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
+ (missing_licenses and not autounmask_level.allow_license_changes):
+ #We are not allowed to do the needed changes.
+ return False
+
+ if masked_by_unstable_keywords:
+ self._dynamic_config._needed_unstable_keywords.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
+ backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
+
+ if masked_by_p_mask:
+ self._dynamic_config._needed_p_mask_changes.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
+ backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
+
+ if missing_licenses:
+ self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_license_changes", set())
+ backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
+
+ return True
+
+ def _pkg_use_enabled(self, pkg, target_use=None):
+ """
+ If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
+ If target_use is given, the needed changes are computed to make the package usable.
+ Example: target_use = { "foo": True, "bar": False }
+ The flags target_use must be in the pkg's IUSE.
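+
+ Example (hypothetical): with pkg.use.enabled == {"bar"} and
+ target_use == {"foo": True, "bar": False}, the returned set is
+ {"foo"}, and the change is recorded in
+ _needed_use_config_changes, unless masked/forced flags or
+ REQUIRED_USE forbid it, in which case the old USE is returned.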
+ """
+ if pkg.built:
+ return pkg.use.enabled
+ needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
+
+ if target_use is None:
+ if needed_use_config_change is None:
+ return pkg.use.enabled
+ else:
+ return needed_use_config_change[0]
+
+ if needed_use_config_change is not None:
+ old_use = needed_use_config_change[0]
+ new_use = set()
+ old_changes = needed_use_config_change[1]
+ new_changes = old_changes.copy()
+ else:
+ old_use = pkg.use.enabled
+ new_use = set()
+ old_changes = {}
+ new_changes = {}
+
+ for flag, state in target_use.items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
+ if state:
+ if real_flag not in old_use:
+ if new_changes.get(real_flag) == False:
+ return old_use
+ new_changes[real_flag] = True
+ new_use.add(flag)
+ else:
+ if real_flag in old_use:
+ if new_changes.get(real_flag) == True:
+ return old_use
+ new_changes[real_flag] = False
+ new_use.update(old_use.difference(target_use))
+
+ def want_restart_for_use_change(pkg, new_use):
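+ # A restart is warranted when the USE change rewires the graph:
+ # either this package's own dependency strings (or LICENSE)
+ # reduce differently under the new USE set, or a parent's
+ # USE-conditional dep references one of the changed flags.
+ # Hypothetical sketch: toggling "ssl" on a package whose RDEPEND
+ # contains "ssl? ( dev-libs/openssl )" changes the use_reduce()
+ # result, so this returns True.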
+ if pkg not in self._dynamic_config.digraph.nodes:
+ return False
+
+ for key in Package._dep_keys + ("LICENSE",):
+ dep = pkg._metadata[key]
+ old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+ new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+
+ if old_val != new_val:
+ return True
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ return False
+
+ new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
+ for ppkg, atom in parent_atoms:
+ if not atom.use or \
+ not any(x in atom.use.required for x in changes):
+ continue
+ else:
+ return True
+
+ return False
+
+ if new_changes != old_changes:
+ #Don't do the change if it violates REQUIRED_USE.
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
+ return old_use
+
+ if any(x in pkg.use.mask for x in new_changes) or \
+ any(x in pkg.use.force for x in new_changes):
+ return old_use
+
+ self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_use_config_changes", [])
+ backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
+ if want_restart_for_use_change(pkg, new_use):
+ self._dynamic_config._need_restart = True
+ return new_use
+
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
+ root_config = self._frozen_config.roots[root]
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ # List of acceptable packages, ordered by type preference.
+ matched_packages = []
+ highest_version = None
+ if not isinstance(atom, portage.dep.Atom):
+ atom = portage.dep.Atom(atom)
+ atom_cp = atom.cp
+ have_new_virt = atom_cp.startswith("virtual/") and \
+ self._have_new_virt(root, atom_cp)
+ atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
+ existing_node = None
+ myeb = None
+ rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
+ usepkg = "--usepkg" in self._frozen_config.myopts
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ empty = "empty" in self._dynamic_config.myparams
+ selective = "selective" in self._dynamic_config.myparams
+ reinstall = False
+ avoid_update = "--update" not in self._frozen_config.myopts
+ dont_miss_updates = "--update" in self._frozen_config.myopts
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ usepkg_exclude = self._frozen_config.usepkg_exclude
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ matched_oldpkg = []
+ # Behavior of the "selective" parameter depends on
+ # whether or not a package matches an argument atom.
+ # If an installed package provides an old-style
+ # virtual that is no longer provided by an available
+ # package, the installed package may match an argument
+ # atom even though none of the available packages do.
+ # Therefore, "selective" logic does not consider
+ # whether or not an installed package matches an
+ # argument atom. It only considers whether or not
+ # available packages match argument atoms, which is
+ # represented by the found_available_arg flag.
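+ # Hypothetical illustration: if only an installed package still
+ # PROVIDEs an old-style virtual named on the command line, no
+ # available package matches the argument atom, so
+ # found_available_arg stays False and the installed match alone
+ # does not defeat "selective" behavior.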
+ found_available_arg = False
+ packages_with_invalid_use_config = []
+ for find_existing_node in True, False:
+ if existing_node:
+ break
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if existing_node:
+ break
+ if installed and not find_existing_node:
+ want_reinstall = reinstall or empty or \
+ (found_available_arg and not selective)
+ if want_reinstall and matched_packages:
+ continue
+
+ # Ignore USE deps for the initial match since we want to
+ # ensure that updates aren't missed solely due to the user's
+ # USE configuration.
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
+ onlydeps=onlydeps):
+ if pkg.cp != atom_cp and have_new_virt:
+ # pull in a new-style virtual instead
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ # The package has been masked by the backtracking logic
+ continue
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ continue
+ if (pkg.installed and
+ root_slot in self._rebuild.reinstall_list):
+ continue
+
+ if not pkg.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ break
+
+ useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg))
+
+ if packages_with_invalid_use_config and (not built or not useoldpkg) and \
+ (not pkg.installed or dont_miss_updates):
+ # Check if a higher version was rejected due to user
+ # USE configuration. The packages_with_invalid_use_config
+ # list only contains unbuilt ebuilds since USE can't
+ # be changed for built packages.
+ higher_version_rejected = False
+ repo_priority = pkg.repo_priority
+ for rejected in packages_with_invalid_use_config:
+ if rejected.cp != pkg.cp:
+ continue
+ if rejected > pkg:
+ higher_version_rejected = True
+ break
+ if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
+ # If version is identical then compare
+ # repo priority (see bug #350254).
+ rej_repo_priority = rejected.repo_priority
+ if rej_repo_priority is not None and \
+ (repo_priority is None or
+ rej_repo_priority > repo_priority):
+ higher_version_rejected = True
+ break
+ if higher_version_rejected:
+ continue
+
+ cpv = pkg.cpv
+ reinstall_for_flags = None
+
+ if not pkg.installed or \
+ (matched_packages and not avoid_update):
+ # Only enforce visibility on installed packages
+ # if there is at least one other visible package
+ # available. By filtering installed masked packages
+ # here, packages that have been masked since they
+ # were installed can be automatically downgraded
+ # to an unmasked version. NOTE: This code needs to
+ # be consistent with masking behavior inside
+ # _dep_check_composite_db, in order to prevent
+ # incorrect choices in || deps like bug #351828.
+
+ if not self._pkg_visibility_check(pkg, autounmask_level):
+ continue
+
+ # Enable upgrade or downgrade to a version
+ # with visible KEYWORDS when the installed
+ # version is masked by KEYWORDS, but never
+ # reinstall the same exact version only due
+ # to a KEYWORDS mask. See bug #252167.
+
+ if pkg.type_name != "ebuild" and matched_packages:
+ # Don't re-install a binary package that is
+ # identical to the currently installed package
+ # (see bug #354441).
+ identical_binary = False
+ if usepkg and pkg.installed:
+ for selected_pkg in matched_packages:
+ if selected_pkg.type_name == "binary" and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == \
+ pkg.build_time:
+ identical_binary = True
+ break
+
+ if not identical_binary:
+ # If the ebuild no longer exists or its
+ # keywords have been dropped, reject built
+ # instances (installed or binary).
+ # If --usepkgonly is enabled, assume that
+ # the ebuild status should be ignored.
+ if not use_ebuild_visibility and (usepkgonly or useoldpkg):
+ if pkg.installed and pkg.masks:
+ continue
+ elif not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
+ continue
+
+ # Calculation of USE for unbuilt ebuilds is relatively
+ # expensive, so it is only performed lazily, after the
+ # above visibility checks are complete.
+
+ myarg = None
+ try:
+ for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
+ if myarg.force_reinstall:
+ reinstall = True
+ break
+ except InvalidDependString:
+ if not installed:
+ # masked by corruption
+ continue
+ if not installed and myarg:
+ found_available_arg = True
+
+ if atom.unevaluated_atom.use:
+ #Make sure we don't miss a 'missing IUSE'.
+ if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Don't add this to packages_with_invalid_use_config
+ # since IUSE cannot be adjusted by the user.
+ continue
+
+ if atom.use:
+
+ if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
+ target_use = {}
+ for flag in atom.use.enabled:
+ target_use[flag] = True
+ for flag in atom.use.disabled:
+ target_use[flag] = False
+ use = self._pkg_use_enabled(pkg, target_use)
+ else:
+ use = self._pkg_use_enabled(pkg)
+
+ use_match = True
+ can_adjust_use = not pkg.built
+ is_valid_flag = pkg.iuse.is_valid_flag
+ missing_enabled = frozenset(x for x in
+ atom.use.missing_enabled if not is_valid_flag(x))
+ missing_disabled = frozenset(x for x in
+ atom.use.missing_disabled if not is_valid_flag(x))
+
+ if atom.use.enabled:
+ if any(x in atom.use.enabled for x in missing_disabled):
+ use_match = False
+ can_adjust_use = False
+ need_enabled = atom.use.enabled.difference(use)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ use_match = False
+ if can_adjust_use:
+ if any(x in pkg.use.mask for x in need_enabled):
+ can_adjust_use = False
+
+ if atom.use.disabled:
+ if any(x in atom.use.disabled for x in missing_enabled):
+ use_match = False
+ can_adjust_use = False
+ need_disabled = atom.use.disabled.intersection(use)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ use_match = False
+ if can_adjust_use:
+ if any(x in pkg.use.force and x not in
+ pkg.use.mask for x in need_disabled):
+ can_adjust_use = False
+
+ if not use_match:
+ if can_adjust_use:
+ # Above we must ensure that this package has
+ # absolutely no use.force, use.mask, or IUSE
+ # issues that the user typically can't make
+ # adjustments to solve (see bug #345979).
+ # FIXME: Conditional USE deps complicate
+ # issues. This code currently excludes cases
+ # in which the user can adjust the parent
+ # package's USE in order to satisfy the dep.
+ packages_with_invalid_use_config.append(pkg)
+ continue
+
+ if pkg.cp == atom_cp:
+ if highest_version is None:
+ highest_version = pkg
+ elif pkg > highest_version:
+ highest_version = pkg
+ # At this point, we've found the highest visible
+ # match from the current repo. Any lower versions
+ # from this repo are ignored, so the loop will
+ # always end with a break statement below this
+ # point.
+ if find_existing_node:
+ e_pkg = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
+ if not e_pkg:
+ break
+
+ # Use PackageSet.findAtomForPackage()
+ # for PROVIDE support.
+ if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
+ if highest_version and \
+ e_pkg.cp == atom_cp and \
+ e_pkg < highest_version and \
+ e_pkg.slot_atom != highest_version.slot_atom:
+ # There is a higher version available in a
+ # different slot, so this existing node is
+ # irrelevant.
+ pass
+ else:
+ matched_packages.append(e_pkg)
+ existing_node = e_pkg
+ break
+ # Compare built package to current config and
+ # reject the built package if necessary.
+ reinstall_use = ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts)
+ respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
+ if built and not useoldpkg and \
+ (not installed or matched_packages) and \
+ not (installed and
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg))):
+ if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
+ break
+ elif reinstall_use or (not installed and respect_use):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ pkgsettings.setcpv(myeb)
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set()
+ forced_flags.update(pkgsettings.useforce)
+ forced_flags.update(pkgsettings.usemask)
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ break
+ # Compare current config to installed package
+ # and do not reinstall if possible.
+ if not installed and not useoldpkg and cpv in vardb.match(atom):
+ inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
+ if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
+ reinstall = True
+ elif reinstall_use:
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(pkg,
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
+ if reinstall_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ reinstall = True
+ if not built:
+ myeb = pkg
+ elif useoldpkg:
+ matched_oldpkg.append(pkg)
+ matched_packages.append(pkg)
+ if reinstall_for_flags:
+ self._dynamic_config._reinstall_nodes[pkg] = \
+ reinstall_for_flags
+ break
+
+ if not matched_packages:
+ return None, None
+
+ if "--debug" in self._frozen_config.myopts:
+ for pkg in matched_packages:
+ portage.writemsg("%s %s%s%s\n" % \
+ ((pkg.type_name + ":").rjust(10),
+ pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
+
+ # Filter out any old-style virtual matches if they are
+ # mixed with new-style virtual matches.
+ cp = atom.cp
+ if len(matched_packages) > 1 and \
+ "virtual" == portage.catsplit(cp)[0]:
+ for pkg in matched_packages:
+ if pkg.cp != cp:
+ continue
+ # Got a new-style virtual, so filter
+ # out any old-style virtuals.
+ matched_packages = [pkg for pkg in matched_packages \
+ if pkg.cp == cp]
+ break
+
+ if existing_node is not None and \
+ existing_node in matched_packages:
+ return existing_node, existing_node
+
+ if len(matched_packages) > 1:
+ if parent is not None and \
+ (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
+ # We're forcing a rebuild of the parent because we missed an
+ # update due to a slot operator dep.
+ if atom.slot_operator == "=" and atom.sub_slot is None:
+ # This one is a slot operator dep. Exclude the installed packages if a newer non-installed
+ # pkg exists.
+ highest_installed = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ if highest_installed is None or pkg.version > highest_installed.version:
+ highest_installed = pkg
+
+ if highest_installed:
+ non_installed = [pkg for pkg in matched_packages \
+ if not pkg.installed and pkg.version > highest_installed.version]
+
+ if non_installed:
+ matched_packages = non_installed
+
+ if rebuilt_binaries:
+ inst_pkg = None
+ built_pkg = None
+ unbuilt_pkg = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ inst_pkg = pkg
+ elif pkg.built:
+ built_pkg = pkg
+ else:
+ if unbuilt_pkg is None or pkg > unbuilt_pkg:
+ unbuilt_pkg = pkg
+ if built_pkg is not None and inst_pkg is not None:
+ # Only reinstall if binary package BUILD_TIME is
+ # non-empty, in order to avoid cases like
+ # bug #306659 where BUILD_TIME fields are missing
+ # in local and/or remote Packages file.
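+ # Hypothetical numbers: with installed BUILD_TIME 1400000000,
+ # binary BUILD_TIME 1409000000, and
+ # --rebuilt-binaries-timestamp 1405000000, the binary is newer
+ # than both thresholds and is returned below.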
+ built_timestamp = built_pkg.build_time
+ installed_timestamp = inst_pkg.build_time
+
+ if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
+ pass
+ elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
+ minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
+ if built_timestamp and \
+ built_timestamp > installed_timestamp and \
+ built_timestamp >= minimal_timestamp:
+ return built_pkg, existing_node
+ else:
+ #Don't care if the binary has an older BUILD_TIME than the installed
+ #package. This is for closely tracking a binhost.
+ #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
+ #pulled in here.
+ if built_timestamp and \
+ built_timestamp != installed_timestamp:
+ return built_pkg, existing_node
+
+ for pkg in matched_packages:
+ if pkg.installed and pkg.invalid:
+ matched_packages = [x for x in \
+ matched_packages if x is not pkg]
+
+ if avoid_update:
+ for pkg in matched_packages:
+ if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
+ return pkg, existing_node
+
+ visible_matches = []
+ if matched_oldpkg:
+ visible_matches = [pkg.cpv for pkg in matched_oldpkg \
+ if self._pkg_visibility_check(pkg, autounmask_level)]
+ if not visible_matches:
+ visible_matches = [pkg.cpv for pkg in matched_packages \
+ if self._pkg_visibility_check(pkg, autounmask_level)]
+ if visible_matches:
+ bestmatch = portage.best(visible_matches)
+ else:
+ # all are masked, so ignore visibility
+ bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
+ matched_packages = [pkg for pkg in matched_packages \
+ if portage.dep.cpvequal(pkg.cpv, bestmatch)]
+
+ # ordered by type preference ("ebuild" type is the last resort)
+ return matched_packages[-1], existing_node
+
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
+ """
+ Select packages that have already been added to the graph or
+ those that are installed and have not been scheduled for
+ replacement.
+ """
+ graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
+ matches = graph_db.match_pkgs(atom)
+ if not matches:
+ return None, None
+
+ # There may be multiple matches, and they may
+ # conflict with each other, so choose the highest
+ # version that has already been added to the graph.
+ for pkg in reversed(matches):
+ if pkg in self._dynamic_config.digraph:
+ return pkg, pkg
+
+ # Fall back to installed packages
+ return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
+ """
+ Select packages that are installed.
+ """
+ matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
+ "installed", atom))
+ if not matches:
+ return None, None
+ if len(matches) > 1:
+ matches.reverse() # ascending order
+ unmasked = [pkg for pkg in matches if \
+ self._pkg_visibility_check(pkg)]
+ if unmasked:
+ if len(unmasked) == 1:
+ matches = unmasked
+ else:
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ unmasked = [pkg for pkg in matches if not pkg.masks]
+ if unmasked:
+ matches = unmasked
+ if len(matches) > 1:
+ # Now account for packages for which existing
+ # ebuilds are masked or unavailable (bug #445506).
+ unmasked = [pkg for pkg in matches if
+ self._equiv_ebuild_visible(pkg)]
+ if unmasked:
+ matches = unmasked
+
+ pkg = matches[-1] # highest match
+ in_graph = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
+ return pkg, in_graph
+
+ def _complete_graph(self, required_sets=None):
+ """
+ Add any deep dependencies of required sets (args, system, world) that
+ have not been pulled into the graph yet. This ensures that the graph
+ is consistent such that initially satisfied deep dependencies are not
+ broken in the new graph. Initially unsatisfied dependencies are
+ irrelevant since we only want to avoid breaking dependencies that are
+ initially satisfied.
+
+ Since this method can consume enough time to disturb users, it is
+ currently only enabled by the --complete-graph option.
+
+ @param required_sets: contains required sets (currently only used
+ for depclean and prune removal operations)
+ @type required_sets: dict
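+ @rtype: int
+ @return: 1 on success, 0 if graph creation fails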
+ """
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "recurse" not in self._dynamic_config.myparams:
+ return 1
+
+ complete_if_new_use = self._dynamic_config.myparams.get(
+ "complete_if_new_use", "y") == "y"
+ complete_if_new_ver = self._dynamic_config.myparams.get(
+ "complete_if_new_ver", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
+
+ if "complete" not in self._dynamic_config.myparams and \
+ (complete_if_new_use or
+ complete_if_new_ver or complete_if_new_slot):
+ # Enable complete mode if an installed package will change somehow.
+ use_change = False
+ version_change = False
+ for node in self._dynamic_config.digraph:
+ if not isinstance(node, Package) or \
+ node.operation != "merge":
+ continue
+ vardb = self._frozen_config.roots[
+ node.root].trees["vartree"].dbapi
+
+ if complete_if_new_use or complete_if_new_ver:
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg and inst_pkg[0].cp == node.cp:
+ inst_pkg = inst_pkg[0]
+ if complete_if_new_ver:
+ if inst_pkg < node or node < inst_pkg:
+ version_change = True
+ break
+ elif not (inst_pkg.slot == node.slot and
+ inst_pkg.sub_slot == node.sub_slot):
+ # slot/sub-slot change without revbump gets
+ # similar treatment to a version change
+ version_change = True
+ break
+
+ # Intersect enabled USE with IUSE, in order to
+ # ignore forced USE from implicit IUSE flags, since
+ # they're probably irrelevant and they are sensitive
+ # to use.mask/force changes in the profile.
+ if complete_if_new_use and \
+ (node.iuse.all != inst_pkg.iuse.all or
+ self._pkg_use_enabled(node).intersection(node.iuse.all) !=
+ self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
+ use_change = True
+ break
+
+ if complete_if_new_slot:
+ cp_list = vardb.match_pkgs(Atom(node.cp))
+ if (cp_list and cp_list[0].cp == node.cp and
+ not any(node.slot == pkg.slot and
+ node.sub_slot == pkg.sub_slot for pkg in cp_list)):
+ version_change = True
+ break
+
+ if use_change or version_change:
+ self._dynamic_config.myparams["complete"] = True
+
+ if "complete" not in self._dynamic_config.myparams:
+ return 1
+
+ self._load_vdb()
+
+ # Put the depgraph into a mode that causes it to only
+ # select packages that have already been added to the
+ # graph or those that are installed and have not been
+ # scheduled for replacement. Also, toggle the "deep"
+ # parameter so that all dependencies are traversed and
+ # accounted for.
+ self._dynamic_config._complete_mode = True
+ self._select_atoms = self._select_atoms_from_graph
+ if "remove" in self._dynamic_config.myparams:
+ self._select_package = self._select_pkg_from_installed
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config._traverse_ignored_deps = True
+ already_deep = self._dynamic_config.myparams.get("deep") is True
+ if not already_deep:
+ self._dynamic_config.myparams["deep"] = True
+
+ # Invalidate the package selection cache, since
+ # _select_package has just changed implementations.
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ args = self._dynamic_config._initial_arg_list[:]
+ for root in self._frozen_config.roots:
+ if root != self._frozen_config.target_root and \
+ ("remove" in self._dynamic_config.myparams or
+ self._frozen_config.myopts.get("--root-deps") is not None):
+ # Only pull in deps for the relevant root.
+ continue
+ depgraph_sets = self._dynamic_config.sets[root]
+ required_set_names = self._frozen_config._required_set_names.copy()
+ remaining_args = required_set_names.copy()
+ if required_sets is None or root not in required_sets:
+ pass
+ else:
+ # Removal actions may override sets with temporary
+ # replacements that have had atoms removed in order
+ # to implement --deselect behavior.
+ required_set_names = set(required_sets[root])
+ depgraph_sets.sets.clear()
+ depgraph_sets.sets.update(required_sets[root])
+ if "remove" not in self._dynamic_config.myparams and \
+ root == self._frozen_config.target_root and \
+ already_deep:
+ remaining_args.difference_update(depgraph_sets.sets)
+ if not remaining_args and \
+ not self._dynamic_config._ignored_deps and \
+ not self._dynamic_config._dep_stack:
+ continue
+ root_config = self._frozen_config.roots[root]
+ for s in required_set_names:
+ pset = depgraph_sets.sets.get(s)
+ if pset is None:
+ pset = root_config.sets[s]
+ atom = SETPREFIX + s
+ args.append(SetArg(arg=atom, pset=pset,
+ reset_depth=False, root_config=root_config))
+
+ self._set_args(args)
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._dynamic_config._dep_stack.append(
+ Dependency(atom=atom, root=arg.root_config.root,
+ parent=arg, depth=self._UNREACHABLE_DEPTH))
+
+ if True:
+ if self._dynamic_config._ignored_deps:
+ self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
+ self._dynamic_config._ignored_deps = []
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ # Check the unsatisfied deps to see if any initially satisfied deps
+ # will become unsatisfied due to an upgrade. Initially unsatisfied
+ # deps are irrelevant since we only want to avoid breaking deps
+ # that are initially satisfied.
+ while self._dynamic_config._unsatisfied_deps:
+ dep = self._dynamic_config._unsatisfied_deps.pop()
+ vardb = self._frozen_config.roots[
+ dep.root].trees["vartree"].dbapi
+ matches = vardb.match_pkgs(dep.atom)
+ if not matches:
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ continue
+ # A scheduled installation broke a deep dependency.
+ # Add the installed package to the graph so that it
+ # will be appropriately reported as a slot collision
+ # (possibly solvable via backtracking).
+ pkg = matches[-1] # highest match
+ if not self._add_pkg(pkg, dep):
+ return 0
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ return 1
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ onlydeps=False, myrepo = None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises PackageNotFound from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
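+
+ Illustrative note (restating the code below): repeated calls with
+ the same hash key (cpv, type_name, repo, root, installed,
+ onlydeps) return the cached Package, so metadata is fetched from
+ the dbapi at most once per key.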
+ """
+
+ # Ensure that we use the specially optimized RootConfig instance
+ # that refers to FakeVartree instead of the real vartree.
+ root_config = self._frozen_config.roots[root_config.root]
+ pkg = self._frozen_config._pkg_cache.get(
+ Package._gen_hash_key(cpv=cpv, type_name=type_name,
+ repo_name=myrepo, root_config=root_config,
+ installed=installed, onlydeps=onlydeps))
+ if pkg is None and onlydeps and not installed:
+ # Maybe it already got pulled in as a "merge" node.
+ for candidate in self._dynamic_config._package_tracker.match(
+ root_config.root, Atom("="+cpv)):
+ if candidate.type_name == type_name and \
+ candidate.repo_name == myrepo and \
+ candidate.root_config is root_config and \
+ candidate.installed == installed and \
+ not candidate.onlydeps:
+ pkg = candidate
+
+ if pkg is None:
+ tree_type = self.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self._frozen_config._trees_orig[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+
+ try:
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+
+ pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
+ installed=installed, metadata=metadata, onlydeps=onlydeps,
+ root_config=root_config, type_name=type_name)
+
+ self._frozen_config._pkg_cache[pkg] = pkg
+
+ if not self._pkg_visibility_check(pkg) and \
+ 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
+ slot_key = (pkg.root, pkg.slot_atom)
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is None or pkg > other_pkg:
+ self._frozen_config._highest_license_masked[slot_key] = pkg
+
+ return pkg
+
+ def _validate_blockers(self):
+ """Remove any blockers from the digraph that do not match any of the
+ packages within the graph. If necessary, create hard deps to ensure
+ correct merge order such that mutually blocking packages are never
+ installed simultaneously. Also add runtime blockers from all installed
+ packages if any of them haven't been added already (bug 128809)."""
+
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "--nodeps" in self._frozen_config.myopts:
+ return True
+
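+ # Rough shape of what follows (a summary of existing behavior):
+ # 1. for every installed package, collect its runtime blocker
+ # atoms, re-using graph data or the on-disk BlockerCache
+ # when the vdb counter still matches;
+ # 2. map each blocker atom to the packages it blocks in both
+ # the initial (vartree) and final (package tracker) states;
+ # 3. discard blockers that match nothing, and keep the rest
+ # for enforcement as ordering constraints.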
+ if True:
+ # Pull in blockers from all installed packages that haven't already
+ # been pulled into the depgraph, in order to ensure that they are
+ # respected (bug 128809). Due to the performance penalty that is
+ # incurred by all the additional dep_check calls that are required,
+ # blockers returned from dep_check are cached on disk by the
+ # BlockerCache class.
+
+ # For installed packages, always ignore blockers from DEPEND since
+ # only runtime dependencies should be relevant for packages that
+ # are already built.
+ dep_keys = Package._runtime_keys
+ for myroot in self._frozen_config.trees:
+
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
+ vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ root_config = self._frozen_config.roots[myroot]
+ final_db = PackageTrackerDbapiWrapper(
+ myroot, self._dynamic_config._package_tracker)
+
+ blocker_cache = BlockerCache(myroot, vardb)
+ stale_cache = set(blocker_cache)
+ for pkg in vardb:
+ cpv = pkg.cpv
+ stale_cache.discard(cpv)
+ pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
+ pkg_deps_added = \
+ pkg in self._dynamic_config._traversed_pkg_deps
+
+ # Check for masked installed packages. Only warn about
+ # packages that are in the graph in order to avoid warning
+ # about those that will be automatically uninstalled during
+ # the merge process or by --depclean. Always warn about
+ # packages masked by license, since the user likely wants
+ # to adjust ACCEPT_LICENSE.
+ if pkg in self._dynamic_config._package_tracker:
+ if not self._pkg_visibility_check(pkg,
+ trust_graph=False) and \
+ (pkg_in_graph or 'LICENSE' in pkg.masks):
+ self._dynamic_config._masked_installed.add(pkg)
+ else:
+ self._check_masks(pkg)
+
+ blocker_atoms = None
+ blockers = None
+ if pkg_deps_added:
+ blockers = []
+ try:
+ blockers.extend(
+ self._dynamic_config._blocker_parents.child_nodes(pkg))
+ except KeyError:
+ pass
+ try:
+ blockers.extend(
+ self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
+ except KeyError:
+ pass
+ if blockers:
+ # Select just the runtime blockers.
+ blockers = [blocker for blocker in blockers \
+ if blocker.priority.runtime or \
+ blocker.priority.runtime_post]
+ if blockers is not None:
+ blockers = set(blocker.atom for blocker in blockers)
+
+ # If this node has any blockers, create a "nomerge"
+ # node for it so that they can be enforced.
+ self._spinner_update()
+ blocker_data = blocker_cache.get(cpv)
+ if blocker_data is not None and \
+ blocker_data.counter != pkg.counter:
+ blocker_data = None
+
+ # If blocker data from the graph is available, use
+ # it to validate the cache and update the cache if
+ # it seems invalid.
+ if blocker_data is not None and \
+ blockers is not None:
+ if not blockers.symmetric_difference(
+ blocker_data.atoms):
+ continue
+ blocker_data = None
+
+ if blocker_data is None and \
+ blockers is not None:
+ # Re-use the blockers from the graph.
+ blocker_atoms = sorted(blockers)
+ blocker_data = \
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
+ blocker_cache[pkg.cpv] = blocker_data
+ continue
+
+ if blocker_data:
+ blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ # It is crucial to pass in final_db here in order to
+ # optimize dep_check calls by eliminating atoms via
+ # dep_wordreduce and dep_eval calls.
+ try:
+ success, atoms = portage.dep_check(depstr,
+ final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
+ trees=self._dynamic_config._graph_trees, myroot=myroot)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # This is helpful, for example, if a ValueError
+ # is thrown from cpv_expand due to multiple
+ # matches (this can happen if an atom lacks a
+ # category).
+ show_invalid_depstring_notice(
+ pkg, depstr, "%s" % (e,))
+ del e
+ raise
+ if not success:
+ replacement_pkgs = self._dynamic_config._package_tracker.match(
+ myroot, pkg.slot_atom)
+					if any(replacement_pkg.operation == "merge" for \
+						replacement_pkg in replacement_pkgs):
+ # This package is being replaced anyway, so
+ # ignore invalid dependencies so as not to
+ # annoy the user too much (otherwise they'd be
+ # forced to manually unmerge it first).
+ continue
+ show_invalid_depstring_notice(pkg, depstr, atoms)
+ return False
+ blocker_atoms = [myatom for myatom in atoms \
+ if myatom.blocker]
+ blocker_atoms.sort()
+ blocker_cache[cpv] = \
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
+ if blocker_atoms:
+ try:
+ for atom in blocker_atoms:
+ blocker = Blocker(atom=atom,
+ eapi=pkg.eapi,
+ priority=self._priority(runtime=True),
+ root=myroot)
+ self._dynamic_config._blocker_parents.add(blocker, pkg)
+ except portage.exception.InvalidAtom as e:
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ show_invalid_depstring_notice(
+ pkg, depstr, "Invalid Atom: %s" % (e,))
+ return False
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+ del blocker_cache
+
+ # Discard any "uninstall" tasks scheduled by previous calls
+ # to this method, since those tasks may not make sense given
+ # the current graph state.
+ previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
+ if previous_uninstall_tasks:
+ self._dynamic_config._blocker_uninstalls = digraph()
+ self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
+
+ for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
+ self._spinner_update()
+ root_config = self._frozen_config.roots[blocker.root]
+ virtuals = root_config.settings.getvirtuals()
+ myroot = blocker.root
+ initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
+
+ provider_virtual = False
+ if blocker.cp in virtuals and \
+ not self._have_new_virt(blocker.root, blocker.cp):
+ provider_virtual = True
+
+ # Use this to check PROVIDE for each matched package
+ # when necessary.
+ atom_set = InternalPackageSet(
+ initial_atoms=[blocker.atom])
+
+ if provider_virtual:
+ atoms = []
+ for provider_entry in virtuals[blocker.cp]:
+ atoms.append(Atom(blocker.atom.replace(
+ blocker.cp, provider_entry.cp, 1)))
+ else:
+ atoms = [blocker.atom]
+
+ blocked_initial = set()
+ for atom in atoms:
+ for pkg in initial_db.match_pkgs(atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_initial.add(pkg)
+
+ blocked_final = set()
+ for atom in atoms:
+ for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_final.add(pkg)
+
+ if not blocked_initial and not blocked_final:
+ parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
+ self._dynamic_config._blocker_parents.remove(blocker)
+ # Discard any parents that don't have any more blockers.
+ for pkg in parent_pkgs:
+ self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
+ if not self._dynamic_config._blocker_parents.child_nodes(pkg):
+ self._dynamic_config._blocker_parents.remove(pkg)
+ continue
+ for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ unresolved_blocks = False
+ depends_on_order = set()
+ for pkg in blocked_initial:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.installed:
+ # Two currently installed packages conflict with
+						# each other. Ignore this case since the damage
+ # is already done and this would be likely to
+ # confuse users if displayed like a normal blocker.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ if parent.operation == "merge":
+ # Maybe the blocked package can be replaced or simply
+ # unmerged to resolve this block.
+ depends_on_order.add((pkg, parent))
+ continue
+					# None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+ for pkg in blocked_final:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.operation == "nomerge" and \
+ pkg.operation == "nomerge":
+ # This blocker will be handled the next time that a
+ # merge of either package is triggered.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ # Maybe the blocking package can be
+ # unmerged to resolve this block.
+ if parent.operation == "merge" and pkg.installed:
+ depends_on_order.add((pkg, parent))
+ continue
+ elif parent.operation == "nomerge":
+ depends_on_order.add((parent, pkg))
+ continue
+					# None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+
+				# Make sure we don't unmerge any packages that have been pulled
+ # into the graph.
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ if self._dynamic_config.digraph.contains(inst_pkg) and \
+ self._dynamic_config.digraph.parent_nodes(inst_pkg):
+ unresolved_blocks = True
+ break
+
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg._metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ # Enforce correct merge order with a hard dep.
+ self._dynamic_config.digraph.addnode(uninst_task, inst_task,
+ priority=BlockerDepPriority.instance)
+ # Count references to this blocker so that it can be
+ # invalidated after nodes referencing it have been
+ # merged.
+ self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
+ if not unresolved_blocks and not depends_on_order:
+ self._dynamic_config._irrelevant_blockers.add(blocker, parent)
+ self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
+ if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ self._dynamic_config._blocker_parents.remove(blocker)
+ if not self._dynamic_config._blocker_parents.child_nodes(parent):
+ self._dynamic_config._blocker_parents.remove(parent)
+ if unresolved_blocks:
+ self._dynamic_config._unsolvable_blockers.add(blocker, parent)
+
+ return True
+
+ def _accept_blocker_conflicts(self):
+		return any(x in self._frozen_config.myopts
+			for x in ("--buildpkgonly", "--fetchonly",
+			"--fetch-all-uri", "--nodeps"))
+
+ def _merge_order_bias(self, mygraph):
+ """
+ For optimal leaf node selection, promote deep system runtime deps and
+ order nodes from highest to lowest overall reference count.
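+
+		Illustrative sketch (hypothetical nodes): uninstall tasks always
+		sort last and deep system runtime deps sort first, with the
+		remaining nodes in descending parent-count order, so
+		[uninst, sys_dep, a (2 parents), b (5 parents)] orders as
+		[sys_dep, b, a, uninst].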
+ """
+
+ node_info = {}
+ for node in mygraph.order:
+ node_info[node] = len(mygraph.parent_nodes(node))
+ deep_system_deps = _find_deep_system_runtime_deps(mygraph)
+
+ def cmp_merge_preference(node1, node2):
+
+ if node1.operation == 'uninstall':
+ if node2.operation == 'uninstall':
+ return 0
+ return 1
+
+ if node2.operation == 'uninstall':
+ if node1.operation == 'uninstall':
+ return 0
+ return -1
+
+ node1_sys = node1 in deep_system_deps
+ node2_sys = node2 in deep_system_deps
+ if node1_sys != node2_sys:
+ if node1_sys:
+ return -1
+ return 1
+
+ return node_info[node2] - node_info[node1]
+
+ mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
+
+ def altlist(self, reversed=DeprecationWarning):
+
+ if reversed is not DeprecationWarning:
+ warnings.warn("The reversed parameter of "
+ "_emerge.depgraph.depgraph.altlist() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ while self._dynamic_config._serialized_tasks_cache is None:
+ self._resolve_conflicts()
+ try:
+ self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
+ self._serialize_tasks()
+ except self._serialize_tasks_retry:
+ pass
+
+ retlist = self._dynamic_config._serialized_tasks_cache
+ if reversed is not DeprecationWarning and reversed:
+ # TODO: remove the "reversed" parameter (builtin name collision)
+ retlist = list(retlist)
+ retlist.reverse()
+ retlist = tuple(retlist)
+
+ return retlist
+
+ def _implicit_libc_deps(self, mergelist, graph):
+ """
+ Create implicit dependencies on libc, in order to ensure that libc
+ is installed as early as possible (see bug #303567).
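+
+		Sketch of the effect (hypothetical mergelist): once a libc
+		package such as sys-libs/glibc appears in the list, every
+		later merge acquires a buildtime dep edge on it, keeping
+		libc early in the schedule.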
+ """
+ libc_pkgs = {}
+ implicit_libc_roots = (self._frozen_config._running_root.root,)
+ for root in implicit_libc_roots:
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+
+ if not libc_pkgs:
+ return
+
+ earlier_libc_pkgs = set()
+
+ for pkg in mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ root_libc_pkgs = libc_pkgs.get(pkg.root)
+ if root_libc_pkgs is not None and \
+ pkg.operation == "merge":
+ if pkg in root_libc_pkgs:
+ earlier_libc_pkgs.add(pkg)
+ else:
+ for libc_pkg in root_libc_pkgs:
+ if libc_pkg in earlier_libc_pkgs:
+ graph.add(libc_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+
+ def schedulerGraph(self):
+ """
+ The scheduler graph is identical to the normal one except that
+ uninstall edges are reversed in specific cases that require
+ conflicting packages to be temporarily installed simultaneously.
+		This is intended for use by the Scheduler in its parallelization
+ logic. It ensures that temporary simultaneous installation of
+ conflicting packages is avoided when appropriate (especially for
+ !!atom blockers), but allowed in specific cases that require it.
+
+ Note that this method calls break_refs() which alters the state of
+ internal Package instances such that this depgraph instance should
+ not be used to perform any more calculations.
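+
+		Typical call sequence (a sketch, not prescribed by this module):
+		schedulerGraph() is called once the depgraph is fully resolved,
+		and the returned _scheduler_graph_config is handed to the
+		Scheduler, after which this depgraph is discarded.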
+ """
+
+ # NOTE: altlist initializes self._dynamic_config._scheduler_graph
+ mergelist = self.altlist()
+ self._implicit_libc_deps(mergelist,
+ self._dynamic_config._scheduler_graph)
+
+		# Break DepPriority.satisfied attributes that hold references to
+		# installed Package instances, collapsing them to a plain True
+		# value so the graph no longer keeps those objects alive.
+ for parents, children, node in \
+ self._dynamic_config._scheduler_graph.nodes.values():
+ for priorities in chain(parents.values(), children.values()):
+ for priority in priorities:
+ if priority.satisfied:
+ priority.satisfied = True
+
+ pkg_cache = self._frozen_config._pkg_cache
+ graph = self._dynamic_config._scheduler_graph
+ trees = self._frozen_config.trees
+ pruned_pkg_cache = {}
+ for key, pkg in pkg_cache.items():
+ if pkg in graph or \
+ (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
+ pruned_pkg_cache[key] = pkg
+
+ for root in trees:
+ trees[root]['vartree']._pkg_cache = pruned_pkg_cache
+
+ self.break_refs()
+ sched_config = \
+ _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
+
+ return sched_config
+
+ def break_refs(self):
+ """
+ Break any references in Package instances that lead back to the depgraph.
+ This is useful if you want to hold references to packages without also
+ holding the depgraph on the heap. It should only be called after the
+ depgraph and _frozen_config will not be used for any more calculations.
+ """
+ for root_config in self._frozen_config.roots.values():
+ root_config.update(self._frozen_config._trees_orig[
+ root_config.root]["root_config"])
+ # Both instances are now identical, so discard the
+ # original which should have no other references.
+ self._frozen_config._trees_orig[
+ root_config.root]["root_config"] = root_config
+
+ def _resolve_conflicts(self):
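+		# Overview: force "complete" depgraph mode while backtracking can
+		# still help with slot conflicts, then process slot conflicts and
+		# validate blockers; validation failures raise
+		# _unknown_internal_error so a backtracking retry remains possible.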
+
+ if "complete" not in self._dynamic_config.myparams and \
+ self._dynamic_config._allow_backtracking and \
+ any(self._dynamic_config._package_tracker.slot_conflicts()) and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config.myparams["complete"] = True
+
+ if not self._complete_graph():
+ raise self._unknown_internal_error()
+
+ self._process_slot_conflicts()
+
+ if self._dynamic_config._allow_backtracking:
+ self._slot_operator_trigger_reinstalls()
+
+ if not self._validate_blockers():
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
+ raise self._unknown_internal_error()
+
+ def _serialize_tasks(self):
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ if debug:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ self._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ scheduler_graph = self._dynamic_config.digraph.copy()
+
+ if '--nodeps' in self._frozen_config.myopts:
+ # Preserve the package order given on the command line.
+ return ([node for node in scheduler_graph \
+ if isinstance(node, Package) \
+ and node.operation == 'merge'], scheduler_graph)
+
+		mygraph = self._dynamic_config.digraph.copy()
+
+ removed_nodes = set()
+
+ # Prune off all DependencyArg instances since they aren't
+ # needed, and because of nested sets this is faster than doing
+ # it with multiple digraph.root_nodes() calls below. This also
+ # takes care of nested sets that have circular references,
+ # which wouldn't be matched by digraph.root_nodes().
+ for node in mygraph:
+ if isinstance(node, DependencyArg):
+ removed_nodes.add(node)
+ if removed_nodes:
+ mygraph.difference_update(removed_nodes)
+ removed_nodes.clear()
+
+ # Prune "nomerge" root nodes if nothing depends on them, since
+ # otherwise they slow down merge order calculation. Don't remove
+ # non-root nodes since they help optimize merge order in some cases
+ # such as revdep-rebuild.
+
+ while True:
+ for node in mygraph.root_nodes():
+ if not isinstance(node, Package) or \
+ node.installed or node.onlydeps:
+ removed_nodes.add(node)
+ if removed_nodes:
+ self._spinner_update()
+ mygraph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+ self._merge_order_bias(mygraph)
+ def cmp_circular_bias(n1, n2):
+ """
+ RDEPEND is stronger than PDEPEND and this function
+ measures such a strength bias within a circular
+ dependency relationship.
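+
+			For illustration (hypothetical nodes): if n1 RDEPENDs on n2
+			while n2 only PDEPENDs back on n1, the RDEPEND edge survives
+			ignore_medium_soft, so n1 compares greater and n2 merges first.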
+ """
+ n1_n2_medium = n2 in mygraph.child_nodes(n1,
+ ignore_priority=priority_range.ignore_medium_soft)
+ n2_n1_medium = n1 in mygraph.child_nodes(n2,
+ ignore_priority=priority_range.ignore_medium_soft)
+ if n1_n2_medium == n2_n1_medium:
+ return 0
+ elif n1_n2_medium:
+ return 1
+ return -1
+ myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
+		retlist = []
+ # Contains uninstall tasks that have been scheduled to
+ # occur after overlapping blockers have been installed.
+ scheduled_uninstalls = set()
+ # Contains any Uninstall tasks that have been ignored
+ # in order to avoid the circular deps code path. These
+ # correspond to blocker conflicts that could not be
+ # resolved.
+ ignored_uninstall_tasks = set()
+ have_uninstall_task = False
+ complete = "complete" in self._dynamic_config.myparams
+ asap_nodes = []
+
+ def get_nodes(**kwargs):
+ """
+ Returns leaf nodes excluding Uninstall instances
+ since those should be executed as late as possible.
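+
+			For example (a sketch): get_nodes(ignore_priority=
+			priority_range.ignore_medium) yields the leaves that remain
+			when medium and weaker edges are ignored; uninstall nodes
+			qualify only once they are in scheduled_uninstalls.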
+ """
+ return [node for node in mygraph.leaf_nodes(**kwargs) \
+ if isinstance(node, Package) and \
+ (node.operation != "uninstall" or \
+ node in scheduled_uninstalls)]
+
+ # sys-apps/portage needs special treatment if ROOT="/"
+ running_root = self._frozen_config._running_root.root
+ runtime_deps = InternalPackageSet(
+ initial_atoms=[PORTAGE_PACKAGE_ATOM])
+ running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
+ PORTAGE_PACKAGE_ATOM)
+ replacement_portage = list(self._dynamic_config._package_tracker.match(
+ running_root, Atom(PORTAGE_PACKAGE_ATOM)))
+
+ if running_portage:
+ running_portage = running_portage[0]
+ else:
+ running_portage = None
+
+ if replacement_portage:
+ replacement_portage = replacement_portage[0]
+ else:
+ replacement_portage = None
+
+ if replacement_portage == running_portage:
+ replacement_portage = None
+
+ if running_portage is not None:
+ try:
+ portage_rdepend = self._select_atoms_highest_available(
+ running_root, running_portage._metadata["RDEPEND"],
+ myuse=self._pkg_use_enabled(running_portage),
+ parent=running_portage, strict=False)
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (running_root, running_portage.cpv, e), noiselevel=-1)
+ del e
+ portage_rdepend = {running_portage : []}
+ for atoms in portage_rdepend.values():
+ runtime_deps.update(atom for atom in atoms \
+ if not atom.blocker)
+
+ # Merge libc asap, in order to account for implicit
+ # dependencies. See bug #303567.
+ implicit_libc_roots = (running_root,)
+ for root in implicit_libc_roots:
+ libc_pkgs = set()
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
+
+ if libc_pkgs:
+ # If there's also an os-headers upgrade, we need to
+ # pull that in first. See bug #328317.
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.OS_HEADERS_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
+
+ asap_nodes.extend(libc_pkgs)
+
+ def gather_deps(ignore_priority, mergeable_nodes,
+ selected_nodes, node):
+ """
+ Recursively gather a group of nodes that RDEPEND on
+			each other. This ensures that they are merged as a group
+ and get their RDEPENDs satisfied as soon as possible.
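+
+			Sketch (hypothetical cycle): for mergeable packages a and b
+			that RDEPEND on each other, gather_deps(ignore_priority,
+			mergeable_nodes, set(), a) returns True after adding both to
+			the selected set; if any member of the cycle is not
+			mergeable, the whole group is rejected.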
+ """
+ if node in selected_nodes:
+ return True
+ if node not in mergeable_nodes:
+ return False
+ if node == replacement_portage and \
+ mygraph.child_nodes(node,
+ ignore_priority=priority_range.ignore_medium_soft):
+				# Make sure that portage always has all of its
+ # RDEPENDs installed first.
+ return False
+ selected_nodes.add(node)
+ for child in mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ if not gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, child):
+ return False
+ return True
+
+ def ignore_uninst_or_med(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium(priority)
+
+ def ignore_uninst_or_med_soft(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium_soft(priority)
+
+ tree_mode = "--tree" in self._frozen_config.myopts
+ # Tracks whether or not the current iteration should prefer asap_nodes
+ # if available. This is set to False when the previous iteration
+ # failed to select any nodes. It is reset whenever nodes are
+ # successfully selected.
+ prefer_asap = True
+
+ # Controls whether or not the current iteration should drop edges that
+ # are "satisfied" by installed packages, in order to solve circular
+ # dependencies. The deep runtime dependencies of installed packages are
+ # not checked in this case (bug #199856), so it must be avoided
+ # whenever possible.
+ drop_satisfied = False
+
+ # State of variables for successive iterations that loosen the
+ # criteria for node selection.
+ #
+ # iteration prefer_asap drop_satisfied
+ # 1 True False
+ # 2 False False
+ # 3 False True
+ #
+ # If no nodes are selected on the last iteration, it is due to
+ # unresolved blockers or circular dependencies.
+
+ while mygraph:
+ self._spinner_update()
+ selected_nodes = None
+ ignore_priority = None
+ if drop_satisfied or (prefer_asap and asap_nodes):
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+ if prefer_asap and asap_nodes:
+ # ASAP nodes are merged before their soft deps. Go ahead and
+ # select root nodes here if necessary, since it's typical for
+ # the parent to have been removed from the graph already.
+ asap_nodes = [node for node in asap_nodes \
+ if mygraph.contains(node)]
+ for i in range(priority_range.SOFT,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ for node in asap_nodes:
+ if not mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ selected_nodes = [node]
+ asap_nodes.remove(node)
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes and \
+ not (prefer_asap and asap_nodes):
+ for i in range(priority_range.NONE,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ nodes = get_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ # If there is a mixture of merges and uninstalls,
+ # do the uninstalls first.
+ good_uninstalls = None
+ if len(nodes) > 1:
+ good_uninstalls = []
+ for node in nodes:
+ if node.operation == "uninstall":
+ good_uninstalls.append(node)
+
+ if good_uninstalls:
+ nodes = good_uninstalls
+
+ if good_uninstalls or len(nodes) == 1 or \
+ (ignore_priority is None and \
+ not asap_nodes and not tree_mode):
+ # Greedily pop all of these nodes since no
+ # relationship has been ignored. This optimization
+ # destroys --tree output, so it's disabled in tree
+ # mode.
+ selected_nodes = nodes
+ else:
+ # For optimal merge order:
+ # * Only pop one node.
+ # * Removing a root node (node without a parent)
+ # will not produce a leaf node, so avoid it.
+ # * It's normal for a selected uninstall to be a
+ # root node, so don't check them for parents.
+ if asap_nodes:
+ prefer_asap_parents = (True, False)
+ else:
+ prefer_asap_parents = (False,)
+ for check_asap_parent in prefer_asap_parents:
+ if check_asap_parent:
+ for node in nodes:
+ parents = mygraph.parent_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+ if any(x in asap_nodes for x in parents):
+ selected_nodes = [node]
+ break
+ else:
+ for node in nodes:
+ if mygraph.parent_nodes(node):
+ selected_nodes = [node]
+ break
+ if selected_nodes:
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes:
+ nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
+ if nodes:
+ mergeable_nodes = set(nodes)
+ if prefer_asap and asap_nodes:
+ nodes = asap_nodes
+ # When gathering the nodes belonging to a runtime cycle,
+ # we want to minimize the number of nodes gathered, since
+ # this tends to produce a more optimal merge order.
+ # Ignoring all medium_soft deps serves this purpose.
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them. Therefore, we search for the
+ # smallest cycle in order to try and identify and prefer
+ # these smaller independent cycles.
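+					# For example (hypothetical cycles): given independent
+					# cycles {a, b} and {c, d, e} where c also depends on a,
+					# the smaller {a, b} cycle is selected and merged first.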
+ ignore_priority = priority_range.ignore_medium_soft
+ smallest_cycle = None
+ for node in nodes:
+ if not mygraph.parent_nodes(node):
+ continue
+ selected_nodes = set()
+ if gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, node):
+ # When selecting asap_nodes, we need to ensure
+ # that we haven't selected a large runtime cycle
+ # that is obviously sub-optimal. This will be
+ # obvious if any of the non-asap selected_nodes
+ # is a leaf node when medium_soft deps are
+ # ignored.
+ if prefer_asap and asap_nodes and \
+ len(selected_nodes) > 1:
+ for node in selected_nodes.difference(
+ asap_nodes):
+ if not mygraph.child_nodes(node,
+ ignore_priority =
+ DepPriorityNormalRange.ignore_medium_soft):
+ selected_nodes = None
+ break
+ if selected_nodes:
+ if smallest_cycle is None or \
+ len(selected_nodes) < len(smallest_cycle):
+ smallest_cycle = selected_nodes
+
+ selected_nodes = smallest_cycle
+
+ if selected_nodes and debug:
+ writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
+ (len(selected_nodes),), noiselevel=-1)
+ cycle_digraph = mygraph.copy()
+ cycle_digraph.difference_update([x for x in
+ cycle_digraph if x not in selected_nodes])
+ cycle_digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ if prefer_asap and asap_nodes and not selected_nodes:
+ # We failed to find any asap nodes to merge, so ignore
+ # them for the next iteration.
+ prefer_asap = False
+ continue
+
+ if selected_nodes and ignore_priority is not None:
+ # Try to merge ignored medium_soft deps as soon as possible
+ # if they're not satisfied by installed packages.
+ for node in selected_nodes:
+ children = set(mygraph.child_nodes(node))
+ soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
+ medium_soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium_soft))
+ medium_soft.difference_update(soft)
+ for child in medium_soft:
+ if child in selected_nodes:
+ continue
+ if child in asap_nodes:
+ continue
+ # Merge PDEPEND asap for bug #180045.
+ asap_nodes.append(child)
+
+ if selected_nodes and len(selected_nodes) > 1:
+ if not isinstance(selected_nodes, list):
+ selected_nodes = list(selected_nodes)
+ selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
+
+ if not selected_nodes and myblocker_uninstalls:
+ # An Uninstall task needs to be executed in order to
+ # avoid conflict if possible.
+
+ if drop_satisfied:
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+
+ mergeable_nodes = get_nodes(
+ ignore_priority=ignore_uninst_or_med)
+
+ min_parent_deps = None
+ uninst_task = None
+
+ for task in myblocker_uninstalls.leaf_nodes():
+ # Do some sanity checks so that system or world packages
+ # don't get uninstalled inappropriately here (only really
+ # necessary when --complete-graph has not been enabled).
+
+ if task in ignored_uninstall_tasks:
+ continue
+
+ if task in scheduled_uninstalls:
+ # It's been scheduled but it hasn't
+ # been executed yet due to dependence
+ # on installation of blocking packages.
+ continue
+
+ root_config = self._frozen_config.roots[task.root]
+ inst_pkg = self._pkg(task.cpv, "installed", root_config,
+ installed=True)
+
+ if self._dynamic_config.digraph.contains(inst_pkg):
+ continue
+
+ forbid_overlap = False
+ heuristic_overlap = False
+ for blocker in myblocker_uninstalls.parent_nodes(task):
+ if not eapi_has_strong_blocks(blocker.eapi):
+ heuristic_overlap = True
+ elif blocker.atom.blocker.overlap.forbid:
+ forbid_overlap = True
+ break
+ if forbid_overlap and running_root == task.root:
+ continue
+
+ if heuristic_overlap and running_root == task.root:
+						# Never uninstall sys-apps/portage or its essential
+ # dependencies, except through replacement.
+ try:
+ runtime_dep_atoms = \
+ list(runtime_deps.iterAtomsForPackage(task))
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ continue
+
+ # Don't uninstall a runtime dep if it appears
+ # to be the only suitable one installed.
+ skip = False
+ vardb = root_config.trees["vartree"].dbapi
+ for atom in runtime_dep_atoms:
+ other_version = None
+ for pkg in vardb.match_pkgs(atom):
+ if pkg.cpv == task.cpv and \
+ pkg.counter == task.counter:
+ continue
+ other_version = pkg
+ break
+ if other_version is None:
+ skip = True
+ break
+ if skip:
+ continue
+
+ # For packages in the system set, don't take
+ # any chances. If the conflict can't be resolved
+ # by a normal replacement operation then abort.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "system"].iterAtomsForPackage(task):
+ skip = True
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Note that the world check isn't always
+ # necessary since self._complete_graph() will
+ # add all packages from the system and world sets to the
+ # graph. This just allows unresolved conflicts to be
+ # detected as early as possible, which makes it possible
+ # to avoid calling self._complete_graph() when it is
+					# unnecessary due to blockers triggering an abort.
+ if not complete:
+						# For packages in the world set, go ahead and uninstall
+ # when necessary, as long as the atom will be satisfied
+ # in the final state.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "selected"].iterAtomsForPackage(task):
+ satisfied = False
+ for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
+ if pkg == inst_pkg:
+ continue
+ satisfied = True
+ break
+ if not satisfied:
+ skip = True
+ self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Check the deps of parent nodes to ensure that
+ # the chosen task produces a leaf node. Maybe
+ # this can be optimized some more to make the
+ # best possible choice, but the current algorithm
+ # is simple and should be near optimal for most
+ # common cases.
+ self._spinner_update()
+ mergeable_parent = False
+ parent_deps = set()
+ parent_deps.add(task)
+ for parent in mygraph.parent_nodes(task):
+ parent_deps.update(mygraph.child_nodes(parent,
+ ignore_priority=priority_range.ignore_medium_soft))
+ if min_parent_deps is not None and \
+ len(parent_deps) >= min_parent_deps:
+ # This task is no better than a previously selected
+							# task, so abort the search now in order to avoid wasting
+ # any more cpu time on this task. This increases
+ # performance dramatically in cases when there are
+ # hundreds of blockers to solve, like when
+ # upgrading to a new slot of kde-meta.
+ mergeable_parent = None
+ break
+ if parent in mergeable_nodes and \
+ gather_deps(ignore_uninst_or_med_soft,
+ mergeable_nodes, set(), parent):
+ mergeable_parent = True
+
+ if not mergeable_parent:
+ continue
+
+ if min_parent_deps is None or \
+ len(parent_deps) < min_parent_deps:
+ min_parent_deps = len(parent_deps)
+ uninst_task = task
+
+ if uninst_task is not None and min_parent_deps == 1:
+						# This is the best possible result, so abort the search
+ # now in order to avoid wasting any more cpu time.
+ break
+
+ if uninst_task is not None:
+ # The uninstall is performed only after blocking
+ # packages have been merged on top of it. File
+ # collisions between blocking packages are detected
+ # and removed from the list of files to be uninstalled.
+ scheduled_uninstalls.add(uninst_task)
+ parent_nodes = mygraph.parent_nodes(uninst_task)
+
+ # Reverse the parent -> uninstall edges since we want
+ # to do the uninstall after blocking packages have
+ # been merged on top of it.
+ mygraph.remove(uninst_task)
+ for blocked_pkg in parent_nodes:
+ mygraph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+ scheduler_graph.remove_edge(uninst_task, blocked_pkg)
+ scheduler_graph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Sometimes a merge node will render an uninstall
+ # node unnecessary (due to occupying the same SLOT),
+ # and we want to avoid executing a separate uninstall
+ # task in that case.
+ for slot_node in self._dynamic_config._package_tracker.match(
+ uninst_task.root, uninst_task.slot_atom):
+ if slot_node.operation == "merge":
+ mygraph.add(slot_node, uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ # Only select root nodes as a last resort. This case should
+ # only trigger when the graph is nearly empty and the only
+ # remaining nodes are isolated (no parents or children). Since
+ # the nodes must be isolated, ignore_priority is not needed.
+ selected_nodes = get_nodes()
+
+ if not selected_nodes and not drop_satisfied:
+ drop_satisfied = True
+ continue
+
+ if not selected_nodes and myblocker_uninstalls:
+ # If possible, drop an uninstall task here in order to avoid
+ # the circular deps code path. The corresponding blocker will
+ # still be counted as an unresolved conflict.
+ uninst_task = None
+ for node in myblocker_uninstalls.leaf_nodes():
+ try:
+ mygraph.remove(node)
+ except KeyError:
+ pass
+ else:
+ uninst_task = node
+ ignored_uninstall_tasks.add(node)
+ break
+
+ if uninst_task is not None:
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ self._dynamic_config._circular_deps_for_display = mygraph
+ self._dynamic_config._skip_restart = True
+ raise self._unknown_internal_error()
+
+ # At this point, we've succeeded in selecting one or more nodes, so
+ # reset state variables for leaf node selection.
+ prefer_asap = True
+ drop_satisfied = False
+
+ mygraph.difference_update(selected_nodes)
+
+ for node in selected_nodes:
+ if isinstance(node, Package) and \
+ node.operation == "nomerge":
+ continue
+
+ # Handle interactions between blockers
+ # and uninstallation tasks.
+ solved_blockers = set()
+ uninst_task = None
+ if isinstance(node, Package) and \
+ "uninstall" == node.operation:
+ have_uninstall_task = True
+ uninst_task = node
+ else:
+ vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg:
+ # The package will be replaced by this one, so remove
+ # the corresponding Uninstall task if necessary.
+ inst_pkg = inst_pkg[0]
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg._metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ try:
+ mygraph.remove(uninst_task)
+ except KeyError:
+ pass
+
+ if uninst_task is not None and \
+ uninst_task not in ignored_uninstall_tasks and \
+ myblocker_uninstalls.contains(uninst_task):
+ blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
+ myblocker_uninstalls.remove(uninst_task)
+ # Discard any blockers that this Uninstall solves.
+ for blocker in blocker_nodes:
+ if not myblocker_uninstalls.child_nodes(blocker):
+ myblocker_uninstalls.remove(blocker)
+ if blocker not in \
+ self._dynamic_config._unsolvable_blockers:
+ solved_blockers.add(blocker)
+
+ retlist.append(node)
+
+ if (isinstance(node, Package) and \
+ "uninstall" == node.operation) or \
+ (uninst_task is not None and \
+ uninst_task in scheduled_uninstalls):
+ # Include satisfied blockers in the merge list
+ # since the user might be interested and also
+ # it serves as an indicator that blocking packages
+ # will be temporarily installed simultaneously.
+ for blocker in solved_blockers:
+ retlist.append(blocker)
+
+ unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
+ for node in myblocker_uninstalls.root_nodes():
+ unsolvable_blockers.add(node)
+
+ # If any Uninstall tasks need to be executed in order
+ # to avoid a conflict, complete the graph with any
+ # dependencies that may have been initially
+ # neglected (to ensure that unsafe Uninstall tasks
+ # are properly identified and blocked from execution).
+ if have_uninstall_task and \
+ not complete and \
+ not unsolvable_blockers:
+ self._dynamic_config.myparams["complete"] = True
+ if '--debug' in self._frozen_config.myopts:
+ msg = []
+ msg.append("enabling 'complete' depgraph mode " + \
+ "due to uninstall task(s):")
+ msg.append("")
+ for node in retlist:
+ if isinstance(node, Package) and \
+ node.operation == 'uninstall':
+ msg.append("\t%s" % (node,))
+ writemsg_level("\n%s\n" % \
+ "".join("%s\n" % line for line in msg),
+ level=logging.DEBUG, noiselevel=-1)
+ raise self._serialize_tasks_retry("")
+
+ # Set satisfied state on blockers, but not before the
+ # above retry path, since we don't want to modify the
+ # state in that case.
+ for node in retlist:
+ if isinstance(node, Blocker):
+ node.satisfied = True
+
+ for blocker in unsolvable_blockers:
+ retlist.append(blocker)
+
+ retlist = tuple(retlist)
+
+ if unsolvable_blockers and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
+ self._dynamic_config._serialized_tasks_cache = retlist
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
+ raise self._unknown_internal_error()
+
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._serialized_tasks_cache = retlist
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ raise self._unknown_internal_error()
+
+ return retlist, scheduler_graph
+
+ def _show_circular_deps(self, mygraph):
+ self._dynamic_config._circular_dependency_handler = \
+ circular_dependency_handler(self, mygraph)
+ handler = self._dynamic_config._circular_dependency_handler
+
+ self._frozen_config.myopts.pop("--quiet", None)
+ self._frozen_config.myopts["--verbose"] = True
+ self._frozen_config.myopts["--tree"] = True
+ portage.writemsg("\n\n", noiselevel=-1)
+ self.display(handler.merge_list)
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ portage.writemsg(prefix + "Error: circular dependencies:\n",
+ noiselevel=-1)
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is None:
+ handler.debug_print()
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is not None:
+ portage.writemsg(handler.circular_dep_message, noiselevel=-1)
+
+ suggestions = handler.suggestions
+ if suggestions:
+ writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
+ if len(suggestions) == 1:
+ writemsg("by applying the following change:\n", noiselevel=-1)
+ else:
+ writemsg("by applying " + colorize("bold", "any of") + \
+ " the following changes:\n", noiselevel=-1)
+ writemsg("".join(suggestions), noiselevel=-1)
+			writemsg("\nNote that this change can be reverted once the package has" + \
+ " been installed.\n", noiselevel=-1)
+ if handler.large_cycle_count:
+ writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
+ "Several changes might be required to resolve all cycles.\n" + \
+					"Temporarily changing some USE flag for all packages might be the better option.\n", noiselevel=-1)
+ else:
+ writemsg("\n\n", noiselevel=-1)
+ writemsg(prefix + "Note that circular dependencies " + \
+ "can often be avoided by temporarily\n", noiselevel=-1)
+ writemsg(prefix + "disabling USE flags that trigger " + \
+ "optional dependencies.\n", noiselevel=-1)
+
+ def _show_merge_list(self):
+ if self._dynamic_config._serialized_tasks_cache is not None and \
+ not (self._dynamic_config._displayed_list is not None and \
+ self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache):
+ self.display(self._dynamic_config._serialized_tasks_cache)
+
+ def _show_unsatisfied_blockers(self, blockers):
+ self._show_merge_list()
+ msg = "Error: The above package list contains " + \
+ "packages which cannot be installed " + \
+ "at the same time on the same system."
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ for line in textwrap.wrap(msg, 70):
+ portage.writemsg(prefix + line + "\n", noiselevel=-1)
+
+ # Display the conflicting packages along with the packages
+ # that pulled them in. This is helpful for troubleshooting
+ # cases in which blockers don't solve automatically and
+ # the reasons are not apparent from the normal merge list
+ # display.
+
+ conflict_pkgs = {}
+ for blocker in blockers:
+ for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
+ self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+
+ is_slot_conflict_pkg = False
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
+ is_slot_conflict_pkg = True
+ break
+ if is_slot_conflict_pkg:
+ # The slot conflict display has better noise reduction
+ # than the unsatisfied blockers display, so skip
+ # unsatisfied blockers display for packages involved
+ # directly in slot conflicts (see bug #385391).
+ continue
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
+ if atom is not None:
+ parent_atoms = set([("@selected", atom)])
+ if parent_atoms:
+ conflict_pkgs[pkg] = parent_atoms
+
+ if conflict_pkgs:
+ # Reduce noise by pruning packages that are only
+ # pulled in by other conflict packages.
+ pruned_pkgs = set()
+ for pkg, parent_atoms in conflict_pkgs.items():
+ relevant_parent = False
+ for parent, atom in parent_atoms:
+ if parent not in conflict_pkgs:
+ relevant_parent = True
+ break
+ if not relevant_parent:
+ pruned_pkgs.add(pkg)
+ for pkg in pruned_pkgs:
+ del conflict_pkgs[pkg]
+
+ if conflict_pkgs:
+ msg = []
+ msg.append("\n")
+ indent = " "
+ for pkg, parent_atoms in conflict_pkgs.items():
+
+ # Prefer packages that are not directly involved in a conflict.
+ # It can be essential to see all the packages here, so don't
+ # omit any. If the list is long, people can simply use a pager.
+ preferred_parents = set()
+ for parent_atom in parent_atoms:
+ parent, atom = parent_atom
+ if parent not in conflict_pkgs:
+ preferred_parents.add(parent_atom)
+
+ ordered_list = list(preferred_parents)
+ if len(parent_atoms) > len(ordered_list):
+ for parent_atom in parent_atoms:
+ if parent_atom not in preferred_parents:
+ ordered_list.append(parent_atom)
+
+ msg.append(indent + "%s pulled in by\n" % pkg)
+
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ if atom != atom.unevaluated_atom:
+ # Show the unevaluated atom, since it can reveal
+ # issues with conditional use-flags missing
+ # from IUSE.
+ msg.append("%s (%s) required by %s" %
+ (atom.unevaluated_atom, atom, parent))
+ else:
+ msg.append("%s required by %s" % (atom, parent))
+ msg.append("\n")
+
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ if "--quiet" not in self._frozen_config.myopts:
+ show_blocker_docs_link()
+
+ def display(self, mylist, favorites=[], verbosity=None):
+
+ # This is used to prevent display_problems() from
+ # redundantly displaying this exact same merge list
+ # again via _show_merge_list().
+ self._dynamic_config._displayed_list = mylist
+
+ if "--tree" in self._frozen_config.myopts:
+ mylist = tuple(reversed(mylist))
+
+ display = Display()
+
+ return display(self, mylist, favorites, verbosity)
+
+ def _display_autounmask(self):
+ """
+ Display --autounmask message and optionally write it to config files
+ (using CONFIG_PROTECT). The message includes the comments and the changes.
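+
+		The emitted change lines look like (illustrative values):
+		">=app-misc/foo-1.2 ~amd64" for package.accept_keywords or
+		">=app-misc/foo-1.2 flag" for package.use, each preceded by a
+		dependency-chain comment.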
+ """
+
+ ask = "--ask" in self._frozen_config.myopts
+ autounmask_write = \
+ self._frozen_config.myopts.get("--autounmask-write",
+ ask) is True
+ autounmask_unrestricted_atoms = \
+			self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") is True
+ quiet = "--quiet" in self._frozen_config.myopts
+ pretend = "--pretend" in self._frozen_config.myopts
+ enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
+
+ def check_if_latest(pkg):
+ is_latest = True
+ is_latest_in_slot = True
+ dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
+ root_config = self._frozen_config.roots[pkg.root]
+
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
+ if other_pkg.cp != pkg.cp:
+ # old-style PROVIDE virtual means there are no
+ # normal matches for this pkg_type
+ break
+ if other_pkg > pkg:
+ is_latest = False
+ if other_pkg.slot_atom == pkg.slot_atom:
+ is_latest_in_slot = False
+ break
+ else:
+ # iter_match_pkgs yields highest version first, so
+ # there's no need to search this pkg_type any further
+ break
+
+ if not is_latest_in_slot:
+ break
+
+ return is_latest, is_latest_in_slot
+
+		# Set of roots we have autounmask changes for.
+ roots = set()
+
+ masked_by_missing_keywords = False
+ unstable_keyword_msg = {}
+ for pkg in self._dynamic_config._needed_unstable_keywords:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ unstable_keyword_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'unstable keyword':
+ keyword = reason.unmask_hint.value
+ if keyword == "**":
+ masked_by_missing_keywords = True
+
+ unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+ elif is_latest_in_slot:
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
+
+ p_mask_change_msg = {}
+ for pkg in self._dynamic_config._needed_p_mask_changes:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ p_mask_change_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'p_mask':
+ keyword = reason.unmask_hint.value
+
+ comment, filename = portage.getmaskingreason(
+ pkg.cpv, metadata=pkg._metadata,
+ settings=pkgsettings,
+ portdb=pkg.root_config.trees["porttree"].dbapi,
+ return_location=True)
+
+ p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if filename:
+ p_mask_change_msg[root].append("# %s:\n" % filename)
+ if comment:
+ comment = [line for line in
+ comment.splitlines() if line]
+ for line in comment:
+ p_mask_change_msg[root].append("%s\n" % line)
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+ elif is_latest_in_slot:
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+
+ use_changes_msg = {}
+ for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ use_changes_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ changes = needed_use_config_change[1]
+ adjustments = []
+ for flag, state in changes.items():
+ if state:
+ adjustments.append(flag)
+ else:
+ adjustments.append("-" + flag)
+ use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
+ if is_latest:
+ use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+ elif is_latest_in_slot:
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
+ else:
+ use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+
+ license_msg = {}
+ for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ license_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+
+ license_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if is_latest:
+ license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+ elif is_latest_in_slot:
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
+ else:
+ license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+
+ def find_config_file(abs_user_config, file_name):
+ """
+ Searches /etc/portage for an appropriate file to append changes to.
+			If file_name refers to a file, its path is returned; if it refers to
+			a directory, the path of the last file in it is returned. The order
+			of traversal is identical to portage.util.grablines(recursive=True).
+
+ file_name - String containing a file name like "package.use"
+ return value - String. Absolute path of file to write to. None if
+ no suitable file exists.
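+
+			Example (hypothetical layout): if package.use is a directory
+			containing "00-defaults" and "99-local", the absolute path of
+			"99-local" is returned, matching the grablines() ordering.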
+ """
+ file_path = os.path.join(abs_user_config, file_name)
+
+ try:
+ os.lstat(file_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ # The file doesn't exist, so we'll
+ # simply create it.
+ return file_path
+
+ # Disk or file system trouble?
+ return None
+
+ last_file_path = None
+ stack = [file_path]
+ while stack:
+ p = stack.pop()
+ try:
+ st = os.stat(p)
+ except OSError:
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode):
+ last_file_path = p
+ elif stat.S_ISDIR(st.st_mode):
+ if os.path.basename(p) in VCS_DIRS:
+ continue
+ try:
+ contents = os.listdir(p)
+ except OSError:
+ pass
+ else:
+ contents.sort(reverse=True)
+ for child in contents:
+ if child.startswith(".") or \
+ child.endswith("~"):
+ continue
+ stack.append(os.path.join(p, child))
+
+ return last_file_path
+
+ write_to_file = autounmask_write and not pretend
+		# Make sure we have a file to write to before doing any write.
+ file_to_write_to = {}
+ problems = []
+ if write_to_file:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if root in unstable_keyword_msg:
+ if not os.path.exists(os.path.join(abs_user_config,
+ "package.keywords")):
+ filename = "package.accept_keywords"
+ else:
+ filename = "package.keywords"
+ file_to_write_to[(abs_user_config, "package.keywords")] = \
+ find_config_file(abs_user_config, filename)
+
+ if root in p_mask_change_msg:
+ file_to_write_to[(abs_user_config, "package.unmask")] = \
+ find_config_file(abs_user_config, "package.unmask")
+
+ if root in use_changes_msg:
+ file_to_write_to[(abs_user_config, "package.use")] = \
+ find_config_file(abs_user_config, "package.use")
+
+ if root in license_msg:
+ file_to_write_to[(abs_user_config, "package.license")] = \
+ find_config_file(abs_user_config, "package.license")
+
+ for (abs_user_config, f), path in file_to_write_to.items():
+ if path is None:
+ problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
+
+ write_to_file = not problems
+
+ def format_msg(lines):
+ lines = lines[:]
+ for i, line in enumerate(lines):
+ if line.startswith("#"):
+ continue
+ lines[i] = colorize("INFORM", line.rstrip()) + "\n"
+ return "".join(lines)
+
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if len(roots) > 1:
+ writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+
+ def _writemsg(reason, file):
+ writemsg(('\nThe following %s are necessary to proceed:\n'
+ ' (see "%s" in the portage(5) man page for more details)\n')
+ % (colorize('BAD', reason), file), noiselevel=-1)
+
+ if root in unstable_keyword_msg:
+ _writemsg('keyword changes', 'package.accept_keywords')
+ writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
+
+ if root in p_mask_change_msg:
+ _writemsg('mask changes', 'package.unmask')
+ writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
+
+ if root in use_changes_msg:
+ _writemsg('USE changes', 'package.use')
+ writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
+
+ if root in license_msg:
+ _writemsg('license changes', 'package.license')
+ writemsg(format_msg(license_msg[root]), noiselevel=-1)
+
+ protect_obj = {}
+ if write_to_file:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ protect_obj[root] = ConfigProtect(settings["EROOT"], \
+ shlex_split(settings.get("CONFIG_PROTECT", "")),
+ shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
+ case_insensitive = ("case-insensitive-fs"
+ in settings.features))
+
+ def write_changes(root, changes, file_to_write_to):
+ file_contents = None
+ try:
+ with io.open(
+ _unicode_encode(file_to_write_to,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace') as f:
+ file_contents = f.readlines()
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ file_contents = []
+ else:
+ problems.append("!!! Failed to read '%s': %s\n" % \
+ (file_to_write_to, e))
+ if file_contents is not None:
+ file_contents.extend(changes)
+ if protect_obj[root].isprotected(file_to_write_to):
+ # We want to force new_protect_filename to ensure
+ # that the user will see all our changes via
+ # dispatch-conf, even if file_to_write_to doesn't
+ # exist yet, so we specify force=True.
+ file_to_write_to = new_protect_filename(file_to_write_to,
+ force=True)
+ try:
+ write_atomic(file_to_write_to, "".join(file_contents))
+ except PortageException:
+ problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
+
+ if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
+ msg = [
+ "",
+ "NOTE: The --autounmask-keep-masks option will prevent emerge",
+ " from creating package.unmask or ** keyword changes."
+ ]
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ if ask and write_to_file and file_to_write_to:
+ prompt = "\nWould you like to add these " + \
+ "changes to your config files?"
+ if self.query(prompt, enter_invalid) == 'No':
+ write_to_file = False
+
+ if write_to_file and file_to_write_to:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+ ensure_dirs(abs_user_config)
+
+ if root in unstable_keyword_msg:
+ write_changes(root, unstable_keyword_msg[root],
+ file_to_write_to.get((abs_user_config, "package.keywords")))
+
+ if root in p_mask_change_msg:
+ write_changes(root, p_mask_change_msg[root],
+ file_to_write_to.get((abs_user_config, "package.unmask")))
+
+ if root in use_changes_msg:
+ write_changes(root, use_changes_msg[root],
+ file_to_write_to.get((abs_user_config, "package.use")))
+
+ if root in license_msg:
+ write_changes(root, license_msg[root],
+ file_to_write_to.get((abs_user_config, "package.license")))
+
+ if problems:
+ writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
+ noiselevel=-1)
+ writemsg("".join(problems), noiselevel=-1)
+ elif write_to_file and roots:
+ writemsg("\nAutounmask changes successfully written.\n",
+ noiselevel=-1)
+ for root in roots:
+ chk_updated_cfg_files(root,
+ [os.path.join(os.sep, USER_CONFIG_PATH)])
+ elif not pretend and not autounmask_write and roots:
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
+ "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
+ "paying special attention to mask or keyword changes that may expose\n"
+ "experimental or unstable packages.\n",
+ noiselevel=-1)
+
+
+ def display_problems(self):
+ """
+ Display problems with the dependency graph such as slot collisions.
+ This is called internally by display() to show the problems _after_
+ the merge list where it is most likely to be seen, but if display()
+ is not going to be called then this method should be called explicitly
+ to ensure that the user is notified of problems with the graph.
+ """
+
+ if self._dynamic_config._circular_deps_for_display is not None:
+ self._show_circular_deps(
+ self._dynamic_config._circular_deps_for_display)
+
+ unresolved_conflicts = False
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict:
+ unresolved_conflicts = True
+ self._show_slot_collision_notice()
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ unresolved_conflicts = True
+ self._show_unsatisfied_blockers(
+ self._dynamic_config._unsatisfied_blockers_for_display)
+
+ # Only show missed updates if there are no unresolved conflicts,
+ # since they may be irrelevant after the conflicts are solved.
+ if not unresolved_conflicts:
+ self._show_missed_update()
+
+ if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
+ self._compute_abi_rebuild_info()
+ self._show_abi_rebuild_info()
+
+ self._show_ignored_binaries()
+
+ self._display_autounmask()
+
+ for depgraph_sets in self._dynamic_config.sets.values():
+ for pset in depgraph_sets.sets.values():
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ # TODO: Add generic support for "set problem" handlers so that
+ # the below warnings aren't special cases for world only.
+
+ if self._dynamic_config._missing_args:
+ world_problems = False
+ if "world" in self._dynamic_config.sets[
+ self._frozen_config.target_root].sets:
+ # Filter out indirect members of world (from nested sets)
+ # since only direct members of world are desired here.
+ world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
+ for arg, atom in self._dynamic_config._missing_args:
+ if arg.name in ("selected", "world") and atom in world_set:
+ world_problems = True
+ break
+
+ if world_problems:
+ writemsg("\n!!! Problems have been " + \
+ "detected with your world file\n",
+ noiselevel=-1)
+ writemsg("!!! Please run " + \
+ green("emaint --check world")+"\n\n",
+ noiselevel=-1)
+
+ if self._dynamic_config._missing_args:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " Ebuilds for the following packages are either all\n",
+ noiselevel=-1)
+ writemsg(colorize("BAD", "!!!") + \
+ " masked or don't exist:\n",
+ noiselevel=-1)
+ writemsg(" ".join(str(atom) for arg, atom in \
+ self._dynamic_config._missing_args) + "\n",
+ noiselevel=-1)
+
+ if self._dynamic_config._pprovided_args:
+ arg_refs = {}
+ for arg, atom in self._dynamic_config._pprovided_args:
+ if isinstance(arg, SetArg):
+ parent = arg.name
+ arg_atom = (atom, atom)
+ else:
+ parent = "args"
+ arg_atom = (arg.arg, atom)
+ refs = arg_refs.setdefault(arg_atom, [])
+ if parent not in refs:
+ refs.append(parent)
+ msg = []
+ msg.append(bad("\nWARNING: "))
+ if len(self._dynamic_config._pprovided_args) > 1:
+ msg.append("Requested packages will not be " + \
+ "merged because they are listed in\n")
+ else:
+ msg.append("A requested package will not be " + \
+ "merged because it is listed in\n")
+ msg.append("package.provided:\n\n")
+ problems_sets = set()
+ for (arg, atom), refs in arg_refs.items():
+ ref_string = ""
+ if refs:
+ problems_sets.update(refs)
+ refs.sort()
+ ref_string = ", ".join(["'%s'" % name for name in refs])
+ ref_string = " pulled in by " + ref_string
+ msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
+ msg.append("\n")
+ if "selected" in problems_sets or "world" in problems_sets:
+ msg.append("This problem can be solved in one of the following ways:\n\n")
+ msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
+ msg.append(" B) Uninstall offending packages (cleans them from world).\n")
+ msg.append(" C) Remove offending entries from package.provided.\n\n")
+ msg.append("The best course of action depends on the reason that an offending\n")
+ msg.append("package.provided entry exists.\n\n")
+ writemsg("".join(msg), noiselevel=-1)
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_license_updates:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following updates are masked by LICENSE changes:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_installed:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following installed packages are masked:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs,
+ **portage._native_kwargs(kwargs))
+
+ if self._dynamic_config._buildpkgonly_deps_unsatisfied:
+ self._show_merge_list()
+ writemsg("\n!!! --buildpkgonly requires all "
+ "dependencies to be merged.\n", noiselevel=-1)
+ writemsg("!!! Cannot merge requested packages. "
+ "Merge deps and try again.\n\n", noiselevel=-1)
+
+ def saveNomergeFavorites(self):
+ """Find atoms in favorites that are not in the mergelist and add them
+ to the world file if necessary."""
+ for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
+ "--oneshot", "--onlydeps", "--pretend"):
+ if x in self._frozen_config.myopts:
+ return
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ world_set = root_config.sets["selected"]
+
+ world_locked = False
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ args_set = self._dynamic_config.sets[
+ self._frozen_config.target_root].sets['__non_set_args__']
+ added_favorites = set()
+ for x in self._dynamic_config._set_nodes:
+ if x.operation != "nomerge":
+ continue
+
+ if x.root != root_config.root:
+ continue
+
+ try:
+ myfavkey = create_world_atom(x, args_set, root_config)
+ if myfavkey:
+ if myfavkey in added_favorites:
+ continue
+ added_favorites.add(myfavkey)
+ except portage.exception.InvalidDependString as e:
+ writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
+ (x.cpv, e), noiselevel=-1)
+ writemsg("!!! see '%s'\n\n" % os.path.join(
+ x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
+ del e
+ all_added = []
+ for arg in self._dynamic_config._initial_arg_list:
+ if not isinstance(arg, SetArg):
+ continue
+ if arg.root_config.root != root_config.root:
+ continue
+ if arg.internal:
+ # __auto_* sets
+ continue
+ k = arg.name
+ if k in ("selected", "world") or \
+ not root_config.sets[k].world_candidate:
+ continue
+ s = SETPREFIX + k
+ if s in world_set:
+ continue
+ all_added.append(SETPREFIX + k)
+ all_added.extend(added_favorites)
+ all_added.sort()
+ if all_added:
+ skip = False
+ if "--ask" in self._frozen_config.myopts:
+ writemsg_stdout("\n", noiselevel=-1)
+ for a in all_added:
+ writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ prompt = "Would you like to add these packages to your world " \
+ "favorites?"
+ enter_invalid = '--ask-enter-invalid' in \
+ self._frozen_config.myopts
+ if self.query(prompt, enter_invalid) == "No":
+ skip = True
+
+ if not skip:
+ for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+ writemsg_stdout(
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
+ world_set.update(all_added)
+
+ if world_locked:
+ world_set.unlock()
+
+ def _loadResumeCommand(self, resume_data, skip_masked=True,
+ skip_missing=True):
+ """
+ Add a resume command to the graph and validate it in the process. This
+ will raise a PackageNotFound exception if a package is not available.
+ """
+
+ self._load_vdb()
+
+ if not isinstance(resume_data, dict):
+ return False
+
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+
+ favorites = resume_data.get("favorites")
+ if isinstance(favorites, list):
+ args = self._load_favorites(favorites)
+ else:
+ args = []
+
+ serialized_tasks = []
+ masked_tasks = []
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, myroot, pkg_key, action = x
+ if pkg_type not in self.pkg_tree_map:
+ continue
+ if action != "merge":
+ continue
+ root_config = self._frozen_config.roots[myroot]
+
+ # Use the resume "favorites" list to see if a repo was specified
+ # for this package.
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ repo = None
+ for atom in depgraph_sets.atoms.getAtoms():
+ if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
+ repo = atom.repo
+ break
+
+ atom = "=" + pkg_key
+ if repo:
+ atom = atom + _repo_separator + repo
+
+ try:
+ atom = Atom(atom, allow_repo=True)
+ except InvalidAtom:
+ continue
+
+ pkg = None
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
+ if not self._pkg_visibility_check(pkg) or \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ break
+
+ if pkg is None:
+ # It does not exist or it is corrupt.
+ if skip_missing:
+ # TODO: log these somewhere
+ continue
+ raise portage.exception.PackageNotFound(pkg_key)
+
+ if "merge" == pkg.operation and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
+ if skip_masked:
+ masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
+ else:
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, "="+pkg.cpv), {"myparent":None}))
+
+ self._dynamic_config._package_tracker.add_pkg(pkg)
+ serialized_tasks.append(pkg)
+ self._spinner_update()
+
+ if self._dynamic_config._unsatisfied_deps_for_display:
+ return False
+
+ if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
+ self._dynamic_config._serialized_tasks_cache = serialized_tasks
+ self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config.myparams["selective"] = True
+ # Always traverse deep dependencies in order to account for
+ # potentially unsatisfied dependencies of installed packages.
+ # This is necessary for correct --keep-going or --resume operation
+ # in case a package from a group of circularly dependent packages
+ # fails. In this case, a package which has recently been installed
+ # may have an unsatisfied circular dependency (pulled in by
+ # PDEPEND, for example). So, even though a package is already
+ # installed, it may not have all of its dependencies satisfied, so
+ # it may not be usable. If such a package is in the subgraph of
+ # deep dependencies of a scheduled build, that build needs to
+ # be cancelled. In order for this type of situation to be
+ # recognized, deep traversal of dependencies is required.
+ self._dynamic_config.myparams["deep"] = True
+
+ for task in serialized_tasks:
+ if isinstance(task, Package) and \
+ task.operation == "merge":
+ if not self._add_pkg(task, None):
+ return False
+
+ # Packages for argument atoms need to be explicitly
+ # added via _add_pkg() so that they are included in the
+ # digraph (needed at least for --tree display).
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ pkg, existing_node = self._select_package(
+ arg.root_config.root, atom)
+ if existing_node is None and \
+ pkg is not None:
+ if not self._add_pkg(pkg, Dependency(atom=atom,
+ root=pkg.root, parent=arg)):
+ return False
+
+ # Allow unsatisfied deps here to avoid showing a masking
+ # message for an unsatisfied dep that isn't necessarily
+ # masked.
+ if not self._create_graph(allow_unsatisfied=True):
+ return False
+
+ unsatisfied_deps = []
+ for dep in self._dynamic_config._unsatisfied_deps:
+ if not isinstance(dep.parent, Package):
+ continue
+ if dep.parent.operation == "merge":
+ unsatisfied_deps.append(dep)
+ continue
+
+ # For unsatisfied deps of installed packages, only account for
+ # them if they are in the subgraph of dependencies of a package
+ # which is scheduled to be installed.
+ unsatisfied_install = False
+ traversed = set()
+ dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
+ while dep_stack:
+ node = dep_stack.pop()
+ if not isinstance(node, Package):
+ continue
+ if node.operation == "merge":
+ unsatisfied_install = True
+ break
+ if node in traversed:
+ continue
+ traversed.add(node)
+ dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
+
+ if unsatisfied_install:
+ unsatisfied_deps.append(dep)
+
+ if masked_tasks or unsatisfied_deps:
+ # This probably means that a required package
+ # was dropped via --skipfirst. It makes the
+ # resume list invalid, so convert it to a
+ # UnsatisfiedResumeDep exception.
+ raise self.UnsatisfiedResumeDep(self,
+ masked_tasks + unsatisfied_deps)
+ self._dynamic_config._serialized_tasks_cache = None
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False
+
+ return True
+
+ def _load_favorites(self, favorites):
+ """
+ Use a list of favorites to resume state from a
+ previous select_files() call. This creates similar
+ DependencyArg instances to those that would have
+ been created by the original select_files() call.
+ This allows Package instances to be matched with
+ DependencyArg instances during graph creation.
+ """
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ args = []
+ for x in favorites:
+ if not isinstance(x, basestring):
+ continue
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ continue
+ if s in depgraph_sets.sets:
+ continue
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ else:
+ try:
+ x = Atom(x, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ continue
+ args.append(AtomArg(arg=x, atom=x,
+ root_config=root_config))
+
+ self._set_args(args)
+ return args
+
+ class UnsatisfiedResumeDep(portage.exception.PortageException):
+ """
+ A dependency of a resume list is not installed. This
+ can occur when a required package is dropped from the
+ merge list via --skipfirst.
+ """
+ def __init__(self, depgraph, value):
+ portage.exception.PortageException.__init__(self, value)
+ self.depgraph = depgraph
+
+ class _internal_exception(portage.exception.PortageException):
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ class _unknown_internal_error(_internal_exception):
+ """
+ Used by the depgraph internally to terminate graph creation.
+ The specific reason for the failure should have been dumped
+ to stderr; unfortunately, the exact reason for the failure
+ may not be known.
+ """
+
+ class _serialize_tasks_retry(_internal_exception):
+ """
+ This is raised by the _serialize_tasks() method when it needs to
+ be called again for some reason. The only case that it's currently
+ used for is when neglected dependencies need to be added to the
+ graph in order to avoid making a potentially unsafe decision.
+ """
+
+ class _backtrack_mask(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_backtrack=True and a matching package has been masked by
+ backtracking.
+ """
+
+ class _autounmask_breakage(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_autounmask_breakage=True and a matching package has been
+ disqualified due to autounmask changes.
+ """
+
+ def need_restart(self):
+ return self._dynamic_config._need_restart and \
+ not self._dynamic_config._skip_restart
+
+ def need_config_change(self):
+ return self._dynamic_config._success_without_autounmask or \
+ self._dynamic_config._required_use_unsatisfied
+
+ def autounmask_breakage_detected(self):
+ try:
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(
+ *pargs, check_autounmask_breakage=True,
+ **portage._native_kwargs(kwargs))
+ except self._autounmask_breakage:
+ return True
+ return False
+
+ def get_backtrack_infos(self):
+ return self._dynamic_config._backtrack_infos
+
+
+class _dep_check_composite_db(dbapi):
+ """
+ A dbapi-like interface that is optimized for use in dep_check() calls.
+ This is built on top of the existing depgraph package selection logic.
+ Some packages that have been added to the graph may be masked from this
+ view in order to influence the atom preference selection that occurs
+ via dep_check().
+ """
+ def __init__(self, depgraph, root):
+ dbapi.__init__(self)
+ self._depgraph = depgraph
+ self._root = root
+ self._match_cache = {}
+ self._cpv_pkg_map = {}
+
+ def _clear_cache(self):
+ self._match_cache.clear()
+ self._cpv_pkg_map.clear()
+
+ def cp_list(self, cp):
+ """
+ Emulate cp_list just so it can be used to check for existence
+ of new-style virtuals. Since it's a waste of time to return
+ more than one cpv for this use case, a maximum of one cpv will
+ be returned.
+ """
+ if isinstance(cp, Atom):
+ atom = cp
+ else:
+ atom = Atom(cp)
+ ret = []
+ for pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if pkg.cp == cp:
+ ret.append(pkg.cpv)
+ break
+
+ return ret
+
+ def match_pkgs(self, atom):
+ cache_key = (atom, atom.unevaluated_atom)
+ ret = self._match_cache.get(cache_key)
+ if ret is not None:
+ for pkg in ret:
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ return ret[:]
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ ret = []
+ pkg, existing = self._depgraph._select_package(self._root, atom)
+
+ if pkg is not None and self._visible(pkg, atom_set):
+ ret.append(pkg)
+
+ if pkg is not None and \
+ atom.slot is None and \
+ pkg.cp.startswith("virtual/") and \
+ (("remove" not in self._depgraph._dynamic_config.myparams and
+ "--update" not in self._depgraph._frozen_config.myopts) or
+ not ret or
+ not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
+ # For new-style virtual lookahead that occurs inside dep_check()
+ # for bug #141118, examine all slots. This is needed so that newer
+ # slots will not unnecessarily be pulled in when a satisfying lower
+ # slot is already installed. For example, if virtual/jdk-1.5 is
+ # satisfied via gcj-jdk then there's no need to pull in a newer
+ # slot to satisfy a virtual/jdk dependency, unless --update is
+ # enabled.
+ slots = set()
+ slots.add(pkg.slot)
+ for virt_pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if virt_pkg.cp != pkg.cp:
+ continue
+ slots.add(virt_pkg.slot)
+
+ slots.remove(pkg.slot)
+ while slots:
+ slot_atom = atom.with_slot(slots.pop())
+ pkg, existing = self._depgraph._select_package(
+ self._root, slot_atom)
+ if not pkg:
+ continue
+ if not self._visible(pkg, atom_set):
+ continue
+ ret.append(pkg)
+
+ if len(ret) > 1:
+ ret.sort()
+
+ self._match_cache[cache_key] = ret
+ for pkg in ret:
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ return ret[:]
+
+ def _visible(self, pkg, atom_set):
+ if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
+ return False
+ if pkg.installed and \
+ (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ myopts = self._depgraph._frozen_config.myopts
+ use_ebuild_visibility = myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ avoid_update = "--update" not in myopts and \
+ "remove" not in self._depgraph._dynamic_config.myparams
+ usepkgonly = "--usepkgonly" in myopts
+ if not avoid_update:
+ if not use_ebuild_visibility and usepkgonly:
+ return False
+ elif not self._depgraph._equiv_ebuild_visible(pkg):
+ return False
+
+ in_graph = next(self._depgraph._dynamic_config._package_tracker.match(
+ self._root, pkg.slot_atom, installed=False), None)
+
+ if in_graph is None:
+ # Mask choices for packages which are not the highest visible
+ # version within their slot (since they usually trigger slot
+ # conflicts).
+ highest_visible, in_graph = self._depgraph._select_package(
+ self._root, pkg.slot_atom)
+ # Note: highest_visible is not necessarily the real highest
+ # visible, especially when --update is not enabled, so use
+ # < operator instead of !=.
+ if highest_visible is not None and pkg < highest_visible:
+ return False
+ elif in_graph != pkg:
+ # Mask choices for packages that would trigger a slot
+ # conflict with a previously selected package.
+ if not atom_set.findAtomForPackage(in_graph,
+ modified_use=self._depgraph._pkg_use_enabled(in_graph)):
+ # Only mask if the graph package matches the given
+ # atom (fixes bug #515230).
+ return True
+ return False
+ return True
+
+ def aux_get(self, cpv, wants):
+ metadata = self._cpv_pkg_map[cpv]._metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def match(self, atom):
+ return [pkg.cpv for pkg in self.match_pkgs(atom)]
+
+def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
+
+ if "--quiet" in myopts:
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
+ return
+
+ s = search(root_config, spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts)
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ arg, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+ s.searchkey = atom_pn
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ s.addCP(cp)
+ s.output()
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
+
+def _spinner_start(spinner, myopts):
+ if spinner is None:
+ return
+ if "--quiet" not in myopts and \
+ ("--pretend" in myopts or "--ask" in myopts or \
+ "--tree" in myopts or "--verbose" in myopts):
+ action = ""
+ if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ action = "fetched"
+ elif "--buildpkgonly" in myopts:
+ action = "built"
+ else:
+ action = "merged"
+ if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
+ if "--unordered-display" in myopts:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in reverse order:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in order:" % action) + "\n\n")
+
+ show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
+ if not show_spinner:
+ spinner.update = spinner.update_quiet
+
+ if show_spinner:
+ portage.writemsg_stdout("Calculating dependencies ")
+
+def _spinner_stop(spinner):
+ if spinner is None or \
+ spinner.update == spinner.update_quiet:
+ return
+
+ if spinner.update != spinner.update_basic:
+ # update_basic is used for non-tty output,
+ # so don't output backspaces in that case.
+ portage.writemsg_stdout("\b\b")
+
+ portage.writemsg_stdout("... done!\n")
+
+def backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner)
+ finally:
+ _spinner_stop(spinner)
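+
+# Minimal usage sketch (hypothetical variable names; callers normally
+# obtain these objects via load_emerge_config() in _emerge.actions):
+#
+#     success, mydepgraph, favorites = backtrack_depgraph(
+#         settings, trees, myopts, myparams, myaction, myfiles, spinner)
+#     if not success:
+#         mydepgraph.display_problems()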
+
+
+def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
+
+ debug = "--debug" in myopts
+ mydepgraph = None
+ max_retries = myopts.get('--backtrack', 10)
+ max_depth = max(1, (max_retries + 1) // 2)
+ allow_backtracking = max_retries > 0
+ backtracker = Backtracker(max_depth)
+ backtracked = 0
+
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+
+ while backtracker:
+
+ if debug and mydepgraph is not None:
+ writemsg_level(
+ "\n\nbacktracking try %s \n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ backtrack_parameters = backtracker.get()
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=allow_backtracking,
+ backtrack_parameters=backtrack_parameters)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if success or mydepgraph.need_config_change():
+ break
+ elif not allow_backtracking:
+ break
+ elif backtracked >= max_retries:
+ break
+ elif mydepgraph.need_restart():
+ backtracked += 1
+ backtracker.feedback(mydepgraph.get_backtrack_infos())
+ else:
+ break
+
+ if not (success or mydepgraph.need_config_change()) and backtracked:
+
+ if debug:
+ writemsg_level(
+ "\n\nbacktracking aborted after %s tries\n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=False,
+ backtrack_parameters=backtracker.get_best_run())
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if not success and mydepgraph.autounmask_breakage_detected():
+ if debug:
+ writemsg_level(
+ "\n\nautounmask breakage detected\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+ myopts["--autounmask"] = "n"
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config, allow_backtracking=False)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ return (success, mydepgraph, favorites)
+
+
+def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _resume_depgraph(settings, trees, mtimedb, myopts,
+ myparams, spinner)
+ finally:
+ _spinner_stop(spinner)
+
+def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Construct a depgraph for the given resume list. This will raise
+ PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
+ TODO: Return reasons for dropped_tasks, for display/logging.
+ @rtype: tuple
+ @return: (success, depgraph, dropped_tasks)
+ """
+ skip_masked = True
+ skip_unsatisfied = True
+ mergelist = mtimedb["resume"]["mergelist"]
+ dropped_tasks = {}
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+ while True:
+ mydepgraph = depgraph(settings, trees,
+ myopts, myparams, spinner, frozen_config=frozen_config)
+ try:
+ success = mydepgraph._loadResumeCommand(mtimedb["resume"],
+ skip_masked=skip_masked)
+ except depgraph.UnsatisfiedResumeDep as e:
+ if not skip_unsatisfied:
+ raise
+
+ graph = mydepgraph._dynamic_config.digraph
+ unsatisfied_parents = {}
+ traversed_nodes = set()
+ unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
+ while unsatisfied_stack:
+ pkg, atom = unsatisfied_stack.pop()
+ if atom is not None and \
+ mydepgraph._select_pkg_from_installed(
+ pkg.root, atom)[0] is not None:
+ continue
+ atoms = unsatisfied_parents.get(pkg)
+ if atoms is None:
+ atoms = []
+ unsatisfied_parents[pkg] = atoms
+ if atom is not None:
+ atoms.append(atom)
+ if pkg in traversed_nodes:
+ continue
+ traversed_nodes.add(pkg)
+
+ # If this package was pulled in by a parent
+ # package scheduled for merge, removing this
+ # package may cause the parent package's
+ # dependency to become unsatisfied.
+ for parent_node, atom in \
+ mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
+ if not isinstance(parent_node, Package) \
+ or parent_node.operation not in ("merge", "nomerge"):
+ continue
+ # We need to traverse all priorities here, in order to
+ # ensure that a package with an unsatisfied dependency
+ # won't get pulled in, even indirectly via a soft
+ # dependency.
+ unsatisfied_stack.append((parent_node, atom))
+
+ unsatisfied_tuples = frozenset(tuple(parent_node)
+ for parent_node in unsatisfied_parents
+ if isinstance(parent_node, Package))
+ pruned_mergelist = []
+ for x in mergelist:
+ if isinstance(x, list) and \
+ tuple(x) not in unsatisfied_tuples:
+ pruned_mergelist.append(x)
+
+ # If the mergelist doesn't shrink then this loop is infinite.
+ if len(pruned_mergelist) == len(mergelist):
+ # This happens if a package can't be dropped because
+ # it's already installed, but it has unsatisfied PDEPEND.
+ raise
+ mergelist[:] = pruned_mergelist
+
+ # Exclude installed packages that have been removed from the graph due
+ # to failure to build/install runtime dependencies after the dependent
+ # package has already been installed.
+ dropped_tasks.update((pkg, atoms) for pkg, atoms in \
+ unsatisfied_parents.items() if pkg.operation != "nomerge")
+
+ del e, graph, traversed_nodes, \
+ unsatisfied_parents, unsatisfied_stack
+ continue
+ else:
+ break
+ return (success, mydepgraph, dropped_tasks)
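+
+# Illustrative shape of the resume data consumed above (hypothetical
+# values): mtimedb["resume"] is a dict such as
+#
+#     {"mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"]],
+#      "favorites": ["app-misc/foo"]}
+#
+# where each mergelist entry is [pkg_type, root, cpv, action].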
+
+def get_mask_info(root_config, cpv, pkgsettings,
+ db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
+ try:
+ metadata = dict(zip(db_keys,
+ db.aux_get(cpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ metadata = None
+
+ if metadata is None:
+ mreasons = ["corruption"]
+ else:
+ eapi = metadata['EAPI']
+ if not portage.eapi_is_supported(eapi):
+ mreasons = ['EAPI %s' % eapi]
+ else:
+ pkg = Package(type_name=pkg_type, root_config=root_config,
+ cpv=cpv, built=built, installed=installed, metadata=metadata)
+
+ modified_use = None
+ if _pkg_use_enabled is not None:
+ modified_use = _pkg_use_enabled(pkg)
+
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
+
+ return metadata, mreasons
+
+def show_masked_packages(masked_packages):
+ shown_licenses = set()
+ shown_comments = set()
+ # Maybe there are both an ebuild and a binary. Only
+ # show one of them to avoid redundant output.
+ shown_cpvs = set()
+ have_eapi_mask = False
+ for (root_config, pkgsettings, cpv, repo,
+ metadata, mreasons) in masked_packages:
+ output_cpv = cpv
+ if repo:
+ output_cpv += _repo_separator + repo
+ if output_cpv in shown_cpvs:
+ continue
+ shown_cpvs.add(output_cpv)
+ eapi_masked = metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"])
+ if eapi_masked:
+ have_eapi_mask = True
+ # When masked by EAPI, metadata is mostly useless since
+ # it doesn't contain essential things like SLOT.
+ metadata = None
+ comment, filename = None, None
+ if not eapi_masked and \
+ "package.mask" in mreasons:
+ comment, filename = \
+ portage.getmaskingreason(
+ cpv, metadata=metadata,
+ settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi,
+ return_location=True)
+ missing_licenses = []
+ if not eapi_masked and metadata is not None:
+ try:
+ missing_licenses = \
+ pkgsettings._getMissingLicenses(
+ cpv, metadata)
+ except portage.exception.InvalidDependString:
+ # This will have already been reported
+ # above via mreasons.
+ pass
+
+ writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+ noiselevel=-1)
+
+ if comment and comment not in shown_comments:
+ writemsg(filename + ":\n" + comment + "\n",
+ noiselevel=-1)
+ shown_comments.add(comment)
+ portdb = root_config.trees["porttree"].dbapi
+ for l in missing_licenses:
+ if l in shown_licenses:
+ continue
+ l_path = portdb.findLicensePath(l)
+ if l_path is None:
+ continue
+ msg = ("A copy of the '%s' license" + \
+ " is located at '%s'.\n\n") % (l, l_path)
+ writemsg(msg, noiselevel=-1)
+ shown_licenses.add(l)
+ return have_eapi_mask
+
+def show_mask_docs():
+ writemsg("For more information, see the MASKED PACKAGES "
+ "section in the emerge\n", noiselevel=-1)
+ writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+
+def show_blocker_docs_link():
+ writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
+ writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
+ writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
+
+def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ return [mreason.message for \
+ mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
+
+def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ mreasons = _getmaskingstatus(
+ pkg, settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
+
+ if not pkg.installed:
+ if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
+ mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
+ pkg._metadata["CHOST"]))
+
+ if pkg.built and not pkg.installed:
+ if not "EPREFIX" in pkg.metadata:
+ mreasons.append(_MaskReason("EPREFIX", "missing EPREFIX"))
+ elif len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]):
+ mreasons.append(_MaskReason("EPREFIX", "EPREFIX: '%s' too small" % pkg.metadata["EPREFIX"]))
+
+ if pkg.invalid:
+ for msgs in pkg.invalid.values():
+ for msg in msgs:
+ mreasons.append(
+ _MaskReason("invalid", "invalid: %s" % (msg,)))
+
+ if not pkg._metadata["SLOT"]:
+ mreasons.append(
+ _MaskReason("invalid", "SLOT: undefined"))
+
+ return mreasons
diff --git a/usr/lib/portage/pym/_emerge/emergelog.py b/usr/lib/portage/pym/_emerge/emergelog.py
new file mode 100644
index 0000000..9397aca
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/emergelog.py
@@ -0,0 +1,57 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import secpass
+from portage.output import xtermTitle
+from portage.const import EPREFIX
+
+# We disable emergelog by default, since it's called from
+# dblink.merge() and we don't want that to trigger log writes
+# unless it's really called via emerge.
+_disable = True
+_emerge_log_dir = EPREFIX + '/var/log'
+
+def emergelog(xterm_titles, mystr, short_msg=None):
+
+ if _disable:
+ return
+
+ mystr = _unicode_decode(mystr)
+
+ if short_msg is not None:
+ short_msg = _unicode_decode(short_msg)
+
+ if xterm_titles and short_msg:
+ if "HOSTNAME" in os.environ:
+ short_msg = os.environ["HOSTNAME"]+": "+short_msg
+ xtermTitle(short_msg)
+ try:
+ file_path = os.path.join(_emerge_log_dir, 'emerge.log')
+ existing_log = os.path.isfile(file_path)
+ mylogfile = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ if not existing_log:
+ portage.util.apply_secpass_permissions(file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+ mylock = portage.locks.lockfile(file_path)
+ try:
+ mylogfile.write("%.0f: %s\n" % (time.time(), mystr))
+ mylogfile.close()
+ finally:
+ portage.locks.unlockfile(mylock)
+ except (IOError,OSError,portage.exception.PortageException) as e:
+ if secpass >= 1:
+ portage.util.writemsg("emergelog(): %s\n" % (e,), noiselevel=-1)
diff --git a/usr/lib/portage/pym/_emerge/getloadavg.py b/usr/lib/portage/pym/_emerge/getloadavg.py
new file mode 100644
index 0000000..e4cb009
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/getloadavg.py
@@ -0,0 +1,37 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+import platform
+
+getloadavg = getattr(os, "getloadavg", None)
+if getloadavg is None:
+ def getloadavg():
+ """
+ Uses /proc/loadavg to emulate os.getloadavg().
+ Raises OSError if the load average was unobtainable.
+ """
+ try:
+ if platform.system() in ["AIX", "HP-UX"]:
+ loadavg_str = os.popen('LANG=C /usr/bin/uptime 2>/dev/null').readline().split()
+ while loadavg_str[0] != 'load' and loadavg_str[1] != 'average:':
+ loadavg_str = loadavg_str[1:]
+ loadavg_str = loadavg_str[2:5]
+ loadavg_str = [x.rstrip(',') for x in loadavg_str]
+ loadavg_str = ' '.join(loadavg_str)
+ else:
+ with open('/proc/loadavg') as f:
+ loadavg_str = f.readline()
+ except (IOError, IndexError):
+ # getloadavg() is only supposed to raise OSError, so convert
+ raise OSError('unknown')
+ loadavg_split = loadavg_str.split()
+ if len(loadavg_split) < 3:
+ raise OSError('unknown')
+ loadavg_floats = []
+ for i in range(3):
+ try:
+ loadavg_floats.append(float(loadavg_split[i]))
+ except ValueError:
+ raise OSError('unknown')
+ return tuple(loadavg_floats)
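+
+# For reference (illustrative contents), a Linux /proc/loadavg line such
+# as "0.42 0.35 0.30 1/123 4567" yields the tuple (0.42, 0.35, 0.30);
+# anything that cannot be parsed is reported as OSError('unknown').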
diff --git a/usr/lib/portage/pym/_emerge/help.py b/usr/lib/portage/pym/_emerge/help.py
new file mode 100644
index 0000000..8e241a8
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/help.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.output import bold, turquoise, green
+
+def help():
+ print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
+ print(bold("Usage:"))
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("@system")+" | "+turquoise("@world")+" >")
+ print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
+ print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help"))
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvVw")+"]")
+ print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
+ print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
+ print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
+ print(" [ "+green("--newrepo")+" ] [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
+ print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
+ print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
+ print()
+ print(" For more help consult the man page.")
diff --git a/usr/lib/portage/pym/_emerge/is_valid_package_atom.py b/usr/lib/portage/pym/_emerge/is_valid_package_atom.py
new file mode 100644
index 0000000..112afc1
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/is_valid_package_atom.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+from portage.dep import isvalidatom
+
+def insert_category_into_atom(atom, category):
+ # Handle '*' character for "extended syntax" wildcard support.
+ alphanum = re.search(r'[\*\w]', atom, re.UNICODE)
+ if alphanum:
+ ret = atom[:alphanum.start()] + "%s/" % category + \
+ atom[alphanum.start():]
+ else:
+ ret = None
+ return ret
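+
+# Doctest-style sketch (hypothetical atoms): the category is inserted
+# just before the first word (or '*') character, preserving any
+# leading operator:
+#
+#     >>> insert_category_into_atom('foo-1.0', 'null')
+#     'null/foo-1.0'
+#     >>> insert_category_into_atom('>=foo-1.0', 'null')
+#     '>=null/foo-1.0'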
+
+def is_valid_package_atom(x, allow_repo=False):
+ if "/" not in x.split(":")[0]:
+ x2 = insert_category_into_atom(x, 'cat')
+ if x2 != None:
+ x = x2
+ return isvalidatom(x, allow_blockers=False, allow_repo=allow_repo)
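+
+# Illustrative results (assuming the isvalidatom semantics above):
+#
+#     >>> is_valid_package_atom('emerge')         # 'cat/' inserted internally
+#     True
+#     >>> is_valid_package_atom('!app-misc/foo')  # blockers are rejected
+#     False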
diff --git a/usr/lib/portage/pym/_emerge/main.py b/usr/lib/portage/pym/_emerge/main.py
new file mode 100644
index 0000000..a5de7c3
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/main.py
@@ -0,0 +1,1077 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import locale
+import platform
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ '_emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
+)
+from portage import os
+from portage.const import EPREFIX
+from portage.util._argparse import ArgumentParser
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+options=[
+"--alphabetical",
+"--ask-enter-invalid",
+"--buildpkgonly",
+"--changed-use",
+"--changelog", "--columns",
+"--debug",
+"--digest",
+"--emptytree",
+"--verbose-conflicts",
+"--fetchonly", "--fetch-all-uri",
+"--ignore-default-opts",
+"--noconfmem",
+"--newrepo",
+"--newuse",
+"--nodeps", "--noreplace",
+"--nospinner", "--oneshot",
+"--onlydeps", "--pretend",
+"--quiet-repo-display",
+"--quiet-unmerge-warn",
+"--resume",
+"--searchdesc",
+"--skipfirst",
+"--tree",
+"--unordered-display",
+"--update",
+"--verbose-main-repo-display",
+]
+
+shortmapping={
+"1":"--oneshot",
+"B":"--buildpkgonly",
+"c":"--depclean",
+"C":"--unmerge",
+"d":"--debug",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"h":"--help",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps", "O":"--nodeps",
+"p":"--pretend", "P":"--prune",
+"r":"--resume",
+"s":"--search", "S":"--searchdesc",
+"t":"--tree",
+"u":"--update", "U":"--changed-use",
+"V":"--version"
+}
+
+COWSAY_MOO = """
+
+ Larry loves Gentoo (%s)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+ \ ^__^
+ \ (oo)\_______
+ (__)\ )\/\\
+ ||----w |
+ || ||
+
+"""
+
+def multiple_actions(action1, action2):
+ sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
+ sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
+ sys.exit(1)
+
+def insert_optional_args(args):
+ """
+ Parse optional arguments and insert a value if one has
+ not been provided. This is done before feeding the args
+ to the optparse parser since that parser does not support
+ this feature natively.
+ """
+
+ class valid_integers(object):
+ def __contains__(self, s):
+ try:
+ return int(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
+ y_or_n = ('y', 'n',)
+
+ new_args = []
+
+ default_arg_opts = {
+ '--alert' : y_or_n,
+ '--ask' : y_or_n,
+ '--autounmask' : y_or_n,
+ '--autounmask-keep-masks': y_or_n,
+ '--autounmask-unrestricted-atoms' : y_or_n,
+ '--autounmask-write' : y_or_n,
+ '--buildpkg' : y_or_n,
+ '--complete-graph' : y_or_n,
+ '--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
+ '--deselect' : y_or_n,
+ '--binpkg-respect-use' : y_or_n,
+ '--fail-clean' : y_or_n,
+ '--getbinpkg' : y_or_n,
+ '--getbinpkgonly' : y_or_n,
+ '--jobs' : valid_integers,
+ '--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
+ '--package-moves' : y_or_n,
+ '--quiet' : y_or_n,
+ '--quiet-build' : y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--read-news' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
+ '--rebuild-if-new-rev' : y_or_n,
+ '--rebuild-if-new-ver' : y_or_n,
+ '--rebuild-if-unbuilt' : y_or_n,
+ '--rebuilt-binaries' : y_or_n,
+ '--root-deps' : ('rdeps',),
+ '--select' : y_or_n,
+ '--selective' : y_or_n,
+ "--use-ebuild-visibility": y_or_n,
+ '--usepkg' : y_or_n,
+ '--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
+ }
+
+ short_arg_opts = {
+ 'D' : valid_integers,
+ 'j' : valid_integers,
+ }
+
+ # Don't make things like "-kn" expand to "-k n"
+ # since existence of -n makes it too ambiguous.
+ short_arg_opts_n = {
+ 'a' : y_or_n,
+ 'A' : y_or_n,
+ 'b' : y_or_n,
+ 'g' : y_or_n,
+ 'G' : y_or_n,
+ 'k' : y_or_n,
+ 'K' : y_or_n,
+ 'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
+ }
+
+ arg_stack = args[:]
+ arg_stack.reverse()
+ while arg_stack:
+ arg = arg_stack.pop()
+
+ default_arg_choices = default_arg_opts.get(arg)
+ if default_arg_choices is not None:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in default_arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ if arg[:1] != "-" or arg[:2] == "--":
+ new_args.append(arg)
+ continue
+
+ match = None
+ for k, arg_choices in short_arg_opts.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ for k, arg_choices in short_arg_opts_n.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ new_args.append(arg)
+ continue
+
+ if len(arg) == 2:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ # Insert an empty placeholder in order to
+ # satisfy the requirements of optparse.
+
+ new_args.append("-" + match)
+ opt_arg = None
+ saved_opts = None
+
+ if arg[1:2] == match:
+ if match not in short_arg_opts_n and arg[2:] in arg_choices:
+ opt_arg = arg[2:]
+ else:
+ saved_opts = arg[2:]
+ opt_arg = "True"
+ else:
+ saved_opts = arg[1:].replace(match, "")
+ opt_arg = "True"
+
+ if opt_arg is None and arg_stack and \
+ arg_stack[-1] in arg_choices:
+ opt_arg = arg_stack.pop()
+
+ if opt_arg is None:
+ new_args.append("True")
+ else:
+ new_args.append(opt_arg)
+
+ if saved_opts is not None:
+ # Recycle these on arg_stack since they
+ # might contain another match.
+ arg_stack.append("-" + saved_opts)
+
+ return new_args
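+
+# Worked example (hypothetical argv): the rewriting above turns
+#
+#     ['-av', '--jobs', '4', '--ask']
+# into
+#     ['-a', 'True', '-v', 'True', '--jobs', '4', '--ask', 'True']
+#
+# so every optional-argument flag reaches the parser with an explicit
+# value.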
+
+def _find_bad_atoms(atoms, less_strict=False):
+ """
+ Declares all atoms as invalid that have an operator,
+ a use dependency, a blocker or a repo spec.
+ It accepts atoms with wildcards.
+ In less_strict mode it accepts operators and repo specs.
+ """
+ bad_atoms = []
+ for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
+ bad_atom = False
+ try:
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
+ except portage.exception.InvalidAtom:
+ bad_atom = True
+
+ if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
+ bad_atoms.append(x)
+ return bad_atoms
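+
+# Illustrative classification (hypothetical atoms): with the default
+# strictness, '>=dev-lang/python-3' (operator) and 'dev-libs/foo[ssl]'
+# (USE dependency) are reported as bad, while plain names and wildcard
+# atoms such as 'x11-libs/*' are accepted; with less_strict=True the
+# operator form is accepted as well.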
+
+
+def parse_opts(tmpcmdline, silent=False):
+ myaction=None
+ myopts = {}
+ myfiles=[]
+
+ actions = frozenset([
+ "clean", "check-news", "config", "depclean", "help",
+ "info", "list-sets", "metadata", "moo",
+ "prune", "regen", "search",
+ "sync", "unmerge", "version",
+ ])
+
+ longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+ y_or_n = ("y", "n")
+ true_y_or_n = ("True", "y", "n")
+ true_y = ("True", "y")
+ argument_options = {
+
+ "--alert": {
+ "shortopt" : "-A",
+ "help" : "alert (terminal bell) on prompts",
+ "choices" : true_y_or_n
+ },
+
+ "--ask": {
+ "shortopt" : "-a",
+ "help" : "prompt before performing any actions",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask": {
+ "help" : "automatically unmask packages",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-unrestricted-atoms": {
+ "help" : "write autounmask changes with >= atoms if possible",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-keep-masks": {
+ "help" : "don't add package.unmask entries",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-write": {
+ "help" : "write changes made by --autounmask to disk",
+ "choices" : true_y_or_n
+ },
+
+ "--accept-properties": {
+ "help":"temporarily override ACCEPT_PROPERTIES",
+ "action":"store"
+ },
+
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
+ "--backtrack": {
+
+ "help" : "Specifies how many times to backtrack if dependency " + \
+ "calculation fails ",
+
+ "action" : "store"
+ },
+
+ "--buildpkg": {
+ "shortopt" : "-b",
+ "help" : "build binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--buildpkg-exclude": {
+ "help" :"A space separated list of package atoms for which " + \
+ "no binary packages should be built. This option overrides all " + \
+ "possible ways to enable building of binary packages.",
+
+ "action" : "append"
+ },
+
+ "--config-root": {
+ "help":"specify the location for portage configuration files",
+ "action":"store"
+ },
+ "--color": {
+ "help":"enable or disable color output",
+ "choices":("y", "n")
+ },
+
+ "--complete-graph": {
+ "help" : "completely account for all known dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--complete-graph-if-new-use": {
+ "help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
+ "choices" : y_or_n
+ },
+
+ "--complete-graph-if-new-ver": {
+ "help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
+ "choices" : y_or_n
+ },
+
+ "--deep": {
+
+ "shortopt" : "-D",
+
+ "help" : "Specifies how deep to recurse into dependencies " + \
+ "of packages given as arguments. If no argument is given, " + \
+ "depth is unlimited. Default behavior is to skip " + \
+ "dependencies of installed packages.",
+
+ "action" : "store"
+ },
+
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
+ "--deselect": {
+ "help" : "remove atoms/sets from the world file",
+ "choices" : true_y_or_n
+ },
+
+ "--dynamic-deps": {
+ "help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
+ "choices": y_or_n
+ },
+
+ "--exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge won't install any ebuild or binary package that " + \
+ "matches any of the given package atoms.",
+
+ "action" : "append"
+ },
+
+ "--fail-clean": {
+ "help" : "clean temp files after build failure",
+ "choices" : true_y_or_n
+ },
+
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
+ "been recorded when packages where built. This option is intended "
+ "only for debugging purposes, and it only affects built packages "
+ "that specify slot/sub-slot := operator dependencies using the "
+ "experimental \"4-slot-abi\" EAPI.",
+ "choices": y_or_n
+ },
+
+ "--jobs": {
+
+ "shortopt" : "-j",
+
+ "help" : "Specifies the number of packages to build " + \
+ "simultaneously.",
+
+ "action" : "store"
+ },
+
+ "--keep-going": {
+ "help" : "continue as much as possible after an error",
+ "choices" : true_y_or_n
+ },
+
+ "--load-average": {
+
+ "help" :"Specifies that no new builds should be started " + \
+ "if there are other builds running and the load average " + \
+ "is at least LOAD (a floating-point number).",
+
+ "action" : "store"
+ },
+
+ "--misspell-suggestions": {
+ "help" : "enable package name misspell suggestions",
+ "choices" : ("y", "n")
+ },
+
+ "--with-bdeps": {
+ "help":"include unnecessary build time dependencies",
+ "choices":("y", "n")
+ },
+ "--reinstall": {
+ "help":"specify conditions to trigger package reinstallation",
+ "choices":["changed-use"]
+ },
+
+ "--reinstall-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will treat matching packages as if they are not " + \
+ "installed, and reinstall them if necessary. Implies --deep.",
+
+ "action" : "append",
+ },
+
+ "--binpkg-respect-use": {
+ "help" : "discard binary packages if their use flags \
+ don't match the current configuration",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkg": {
+ "shortopt" : "-g",
+ "help" : "fetch binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkgonly": {
+ "shortopt" : "-G",
+ "help" : "fetch binary packages only",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkg-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will ignore matching binary packages. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild these packages due to the " + \
+ "--rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-ignore": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild packages that depend on matching " + \
+ "packages due to the --rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--package-moves": {
+ "help" : "perform package moves when necessary",
+ "choices" : true_y_or_n
+ },
+
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
+ "--quiet": {
+ "shortopt" : "-q",
+ "help" : "reduced or condensed output",
+ "choices" : true_y_or_n
+ },
+
+ "--quiet-build": {
+ "help" : "redirect build output to logs",
+ "choices" : true_y_or_n,
+ },
+
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--read-news": {
+ "help" : "offer to read unread news via eselect",
+ "choices" : true_y_or_n
+ },
+
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
+ "operator dependencies can be satisfied by a newer slot, so that "
+ "older packages slots will become eligible for removal by the "
+ "--depclean action as soon as possible."),
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-rev": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version and revision.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-ver": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version. Revision numbers are ignored.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-unbuilt": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries": {
+ "help" : "replace installed packages with binary " + \
+ "packages that have been rebuilt",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries-timestamp": {
+ "help" : "use only binaries that are newer than this " + \
+ "timestamp for --rebuilt-binaries",
+ "action" : "store"
+ },
+
+ "--root": {
+ "help" : "specify the target root filesystem for merging packages",
+ "action" : "store"
+ },
+
+ "--root-deps": {
+ "help" : "modify interpretation of depedencies",
+ "choices" :("True", "rdeps")
+ },
+
+ "--select": {
+ "shortopt" : "-w",
+ "help" : "add specified packages to the world set " + \
+ "(inverse of --oneshot)",
+ "choices" : true_y_or_n
+ },
+
+ "--selective": {
+ "help" : "identical to --noreplace",
+ "choices" : true_y_or_n
+ },
+
+ "--use-ebuild-visibility": {
+ "help" : "use unbuilt ebuild metadata for visibility checks on built packages",
+ "choices" : true_y_or_n
+ },
+
+ "--useoldpkg-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will prefer matching binary packages over newer unbuilt packages. ",
+
+ "action" : "append",
+ },
+
+ "--usepkg": {
+ "shortopt" : "-k",
+ "help" : "use binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkgonly": {
+ "shortopt" : "-K",
+ "help" : "use only binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+		},
+
+		"--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
+ }
+
+ parser = ArgumentParser(add_help=False)
+
+ for action_opt in actions:
+ parser.add_argument("--" + action_opt, action="store_true",
+ dest=action_opt.replace("-", "_"), default=False)
+ for myopt in options:
+ parser.add_argument(myopt, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+ for shortopt, longopt in shortmapping.items():
+ parser.add_argument("-" + shortopt, action="store_true",
+ dest=longopt.lstrip("--").replace("-", "_"), default=False)
+ for myalias, myopt in longopt_aliases.items():
+ parser.add_argument(myalias, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+
+ for myopt, kwargs in argument_options.items():
+ shortopt = kwargs.pop("shortopt", None)
+ args = [myopt]
+ if shortopt is not None:
+ args.append(shortopt)
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
+ *args, **kwargs)
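+	# Illustrative expansion of one entry from argument_options: the
+	# "--quiet" definition above is registered roughly as
+	#   parser.add_argument("--quiet", "-q", dest="quiet",
+	#       help="reduced or condensed output", choices=true_y_or_n)
+	# i.e. the dict keys map one-to-one onto add_argument() keywords.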
+
+ tmpcmdline = insert_optional_args(tmpcmdline)
+
+ myoptions, myargs = parser.parse_known_args(args=tmpcmdline)
+
+ if myoptions.alert in true_y:
+ myoptions.alert = True
+ else:
+ myoptions.alert = None
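+	# The y/n options arrive as strings; values in true_y (presumably
+	# ("True", "y")) collapse to True, and anything else to None so the
+	# flag is simply omitted from myopts in the loops near the end.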
+
+ if myoptions.ask in true_y:
+ myoptions.ask = True
+ else:
+ myoptions.ask = None
+
+ if myoptions.autounmask in true_y:
+ myoptions.autounmask = True
+
+ if myoptions.autounmask_unrestricted_atoms in true_y:
+ myoptions.autounmask_unrestricted_atoms = True
+
+ if myoptions.autounmask_keep_masks in true_y:
+ myoptions.autounmask_keep_masks = True
+
+ if myoptions.autounmask_write in true_y:
+ myoptions.autounmask_write = True
+
+ if myoptions.buildpkg in true_y:
+ myoptions.buildpkg = True
+
+ if myoptions.buildpkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.buildpkg_exclude, less_strict=True)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --buildpkg-exclude parameter: '%s'\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.changed_use is not False:
+ myoptions.reinstall = "changed-use"
+ myoptions.changed_use = False
+
+ if myoptions.deselect in true_y:
+ myoptions.deselect = True
+
+ if myoptions.binpkg_respect_use is not None:
+ if myoptions.binpkg_respect_use in true_y:
+ myoptions.binpkg_respect_use = 'y'
+ else:
+ myoptions.binpkg_respect_use = 'n'
+
+ if myoptions.complete_graph in true_y:
+ myoptions.complete_graph = True
+ else:
+ myoptions.complete_graph = None
+
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
+
+ if myoptions.exclude:
+ bad_atoms = _find_bad_atoms(myoptions.exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.reinstall_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.reinstall_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --reinstall-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_ignore:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_ignore)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-ignore parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.usepkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.usepkg_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --usepkg-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.useoldpkg_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.useoldpkg_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --useoldpkg-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
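+	# Illustrative: "sys-apps/portage" and "dev-lang/python:2.7" pass the
+	# _find_bad_atoms() checks above, while a versioned atom such as
+	# ">=sys-apps/portage-2.2" is rejected (only package names and slot
+	# atoms, optionally with wildcards, are allowed).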
+
+ if myoptions.fail_clean in true_y:
+ myoptions.fail_clean = True
+
+ if myoptions.getbinpkg in true_y:
+ myoptions.getbinpkg = True
+ else:
+ myoptions.getbinpkg = None
+
+ if myoptions.getbinpkgonly in true_y:
+ myoptions.getbinpkgonly = True
+ else:
+ myoptions.getbinpkgonly = None
+
+ if myoptions.keep_going in true_y:
+ myoptions.keep_going = True
+ else:
+ myoptions.keep_going = None
+
+ if myoptions.package_moves in true_y:
+ myoptions.package_moves = True
+
+ if myoptions.quiet in true_y:
+ myoptions.quiet = True
+ else:
+ myoptions.quiet = None
+
+ if myoptions.quiet_build in true_y:
+ myoptions.quiet_build = 'y'
+
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.read_news in true_y:
+ myoptions.read_news = True
+ else:
+ myoptions.read_news = None
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
+
+ if myoptions.rebuild_if_new_ver in true_y:
+ myoptions.rebuild_if_new_ver = True
+ else:
+ myoptions.rebuild_if_new_ver = None
+
+ if myoptions.rebuild_if_new_rev in true_y:
+ myoptions.rebuild_if_new_rev = True
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_new_rev = None
+
+ if myoptions.rebuild_if_unbuilt in true_y:
+ myoptions.rebuild_if_unbuilt = True
+ myoptions.rebuild_if_new_rev = None
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_unbuilt = None
+
+ if myoptions.rebuilt_binaries in true_y:
+ myoptions.rebuilt_binaries = True
+
+ if myoptions.root_deps in true_y:
+ myoptions.root_deps = True
+
+ if myoptions.select in true_y:
+ myoptions.select = True
+ myoptions.oneshot = False
+ elif myoptions.select == "n":
+ myoptions.oneshot = True
+
+ if myoptions.selective in true_y:
+ myoptions.selective = True
+
+ if myoptions.backtrack is not None:
+
+ try:
+ backtrack = int(myoptions.backtrack)
+ except (OverflowError, ValueError):
+ backtrack = -1
+
+ if backtrack < 0:
+ backtrack = None
+ if not silent:
+ parser.error("Invalid --backtrack parameter: '%s'\n" % \
+ (myoptions.backtrack,))
+
+ myoptions.backtrack = backtrack
+
+ if myoptions.deep is not None:
+ deep = None
+ if myoptions.deep == "True":
+ deep = True
+ else:
+ try:
+ deep = int(myoptions.deep)
+ except (OverflowError, ValueError):
+ deep = -1
+
+ if deep is not True and deep < 0:
+ deep = None
+ if not silent:
+ parser.error("Invalid --deep parameter: '%s'\n" % \
+ (myoptions.deep,))
+
+ myoptions.deep = deep
+
+ if myoptions.jobs:
+ jobs = None
+ if myoptions.jobs == "True":
+ jobs = True
+ else:
+ try:
+ jobs = int(myoptions.jobs)
+ except ValueError:
+ jobs = -1
+
+ if jobs is not True and \
+ jobs < 1:
+ jobs = None
+ if not silent:
+ parser.error("Invalid --jobs parameter: '%s'\n" % \
+ (myoptions.jobs,))
+
+ myoptions.jobs = jobs
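+	# "True" here corresponds to --jobs given without a value, which
+	# removes the job limit entirely; any integer >= 1 caps the number
+	# of concurrent builds.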
+
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
+ if myoptions.load_average:
+ try:
+ load_average = float(myoptions.load_average)
+ except ValueError:
+ load_average = 0.0
+
+ if load_average <= 0.0:
+ load_average = None
+ if not silent:
+ parser.error("Invalid --load-average parameter: '%s'\n" % \
+ (myoptions.load_average,))
+
+ myoptions.load_average = load_average
+
+ if myoptions.rebuilt_binaries_timestamp:
+ try:
+ rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
+ except ValueError:
+ rebuilt_binaries_timestamp = -1
+
+ if rebuilt_binaries_timestamp < 0:
+ rebuilt_binaries_timestamp = 0
+ if not silent:
+ parser.error("Invalid --rebuilt-binaries-timestamp parameter: '%s'\n" % \
+ (myoptions.rebuilt_binaries_timestamp,))
+
+ myoptions.rebuilt_binaries_timestamp = rebuilt_binaries_timestamp
+
+	if myoptions.use_ebuild_visibility in true_y:
+		myoptions.use_ebuild_visibility = True
+	# otherwise it stays None or "n"
+
+ if myoptions.usepkg in true_y:
+ myoptions.usepkg = True
+ else:
+ myoptions.usepkg = None
+
+ if myoptions.usepkgonly in true_y:
+ myoptions.usepkgonly = True
+ else:
+ myoptions.usepkgonly = None
+
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
+ for myopt in options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
+ if v:
+ myopts[myopt] = True
+
+ for myopt in argument_options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
+ if v is not None:
+ myopts[myopt] = v
+
+ if myoptions.searchdesc:
+ myoptions.search = True
+
+ for action_opt in actions:
+ v = getattr(myoptions, action_opt.replace("-", "_"))
+ if v:
+ if myaction:
+ multiple_actions(myaction, action_opt)
+ sys.exit(1)
+ myaction = action_opt
+
+ if myaction is None and myoptions.deselect is True:
+ myaction = 'deselect'
+
+ myfiles += myargs
+
+ return myaction, myopts, myfiles
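+
+# parse_opts() example (illustrative): parse_opts(["-kv", "--jobs=2", "world"])
+# should yield myaction=None, myopts={"--usepkg": True, "--verbose": True,
+# "--jobs": 2} and myfiles=["world"], assuming insert_optional_args() (defined
+# earlier in this file) inserts the implied "True" values after -k and -v.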
+
+def profile_check(trees, myaction):
+ if myaction in ("help", "info", "search", "sync", "version"):
+ return os.EX_OK
+ for root_trees in trees.values():
+ if root_trees["root_config"].settings.profiles:
+ continue
+ # generate some profile related warning messages
+ validate_ebuild_environment(trees)
+ msg = ("Your current profile is invalid. If you have just changed "
+ "your profile configuration, you should revert back to the "
+ "previous configuration. Allowed actions are limited to "
+ "--help, --info, --search, --sync, and --version.")
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ return os.EX_OK
+
+def emerge_main(args=None):
+ """
+ @param args: command arguments (default: sys.argv[1:])
+ @type args: list
+ """
+ if args is None:
+ args = sys.argv[1:]
+
+ args = portage._decode_argv(args)
+
+ # Use system locale.
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error as e:
+ writemsg_level("setlocale: %s\n" % e, level=logging.WARN)
+
+ # Disable color until we're sure that it should be enabled (after
+ # EMERGE_DEFAULT_OPTS has been parsed).
+ portage.output.havecolor = 0
+
+ # This first pass is just for options that need to be known as early as
+ # possible, such as --config-root. They will be parsed again later,
+	# together with EMERGE_DEFAULT_OPTS (which may vary depending on
+	# the value of --config-root).
+ myaction, myopts, myfiles = parse_opts(args, silent=True)
+ if "--debug" in myopts:
+ os.environ["PORTAGE_DEBUG"] = "1"
+ if "--config-root" in myopts:
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+ if "--root" in myopts:
+ os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
+ if "--accept-properties" in myopts:
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+ if myaction == "sync":
+ portage._sync_mode = True
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+ rval = profile_check(emerge_config.trees, emerge_config.action)
+ if rval != os.EX_OK:
+ return rval
+
+ tmpcmdline = []
+ if "--ignore-default-opts" not in myopts:
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
+ tmpcmdline.extend(args)
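+	# EMERGE_DEFAULT_OPTS comes first in tmpcmdline, so for single-valued
+	# options the command line wins on conflict (argparse keeps the value
+	# parsed last); e.g. EMERGE_DEFAULT_OPTS="--jobs=2" plus a --jobs=4
+	# argument leaves --jobs at 4. (Illustrative example.)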
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
+
+ try:
+ return run_action(emerge_config)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
+ continue
+ x["porttree"].dbapi.close_caches()
diff --git a/usr/lib/portage/pym/_emerge/post_emerge.py b/usr/lib/portage/pym/_emerge/post_emerge.py
new file mode 100644
index 0000000..0cb533c
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/post_emerge.py
@@ -0,0 +1,168 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ errors = cleanlogs.clean(settings=settings)
+ if errors:
+ out = portage.output.EOutput()
+ for msg in errors:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return False
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ if all(v == 0 for v in news_counts.values()):
+ return False
+ display_news_notifications(news_counts)
+ return True
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+	@param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+	if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+				" %s spawn failed for %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
diff --git a/usr/lib/portage/pym/_emerge/resolver/__init__.py b/usr/lib/portage/pym/_emerge/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/_emerge/resolver/backtracking.py b/usr/lib/portage/pym/_emerge/resolver/backtracking.py
new file mode 100644
index 0000000..c29b9d4
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/backtracking.py
@@ -0,0 +1,264 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+
+class BacktrackParameter(object):
+
+ __slots__ = (
+ "needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
+ "prune_rebuilds", "rebuild_list", "reinstall_list", "needed_p_mask_changes",
+ "slot_operator_mask_built", "slot_operator_replace_installed"
+ )
+
+ def __init__(self):
+ self.needed_unstable_keywords = set()
+ self.needed_p_mask_changes = set()
+ self.runtime_pkg_mask = {}
+ self.needed_use_config_changes = {}
+ self.needed_license_changes = {}
+ self.rebuild_list = set()
+ self.reinstall_list = set()
+ self.slot_operator_replace_installed = set()
+ self.slot_operator_mask_built = set()
+ self.prune_rebuilds = False
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = BacktrackParameter()
+ memo[id(self)] = result
+
+ #Shallow copies are enough here, as we only need to ensure that nobody adds stuff
+ #to our sets and dicts. The existing content is immutable.
+ result.needed_unstable_keywords = copy.copy(self.needed_unstable_keywords)
+ result.needed_p_mask_changes = copy.copy(self.needed_p_mask_changes)
+ result.needed_use_config_changes = copy.copy(self.needed_use_config_changes)
+ result.needed_license_changes = copy.copy(self.needed_license_changes)
+ result.rebuild_list = copy.copy(self.rebuild_list)
+ result.reinstall_list = copy.copy(self.reinstall_list)
+ result.slot_operator_replace_installed = copy.copy(self.slot_operator_replace_installed)
+ result.slot_operator_mask_built = self.slot_operator_mask_built.copy()
+ result.prune_rebuilds = self.prune_rebuilds
+
+ # runtime_pkg_mask contains nested dicts that must also be copied
+ result.runtime_pkg_mask = {}
+ for k, v in self.runtime_pkg_mask.items():
+ result.runtime_pkg_mask[k] = copy.copy(v)
+
+ return result
+
+ def __eq__(self, other):
+ return self.needed_unstable_keywords == other.needed_unstable_keywords and \
+ self.needed_p_mask_changes == other.needed_p_mask_changes and \
+ self.runtime_pkg_mask == other.runtime_pkg_mask and \
+ self.needed_use_config_changes == other.needed_use_config_changes and \
+ self.needed_license_changes == other.needed_license_changes and \
+ self.rebuild_list == other.rebuild_list and \
+ self.reinstall_list == other.reinstall_list and \
+ self.slot_operator_replace_installed == other.slot_operator_replace_installed and \
+ self.slot_operator_mask_built == other.slot_operator_mask_built and \
+ self.prune_rebuilds == other.prune_rebuilds
+
+
+class _BacktrackNode(object):
+
+ __slots__ = (
+ "parameter", "depth", "mask_steps", "terminal",
+ )
+
+ def __init__(self, parameter=BacktrackParameter(), depth=0, mask_steps=0, terminal=True):
+ self.parameter = parameter
+ self.depth = depth
+ self.mask_steps = mask_steps
+ self.terminal = terminal
+
+ def __eq__(self, other):
+ return self.parameter == other.parameter
+
+
+class Backtracker(object):
+
+ __slots__ = (
+ "_max_depth", "_unexplored_nodes", "_current_node", "_nodes", "_root",
+ )
+
+ def __init__(self, max_depth):
+ self._max_depth = max_depth
+ self._unexplored_nodes = []
+ self._current_node = None
+ self._nodes = []
+
+ self._root = _BacktrackNode()
+ self._add(self._root)
+
+
+ def _add(self, node, explore=True):
+ """
+ Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
+ that we don't backtrack deeper than we are allowed by --backtrack.
+ """
+ if not self._check_runtime_pkg_mask(node.parameter.runtime_pkg_mask):
+ return
+
+ if node.mask_steps <= self._max_depth and node not in self._nodes:
+ if explore:
+ self._unexplored_nodes.append(node)
+ self._nodes.append(node)
+
+
+ def get(self):
+ """
+ Returns a backtrack parameter. The backtrack graph is explored with depth first.
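+		The unexplored list is used as a LIFO stack (append()/pop()),
+		which is what yields the depth-first order.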
+ """
+ if self._unexplored_nodes:
+ node = self._unexplored_nodes.pop()
+ self._current_node = node
+ return copy.deepcopy(node.parameter)
+ else:
+ return None
+
+
+ def __len__(self):
+ return len(self._unexplored_nodes)
+
+ def _check_runtime_pkg_mask(self, runtime_pkg_mask):
+ """
+ If a package gets masked that caused other packages to be masked
+ before, we revert the mask for other packages (bug 375573).
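+
+		Example (illustrative): if pkg A carries a "slot conflict" entry
+		whose only recorded parent is B, and B itself is now masked in
+		runtime_pkg_mask, A's entry can no longer be valid, so the whole
+		parameter set is rejected.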
+ """
+
+ for pkg, mask_info in runtime_pkg_mask.items():
+
+ if "missing dependency" in mask_info or \
+ "slot_operator_mask_built" in mask_info:
+ continue
+
+ entry_is_valid = False
+
+ for ppkg, patom in runtime_pkg_mask[pkg].get("slot conflict", set()):
+ if ppkg not in runtime_pkg_mask:
+ entry_is_valid = True
+ break
+
+ if not entry_is_valid:
+ return False
+
+ return True
+
+ def _feedback_slot_conflicts(self, conflicts_data):
+ # Only create BacktrackNode instances for the first
+ # conflict which occurred, since the conflicts that
+ # occurred later may have been caused by the first
+ # conflict.
+ self._feedback_slot_conflict(conflicts_data[0])
+
+ def _feedback_slot_conflict(self, conflict_data):
+ for pkg, parent_atoms in conflict_data:
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ pkg, {})["slot conflict"] = parent_atoms
+ self._add(new_node)
+
+
+ def _feedback_missing_dep(self, dep):
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ dep.parent, {})["missing dependency"] = \
+ set([(dep.parent, dep.root, dep.atom)])
+
+ self._add(new_node)
+
+
+ def _feedback_config(self, changes, explore=True):
+ """
+ Handle config changes. Don't count config changes for the maximum backtrack depth.
+ """
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ para = new_node.parameter
+
+ for change, data in changes.items():
+ if change == "needed_unstable_keywords":
+ para.needed_unstable_keywords.update(data)
+ elif change == "needed_p_mask_changes":
+ para.needed_p_mask_changes.update(data)
+ elif change == "needed_license_changes":
+ for pkg, missing_licenses in data:
+ para.needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ elif change == "needed_use_config_changes":
+ for pkg, (new_use, new_changes) in data:
+ para.needed_use_config_changes[pkg] = (new_use, new_changes)
+ elif change == "slot_conflict_abi":
+ new_node.terminal = False
+ elif change == "slot_operator_mask_built":
+ para.slot_operator_mask_built.update(data)
+ for pkg, mask_reasons in data.items():
+ para.runtime_pkg_mask.setdefault(pkg,
+ {}).update(mask_reasons)
+ elif change == "slot_operator_replace_installed":
+ para.slot_operator_replace_installed.update(data)
+ elif change == "rebuild_list":
+ para.rebuild_list.update(data)
+ elif change == "reinstall_list":
+ para.reinstall_list.update(data)
+ elif change == "prune_rebuilds":
+ para.prune_rebuilds = True
+ para.slot_operator_replace_installed.clear()
+ for pkg in para.slot_operator_mask_built:
+ runtime_masks = para.runtime_pkg_mask.get(pkg)
+ if runtime_masks is None:
+ continue
+ runtime_masks.pop("slot_operator_mask_built", None)
+ if not runtime_masks:
+ para.runtime_pkg_mask.pop(pkg)
+ para.slot_operator_mask_built.clear()
+
+ self._add(new_node, explore=explore)
+ self._current_node = new_node
+
+
+ def feedback(self, infos):
+ """
+ Takes information from the depgraph and computes new backtrack parameters to try.
+ """
+ assert self._current_node is not None, "call feedback() only after get() was called"
+
+ #Not all config changes require a restart, that's why they can appear together
+ #with other conflicts.
+ if "config" in infos:
+ self._feedback_config(infos["config"], explore=(len(infos)==1))
+
+ #There is at most one of the following types of conflicts for a given restart.
+ if "slot conflict" in infos:
+ self._feedback_slot_conflicts(infos["slot conflict"])
+ elif "missing dependency" in infos:
+ self._feedback_missing_dep(infos["missing dependency"])
+
+
+ def backtracked(self):
+ """
+ If we didn't backtrack, there is only the root.
+ """
+ return len(self._nodes) > 1
+
+
+ def get_best_run(self):
+ """
+		Like get(), but returns the backtrack parameter that has as many config changes as possible,
+ but has no masks. This makes --autounmask effective, but prevents confusing error messages
+ with "masked by backtracking".
+ """
+ best_node = self._root
+ for node in self._nodes:
+ if node.terminal and node.depth > best_node.depth:
+ best_node = node
+
+ return copy.deepcopy(best_node.parameter)
diff --git a/usr/lib/portage/pym/_emerge/resolver/circular_dependency.py b/usr/lib/portage/pym/_emerge/resolver/circular_dependency.py
new file mode 100644
index 0000000..b710671
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/circular_dependency.py
@@ -0,0 +1,272 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+from itertools import chain, product
+import logging
+
+from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
+from portage.exception import InvalidDependString
+from portage.output import colorize
+from portage.util import writemsg_level
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Package import Package
+
+class circular_dependency_handler(object):
+
+ def __init__(self, depgraph, graph):
+ self.depgraph = depgraph
+ self.graph = graph
+ self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
+
+ if "--debug" in depgraph._frozen_config.myopts:
+ # Show this debug output before doing the calculations
+ # that follow, so at least we have this debug info
+ # if we happen to hit a bug later.
+ writemsg_level("\n\ncircular dependency graph:\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+ self.debug_print()
+
+ self.cycles, self.shortest_cycle = self._find_cycles()
+ #Guess if it is a large cluster of cycles. This usually requires
+ #a global USE change.
+ self.large_cycle_count = len(self.cycles) > 3
+ self.merge_list = self._prepare_reduced_merge_list()
+ #The digraph dump
+ self.circular_dep_message = self._prepare_circular_dep_message()
+ #Suggestions, in machine and human readable form
+ self.solutions, self.suggestions = self._find_suggestions()
+
+ def _find_cycles(self):
+ shortest_cycle = None
+ cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ for cycle in cycles:
+ if not shortest_cycle or len(cycle) < len(shortest_cycle):
+ shortest_cycle = cycle
+ return cycles, shortest_cycle
+
+ def _prepare_reduced_merge_list(self):
+ """
+		Create a merge list to be displayed by depgraph.display().
+ This merge list contains only packages involved in
+ the circular deps.
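+		Leaf nodes are peeled off first; when a cycle leaves no leaf,
+		the oldest remaining node (tempgraph.order[0]) is removed to
+		break the tie.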
+ """
+ display_order = []
+ tempgraph = self.graph.copy()
+ while tempgraph:
+ nodes = tempgraph.leaf_nodes()
+ if not nodes:
+ node = tempgraph.order[0]
+ else:
+ node = nodes[0]
+ display_order.append(node)
+ tempgraph.remove(node)
+ return tuple(display_order)
+
+ def _prepare_circular_dep_message(self):
+ """
+ Like digraph.debug_print(), but prints only the shortest cycle.
+ """
+ if not self.shortest_cycle:
+ return None
+
+ msg = []
+ indent = ""
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ if pos > 0:
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+ else:
+ msg.append(indent + "%s depends on" % pkg)
+ indent += " "
+
+ pkg = self.shortest_cycle[0]
+ parent = self.shortest_cycle[-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+
+ return "\n".join(msg)
+
+ def _get_use_mask_and_force(self, pkg):
+ return pkg.use.mask, pkg.use.force
+
+ def _get_autounmask_changes(self, pkg):
+ needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
+ if needed_use_config_change is None:
+ return frozenset()
+
+ use, changes = needed_use_config_change
+ return frozenset(changes.keys())
+
+ def _find_suggestions(self):
+ if not self.shortest_cycle:
+ return None, None
+
+ suggestions = []
+ final_solutions = {}
+
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ parent_atoms = self.all_parent_atoms.get(pkg)
+
+ if priorities[-1].buildtime:
+ dep = " ".join(parent._metadata[k]
+ for k in Package._buildtime_keys)
+ elif priorities[-1].runtime:
+ dep = parent._metadata["RDEPEND"]
+
+ for ppkg, atom in parent_atoms:
+ if ppkg == parent:
+ changed_parent = ppkg
+ parent_atom = atom.unevaluated_atom
+ break
+
+ try:
+ affecting_use = extract_affecting_use(dep, parent_atom,
+ eapi=parent.eapi)
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ affecting_use = set()
+
+ # Make sure we don't want to change a flag that is
+ # a) in use.mask or use.force
+ # b) changed by autounmask
+
+ usemask, useforce = self._get_use_mask_and_force(parent)
+ autounmask_changes = self._get_autounmask_changes(parent)
+ untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
+
+ affecting_use.difference_update(untouchable_flags)
+
+ #If any of the flags we're going to touch is in REQUIRED_USE, add all
+ #other flags in REQUIRED_USE to affecting_use, to not lose any solution.
+ required_use_flags = get_required_use_flags(
+ parent._metadata.get("REQUIRED_USE", ""),
+ eapi=parent.eapi)
+
+ if affecting_use.intersection(required_use_flags):
+ # TODO: Find out exactly which REQUIRED_USE flags are
+ # entangled with affecting_use. We have to limit the
+ # number of flags since the number of loops is
+ # exponentially related (see bug #374397).
+ total_flags = set()
+ total_flags.update(affecting_use, required_use_flags)
+ total_flags.difference_update(untouchable_flags)
+ if len(total_flags) <= 10:
+ affecting_use = total_flags
+
+ affecting_use = tuple(affecting_use)
+
+ if not affecting_use:
+ continue
+
+ #We iterate over all possible settings of these use flags and gather
+ #a set of possible changes
+ #TODO: Use the information encoded in REQUIRED_USE
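+			#E.g. with affecting_use == ("foo", "bar") the loop tries the
+			#four assignments (off,off), (off,on), (on,off), (on,on); the
+			#cost is 2**len(affecting_use), which is why the expanded flag
+			#set is capped at 10 above.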
+ solutions = set()
+ for use_state in product(("disabled", "enabled"),
+ repeat=len(affecting_use)):
+ current_use = set(self.depgraph._pkg_use_enabled(parent))
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled":
+ current_use.add(flag)
+ else:
+ current_use.discard(flag)
+ try:
+ reduced_dep = use_reduce(dep,
+ uselist=current_use, flat=True)
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ reduced_dep = None
+
+ if reduced_dep is not None and \
+ parent_atom not in reduced_dep:
+ #We found an assignment that removes the atom from 'dep'.
+ #Make sure it doesn't conflict with REQUIRED_USE.
+ required_use = parent._metadata.get("REQUIRED_USE", "")
+
+ if check_required_use(required_use, current_use,
+ parent.iuse.is_valid_flag,
+ eapi=parent.eapi):
+ use = self.depgraph._pkg_use_enabled(parent)
+ solution = set()
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled" and \
+ flag not in use:
+ solution.add((flag, True))
+ elif state == "disabled" and \
+ flag in use:
+ solution.add((flag, False))
+ solutions.add(frozenset(solution))
+
+ for solution in solutions:
+ ignore_solution = False
+ for other_solution in solutions:
+ if solution is other_solution:
+ continue
+ if solution.issuperset(other_solution):
+ ignore_solution = True
+ if ignore_solution:
+ continue
+
+ #Check if a USE change conflicts with use requirements of the parents.
+			#If a requirement is hard, ignore the suggestion.
+			#If the requirement is conditional, warn the user that other changes might be needed.
+ followup_change = False
+ parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
+ for ppkg, atom in parent_parent_atoms:
+
+ atom = atom.unevaluated_atom
+ if not atom.use:
+ continue
+
+ for flag, state in solution:
+ if flag in atom.use.enabled or flag in atom.use.disabled:
+ ignore_solution = True
+ break
+ elif atom.use.conditional:
+ for flags in atom.use.conditional.values():
+ if flag in flags:
+ followup_change = True
+ break
+
+ if ignore_solution:
+ break
+
+ if ignore_solution:
+ continue
+
+ changes = []
+ for flag, state in solution:
+ if state:
+ changes.append(colorize("red", "+"+flag))
+ else:
+ changes.append(colorize("blue", "-"+flag))
+ msg = "- %s (Change USE: %s)\n" \
+ % (parent.cpv, " ".join(changes))
+ if followup_change:
+ msg += " (This change might require USE changes on parent packages.)"
+ suggestions.append(msg)
+ final_solutions.setdefault(pkg, set()).add(solution)
+
+ return final_solutions, suggestions
+
+ def debug_print(self):
+ """
+ Create a copy of the digraph, prune all root nodes,
+ and call the debug_print() method.
+ """
+ graph = self.graph.copy()
+ while True:
+ root_nodes = graph.root_nodes(
+ ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ if not root_nodes:
+ break
+ graph.difference_update(root_nodes)
+
+ graph.debug_print()
diff --git a/usr/lib/portage/pym/_emerge/resolver/output.py b/usr/lib/portage/pym/_emerge/resolver/output.py
new file mode 100644
index 0000000..aefc3f4
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/output.py
@@ -0,0 +1,1022 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Resolver output display operation.
+"""
+
+from __future__ import unicode_literals
+
+__all__ = (
+ "Display", "format_unmatched_atom",
+ )
+
+import sys
+
+import portage
+from portage import os
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import cpvequal, _repo_separator, _slot_separator
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidDependString, SignatureException
+from portage.localization import localized_size
+from portage.package.ebuild.config import _get_feature_flags
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.output import ( blue, colorize, create_color_func,
+ darkblue, darkgreen, green, nc_len, teal)
+bad = create_color_func("BAD")
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg_stdout
+from portage.versions import best, cpv_getversion
+
+from _emerge.Blocker import Blocker
+from _emerge.create_world_atom import create_world_atom
+from _emerge.resolver.output_helpers import ( _DisplayConfig, _tree_display,
+ _PackageCounters, _create_use_string, _calc_changelog, PkgInfo)
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ _unicode = str
+else:
+ _unicode = unicode
+
+class Display(object):
+	"""Formats and outputs the depgraph supplied to it for merge/re-merge, etc.
+
+ __call__()
+ @param depgraph: list
+ @param favorites: defaults to []
+ @param verbosity: integer, defaults to None
+ """
+
+ def __init__(self):
+ self.changelogs = []
+ self.print_msg = []
+ self.blockers = []
+ self.counters = _PackageCounters()
+ self.resolver = None
+ self.resolved = None
+ self.vardb = None
+ self.portdb = None
+ self.verboseadd = ''
+ self.oldlp = None
+ self.myfetchlist = None
+ self.indent = ''
+ self.use_expand = None
+ self.use_expand_hidden = None
+ self.pkgsettings = None
+ self.forced_flags = None
+ self.newlp = None
+ self.conf = None
+ self.blocker_style = None
+
+
+ def _blockers(self, blocker):
+ """Adds colorized strings to
+ self.print_msg and self.blockers
+
+ @param blocker: _emerge.Blocker.Blocker instance
+ @rtype: bool
+ Modifies class globals: self.blocker_style, self.resolved,
+ self.print_msg
+ """
+ if blocker.satisfied:
+ self.blocker_style = "PKG_BLOCKER_SATISFIED"
+ addl = "%s " % (colorize(self.blocker_style, "b"),)
+ else:
+ self.blocker_style = "PKG_BLOCKER"
+ addl = "%s " % (colorize(self.blocker_style, "B"),)
+ addl += self.empty_space_in_brackets()
+ self.resolved = dep_expand(
+ _unicode(blocker.atom).lstrip("!"), mydb=self.vardb,
+ settings=self.pkgsettings
+ )
+ if self.conf.columns and self.conf.quiet:
+ addl += " " + colorize(self.blocker_style, _unicode(self.resolved))
+ else:
+ addl = "[%s %s] %s%s" % \
+ (colorize(self.blocker_style, "blocks"),
+ addl, self.indent,
+ colorize(self.blocker_style, _unicode(self.resolved))
+ )
+ block_parents = self.conf.blocker_parents.parent_nodes(blocker)
+ block_parents = set(_unicode(pnode.cpv) for pnode in block_parents)
+ block_parents = ", ".join(block_parents)
+ if blocker.atom.blocker.overlap.forbid:
+ blocking_desc = "hard blocking"
+ else:
+ blocking_desc = "blocking"
+ if self.resolved != blocker.atom:
+ addl += colorize(self.blocker_style,
+ " (\"%s\" is %s %s)" %
+ (_unicode(blocker.atom).lstrip("!"),
+ blocking_desc, block_parents))
+ else:
+ addl += colorize(self.blocker_style,
+ " (is %s %s)" % (blocking_desc, block_parents))
+ if blocker.satisfied:
+ if not self.conf.columns:
+ self.print_msg.append(addl)
+ else:
+ self.blockers.append(addl)
+
+ def include_mask_str(self):
+ return self.conf.verbosity > 1
+
+ def gen_mask_str(self, pkg):
+ """
+ @param pkg: _emerge.Package.Package instance
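+		@rtype: str, one of '#' (hard masked), '*' (missing keyword),
+			'~' (keyword masked, e.g. ~arch) or ' ' (visible)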
+ """
+ hardmasked = pkg.isHardMasked()
+ mask_str = " "
+
+ if hardmasked:
+ mask_str = colorize("BAD", "#")
+ else:
+ keyword_mask = pkg.get_keyword_mask()
+
+ if keyword_mask is None:
+ pass
+ elif keyword_mask == "missing":
+ mask_str = colorize("BAD", "*")
+ else:
+ mask_str = colorize("WARN", "~")
+
+ return mask_str
+
+ def empty_space_in_brackets(self):
+ space = ""
+ if self.include_mask_str():
+ # add column for mask status
+ space += " "
+ return space
+
+ def map_to_use_expand(self, myvals, forced_flags=False,
+ remove_hidden=True):
+ """Map use expand variables
+
+ @param myvals: list
+ @param forced_flags: bool
+ @param remove_hidden: bool
+ @rtype ret dictionary
+ or ret dict, forced dict.
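+
+		Example (illustrative, assuming PYTHON_TARGETS is in use_expand
+		and not hidden): with myvals = ["ssl", "python_targets_python2_7"]
+		the result is {"PYTHON_TARGETS": ["python2_7"], "USE": ["ssl"]}.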
+ """
+ ret = {}
+ forced = {}
+ for exp in self.use_expand:
+ ret[exp] = []
+ forced[exp] = set()
+ for val in myvals[:]:
+ if val.startswith(exp.lower()+"_"):
+ if val in self.forced_flags:
+ forced[exp].add(val[len(exp)+1:])
+ ret[exp].append(val[len(exp)+1:])
+ myvals.remove(val)
+ ret["USE"] = myvals
+ forced["USE"] = [val for val in myvals \
+ if val in self.forced_flags]
+ if remove_hidden:
+ for exp in self.use_expand_hidden:
+ ret.pop(exp, None)
+ if forced_flags:
+ return ret, forced
+ return ret
+
+
+ def _display_use(self, pkg, pkg_info):
+ """ USE flag display
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd,
+ self.forced_flags
+ """
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ cur_iuse = sorted(pkg.iuse.all)
+
+ if pkg_info.previous_pkg is not None:
+ previous_pkg = pkg_info.previous_pkg
+ old_iuse = sorted(previous_pkg.iuse.all)
+ old_use = previous_pkg.use.enabled
+ is_new = False
+ else:
+ old_iuse = []
+ old_use = []
+ is_new = True
+
+ old_use = [flag for flag in old_use if flag in old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+
+ # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ # are the only thing that triggered reinstallation.
+ reinst_flags_map = {}
+ reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
+ reinst_expand_map = None
+ if reinstall_for_flags:
+ reinst_flags_map = self.map_to_use_expand(
+ list(reinstall_for_flags), remove_hidden=False)
+ for k in list(reinst_flags_map):
+ if not reinst_flags_map[k]:
+ del reinst_flags_map[k]
+ if not reinst_flags_map.get("USE"):
+ reinst_expand_map = reinst_flags_map.copy()
+ reinst_expand_map.pop("USE", None)
+ if reinst_expand_map and \
+ not set(reinst_expand_map).difference(
+ self.use_expand_hidden):
+ self.use_expand_hidden = \
+ set(self.use_expand_hidden).difference(
+ reinst_expand_map)
+
+ cur_iuse_map, iuse_forced = \
+ self.map_to_use_expand(cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(cur_use)
+ old_iuse_map = self.map_to_use_expand(old_iuse)
+ old_use_map = self.map_to_use_expand(old_use)
+
+ use_expand = sorted(self.use_expand)
+ use_expand.insert(0, "USE")
+ feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
+
+ for key in use_expand:
+ if key in self.use_expand_hidden:
+ continue
+ self.verboseadd += _create_use_string(self.conf, key.upper(),
+ cur_iuse_map[key], iuse_forced[key],
+ cur_use_map[key], old_iuse_map[key],
+ old_use_map[key], is_new, feature_flags,
+ reinst_flags_map.get(key))
+ return
+
+
+ @staticmethod
+ def pkgprint(pkg_str, pkg_info):
+		"""Colorizes a string according to pkg_info settings
+
+ @param pkg_str: string
+ @param pkg_info: dictionary
+ @rtype colorized string
+ """
+ if pkg_info.merge:
+ if pkg_info.built:
+ if pkg_info.system:
+ return colorize("PKG_BINARY_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_BINARY_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_BINARY_MERGE", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_MERGE", pkg_str)
+ elif pkg_info.operation == "uninstall":
+ return colorize("PKG_UNINSTALL", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_NOMERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_NOMERGE", pkg_str)
+
+
+ def verbose_size(self, pkg, repoadd_set, pkg_info):
+ """Determines the size of the downloads required
+
+ @param pkg: _emerge.Package.Package instance
+ @param repoadd_set: set of repos to add
+ @param pkg_info: dictionary
+ Modifies class globals: self.myfetchlist, self.counters.totalsize,
+ self.verboseadd, repoadd_set.
+ """
+ mysize = 0
+ if pkg.type_name in ("binary", "ebuild") and pkg_info.merge:
+ db = pkg.root_config.trees[
+ pkg.root_config.pkg_tree_map[pkg.type_name]].dbapi
+ kwargs = {}
+ if pkg.type_name == "ebuild":
+ kwargs["useflags"] = pkg_info.use
+ kwargs["myrepo"] = pkg.repo
+ myfilesdict = None
+ try:
+ myfilesdict = db.getfetchsizes(pkg.cpv,
+ **portage._native_kwargs(kwargs))
+ except InvalidDependString as e:
+ # FIXME: validate SRC_URI earlier
+ depstr, = db.aux_get(pkg.cpv,
+ ["SRC_URI"], myrepo=pkg.repo)
+ show_invalid_depstring_notice(
+ pkg, depstr, _unicode(e))
+ raise
+ except SignatureException:
+ # missing/invalid binary package SIZE signature
+ pass
+ if myfilesdict is None:
+ myfilesdict = "[empty/missing/bad digest]"
+ else:
+ for myfetchfile in myfilesdict:
+ if myfetchfile not in self.myfetchlist:
+ mysize += myfilesdict[myfetchfile]
+ self.myfetchlist.add(myfetchfile)
+ if pkg_info.ordered:
+ self.counters.totalsize += mysize
+ self.verboseadd += localized_size(mysize)
+
+ if self.quiet_repo_display:
+ # overlay verbose
+ # assign index for a previous version in the same slot
+ if pkg_info.previous_pkg is not None:
+ repo_name_prev = pkg_info.previous_pkg.repo
+ else:
+ repo_name_prev = None
+
+ # now use the data to generate output
+ if pkg.installed or pkg_info.previous_pkg is None:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ repo_path_prev = None
+ if repo_name_prev:
+ repo_path_prev = self.portdb.getRepositoryPath(
+ repo_name_prev)
+ if repo_path_prev == pkg_info.repo_path_real:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ self.repoadd = "%s=>%s" % (
+ self.conf.repo_display.repoStr(repo_path_prev),
+ self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+ if self.repoadd:
+ repoadd_set.add(self.repoadd)
+
+
+ def convert_myoldbest(self, pkg, pkg_info):
+ """converts and colorizes a version list to a string
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string.
+ """
+ myoldbest = pkg_info.oldbest_list
+ # Convert myoldbest from a list to a string.
+ myoldbest_str = ""
+ if myoldbest:
+ versions = []
+ for pos, old_pkg in enumerate(myoldbest):
+ key = old_pkg.version
+ if key[-3:] == "-r0":
+ key = key[:-3]
+ if self.conf.verbosity == 3:
+ if pkg_info.attr_display.new_slot:
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in myoldbest + [pkg]):
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot or \
+ old_pkg.slot == pkg.slot and old_pkg.sub_slot != pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+ key += _repo_separator + old_pkg.repo
+ versions.append(key)
+ myoldbest_str = blue("["+", ".join(versions)+"]")
+ return myoldbest_str
+
+ def _append_slot(self, pkg_str, pkg, pkg_info):
+ """Potentially appends slot and subslot to package string.
+
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ if pkg_info.attr_display.new_slot:
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot:
+ pkg_str += "/" + pkg_info.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in pkg_info.oldbest_list + [pkg]):
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot or \
+ any(x.slot == pkg_info.slot and x.sub_slot != pkg_info.sub_slot for x in pkg_info.oldbest_list):
+ pkg_str += "/" + pkg_info.sub_slot
+ return pkg_str
+
+ def _append_repository(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
+ return pkg_str
+
+ def _set_non_root_columns(self, pkg, pkg_info):
+ """sets the indent level and formats the output
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ ver_str = pkg_info.ver
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
+ if self.conf.quiet:
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+darkblue(" "+ver_str)+" "
+ myprint = myprint+pkg_info.oldbest
+ myprint = myprint+darkgreen("to "+pkg.root)
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ myprint = "[%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+" "+darkblue("["+ver_str+"]")+" "
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+" "*(self.oldlp-nc_len(myprint))
+ myprint = myprint+pkg_info.oldbest
+ myprint += darkgreen("to " + pkg.root)
+ return myprint
+
+
+ def _set_root_columns(self, pkg, pkg_info):
+ """sets the indent level and formats the output
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ Modifies self.verboseadd
+ """
+ ver_str = pkg_info.ver
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
+ if self.conf.quiet:
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+" "+green(ver_str)+" "
+ myprint = myprint+pkg_info.oldbest
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+" "+green("["+ver_str+"]")+" "
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
+ myprint += pkg_info.oldbest
+ return myprint
+
+
+ def _set_no_columns(self, pkg, pkg_info):
+ """prints pkg info without column indentation.
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype the updated addl
+ """
+ pkg_str = pkg.cpv
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s %s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ self.indent, self.pkgprint(pkg_str, pkg_info),
+ pkg_info.oldbest)
+ else:
+ myprint = "[%s %s] %s%s %s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display, self.indent,
+ self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
+ return myprint
+
+ def print_messages(self, show_repos):
+ """Performs the actual output printing of the pre-formatted
+ messages
+
+ @param show_repos: bool.
+ """
+ for msg in self.print_msg:
+ if isinstance(msg, basestring):
+ writemsg_stdout("%s\n" % (msg,), noiselevel=-1)
+ continue
+ myprint, self.verboseadd, repoadd = msg
+ if self.verboseadd:
+ myprint += " " + self.verboseadd
+ if show_repos and repoadd:
+ myprint += " " + teal("[%s]" % repoadd)
+ writemsg_stdout("%s\n" % (myprint,), noiselevel=-1)
+ return
+
+
+ def print_blockers(self):
+ """Performs the actual output printing of the pre-formatted
+ blocker messages
+ """
+ for pkg in self.blockers:
+ writemsg_stdout("%s\n" % (pkg,), noiselevel=-1)
+ return
+
+
+ def print_verbose(self, show_repos):
+ """Prints the verbose output to std_out
+
+ @param show_repos: bool.
+ """
+ writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
+ if show_repos:
+ # Use unicode_literals to force unicode format string so
+ # that RepoDisplay.__unicode__() is called in python2.
+ writemsg_stdout("%s" % (self.conf.repo_display,),
+ noiselevel=-1)
+ return
+
+
+ def print_changelog(self):
+ """Prints the changelog text to std_out
+ """
+ for chunk in self.changelogs:
+ writemsg_stdout(chunk,
+ noiselevel=-1)
+
+
+ def get_display_list(self, mylist):
+ """Determines the display list to process
+
+ @param mylist
+ @rtype list
+ Modifies self.counters.blocks, self.counters.blocks_satisfied,
+
+ """
+ unsatisfied_blockers = []
+ ordered_nodes = []
+ for pkg in mylist:
+ if isinstance(pkg, Blocker):
+ self.counters.blocks += 1
+ if pkg.satisfied:
+ ordered_nodes.append(pkg)
+ self.counters.blocks_satisfied += 1
+ else:
+ unsatisfied_blockers.append(pkg)
+ else:
+ ordered_nodes.append(pkg)
+ if self.conf.tree_display:
+ display_list = _tree_display(self.conf, ordered_nodes)
+ else:
+ display_list = [(pkg, 0, True) for pkg in ordered_nodes]
+ for pkg in unsatisfied_blockers:
+ display_list.append((pkg, 0, True))
+ return display_list
+
+
+ def set_pkg_info(self, pkg, ordered):
+ """Sets various pkg_info dictionary variables
+
+ @param pkg: _emerge.Package.Package instance
+ @param ordered: bool
+ @rtype pkg_info dictionary
+ Modifies self.counters.restrict_fetch,
+ self.counters.restrict_fetch_satisfied
+ """
+ pkg_info = PkgInfo()
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+ pkg_info.slot = pkg.slot
+ pkg_info.sub_slot = pkg.sub_slot
+ pkg_info.repo_name = pkg.repo
+ pkg_info.ordered = ordered
+ pkg_info.operation = pkg.operation
+ pkg_info.merge = ordered and pkg_info.operation == "merge"
+ if not pkg_info.merge and pkg_info.operation == "merge":
+ pkg_info.operation = "nomerge"
+ pkg_info.built = pkg.type_name != "ebuild"
+ pkg_info.ebuild_path = None
+ if ordered:
+ if pkg_info.merge:
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ elif pkg_info.operation == "uninstall":
+ self.counters.uninst += 1
+ if pkg.type_name == "ebuild":
+ pkg_info.ebuild_path = self.portdb.findname(
+ pkg.cpv, myrepo=pkg_info.repo_name)
+ if pkg_info.ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % pkg.cpv)
+ pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
+ os.path.dirname(pkg_info.ebuild_path)))
+ else:
+ pkg_info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)
+ pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
+ if not pkg.built and pkg.operation == 'merge' and \
+ 'fetch' in pkg.restrict:
+ if pkg_info.ordered:
+ self.counters.restrict_fetch += 1
+ pkg_info.attr_display.fetch_restrict = True
+ if not self.portdb.getfetchsizes(pkg.cpv,
+ useflags=pkg_info.use, myrepo=pkg.repo):
+ pkg_info.attr_display.fetch_restrict_satisfied = True
+ if pkg_info.ordered:
+ self.counters.restrict_fetch_satisfied += 1
+ else:
+ if pkg_info.ebuild_path is not None:
+ self.restrict_fetch_list[pkg] = pkg_info
+
+ if self.vardb.cpv_exists(pkg.cpv):
+ # Do a cpv match first, in case the SLOT has changed.
+ pkg_info.previous_pkg = self.vardb.match_pkgs('=' + pkg.cpv)[0]
+ else:
+ slot_matches = self.vardb.match_pkgs(pkg.slot_atom)
+ if slot_matches:
+ pkg_info.previous_pkg = slot_matches[0]
+
+ return pkg_info
+
+
+ def do_changelog(self, pkg, pkg_info):
+ """Processes and adds the changelog text to the master text for output
+
+ @param pkg: _emerge.Package.Package instance
+		@param pkg_info: dictionary
+ Modifies self.changelogs
+ """
+ if pkg_info.previous_pkg is not None:
+ ebuild_path_cl = pkg_info.ebuild_path
+ if ebuild_path_cl is None:
+ # binary package
+ ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path_cl is not None:
+ self.changelogs.extend(_calc_changelog(
+ ebuild_path_cl, pkg_info.previous_pkg, pkg.cpv))
+ return
+
+
+ def check_system_world(self, pkg):
+		"""Checks for any occurrences of the package in the system or world sets
+
+ @param pkg: _emerge.Package.Package instance
+ @rtype system and world booleans
+ """
+ root_config = self.conf.roots[pkg.root]
+ system_set = root_config.sets["system"]
+ world_set = root_config.sets["selected"]
+ system = False
+ world = False
+ try:
+ system = system_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ world = world_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ if not (self.conf.oneshot or world) and \
+ pkg.root == self.conf.target_root and \
+ self.conf.favorites.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg)
+ ):
+ # Maybe it will be added to world now.
+ if create_world_atom(pkg, self.conf.favorites, root_config):
+ world = True
+ except InvalidDependString:
+ # This is reported elsewhere if relevant.
+ pass
+ return system, world
+
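+	# A minimal usage sketch (hypothetical pkg), mirroring the call made
+	# from __call__ below:
+	#
+	#     pkg_info.system, pkg_info.world = self.check_system_world(pkg)
+	#     # each value is the matching atom if one is found (truthy),
+	#     # otherwise a false value (False or None)
+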
+
+ @staticmethod
+ def get_ver_str(pkg):
+ """Obtains the version string
+ @param pkg: _emerge.Package.Package instance
+ @rtype string
+ """
+ ver_str = pkg.cpv.version
+ if ver_str.endswith("-r0"):
+ ver_str = ver_str[:-3]
+ return ver_str
+
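+	# A minimal sketch (hypothetical versions): only the default "-r0"
+	# revision suffix is stripped:
+	#
+	#     self.get_ver_str(pkg)   # "1.2.3"    if pkg.cpv.version == "1.2.3-r0"
+	#     self.get_ver_str(pkg2)  # "1.2.3-r1" if pkg2.cpv.version == "1.2.3-r1"
+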
+
+ def _get_installed_best(self, pkg, pkg_info):
+		""" we need to use "--emptytree" testing here rather than
+		"empty" param testing because the "empty"
+		param is used for -u, where you still *do* want to see when
+ something is being upgraded.
+
+ @param pkg: _emerge.Package.Package instance
+		@param pkg_info: PkgInfo instance
+		@rtype myoldbest: list, myinslotlist: list
+		Modifies self.counters.reinst, self.counters.downgrades,
+			self.counters.upgrades, self.counters.newslot, self.counters.new
+
+ """
+ myoldbest = []
+ myinslotlist = None
+ installed_versions = self.vardb.match_pkgs(pkg.cp)
+ if self.vardb.cpv_exists(pkg.cpv):
+ pkg_info.attr_display.replace = True
+ installed_version = pkg_info.previous_pkg
+ if installed_version.slot != pkg.slot or installed_version.sub_slot != pkg.sub_slot or \
+ not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ myoldbest = [installed_version]
+ if pkg_info.ordered:
+ if pkg_info.merge:
+ self.counters.reinst += 1
+ # filter out old-style virtual matches
+ elif installed_versions and \
+ installed_versions[0].cp == pkg.cp:
+ myinslotlist = self.vardb.match_pkgs(pkg.slot_atom)
+ # If this is the first install of a new-style virtual, we
+ # need to filter out old-style virtual matches.
+ if myinslotlist and \
+ myinslotlist[0].cp != pkg.cp:
+ myinslotlist = None
+ if myinslotlist:
+ myoldbest = myinslotlist[:]
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ pkg_info.attr_display.new_version = True
+ pkg_info.attr_display.downgrade = True
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ else:
+ # Update in slot
+ pkg_info.attr_display.new_version = True
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
+ else:
+ myoldbest = installed_versions
+ pkg_info.attr_display.new = True
+ pkg_info.attr_display.new_slot = True
+ if pkg_info.ordered:
+ self.counters.newslot += 1
+ if self.conf.changelog:
+ self.do_changelog(pkg, pkg_info)
+ else:
+ pkg_info.attr_display.new = True
+ if pkg_info.ordered:
+ self.counters.new += 1
+ return myoldbest, myinslotlist
+
+
+ def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
+ """The main operation to format and display the resolver output.
+
+		@param depgraph: dependency graph
+ @param mylist: list of packages being processed
+ @param favorites: list, defaults to []
+ @param verbosity: verbose level, defaults to None
+ Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
+ self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
+		self.print_msg
+ """
+ if favorites is None:
+ favorites = []
+ self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
+ mylist = self.get_display_list(self.conf.mylist)
+ # files to fetch list - avoids counting a same file twice
+ # in size display (verbose mode)
+ self.myfetchlist = set()
+
+ self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
+ if self.quiet_repo_display:
+ # Use this set to detect when all the "repoadd" strings are "[0]"
+ # and disable the entire repo display in this case.
+ repoadd_set = set()
+
+ self.verbose_main_repo_display = "--verbose-main-repo-display" in depgraph._frozen_config.myopts
+ self.restrict_fetch_list = {}
+
+ for mylist_index in range(len(mylist)):
+ pkg, depth, ordered = mylist[mylist_index]
+ self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
+ self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
+ self.pkgsettings = self.conf.pkgsettings[pkg.root]
+ self.indent = " " * depth
+
+ if isinstance(pkg, Blocker):
+ self._blockers(pkg)
+ else:
+ pkg_info = self.set_pkg_info(pkg, ordered)
+ pkg_info.oldbest_list, myinslotlist = \
+ self._get_installed_best(pkg, pkg_info)
+ if ordered and pkg_info.merge and \
+ not pkg_info.attr_display.new:
+ for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ pkg_info.attr_display.force_reinstall = True
+ break
+
+ self.verboseadd = ""
+ if self.quiet_repo_display:
+ self.repoadd = None
+ self._display_use(pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ if self.quiet_repo_display:
+ self.verbose_size(pkg, repoadd_set, pkg_info)
+ else:
+ self.verbose_size(pkg, None, pkg_info)
+
+ self.oldlp = self.conf.columnwidth - 30
+ self.newlp = self.oldlp - 30
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
+ pkg_info.system, pkg_info.world = \
+ self.check_system_world(pkg)
+ if 'interactive' in pkg.properties and \
+ pkg.operation == 'merge':
+ pkg_info.attr_display.interactive = True
+ if ordered:
+ self.counters.interactive += 1
+
+ if self.include_mask_str():
+ pkg_info.attr_display.mask = self.gen_mask_str(pkg)
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ if pkg_info.oldbest:
+ pkg_info.oldbest += " "
+ if self.conf.columns:
+ myprint = self._set_non_root_columns(pkg, pkg_info)
+ else:
+ pkg_str = pkg.cpv
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] " % (
+ self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ )
+ else:
+ myprint = "[%s %s] " % (
+ self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display)
+ myprint += self.indent + \
+ self.pkgprint(pkg_str, pkg_info) + " " + \
+ pkg_info.oldbest + darkgreen("to " + pkg.root)
+ else:
+ if self.conf.columns:
+ myprint = self._set_root_columns(pkg, pkg_info)
+ else:
+ myprint = self._set_no_columns(pkg, pkg_info)
+
+ if self.conf.columns and pkg.operation == "uninstall":
+ continue
+ if self.quiet_repo_display:
+ self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+ else:
+ self.print_msg.append((myprint, self.verboseadd, None))
+
+ show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(["0"])
+
+ # now finally print out the messages
+ self.print_messages(show_repos)
+ self.print_blockers()
+ if self.conf.verbosity == 3:
+ self.print_verbose(show_repos)
+ for pkg, pkg_info in self.restrict_fetch_list.items():
+ writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv,),
+ noiselevel=-1)
+ spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
+ pkg_info.ebuild_path)
+ if self.conf.changelog:
+ self.print_changelog()
+
+ return os.EX_OK
+
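+	# A hedged usage sketch (hypothetical depgraph/mylist objects; the
+	# real call site lives elsewhere in _emerge, and construction follows
+	# output.py):
+	#
+	#     display = Display()
+	#     if display(depgraph, mylist, favorites, verbosity) == os.EX_OK:
+	#         pass  # the resolver output has been written to stdout
+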
+
+def format_unmatched_atom(pkg, atom, pkg_use_enabled):
+ """
+	Returns two strings. The first string contains the
+	'atom', with the parts that 'pkg' fails to match colored.
+	The second string has the same number of characters as
+	the first one, but consists only of whitespace and ^
+	characters; the ^ characters mark the same positions as
+	the colored parts of the first string.
+ """
+ # Things to check:
+ # 1. Version
+ # 2. cp
+ # 3. slot/sub_slot
+ # 4. repository
+ # 5. USE
+
+ highlight = set()
+
+ def perform_coloring():
+ atom_str = ""
+ marker_str = ""
+ for ii, x in enumerate(atom):
+ if ii in highlight:
+ atom_str += colorize("BAD", x)
+ marker_str += "^"
+ else:
+ atom_str += x
+ marker_str += " "
+ return atom_str, marker_str
+
+ if atom.cp != pkg.cp:
+ # Highlight the cp part only.
+ ii = atom.find(atom.cp)
+ highlight.update(range(ii, ii + len(atom.cp)))
+ return perform_coloring()
+
+ version_atom = atom.without_repo.without_slot.without_use
+ version_atom_set = InternalPackageSet(initial_atoms=(version_atom,))
+ highlight_version = not bool(version_atom_set.findAtomForPackage(pkg,
+ modified_use=pkg_use_enabled(pkg)))
+
+ highlight_slot = False
+ if (atom.slot and atom.slot != pkg.slot) or \
+ (atom.sub_slot and atom.sub_slot != pkg.sub_slot):
+ highlight_slot = True
+
+ if highlight_version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ highlight.update(range(len(op)))
+
+ if ver is not None:
+ start = atom.rfind(ver)
+ end = start + len(ver)
+ highlight.update(range(start, end))
+
+ if highlight_slot:
+ slot_str = ":" + atom.slot
+ if atom.sub_slot:
+ slot_str += "/" + atom.sub_slot
+ if atom.slot_operator:
+ slot_str += atom.slot_operator
+ start = atom.find(slot_str)
+ end = start + len(slot_str)
+ highlight.update(range(start, end))
+
+ highlight_use = set()
+ if atom.use:
+ use_atom = "%s[%s]" % (atom.cp, str(atom.use))
+ use_atom_set = InternalPackageSet(initial_atoms=(use_atom,))
+ if not use_atom_set.findAtomForPackage(pkg, \
+ modified_use=pkg_use_enabled(pkg)):
+ missing_iuse = pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ highlight_use = set(missing_iuse)
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
+ if violated_atom.use is not None:
+ highlight_use = set(violated_atom.use.enabled.union(
+ violated_atom.use.disabled))
+
+ if highlight_use:
+ ii = atom.find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in highlight_use:
+ highlight.update(range(ii, ii + len(token)))
+ ii += len(token) + 1
+
+ return perform_coloring()
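+# A hedged illustration (hypothetical atom and pkg; color codes omitted):
+# when only the version part of ">=dev-libs/foo-2.0" fails to match, the
+# two returned strings line up like this:
+#
+#     >=dev-libs/foo-2.0
+#     ^^             ^^^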
diff --git a/usr/lib/portage/pym/_emerge/resolver/output_helpers.py b/usr/lib/portage/pym/_emerge/resolver/output_helpers.py
new file mode 100644
index 0000000..eb8d97d
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/output_helpers.py
@@ -0,0 +1,693 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Contains private support functions for the Display class
+in output.py
+"""
+
+from __future__ import unicode_literals
+
+__all__ = (
+ )
+
+import io
+import re
+import sys
+
+from portage import os
+from portage import _encodings, _unicode_encode
+from portage._sets.base import InternalPackageSet
+from portage.localization import localized_size
+from portage.output import (blue, bold, colorize, create_color_func,
+ green, red, teal, turquoise, yellow)
+bad = create_color_func("BAD")
+from portage.util import shlex_split, writemsg
+from portage.util.SlotObject import SlotObject
+from portage.versions import catpkgsplit
+
+from _emerge.Blocker import Blocker
+from _emerge.Package import Package
+
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+
+class _RepoDisplay(object):
+ def __init__(self, roots):
+ self._shown_repos = {}
+ self._unknown_repo = False
+ repo_paths = set()
+ for root_config in roots.values():
+ portdir = root_config.settings.get("PORTDIR")
+ if portdir:
+ repo_paths.add(portdir)
+ overlays = root_config.settings.get("PORTDIR_OVERLAY")
+ if overlays:
+ repo_paths.update(shlex_split(overlays))
+ repo_paths = list(repo_paths)
+ self._repo_paths = repo_paths
+ self._repo_paths_real = [ os.path.realpath(repo_path) \
+ for repo_path in repo_paths ]
+
+ # pre-allocate index for PORTDIR so that it always has index 0.
+ for root_config in roots.values():
+ portdb = root_config.trees["porttree"].dbapi
+ portdir = portdb.repositories.mainRepoLocation()
+ if portdir:
+ self.repoStr(portdir)
+
+ def repoStr(self, repo_path_real):
+ real_index = -1
+ if repo_path_real:
+ real_index = self._repo_paths_real.index(repo_path_real)
+ if real_index == -1:
+ s = "?"
+ self._unknown_repo = True
+ else:
+ shown_repos = self._shown_repos
+ repo_paths = self._repo_paths
+ repo_path = repo_paths[real_index]
+ index = shown_repos.get(repo_path)
+ if index is None:
+ index = len(shown_repos)
+ shown_repos[repo_path] = index
+ s = str(index)
+ return s
+
+ def __str__(self):
+ output = []
+ shown_repos = self._shown_repos
+ unknown_repo = self._unknown_repo
+ if shown_repos or self._unknown_repo:
+ output.append("Portage tree and overlays:\n")
+ show_repo_paths = list(shown_repos)
+ for repo_path, repo_index in shown_repos.items():
+ show_repo_paths[repo_index] = repo_path
+ if show_repo_paths:
+ for index, repo_path in enumerate(show_repo_paths):
+ output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
+ if unknown_repo:
+ output.append(" "+teal("[?]") + \
+ " indicates that the source repository could not be determined\n")
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
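+# A hedged sketch (hypothetical roots with PORTDIR=/usr/portage and one
+# overlay): indices are handed out in first-seen order, and the main
+# repo is pre-allocated so it always shows as [0]:
+#
+#     repo_display = _RepoDisplay(roots)
+#     repo_display.repoStr("/usr/portage")      # -> "0"
+#     repo_display.repoStr("/var/lib/overlay")  # -> "1"
+#     repo_display.repoStr(None)                # -> "?" (source unknown)
+#     print(repo_display)  # emits the "Portage tree and overlays:" legend
+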
+
+class _PackageCounters(object):
+
+ def __init__(self):
+ self.upgrades = 0
+ self.downgrades = 0
+ self.new = 0
+ self.newslot = 0
+ self.reinst = 0
+ self.uninst = 0
+ self.blocks = 0
+ self.blocks_satisfied = 0
+ self.totalsize = 0
+ self.restrict_fetch = 0
+ self.restrict_fetch_satisfied = 0
+ self.interactive = 0
+ self.binary = 0
+
+ def __str__(self):
+ total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
+ myoutput = []
+ details = []
+ myoutput.append("Total: %s package" % total_installs)
+ if total_installs != 1:
+ myoutput.append("s")
+ if total_installs != 0:
+ myoutput.append(" (")
+ if self.upgrades > 0:
+ details.append("%s upgrade" % self.upgrades)
+ if self.upgrades > 1:
+ details[-1] += "s"
+ if self.downgrades > 0:
+ details.append("%s downgrade" % self.downgrades)
+ if self.downgrades > 1:
+ details[-1] += "s"
+ if self.new > 0:
+ details.append("%s new" % self.new)
+ if self.newslot > 0:
+ details.append("%s in new slot" % self.newslot)
+ if self.newslot > 1:
+ details[-1] += "s"
+ if self.reinst > 0:
+ details.append("%s reinstall" % self.reinst)
+ if self.reinst > 1:
+ details[-1] += "s"
+ if self.binary > 0:
+ details.append("%s binary" % self.binary)
+ if self.binary > 1:
+ details[-1] = details[-1][:-1] + "ies"
+ if self.uninst > 0:
+ details.append("%s uninstall" % self.uninst)
+ if self.uninst > 1:
+ details[-1] += "s"
+ if self.interactive > 0:
+ details.append("%s %s" % (self.interactive,
+ colorize("WARN", "interactive")))
+ myoutput.append(", ".join(details))
+ if total_installs != 0:
+ myoutput.append(")")
+ myoutput.append(", Size of downloads: %s" % localized_size(self.totalsize))
+ if self.restrict_fetch:
+ myoutput.append("\nFetch Restriction: %s package" % \
+ self.restrict_fetch)
+ if self.restrict_fetch > 1:
+ myoutput.append("s")
+ if self.restrict_fetch_satisfied < self.restrict_fetch:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.restrict_fetch - self.restrict_fetch_satisfied))
+ if self.blocks > 0:
+ myoutput.append("\nConflict: %s block" % \
+ self.blocks)
+ if self.blocks > 1:
+ myoutput.append("s")
+ if self.blocks_satisfied < self.blocks:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.blocks - self.blocks_satisfied))
+ return "".join(myoutput)
+
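+# A minimal sketch of the summary line (hypothetical counts):
+#
+#     counters = _PackageCounters()
+#     counters.upgrades, counters.new, counters.reinst = 2, 1, 1
+#     str(counters)
+#     # -> "Total: 4 packages (2 upgrades, 1 new, 1 reinstall), Size of
+#     #    downloads: ..." (the size text comes from localized_size)
+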
+
+class _DisplayConfig(object):
+
+ def __init__(self, depgraph, mylist, favorites, verbosity):
+ frozen_config = depgraph._frozen_config
+ dynamic_config = depgraph._dynamic_config
+
+ self.mylist = mylist
+ self.favorites = InternalPackageSet(favorites, allow_repo=True)
+ self.verbosity = verbosity
+
+ if self.verbosity is None:
+ self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
+ "--verbose" in frozen_config.myopts and 3 or 2)
+
+ self.oneshot = "--oneshot" in frozen_config.myopts or \
+ "--onlydeps" in frozen_config.myopts
+ self.columns = "--columns" in frozen_config.myopts
+ self.tree_display = "--tree" in frozen_config.myopts
+ self.alphabetical = "--alphabetical" in frozen_config.myopts
+ self.quiet = "--quiet" in frozen_config.myopts
+ self.all_flags = self.verbosity == 3 or self.quiet
+ self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
+ self.changelog = "--changelog" in frozen_config.myopts
+ self.edebug = frozen_config.edebug
+ self.unordered_display = "--unordered-display" in frozen_config.myopts
+
+ mywidth = 130
+ if "COLUMNWIDTH" in frozen_config.settings:
+ try:
+ mywidth = int(frozen_config.settings["COLUMNWIDTH"])
+ except ValueError as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
+ frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
+ del e
+ self.columnwidth = mywidth
+
+ if "--quiet-repo-display" in frozen_config.myopts:
+ self.repo_display = _RepoDisplay(frozen_config.roots)
+ self.trees = frozen_config.trees
+ self.pkgsettings = frozen_config.pkgsettings
+ self.target_root = frozen_config.target_root
+ self.running_root = frozen_config._running_root
+ self.roots = frozen_config.roots
+
+ self.blocker_parents = dynamic_config._blocker_parents
+ self.reinstall_nodes = dynamic_config._reinstall_nodes
+ self.digraph = dynamic_config.digraph
+ self.blocker_uninstalls = dynamic_config._blocker_uninstalls
+ self.package_tracker = dynamic_config._package_tracker
+ self.set_nodes = dynamic_config._set_nodes
+
+ self.pkg_use_enabled = depgraph._pkg_use_enabled
+ self.pkg = depgraph._pkg
+
+
+def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
+ old_iuse, old_use,
+ is_new, feature_flags, reinst_flags):
+
+ if not conf.print_use_string:
+ return ""
+
+ enabled = []
+ if conf.alphabetical:
+ disabled = enabled
+ removed = enabled
+ else:
+ disabled = []
+ removed = []
+ cur_iuse = set(cur_iuse)
+ enabled_flags = cur_iuse.intersection(cur_use)
+ removed_iuse = set(old_iuse).difference(cur_iuse)
+ any_iuse = cur_iuse.union(old_iuse)
+ any_iuse = list(any_iuse)
+ any_iuse.sort()
+
+ for flag in any_iuse:
+ flag_str = None
+ isEnabled = False
+ reinst_flag = reinst_flags and flag in reinst_flags
+ if flag in enabled_flags:
+ isEnabled = True
+ if is_new or flag in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = red(flag)
+ elif flag not in old_iuse:
+ flag_str = yellow(flag) + "%*"
+ elif flag not in old_use:
+ flag_str = green(flag) + "*"
+ elif flag in removed_iuse:
+ if conf.all_flags or reinst_flag:
+ flag_str = yellow("-" + flag) + "%"
+ if flag in old_use:
+ flag_str += "*"
+ flag_str = "(" + flag_str + ")"
+ removed.append(flag_str)
+ continue
+ else:
+ if is_new or flag in old_iuse and \
+ flag not in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = blue("-" + flag)
+ elif flag not in old_iuse:
+ flag_str = yellow("-" + flag)
+ if flag not in iuse_forced:
+ flag_str += "%"
+ elif flag in old_use:
+ flag_str = green("-" + flag) + "*"
+ if flag_str:
+ if flag in feature_flags:
+ flag_str = "{" + flag_str + "}"
+ elif flag in iuse_forced:
+ flag_str = "(" + flag_str + ")"
+ if isEnabled:
+ enabled.append(flag_str)
+ else:
+ disabled.append(flag_str)
+
+ if conf.alphabetical:
+ ret = " ".join(enabled)
+ else:
+ ret = " ".join(enabled + disabled + removed)
+ if ret:
+ ret = '%s="%s" ' % (name, ret)
+ return ret
+
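+# A hedged sketch of the strings this produces (hypothetical flags): a
+# newly enabled flag gains "*", a flag that is new in IUSE gains "%",
+# forced flags are parenthesized, and the result is wrapped as
+# NAME="...", e.g.:
+#
+#     USE="ssl* -X% (-static)"
+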
+
+def _tree_display(conf, mylist):
+
+ # If there are any Uninstall instances, add the
+ # corresponding blockers to the digraph.
+ mygraph = conf.digraph.copy()
+
+ executed_uninstalls = set(node for node in mylist \
+ if isinstance(node, Package) and node.operation == "unmerge")
+
+ for uninstall in conf.blocker_uninstalls.leaf_nodes():
+ uninstall_parents = \
+ conf.blocker_uninstalls.parent_nodes(uninstall)
+ if not uninstall_parents:
+ continue
+
+ # Remove the corresponding "nomerge" node and substitute
+ # the Uninstall node.
+ inst_pkg = conf.pkg(uninstall.cpv, "installed",
+ uninstall.root_config, installed=True)
+
+ try:
+ mygraph.remove(inst_pkg)
+ except KeyError:
+ pass
+
+ try:
+ inst_pkg_blockers = conf.blocker_parents.child_nodes(inst_pkg)
+ except KeyError:
+ inst_pkg_blockers = []
+
+ # Break the Package -> Uninstall edges.
+ mygraph.remove(uninstall)
+
+		# Resolution of a package's blockers
+		# depends on its own uninstallation.
+ for blocker in inst_pkg_blockers:
+ mygraph.add(uninstall, blocker)
+
+ # Expand Package -> Uninstall edges into
+ # Package -> Blocker -> Uninstall edges.
+ for blocker in uninstall_parents:
+ mygraph.add(uninstall, blocker)
+ for parent in conf.blocker_parents.parent_nodes(blocker):
+ if parent != inst_pkg:
+ mygraph.add(blocker, parent)
+
+ # If the uninstall task did not need to be executed because
+ # of an upgrade, display Blocker -> Upgrade edges since the
+ # corresponding Blocker -> Uninstall edges will not be shown.
+ upgrade_node = next(conf.package_tracker.match(
+ uninstall.root, uninstall.slot_atom), None)
+
+ if upgrade_node is not None and \
+ uninstall not in executed_uninstalls:
+ for blocker in uninstall_parents:
+ mygraph.add(upgrade_node, blocker)
+
+ if conf.unordered_display:
+ display_list = _unordered_tree_display(mygraph, mylist)
+ else:
+ display_list = _ordered_tree_display(conf, mygraph, mylist)
+
+ _prune_tree_display(display_list)
+
+ return display_list
+
+
+def _unordered_tree_display(mygraph, mylist):
+ display_list = []
+ seen_nodes = set()
+
+ def print_node(node, depth):
+
+ if node in seen_nodes:
+ pass
+ else:
+ seen_nodes.add(node)
+
+ if isinstance(node, (Blocker, Package)):
+ display_list.append((node, depth, True))
+ else:
+ depth = -1
+
+ for child_node in mygraph.child_nodes(node):
+ print_node(child_node, depth + 1)
+
+ for root_node in mygraph.root_nodes():
+ print_node(root_node, 0)
+
+ return display_list
+
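+# A minimal sketch (hypothetical digraph): starting from each root node,
+# every Package/Blocker is emitted once with its depth, so a chain
+# root -> dep -> subdep becomes:
+#
+#     [(root, 0, True), (dep, 1, True), (subdep, 2, True)]
+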
+
+def _ordered_tree_display(conf, mygraph, mylist):
+ depth = 0
+ shown_edges = set()
+ tree_nodes = []
+ display_list = []
+
+ for x in mylist:
+ depth = len(tree_nodes)
+ while depth and x not in \
+ mygraph.child_nodes(tree_nodes[depth-1]):
+ depth -= 1
+ if depth:
+ tree_nodes = tree_nodes[:depth]
+ tree_nodes.append(x)
+ display_list.append((x, depth, True))
+ shown_edges.add((x, tree_nodes[depth-1]))
+ else:
+ traversed_nodes = set() # prevent endless circles
+ traversed_nodes.add(x)
+ def add_parents(current_node, ordered):
+ parent_nodes = None
+				# Do not traverse to parents if this node is
+				# an argument or a direct member of a set that has
+ # been specified as an argument (system or world).
+ if current_node not in conf.set_nodes:
+ parent_nodes = mygraph.parent_nodes(current_node)
+ if parent_nodes:
+ child_nodes = set(mygraph.child_nodes(current_node))
+ selected_parent = None
+ # First, try to avoid a direct cycle.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes and \
+ node not in child_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if not selected_parent:
+ # A direct cycle is unavoidable.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if selected_parent:
+ shown_edges.add((current_node, selected_parent))
+ traversed_nodes.add(selected_parent)
+ add_parents(selected_parent, False)
+ display_list.append((current_node,
+ len(tree_nodes), ordered))
+ tree_nodes.append(current_node)
+ tree_nodes = []
+ add_parents(x, True)
+
+ return display_list
+
+
+def _prune_tree_display(display_list):
+ last_merge_depth = 0
+ for i in range(len(display_list) - 1, -1, -1):
+ node, depth, ordered = display_list[i]
+ if not ordered and depth == 0 and i > 0 \
+ and node == display_list[i-1][0] and \
+ display_list[i-1][1] == 0:
+ # An ordered node got a consecutive duplicate
+ # when the tree was being filled in.
+ del display_list[i]
+ continue
+ if ordered and isinstance(node, Package) \
+ and node.operation in ('merge', 'uninstall'):
+ last_merge_depth = depth
+ continue
+ if depth >= last_merge_depth or \
+ i < len(display_list) - 1 and \
+ depth >= display_list[i+1][1]:
+ del display_list[i]
+
+
+def _calc_changelog(ebuildpath, current, next):
+	if ebuildpath is None or not os.path.exists(ebuildpath):
+ return []
+ current = '-'.join(catpkgsplit(current)[1:])
+ if current.endswith('-r0'):
+ current = current[:-3]
+ next = '-'.join(catpkgsplit(next)[1:])
+ if next.endswith('-r0'):
+ next = next[:-3]
+
+ changelogdir = os.path.dirname(ebuildpath)
+ changelogs = ['ChangeLog']
+ # ChangeLog-YYYY (see bug #389611)
+ changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
+ if fn.startswith('ChangeLog-')), reverse=True))
+
+ divisions = []
+ found_current = False
+ for fn in changelogs:
+ changelogpath = os.path.join(changelogdir, fn)
+ try:
+ with io.open(_unicode_encode(changelogpath,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ changelog = f.read()
+ except EnvironmentError:
+ return []
+ for node in _find_changelog_tags(changelog):
+ if node[0] == current:
+ found_current = True
+ break
+ else:
+ divisions.append(node)
+ if found_current:
+ break
+
+ if not found_current:
+ return []
+
+ #print 'XX from',current,'to',next
+ #for div,text in divisions: print 'XX',div
+ # skip entries for all revisions above the one we are about to emerge
+ later_rev_index = None
+ for i, node in enumerate(divisions):
+ if node[0] == next:
+ if later_rev_index is not None:
+ first_node = divisions[later_rev_index]
+ # Discard the later revision and the first ChangeLog entry
+ # that follows it. We want to display all the entries after
+ # that first entry, as discussed in bug #373009.
+ trimmed_lines = []
+ iterator = iter(first_node[1])
+ for l in iterator:
+ if not l:
+ # end of the first entry that's discarded
+ break
+ first_node = (None, list(iterator))
+ divisions = [first_node] + divisions[later_rev_index+1:]
+ break
+ if node[0] is not None:
+ later_rev_index = i
+
+ output = []
+ prev_blank = False
+ prev_rev = False
+ for rev, lines in divisions:
+ if rev is not None:
+ if not (prev_blank or prev_rev):
+ output.append("\n")
+ output.append(bold('*' + rev) + '\n')
+ prev_rev = True
+ prev_blank = False
+ if lines:
+ prev_rev = False
+ if not prev_blank:
+ output.append("\n")
+ for l in lines:
+ output.append(l + "\n")
+ output.append("\n")
+ prev_blank = True
+ return output
+
+def _strip_header_comments(lines):
+ # strip leading and trailing blank or header/comment lines
+ i = 0
+ while i < len(lines) and (not lines[i] or lines[i][:1] == "#"):
+ i += 1
+ if i:
+ lines = lines[i:]
+ while lines and (not lines[-1] or lines[-1][:1] == "#"):
+ lines.pop()
+ return lines
+
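+# A minimal sketch (hypothetical lines):
+#
+#     _strip_header_comments(["", "# Header", "entry", ""])
+#     # -> ["entry"]
+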
+def _find_changelog_tags(changelog):
+ divs = []
+ if not changelog:
+ return divs
+ release = None
+ release_end = 0
+ for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
+ changelog, re.M):
+ divs.append((release, _strip_header_comments(
+ changelog[release_end:match.start()].splitlines())))
+ release_end = match.end()
+ release = match.group(1)
+ if release.endswith('.ebuild'):
+ release = release[:-7]
+ if release.endswith('-r0'):
+ release = release[:-3]
+
+ divs.append((release,
+ _strip_header_comments(changelog[release_end:].splitlines())))
+ return divs
+
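+# A hedged sketch (hypothetical ChangeLog text): "*" headers split the
+# log into (release, lines) divisions, with any text before the first
+# header filed under a None release:
+#
+#     _find_changelog_tags("*foo-1.1 (01 Jan 2014)\n\n  Fixed bar.\n")
+#     # -> [(None, []), ('foo-1.1', ['  Fixed bar.'])]
+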
+class PkgInfo(object):
+ """Simple class to hold instance attributes for current
+ information about the pkg being printed.
+ """
+
+ __slots__ = ("attr_display", "built", "cp",
+ "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
+ "repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")
+
+
+ def __init__(self):
+ self.built = False
+ self.cp = ''
+ self.ebuild_path = ''
+ self.fetch_symbol = ''
+ self.merge = ''
+ self.oldbest = ''
+ self.oldbest_list = []
+ self.operation = ''
+ self.ordered = False
+ self.previous_pkg = None
+ self.repo_path_real = ''
+ self.repo_name = ''
+ self.slot = ''
+ self.sub_slot = ''
+ self.system = False
+ self.use = ''
+ self.ver = ''
+ self.world = False
+ self.attr_display = PkgAttrDisplay()
+
+class PkgAttrDisplay(SlotObject):
+
+ __slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
+ "force_reinstall",
+ "interactive", "mask", "new", "new_slot", "new_version", "replace")
+
+ def __str__(self):
+ output = []
+
+ if self.interactive:
+ output.append(colorize("WARN", "I"))
+ else:
+ output.append(" ")
+
+ if self.new or self.force_reinstall:
+ if self.force_reinstall:
+ output.append(red("r"))
+ else:
+ output.append(green("N"))
+ else:
+ output.append(" ")
+
+ if self.new_slot or self.replace:
+ if self.replace:
+ output.append(yellow("R"))
+ else:
+ output.append(green("S"))
+ else:
+ output.append(" ")
+
+ if self.fetch_restrict or self.fetch_restrict_satisfied:
+ if self.fetch_restrict_satisfied:
+ output.append(green("f"))
+ else:
+ output.append(red("F"))
+ else:
+ output.append(" ")
+
+ if self.new_version:
+ output.append(turquoise("U"))
+ else:
+ output.append(" ")
+
+ if self.downgrade:
+ output.append(blue("D"))
+ else:
+ output.append(" ")
+
+ if self.mask is not None:
+ output.append(self.mask)
+
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
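+
+# A minimal sketch of the fixed-width attribute column (hypothetical
+# state; color codes omitted): a new, fetch-restricted package whose
+# distfiles are already present renders as " N f  ":
+#
+#     attrs = PkgAttrDisplay(new=True, fetch_restrict_satisfied=True)
+#     str(attrs)  # -> " N f  "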
diff --git a/usr/lib/portage/pym/_emerge/resolver/package_tracker.py b/usr/lib/portage/pym/_emerge/resolver/package_tracker.py
new file mode 100644
index 0000000..406d5ce
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/package_tracker.py
@@ -0,0 +1,301 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import collections
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,match_from_list',
+ 'portage.util:cmp_sort_key',
+ 'portage.versions:vercmp',
+)
+
+_PackageConflict = collections.namedtuple("_PackageConflict", ["root", "pkgs", "atom", "description"])
+
+class PackageConflict(_PackageConflict):
+ """
+ Class to track the reason for a conflict and the conflicting packages.
+ """
+ def __iter__(self):
+ return iter(self.pkgs)
+
+ def __contains__(self, pkg):
+ return pkg in self.pkgs
+
+ def __len__(self):
+ return len(self.pkgs)
+
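+# A minimal sketch (hypothetical packages): PackageConflict acts as a
+# container of the conflicting packages:
+#
+#     conflict = PackageConflict(root="/", pkgs=(pkg_a, pkg_b),
+#         atom=pkg_a.slot_atom, description="slot conflict")
+#     len(conflict)       # 2
+#     pkg_a in conflict   # True
+#     list(conflict)      # [pkg_a, pkg_b]
+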
+
+class PackageTracker(object):
+ """
+ This class tracks packages which are currently
+ installed and packages which have been pulled into
+ the dependency graph.
+
+ It automatically tracks conflicts between packages.
+
+ Possible conflicts:
+ 1) Packages that share the same SLOT.
+ 2) Packages with the same cpv.
+ Not yet implemented:
+ 3) Packages that block each other.
+ """
+
+ def __init__(self):
+ # Mapping from package keys to set of packages.
+ self._cp_pkg_map = collections.defaultdict(list)
+ self._cp_vdb_pkg_map = collections.defaultdict(list)
+ # List of package keys that may contain conflicts.
+		# The insertion order must be preserved.
+ self._multi_pkgs = []
+
+ # Cache for result of conflicts().
+ self._conflicts_cache = None
+
+ # Records for each pulled package which installed package
+ # are replaced.
+ self._replacing = collections.defaultdict(list)
+ # Records which pulled packages replace this package.
+ self._replaced_by = collections.defaultdict(list)
+
+ self._match_cache = collections.defaultdict(dict)
+
+ def add_pkg(self, pkg):
+ """
+ Add a new package to the tracker. Records conflicts as necessary.
+ """
+ cp_key = pkg.root, pkg.cp
+
+ if any(other is pkg for other in self._cp_pkg_map[cp_key]):
+ return
+
+ self._cp_pkg_map[cp_key].append(pkg)
+
+ if len(self._cp_pkg_map[cp_key]) > 1:
+ self._conflicts_cache = None
+ if len(self._cp_pkg_map[cp_key]) == 2:
+ self._multi_pkgs.append(cp_key)
+
+ self._replacing[pkg] = []
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def add_installed_pkg(self, installed):
+ """
+		Add an installed package during vdb load. These packages
+		are only returned by match() when 'installed' is True, and
+		only as long as they have not been replaced by a package
+		added via add_pkg.
+ """
+ cp_key = installed.root, installed.cp
+ if any(other is installed for other in self._cp_vdb_pkg_map[cp_key]):
+ return
+
+ self._cp_vdb_pkg_map[cp_key].append(installed)
+
+ for pkg in self._cp_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def remove_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Raises KeyError if it isn't present.
+ """
+ cp_key = pkg.root, pkg.cp
+ try:
+ self._cp_pkg_map.get(cp_key, []).remove(pkg)
+ except ValueError:
+ raise KeyError(pkg)
+
+ if self._cp_pkg_map[cp_key]:
+ self._conflicts_cache = None
+
+ if not self._cp_pkg_map[cp_key]:
+ del self._cp_pkg_map[cp_key]
+ elif len(self._cp_pkg_map[cp_key]) == 1:
+ self._multi_pkgs = [other_cp_key for other_cp_key in self._multi_pkgs \
+ if other_cp_key != cp_key]
+
+ for installed in self._replacing[pkg]:
+ self._replaced_by[installed].remove(pkg)
+ if not self._replaced_by[installed]:
+ del self._replaced_by[installed]
+ del self._replacing[pkg]
+
+ self._match_cache.pop(cp_key, None)
+
+ def discard_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+		Does not raise a KeyError if it is not present.
+ """
+ try:
+ self.remove_pkg(pkg)
+ except KeyError:
+ pass
+
+ def match(self, root, atom, installed=True):
+ """
+ Iterates over the packages matching 'atom'.
+ If 'installed' is True, installed non-replaced
+ packages may also be returned.
+ """
+ cp_key = root, atom.cp
+ cache_key = root, atom, atom.unevaluated_atom, installed
+ try:
+ return iter(self._match_cache.get(cp_key, {})[cache_key])
+ except KeyError:
+ pass
+
+ candidates = self._cp_pkg_map.get(cp_key, [])[:]
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed not in self._replaced_by:
+ candidates.append(installed)
+
+ ret = match_from_list(atom, candidates)
+ ret.sort(key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ self._match_cache[cp_key][cache_key] = ret
+
+ return iter(ret)
+
+ def conflicts(self):
+ """
+		Iterates over the currently existing conflicts.
+ """
+ if self._conflicts_cache is None:
+ self._conflicts_cache = []
+
+ for cp_key in self._multi_pkgs:
+
+ # Categorize packages according to cpv and slot.
+ slot_map = collections.defaultdict(list)
+ cpv_map = collections.defaultdict(list)
+ for pkg in self._cp_pkg_map[cp_key]:
+ slot_key = pkg.root, pkg.slot_atom
+ cpv_key = pkg.root, pkg.cpv
+ slot_map[slot_key].append(pkg)
+ cpv_map[cpv_key].append(pkg)
+
+ # Slot conflicts.
+ for slot_key in slot_map:
+ slot_pkgs = slot_map[slot_key]
+ if len(slot_pkgs) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "slot conflict",
+ root = slot_key[0],
+ atom = slot_key[1],
+ pkgs = tuple(slot_pkgs),
+ ))
+
+ # CPV conflicts.
+ for cpv_key in cpv_map:
+ cpv_pkgs = cpv_map[cpv_key]
+ if len(cpv_pkgs) > 1:
+ # Make sure this cpv conflict is not a slot conflict at the same time.
+ # Ignore it if it is.
+ slots = set(pkg.slot for pkg in cpv_pkgs)
+ if len(slots) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "cpv conflict",
+ root = cpv_key[0],
+ atom = cpv_key[1],
+ pkgs = tuple(cpv_pkgs),
+ ))
+
+ return iter(self._conflicts_cache)
+
+ def slot_conflicts(self):
+ """
+ Iterates over present slot conflicts.
+ This is only intended for consumers that haven't been
+ updated to deal with other kinds of conflicts.
+		This function should be removed once all consumers are updated.
+ """
+ return (conflict for conflict in self.conflicts() \
+ if conflict.description == "slot conflict")
+
+ def all_pkgs(self, root):
+ """
+ Iterates over all packages for the given root
+ present in the tracker, including the installed
+ packages.
+ """
+ for cp_key in self._cp_pkg_map:
+ if cp_key[0] == root:
+ for pkg in self._cp_pkg_map[cp_key]:
+ yield pkg
+
+ for cp_key in self._cp_vdb_pkg_map:
+ if cp_key[0] == root:
+ for installed in self._cp_vdb_pkg_map[cp_key]:
+ if installed not in self._replaced_by:
+ yield installed
+
+ def contains(self, pkg, installed=True):
+ """
+ Checks if the package is in the tracker.
+ If 'installed' is True, returns True for
+ non-replaced installed packages.
+ """
+ cp_key = pkg.root, pkg.cp
+ for other in self._cp_pkg_map.get(cp_key, []):
+ if other is pkg:
+ return True
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed is pkg and \
+ installed not in self._replaced_by:
+ return True
+
+ return False
+
+ def __contains__(self, pkg):
+ """
+ Checks if the package is in the tracker.
+ Returns True for non-replaced installed packages.
+ """
+ return self.contains(pkg, installed=True)
+
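+# A hedged usage sketch (hypothetical Package objects):
+#
+#     tracker = PackageTracker()
+#     tracker.add_installed_pkg(installed_foo)  # loaded from the vdb
+#     tracker.add_pkg(graph_foo)                # pulled into the graph
+#     # graph_foo replaces installed_foo when they share a slot atom or
+#     # cpv, so match() only yields the graph package:
+#     list(tracker.match(graph_foo.root, graph_foo.slot_atom))
+#     # Two graph packages in one slot would register a conflict:
+#     for conflict in tracker.conflicts():
+#         print(conflict.description, conflict.root, conflict.atom)
+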
+
+class PackageTrackerDbapiWrapper(object):
+ """
+	A wrapper class that provides parts of the legacy
+ dbapi interface. Remove it once all consumers have
+ died.
+ """
+ def __init__(self, root, package_tracker):
+ self._root = root
+ self._package_tracker = package_tracker
+
+ def cpv_inject(self, pkg):
+ self._package_tracker.add_pkg(pkg)
+
+ def match_pkgs(self, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ ret = sorted(self._package_tracker.match(self._root, atom),
+ key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ return ret
+
+ def __iter__(self):
+ return self._package_tracker.all_pkgs(self._root)
+
+ def match(self, atom, use_cache=None):
+ return self.match_pkgs(atom)
+
+ def cp_list(self, cp):
+ return self.match_pkgs(cp)
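+
+# A minimal sketch (hypothetical tracker and Package): the wrapper maps
+# the legacy dbapi calls onto PackageTracker:
+#
+#     fakedb = PackageTrackerDbapiWrapper("/", tracker)
+#     fakedb.cpv_inject(pkg)                   # tracker.add_pkg(pkg)
+#     fakedb.match_pkgs(Atom("dev-libs/foo"))  # version-sorted matches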
diff --git a/usr/lib/portage/pym/_emerge/resolver/slot_collision.py b/usr/lib/portage/pym/_emerge/resolver/slot_collision.py
new file mode 100644
index 0000000..baeab08
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/resolver/slot_collision.py
@@ -0,0 +1,1122 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+from _emerge.AtomArg import AtomArg
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from portage.dep import check_required_use
+from portage.output import colorize
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg
+from portage.versions import cpv_getversion, vercmp
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class slot_conflict_handler(object):
+ """This class keeps track of all slot conflicts and provides
+ an interface to get possible solutions.
+
+ How it works:
+ If two packages have been pulled into a slot, one needs to
+ go away. This class focuses on cases where this can be achieved
+ with a change in USE settings.
+
+	1) Find out what causes a given slot conflict. There are
+ three possibilities:
+
+ a) One parent needs foo-1:0 and another one needs foo-2:0,
+ nothing we can do about this. This is called a 'version
+ based conflict'.
+
+ b) All parents of one of the conflict packages could use
+ another conflict package. This is called an 'unspecific
+ conflict'. This should be caught by the backtracking logic.
+ Ask the user to enable -uN (if not already enabled). If -uN is
+ enabled, this case is treated in the same way as c).
+
+ c) Neither a 'version based conflict' nor an 'unspecific
+	conflict'. Ignoring use deps would result in an
+ 'unspecific conflict'. This is called a 'specific conflict'.
+ This is the only conflict we try to find suggestions for.
+
+ 2) Computing suggestions.
+
+ Def.: "configuration": A list of packages, containing exactly one
+ package from each slot conflict.
+
+ We try to find USE changes such that all parents of conflict packages
+ can work with a package in the configuration we're looking at. This
+ is done for all possible configurations, except if the 'all-ebuild'
+ configuration has a suggestion. In this case we immediately abort the
+ search.
+ For the current configuration, all use flags that are part of violated
+ use deps are computed. This is done for every slot conflict on its own.
+
+ Def.: "solution (candidate)": An assignment of "enabled" / "disabled"
+ values for the use flags that are part of violated use deps.
+
+ Now all involved use flags for the current configuration are known. For
+ now they have an undetermined value. Fix their value in the
+ following cases:
+ * The use dep in the parent atom is unconditional.
+ * The parent package is 'installed'.
+ * The conflict package is 'installed'.
+
+	USE of 'installed' packages can't be changed. This always requires a
+	non-installed package.
+
+ During this procedure, contradictions may occur. In this case the
+ configuration has no solution.
+
+	Now generate all possible solution candidates with fixed values. Check
+	that they don't introduce new conflicts.
+
+ We have found a valid assignment for all involved use flags. Compute
+ the needed USE changes and prepare the message for the user.
+ """
+
+ _check_configuration_max = 1024
+
+ def __init__(self, depgraph):
+ self.depgraph = depgraph
+ self.myopts = depgraph._frozen_config.myopts
+ self.debug = "--debug" in self.myopts
+ if self.debug:
+ writemsg("Starting slot conflict handler\n", noiselevel=-1)
+
+ # List of tuples, where each tuple represents a slot conflict.
+ self.all_conflicts = []
+ for conflict in depgraph._dynamic_config._package_tracker.slot_conflicts():
+ self.all_conflicts.append((conflict.root, conflict.atom, conflict.pkgs))
+
+ #A dict mapping packages to pairs of parent package
+ #and parent atom
+ self.all_parents = depgraph._dynamic_config._parent_atoms
+
+ #set containing all nodes that are part of a slot conflict
+ conflict_nodes = set()
+
+ #a list containing list of packages that form a slot conflict
+ conflict_pkgs = []
+
+ #a list containing sets of (parent, atom) pairs that have pulled packages
+ #into the same slot
+ all_conflict_atoms_by_slotatom = []
+
+ #fill conflict_pkgs, all_conflict_atoms_by_slotatom
+ for root, atom, pkgs in self.all_conflicts:
+ conflict_pkgs.append(list(pkgs))
+ all_conflict_atoms_by_slotatom.append(set())
+
+ for pkg in pkgs:
+ conflict_nodes.add(pkg)
+ for ppkg, atom in self.all_parents.get(pkg):
+ all_conflict_atoms_by_slotatom[-1].add((ppkg, atom))
+
+ #Variable that holds the non-explanation part of the message.
+ self.conflict_msg = []
+ #If any conflict package was pulled in only by unspecific atoms, then
+ #the user forgot to enable --newuse and/or --update.
+ self.conflict_is_unspecific = False
+
+ #Indicate if the conflict is caused by incompatible version requirements
+ #cat/pkg-2 pulled in, but a parent requires <cat/pkg-2
+ self.is_a_version_conflict = False
+
+ self._prepare_conflict_msg_and_check_for_specificity()
+
+ #a list of dicts that hold the needed USE values to solve all conflicts
+ self.solutions = []
+
+ #a list of dicts that hold the needed USE changes to solve all conflicts
+ self.changes = []
+
+ #configuration = a list of packages with exactly one package from every
+ #single slot conflict
+ config_gen = _configuration_generator(conflict_pkgs)
+ first_config = True
+
+ #go through all configurations and collect solutions
+		while True:
+ config = config_gen.get_configuration()
+ if not config:
+ break
+
+ if self.debug:
+ writemsg("\nNew configuration:\n", noiselevel=-1)
+ for pkg in config:
+ writemsg(" %s\n" % (pkg,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
+
+ if new_solutions:
+ self.solutions.extend(new_solutions)
+
+ if first_config:
+ #If the "all ebuild"-config gives a solution, use it.
+ #Otherwise enumerate all other solutions.
+ if self.debug:
+ writemsg("All-ebuild configuration has a solution. Aborting search.\n", noiselevel=-1)
+ break
+ first_config = False
+
+ if len(conflict_pkgs) > 4:
+ # The number of configurations to check grows exponentially in the number of conflict_pkgs.
+ # To prevent excessive running times, only check the "all-ebuild" configuration,
+ # if the number of conflict packages is too large.
+ if self.debug:
+ writemsg("\nAborting search due to excessive number of configurations.\n", noiselevel=-1)
+ break
+
+ for solution in self.solutions:
+ self._add_change(self._get_change(solution))
+
+
+ def get_conflict(self):
+ return "".join(self.conflict_msg)
+
+ def _is_subset(self, change1, change2):
+ """
+ Checks if a set of changes 'change1' is a subset of the changes 'change2'.
+ """
+ #All pkgs of change1 have to be in change2.
+ #For every package in change1, the changes have to be a subset of
+ #the corresponding changes in change2.
+ for pkg in change1:
+ if pkg not in change2:
+ return False
+
+ for flag in change1[pkg]:
+ if flag not in change2[pkg]:
+ return False
+ if change1[pkg][flag] != change2[pkg][flag]:
+ return False
+ return True
+
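+	# A minimal sketch (hypothetical pkg): a change is a subset when it
+	# touches no packages or flags beyond the other change:
+	#
+	#     change1 = {pkg: {"ssl": True}}
+	#     change2 = {pkg: {"ssl": True, "X": False}}
+	#     self._is_subset(change1, change2)  # True
+	#     self._is_subset(change2, change1)  # False
+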
+ def _add_change(self, new_change):
+ """
+		Make sure to keep only minimal changes. If "+foo" does the job, discard "+foo -bar".
+ """
+ changes = self.changes
+ #Make sure there is no other solution that is a subset of the new solution.
+ ignore = False
+ to_be_removed = []
+ for change in changes:
+ if self._is_subset(change, new_change):
+ ignore = True
+ break
+ elif self._is_subset(new_change, change):
+ to_be_removed.append(change)
+
+ if not ignore:
+ #Discard all existing change that are a superset of the new change.
+ for obsolete_change in to_be_removed:
+ changes.remove(obsolete_change)
+ changes.append(new_change)
+
+ def _get_change(self, solution):
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ new_change = {}
+ for pkg in solution:
+ for flag, state in solution[pkg].items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
+ if state == "enabled" and flag not in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[real_flag] = True
+ elif state == "disabled" and flag in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[real_flag] = False
+ return new_change
+
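+	# A minimal sketch (hypothetical pkg with both "ssl" and "X"
+	# currently disabled):
+	#
+	#     solution = {pkg: {"ssl": "enabled", "X": "disabled"}}
+	#     self._get_change(solution)  # -> {pkg: {"ssl": True}}; "X" is
+	#                                 # already disabled, so it is dropped
+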
+ def _prepare_conflict_msg_and_check_for_specificity(self):
+ """
+ Print all slot conflicts in a human readable way.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ verboseconflicts = "--verbose-conflicts" in self.myopts
+ msg = self.conflict_msg
+ indent = " "
+ msg.append("\n!!! Multiple package instances within a single " + \
+ "package slot have been pulled\n")
+ msg.append("!!! into the dependency graph, resulting" + \
+ " in a slot conflict:\n\n")
+
+ for root, slot_atom, pkgs in self.all_conflicts:
+ msg.append("%s" % (slot_atom,))
+ if root != self.depgraph._frozen_config._running_root.root:
+ msg.append(" for %s" % (root,))
+ msg.append("\n\n")
+
+ for pkg in pkgs:
+ msg.append(indent)
+ msg.append("%s" % (pkg,))
+ parent_atoms = self.all_parents.get(pkg)
+ if parent_atoms:
+ #Create a list of collision reasons and map them to sets
+ #of atoms.
+ #Possible reasons:
+ # ("version", "ge") for operator >=, >
+ # ("version", "eq") for operator =, ~
+ # ("version", "le") for operator <=, <
+ # ("use", "<some use flag>") for unmet use conditionals
+ collision_reasons = {}
+ num_all_specific_atoms = 0
+
+ for ppkg, atom in parent_atoms:
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+ atom_without_use_and_slot_set = InternalPackageSet(initial_atoms=(
+ atom.without_use.without_slot,))
+
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+
+ if not atom_without_use_and_slot_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ if atom.operator is not None:
+ # The version range does not match.
+ sub_type = None
+ if atom.operator in (">=", ">"):
+ sub_type = "ge"
+ elif atom.operator in ("=", "~"):
+ sub_type = "eq"
+ elif atom.operator in ("<=", "<"):
+ sub_type = "le"
+
+ key = ("version", sub_type)
+ atoms = collision_reasons.get(key, set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[key] = atoms
+
+ elif not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ # The slot and/or sub_slot does not match.
+ key = ("slot", (atom.slot, atom.sub_slot, atom.slot_operator))
+ atoms = collision_reasons.get(key, set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[key] = atoms
+
+ elif not atom_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ for flag in missing_iuse:
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
+ other_pkg.iuse.is_valid_flag)
+ if violated_atom.use is None:
+ # Something like bug #453400 caused the
+ # above findAtomForPackage call to
+ # return None unexpectedly.
+ msg = ("\n\n!!! BUG: Detected "
+ "USE dep match inconsistency:\n"
+ "\tppkg: %s\n"
+ "\tviolated_atom: %s\n"
+ "\tatom: %s unevaluated: %s\n"
+ "\tother_pkg: %s IUSE: %s USE: %s\n" %
+ (ppkg,
+ violated_atom,
+ atom,
+ atom.unevaluated_atom,
+ other_pkg,
+ sorted(other_pkg.iuse.all),
+ sorted(_pkg_use_enabled(other_pkg))))
+ writemsg(msg, noiselevel=-2)
+ raise AssertionError(
+ 'BUG: USE dep match inconsistency')
+ for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+ elif isinstance(ppkg, AtomArg) and other_pkg.installed:
+ parent_atoms = collision_reasons.get(("AtomArg", None), set())
+ parent_atoms.add((ppkg, atom))
+ collision_reasons[("AtomArg", None)] = parent_atoms
+ num_all_specific_atoms += 1
+
+ msg.append(" pulled in by\n")
+
+ selected_for_display = set()
+ unconditional_use_deps = set()
+
+ for (type, sub_type), parents in collision_reasons.items():
+ #From each (type, sub_type) pair select at least one atom.
+ #Try to select as few atoms as possible
+
+ if type == "version":
+							#Find the atom with the version that is as far away as possible.
+ best_matches = {}
+ for ppkg, atom, other_pkg in parents:
+ if atom.cp in best_matches:
+ cmp = vercmp( \
+ cpv_getversion(atom.cpv), \
+ cpv_getversion(best_matches[atom.cp][1].cpv))
+
+ if (sub_type == "ge" and cmp > 0) \
+ or (sub_type == "le" and cmp < 0) \
+ or (sub_type == "eq" and cmp > 0):
+ best_matches[atom.cp] = (ppkg, atom)
+ else:
+ best_matches[atom.cp] = (ppkg, atom)
+ if verboseconflicts:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ selected_for_display.update(
+ best_matches.values())
+ elif type == "slot":
+ for ppkg, atom, other_pkg in parents:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ break
+ elif type == "use":
+							#Prefer atoms with unconditional use deps, because it's
+ #not possible to change them on the parent, which means there
+ #are fewer possible solutions.
+ use = sub_type
+ for ppkg, atom, other_pkg in parents:
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ unconditional_use_deps.add((ppkg, atom))
+ else:
+ parent_use = None
+ if isinstance(ppkg, Package):
+ parent_use = _pkg_use_enabled(ppkg)
+ violated_atom = atom.unevaluated_atom.violated_conditionals(
+ _pkg_use_enabled(other_pkg),
+ other_pkg.iuse.is_valid_flag,
+ parent_use=parent_use)
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ # Since the atom now matches, we don't want
+ # to display it in the slot conflict
+ # message, so we simply ignore it and rely
+ # on the autounmask display to communicate
+ # the necessary USE change to the user.
+ if violated_atom.use is None:
+ continue
+ if use in violated_atom.use.enabled or \
+ use in violated_atom.use.disabled:
+ unconditional_use_deps.add((ppkg, atom))
+ # When USE flags are removed, it can be
+ # essential to see all broken reverse
+ # dependencies here, so don't omit any.
+ # If the list is long, people can simply
+ # use a pager.
+ selected_for_display.add((ppkg, atom))
+ elif type == "AtomArg":
+ for ppkg, atom in parents:
+ selected_for_display.add((ppkg, atom))
+
+ def highlight_violations(atom, version, use, slot_violated):
+ """Colorize parts of an atom"""
+ atom_str = "%s" % (atom,)
+ colored_idx = set()
+ if version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ # Compute color_idx before adding the color codes
+ # as these change the indices of the letters.
+ if op is not None:
+ colored_idx.update(range(len(op)))
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ colored_idx.update(range(start, end))
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+
+
+ if op is not None:
+ atom_str = atom_str.replace(op, colorize("BAD", op), 1)
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ atom_str = atom_str[:start] + \
+ colorize("BAD", ver) + \
+ atom_str[end:]
+
+ if slot_str:
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ elif slot_violated:
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ if use and atom.use.tokens:
+ use_part_start = atom_str.find("[")
+ use_part_end = atom_str.find("]")
+
+ new_tokens = []
+ # Compute start index in non-colored atom.
+ ii = str(atom).find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in use:
+ new_tokens.append(colorize("BAD", token))
+ colored_idx.update(range(ii, ii + len(token)))
+ else:
+ new_tokens.append(token)
+ ii += 1 + len(token)
+
+ atom_str = atom_str[:use_part_start] \
+ + "[%s]" % (",".join(new_tokens),) + \
+ atom_str[use_part_end+1:]
+
+ return atom_str, colored_idx
+
+ # Show unconditional use deps first, since those
+ # are more problematic than the conditional kind.
+ ordered_list = list(unconditional_use_deps)
+ if len(selected_for_display) > len(unconditional_use_deps):
+ for parent_atom in selected_for_display:
+ if parent_atom not in unconditional_use_deps:
+ ordered_list.append(parent_atom)
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ if isinstance(parent, PackageArg):
+ # For PackageArg it's
+ # redundant to display the atom attribute.
+ msg.append("%s\n" % (parent,))
+ elif isinstance(parent, AtomArg):
+ msg.append(2*indent)
+ msg.append("%s (Argument)\n" % (atom,))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ version_violated = False
+ slot_violated = False
+ use = []
+ for (type, sub_type), parents in collision_reasons.items():
+ for x in parents:
+ if parent == x[0] and atom == x[1]:
+ if type == "version":
+ version_violated = True
+ elif type == "slot":
+ slot_violated = True
+ elif type == "use":
+ use.append(sub_type)
+ break
+
+ atom_str, colored_idx = highlight_violations(atom.unevaluated_atom,
+ version_violated, use, slot_violated)
+
+ if version_violated or slot_violated:
+ self.is_a_version_conflict = True
+
+ cur_line = "%s required by %s\n" % (atom_str, parent)
+ marker_line = ""
+ for ii in range(len(cur_line)):
+ if ii in colored_idx:
+ marker_line += "^"
+ else:
+ marker_line += " "
+ marker_line += "\n"
+ msg.append(2*indent)
+ msg.append(cur_line)
+ msg.append(2*indent)
+ msg.append(marker_line)
+
+ if not selected_for_display:
+ msg.append(2*indent)
+ msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
+ self.conflict_is_unspecific = True
+
+ omitted_parents = num_all_specific_atoms - len(selected_for_display)
+ if omitted_parents:
+ msg.append(2*indent)
+ if len(selected_for_display) > 1:
+ msg.append("(and %d more with the same problems)\n" % omitted_parents)
+ else:
+ msg.append("(and %d more with the same problem)\n" % omitted_parents)
+ else:
+ msg.append(" (no parents)\n")
+ msg.append("\n")
+ msg.append("\n")
+
+ def get_explanation(self):
+ msg = ""
+
+ if self.is_a_version_conflict:
+ return None
+
+ if self.conflict_is_unspecific and \
+ not ("--newuse" in self.myopts and "--update" in self.myopts):
+ msg += "!!! Enabling --newuse and --update might solve this conflict.\n"
+ msg += "!!! If not, it might help emerge to give a more specific suggestion.\n\n"
+ return msg
+
+ solutions = self.solutions
+ if not solutions:
+ return None
+
+ if len(solutions)==1:
+ if len(self.all_conflicts) == 1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying all of the following changes:\n"
+ else:
+ if len(self.all_conflicts) == 1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying one of the following solutions:\n"
+
+ def print_change(change, indent=""):
+ mymsg = ""
+ for pkg in change:
+ changes = []
+ for flag, state in change[pkg].items():
+ if state:
+ changes.append(colorize("red", "+" + flag))
+ else:
+ changes.append(colorize("blue", "-" + flag))
+ mymsg += indent + "- " + pkg.cpv + " (Change USE: %s" % " ".join(changes) + ")\n"
+ mymsg += "\n"
+ return mymsg
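+
+ # Sketch of the output produced above (hypothetical package and flags):
+ #   - app-misc/foo-1.2 (Change USE: +gtk -qt)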
+
+
+ if len(self.changes) == 1:
+ msg += print_change(self.changes[0], " ")
+ else:
+ for change in self.changes:
+ msg += " Solution: Apply all of:\n"
+ msg += print_change(change, " ")
+
+ return msg
+
+ def _check_configuration(self, config, all_conflict_atoms_by_slotatom, conflict_nodes):
+ """
+ Given a configuration, required USE changes are computed and checked to
+ make sure that no new conflict is introduced. Returns a (possibly empty)
+ list of solutions, or False if the configuration must be rejected.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ #An installed package can only be part of a valid configuration if it has no
+ #pending USE changes. Otherwise the ebuild will be pulled in again.
+ for pkg in config:
+ if not pkg.installed:
+ continue
+
+ for root, atom, pkgs in self.all_conflicts:
+ if pkg not in pkgs:
+ continue
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+ if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
+ or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
+ if self.debug:
+ writemsg(("%s has pending USE changes. "
+ "Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ #A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
+ all_involved_flags = []
+
+ #Go through all slot conflicts
+ for id, pkg in enumerate(config):
+ involved_flags = {}
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if ppkg in conflict_nodes and not ppkg in config:
+ #The parent is part of a slot conflict itself and is
+ #not part of the current config.
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom,))
+ if i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom.without_use,))
+ if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ #Version range does not match.
+ if self.debug:
+ writemsg(("%s does not satify all version "
+ "requirements. Rejecting configuration.\n") %
+ (pkg,), noiselevel=-1)
+ return False
+
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
+ #Missing IUSE.
+ #FIXME: This needs to support use dep defaults.
+ if self.debug:
+ writemsg(("%s misses needed flags from IUSE."
+ " Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ if not isinstance(ppkg, Package) or ppkg.installed:
+ #We cannot assume that it's possible to reinstall the package. Do not
+ #check whether any of its atoms have use.conditional.
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag)
+ else:
+ violated_atom = atom.unevaluated_atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, parent_use=_pkg_use_enabled(ppkg))
+ if violated_atom.use is None:
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ continue
+
+ if pkg.installed and (violated_atom.use.enabled or violated_atom.use.disabled):
+ #We can't change the USE of an installed package (only of an ebuild, but
+ #that is already part of the conflict, isn't it?)
+ if self.debug:
+ writemsg(("%s: installed package would need USE"
+ " changes. Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ #Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
+ #it can be in the conditional state "cond", which allows both values, or in the
+ #"contradiction" state, which means that some atoms insist on different values
+ #for this flag and those kill this configuration.
+ for flag in violated_atom.use.required:
+ state = involved_flags.get(flag, "")
+
+ if flag in violated_atom.use.enabled:
+ if state in ("", "cond", "enabled"):
+ state = "enabled"
+ else:
+ state = "contradiction"
+ elif flag in violated_atom.use.disabled:
+ if state in ("", "cond", "disabled"):
+ state = "disabled"
+ else:
+ state = "contradiction"
+ else:
+ if state == "":
+ state = "cond"
+
+ involved_flags[flag] = state
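+
+ # Sketch of the state merging above (rows: prior state for the flag,
+ # columns: what the current atom requires):
+ #   prior \ atom | enabled       | disabled      | conditional only
+ #   ""           | enabled       | disabled      | cond
+ #   "cond"       | enabled       | disabled      | cond
+ #   "enabled"    | enabled       | contradiction | enabled
+ #   "disabled"   | contradiction | disabled      | disabled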
+
+ if pkg.installed:
+ #We don't change the installed pkg's USE. Force all involved flags
+ #to the same values that the installed package has.
+ for flag in involved_flags:
+ if involved_flags[flag] == "enabled":
+ if not flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "disabled":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "cond":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "enabled"
+ else:
+ involved_flags[flag] = "disabled"
+
+ for flag, state in involved_flags.items():
+ if state == "contradiction":
+ if self.debug:
+ writemsg("Contradicting requirements found for flag " + \
+ flag + ". Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ all_involved_flags.append(involved_flags)
+
+ if self.debug:
+ writemsg("All involved flags:\n", noiselevel=-1)
+ for id, involved_flags in enumerate(all_involved_flags):
+ writemsg(" %s\n" % (config[id],), noiselevel=-1)
+ for flag, state in involved_flags.items():
+ writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
+
+ solutions = []
+ sol_gen = _solution_candidate_generator(all_involved_flags)
+ checked = 0
+ while True:
+ candidate = sol_gen.get_candidate()
+ if not candidate:
+ break
+ solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+ checked += 1
+ if solution:
+ solutions.append(solution)
+
+ if checked >= self._check_configuration_max:
+ # TODO: Implement early elimination for candidates that would
+ # change forced or masked flags, and don't count them here.
+ if self.debug:
+ writemsg("\nAborting _check_configuration due to "
+ "excessive number of candidates.\n", noiselevel=-1)
+ break
+
+ if self.debug:
+ if not solutions:
+ writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
+ return solutions
+
+
+ def _force_flag_for_package(self, required_changes, pkg, flag, state):
+ """
+ Adds an USE change to required_changes. Sets the target state to
+ "contradiction" if a flag is forced to conflicting values.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if state == "disabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "enabled":
+ flag_change = "contradiction"
+ elif flag in _pkg_use_enabled(pkg):
+ flag_change = "disabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
+ elif state == "enabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "disabled":
+ flag_change = "contradiction"
+ else:
+ flag_change = "enabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
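+
+ # Illustrative sketch (hypothetical pkg/flag): forcing opposite states
+ # for the same flag marks it as "contradiction", which _check_solution()
+ # rejects:
+ #   required_changes = {}
+ #   self._force_flag_for_package(required_changes, pkg, "gtk", "enabled")
+ #   self._force_flag_for_package(required_changes, pkg, "gtk", "disabled")
+ #   required_changes[pkg]["gtk"] -> "contradiction"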
+
+ def _check_solution(self, config, all_involved_flags, all_conflict_atoms_by_slotatom):
+ """
+ Given a configuration and all involved flags, all possible settings for the
+ involved flags are checked to see whether they solve the slot conflict.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if self.debug:
+ #The code is a bit verbose, because a state might not
+ #be a string, but a _value_helper instance.
+ msg = "Solution candidate: "
+ msg += "["
+ first = True
+ for involved_flags in all_involved_flags:
+ if first:
+ first = False
+ else:
+ msg += ", "
+ msg += "{"
+ inner_first = True
+ for flag, state in involved_flags.items():
+ if inner_first:
+ inner_first = False
+ else:
+ msg += ", "
+ msg += flag + ": %s" % (state,)
+ msg += "}"
+ msg += "]\n"
+ writemsg(msg, noiselevel=-1)
+
+ required_changes = {}
+ for id, pkg in enumerate(config):
+ if not pkg.installed:
+ #We can't change the USE of installed packages.
+ for flag in all_involved_flags[id]:
+ if not pkg.iuse.is_valid_flag(flag):
+ continue
+ state = all_involved_flags[id][flag]
+ self._force_flag_for_package(required_changes, pkg, flag, state)
+
+ #Go through all (parent, atom) pairs for the current slot conflict.
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ use = atom.unevaluated_atom.use
+ if not use:
+ #No need to force something for an atom without USE conditionals.
+ #These atoms are already satisfied.
+ continue
+ for flag in all_involved_flags[id]:
+ state = all_involved_flags[id][flag]
+
+ if flag not in use.required or not use.conditional:
+ continue
+ if flag in use.conditional.enabled:
+ #[flag?]
+ if state == "enabled":
+ #no need to change anything, the atom won't
+ #force -flag on pkg
+ pass
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.disabled:
+ #[!flag?]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #no need to change anything, the atom won't
+ #force +flag on pkg
+ pass
+ elif flag in use.conditional.equal:
+ #[flag=]
+ if state == "enabled":
+ #if flag is disabled we get [-flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.not_equal:
+ #[!flag=]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #if flag is disabled we get [flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+
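+ # Summary sketch of the four conditional USE-dep forms handled above,
+ # keyed by the state the child package needs for the flag:
+ #   dep form | child "enabled"   | child "disabled"
+ #   [flag?]  | nothing to do     | disable on parent
+ #   [!flag?] | disable on parent | nothing to do
+ #   [flag=]  | enable on parent  | disable on parent
+ #   [!flag=] | disable on parent | enable on parent
+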
+ is_valid_solution = True
+ for pkg in required_changes:
+ for state in required_changes[pkg].values():
+ if not state in ("enabled", "disabled"):
+ is_valid_solution = False
+
+ if not is_valid_solution:
+ return None
+
+ #Check if all atoms are satisfied after the changes are applied.
+ for id, pkg in enumerate(config):
+ new_use = _pkg_use_enabled(pkg)
+ if pkg in required_changes:
+ old_use = pkg.use.enabled
+ new_use = set(new_use)
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ new_use.add(flag)
+ elif state == "disabled":
+ new_use.discard(flag)
+ if not new_use.symmetric_difference(old_use):
+ #avoid copying the package in findAtomForPackage if possible
+ new_use = old_use
+
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if not hasattr(ppkg, "use"):
+ #It's a SetArg or something like that.
+ continue
+ ppkg_new_use = set(_pkg_use_enabled(ppkg))
+ if ppkg in required_changes:
+ for flag, state in required_changes[ppkg].items():
+ if state == "enabled":
+ ppkg_new_use.add(flag)
+ elif state == "disabled":
+ ppkg_new_use.discard(flag)
+
+ new_atom = atom.unevaluated_atom.evaluate_conditionals(ppkg_new_use)
+ i = InternalPackageSet(initial_atoms=(new_atom,))
+ if not i.findAtomForPackage(pkg, new_use):
+ #We managed to create a new problem with our changes.
+ is_valid_solution = False
+ if self.debug:
+ writemsg(("new conflict introduced: %s"
+ " does not match %s from %s\n") %
+ (pkg, new_atom, ppkg), noiselevel=-1)
+ break
+
+ if not is_valid_solution:
+ break
+
+ #Make sure the changes don't violate REQUIRED_USE
+ for pkg in required_changes:
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if not required_use:
+ continue
+
+ use = set(_pkg_use_enabled(pkg))
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ use.add(flag)
+ else:
+ use.discard(flag)
+
+ if not check_required_use(required_use, use, pkg.iuse.is_valid_flag):
+ is_valid_solution = False
+ break
+
+ if is_valid_solution and required_changes:
+ return required_changes
+ else:
+ return None
+
+class _configuration_generator(object):
+ def __init__(self, conflict_pkgs):
+ #reorder packages such that installed packages come last
+ self.conflict_pkgs = []
+ for pkgs in conflict_pkgs:
+ new_pkgs = []
+ for pkg in pkgs:
+ if not pkg.installed:
+ new_pkgs.append(pkg)
+ for pkg in pkgs:
+ if pkg.installed:
+ new_pkgs.append(pkg)
+ self.conflict_pkgs.append(new_pkgs)
+
+ self.solution_ids = []
+ for pkgs in self.conflict_pkgs:
+ self.solution_ids.append(0)
+ self._is_first_solution = True
+
+ def get_configuration(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ solution = []
+ for id, pkgs in enumerate(self.conflict_pkgs):
+ solution.append(pkgs[self.solution_ids[id]])
+ return solution
+
+ def _next(self, id=None):
+ solution_ids = self.solution_ids
+ conflict_pkgs = self.conflict_pkgs
+
+ if id is None:
+ id = len(solution_ids)-1
+
+ if solution_ids[id] == len(conflict_pkgs[id])-1:
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ solution_ids[id] += 1
+ for other_id in range(id+1, len(solution_ids)):
+ solution_ids[other_id] = 0
+ return True
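+
+ # Illustrative sketch: with two conflicts holding [A1, A2] and [B1, B2]
+ # (non-installed packages ordered first), get_configuration() yields
+ # [A1, B1], [A1, B2], [A2, B1], [A2, B2] and then None.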
+
+class _solution_candidate_generator(object):
+ class _value_helper(object):
+ def __init__(self, value=None):
+ self.value = value
+ def __eq__(self, other):
+ if isinstance(other, basestring):
+ return self.value == other
+ else:
+ return self.value == other.value
+ def __str__(self):
+ return "%s" % (self.value,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ def __init__(self, all_involved_flags):
+ #A copy of all_involved_flags with all "cond" values
+ #replaced by a _value_helper object.
+ self.all_involved_flags = []
+
+ #A list tracking references to all used _value_helper
+ #objects.
+ self.conditional_values = []
+
+ for involved_flags in all_involved_flags:
+ new_involved_flags = {}
+ for flag, state in involved_flags.items():
+ if state in ("enabled", "disabled"):
+ new_involved_flags[flag] = state
+ else:
+ v = self._value_helper("disabled")
+ new_involved_flags[flag] = v
+ self.conditional_values.append(v)
+ self.all_involved_flags.append(new_involved_flags)
+
+ self._is_first_solution = True
+
+ def get_candidate(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ return self.all_involved_flags
+
+ def _next(self, id=None):
+ values = self.conditional_values
+
+ if not values:
+ return False
+
+ if id is None:
+ id = len(values)-1
+
+ if values[id].value == "enabled":
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ values[id].value = "enabled"
+ for other_id in range(id+1, len(values)):
+ values[other_id].value = "disabled"
+ return True
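+
+ # Illustrative sketch: two "cond" flags are enumerated like a binary
+ # counter over their _value_helper objects:
+ #   disabled,disabled -> disabled,enabled -> enabled,disabled ->
+ #   enabled,enabled -> None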
+
+
diff --git a/usr/lib/portage/pym/_emerge/search.py b/usr/lib/portage/pym/_emerge/search.py
new file mode 100644
index 0000000..4b0fd9f
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/search.py
@@ -0,0 +1,394 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import re
+import portage
+from portage import os
+from portage.dbapi.porttree import _parse_uri_map
+from portage.localization import localized_size
+from portage.output import bold, bold as white, darkgreen, green, red
+from portage.util import writemsg_stdout
+
+from _emerge.Package import Package
+
+class search(object):
+
+ #
+ # class constants
+ #
+ VERSION_SHORT=1
+ VERSION_RELEASE=2
+
+ #
+ # public interface
+ #
+ def __init__(self, root_config, spinner, searchdesc,
+ verbose, usepkg, usepkgonly):
+ """Searches the available and installed packages for the supplied search key.
+ The list of available and installed packages is created at object instantiation.
+ This makes successive searches faster."""
+ self.settings = root_config.settings
+ self.vartree = root_config.trees["vartree"]
+ self.spinner = spinner
+ self.verbose = verbose
+ self.searchdesc = searchdesc
+ self.root_config = root_config
+ self.setconfig = root_config.setconfig
+ self.matches = {"pkg" : []}
+ self.mlen = 0
+
+ self._dbs = []
+
+ portdb = root_config.trees["porttree"].dbapi
+ bindb = root_config.trees["bintree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+
+ if not usepkgonly and portdb._have_root_eclass_dir:
+ self._dbs.append(portdb)
+
+ if (usepkg or usepkgonly) and bindb.cp_all():
+ self._dbs.append(bindb)
+
+ self._dbs.append(vardb)
+ self._portdb = portdb
+
+ def _spinner_update(self):
+ if self.spinner:
+ self.spinner.update()
+
+ def _cp_all(self):
+ cp_all = set()
+ for db in self._dbs:
+ cp_all.update(db.cp_all())
+ return list(sorted(cp_all))
+
+ def _aux_get(self, *args, **kwargs):
+ for db in self._dbs:
+ try:
+ return db.aux_get(*args, **kwargs)
+ except KeyError:
+ pass
+ raise KeyError(args[0])
+
+ def _findname(self, *args, **kwargs):
+ for db in self._dbs:
+ if db is not self._portdb:
+ # We don't want findname to return anything
+ # unless it's an ebuild in a portage tree.
+ # Otherwise, it's already built and we don't
+ # care about it.
+ continue
+ func = getattr(db, "findname", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return None
+
+ def _getFetchMap(self, *args, **kwargs):
+ for db in self._dbs:
+ func = getattr(db, "getFetchMap", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return {}
+
+ def _visible(self, db, cpv, metadata):
+ installed = db is self.vartree.dbapi
+ built = installed or db is not self._portdb
+ pkg_type = "ebuild"
+ if installed:
+ pkg_type = "installed"
+ elif built:
+ pkg_type = "binary"
+ return Package(type_name=pkg_type,
+ root_config=self.root_config,
+ cpv=cpv, built=built, installed=installed,
+ metadata=metadata).visible
+
+ def _xmatch(self, level, atom):
+ """
+ This method does not expand old-style virtuals because it
+ is restricted to returning matches for a single ${CATEGORY}/${PN}
+ and old-style virtual matches are unreliable for that when querying
+ multiple package databases. If necessary, old-style virtual expansion
+ can be performed on atoms prior to calling this method.
+ """
+ cp = portage.dep_getkey(atom)
+ if level == "match-all":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ matches.update(db.match(atom))
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "match-visible":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ db_keys = list(db._aux_cache_keys)
+ for cpv in db.match(atom):
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ if not self._visible(db, cpv, metadata):
+ continue
+ matches.add(cpv)
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "bestmatch-visible":
+ result = None
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ cpv = db.xmatch("bestmatch-visible", atom)
+ if not cpv or portage.cpv_getkey(cpv) != cp:
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ else:
+ db_keys = list(db._aux_cache_keys)
+ # break out of this loop with highest visible
+ # match, checked in descending order
+ for cpv in reversed(db.match(atom)):
+ if portage.cpv_getkey(cpv) != cp:
+ continue
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ if not self._visible(db, cpv, metadata):
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ break
+ else:
+ raise NotImplementedError(level)
+ return result
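+
+ # Sketch of the three supported match levels (anything else raises
+ # NotImplementedError):
+ #   "match-all"         -> every matching cpv from any db, ascending
+ #   "match-visible"     -> like "match-all", filtered by visibility
+ #   "bestmatch-visible" -> the single best visible cpv, or None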
+
+ def execute(self,searchkey):
+ """Performs the search for the supplied search key"""
+ match_category = 0
+ self.searchkey=searchkey
+ self.packagematches = []
+ if self.searchdesc:
+ self.searchdesc=1
+ self.matches = {"pkg":[], "desc":[], "set":[]}
+ else:
+ self.searchdesc=0
+ self.matches = {"pkg":[], "set":[]}
+ print("Searching... ", end=' ')
+
+ regexsearch = False
+ if self.searchkey.startswith('%'):
+ regexsearch = True
+ self.searchkey = self.searchkey[1:]
+ if self.searchkey.startswith('@'):
+ match_category = 1
+ self.searchkey = self.searchkey[1:]
+ if regexsearch:
+ self.searchre=re.compile(self.searchkey,re.I)
+ else:
+ self.searchre=re.compile(re.escape(self.searchkey), re.I)
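+
+ # Sketch of the search-key prefixes handled above (hypothetical keys):
+ #   "foo"          -> case-insensitive substring match on the name
+ #   "%^foo-.*"     -> "%" makes the key a regular expression
+ #   "@dev-python/" -> "@" matches against the full category/name string
+ #   "%@^dev-.*"    -> both prefixes may be combined, "%" first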
+
+ for package in self._cp_all():
+ self._spinner_update()
+
+ if match_category:
+ match_string = package[:]
+ else:
+ match_string = package.split("/")[-1]
+
+ masked=0
+ if self.searchre.search(match_string):
+ if not self._xmatch("match-visible", package):
+ masked=1
+ self.matches["pkg"].append([package,masked])
+ elif self.searchdesc: # DESCRIPTION searching
+ full_package = self._xmatch("bestmatch-visible", package)
+ if not full_package:
+ #no match found; we don't want to query description
+ full_package = portage.best(
+ self._xmatch("match-all", package))
+ if not full_package:
+ continue
+ else:
+ masked=1
+ try:
+ full_desc = self._aux_get(
+ full_package, ["DESCRIPTION"])[0]
+ except KeyError:
+ print("emerge: search: aux_get() failed, skipping")
+ continue
+ if self.searchre.search(full_desc):
+ self.matches["desc"].append([full_package,masked])
+
+ self.sdict = self.setconfig.getSets()
+ for setname in self.sdict:
+ self._spinner_update()
+ if match_category:
+ match_string = setname
+ else:
+ match_string = setname.split("/")[-1]
+
+ if self.searchre.search(match_string):
+ self.matches["set"].append([setname, False])
+ elif self.searchdesc:
+ if self.searchre.search(
+ self.sdict[setname].getMetadata("DESCRIPTION")):
+ self.matches["set"].append([setname, False])
+
+ self.mlen=0
+ for mtype in self.matches:
+ self.matches[mtype].sort()
+ self.mlen += len(self.matches[mtype])
+
+ def addCP(self, cp):
+ if not self._xmatch("match-all", cp):
+ return
+ masked = 0
+ if not self._xmatch("bestmatch-visible", cp):
+ masked = 1
+ self.matches["pkg"].append([cp, masked])
+ self.mlen += 1
+
+ def output(self):
+ """Outputs the results of the search."""
+ msg = []
+ msg.append("\b\b \n[ Results for search key : " + \
+ bold(self.searchkey) + " ]\n")
+ msg.append("[ Applications found : " + \
+ bold(str(self.mlen)) + " ]\n\n")
+ vardb = self.vartree.dbapi
+ metadata_keys = set(Package.metadata_keys)
+ metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
+ metadata_keys = tuple(metadata_keys)
+ for mtype in self.matches:
+ for match,masked in self.matches[mtype]:
+ full_package = None
+ if mtype == "pkg":
+ full_package = self._xmatch(
+ "bestmatch-visible", match)
+ if not full_package:
+ #no match found; we don't want to query description
+ masked=1
+ full_package = portage.best(
+ self._xmatch("match-all",match))
+ elif mtype == "desc":
+ full_package = match
+ match = portage.cpv_getkey(match)
+ elif mtype == "set":
+ msg.append(green("*") + " " + bold(match) + "\n")
+ if self.verbose:
+ msg.append(" " + darkgreen("Description:") + \
+ " " + \
+ self.sdict[match].getMetadata("DESCRIPTION") \
+ + "\n\n")
+ if full_package:
+ try:
+ metadata = dict(zip(metadata_keys,
+ self._aux_get(full_package, metadata_keys)))
+ except KeyError:
+ msg.append("emerge: search: aux_get() failed, skipping\n")
+ continue
+
+ desc = metadata["DESCRIPTION"]
+ homepage = metadata["HOMEPAGE"]
+ license = metadata["LICENSE"]
+
+ if masked:
+ msg.append(green("*") + " " + \
+ white(match) + " " + red("[ Masked ]") + "\n")
+ else:
+ msg.append(green("*") + " " + bold(match) + "\n")
+ myversion = self.getVersion(full_package, search.VERSION_RELEASE)
+
+ mysum = [0,0]
+ file_size_str = None
+ mycat = match.split("/")[0]
+ mypkg = match.split("/")[1]
+ mycpv = match + "-" + myversion
+ myebuild = self._findname(mycpv)
+ if myebuild:
+ pkg = Package(built=False, cpv=mycpv,
+ installed=False, metadata=metadata,
+ root_config=self.root_config, type_name="ebuild")
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(
+ pkgdir, self.settings["DISTDIR"])
+ try:
+ uri_map = _parse_uri_map(mycpv, metadata,
+ use=pkg.use.enabled)
+ except portage.exception.InvalidDependString as e:
+ file_size_str = "Unknown (%s)" % (e,)
+ del e
+ else:
+ try:
+ mysum[0] = mf.getDistfilesSize(uri_map)
+ except KeyError as e:
+ file_size_str = "Unknown (missing " + \
+ "digest for %s)" % (e,)
+ del e
+
+ available = False
+ for db in self._dbs:
+ if db is not vardb and \
+ db.cpv_exists(mycpv):
+ available = True
+ if not myebuild and hasattr(db, "bintree"):
+ myebuild = db.bintree.getname(mycpv)
+ try:
+ mysum[0] = os.stat(myebuild).st_size
+ except OSError:
+ myebuild = None
+ break
+
+ if myebuild and file_size_str is None:
+ file_size_str = localized_size(mysum[0])
+
+ if self.verbose:
+ if available:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Latest version available:"),
+ myversion))
+ msg.append(" %s\n" % \
+ self.getInstallationStatus(mycat+'/'+mypkg))
+ if myebuild:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Size of files:"), file_size_str))
+ msg.append(" " + darkgreen("Homepage:") + \
+ " " + homepage + "\n")
+ msg.append(" " + darkgreen("Description:") \
+ + " " + desc + "\n")
+ msg.append(" " + darkgreen("License:") + \
+ " " + license + "\n\n")
+ writemsg_stdout(''.join(msg), noiselevel=-1)
+ #
+ # private interface
+ #
+ def getInstallationStatus(self,package):
+ installed_package = self.vartree.dep_bestmatch(package)
+ result = ""
+ version = self.getVersion(installed_package,search.VERSION_RELEASE)
+ if len(version) > 0:
+ result = darkgreen("Latest version installed:")+" "+version
+ else:
+ result = darkgreen("Latest version installed:")+" [ Not Installed ]"
+ return result
+
+ def getVersion(self,full_package,detail):
+ if len(full_package) > 1:
+ package_parts = portage.catpkgsplit(full_package)
+ if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
+ result = package_parts[2]+ "-" + package_parts[3]
+ else:
+ result = package_parts[2]
+ else:
+ result = ""
+ return result
+
diff --git a/usr/lib/portage/pym/_emerge/show_invalid_depstring_notice.py b/usr/lib/portage/pym/_emerge/show_invalid_depstring_notice.py
new file mode 100644
index 0000000..a230b31
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/show_invalid_depstring_notice.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import textwrap
+import portage
+from portage import os
+from portage.util import writemsg_level
+
+def show_invalid_depstring_notice(parent_node, depstring, error_msg):
+
+ msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
+ "\n\n%s\n\n%s\n\n" % (error_msg, parent_node)
+ p_key = parent_node.cpv
+ p_status = parent_node.operation
+ msg = []
+ if p_status == "nomerge":
+ category, pf = portage.catsplit(p_key)
+ pkg_location = os.path.join(parent_node.root_config.settings['EROOT'], portage.VDB_PATH, category, pf)
+ msg.append("Portage is unable to process the dependencies of the ")
+ msg.append("'%s' package. " % p_key)
+ msg.append("In order to correct this problem, the package ")
+ msg.append("should be uninstalled, reinstalled, or upgraded. ")
+ msg.append("As a temporary workaround, the --nodeps option can ")
+ msg.append("be used to ignore all dependencies. For reference, ")
+ msg.append("the problematic dependencies can be found in the ")
+ msg.append("*DEPEND files located in '%s/'." % pkg_location)
+ else:
+ msg.append("This package can not be installed. ")
+ msg.append("Please notify the '%s' package maintainer " % p_key)
+ msg.append("about this problem.")
+
+ msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
+ writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
+
diff --git a/usr/lib/portage/pym/_emerge/stdout_spinner.py b/usr/lib/portage/pym/_emerge/stdout_spinner.py
new file mode 100644
index 0000000..670686a
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/stdout_spinner.py
@@ -0,0 +1,86 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import sys
+import time
+
+from portage.output import darkgreen, green
+
+class stdout_spinner(object):
+ scroll_msgs = [
+ "Gentoo Rocks ("+platform.system()+")",
+ "Thank you for using Gentoo. :)",
+ "Are you actually trying to read this?",
+ "How many times have you stared at this?",
+ "We are generating the cache right now",
+ "You are paying too much attention.",
+ "A theory is better than its explanation.",
+ "Phasers locked on target, Captain.",
+ "Thrashing is just virtual crashing.",
+ "To be is to program.",
+ "Real Users hate Real Programmers.",
+ "When all else fails, read the instructions.",
+ "Functionality breeds Contempt.",
+ "The future lies ahead.",
+ "3.1415926535897932384626433832795028841971694",
+ "Sometimes insanity is the only alternative.",
+ "Inaccuracy saves a world of explanation.",
+ ]
+
+ twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
+
+ def __init__(self):
+ self.spinpos = 0
+ self.update = self.update_twirl
+ self.scroll_sequence = self.scroll_msgs[
+ int(time.time() * 100) % len(self.scroll_msgs)]
+ self.last_update = 0
+ self.min_display_latency = 0.05
+
+ def _return_early(self):
+ """
+ Flushing output to the tty too frequently wastes CPU time. Therefore,
+ each update* method should return without doing any output when this
+ method returns True.
+ """
+ cur_time = time.time()
+ if cur_time - self.last_update < self.min_display_latency:
+ return True
+ self.last_update = cur_time
+ return False
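+
+ # Illustrative sketch: with min_display_latency = 0.05 at most ~20
+ # updates per second are flushed; in update_twirl() below, extra calls
+ # still advance spinpos but produce no output.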
+
+ def update_basic(self):
+ self.spinpos = (self.spinpos + 1) % 500
+ if self._return_early():
+ return True
+ if (self.spinpos % 100) == 0:
+ if self.spinpos == 0:
+ sys.stdout.write(". ")
+ else:
+ sys.stdout.write(".")
+ sys.stdout.flush()
+ return True
+
+ def update_scroll(self):
+ if self._return_early():
+ return True
+ if (self.spinpos >= len(self.scroll_sequence)):
+ sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
+ len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
+ else:
+ sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
+ sys.stdout.flush()
+ self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+ return True
+
+ def update_twirl(self):
+ self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
+ if self._return_early():
+ return True
+ sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
+ sys.stdout.flush()
+ return True
+
+ def update_quiet(self):
+ return True
diff --git a/usr/lib/portage/pym/_emerge/sync/__init__.py b/usr/lib/portage/pym/_emerge/sync/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/sync/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/_emerge/sync/getaddrinfo_validate.py b/usr/lib/portage/pym/_emerge/sync/getaddrinfo_validate.py
new file mode 100644
index 0000000..5e6009c
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/sync/getaddrinfo_validate.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+def getaddrinfo_validate(addrinfos):
+ """
+ Validate structures returned from getaddrinfo(),
+ since they may be corrupt, especially when python
+ has IPv6 support disabled (bug #340899).
+ """
+ valid_addrinfos = []
+ for addrinfo in addrinfos:
+ try:
+ if len(addrinfo) != 5:
+ continue
+ if len(addrinfo[4]) < 2:
+ continue
+ if not isinstance(addrinfo[4][0], basestring):
+ continue
+ except TypeError:
+ continue
+
+ valid_addrinfos.append(addrinfo)
+
+ return valid_addrinfos
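+
+ # Illustrative sketch (hypothetical data): a well-formed 5-tuple is
+ # kept, while an entry with a truncated sockaddr is dropped:
+ #   good = (2, 1, 6, '', ('93.184.216.34', 873))
+ #   bad = (2, 1, 6, '', (None,))
+ #   getaddrinfo_validate([good, bad]) -> [good]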
diff --git a/usr/lib/portage/pym/_emerge/sync/old_tree_timestamp.py b/usr/lib/portage/pym/_emerge/sync/old_tree_timestamp.py
new file mode 100644
index 0000000..aa23a27
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/sync/old_tree_timestamp.py
@@ -0,0 +1,100 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import locale
+import logging
+import time
+
+from portage import os
+from portage.exception import PortageException
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import grabfile, writemsg_level
+
+def have_english_locale():
+ lang, enc = locale.getdefaultlocale()
+ if lang is not None:
+ lang = lang.lower()
+ lang = lang.split('_', 1)[0]
+ return lang is None or lang in ('c', 'en')
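+
+ # Illustrative sketch: ('en_US', 'UTF-8') and ('C', '') count as
+ # English, ('de_DE', 'UTF-8') does not; an undetermined locale
+ # (lang is None) is also treated as English.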
+
+def whenago(seconds):
+ sec = int(seconds)
+ mins = 0
+ days = 0
+ hrs = 0
+ years = 0
+ out = []
+
+ if sec > 60:
+ mins = sec // 60
+ sec = sec % 60
+ if mins > 60:
+ hrs = mins // 60
+ mins = mins % 60
+ if hrs > 24:
+ days = hrs // 24
+ hrs = hrs % 24
+ if days > 365:
+ years = days // 365
+ days = days % 365
+
+ if years:
+ out.append("%dy " % years)
+ if days:
+ out.append("%dd " % days)
+ if hrs:
+ out.append("%dh " % hrs)
+ if mins:
+ out.append("%dm " % mins)
+ if sec:
+ out.append("%ds " % sec)
+
+ return "".join(out).strip()
+
+def old_tree_timestamp_warn(portdir, settings):
+ unixtime = time.time()
+ default_warnsync = 30
+
+ timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
+ try:
+ lastsync = grabfile(timestamp_file)
+ except PortageException:
+ return False
+
+ if not lastsync:
+ return False
+
+ lastsync = lastsync[0].split()
+ if not lastsync:
+ return False
+
+ try:
+ lastsync = int(lastsync[0])
+ except ValueError:
+ return False
+
+ var_name = 'PORTAGE_SYNC_STALE'
+ try:
+ warnsync = float(settings.get(var_name, default_warnsync))
+ except ValueError:
+ writemsg_level("!!! %s contains non-numeric value: %s\n" % \
+ (var_name, settings[var_name]),
+ level=logging.ERROR, noiselevel=-1)
+ return False
+
+ if warnsync <= 0:
+ return False
+
+ if (unixtime - 86400 * warnsync) > lastsync:
+ out = EOutput()
+ if have_english_locale():
+ out.ewarn("Last emerge --sync was %s ago." % \
+ whenago(unixtime - lastsync))
+ else:
+ out.ewarn(_("Last emerge --sync was %s.") % \
+ time.strftime('%c', time.localtime(lastsync)))
+ return True
+ return False
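+
+ # Illustrative sketch: with the default PORTAGE_SYNC_STALE of 30 (days),
+ # the warning above fires once now - lastsync exceeds
+ # 30 * 86400 = 2592000 seconds, i.e. roughly a month without syncing.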
diff --git a/usr/lib/portage/pym/_emerge/unmerge.py b/usr/lib/portage/pym/_emerge/unmerge.py
new file mode 100644
index 0000000..30b1930
--- /dev/null
+++ b/usr/lib/portage/pym/_emerge/unmerge.py
@@ -0,0 +1,594 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import signal
+import sys
+import textwrap
+import portage
+from portage import os
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.output import bold, colorize, darkgreen, green
+from portage._sets import SETPREFIX
+from portage._sets.base import EditablePackageSet
+from portage.versions import cpv_sort_key, _pkg_str
+
+from _emerge.emergelog import emergelog
+from _emerge.Package import Package
+from _emerge.UserQuery import UserQuery
+from _emerge.UninstallFailure import UninstallFailure
+from _emerge.countdown import countdown
+
+def _unmerge_display(root_config, myopts, unmerge_action,
+ unmerge_files, clean_delay=1, ordered=0,
+ writemsg_level=portage.util.writemsg_level):
+ """
+ Returns a tuple of (returncode, pkgmap) where returncode is
+ os.EX_OK if no errors occur, and 1 otherwise.
+ """
+
+ quiet = "--quiet" in myopts
+ settings = root_config.settings
+ sets = root_config.sets
+ vartree = root_config.trees["vartree"]
+ candidate_catpkgs=[]
+ global_unmerge=0
+ out = portage.output.EOutput()
+ pkg_cache = {}
+ db_keys = list(vartree.dbapi._aux_cache_keys)
+
+ def _pkg(cpv):
+ pkg = pkg_cache.get(cpv)
+ if pkg is None:
+ pkg = Package(built=True, cpv=cpv, installed=True,
+ metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
+ operation="uninstall", root_config=root_config,
+ type_name="installed")
+ pkg_cache[cpv] = pkg
+ return pkg
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(vdb_path)
+ except portage.exception.PortageException:
+ pass
+ vdb_lock = None
+ try:
+ if os.access(vdb_path, os.W_OK):
+ vartree.dbapi.lock()
+ vdb_lock = True
+
+ realsyslist = []
+ sys_virt_map = {}
+ for x in sets["system"].getAtoms():
+ for atom in expand_new_virt(vartree.dbapi, x):
+ if not atom.blocker:
+ realsyslist.append(atom)
+ if atom.cp != x.cp:
+ sys_virt_map[atom.cp] = x.cp
+
+ syslist = []
+ for x in realsyslist:
+ mycp = x.cp
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. It will not be triggered here by
+ # new-style virtuals since those are expanded to
+ # non-virtual atoms above by expand_new_virt().
+ if mycp.startswith("virtual/") and \
+ mycp in settings.getvirtuals():
+ providers = []
+ for provider in settings.getvirtuals()[mycp]:
+ if vartree.dbapi.match(provider):
+ providers.append(provider)
+ if len(providers) == 1:
+ syslist.extend(providers)
+ else:
+ syslist.append(mycp)
+ syslist = frozenset(syslist)
+
+ if not unmerge_files:
+ if unmerge_action == "unmerge":
+ print()
+ print(bold("emerge unmerge") + " can only be used with specific package names")
+ print()
+ return 1, {}
+ else:
+ global_unmerge = 1
+
+ localtree = vartree
+ # process all arguments and add all
+ # valid db entries to candidate_catpkgs
+ if global_unmerge:
+ if not unmerge_files:
+ candidate_catpkgs.extend(vartree.dbapi.cp_all())
+ else:
+ #we've got command-line arguments
+ if not unmerge_files:
+ print("\nNo packages to unmerge have been provided.\n")
+ return 1, {}
+ for x in unmerge_files:
+ arg_parts = x.split('/')
+ if x[0] not in [".","/"] and \
+ arg_parts[-1][-7:] != ".ebuild":
+ #possible cat/pkg or dep; treat as such
+ candidate_catpkgs.append(x)
+ elif unmerge_action in ["prune","clean"]:
+ print("\n!!! Prune and clean do not accept individual" + \
+ " ebuilds as arguments;\n skipping.\n")
+ continue
+ else:
+ # it appears that the user is specifying an installed
+ # ebuild and we're in "unmerge" mode, so it's ok.
+ if not os.path.exists(x):
+ print("\n!!! The path '"+x+"' doesn't exist.\n")
+ return 1, {}
+
+ absx = os.path.abspath(x)
+ sp_absx = absx.split("/")
+ if sp_absx[-1][-7:] == ".ebuild":
+ del sp_absx[-1]
+ absx = "/".join(sp_absx)
+
+ sp_absx_len = len(sp_absx)
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+
+ sp_vdb = vdb_path.split("/")
+ sp_vdb_len = len(sp_vdb)
+
+ if not os.path.exists(absx+"/CONTENTS"):
+ print("!!! Not a valid db dir: "+str(absx))
+ return 1, {}
+
+ if sp_absx_len <= sp_vdb_len:
+ # The path is shorter, so it can't be inside the vdb.
+ print(sp_absx)
+ print(absx)
+ print("\n!!!",x,"cannot be inside "+ \
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ for idx in range(0,sp_vdb_len):
+ if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
+ print(sp_absx)
+ print(absx)
+ print("\n!!!", x, "is not inside "+\
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ print("="+"/".join(sp_absx[sp_vdb_len:]))
+ candidate_catpkgs.append(
+ "="+"/".join(sp_absx[sp_vdb_len:]))
+
+ newline=""
+ if (not "--quiet" in myopts):
+ newline="\n"
+ if settings["ROOT"] != "/":
+ writemsg_level(darkgreen(newline+ \
+ ">>> Using system located in ROOT tree %s\n" % \
+ settings["ROOT"]))
+
+ if (("--pretend" in myopts) or ("--ask" in myopts)) and \
+ not ("--quiet" in myopts):
+ writemsg_level(darkgreen(newline+\
+ ">>> These are the packages that would be unmerged:\n"))
+
+ # Preservation of order is required for --depclean and --prune so
+ # that dependencies are respected. Use all_selected to eliminate
+ # duplicate packages since the same package may be selected by
+ # multiple atoms.
+ pkgmap = []
+ all_selected = set()
+ for x in candidate_catpkgs:
+ # cycle through all our candidate deps and determine
+ # what will and will not get unmerged
+ try:
+ mymatch = vartree.dbapi.match(x)
+ except portage.exception.AmbiguousPackageName as errpkgs:
+ print("\n\n!!! The short ebuild name \"" + \
+ x + "\" is ambiguous. Please specify")
+ print("!!! one of the following fully-qualified " + \
+ "ebuild names instead:\n")
+ for i in errpkgs[0]:
+ print(" " + green(i))
+ print()
+ sys.exit(1)
+
+ if not mymatch and x[0] not in "<>=~":
+ mymatch = localtree.dep_match(x)
+ if not mymatch:
+ portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), unmerge_action), noiselevel=-1)
+ continue
+
+ pkgmap.append(
+ {"protected": set(), "selected": set(), "omitted": set()})
+ mykey = len(pkgmap) - 1
+ if unmerge_action=="unmerge":
+ for y in mymatch:
+ if y not in all_selected:
+ pkgmap[mykey]["selected"].add(y)
+ all_selected.add(y)
+ elif unmerge_action == "prune":
+ if len(mymatch) == 1:
+ continue
+ best_version = mymatch[0]
+ best_slot = vartree.getslot(best_version)
+ best_counter = vartree.dbapi.cpv_counter(best_version)
+ for mypkg in mymatch[1:]:
+ myslot = vartree.getslot(mypkg)
+ mycounter = vartree.dbapi.cpv_counter(mypkg)
+ if (myslot == best_slot and mycounter > best_counter) or \
+ mypkg == portage.best([mypkg, best_version]):
+ if myslot == best_slot:
+ if mycounter < best_counter:
+ # On slot collision, keep the one with the
+ # highest counter since it is the most
+ # recently installed.
+ continue
+ best_version = mypkg
+ best_slot = myslot
+ best_counter = mycounter
+ pkgmap[mykey]["protected"].add(best_version)
+ pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
+ if mypkg != best_version and mypkg not in all_selected)
+ all_selected.update(pkgmap[mykey]["selected"])
+ else:
+ # unmerge_action == "clean"
+ slotmap={}
+ for mypkg in mymatch:
+ if unmerge_action == "clean":
+ myslot = localtree.getslot(mypkg)
+ else:
+ # since we're pruning, we don't care about slots
+ # and put all the pkgs in together
+ myslot = 0
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for mypkg in vartree.dbapi.cp_list(
+ portage.cpv_getkey(mymatch[0])):
+ myslot = vartree.getslot(mypkg)
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for myslot in slotmap:
+ counterkeys = list(slotmap[myslot])
+ if not counterkeys:
+ continue
+ counterkeys.sort()
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counterkeys[-1]])
+ del counterkeys[-1]
+
+ for counter in counterkeys[:]:
+ mypkg = slotmap[myslot][counter]
+ if mypkg not in mymatch:
+ counterkeys.remove(counter)
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counter])
+
+ #be pretty and get them in order of merge:
+ for ckey in counterkeys:
+ mypkg = slotmap[myslot][ckey]
+ if mypkg not in all_selected:
+ pkgmap[mykey]["selected"].add(mypkg)
+ all_selected.add(mypkg)
+ # ok, now the last-merged package
+ # is protected, and the rest are selected
+ numselected = len(all_selected)
+ if global_unmerge and not numselected:
+ portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
+ return 1, {}
+
+ if not numselected:
+ portage.writemsg_stdout(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+ finally:
+ if vdb_lock:
+ vartree.dbapi.flush_cache()
+ vartree.dbapi.unlock()
+
+ # generate a list of package sets that are directly or indirectly listed in "selected",
+ # as there is no persistent list of "installed" sets
+ installed_sets = ["selected"]
+ stop = False
+ pos = 0
+ while not stop:
+ stop = True
+ pos = len(installed_sets)
+ for s in installed_sets[pos - 1:]:
+ if s not in sets:
+ continue
+ candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
+ if candidates:
+ stop = False
+ installed_sets += candidates
+ installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
+ del stop, pos
+
+ # we don't want to unmerge packages that are still listed in user-editable package sets
+ # listed in "world" as they would be remerged on the next update of "world" or the
+ # relevant package sets.
+ unknown_sets = set()
+ for cp in range(len(pkgmap)):
+ for cpv in pkgmap[cp]["selected"].copy():
+ try:
+ pkg = _pkg(cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if unmerge_action != "clean" and root_config.root == "/":
+ skip_pkg = False
+ if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+ msg = ("Not unmerging package %s since there is no valid reason "
+ "for Portage to unmerge itself.") % (pkg.cpv,)
+ skip_pkg = True
+ elif vartree.dbapi._dblink(cpv).isowner(portage._python_interpreter):
+ msg = ("Not unmerging package %s since there is no valid reason "
+ "for Portage to unmerge currently used Python interpreter.") % (pkg.cpv,)
+ skip_pkg = True
+ if skip_pkg:
+ for line in textwrap.wrap(msg, 75):
+ out.eerror(line)
+ # adjust pkgmap so the display output is correct
+ pkgmap[cp]["selected"].remove(cpv)
+ all_selected.remove(cpv)
+ pkgmap[cp]["protected"].add(cpv)
+ continue
+
+ parents = []
+ for s in installed_sets:
+ # skip sets that the user requested to unmerge, and skip the
+ # world ("selected") set, since the package will be removed from
+ # that set later on.
+ if s in root_config.setconfig.active or s == "selected":
+ continue
+
+ if s not in sets:
+ if s in unknown_sets:
+ continue
+ unknown_sets.add(s)
+ out = portage.output.EOutput()
+ out.eerror(("Unknown set '@%s' in %s%s") % \
+ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
+ continue
+
+ # only check instances of EditablePackageSet as other classes are generally used for
+ # special purposes and can be ignored here (and are usually generated dynamically, so the
+ # user can't do much about them anyway)
+ if isinstance(sets[s], EditablePackageSet):
+
+ # This is derived from a snippet of code in the
+ # depgraph._iter_atoms_for_pkg() method.
+ for atom in sets[s].iterAtomsForPackage(pkg):
+ inst_matches = vartree.dbapi.match(atom)
+ inst_matches.reverse() # descending order
+ higher_slot = None
+ for inst_cpv in inst_matches:
+ try:
+ inst_pkg = _pkg(inst_cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if inst_pkg.cp != atom.cp:
+ continue
+ if pkg >= inst_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != inst_pkg.slot_atom:
+ higher_slot = inst_pkg
+ break
+ if higher_slot is None:
+ parents.append(s)
+ break
+ if parents:
+ print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
+ print(colorize("WARN", "but still listed in the following package sets:"))
+ print(" %s\n" % ", ".join(parents))
+
+ del installed_sets
+
+ numselected = len(all_selected)
+ if not numselected:
+ writemsg_level(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+
+ # Unmerge order only matters in some cases
+ if not ordered:
+ unordered = {}
+ for d in pkgmap:
+ selected = d["selected"]
+ if not selected:
+ continue
+ cp = portage.cpv_getkey(next(iter(selected)))
+ cp_dict = unordered.get(cp)
+ if cp_dict is None:
+ cp_dict = {}
+ unordered[cp] = cp_dict
+ for k in d:
+ cp_dict[k] = set()
+ for k, v in d.items():
+ cp_dict[k].update(v)
+ pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
+ for x in range(len(pkgmap)):
+ selected = pkgmap[x]["selected"]
+ if not selected:
+ continue
+ for mytype, mylist in pkgmap[x].items():
+ if mytype == "selected":
+ continue
+ mylist.difference_update(all_selected)
+ cp = portage.cpv_getkey(next(iter(selected)))
+ for y in localtree.dep_match(cp):
+ if y not in pkgmap[x]["omitted"] and \
+ y not in pkgmap[x]["selected"] and \
+ y not in pkgmap[x]["protected"] and \
+ y not in all_selected:
+ pkgmap[x]["omitted"].add(y)
+ if global_unmerge and not pkgmap[x]["selected"]:
+ #avoid cluttering the preview printout with stuff that isn't getting unmerged
+ continue
+ if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
+ virt_cp = sys_virt_map.get(cp)
+ if virt_cp is None:
+ cp_info = "'%s'" % (cp,)
+ else:
+ cp_info = "'%s' (%s)" % (cp, virt_cp)
+ writemsg_level(colorize("BAD","\n\n!!! " + \
+ "%s is part of your system profile.\n" % (cp_info,)),
+ level=logging.WARNING, noiselevel=-1)
+ writemsg_level(colorize("WARN","!!! Unmerging it may " + \
+ "be damaging to your system.\n\n"),
+ level=logging.WARNING, noiselevel=-1)
+ if not quiet:
+ writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
+ else:
+ writemsg_level(bold(cp) + ": ", noiselevel=-1)
+ for mytype in ["selected","protected","omitted"]:
+ if not quiet:
+ writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
+ if pkgmap[x][mytype]:
+ sorted_pkgs = []
+ for mypkg in pkgmap[x][mytype]:
+ try:
+ sorted_pkgs.append(mypkg.cpv)
+ except AttributeError:
+ sorted_pkgs.append(_pkg_str(mypkg))
+ sorted_pkgs.sort(key=cpv_sort_key())
+ for mypkg in sorted_pkgs:
+ if mytype == "selected":
+ writemsg_level(
+ colorize("UNMERGE_WARN", mypkg.version + " "),
+ noiselevel=-1)
+ else:
+ writemsg_level(
+ colorize("GOOD", mypkg.version + " "),
+ noiselevel=-1)
+ else:
+ writemsg_level("none ", noiselevel=-1)
+ if not quiet:
+ writemsg_level("\n", noiselevel=-1)
+ if quiet:
+ writemsg_level("\n", noiselevel=-1)
+
+ writemsg_level("\nAll selected packages: %s\n" %
+ " ".join('=%s' % x for x in all_selected), noiselevel=-1)
+
+ writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
+ " packages are slated for removal.\n")
+ writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
+ " and " + colorize("GOOD", "'omitted'") + \
+ " packages will not be removed.\n\n")
+
+ return os.EX_OK, pkgmap
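+
+ # Illustrative sketch of the returned pkgmap (hypothetical cpvs): one
+ # dict per unmerge argument, each holding cpv sets:
+ #   [{"selected": {"app-misc/foo-1.0"},
+ #     "protected": {"app-misc/foo-1.2"},
+ #     "omitted": set()}]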
+
+def unmerge(root_config, myopts, unmerge_action,
+ unmerge_files, ldpath_mtimes, autoclean=0,
+ clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
+ scheduler=None, writemsg_level=portage.util.writemsg_level):
+ """
+ Returns os.EX_OK if no errors occur, 1 if an error occurs, and
+ 130 if interrupted due to a 'no' answer for --ask.
+ """
+
+ if clean_world:
+ clean_world = myopts.get('--deselect') != 'n'
+
+ rval, pkgmap = _unmerge_display(root_config, myopts,
+ unmerge_action, unmerge_files,
+ clean_delay=clean_delay, ordered=ordered,
+ writemsg_level=writemsg_level)
+
+ if rval != os.EX_OK:
+ return rval
+
+ enter_invalid = '--ask-enter-invalid' in myopts
+ vartree = root_config.trees["vartree"]
+ sets = root_config.sets
+ settings = root_config.settings
+ mysettings = portage.config(clone=settings)
+ xterm_titles = "notitles" not in settings.features
+
+ if "--pretend" in myopts:
+ #we're done... return
+ return os.EX_OK
+ if "--ask" in myopts:
+ uq = UserQuery(myopts)
+ if uq.query("Would you like to unmerge these packages?",
+ enter_invalid) == "No":
+ # enter pretend mode for correct formatting of results
+ myopts["--pretend"] = True
+ print()
+ print("Quitting.")
+ print()
+ return 128 + signal.SIGINT
+ #the real unmerging begins, after a short delay....
+ if clean_delay and not autoclean:
+ countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+
+ all_selected = set()
+ all_selected.update(*[x["selected"] for x in pkgmap])
+
+ # Set counter variables
+ curval = 1
+ maxval = len(all_selected)
+
+ for x in range(len(pkgmap)):
+ for y in pkgmap[x]["selected"]:
+ emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+ message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
+ colorize("MERGE_LIST_PROGRESS", str(curval)),
+ colorize("MERGE_LIST_PROGRESS", str(maxval)),
+ y)
+ writemsg_level(message, noiselevel=-1)
+ curval += 1
+
+ mysplit = y.split("/")
+ #unmerge...
+ retval = portage.unmerge(mysplit[0], mysplit[1],
+ settings=mysettings,
+ vartree=vartree, ldpath_mtimes=ldpath_mtimes,
+ scheduler=scheduler)
+
+ if retval != os.EX_OK:
+ emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
+ if raise_on_error:
+ raise UninstallFailure(retval)
+ sys.exit(retval)
+ else:
+ if clean_world and hasattr(sets["selected"], "cleanPackage")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ if hasattr(sets["selected"], "load"):
+ sets["selected"].load()
+ sets["selected"].cleanPackage(vartree.dbapi, y)
+ sets["selected"].unlock()
+ emergelog(xterm_titles, " >>> unmerge success: "+y)
+
+ if clean_world and hasattr(sets["selected"], "remove")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ # load is called inside remove()
+ for s in root_config.setconfig.active:
+ sets["selected"].remove(SETPREFIX + s)
+ sets["selected"].unlock()
+
+ return os.EX_OK
+
diff --git a/usr/lib/portage/pym/portage/__init__.py b/usr/lib/portage/pym/portage/__init__.py
new file mode 100644
index 0000000..c0872c1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/__init__.py
@@ -0,0 +1,690 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+VERSION = "2.2.14-prefix"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+ import errno
+ if not hasattr(errno, 'ESTALE'):
+ # ESTALE may not be defined on some systems, such as interix.
+ errno.ESTALE = -1
+ import re
+ import types
+ import platform
+
+ # Temporarily delete these imports, to ensure that only the
+ # wrapped versions are imported by portage internals.
+ import os
+ del os
+ import shutil
+ del shutil
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+try:
+
+ import portage.proxy.lazyimport
+ import portage.proxy as proxy
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.cache_errors:CacheError',
+ 'portage.checksum',
+ 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
+ 'portage.cvstree',
+ 'portage.data',
+ 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
+ 'uid,userland,userpriv_groups,wheelgid',
+ 'portage.dbapi',
+ 'portage.dbapi.bintree:bindbapi,binarytree',
+ 'portage.dbapi.cpv_expand:cpv_expand',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+ 'portagetree,portdbapi',
+ 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+ 'portage.dbapi.virtual:fakedbapi',
+ 'portage.dep',
+ 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
+ 'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
+ 'match_from_list,match_to_list',
+ 'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
+ 'portage.eclass_cache',
+ 'portage.elog',
+ 'portage.exception',
+ 'portage.getbinpkg',
+ 'portage.locks',
+ 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
+ 'portage.mail',
+ 'portage.manifest:Manifest',
+ 'portage.output',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild,' + \
+ 'doebuild_environment,spawn,spawnebuild',
+ 'portage.package.ebuild.config:autouse,best_from_dict,' + \
+ 'check_config_instance,config',
+ 'portage.package.ebuild.deprecated_profile_check:' + \
+ 'deprecated_profile_check',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.getmaskingreason:getmaskingreason',
+ 'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.process',
+ 'portage.process:atexit_register,run_exitfuncs',
+ 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
+ 'parse_updates,update_config_files,update_dbentries,' + \
+ 'update_dbentry',
+ 'portage.util',
+ 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
+ 'apply_recursive_permissions,dump_traceback,getconfig,' + \
+ 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
+ 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
+ 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
+ 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
+ 'writemsg_stdout,write_atomic',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion',
+ 'portage.util.listdir:cacheddir,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.mtimedb:MtimeDB',
+ 'portage.versions',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
+ 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
+ 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
+ 'portage.xpak',
+ 'subprocess',
+ 'time',
+ )
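+	# A note on the mini-syntax above (added commentary, not in the
+	# original source): 'pkg.mod' lazily imports a module,
+	# 'pkg.mod:a,b' lazily imports attributes a and b, and
+	# 'name@alias' binds an attribute under a different global name --
+	# for example 'cpv_getkey@getCPFromCPV' exposes cpv_getkey as
+	# getCPFromCPV.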
+
+ try:
+ from collections import OrderedDict
+ except ImportError:
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.mappings:OrderedDict')
+
+ import portage.const
+ from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE, \
+ EPREFIX, EPREFIX_LSTRIP, rootuid
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+# We use utf_8 encoding everywhere. Previously, we used
+# sys.getfilesystemencoding() for the 'merge' encoding, but that had
+# various problems:
+#
+# 1) If the locale is ever changed then it can cause orphan files due
+# to changed character set translation.
+#
+# 2) Ebuilds typically install files with utf_8 encoded file names,
+# and then portage would be forced to rename those files to match
+# sys.getfilesystemencoding(), possibly breaking things.
+#
+# 3) Automatic translation between encodings can lead to nonsensical
+# file names when the source encoding is unknown by portage.
+#
+# 4) It's inconvenient for ebuilds to convert the encodings of file
+# names to match the current locale, and upstreams typically encode
+# file names with utf_8 encoding.
+#
+# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
+# problems by using a constant utf_8 'merge' encoding for all locales, as
+# discussed in bug #382199 and bug #381509.
+_encodings = {
+ 'content' : 'utf_8',
+ 'fs' : 'utf_8',
+ 'merge' : 'utf_8',
+ 'repo.content' : 'utf_8',
+ 'stdio' : 'utf_8',
+}
+
+if sys.hexversion >= 0x3000000:
+
+ def _decode_argv(argv):
+ # With Python 3, the surrogateescape encoding error handler makes it
+ # possible to access the original argv bytes, which can be useful
+		# if their actual encoding does not match the filesystem encoding.
+ fs_encoding = sys.getfilesystemencoding()
+ return [_unicode_decode(x.encode(fs_encoding, 'surrogateescape'))
+ for x in argv]
+
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, str):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = str(s, encoding=encoding, errors=errors)
+ return s
+
+ _native_string = _unicode_decode
+else:
+
+ def _decode_argv(argv):
+ return [_unicode_decode(x) for x in argv]
+
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = unicode(s, encoding=encoding, errors=errors)
+ return s
+
+ _native_string = _unicode_encode
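+
+# A minimal round-trip sketch (illustrative, not part of the original
+# source). Both helpers are no-ops when given a value that is already
+# in the target type, so they can be applied defensively; e.g. under
+# Python 3:
+#
+#   >>> _unicode_encode(u'caf\u00e9')
+#   b'caf\xc3\xa9'
+#   >>> _unicode_decode(b'caf\xc3\xa9')
+#   u'caf\u00e9'
+#   >>> _unicode_decode(u'caf\u00e9')  # already unicode: returned as-is
+#   u'caf\u00e9'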
+
+if sys.hexversion >= 0x20605f0:
+ def _native_kwargs(kwargs):
+ return kwargs
+else:
+ # Avoid "TypeError: keywords must be strings" issue triggered
+ # by unicode_literals: http://bugs.python.org/issue4978
+ def _native_kwargs(kwargs):
+ return dict((_native_string(k), v) for k, v in kwargs.iteritems())
+
+class _unicode_func_wrapper(object):
+ """
+	Wraps a function, converting its arguments from unicode to bytes
+	and its return values from bytes back to unicode. Function calls
+ will raise UnicodeEncodeError if an argument fails to be
+ encoded with the required encoding. Return values that
+ are single strings are decoded with errors='replace'. Return
+ values that are lists of strings are decoded with errors='strict'
+ and elements that fail to be decoded are omitted from the returned
+ list.
+ """
+ __slots__ = ('_func', '_encoding')
+
+ def __init__(self, func, encoding=_encodings['fs']):
+ self._func = func
+ self._encoding = encoding
+
+ def _process_args(self, args, kwargs):
+
+ encoding = self._encoding
+ wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in args]
+ if kwargs:
+ wrapped_kwargs = dict(
+ (k, _unicode_encode(v, encoding=encoding, errors='strict'))
+ for k, v in kwargs.items())
+ else:
+ wrapped_kwargs = {}
+
+ return (wrapped_args, wrapped_kwargs)
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args, wrapped_kwargs = self._process_args(args, kwargs)
+
+ rval = self._func(*wrapped_args, **wrapped_kwargs)
+
+ # Don't use isinstance() since we don't want to convert subclasses
+ # of tuple such as posix.stat_result in Python >=3.2.
+ if rval.__class__ in (list, tuple):
+ decoded_rval = []
+ for x in rval:
+ try:
+ x = _unicode_decode(x, encoding=encoding, errors='strict')
+ except UnicodeDecodeError:
+ pass
+ else:
+ decoded_rval.append(x)
+
+ if isinstance(rval, tuple):
+ rval = tuple(decoded_rval)
+ else:
+ rval = decoded_rval
+ else:
+ rval = _unicode_decode(rval, encoding=encoding, errors='replace')
+
+ return rval
+
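+# A hypothetical illustration (not in the original source): wrapping a
+# raw os function makes it accept unicode paths and return decoded
+# unicode results.
+#
+#   >>> import os as raw_os
+#   >>> listdir = _unicode_func_wrapper(raw_os.listdir)
+#   >>> listdir(u'/')       # path encoded to bytes, entries decoded
+#   [u'bin', u'etc', ...]
+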
+class _unicode_module_wrapper(object):
+ """
+ Wraps a module and wraps all functions with _unicode_func_wrapper.
+ """
+ __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
+
+ def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
+ object.__setattr__(self, '_mod', mod)
+ object.__setattr__(self, '_encoding', encoding)
+ object.__setattr__(self, '_overrides', overrides)
+ if cache:
+ cache = {}
+ else:
+ cache = None
+ object.__setattr__(self, '_cache', cache)
+
+ def __getattribute__(self, attr):
+ cache = object.__getattribute__(self, '_cache')
+ if cache is not None:
+ result = cache.get(attr)
+ if result is not None:
+ return result
+ result = getattr(object.__getattribute__(self, '_mod'), attr)
+ encoding = object.__getattribute__(self, '_encoding')
+ overrides = object.__getattribute__(self, '_overrides')
+ override = None
+ if overrides is not None:
+ override = overrides.get(id(result))
+ if override is not None:
+ result = override
+ elif isinstance(result, type):
+ pass
+ elif type(result) is types.ModuleType:
+ result = _unicode_module_wrapper(result,
+ encoding=encoding, overrides=overrides)
+ elif hasattr(result, '__call__'):
+ result = _unicode_func_wrapper(result, encoding=encoding)
+ if cache is not None:
+ cache[attr] = result
+ return result
+
+import os as _os
+_os_overrides = {
+ id(_os.fdopen) : _os.fdopen,
+ id(_os.popen) : _os.popen,
+ id(_os.read) : _os.read,
+ id(_os.system) : _os.system,
+}
+
+
+try:
+ _os_overrides[id(_os.mkfifo)] = _os.mkfifo
+except AttributeError:
+ pass # Jython
+
+if hasattr(_os, 'statvfs'):
+ _os_overrides[id(_os.statvfs)] = _os.statvfs
+
+os = _unicode_module_wrapper(_os, overrides=_os_overrides,
+ encoding=_encodings['fs'])
+_os_merge = _unicode_module_wrapper(_os,
+ encoding=_encodings['merge'], overrides=_os_overrides)
+
+import shutil as _shutil
+shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
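+
+# Illustrative behavior of the wrapped modules (added commentary, not in
+# the original source): attribute access returns wrapped callables,
+# while the _os_overrides entries (fdopen, popen, read, system) come
+# back unwrapped, since they operate on file descriptors or raw data:
+#
+#   >>> os.read is _os.read     # overridden: the raw function
+#   True
+#   >>> os.listdir              # everything else: a wrapped callable
+#   <portage._unicode_func_wrapper object at 0x...>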
+
+# Imports below this point rely on the above unicode wrapper definitions.
+try:
+ __import__('selinux')
+ import portage._selinux
+ selinux = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['fs'])
+ _selinux_merge = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['merge'])
+except (ImportError, OSError) as e:
+ if isinstance(e, OSError):
+ sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
+ del e
+ _selinux = None
+ selinux = None
+ _selinux_merge = None
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+_python_interpreter = os.path.realpath(sys.executable)
+_bin_path = PORTAGE_BIN_PATH
+_pym_path = PORTAGE_PYM_PATH
+_not_installed = os.path.isfile(os.path.join(PORTAGE_BASE_PATH, ".portage_not_installed"))
+
+# API consumers included in portage should set this to True.
+_internal_caller = False
+
+_sync_mode = False
+
+def _get_stdin():
+ """
+ Buggy code in python's multiprocessing/process.py closes sys.stdin
+ and reassigns it to open(os.devnull), but fails to update the
+ corresponding __stdin__ reference. So, detect that case and handle
+ it appropriately.
+ """
+ if not sys.__stdin__.closed:
+ return sys.__stdin__
+ return sys.stdin
+
+_shell_quote_re = re.compile(r"[\s><=*\\\"'$`]")
+
+def _shell_quote(s):
+ """
+ Quote a string in double-quotes and use backslashes to
+ escape any backslashes, double-quotes, dollar signs, or
+ backquotes in the string.
+ """
+ if _shell_quote_re.search(s) is None:
+ return s
+ for letter in "\\\"$`":
+ if letter in s:
+ s = s.replace(letter, "\\" + letter)
+ return "\"%s\"" % s
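+
+# Illustrative results (not part of the original source):
+#
+#   >>> _shell_quote("simple")          # no metacharacters: unchanged
+#   'simple'
+#   >>> _shell_quote('has "quotes" and $VAR')
+#   '"has \\"quotes\\" and \\$VAR"'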
+
+bsd_chflags = None
+
+if platform.system() in ('FreeBSD',) and rootuid == 0:
+
+ class bsd_chflags(object):
+
+ @classmethod
+ def chflags(cls, path, flags, opts=""):
+ cmd = ['chflags']
+ if opts:
+ cmd.append(opts)
+ cmd.append('%o' % (flags,))
+ cmd.append(path)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = process.find_binary(cmd[0])
+ if fullname is None:
+ raise exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ encoding = _encodings['fs']
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ return
+ # Try to generate an ENOENT error if appropriate.
+ if 'h' in opts:
+ _os_merge.lstat(path)
+ else:
+ _os_merge.stat(path)
+ # Make sure the binary exists.
+ if not portage.process.find_binary('chflags'):
+ raise portage.exception.CommandNotFound('chflags')
+ # Now we're not sure exactly why it failed or what
+ # the real errno was, so just report EPERM.
+ output = _unicode_decode(output, encoding=encoding)
+ e = OSError(errno.EPERM, output)
+ e.errno = errno.EPERM
+ e.filename = path
+ e.message = output
+ raise e
+
+ @classmethod
+ def lchflags(cls, path, flags):
+ return cls.chflags(path, flags, opts='-h')
+
+def load_mod(name):
+ modname = ".".join(name.split(".")[:-1])
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except OSError: #dir doesn't exist
+ os.chdir("/")
+ return "/"
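+# Invoked once at import time so portage never starts out in a working
+# directory that has already been removed (added commentary).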
+getcwd()
+
+def abssymlink(symlink, target=None):
+ """
+	Read a symlink, resolving a relative target against the symlink's
+	own directory, and return the absolute path of the target.
+ @param symlink: path of symlink (must be absolute)
+ @param target: the target of the symlink (as returned
+ by readlink)
+ @rtype: str
+ @return: the absolute path of the symlink target
+ """
+ if target is not None:
+ mylink = target
+ else:
+ mylink = os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir = os.path.dirname(symlink)
+ mylink = mydir + "/" + mylink
+ return os.path.normpath(mylink)
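+
+# Illustrative resolution (not part of the original source): a relative
+# target is resolved against the directory containing the symlink.
+#
+#   >>> abssymlink('/usr/bin/vi', target='../libexec/vi')
+#   '/usr/libexec/vi'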
+
+_doebuild_manifest_exempt_depend = 0
+
+_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress", "5-hdepend"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2"])
+_supported_eapis = frozenset([str(x) for x in range(portage.const.EAPI + 1)] + list(_testing_eapis) + list(_deprecated_eapis))
+
+def _eapi_is_deprecated(eapi):
+ return eapi in _deprecated_eapis
+
+def eapi_is_supported(eapi):
+ if not isinstance(eapi, basestring):
+ # Only call str() when necessary since with python2 it
+ # can trigger UnicodeEncodeError if EAPI is corrupt.
+ eapi = str(eapi)
+ eapi = eapi.strip()
+
+ return eapi in _supported_eapis
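+
+# Illustrative checks (not part of the original source): stable,
+# testing, and deprecated EAPIs are all reported as supported.
+#
+#   >>> eapi_is_supported("5")
+#   True
+#   >>> eapi_is_supported("5-progress")
+#   True
+#   >>> _eapi_is_deprecated("4_pre1")
+#   True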
+
+# This pattern is specified by PMS section 7.3.1.
+_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
+_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
+
+def _parse_eapi_ebuild_head(f):
+ eapi = None
+ eapi_lineno = None
+ lineno = 0
+ for line in f:
+ lineno += 1
+ m = _comment_or_blank_line.match(line)
+ if m is None:
+ eapi_lineno = lineno
+ m = _pms_eapi_re.match(line)
+ if m is not None:
+ eapi = m.group(2)
+ break
+
+ return (eapi, eapi_lineno)
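+
+# Illustrative parse (not part of the original source): comments and
+# blank lines are skipped, and scanning stops at the first line that
+# matches the PMS pattern, yielding the EAPI and its line number.
+#
+#   >>> import io
+#   >>> head = io.StringIO(u'# Copyright\n\nEAPI=5\n')
+#   >>> _parse_eapi_ebuild_head(head)
+#   (u'5', 3)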
+
+def _movefile(src, dest, **kwargs):
+ """Calls movefile and raises a PortageException if an error occurs."""
+ if movefile(src, dest, **kwargs) is None:
+ raise portage.exception.PortageException(
+ "mv '%s' '%s'" % (src, dest))
+
+auxdbkeys = (
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI',
+ 'PROPERTIES', 'DEFINED_PHASES', 'HDEPEND', 'UNUSED_04',
+ 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
+)
+auxdbkeylen = len(auxdbkeys)
+
+def portageexit():
+ pass
+
+class _trees_dict(dict):
+ __slots__ = ('_running_eroot', '_target_eroot',)
+ def __init__(self, *pargs, **kargs):
+ dict.__init__(self, *pargs, **kargs)
+ self._running_eroot = None
+ self._target_eroot = None
+
+def create_trees(config_root=None, target_root=None, trees=None, env=None,
+ eprefix=None):
+
+ if trees is None:
+ trees = _trees_dict()
+ elif not isinstance(trees, _trees_dict):
+ # caller passed a normal dict or something,
+ # but we need a _trees_dict instance
+ trees = _trees_dict(trees)
+
+ if env is None:
+ env = os.environ
+ settings = config(config_root=config_root, target_root=target_root,
+ env=env, eprefix=eprefix)
+ settings.lock()
+
+ trees._target_eroot = settings['EROOT']
+ myroots = [(settings['EROOT'], settings)]
+ if settings["ROOT"] == "/" and settings["EPREFIX"] == const.EPREFIX:
+ trees._running_eroot = trees._target_eroot
+ else:
+
+ # When ROOT != "/" we only want overrides from the calling
+ # environment to apply to the config that's associated
+ # with ROOT != "/", so pass a nearly empty dict for the env parameter.
+ clean_env = {}
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_REPOSITORIES', 'PORTAGE_USERNAME',
+ 'PYTHONPATH', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+ '__PORTAGE_TEST_HARDLINK_LOCKS'):
+ v = settings.get(k)
+ if v is not None:
+ clean_env[k] = v
+ settings = config(config_root=None, target_root="/",
+ env=clean_env, eprefix=None)
+ settings.lock()
+ trees._running_eroot = settings['EROOT']
+ myroots.append((settings['EROOT'], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
+ return trees
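+
+# Typical usage (an illustrative sketch, not in the original source):
+#
+#   >>> trees = create_trees()
+#   >>> eroot = trees._target_eroot
+#   >>> vardb = trees[eroot]["vartree"].dbapi    # installed packages
+#   >>> portdb = trees[eroot]["porttree"].dbapi  # ebuild repositories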
+
+if VERSION == 'HEAD':
+ class _LazyVersion(proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global VERSION
+ if VERSION is not self:
+ return VERSION
+ if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
+ encoding = _encodings['fs']
+ cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --tags || exit $? ; " + \
+ "if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
+ "then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ output_lines = output.splitlines()
+ if output_lines:
+ version_split = output_lines[0].split('-')
+ if version_split:
+ VERSION = version_split[0].lstrip('v')
+ patchlevel = False
+ if len(version_split) > 1:
+ patchlevel = True
+ VERSION = "%s_p%s" % (VERSION, version_split[1])
+ if len(output_lines) > 1 and output_lines[1] == 'modified':
+ head_timestamp = None
+ if len(output_lines) > 3:
+ try:
+ head_timestamp = long(output_lines[3])
+ except ValueError:
+ pass
+ timestamp = long(time.time())
+ if head_timestamp is not None and timestamp > head_timestamp:
+ timestamp = timestamp - head_timestamp
+ if not patchlevel:
+ VERSION = "%s_p0" % (VERSION,)
+ VERSION = "%s_p%d" % (VERSION, timestamp)
+ return VERSION
+ VERSION = 'HEAD'
+ return VERSION
+ VERSION = _LazyVersion()
+
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+def _reset_legacy_globals():
+
+ global _legacy_globals_constructed
+ _legacy_globals_constructed = set()
+ for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
+
+class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ name = object.__getattribute__(self, '_name')
+ from portage._legacy_globals import _get_legacy_global
+ return _get_legacy_global(name)
+
+_reset_legacy_globals()
+
+def _disable_legacy_globals():
+ """
+ This deletes the ObjectProxy instances that are used
+ for lazy initialization of legacy global variables.
+ The purpose of deleting them is to prevent new code
+ from referencing these deprecated variables.
+ """
+ global _legacy_global_var_names
+ for k in _legacy_global_var_names:
+ globals().pop(k, None)
diff --git a/usr/lib/portage/pym/portage/_emirrordist/Config.py b/usr/lib/portage/pym/portage/_emirrordist/Config.py
new file mode 100644
index 0000000..db4bfeb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/Config.py
@@ -0,0 +1,132 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import logging
+import shelve
+import sys
+import time
+
+import portage
+from portage import os
+from portage.util import grabdict, grablines
+from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper
+
+class Config(object):
+ def __init__(self, options, portdb, event_loop):
+ self.options = options
+ self.portdb = portdb
+ self.event_loop = event_loop
+ self.added_byte_count = 0
+ self.added_file_count = 0
+ self.scheduled_deletion_count = 0
+ self.delete_count = 0
+ self.file_owners = {}
+ self.file_failures = {}
+ self.start_time = time.time()
+ self._open_files = []
+
+ self.log_success = self._open_log('success', options.success_log, 'a')
+ self.log_failure = self._open_log('failure', options.failure_log, 'a')
+
+ self.distfiles = None
+ if options.distfiles is not None:
+ self.distfiles = options.distfiles
+
+ self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
+
+ if options.mirror_overrides is not None:
+ self.mirrors.update(grabdict(options.mirror_overrides))
+
+ if options.mirror_skip is not None:
+ for x in options.mirror_skip.split(","):
+ self.mirrors[x] = []
+
+ self.whitelist = None
+ if options.whitelist_from is not None:
+ self.whitelist = set()
+ for filename in options.whitelist_from:
+ for line in grablines(filename):
+ line = line.strip()
+ if line and not line.startswith("#"):
+ self.whitelist.add(line)
+
+ self.restrict_mirror_exemptions = None
+ if options.restrict_mirror_exemptions is not None:
+ self.restrict_mirror_exemptions = frozenset(
+ options.restrict_mirror_exemptions.split(","))
+
+ self.recycle_db = None
+ if options.recycle_db is not None:
+ self.recycle_db = self._open_shelve(
+ options.recycle_db, 'recycle')
+
+ self.distfiles_db = None
+ if options.distfiles_db is not None:
+ self.distfiles_db = self._open_shelve(
+ options.distfiles_db, 'distfiles')
+
+ self.deletion_db = None
+ if options.deletion_db is not None:
+ self.deletion_db = self._open_shelve(
+ options.deletion_db, 'deletion')
+
+ def _open_log(self, log_desc, log_path, mode):
+
+ if log_path is None or self.options.dry_run:
+ log_func = logging.info
+ line_format = "%s: %%s" % log_desc
+ if log_path is not None:
+ logging.warn(("dry-run: %s log "
+ "redirected to logging.info") % log_desc)
+ else:
+ self._open_files.append(io.open(log_path, mode=mode,
+ encoding='utf_8'))
+ line_format = "%s\n"
+ log_func = self._open_files[-1].write
+
+ return self._LogFormatter(line_format, log_func)
+
+ class _LogFormatter(object):
+
+ __slots__ = ('_line_format', '_log_func')
+
+ def __init__(self, line_format, log_func):
+ self._line_format = line_format
+ self._log_func = log_func
+
+ def __call__(self, msg):
+ self._log_func(self._line_format % (msg,))
+
+ def _open_shelve(self, db_file, db_desc):
+ if self.options.dry_run:
+ open_flag = "r"
+ else:
+ open_flag = "c"
+
+ if self.options.dry_run and not os.path.exists(db_file):
+ db = {}
+ else:
+ db = shelve.open(db_file, flag=open_flag)
+ if sys.hexversion < 0x3000000:
+ db = ShelveUnicodeWrapper(db)
+
+ if self.options.dry_run:
+ logging.warn("dry-run: %s db opened in readonly mode" % db_desc)
+ if not isinstance(db, dict):
+ volatile_db = dict((k, db[k]) for k in db)
+ db.close()
+ db = volatile_db
+ else:
+ self._open_files.append(db)
+
+ return db
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ while self._open_files:
+ self._open_files.pop().close()
diff --git a/usr/lib/portage/pym/portage/_emirrordist/DeletionIterator.py b/usr/lib/portage/pym/portage/_emirrordist/DeletionIterator.py
new file mode 100644
index 0000000..dff52c0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/DeletionIterator.py
@@ -0,0 +1,83 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import stat
+
+from portage import os
+from .DeletionTask import DeletionTask
+
+class DeletionIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+
+ def __iter__(self):
+ distdir = self._config.options.distfiles
+ file_owners = self._config.file_owners
+ whitelist = self._config.whitelist
+ distfiles_local = self._config.options.distfiles_local
+ deletion_db = self._config.deletion_db
+ deletion_delay = self._config.options.deletion_delay
+ start_time = self._config.start_time
+ distfiles_set = set(os.listdir(self._config.options.distfiles))
+ for filename in distfiles_set:
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError as e:
+ logging.error("stat failed on '%s' in distfiles: %s\n" %
+ (filename, e))
+ continue
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ elif filename in file_owners:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif whitelist is not None and filename in whitelist:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif distfiles_local is not None and \
+ os.path.exists(os.path.join(distfiles_local, filename)):
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ self._config.scheduled_deletion_count += 1
+
+ if deletion_db is None or deletion_delay is None:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ else:
+ deletion_entry = deletion_db.get(filename)
+
+ if deletion_entry is None:
+ logging.debug("add '%s' to deletion db" % filename)
+ deletion_db[filename] = start_time
+
+ elif deletion_entry + deletion_delay <= start_time:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ if deletion_db is not None:
+ for filename in list(deletion_db):
+ if filename not in distfiles_set:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug("drop '%s' from deletion db" %
+ filename)
diff --git a/usr/lib/portage/pym/portage/_emirrordist/DeletionTask.py b/usr/lib/portage/pym/portage/_emirrordist/DeletionTask.py
new file mode 100644
index 0000000..7d10957
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/DeletionTask.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from _emerge.CompositeTask import CompositeTask
+
+class DeletionTask(CompositeTask):
+
+ __slots__ = ('distfile', 'config')
+
+ def _start(self):
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ if self.config.options.recycle_dir is not None:
+ recycle_path = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+ if self.config.options.dry_run:
+ logging.info(("dry-run: move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ else:
+ logging.debug(("move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ try:
+ os.rename(distfile_path, recycle_path)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ logging.error(("rename %s from distfiles to "
+ "recycle failed: %s") % (self.distfile, e))
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_task(
+ FileCopier(src_path=distfile_path,
+ dest_path=recycle_path,
+ background=False),
+ self._recycle_copier_exit)
+ return
+
+ success = True
+
+ if self.config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "distfiles") % self.distfile)
+ else:
+ logging.debug(("delete '%s' from "
+ "distfiles") % self.distfile)
+ try:
+ os.unlink(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._async_wait()
+
+ def _recycle_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ success = True
+ if copier.returncode == os.EX_OK:
+
+ try:
+ os.unlink(copier.src_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ else:
+ logging.error(("%s copy from distfiles "
+ "to recycle failed: %s") % (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._current_task = None
+ self.wait()
+
+ def _success(self):
+
+ cpv = "unknown"
+ if self.config.distfiles_db is not None:
+ cpv = self.config.distfiles_db.get(self.distfile, cpv)
+
+ self.config.delete_count += 1
+ self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile))
+
+ if self.config.distfiles_db is not None:
+ try:
+ del self.config.distfiles_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "distfiles db") % self.distfile)
+
+ if self.config.deletion_db is not None:
+ try:
+ del self.config.deletion_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "deletion db") % self.distfile)
diff --git a/usr/lib/portage/pym/portage/_emirrordist/FetchIterator.py b/usr/lib/portage/pym/portage/_emirrordist/FetchIterator.py
new file mode 100644
index 0000000..16a0b04
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/FetchIterator.py
@@ -0,0 +1,147 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.dep import use_reduce
+from portage.exception import PortageException
+from .FetchTask import FetchTask
+
+class FetchIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+ self._log_failure = config.log_failure
+
+ def _iter_every_cp(self):
+		# List categories individually, so that results start being
+		# yielded sooner and a signal interrupt is handled with less latency.
+ cp_all = self._config.portdb.cp_all
+ for category in sorted(self._config.portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def __iter__(self):
+
+ portdb = self._config.portdb
+ get_repo_for_location = portdb.repositories.get_repo_for_location
+ file_owners = self._config.file_owners
+ file_failures = self._config.file_failures
+ restrict_mirror_exemptions = self._config.restrict_mirror_exemptions
+
+ hash_filter = _hash_filter(
+ portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+
+ for cp in self._iter_every_cp():
+
+ for tree in portdb.porttrees:
+
+ # Reset state so the Manifest is pulled once
+ # for this cp / tree combination.
+ digests = None
+ repo_config = get_repo_for_location(tree)
+
+ for cpv in portdb.cp_list(cp, mytree=tree):
+
+ try:
+ restrict, = portdb.aux_get(cpv, ("RESTRICT",),
+ mytree=tree)
+ except (KeyError, PortageException) as e:
+ self._log_failure("%s\t\taux_get exception %s" %
+ (cpv, e))
+ continue
+
+ # Here we use matchnone=True to ignore conditional parts
+ # of RESTRICT since they don't apply unconditionally.
+ # Assume such conditionals only apply on the client side.
+ try:
+ restrict = frozenset(use_reduce(restrict,
+ flat=True, matchnone=True))
+ except PortageException as e:
+ self._log_failure("%s\t\tuse_reduce exception %s" %
+ (cpv, e))
+ continue
+
+ if "fetch" in restrict:
+ continue
+
+ try:
+ uri_map = portdb.getFetchMap(cpv)
+ except PortageException as e:
+ self._log_failure("%s\t\tgetFetchMap exception %s" %
+ (cpv, e))
+ continue
+
+ if not uri_map:
+ continue
+
+ if "mirror" in restrict:
+ skip = False
+ if restrict_mirror_exemptions is not None:
+ new_uri_map = {}
+ for filename, uri_tuple in uri_map.items():
+ for uri in uri_tuple:
+ if uri[:9] == "mirror://":
+ i = uri.find("/", 9)
+ if i != -1 and uri[9:i].strip("/") in \
+ restrict_mirror_exemptions:
+ new_uri_map[filename] = uri_tuple
+ break
+ if new_uri_map:
+ uri_map = new_uri_map
+ else:
+ skip = True
+ else:
+ skip = True
+
+ if skip:
+ continue
+
+ # Parse Manifest for this cp if we haven't yet.
+ if digests is None:
+ try:
+ digests = repo_config.load_manifest(
+ os.path.join(repo_config.location, cp)
+ ).getTypeDigests("DIST")
+ except (EnvironmentError, PortageException) as e:
+ for filename in uri_map:
+ self._log_failure(
+ "%s\t%s\tManifest exception %s" %
+ (cpv, filename, e))
+ file_failures[filename] = cpv
+ continue
+
+ if not digests:
+ for filename in uri_map:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+
+ for filename, uri_tuple in uri_map.items():
+ file_digests = digests.get(filename)
+ if file_digests is None:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+ if filename in file_owners:
+ continue
+ file_owners[filename] = cpv
+
+ file_digests = \
+ _filter_unaccelarated_hashes(file_digests)
+ if hash_filter is not None:
+ file_digests = _apply_hash_filter(
+ file_digests, hash_filter)
+
+ yield FetchTask(cpv=cpv,
+ background=True,
+ digests=file_digests,
+ distfile=filename,
+ restrict=restrict,
+ uri_tuple=uri_tuple,
+ config=self._config)
diff --git a/usr/lib/portage/pym/portage/_emirrordist/FetchTask.py b/usr/lib/portage/pym/portage/_emirrordist/FetchTask.py
new file mode 100644
index 0000000..307c5bd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/FetchTask.py
@@ -0,0 +1,631 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import collections
+import errno
+import logging
+import random
+import stat
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from portage.util._async.FileDigester import FileDigester
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+
+default_hash_name = portage.const.MANIFEST2_REQUIRED_HASH
+
+# Use --no-check-certificate since Manifest digests should provide
+# enough security, and certificates may be self-signed or otherwise unverifiable.
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
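+
+# Illustrative expansion (not part of the original source): _fetch_uri
+# below splits this command with portage.util.shlex_split and then
+# expands each argument with portage.util.varexpand, e.g.:
+#
+#   >>> portage.util.varexpand("${DISTDIR}/${FILE}",
+#   ...     mydict={"DISTDIR": "/var/tmp", "FILE": "pkg.tar.gz"})
+#   '/var/tmp/pkg.tar.gz'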
+
+class FetchTask(CompositeTask):
+
+ __slots__ = ('distfile', 'digests', 'config', 'cpv',
+ 'restrict', 'uri_tuple', '_current_mirror',
+ '_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file',
+ '_fs_mirror_stack', '_mirror_stack',
+ '_previously_added',
+ '_primaryuri_stack', '_log_path', '_tried_uris')
+
+ def _start(self):
+
+ if self.config.options.fetch_log_dir is not None and \
+ not self.config.options.dry_run:
+ self._log_path = os.path.join(
+ self.config.options.fetch_log_dir,
+ self.distfile + '.log')
+
+ self._previously_added = True
+ if self.config.distfiles_db is not None and \
+ self.distfile not in self.config.distfiles_db:
+ self._previously_added = False
+ self.config.distfiles_db[self.distfile] = self.cpv
+
+ if not self._have_needed_digests():
+ msg = "incomplete digests: %s" % " ".join(self.digests)
+ self.scheduler.output(msg, background=self.background,
+ log_path=self._log_path)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, "distfiles", e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+
+ if not size_ok:
+ if self.config.options.dry_run:
+ if st is not None:
+ logging.info(("dry-run: delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ # Do the unlink in order to ensure that the path is clear,
+ # even if stat raised ENOENT, since a broken symlink can
+ # trigger ENOENT.
+ if self._unlink_file(distfile_path, "distfiles"):
+ if st is not None:
+ logging.debug(("delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, "unlink failed in distfiles"))
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if size_ok:
+ if self.config.options.verify_existing_digest:
+ self._start_task(
+ FileDigester(file_path=distfile_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path), self._distfiles_digester_exit)
+ return
+
+ self._success()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_fetch()
+
+ def _success(self):
+ if not self._previously_added:
+ size = self.digests["size"]
+ self.config.added_byte_count += size
+ self.config.added_file_count += 1
+ self.config.log_success("%s\t%s\tadded %i bytes" %
+ (self.cpv, self.distfile, size))
+
+ if self._log_path is not None:
+ if not self.config.options.dry_run:
+ try:
+ os.unlink(self._log_path)
+ except OSError:
+ pass
+
+ if self.config.options.recycle_dir is not None:
+
+ recycle_file = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+
+ if self.config.options.dry_run:
+ if os.path.exists(recycle_file):
+ logging.info("dry-run: delete '%s' from recycle" %
+ (self.distfile,))
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError:
+ pass
+ else:
+ logging.debug("delete '%s' from recycle" %
+ (self.distfile,))
+
+ def _distfiles_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._default_exit(digester) != os.EX_OK:
+ # IOError reading file in our main distfiles directory? This
+ # is a bad situation which normally does not occur, so
+			# skip this file and report it, in order to draw the
+			# administrator's attention to the problem.
+ msg = "%s distfiles digester failed unexpectedly" % \
+ (self.distfile,)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.wait()
+ return
+
+ wrong_digest = self._find_bad_digest(digester.digests)
+ if wrong_digest is None:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_fetch()
+
+ _mirror_info = collections.namedtuple('_mirror_info',
+ 'name location')
+
+ def _start_fetch(self):
+
+ self._previously_added = False
+ self._fs_mirror_stack = []
+ if self.config.options.distfiles_local is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'distfiles-local', self.config.options.distfiles_local))
+ if self.config.options.recycle_dir is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'recycle', self.config.options.recycle_dir))
+
+ self._primaryuri_stack = []
+ self._mirror_stack = []
+ for uri in reversed(self.uri_tuple):
+ if uri.startswith('mirror://'):
+ self._mirror_stack.append(
+ self._mirror_iterator(uri, self.config.mirrors))
+ else:
+ self._primaryuri_stack.append(uri)
+
+ self._tried_uris = set()
+ self._try_next_mirror()
+
+ @staticmethod
+ def _mirror_iterator(uri, mirrors_dict):
+
+ slash_index = uri.find("/", 9)
+ if slash_index != -1:
+ mirror_name = uri[9:slash_index].strip("/")
+ mirrors = mirrors_dict.get(mirror_name)
+ if not mirrors:
+ return
+ mirrors = list(mirrors)
+ while mirrors:
+ mirror = mirrors.pop(random.randint(0, len(mirrors) - 1))
+ yield mirror.rstrip("/") + "/" + uri[slash_index+1:]
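+
+	# Illustrative expansion (not part of the original source): each
+	# configured host for the named mirror is combined with the path
+	# component of the URI, in random order.
+	#
+	#   >>> mirrors = {"gentoo": ["http://a.example/", "http://b.example"]}
+	#   >>> sorted(FetchTask._mirror_iterator(
+	#   ...     "mirror://gentoo/foo.tar.gz", mirrors))
+	#   ['http://a.example/foo.tar.gz', 'http://b.example/foo.tar.gz']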
+
+ def _try_next_mirror(self):
+ if self._fs_mirror_stack:
+ self._fetch_fs(self._fs_mirror_stack.pop())
+ return
+ else:
+ uri = self._next_uri()
+ if uri is not None:
+ self._tried_uris.add(uri)
+ self._fetch_uri(uri)
+ return
+
+ if self._tried_uris:
+ msg = "all uris failed"
+ else:
+ msg = "no fetchable uris"
+
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _next_uri(self):
+ remaining_tries = self.config.options.tries - len(self._tried_uris)
+ if remaining_tries > 0:
+
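+			# Mirrors are preferred while more than half of the allotted
+			# tries remain; after that, upstream SRC_URI hosts
+			# (primaryuri) are tried first, and again as a last resort
+			# once the mirror stacks are exhausted (added commentary).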
+ if remaining_tries <= self.config.options.tries // 2:
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._mirror_stack:
+ uri = next(self._mirror_stack[-1], None)
+ if uri is None:
+ self._mirror_stack.pop()
+ else:
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ return None
+
+ def _fetch_fs(self, mirror_info):
+ file_path = os.path.join(mirror_info.location, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, mirror_info.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+ self._current_stat = st
+
+ if size_ok:
+ self._current_mirror = mirror_info
+ self._start_task(
+ FileDigester(file_path=file_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fs_mirror_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fs_mirror_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s %s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, current_mirror.name, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ elif self.config.options.dry_run:
+ # Report success without actually touching any files
+ if self._same_device(current_mirror.location,
+ self.config.options.distfiles):
+ logging.info(("dry-run: hardlink '%s' from %s "
+ "to distfiles") % (self.distfile, current_mirror.name))
+ else:
+ logging.info("dry-run: copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ src = os.path.join(current_mirror.location, self.distfile)
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ if self._hardlink_atomic(src, dest,
+ "%s to %s" % (current_mirror.name, "distfiles")):
+ logging.debug("hardlink '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ self._start_task(
+ FileCopier(src_path=src, dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fs_mirror_copier_exit)
+ return
+
+ self._try_next_mirror()
+
+ def _fs_mirror_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if copier.returncode != os.EX_OK:
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+
+ logging.debug("copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+
+ # Apply the timestamp from the source file, but
+ # just rely on umask for permissions.
+ try:
+ if sys.hexversion >= 0x3030000:
+ os.utime(copier.dest_path,
+ ns=(self._current_stat.st_mtime_ns,
+ self._current_stat.st_mtime_ns))
+ else:
+ os.utime(copier.dest_path,
+ (self._current_stat[stat.ST_MTIME],
+ self._current_stat[stat.ST_MTIME]))
+ except OSError as e:
+ msg = "%s %s utime failed unexpectedly: %s" % \
+ (self.distfile, current_mirror.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_uri(self, uri):
+
+ if self.config.options.dry_run:
+ # Simply report success.
+ logging.info("dry-run: fetch '%s' from '%s'" %
+ (self.distfile, uri))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ if self.config.options.temp_dir:
+ self._fetch_tmp_dir_info = 'temp-dir'
+ distdir = self.config.options.temp_dir
+ else:
+ self._fetch_tmp_dir_info = 'distfiles'
+ distdir = self.config.options.distfiles
+
+ tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
+
+ variables = {
+ "DISTDIR": distdir,
+ "URI": uri,
+ "FILE": tmp_basename
+ }
+
+ self._fetch_tmp_file = os.path.join(distdir, tmp_basename)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ args = portage.util.shlex_split(default_fetchcommand)
+ args = [portage.util.varexpand(x, mydict=variables)
+ for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in args]
+
+ null_fd = os.open(os.devnull, os.O_RDONLY)
+ fetcher = PopenProcess(background=self.background,
+ proc=subprocess.Popen(args, stdin=null_fd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=self.scheduler)
+ os.close(null_fd)
+
+ fetcher.pipe_reader = PipeLogger(background=self.background,
+ input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
+ scheduler=self.scheduler)
+
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ self._assert_current(fetcher)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if os.path.exists(self._fetch_tmp_file):
+ self._start_task(
+ FileDigester(file_path=self._fetch_tmp_file,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fetch_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fetch_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+ else:
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ try:
+ os.rename(self._fetch_tmp_file, dest)
+ except OSError:
+ self._start_task(
+ FileCopier(src_path=self._fetch_tmp_file,
+ dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fetch_copier_exit)
+ return
+ else:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_copier_exit(self, copier):
+
+ self._assert_current(copier)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if copier.returncode == os.EX_OK:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ else:
+ # out of space?
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = 1
+ self.wait()
+
+ def _unlink_file(self, file_path, dir_info):
+ try:
+ os.unlink(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "unlink '%s' failed in %s: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ return True
+
+ def _have_needed_digests(self):
+ return "size" in self.digests and \
+ self._select_hash() is not None
+
+ def _select_hash(self):
+ if default_hash_name in self.digests:
+ return default_hash_name
+ else:
+ for hash_name in self.digests:
+ if hash_name != "size" and \
+ hash_name in portage.checksum.hashfunc_map:
+ return hash_name
+
+ return None
+
+ def _find_bad_digest(self, digests):
+ for hash_name, hash_value in digests.items():
+ if self.digests[hash_name] != hash_value:
+ return hash_name
+ return None
+
+ @staticmethod
+ def _same_device(path1, path2):
+ try:
+ st1 = os.stat(path1)
+ st2 = os.stat(path2)
+ except OSError:
+ return False
+ else:
+ return st1.st_dev == st2.st_dev
+
+ def _hardlink_atomic(self, src, dest, dir_info):
+
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
+ (tail, os.getpid()))
+
+ try:
+ try:
+ os.link(src, hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ msg = "hardlink %s from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ msg = "hardlink rename '%s' from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ finally:
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError:
+ pass
+
+ return True
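+
+# Design note (added commentary, not in the original source): linking to
+# a hidden temporary name and then rename()ing it over the destination
+# makes publication atomic, so concurrent readers never see a partially
+# created distfile. EXDEV (cross-device link) is the one expected error;
+# it is returned quietly as False so callers can fall back to FileCopier.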
diff --git a/usr/lib/portage/pym/portage/_emirrordist/MirrorDistTask.py b/usr/lib/portage/pym/portage/_emirrordist/MirrorDistTask.py
new file mode 100644
index 0000000..571caa5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/MirrorDistTask.py
@@ -0,0 +1,219 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.CompositeTask import CompositeTask
+from .FetchIterator import FetchIterator
+from .DeletionIterator import DeletionIterator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class MirrorDistTask(CompositeTask):
+
+ __slots__ = ('_config', '_terminated', '_term_check_id')
+
+ def __init__(self, config):
+ CompositeTask.__init__(self, scheduler=config.event_loop)
+ self._config = config
+ self._terminated = threading.Event()
+
+ def _start(self):
+ self._term_check_id = self.scheduler.idle_add(self._termination_check)
+ fetch = TaskScheduler(iter(FetchIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(fetch, self._fetch_exit)
+
+ def _fetch_exit(self, fetch):
+
+ self._assert_current(fetch)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._config.options.delete:
+ deletion = TaskScheduler(iter(DeletionIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(deletion, self._deletion_exit)
+ return
+
+ self._post_deletion()
+
+ def _deletion_exit(self, deletion):
+
+ self._assert_current(deletion)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._post_deletion()
+
+ def _post_deletion(self):
+
+ if self._config.options.recycle_db is not None:
+ self._update_recycle_db()
+
+ if self._config.options.scheduled_deletion_log is not None:
+ self._scheduled_deletion_log()
+
+ self._summary()
+
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _update_recycle_db(self):
+
+ start_time = self._config.start_time
+ recycle_dir = self._config.options.recycle_dir
+ recycle_db = self._config.recycle_db
+ r_deletion_delay = self._config.options.recycle_deletion_delay
+
+		# Use a plain dict to optimize access.
+ recycle_db_cache = dict(recycle_db.items())
+
+ for filename in os.listdir(recycle_dir):
+
+ recycle_file = os.path.join(recycle_dir, filename)
+
+ try:
+ st = os.stat(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("stat failed for '%s' in "
+ "recycle: %s") % (filename, e))
+ continue
+
+ value = recycle_db_cache.pop(filename, None)
+ if value is None:
+ logging.debug(("add '%s' to "
+ "recycle db") % filename)
+ recycle_db[filename] = (st.st_size, start_time)
+ else:
+ r_size, r_time = value
+ if long(r_size) != st.st_size:
+ recycle_db[filename] = (st.st_size, start_time)
+ elif r_time + r_deletion_delay < start_time:
+ if self._config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "recycle") % filename)
+ logging.info(("drop '%s' from "
+ "recycle db") % filename)
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("delete '%s' from "
+ "recycle failed: %s") % (filename, e))
+ else:
+ logging.debug(("delete '%s' from "
+ "recycle") % filename)
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "recycle db") % filename)
+
+ # Existing files were popped from recycle_db_cache,
+ # so any remaining entries are for files that no
+ # longer exist.
+ for filename in recycle_db_cache:
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop non-existent '%s' from "
+ "recycle db") % filename)
+
+ def _scheduled_deletion_log(self):
+
+ start_time = self._config.start_time
+ dry_run = self._config.options.dry_run
+ deletion_delay = self._config.options.deletion_delay
+ distfiles_db = self._config.distfiles_db
+
+ date_map = {}
+ for filename, timestamp in self._config.deletion_db.items():
+ date = timestamp + deletion_delay
+ if date < start_time:
+ date = start_time
+ date = time.strftime("%Y-%m-%d", time.gmtime(date))
+ date_files = date_map.get(date)
+ if date_files is None:
+ date_files = []
+ date_map[date] = date_files
+ date_files.append(filename)
+
+ if dry_run:
+ logging.warn(("dry-run: scheduled-deletions log "
+ "will be summarized via logging.info"))
+
+ lines = []
+ for date in sorted(date_map):
+ date_files = date_map[date]
+ if dry_run:
+ logging.info(("dry-run: scheduled deletions for %s: %s files") %
+ (date, len(date_files)))
+ lines.append("%s\n" % date)
+ for filename in date_files:
+ cpv = "unknown"
+ if distfiles_db is not None:
+ cpv = distfiles_db.get(filename, cpv)
+ lines.append("\t%s\t%s\n" % (filename, cpv))
+
+ if not dry_run:
+ portage.util.write_atomic(
+ self._config.options.scheduled_deletion_log,
+ "".join(lines))
+
+ def _summary(self):
+ elapsed_time = time.time() - self._config.start_time
+ fail_count = len(self._config.file_failures)
+ delete_count = self._config.delete_count
+ scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count
+ added_file_count = self._config.added_file_count
+ added_byte_count = self._config.added_byte_count
+
+ logging.info("finished in %i seconds" % elapsed_time)
+ logging.info("failed to fetch %i files" % fail_count)
+ logging.info("deleted %i files" % delete_count)
+ logging.info("deletion of %i files scheduled" %
+ scheduled_deletion_count)
+ logging.info("added %i files" % added_file_count)
+ logging.info("added %i bytes total" % added_byte_count)
+
+ def terminate(self):
+ self._terminated.set()
+
+ def _termination_check(self):
+ if self._terminated.is_set():
+ self.cancel()
+ self.wait()
+ return True
+
+ def _wait(self):
+ CompositeTask._wait(self)
+ if self._term_check_id is not None:
+ self.scheduler.source_remove(self._term_check_id)
+ self._term_check_id = None
diff --git a/usr/lib/portage/pym/portage/_emirrordist/__init__.py b/usr/lib/portage/pym/portage/_emirrordist/__init__.py
new file mode 100644
index 0000000..6cde932
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/_emirrordist/main.py b/usr/lib/portage/pym/portage/_emirrordist/main.py
new file mode 100644
index 0000000..ce92c2a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_emirrordist/main.py
@@ -0,0 +1,463 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import sys
+
+import portage
+from portage import os
+from portage.util import normalize_path, writemsg_level, _recursive_file_list
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from .Config import Config
+from .MirrorDistTask import MirrorDistTask
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+seconds_per_day = 24 * 60 * 60
+
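+# Each dict below describes one argparse option; parse_args() turns, for
+# example, {"longopt": "--jobs", "shortopt": "-j", "type": int} into
+# parser.add_argument("--jobs", "-j", type=int, help=...).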
+common_options = (
+ {
+ "longopt" : "--dry-run",
+ "help" : "perform a trial run with no changes made (usually combined "
+ "with --verbose)",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--verbose",
+ "shortopt" : "-v",
+ "help" : "display extra information on stderr "
+			"(multiple occurrences increase verbosity)",
+ "action" : "count",
+ "default" : 0,
+ },
+ {
+ "longopt" : "--ignore-default-opts",
+ "help" : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles",
+ "help" : "distfiles directory to use (required)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--jobs",
+ "shortopt" : "-j",
+ "help" : "number of concurrent jobs to run",
+ "type" : int
+ },
+ {
+ "longopt" : "--load-average",
+ "shortopt" : "-l",
+ "help" : "load average limit for spawning of new concurrent jobs",
+ "metavar" : "LOAD",
+ "type" : float
+ },
+ {
+ "longopt" : "--tries",
+ "help" : "maximum number of tries per file, 0 means unlimited (default is 10)",
+ "default" : 10,
+ "type" : int
+ },
+ {
+ "longopt" : "--repo",
+ "help" : "name of repo to operate on"
+ },
+ {
+ "longopt" : "--config-root",
+ "help" : "location of portage config files",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir",
+ "help" : "override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir-overlay",
+ "help" : "override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)"
+ },
+ {
+ "longopt" : "--repositories-configuration",
+ "help" : "override configuration of repositories (in format of repos.conf)"
+ },
+ {
+ "longopt" : "--strict-manifests",
+ "help" : "manually override \"strict\" FEATURES setting",
+ "choices" : ("y", "n"),
+ "metavar" : "<y|n>",
+ },
+ {
+ "longopt" : "--failure-log",
+ "help" : "log file for fetch failures, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--success-log",
+ "help" : "log file for fetch successes, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--scheduled-deletion-log",
+ "help" : "log file for scheduled deletions, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--delete",
+ "help" : "enable deletion of unused distfiles",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--deletion-db",
+ "help" : "database file used to track lifetime of files "
+ "scheduled for delayed deletion",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--deletion-delay",
+ "help" : "delay time for deletion, measured in seconds",
+ "metavar" : "SECONDS"
+ },
+ {
+ "longopt" : "--temp-dir",
+ "help" : "temporary directory for downloads",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--mirror-overrides",
+ "help" : "file holding a list of mirror overrides",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--mirror-skip",
+ "help" : "comma delimited list of mirror targets to skip "
+ "when fetching"
+ },
+ {
+ "longopt" : "--restrict-mirror-exemptions",
+ "help" : "comma delimited list of mirror targets for which to "
+ "ignore RESTRICT=\"mirror\""
+ },
+ {
+ "longopt" : "--verify-existing-digest",
+ "help" : "use digest as a verification of whether existing "
+ "distfiles are valid",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles-local",
+ "help" : "distfiles-local directory to use",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--distfiles-db",
+ "help" : "database file used to track which ebuilds a "
+ "distfile belongs to",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-dir",
+ "help" : "directory for extended retention of files that "
+ "are removed from distdir with the --delete option",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--recycle-db",
+ "help" : "database file used to track lifetime of files "
+ "in recycle dir",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-deletion-delay",
+ "help" : "delay time for deletion of unused files from "
+ "recycle dir, measured in seconds (defaults to "
+ "the equivalent of 60 days)",
+ "default" : 60 * seconds_per_day,
+ "metavar" : "SECONDS",
+ "type" : int
+ },
+ {
+ "longopt" : "--fetch-log-dir",
+ "help" : "directory for individual fetch logs",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--whitelist-from",
+ "help" : "specifies a file containing a list of files to "
+			"whitelist, one per line; lines prefixed with # are ignored",
+ "action" : "append",
+ "metavar" : "FILE"
+ },
+)
+
+def parse_args(args):
+ description = "emirrordist - a fetch tool for mirroring " \
+ "of package distfiles"
+ usage = "emirrordist [options] <action>"
+ parser = ArgumentParser(description=description, usage=usage)
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--version",
+ action="store_true",
+ help="display portage version and exit")
+ actions.add_argument("--mirror",
+ action="store_true",
+ help="mirror distfiles for the selected repository")
+
+ common = parser.add_argument_group('Common options')
+ for opt_info in common_options:
+ opt_pargs = [opt_info["longopt"]]
+ if opt_info.get("shortopt"):
+ opt_pargs.append(opt_info["shortopt"])
+ opt_kwargs = {"help" : opt_info["help"]}
+ for k in ("action", "choices", "default", "metavar", "type"):
+ if k in opt_info:
+ opt_kwargs[k] = opt_info[k]
+ common.add_argument(*opt_pargs, **opt_kwargs)
+
+ options, args = parser.parse_known_args(args)
+
+ return (parser, options, args)
+
+def emirrordist_main(args):
+
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
+
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, args = parse_args(args)
+
+ if options.version:
+ sys.stdout.write("Portage %s\n" % portage.VERSION)
+ return os.EX_OK
+
+ config_root = options.config_root
+
+ if options.portdir is not None:
+ writemsg_level("emirrordist: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+ if options.portdir_overlay is not None:
+ writemsg_level("emirrordist: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+ elif options.portdir_overlay is not None:
+ env['PORTDIR_OVERLAY'] = options.portdir_overlay
+
+ if options.portdir is not None:
+ env['PORTDIR'] = options.portdir
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()
+
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ if options.repo is None:
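+		# prepos includes the implicit DEFAULT section, so a length of
+		# 2 means exactly one real repository is configured.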
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+
+ if options.jobs is not None:
+ options.jobs = int(options.jobs)
+
+ if options.load_average is not None:
+ options.load_average = float(options.load_average)
+
+ if options.failure_log is not None:
+ options.failure_log = normalize_path(
+ os.path.abspath(options.failure_log))
+
+ parent_dir = os.path.dirname(options.failure_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--failure-log '%s' parent is not a "
+ "writable directory") % options.failure_log)
+
+ if options.success_log is not None:
+ options.success_log = normalize_path(
+ os.path.abspath(options.success_log))
+
+ parent_dir = os.path.dirname(options.success_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--success-log '%s' parent is not a "
+ "writable directory") % options.success_log)
+
+ if options.scheduled_deletion_log is not None:
+ options.scheduled_deletion_log = normalize_path(
+ os.path.abspath(options.scheduled_deletion_log))
+
+ parent_dir = os.path.dirname(options.scheduled_deletion_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--scheduled-deletion-log '%s' parent is not a "
+ "writable directory") % options.scheduled_deletion_log)
+
+ if options.deletion_db is None:
+ parser.error("--scheduled-deletion-log requires --deletion-db")
+
+ if options.deletion_delay is not None:
+ options.deletion_delay = long(options.deletion_delay)
+ if options.deletion_db is None:
+ parser.error("--deletion-delay requires --deletion-db")
+
+ if options.deletion_db is not None:
+ if options.deletion_delay is None:
+ parser.error("--deletion-db requires --deletion-delay")
+ options.deletion_db = normalize_path(
+ os.path.abspath(options.deletion_db))
+
+ if options.temp_dir is not None:
+ options.temp_dir = normalize_path(
+ os.path.abspath(options.temp_dir))
+
+ if not (os.path.isdir(options.temp_dir) and
+ os.access(options.temp_dir, os.W_OK|os.X_OK)):
+ parser.error(("--temp-dir '%s' is not a "
+ "writable directory") % options.temp_dir)
+
+ if options.distfiles is not None:
+ options.distfiles = normalize_path(
+ os.path.abspath(options.distfiles))
+
+ if not (os.path.isdir(options.distfiles) and
+ os.access(options.distfiles, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles '%s' is not a "
+ "writable directory") % options.distfiles)
+ else:
+ parser.error("missing required --distfiles parameter")
+
+ if options.mirror_overrides is not None:
+ options.mirror_overrides = normalize_path(
+ os.path.abspath(options.mirror_overrides))
+
+ if not (os.access(options.mirror_overrides, os.R_OK) and
+ os.path.isfile(options.mirror_overrides)):
+ parser.error(
+				"--mirror-overrides '%s' is not a readable file" %
+ options.mirror_overrides)
+
+ if options.distfiles_local is not None:
+ options.distfiles_local = normalize_path(
+ os.path.abspath(options.distfiles_local))
+
+ if not (os.path.isdir(options.distfiles_local) and
+ os.access(options.distfiles_local, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles-local '%s' is not a "
+ "writable directory") % options.distfiles_local)
+
+ if options.distfiles_db is not None:
+ options.distfiles_db = normalize_path(
+ os.path.abspath(options.distfiles_db))
+
+ if options.tries is not None:
+ options.tries = int(options.tries)
+
+ if options.recycle_dir is not None:
+ options.recycle_dir = normalize_path(
+ os.path.abspath(options.recycle_dir))
+ if not (os.path.isdir(options.recycle_dir) and
+ os.access(options.recycle_dir, os.W_OK|os.X_OK)):
+ parser.error(("--recycle-dir '%s' is not a "
+ "writable directory") % options.recycle_dir)
+
+ if options.recycle_db is not None:
+ if options.recycle_dir is None:
+ parser.error("--recycle-db requires "
+ "--recycle-dir to be specified")
+ options.recycle_db = normalize_path(
+ os.path.abspath(options.recycle_db))
+
+ if options.recycle_deletion_delay is not None:
+ options.recycle_deletion_delay = \
+ long(options.recycle_deletion_delay)
+
+ if options.fetch_log_dir is not None:
+ options.fetch_log_dir = normalize_path(
+ os.path.abspath(options.fetch_log_dir))
+
+ if not (os.path.isdir(options.fetch_log_dir) and
+ os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
+ parser.error(("--fetch-log-dir '%s' is not a "
+ "writable directory") % options.fetch_log_dir)
+
+ if options.whitelist_from:
+ normalized_paths = []
+ for x in options.whitelist_from:
+ path = normalize_path(os.path.abspath(x))
+ if not os.access(path, os.R_OK):
+ parser.error("--whitelist-from '%s' is not readable" % x)
+ if os.path.isfile(path):
+ normalized_paths.append(path)
+ elif os.path.isdir(path):
+ for file in _recursive_file_list(path):
+ if not os.access(file, os.R_OK):
+					parser.error("--whitelist-from '%s' directory contains unreadable file '%s'" % (x, file))
+ normalized_paths.append(file)
+ else:
+ parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
+ options.whitelist_from = normalized_paths
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
+ settings.lock()
+
+ portdb = portage.portdbapi(mysettings=settings)
+
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
+ portage.util.initialize_logger()
+
+ if options.verbose > 0:
+ l = logging.getLogger()
+ l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)
+
+ with Config(options, portdb,
+ SchedulerInterface(global_event_loop())) as config:
+
+ if not options.mirror:
+ parser.error('No action specified')
+
+ returncode = os.EX_OK
+
+ if options.mirror:
+ signum = run_main_scheduler(MirrorDistTask(config))
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ return returncode
diff --git a/usr/lib/portage/pym/portage/_global_updates.py b/usr/lib/portage/pym/portage/_global_updates.py
new file mode 100644
index 0000000..bb39f7a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_global_updates.py
@@ -0,0 +1,255 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import stat
+
+from portage import best, os
+from portage.const import WORLD_FILE
+from portage.data import secpass
+from portage.exception import DirectoryNotFound
+from portage.localization import _
+from portage.output import bold, colorize
+from portage.update import grab_updates, parse_updates, update_config_files, update_dbentry
+from portage.util import grabfile, shlex_split, \
+ writemsg, writemsg_stdout, write_atomic
+
+def _global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
+ """
+ Perform new global updates if they exist in 'profiles/updates/'
+ subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
+ This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
+ then the user should instead use emaint --fix movebin and/or moveinst.
+
+ @param trees: A dictionary containing portage trees.
+ @type trees: dict
+ @param prev_mtimes: A dictionary containing mtimes of files located in
+ $PORTDIR/profiles/updates/.
+ @type prev_mtimes: dict
+ @rtype: bool
+ @return: True if update commands have been performed, otherwise False
+ """
+ # only do this if we're root and not running repoman/ebuild digest
+
+ if secpass < 2 or \
+ "SANDBOX_ACTIVE" in os.environ or \
+ len(trees) != 1:
+ return False
+
+ return _do_global_updates(trees, prev_mtimes,
+ quiet=quiet, if_mtime_changed=if_mtime_changed)
+
+def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
+ root = trees._running_eroot
+ mysettings = trees[root]["vartree"].settings
+ portdb = trees[root]["porttree"].dbapi
+ vardb = trees[root]["vartree"].dbapi
+ bindb = trees[root]["bintree"].dbapi
+
+ world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
+ world_list = grabfile(world_file)
+ world_modified = False
+ world_warnings = set()
+ updpath_map = {}
+ # Maps repo_name to list of updates. If a given repo has no updates
+ # directory, it will be omitted. If a repo has an updates directory
+ # but none need to be applied (according to timestamp logic), the
+ # value in the dict will be an empty list.
+ repo_map = {}
+ timestamps = {}
+
+ retupd = False
+ update_notice_printed = False
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ if updpath in updpath_map:
+ repo_map[repo_name] = updpath_map[updpath]
+ continue
+
+ try:
+ if if_mtime_changed:
+ update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
+ else:
+ update_data = grab_updates(updpath)
+ except DirectoryNotFound:
+ continue
+ myupd = []
+ updpath_map[updpath] = myupd
+ repo_map[repo_name] = myupd
+ if len(update_data) > 0:
+ for mykey, mystat, mycontent in update_data:
+ if not update_notice_printed:
+ update_notice_printed = True
+ writemsg_stdout("\n")
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ if not quiet:
+ writemsg_stdout(_(" %s='update pass' %s='binary update' "
+ "%s='/var/db update' %s='/var/db move'\n"
+ " %s='/var/db SLOT move' %s='binary move' "
+ "%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
+ (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+ valid_updates, errors = parse_updates(mycontent)
+ myupd.extend(valid_updates)
+ if not quiet:
+ writemsg_stdout(bold(mykey))
+ writemsg_stdout(len(valid_updates) * "." + "\n")
+ if len(errors) == 0:
+ # Update our internal mtime since we
+ # processed all of our directives.
+ timestamps[mykey] = mystat[stat.ST_MTIME]
+ else:
+ for msg in errors:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ if myupd:
+ retupd = True
+
+ if retupd:
+ if os.access(bindb.bintree.pkgdir, os.W_OK):
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+ else:
+ bindb = None
+
+ master_repo = portdb.repositories.mainRepo()
+ if master_repo is not None:
+ master_repo = master_repo.name
+ if master_repo in repo_map:
+ repo_map['DEFAULT'] = repo_map[master_repo]
+
+ for repo_name, myupd in repo_map.items():
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
+
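+		# Updates apply to packages installed from the same repo; the
+		# master repo's updates also cover packages whose own repo
+		# ships no updates directory (and is thus absent from repo_map).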
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+			If the best vardb match for atoma comes from the same
+			repository as the update file, allow that. Additionally,
+			if portdb can still find a match for the old atom name,
+			warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "S")
+
+ if world_modified:
+ world_list.sort()
+ write_atomic(world_file,
+ "".join("%s\n" % (x,) for x in world_list))
+ if world_warnings:
+ # XXX: print warning that we've updated world entries
+ # and the old name still matches something (from an overlay)?
+ pass
+
+ if retupd:
+
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+			Check whether to perform a config file change from atoma
+			to atomb: allow it if the best vardb match for atoma comes
+			from the same repository as the update file.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if not matches:
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback = _config_repo_match,
+ case_insensitive = "case-insensitive-fs"
+ in mysettings.features)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We gotta do the brute force updates for these now.
+ if True:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("*")
+ if quiet:
+ onUpdate = None
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ writemsg_stdout("\n\n")
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
+
+ return retupd
diff --git a/usr/lib/portage/pym/portage/_legacy_globals.py b/usr/lib/portage/pym/portage/_legacy_globals.py
new file mode 100644
index 0000000..bb9691a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_legacy_globals.py
@@ -0,0 +1,77 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import CACHE_PATH, PROFILE_PATH
+
+def _get_legacy_global(name):
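+	# Construct the requested legacy module-level attribute (for
+	# example portage.settings or portage.db) on first access.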
+ constructed = portage._legacy_globals_constructed
+ if name in constructed:
+ return getattr(portage, name)
+
+ if name == 'portdb':
+ portage.portdb = portage.db[portage.root]["porttree"].dbapi
+ constructed.add(name)
+ return getattr(portage, name)
+
+ elif name in ('mtimedb', 'mtimedbfile'):
+ portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
+ CACHE_PATH, "mtimedb")
+ constructed.add('mtimedbfile')
+ portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
+ constructed.add('mtimedb')
+ return getattr(portage, name)
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
+ ("target_root", "ROOT"), ("eprefix", "EPREFIX")):
+ kwargs[k] = os.environ.get(envvar)
+
+ portage._initializing_globals = True
+ portage.db = portage.create_trees(**kwargs)
+ constructed.add('db')
+ del portage._initializing_globals
+
+ settings = portage.db[portage.db._target_eroot]["vartree"].settings
+
+ portage.settings = settings
+ constructed.add('settings')
+
+ # Since portage.db now uses EROOT for keys instead of ROOT, we make
+ # portage.root refer to EROOT such that it continues to work as a key.
+ portage.root = portage.db._target_eroot
+ constructed.add('root')
+
+ # COMPATIBILITY
+ # These attributes should not be used within
+ # Portage under any circumstances.
+
+ portage.archlist = settings.archlist()
+ constructed.add('archlist')
+
+ portage.features = settings.features
+ constructed.add('features')
+
+ portage.groups = settings["ACCEPT_KEYWORDS"].split()
+ constructed.add('groups')
+
+ portage.pkglines = settings.packages
+ constructed.add('pkglines')
+
+ portage.selinux_enabled = settings.selinux_enabled()
+ constructed.add('selinux_enabled')
+
+ portage.thirdpartymirrors = settings.thirdpartymirrors()
+ constructed.add('thirdpartymirrors')
+
+ profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
+ if not os.path.isdir(profiledir):
+ profiledir = None
+ portage.profiledir = profiledir
+ constructed.add('profiledir')
+
+ return getattr(portage, name)
diff --git a/usr/lib/portage/pym/portage/_selinux.py b/usr/lib/portage/pym/portage/_selinux.py
new file mode 100644
index 0000000..2a7194c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_selinux.py
@@ -0,0 +1,140 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Don't use the unicode-wrapped os and shutil modules here since
+# the whole _selinux module itself will be wrapped.
+import os
+import shutil
+
+import portage
+from portage import _encodings
+from portage import _native_string, _unicode_decode
+from portage.localization import _
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'selinux')
+
+def copyfile(src, dest):
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
+
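+	# Temporarily set the fs-create context so the new file inherits
+	# the source's SELinux label, then reset it afterwards.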
+ setfscreate(ctx)
+ try:
+ shutil.copyfile(src, dest)
+ finally:
+ setfscreate()
+
+def getcontext():
+ (rc, ctx) = selinux.getcon()
+ if rc < 0:
+ raise OSError(_("getcontext: Failed getting current process context."))
+
+ return ctx
+
+def is_selinux_enabled():
+ return selinux.is_selinux_enabled()
+
+def mkdir(target, refdir):
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _native_string(refdir, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.getfilecon(refdir)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'], errors='replace')
+ raise OSError(
+ _("mkdir: Failed getting context of reference directory \"%s\".") \
+ % refdir)
+
+ setfscreate(ctx)
+ try:
+ os.mkdir(target)
+ finally:
+ setfscreate()
+
+def rename(src, dest):
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("rename: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ os.rename(src, dest)
+ finally:
+ setfscreate()
+
+def settype(newtype):
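+	# A context has the form "user:role:type[:level]"; replace the
+	# type field (index 2) and reassemble.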
+ ret = getcontext().split(":")
+ ret[2] = newtype
+ return ":".join(ret)
+
+def setexec(ctx="\n"):
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
+ if selinux.setexeccon(ctx) < 0:
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
+ if selinux.security_getenforce() == 1:
+ raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
+ else:
+ portage.writemsg("!!! " + \
+ _("Failed setting exec() context \"%s\".") % ctx, \
+ noiselevel=-1)
+
+def setfscreate(ctx="\n"):
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
+ if selinux.setfscreatecon(ctx) < 0:
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
+ raise OSError(
+ _("setfscreate: Failed setting fs create context \"%s\".") % ctx)
+
+class spawn_wrapper(object):
+ """
+ Create a wrapper function for the given spawn function. When the wrapper
+	is called, it will adjust the arguments such that setexec() is called
+ *after* the fork (thereby avoiding any interference with concurrent
+ threads in the calling process).
+ """
+ __slots__ = ("_con", "_spawn_func")
+
+ def __init__(self, spawn_func, selinux_type):
+ self._spawn_func = spawn_func
+ selinux_type = _native_string(selinux_type, encoding=_encodings['content'], errors='strict')
+ self._con = settype(selinux_type)
+
+ def __call__(self, *args, **kwargs):
+
+ pre_exec = kwargs.get("pre_exec")
+
+ def _pre_exec():
+ if pre_exec is not None:
+ pre_exec()
+ setexec(self._con)
+
+ kwargs["pre_exec"] = _pre_exec
+ return self._spawn_func(*args, **kwargs)
+
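+# Hypothetical usage sketch (the spawn function and SELinux type below are
+# assumptions, not taken from this file):
+#
+#   spawn = spawn_wrapper(portage.process.spawn, "portage_t")
+#   spawn(mycommand, env=env)  # setexec() runs in the child via pre_exec
+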
+def symlink(target, link, reflnk):
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ link = _native_string(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _native_string(reflnk, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(reflnk)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'], errors='replace')
+ raise OSError(
+ _("symlink: Failed getting context of reference symlink \"%s\".") \
+ % reflnk)
+
+ setfscreate(ctx)
+ try:
+ os.symlink(target, link)
+ finally:
+ setfscreate()
diff --git a/usr/lib/portage/pym/portage/_sets/__init__.py b/usr/lib/portage/pym/portage/_sets/__init__.py
new file mode 100644
index 0000000..75d1df7
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/__init__.py
@@ -0,0 +1,316 @@
+# Copyright 2007-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
+ "SetConfig", "load_default_config"]
+
+import io
+import logging
+import sys
+try:
+ from configparser import NoOptionError, ParsingError
+ if sys.hexversion >= 0x3020000:
+ from configparser import ConfigParser as SafeConfigParser
+ else:
+ from configparser import SafeConfigParser
+except ImportError:
+ from ConfigParser import SafeConfigParser, NoOptionError, ParsingError
+import portage
+from portage import os
+from portage import load_mod
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import _encodings
+from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
+from portage.const import _ENABLE_SET_CONFIG
+from portage.exception import PackageSetNotFound
+from portage.localization import _
+from portage.util import writemsg_level
+
+SETPREFIX = "@"
+
+def get_boolean(options, name, default):
+ if not name in options:
+ return default
+ elif options[name].lower() in ("1", "yes", "on", "true"):
+ return True
+ elif options[name].lower() in ("0", "no", "off", "false"):
+ return False
+ else:
+ raise SetConfigError(_("invalid value '%(value)s' for option '%(option)s'") % {"value": options[name], "option": name})
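+
+# For example, get_boolean({"multiset": "yes"}, "multiset", False) returns
+# True, while an unrecognized value such as "maybe" raises SetConfigError.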
+
+class SetConfigError(Exception):
+ pass
+
+class SetConfig(object):
+ def __init__(self, paths, settings, trees):
+ self._parser = SafeConfigParser(
+ defaults={
+ "EPREFIX" : settings["EPREFIX"],
+ "EROOT" : settings["EROOT"],
+ "PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+ "ROOT" : settings["ROOT"],
+ })
+
+ if _ENABLE_SET_CONFIG:
+ # use read_file/readfp in order to control decoding of unicode
+ try:
+ # Python >=3.2
+ read_file = self._parser.read_file
+ except AttributeError:
+ read_file = self._parser.readfp
+
+ for p in paths:
+ f = None
+ try:
+ f = io.open(_unicode_encode(p,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ read_file(f)
+ except ParsingError as e:
+ writemsg_level(_unicode_decode(
+ _("!!! Error while reading sets config file: %s\n")
+ ) % e, level=logging.ERROR, noiselevel=-1)
+ finally:
+ if f is not None:
+ f.close()
+ else:
+ self._create_default_config()
+
+ self.errors = []
+ self.psets = {}
+ self.trees = trees
+ self.settings = settings
+ self._parsed = False
+ self.active = []
+
+ def _create_default_config(self):
+ """
+ Create a default hardcoded set configuration for a portage version
+ that does not support set configuration files. This is only used
+ in the current branch of portage if _ENABLE_SET_CONFIG is False.
+ Even if it's not used in this branch, keep it here in order to
+ minimize the diff between branches.
+
+ [world]
+ class = portage.sets.base.DummyPackageSet
+ packages = @selected @system
+
+ [selected]
+ class = portage.sets.files.WorldSelectedSet
+
+ [system]
+ class = portage.sets.profiles.PackagesSystemSet
+
+ """
+ parser = self._parser
+
+ parser.remove_section("world")
+ parser.add_section("world")
+ parser.set("world", "class", "portage.sets.base.DummyPackageSet")
+ parser.set("world", "packages", "@selected @system")
+
+ parser.remove_section("selected")
+ parser.add_section("selected")
+ parser.set("selected", "class", "portage.sets.files.WorldSelectedSet")
+
+ parser.remove_section("system")
+ parser.add_section("system")
+ parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+
+ parser.remove_section("security")
+ parser.add_section("security")
+ parser.set("security", "class", "portage.sets.security.NewAffectedSet")
+
+ parser.remove_section("usersets")
+ parser.add_section("usersets")
+ parser.set("usersets", "class", "portage.sets.files.StaticFileSet")
+ parser.set("usersets", "multiset", "true")
+ parser.set("usersets", "directory", "%(PORTAGE_CONFIGROOT)setc/portage/sets")
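+		# Note: this is "%(PORTAGE_CONFIGROOT)s" + "etc/portage/sets";
+		# the "s" after the parenthesis is the interpolation conversion.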
+ parser.set("usersets", "world-candidate", "true")
+
+ parser.remove_section("live-rebuild")
+ parser.add_section("live-rebuild")
+ parser.set("live-rebuild", "class", "portage.sets.dbapi.VariableSet")
+ parser.set("live-rebuild", "variable", "INHERITED")
+ parser.set("live-rebuild", "includes", " ".join(sorted(portage.const.LIVE_ECLASSES)))
+
+ parser.remove_section("module-rebuild")
+ parser.add_section("module-rebuild")
+ parser.set("module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("module-rebuild", "files", "/lib/modules")
+
+ parser.remove_section("preserved-rebuild")
+ parser.add_section("preserved-rebuild")
+ parser.set("preserved-rebuild", "class", "portage.sets.libs.PreservedLibraryConsumerSet")
+
+ parser.remove_section("x11-module-rebuild")
+ parser.add_section("x11-module-rebuild")
+ parser.set("x11-module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("x11-module-rebuild", "files", "/usr/lib/xorg/modules")
+ parser.set("x11-module-rebuild", "exclude-files", "/usr/bin/Xorg")
+
+ def update(self, setname, options):
+ parser = self._parser
+ self.errors = []
+ if not setname in self.psets:
+ options["name"] = setname
+ options["world-candidate"] = "False"
+
+ # for the unlikely case that there is already a section with the requested setname
+ import random
+ while setname in parser.sections():
+ setname = "%08d" % random.randint(0, 10**10)
+
+ parser.add_section(setname)
+ for k, v in options.items():
+ parser.set(setname, k, v)
+ else:
+ section = self.psets[setname].creator
+ if parser.has_option(section, "multiset") and \
+ parser.getboolean(section, "multiset"):
+ self.errors.append(_("Invalid request to reconfigure set '%(set)s' generated "
+ "by multiset section '%(section)s'") % {"set": setname, "section": section})
+ return
+ for k, v in options.items():
+ parser.set(section, k, v)
+ self._parse(update=True)
+
+ def _parse(self, update=False):
+ if self._parsed and not update:
+ return
+ parser = self._parser
+ for sname in parser.sections():
+ # find classname for current section, default to file based sets
+ if not parser.has_option(sname, "class"):
+ classname = "portage._sets.files.StaticFileSet"
+ else:
+ classname = parser.get(sname, "class")
+
+ if classname.startswith('portage.sets.'):
+ # The module has been made private, but we still support
+ # the previous namespace for sets.conf entries.
+ classname = classname.replace('sets', '_sets', 1)
+
+ # try to import the specified class
+ try:
+ setclass = load_mod(classname)
+ except (ImportError, AttributeError):
+ try:
+ setclass = load_mod("portage._sets." + classname)
+ except (ImportError, AttributeError):
+ self.errors.append(_("Could not import '%(class)s' for section "
+ "'%(section)s'") % {"class": classname, "section": sname})
+ continue
+ # prepare option dict for the current section
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ # create single or multiple instances of the given class depending on configuration
+ if parser.has_option(sname, "multiset") and \
+ parser.getboolean(sname, "multiset"):
+ if hasattr(setclass, "multiBuilder"):
+ newsets = {}
+ try:
+ newsets = setclass.multiBuilder(optdict, self.settings, self.trees)
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ for x in newsets:
+ if x in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (x, self.psets[x].creator, sname))
+ newsets[x].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ newsets[x].world_candidate = True
+ self.psets.update(newsets)
+ else:
+ self.errors.append(_("Section '%(section)s' is configured as multiset, but '%(class)s' "
+ "doesn't support that configuration") % {"section": sname, "class": classname})
+ continue
+ else:
+ try:
+ setname = parser.get(sname, "name")
+ except NoOptionError:
+ setname = sname
+ if setname in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (setname, self.psets[setname].creator, sname))
+ if hasattr(setclass, "singleBuilder"):
+ try:
+ self.psets[setname] = setclass.singleBuilder(optdict, self.settings, self.trees)
+ self.psets[setname].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ self.psets[setname].world_candidate = True
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ else:
+ self.errors.append(_("'%(class)s' does not support individual set creation, section '%(section)s' "
+ "must be configured as multiset") % {"class": classname, "section": sname})
+ continue
+ self._parsed = True
+
+ def getSets(self):
+ self._parse()
+ return self.psets.copy()
+
+ def getSetAtoms(self, setname, ignorelist=None):
+ """
+		This raises PackageSetNotFound if the given setname does not exist.
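+		Entries beginning with SETPREFIX ("@") reference other sets and
+		are expanded recursively; ignorelist guards against cycles.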
+ """
+ self._parse()
+ try:
+ myset = self.psets[setname]
+ except KeyError:
+ raise PackageSetNotFound(setname)
+ myatoms = myset.getAtoms()
+
+ if ignorelist is None:
+ ignorelist = set()
+
+ ignorelist.add(setname)
+ for n in myset.getNonAtoms():
+ if n.startswith(SETPREFIX):
+ s = n[len(SETPREFIX):]
+ if s in self.psets:
+ if s not in ignorelist:
+ myatoms.update(self.getSetAtoms(s,
+ ignorelist=ignorelist))
+ else:
+ raise PackageSetNotFound(s)
+
+ return myatoms
+
+def load_default_config(settings, trees):
+
+ if not _ENABLE_SET_CONFIG:
+ return SetConfig(None, settings, trees)
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ def _getfiles():
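+		# Yield candidate config files in read order: bundled sets from
+		# GLOBAL_CONFIG_PATH/sets, each repo's sets.conf, then the
+		# user's sets.conf (later files override earlier sections).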
+ for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
+ for f in files:
+			# Compare str against str (a bytes prefix would raise
+			# TypeError on Python 3, where filenames here are unicode).
+			if not f.startswith('.'):
+ yield os.path.join(path, f)
+
+ dbapi = trees["porttree"].dbapi
+ for repo in dbapi.getRepositories():
+ path = dbapi.getRepositoryPath(repo)
+ yield os.path.join(path, "sets.conf")
+
+ yield os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets.conf")
+
+ return SetConfig(_getfiles(), settings, trees)
diff --git a/usr/lib/portage/pym/portage/_sets/base.py b/usr/lib/portage/pym/portage/_sets/base.py
new file mode 100644
index 0000000..ee20d36
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/base.py
@@ -0,0 +1,265 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
+from portage.exception import InvalidAtom
+from portage.versions import cpv_getkey
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+OPERATIONS = ["merge", "unmerge"]
+
+class PackageSet(object):
+ # Set this to operations that are supported by your subclass. While
+ # technically there is no difference between "merge" and "unmerge" regarding
+ # package sets, the latter doesn't make sense for some sets like "system"
+ # or "security" and therefore isn't supported by them.
+ _operations = ["merge"]
+ description = "generic package set"
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ self._atoms = set()
+ self._atommap = ExtendedAtomDict(set)
+ self._loaded = False
+ self._loading = False
+ self.errors = []
+ self._nonatoms = set()
+ self.world_candidate = False
+ self._allow_wildcard = allow_wildcard
+ self._allow_repo = allow_repo
+
+ def __contains__(self, atom):
+ self._load()
+ return atom in self._atoms or atom in self._nonatoms
+
+ def __iter__(self):
+ self._load()
+ for x in self._atoms:
+ yield x
+ for x in self._nonatoms:
+ yield x
+
+ def __bool__(self):
+ self._load()
+ return bool(self._atoms or self._nonatoms)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def supportsOperation(self, op):
+ if not op in OPERATIONS:
+ raise ValueError(op)
+ return op in self._operations
+
+ def _load(self):
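+		# _loading guards against re-entrant calls while load() is
+		# still populating the set.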
+ if not (self._loaded or self._loading):
+ self._loading = True
+ self.load()
+ self._loaded = True
+ self._loading = False
+
+ def getAtoms(self):
+ self._load()
+ return self._atoms.copy()
+
+ def getNonAtoms(self):
+ self._load()
+ return self._nonatoms.copy()
+
+ def _setAtoms(self, atoms):
+ self._atoms.clear()
+ self._nonatoms.clear()
+ for a in atoms:
+ if not isinstance(a, Atom):
+ if isinstance(a, basestring):
+ a = a.strip()
+ if not a:
+ continue
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ self._atoms.add(a)
+
+ self._updateAtomMap()
+
+ def load(self):
+ # This method must be overwritten by subclasses
+ # Editable sets should use the value of self._mtime to determine if they
+ # need to reload themselves
+ raise NotImplementedError()
+
+ def containsCPV(self, cpv):
+ self._load()
+ for a in self._atoms:
+ if match_from_list(a, [cpv]):
+ return True
+ return False
+
+ def getMetadata(self, key):
+ if hasattr(self, key.lower()):
+ return getattr(self, key.lower())
+ else:
+ return ""
+
+ def _updateAtomMap(self, atoms=None):
+ """Update self._atommap for specific atoms or all atoms."""
+ if not atoms:
+ self._atommap.clear()
+ atoms = self._atoms
+ for a in atoms:
+ self._atommap.setdefault(a.cp, set()).add(a)
+
+ # Not sure if this one should really be in PackageSet
+ def findAtomForPackage(self, pkg, modified_use=None):
+ """Return the best match for a given package from the arguments, or
+ None if there are no matches. This matches virtual arguments against
+ the PROVIDE metadata. This can raise an InvalidDependString exception
+ if an error occurs while parsing PROVIDE."""
+
+ if modified_use is not None and modified_use is not pkg.use.enabled:
+ pkg = pkg.copy()
+ pkg._metadata["USE"] = " ".join(modified_use)
+
+ # Atoms matched via PROVIDE must be temporarily transformed since
+ # match_from_list() only works correctly when atom.cp == pkg.cp.
+ rev_transform = {}
+ for atom in self.iterAtomsForPackage(pkg):
+ if atom.cp == pkg.cp:
+ rev_transform[atom] = atom
+ else:
+ rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
+ best_match = best_match_to_list(pkg, iter(rev_transform))
+ if best_match:
+ return rev_transform[best_match]
+ return None
+
+ def iterAtomsForPackage(self, pkg):
+ """
+ Find all matching atoms for a given package. This matches virtual
+ arguments against the PROVIDE metadata. This will raise an
+ InvalidDependString exception if PROVIDE is invalid.
+ """
+ cpv_slot_list = [pkg]
+ cp = cpv_getkey(pkg.cpv)
+ self._load() # make sure the atoms are loaded
+
+ atoms = self._atommap.get(cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom, cpv_slot_list):
+ yield atom
+ provides = pkg._metadata['PROVIDE']
+ if not provides:
+ return
+ provides = provides.split()
+ for provide in provides:
+ try:
+ provided_cp = Atom(provide).cp
+ except InvalidAtom:
+ continue
+ atoms = self._atommap.get(provided_cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom.replace(provided_cp, cp),
+ cpv_slot_list):
+ yield atom
+
+class EditablePackageSet(PackageSet):
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def update(self, atoms):
+ self._load()
+ modified = False
+ normal_atoms = []
+ for a in atoms:
+ if not isinstance(a, Atom):
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ modified = True
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ normal_atoms.append(a)
+
+ if normal_atoms:
+ modified = True
+ self._atoms.update(normal_atoms)
+ self._updateAtomMap(atoms=normal_atoms)
+ if modified:
+ self.write()
+
+ def add(self, atom):
+ self.update([atom])
+
+ def replace(self, atoms):
+ self._setAtoms(atoms)
+ self.write()
+
+ def remove(self, atom):
+ self._load()
+ self._atoms.discard(atom)
+ self._nonatoms.discard(atom)
+ self._updateAtomMap()
+ self.write()
+
+ def removePackageAtoms(self, cp):
+ self._load()
+ for a in list(self._atoms):
+ if a.cp == cp:
+ self.remove(a)
+ self.write()
+
+ def write(self):
+ # This method must be overwritten in subclasses that should be editable
+ raise NotImplementedError()
+
+class InternalPackageSet(EditablePackageSet):
+ def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
+ """
+ Repo atoms are allowed more often than not, so it makes sense for this
+ class to allow them by default. The Atom constructor and isvalidatom()
+ functions default to allow_repo=False, which is sufficient to ensure
+ that repo atoms are prohibited when necessary.
+ """
+ super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+		if initial_atoms is not None:
+ self.update(initial_atoms)
+
+ def clear(self):
+ self._atoms.clear()
+ self._updateAtomMap()
+
+ def load(self):
+ pass
+
+ def write(self):
+ pass
+
+class DummyPackageSet(PackageSet):
+ def __init__(self, atoms=None):
+ super(DummyPackageSet, self).__init__()
+ if atoms:
+ self._setAtoms(atoms)
+
+ def load(self):
+ pass
+
+ def singleBuilder(cls, options, settings, trees):
+ atoms = options.get("packages", "").split()
+ return DummyPackageSet(atoms=atoms)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/usr/lib/portage/pym/portage/_sets/dbapi.py b/usr/lib/portage/pym/portage/_sets/dbapi.py
new file mode 100644
index 0000000..299cb81
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/dbapi.py
@@ -0,0 +1,537 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import re
+import time
+
+from portage import os
+from portage.versions import best, catsplit, vercmp
+from portage.dep import Atom, use_reduce
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError, get_boolean
+import portage
+
+__all__ = ["CategorySet", "ChangedDepsSet", "DowngradeSet",
+ "EverythingSet", "OwnerSet", "VariableSet"]
+
+class EverythingSet(PackageSet):
+ _operations = ["merge"]
+ description = "Package set which contains SLOT " + \
+ "atoms to match all installed packages"
+ _filter = None
+
+ def __init__(self, vdbapi, **kwargs):
+ super(EverythingSet, self).__init__()
+ self._db = vdbapi
+
+ def load(self):
+ myatoms = []
+ pkg_str = self._db._pkg_str
+ cp_list = self._db.cp_list
+
+ for cp in self._db.cp_all():
+ for cpv in cp_list(cp):
+ # NOTE: Create SLOT atoms even when there is only one
+ # SLOT installed, in order to avoid the possibility
+ # of unwanted upgrades as reported in bug #338959.
+ pkg = pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
+ if self._filter:
+ if self._filter(atom):
+ myatoms.append(atom)
+ else:
+ myatoms.append(atom)
+
+ self._setAtoms(myatoms)
+
+	def singleBuilder(cls, options, settings, trees):
+ return EverythingSet(trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+class OwnerSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that own one or more files."
+
+ def __init__(self, vardb=None, exclude_files=None, files=None):
+ super(OwnerSet, self).__init__()
+ self._db = vardb
+ self._exclude_files = exclude_files
+ self._files = files
+
+ def mapPathsToAtoms(self, paths, exclude_paths=None):
+ """
+ All paths must have $EROOT stripped from the left side.
+ """
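+		# e.g. query "/lib/modules" rather than "${EROOT}/lib/modules".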
+ rValue = set()
+ vardb = self._db
+ pkg_str = vardb._pkg_str
+ if exclude_paths is None:
+ for link, p in vardb._owners.iter_owners(paths):
+ pkg = pkg_str(link.mycpv, None)
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
+ else:
+ all_paths = set()
+ all_paths.update(paths)
+ all_paths.update(exclude_paths)
+ exclude_atoms = set()
+ for link, p in vardb._owners.iter_owners(all_paths):
+ pkg = pkg_str(link.mycpv, None)
+ atom = "%s:%s" % (pkg.cp, pkg.slot)
+ rValue.add(atom)
+ if p in exclude_paths:
+ exclude_atoms.add(atom)
+ rValue.difference_update(exclude_atoms)
+
+ return rValue
+
+ def load(self):
+ self._setAtoms(self.mapPathsToAtoms(self._files,
+ exclude_paths=self._exclude_files))
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "files" in options:
+ raise SetConfigError(_("no files given"))
+
+ exclude_files = options.get("exclude-files")
+ if exclude_files is not None:
+ exclude_files = frozenset(portage.util.shlex_split(exclude_files))
+ return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
+ files=frozenset(portage.util.shlex_split(options["files"])))
+
+ singleBuilder = classmethod(singleBuilder)
+
+class VariableSet(EverythingSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that match specified values of a specified variable."
+
+ def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
+ super(VariableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+ self._variable = variable
+ self._includes = includes
+ self._excludes = excludes
+
+ def _filter(self, atom):
+ ebuild = best(self._metadatadb.match(atom))
+ if not ebuild:
+ return False
+ values, = self._metadatadb.aux_get(ebuild, [self._variable])
+ values = values.split()
+ if self._includes and not self._includes.intersection(values):
+ return False
+ if self._excludes and self._excludes.intersection(values):
+ return False
+ return True
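+
+	# The stock "live-rebuild" set, for instance, uses this class with
+	# variable=INHERITED and includes drawn from LIVE_ECLASSES (see
+	# _create_default_config in _sets/__init__.py).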
+
+ def singleBuilder(cls, options, settings, trees):
+
+ variable = options.get("variable")
+ if variable is None:
+ raise SetConfigError(_("missing required attribute: 'variable'"))
+
+ includes = options.get("includes", "")
+ excludes = options.get("excludes", "")
+
+ if not (includes or excludes):
+ raise SetConfigError(_("no includes or excludes given"))
+
+ metadatadb = options.get("metadata-source", "vartree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi,
+ excludes=frozenset(excludes.split()),
+ includes=frozenset(includes.split()),
+ variable=variable)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class DowngradeSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "for which the highest visible ebuild version is lower than " + \
+ "the currently installed version."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(DowngradeSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ atoms = []
+ xmatch = self._portdb.xmatch
+ xmatch_level = "bestmatch-visible"
+ cp_list = self._vardb.cp_list
+ pkg_str = self._vardb._pkg_str
+ for cp in self._vardb.cp_all():
+ for cpv in cp_list(cp):
+ pkg = pkg_str(cpv, None)
+ slot_atom = "%s:%s" % (pkg.cp, pkg.slot)
+ ebuild = xmatch(xmatch_level, slot_atom)
+ if not ebuild:
+ continue
+ if vercmp(cpv.version, ebuild.version) > 0:
+ atoms.append(slot_atom)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableSet(EverythingSet):
+
+ _operations = ["unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which there are no visible ebuilds " + \
+ "corresponding to the same $CATEGORY/$PN:$SLOT."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ return not self._metadatadb.match(atom)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "porttree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableBinaries(EverythingSet):
+
+ _operations = ('merge', 'unmerge',)
+
+ description = "Package set which contains all installed " + \
+ "packages for which corresponding binary packages " + \
+ "are not available."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableBinaries, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ inst_pkg = self._db.match(atom)
+ if not inst_pkg:
+ return False
+ inst_cpv = inst_pkg[0]
+ return not self._metadatadb.cpv_exists(inst_cpv)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "bintree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class CategorySet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, category, dbapi, only_visible=True):
+ super(CategorySet, self).__init__()
+ self._db = dbapi
+ self._category = category
+ self._check = only_visible
+ if only_visible:
+ s="visible"
+ else:
+ s="all"
+ self.description = "Package set containing %s packages of category %s" % (s, self._category)
+
+ def load(self):
+ myatoms = []
+ for cp in self._db.cp_all():
+ if catsplit(cp)[0] == self._category:
+ if (not self._check) or len(self._db.match(cp)) > 0:
+ myatoms.append(cp)
+ self._setAtoms(myatoms)
+
+ def _builderGetRepository(cls, options, repositories):
+ repository = options.get("repository", "porttree")
+ if not repository in repositories:
+ raise SetConfigError(_("invalid repository class '%s'") % repository)
+ return repository
+ _builderGetRepository = classmethod(_builderGetRepository)
+
+ def _builderGetVisible(cls, options):
+ return get_boolean(options, "only_visible", True)
+ _builderGetVisible = classmethod(_builderGetVisible)
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "category" in options:
+ raise SetConfigError(_("no category given"))
+
+ category = options["category"]
+ if not category in settings.categories:
+ raise SetConfigError(_("invalid category name '%s'") % category)
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+
+ return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(cls, options, settings, trees):
+ rValue = {}
+
+ if "categories" in options:
+ categories = options["categories"].split()
+ invalid = set(categories).difference(settings.categories)
+ if invalid:
+ raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
+ else:
+ categories = settings.categories
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+ name_pattern = options.get("name_pattern", "$category/*")
+
+ if not "$category" in name_pattern and not "${category}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
+
+ for cat in categories:
+ myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
+ myname = name_pattern.replace("$category", cat)
+ myname = myname.replace("${category}", cat)
+ rValue[myname] = myset
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
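+
+# An illustrative multi-set configuration (hypothetical section name):
+# with "multiset = true" the multiBuilder above generates one set per
+# category, named via name_pattern, e.g. @cat/sys-apps:
+#
+#   [categories]
+#   class = portage.sets.dbapi.CategorySet
+#   multiset = true
+#   name_pattern = cat/$category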
+
+class AgeSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, mode="older", age=7):
+ super(AgeSet, self).__init__(vardb)
+ self._mode = mode
+ self._age = age
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ age = (time.time() - date) / (3600 * 24)
+ if ((self._mode == "older" and age <= self._age) \
+ or (self._mode == "newer" and age >= self._age)):
+ return False
+ else:
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+ try:
+ age = int(options.get("age", "7"))
+ except ValueError:
+ raise SetConfigError(_("value of option 'age' is not an integer"))
+ return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
+
+ singleBuilder = classmethod(singleBuilder)
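+
+# An illustrative sets.conf entry (hypothetical name) selecting every
+# package installed more than a year ago:
+#
+#   [old-packages]
+#   class = portage.sets.dbapi.AgeSet
+#   mode = older
+#   age = 365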
+
+class DateSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, date, mode="older"):
+ super(DateSet, self).__init__(vardb)
+ self._mode = mode
+ self._date = date
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ # Make sure inequality is _strict_ to exclude tested package
+ if ((self._mode == "older" and date < self._date) \
+ or (self._mode == "newer" and date > self._date)):
+ return True
+ else:
+ return False
+
+ def singleBuilder(cls, options, settings, trees):
+ vardbapi = trees["vartree"].dbapi
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+
+ formats = []
+ if options.get("package") is not None:
+ formats.append("package")
+ if options.get("filestamp") is not None:
+ formats.append("filestamp")
+ if options.get("seconds") is not None:
+ formats.append("seconds")
+ if options.get("date") is not None:
+ formats.append("date")
+
+ if not formats:
+ raise SetConfigError(_("none of these options specified: 'package', 'filestamp', 'seconds', 'date'"))
+ elif len(formats) > 1:
+ raise SetConfigError(_("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"))
+
+ format = formats[0]
+
+ if (format == "package"):
+ package = options.get("package")
+ try:
+ cpv = vardbapi.match(package)[0]
+ date, = vardbapi.aux_get(cpv, ('BUILD_TIME',))
+ date = int(date)
+ except (KeyError, ValueError):
+ raise SetConfigError(_("cannot determine installation date of package %s") % package)
+ elif (format == "filestamp"):
+ filestamp = options.get("filestamp")
+ try:
+ date = int(os.stat(filestamp).st_mtime)
+ except (OSError, ValueError):
+ raise SetConfigError(_("cannot determine 'filestamp' of '%s'") % filestamp)
+ elif (format == "seconds"):
+ try:
+ date = int(options.get("seconds"))
+ except ValueError:
+ raise SetConfigError(_("option 'seconds' must be an integer"))
+ else:
+ dateopt = options.get("date")
+ try:
+ dateformat = options.get("dateformat", "%x %X")
+ date = int(time.mktime(time.strptime(dateopt, dateformat)))
+ except ValueError:
+ raise SetConfigError(_("'date=%s' does not match 'dateformat=%s'") % (dateopt, dateformat))
+ return DateSet(vardb=vardbapi, date=date, mode=mode)
+
+ singleBuilder = classmethod(singleBuilder)
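+
+# An illustrative sets.conf entry (hypothetical name); exactly one of
+# 'package', 'filestamp', 'seconds' or 'date' selects the reference time,
+# and note that the default dateformat "%x %X" is locale-dependent:
+#
+#   [installed-before]
+#   class = portage.sets.dbapi.DateSet
+#   mode = older
+#   date = 01/01/2015 00:00:00
+#   dateformat = %m/%d/%Y %H:%M:%S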
+
+class RebuiltBinaries(EverythingSet):
+ _operations = ('merge',)
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, bindb=None):
+ super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
+ self._bindb = bindb
+
+ def _filter(self, atom):
+ cpv = self._db.match(atom)[0]
+ inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
+ try:
+ bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
+ except KeyError:
+ return False
+ return bool(bin_build_time and (inst_build_time != bin_build_time))
+
+ def singleBuilder(cls, options, settings, trees):
+ return RebuiltBinaries(trees["vartree"].dbapi,
+ bindb=trees["bintree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class ChangedDepsSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which the vdb *DEPEND entries are outdated " + \
+ "compared to corresponding portdb entries."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(ChangedDepsSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ depvars = ('RDEPEND', 'PDEPEND')
+
+ # regexp used to match atoms using subslot operator :=
+ subslot_repl_re = re.compile(r':[^[]*=')
+
+ atoms = []
+ for cpv in self._vardb.cpv_all():
+ # no ebuild, no update :).
+ if not self._portdb.cpv_exists(cpv):
+ continue
+
+ # USE flags used to build the ebuild and EAPI
+ # (needed for Atom & use_reduce())
+ use, eapi = self._vardb.aux_get(cpv, ('USE', 'EAPI'))
+ usel = use.split()
+
+ # function used to recursively process atoms in nested lists.
+ def clean_subslots(depatom, usel=None):
+ if isinstance(depatom, list):
+ # process the nested list.
+ return [clean_subslots(x, usel) for x in depatom]
+ else:
+ try:
+ # this can be either an atom or some special operator.
+ # in the latter case, we get InvalidAtom and pass it as-is.
+ a = Atom(depatom)
+ except InvalidAtom:
+ return depatom
+ else:
+ # if we're processing portdb, we need to evaluate USE flag
+ # dependency conditionals to make them match vdb. this
+ # requires passing the list of USE flags, so we reuse it
+ # as conditional for the operation as well.
+ if usel is not None:
+ a = a.evaluate_conditionals(usel)
+
+ # replace slot operator := dependencies with plain :=
+ # since we can't properly compare expanded slots
+ # in vardb to abstract slots in portdb.
+ return subslot_repl_re.sub(':=', a)
+
+ # get all *DEPEND variables from vdb & portdb and compare them.
+ # we need to do some cleaning up & expansion to make matching
+ # meaningful since vdb dependencies are conditional-free.
+ vdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi))
+ for x in self._vardb.aux_get(cpv, depvars)]
+ pdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi), usel)
+ for x in self._portdb.aux_get(cpv, depvars)]
+
+ # if dependencies don't match, trigger the rebuild.
+ if vdbvars != pdbvars:
+ atoms.append('=%s' % cpv)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
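+
+# This class conventionally backs the @changed-deps set (name assumed
+# from the default sets.conf), so rebuilding packages whose vdb
+# dependencies are outdated would be e.g. `emerge --oneshot @changed-deps`.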
diff --git a/usr/lib/portage/pym/portage/_sets/files.py b/usr/lib/portage/pym/portage/_sets/files.py
new file mode 100644
index 0000000..2fb64de
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/files.py
@@ -0,0 +1,342 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+from itertools import chain
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import grabfile, write_atomic, ensure_dirs, normalize_path
+from portage.const import USER_CONFIG_PATH, WORLD_FILE, WORLD_SETS_FILE
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage import portage_gid
+from portage._sets.base import PackageSet, EditablePackageSet
+from portage._sets import SetConfigError, SETPREFIX, get_boolean
+from portage.env.loaders import ItemFileLoader, KeyListFileLoader
+from portage.env.validators import ValidAtomValidator
+from portage import cpv_getkey
+
+__all__ = ["StaticFileSet", "ConfigFileSet", "WorldSelectedSet"]
+
+class StaticFileSet(EditablePackageSet):
+ _operations = ["merge", "unmerge"]
+ _repopath_match = re.compile(r'.*\$\{repository:(?P<reponame>.+)\}.*')
+ _repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
+
+ def __init__(self, filename, greedy=False, dbapi=None):
+ super(StaticFileSet, self).__init__(allow_repo=True)
+ self._filename = filename
+ self._mtime = None
+ self.description = "Package set loaded from file %s" % self._filename
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ if greedy and not dbapi:
+ self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
+ greedy = False
+ self.greedy = greedy
+ self.dbapi = dbapi
+
+ metadata = grabfile(self._filename + ".metadata")
+ key = None
+ value = []
+ for line in metadata:
+ line = line.strip()
+ if len(line) == 0 and key != None:
+ setattr(self, key, " ".join(value))
+ key = None
+ elif line[-1] == ":" and key == None:
+ key = line[:-1].lower()
+ value = []
+ elif key != None:
+ value.append(line)
+ else:
+ pass
+ else:
+ if key != None:
+ setattr(self, key, " ".join(value))
+
+ def _validate(self, atom):
+ return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
+
+ def write(self):
+ write_atomic(self._filename, "".join("%s\n" % (atom,) \
+ for atom in sorted(chain(self._atoms, self._nonatoms))))
+
+ def load(self):
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ if self.greedy:
+ atoms = []
+ for a in data:
+ matches = self.dbapi.match(a)
+ for cpv in matches:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ atoms.append("%s:%s" % (pkg.cp, pkg.slot))
+ # In addition to any installed slots, also try to pull
+ # in the latest new slot that may be available.
+ atoms.append(a)
+ else:
+ atoms = iter(data)
+ self._setAtoms(atoms)
+ self._mtime = mtime
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ greedy = get_boolean(options, "greedy", False)
+ filename = options["filename"]
+ # look for repository path variables
+ match = self._repopath_match.match(filename)
+ if match:
+ try:
+ filename = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], filename)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+ return StaticFileSet(filename, greedy=greedy, dbapi=trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
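+
+ # An illustrative sets.conf entry using the ${repository:<name>}
+ # expansion handled above (multiBuilder below applies the same
+ # expansion to 'directory'); set and repository names are examples:
+ #
+ #   [base-packages]
+ #   class = portage.sets.files.StaticFileSet
+ #   filename = ${repository:gentoo}/sets/base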
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets"))
+ name_pattern = options.get("name_pattern", "${name}")
+ if not "$name" in name_pattern and not "${name}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
+ greedy = get_boolean(options, "greedy", False)
+ # look for repository path variables
+ match = self._repopath_match.match(directory)
+ if match:
+ try:
+ directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+
+ try:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ # Now verify that we can also encode it.
+ _unicode_encode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeError:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='replace')
+ raise SetConfigError(
+ _("Directory path contains invalid character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], directory))
+
+ if os.path.isdir(directory):
+ directory = normalize_path(directory)
+
+ for parent, dirs, files in os.walk(directory):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for d in dirs[:]:
+ if d[:1] == '.':
+ dirs.remove(d)
+ for filename in files:
+ try:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if filename[:1] == '.':
+ continue
+ if filename.endswith(".metadata"):
+ continue
+ filename = os.path.join(parent,
+ filename)[1 + len(directory):]
+ myname = name_pattern.replace("$name", filename)
+ myname = myname.replace("${name}", filename)
+ rValue[myname] = StaticFileSet(
+ os.path.join(directory, filename),
+ greedy=greedy, dbapi=trees["vartree"].dbapi)
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class ConfigFileSet(PackageSet):
+ def __init__(self, filename):
+ super(ConfigFileSet, self).__init__()
+ self._filename = filename
+ self.description = "Package set generated from %s" % self._filename
+ self.loader = KeyListFileLoader(self._filename, ValidAtomValidator)
+
+ def load(self):
+ data, errors = self.loader.load()
+ self._setAtoms(iter(data))
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ return ConfigFileSet(options["filename"])
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
+ name_pattern = options.get("name_pattern", "sets/package_$suffix")
+ if not "$suffix" in name_pattern and not "${suffix}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $suffix placeholder"))
+ for suffix in ["keywords", "use", "mask", "unmask"]:
+ myname = name_pattern.replace("$suffix", suffix)
+ myname = myname.replace("${suffix}", suffix)
+ rValue[myname] = ConfigFileSet(os.path.join(directory, "package."+suffix))
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class WorldSelectedSet(EditablePackageSet):
+ description = "Set of packages that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedSet, self).__init__(allow_repo=True)
+ # most attributes exist twice, since atoms and non-atoms are stored
+ # in separate files
+ self._lock = None
+ self._filename = os.path.join(eroot, WORLD_FILE)
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ self._mtime = None
+
+ self._filename2 = os.path.join(eroot, WORLD_SETS_FILE)
+ self.loader2 = ItemFileLoader(self._filename2, self._validate2)
+ self._mtime2 = None
+
+ def _validate(self, atom):
+ return ValidAtomValidator(atom, allow_repo=True)
+
+ def _validate2(self, setname):
+ return setname.startswith(SETPREFIX)
+
+ def write(self):
+ write_atomic(self._filename,
+ "".join(sorted("%s\n" % x for x in self._atoms)))
+
+ write_atomic(self._filename2,
+ "".join(sorted("%s\n" % x for x in self._nonatoms)))
+
+ def load(self):
+ atoms = []
+ nonatoms = []
+ atoms_changed = False
+ # load atoms and non-atoms from different files so the worldfile is
+ # backwards-compatible with older versions and other PMs, even though
+ # it's supposed to be private state data :/
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ atoms = list(data)
+ self._mtime = mtime
+ atoms_changed = True
+ else:
+ atoms.extend(self._atoms)
+
+ changed2, nonatoms = self._load2()
+ atoms_changed |= changed2
+
+ if atoms_changed:
+ self._setAtoms(atoms+nonatoms)
+
+ def _load2(self):
+ changed = False
+ try:
+ mtime = os.stat(self._filename2).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime2 != mtime):
+ try:
+ data, errors = self.loader2.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ nonatoms = list(data)
+ self._mtime2 = mtime
+ changed = True
+ else:
+ nonatoms = list(self._nonatoms)
+
+ return changed, nonatoms
+
+ def _ensure_dirs(self):
+ ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
+
+ def lock(self):
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._ensure_dirs()
+ self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+ def unlock(self):
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def cleanPackage(self, vardb, cpv):
+ '''
+ Before calling this function you should call lock and load.
+ After calling this function you should call unlock.
+ '''
+ if not self._lock:
+ raise AssertionError('cleanPackage needs the set to be locked')
+
+ worldlist = list(self._atoms)
+ mykey = cpv_getkey(cpv)
+ newworldlist = []
+ for x in worldlist:
+ if x.cp == mykey:
+ matches = vardb.match(x, use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif len(matches) == 1 and matches[0] == cpv:
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ newworldlist.extend(self._nonatoms)
+ self.replace(newworldlist)
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
diff --git a/usr/lib/portage/pym/portage/_sets/libs.py b/usr/lib/portage/pym/portage/_sets/libs.py
new file mode 100644
index 0000000..022e076
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/libs.py
@@ -0,0 +1,99 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.exception import InvalidData
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean, SetConfigError
+import portage
+
+class LibraryConsumerSet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardbapi, debug=False):
+ super(LibraryConsumerSet, self).__init__()
+ self.dbapi = vardbapi
+ self.debug = debug
+
+ def mapPathsToAtoms(self, paths):
+ rValue = set()
+ for p in paths:
+ for cpv in self.dbapi._linkmap.getOwners(p):
+ try:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ except (KeyError, InvalidData):
+ # This is expected for preserved libraries
+ # of packages that have been uninstalled
+ # without replacement.
+ pass
+ else:
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
+ return rValue
+
+class LibraryFileConsumerSet(LibraryConsumerSet):
+
+ """
+ Note: This does not detect libtool archive (*.la) files that consume the
+ specified files (revdep-rebuild is able to detect them).
+ """
+
+ description = "Package set which contains all packages " + \
+ "that consume the specified library file(s)."
+
+ def __init__(self, vardbapi, files, **kargs):
+ super(LibraryFileConsumerSet, self).__init__(vardbapi, **kargs)
+ self.files = files
+
+ def load(self):
+ consumers = set()
+ for lib in self.files:
+ consumers.update(
+ self.dbapi._linkmap.findConsumers(lib, greedy=False))
+
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ files = tuple(portage.util.shlex_split(options.get("files", "")))
+ if not files:
+ raise SetConfigError(_("no files given"))
+ debug = get_boolean(options, "debug", False)
+ return LibraryFileConsumerSet(trees["vartree"].dbapi,
+ files, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
+
+class PreservedLibraryConsumerSet(LibraryConsumerSet):
+ def load(self):
+ reg = self.dbapi._plib_registry
+ if reg is None:
+ # preserve-libs is entirely disabled
+ return
+ consumers = set()
+ if reg:
+ plib_dict = reg.getPreservedLibs()
+ for libs in plib_dict.values():
+ for lib in libs:
+ if self.debug:
+ print(lib)
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib, greedy=False)):
+ print(" ", x)
+ print("-"*40)
+ consumers.update(self.dbapi._linkmap.findConsumers(lib, greedy=False))
+ # Don't rebuild packages just because they contain preserved
+ # libs that happen to be consumers of other preserved libs.
+ for libs in plib_dict.values():
+ consumers.difference_update(libs)
+ else:
+ return
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PreservedLibraryConsumerSet(trees["vartree"].dbapi,
+ debug=debug)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/usr/lib/portage/pym/portage/_sets/profiles.py b/usr/lib/portage/pym/portage/_sets/profiles.py
new file mode 100644
index 0000000..3fb6df0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/profiles.py
@@ -0,0 +1,54 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean
+from portage.util import writemsg_level
+from portage.const import EPREFIX
+
+__all__ = ["PackagesSystemSet"]
+
+class PackagesSystemSet(PackageSet):
+ _operations = ["merge"]
+
+ def __init__(self, profile_paths, debug=False):
+ super(PackagesSystemSet, self).__init__()
+ self._profile_paths = profile_paths
+ self._debug = debug
+ if profile_paths:
+ description = self._profile_paths[-1]
+ if description == EPREFIX+"/etc/portage/profile" and \
+ len(self._profile_paths) > 1:
+ description = self._profile_paths[-2]
+ else:
+ description = None
+ self.description = "System packages for profile %s" % description
+
+ def load(self):
+ debug = self._debug
+ if debug:
+ writemsg_level("\nPackagesSystemSet: profile paths: %s\n" % \
+ (self._profile_paths,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self._profile_paths]
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = stack_lists(mylist, incremental=1)
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ self._setAtoms([x[1:] for x in mylist if x[0] == "*"])
+
+ def singleBuilder(self, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PackagesSystemSet(settings.profiles, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
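+
+# Profile "packages" files mark system-set entries with a leading "*",
+# and stack_lists() applies "-"-prefixed removals across the profile
+# stack; only the "*"-prefixed survivors end up in the set. An
+# illustrative fragment of such a file:
+#
+#   *sys-apps/baselayout
+#   *sys-apps/portage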
diff --git a/usr/lib/portage/pym/portage/_sets/security.py b/usr/lib/portage/pym/portage/_sets/security.py
new file mode 100644
index 0000000..f8dbef2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/security.py
@@ -0,0 +1,86 @@
+# Copyright 2007-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.glsa as glsa
+from portage._sets.base import PackageSet
+from portage.versions import vercmp
+from portage._sets import get_boolean
+
+__all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
+
+class SecuritySet(PackageSet):
+ _operations = ["merge"]
+ _skip_applied = False
+
+ description = "package set that includes all packages possibly affected by a GLSA"
+
+ def __init__(self, settings, vardbapi, portdbapi, least_change=True):
+ super(SecuritySet, self).__init__()
+ self._settings = settings
+ self._vardbapi = vardbapi
+ self._portdbapi = portdbapi
+ self._least_change = least_change
+
+ def getGlsaList(self, skip_applied):
+ glsaindexlist = glsa.get_glsa_list(self._settings)
+ if skip_applied:
+ applied_list = glsa.get_applied_glsas(self._settings)
+ glsaindexlist = set(glsaindexlist).difference(applied_list)
+ glsaindexlist = list(glsaindexlist)
+ glsaindexlist.sort()
+ return glsaindexlist
+
+ def load(self):
+ glsaindexlist = self.getGlsaList(self._skip_applied)
+ atomlist = []
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ #print glsaid, myglsa.isVulnerable(), myglsa.isApplied(), myglsa.getMergeList()
+ if self.useGlsa(myglsa):
+ atomlist += ["="+x for x in myglsa.getMergeList(least_change=self._least_change)]
+ self._setAtoms(self._reduce(atomlist))
+
+ def _reduce(self, atomlist):
+ mydict = {}
+ for atom in atomlist[:]:
+ cpv = self._portdbapi.xmatch("match-all", atom)[0]
+ pkg = self._portdbapi._pkg_str(cpv, None)
+ cps = "%s:%s" % (pkg.cp, pkg.slot)
+ if not cps in mydict:
+ mydict[cps] = (atom, cpv)
+ else:
+ other_cpv = mydict[cps][1]
+ if vercmp(cpv.version, other_cpv.version) > 0:
+ atomlist.remove(mydict[cps][0])
+ mydict[cps] = (atom, cpv)
+ return atomlist
+
+ def useGlsa(self, myglsa):
+ return True
+
+ def updateAppliedList(self):
+ glsaindexlist = self.getGlsaList(True)
+ applied_list = glsa.get_applied_glsas(self._settings)
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ if not myglsa.isVulnerable() and not myglsa.nr in applied_list:
+ myglsa.inject()
+
+ def singleBuilder(cls, options, settings, trees):
+ least_change = not get_boolean(options, "use_emerge_resolver", False)
+ return cls(settings, trees["vartree"].dbapi, trees["porttree"].dbapi, least_change=least_change)
+ singleBuilder = classmethod(singleBuilder)
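+
+# SecuritySet and the subclasses below are conventionally exposed as
+# @security, @new-glsa, @affected and @new-affected (names assumed from
+# the default sets.conf), e.g. `emerge --oneshot @security`.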
+
+class NewGlsaSet(SecuritySet):
+ _skip_applied = True
+ description = "Package set that includes all packages possibly affected by an unapplied GLSA"
+
+class AffectedSet(SecuritySet):
+ description = "Package set that includes all packages affected by an unapplied GLSA"
+
+ def useGlsa(self, myglsa):
+ return myglsa.isVulnerable()
+
+class NewAffectedSet(AffectedSet):
+ _skip_applied = True
+ description = "Package set that includes all packages affected by an unapplied GLSA"
diff --git a/usr/lib/portage/pym/portage/_sets/shell.py b/usr/lib/portage/pym/portage/_sets/shell.py
new file mode 100644
index 0000000..2c95845
--- /dev/null
+++ b/usr/lib/portage/pym/portage/_sets/shell.py
@@ -0,0 +1,44 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage import _unicode_decode
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError
+
+__all__ = ["CommandOutputSet"]
+
+class CommandOutputSet(PackageSet):
+ """This class creates a PackageSet from the output of a shell command.
+ The shell command should produce one atom per line, for example:
+
+ atom1
+ atom2
+ ...
+ atomN
+
+ Args:
+ name: A string that identifies the set.
+ command: A string or sequence identifying the command to run
+ (see the subprocess.Popen documentation for the format)
+ """
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, command):
+ super(CommandOutputSet, self).__init__()
+ self._command = command
+ self.description = "Package set generated from output of '%s'" % self._command
+
+ def load(self):
+ pipe = subprocess.Popen(self._command, stdout=subprocess.PIPE, shell=True)
+ stdout, stderr = pipe.communicate()
+ if pipe.wait() == os.EX_OK:
+ self._setAtoms(_unicode_decode(stdout).splitlines())
+
+ def singleBuilder(self, options, settings, trees):
+ if not "command" in options:
+ raise SetConfigError("no command specified")
+ return CommandOutputSet(options["command"])
+ singleBuilder = classmethod(singleBuilder)
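+
+# An illustrative sets.conf entry (the script path is hypothetical):
+#
+#   [from-script]
+#   class = portage.sets.shell.CommandOutputSet
+#   command = /usr/local/bin/list-atoms.sh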
diff --git a/usr/lib/portage/pym/portage/cache/__init__.py b/usr/lib/portage/pym/portage/cache/__init__.py
new file mode 100644
index 0000000..e7fe599
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
diff --git a/usr/lib/portage/pym/portage/cache/anydbm.py b/usr/lib/portage/pym/portage/cache/anydbm.py
new file mode 100644
index 0000000..1d56b14
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/anydbm.py
@@ -0,0 +1,113 @@
+# Copyright 2005-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import absolute_import
+
+try:
+ import anydbm as anydbm_module
+except ImportError:
+ # python 3.x
+ import dbm as anydbm_module
+
+try:
+ import dbm.gnu as gdbm
+except ImportError:
+ try:
+ import gdbm
+ except ImportError:
+ gdbm = None
+
+try:
+ from dbm import whichdb
+except ImportError:
+ from whichdb import whichdb
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+from portage import _unicode_encode
+from portage import os
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ self.__db = None
+ mode = "w"
+ if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+ # Allow multiple concurrent writers (see bug #53607).
+ mode += "u"
+ try:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ self.__db = anydbm_module.open(self._db_path,
+ mode, self._perms)
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ except (OSError, IOError) as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+ if self.__db == None:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ if gdbm is None:
+ self.__db = anydbm_module.open(self._db_path,
+ "c", self._perms)
+ else:
+ # Prefer gdbm type if available, since it allows
+ # multiple concurrent writers (see bug #53607).
+ self.__db = gdbm.open(self._db_path,
+ "cu", self._perms)
+ except anydbm_module.error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+ self._ensure_access(self._db_path)
+
+ def iteritems(self):
+ # dbm doesn't implement items()
+ for k in self.__db.keys():
+ yield (k, self[k])
+
+ def _getitem(self, cpv):
+ # we override _getitem because the stored values are just pickles of the data handed in.
+ return pickle.loads(self.__db[_unicode_encode(cpv)])
+
+ def _setitem(self, cpv, values):
+ self.__db[_unicode_encode(cpv)] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ # keys are stored in encoded form (see _setitem)
+ del self.__db[_unicode_encode(cpv)]
+
+ def __iter__(self):
+ return iter(list(self.__db.keys()))
+
+ def __contains__(self, cpv):
+ return _unicode_encode(cpv) in self.__db
+
+ def __del__(self):
+ if "__db" in self.__dict__ and self.__db != None:
+ self.__db.sync()
+ self.__db.close()
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
diff --git a/usr/lib/portage/pym/portage/cache/cache_errors.py b/usr/lib/portage/pym/portage/cache/cache_errors.py
new file mode 100644
index 0000000..3c1f239
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/cache_errors.py
@@ -0,0 +1,62 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception is None: exception = ''
+ self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
+
+class StatCollision(CacheError):
+ """
+ If the content of a cache entry changes and neither the file mtime nor
+ size changes, it will prevent rsync from detecting changes. Cache backends
+ may raise this exception from _setitem() if they detect this type of stat
+ collision. See bug #139134.
+ """
+ def __init__(self, key, filename, mtime, size):
+ self.key = key
+ self.filename = filename
+ self.mtime = mtime
+ self.size = size
+
+ def __str__(self):
+ return "%s has stat collision with size %s and mtime %s" % \
+ (self.key, self.size, self.mtime)
+
+ def __repr__(self):
+ return "portage.cache.cache_errors.StatCollision(%s)" % \
+ (', '.join((repr(self.key), repr(self.filename),
+ repr(self.mtime), repr(self.size))),)
diff --git a/usr/lib/portage/pym/portage/cache/ebuild_xattr.py b/usr/lib/portage/pym/portage/cache/ebuild_xattr.py
new file mode 100644
index 0000000..db6e177
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/ebuild_xattr.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# Copyright: 2009-2011 Gentoo Foundation
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
+# License: GPL2
+
+__all__ = ['database']
+
+import errno
+
+import portage
+from portage.cache import fs_template
+from portage.versions import catsplit
+from portage import cpv_getkey
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'xattr')
+
+class NoValueException(Exception):
+ pass
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.portdir = self.label
+ self.ns = xattr.NS_USER + '.gentoo.cache'
+ self.keys = set(self._known_keys)
+ self.keys.add('_mtime_')
+ self.keys.add('_eclasses_')
+ # xattr values have an upper length limit
+ self.max_len = self.__get_max()
+
+ def __get_max(self):
+ path = os.path.join(self.portdir,'profiles/repo_name')
+ try:
+ return int(self.__get(path,'value_max_len'))
+ except NoValueException as e:
+ max = self.__calc_max(path)
+ self.__set(path,'value_max_len',str(max))
+ return max
+
+ def __calc_max(self,path):
+ """ Find out max attribute length supported by the file system """
+
+ hundred = 'a' * 100
+ s = hundred
+
+ # Could use try/except/finally here, but that requires Python >= 2.5
+ try:
+ while True:
+ self.__set(path,'test_max',s)
+ s+=hundred
+ except IOError as e:
+ # ext-based filesystems report the wrong errno:
+ # http://bugzilla.kernel.org/show_bug.cgi?id=12793
+ if e.errno in (errno.E2BIG, errno.ENOSPC):
+ result = len(s)-100
+ else:
+ raise
+
+ try:
+ self.__remove(path,'test_max')
+ except IOError as e:
+ if e.errno != errno.ENODATA:
+ raise
+
+ return result
+
+ def __get_path(self,cpv):
+ cat,pn = catsplit(cpv_getkey(cpv))
+ return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
+
+ def __has_cache(self,path):
+ try:
+ self.__get(path,'_mtime_')
+ except NoValueException as e:
+ return False
+
+ return True
+
+ def __get(self,path,key,default=None):
+ try:
+ return xattr.get(path,key,namespace=self.ns)
+ except IOError as e:
+ if default is not None and e.errno == errno.ENODATA:
+ return default
+ else:
+ raise NoValueException()
+
+ def __remove(self,path,key):
+ xattr.remove(path,key,namespace=self.ns)
+
+ def __set(self,path,key,value):
+ xattr.set(path,key,value,namespace=self.ns)
+
+ def _getitem(self, cpv):
+ values = {}
+ path = self.__get_path(cpv)
+ all = {}
+ for key, value in xattr.get_all(path, namespace=self.ns):
+ all[key] = value
+
+ if not '_mtime_' in all:
+ raise KeyError(cpv)
+
+ # We default to '' like other caches
+ for key in self.keys:
+ attr_value = all.get(key,'1:')
+ parts,sep,value = attr_value.partition(':')
+ parts = int(parts)
+ if parts > 1:
+ for i in range(1,parts):
+ value += all.get(key+str(i))
+ values[key] = value
+
+ return values
+
+ def _setitem(self, cpv, values):
+ path = self.__get_path(cpv)
+ max = self.max_len
+ for key,value in values.items():
+ # mtime comes in as long so need to convert to strings
+ s = str(value)
+ # We need to split long values
+ value_len = len(s)
+ parts = 0
+ if value_len > max:
+ # Find out how many parts we need; use integer division so that
+ # 'parts' stays an int under Python 3
+ parts = value_len // max
+ if value_len % max > 0:
+ parts += 1
+
+ # Only the first entry carries the number of parts
+ self.__set(path,key,'%s:%s'%(parts,s[0:max]))
+
+ # Write out the rest
+ for i in range(1,parts):
+ start = i * max
+ val = s[start:start+max]
+ self.__set(path,key+str(i),val)
+ else:
+ self.__set(path,key,"%s:%s"%(1,s))
+
+ def _delitem(self, cpv):
+ pass # Will be gone with the ebuild
+
+ def __contains__(self, cpv):
+ return os.path.exists(self.__get_path(cpv))
+
+ def __iter__(self):
+
+ for root, dirs, files in os.walk(self.portdir):
+ for file in files:
+ try:
+ file = _unicode_decode(file,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if file[-7:] == '.ebuild':
+ cat = os.path.basename(os.path.dirname(root))
+ pn_pv = file[:-7]
+ path = os.path.join(root,file)
+ if self.__has_cache(path):
+ yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])
diff --git a/usr/lib/portage/pym/portage/cache/flat_hash.py b/usr/lib/portage/pym/portage/cache/flat_hash.py
new file mode 100644
index 0000000..5304296
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/flat_hash.py
@@ -0,0 +1,162 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import unicode_literals
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+import errno
+import io
+import stat
+import sys
+import os as _os
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.exception import InvalidData
+from portage.versions import _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+ write_keys = set(self._known_keys)
+ write_keys.add("_eclasses_")
+ write_keys.add("_%s_" % (self.validation_chf,))
+ self._write_keys = sorted(write_keys)
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ def _getitem(self, cpv):
+ # Don't use os.path.join, for better performance.
+ fp = self.location + _os.sep + cpv
+ try:
+ with io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as myf:
+ lines = myf.read().split("\n")
+ if not lines[-1]:
+ lines.pop()
+ d = self._parse_data(lines, cpv)
+ if '_mtime_' not in d:
+ # Backward compatibility with old cache
+ # that uses mtime mangling.
+ d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
+ return d
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise cache_errors.CacheCorruption(cpv, e)
+ raise KeyError(cpv, e)
+
+ def _parse_data(self, data, cpv):
+ try:
+ return dict( x.split("=", 1) for x in data )
+ except ValueError as e:
+ # If a line is missing an "=", the split length is 1 instead of 2.
+ raise cache_errors.CacheCorruption(cpv, e)
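+
+ # The on-disk format parsed above is one KEY=value pair per line, as
+ # written by _setitem() below. An illustrative entry:
+ #
+ #   EAPI=5
+ #   SLOT=0
+ #   _md5_=0123456789abcdef0123456789abcdef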
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (IOError, OSError) as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ for k in self._write_keys:
+ v = values.get(k)
+ if not v:
+ continue
+ # NOTE: This format string requires unicode_literals, so that
+ # k and v are coerced to unicode, in order to prevent TypeError
+ # when writing raw bytes to TextIOWrapper with Python 2.
+ myf.write("%s=%s\n" % (k, v))
+ finally:
+ myf.close()
+ self._ensure_access(fp)
+
+ #update written. now we move it.
+
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError) as e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [(0, self.location)]
+ len_base = len(self.location)
+ while dirs:
+ depth, dir_path = dirs.pop()
+ try:
+ dir_list = os.listdir(dir_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ continue
+ for l in dir_list:
+ p = os.path.join(dir_path, l)
+ try:
+ st = os.lstat(p)
+ except OSError:
+ # Cache entry disappeared.
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ # Only recurse 1 deep, in order to avoid iteration over
+ # entries from another nested cache instance. This can
+ # happen if the user nests an overlay inside
+ # /usr/portage/local as in bug #302764.
+ if depth < 1:
+ dirs.append((depth+1, p))
+ continue
+
+ try:
+ yield _pkg_str(p[len_base+1:])
+ except InvalidData:
+ continue
+
+
+class md5_database(database):
+
+ validation_chf = 'md5'
+ store_eclass_paths = False
diff --git a/usr/lib/portage/pym/portage/cache/fs_template.py b/usr/lib/portage/pym/portage/cache/fs_template.py
new file mode 100644
index 0000000..de4fe4b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/fs_template.py
@@ -0,0 +1,98 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+import os as _os
+import sys
+from portage.cache import template
+from portage import os
+
+from portage.proxy.lazyimport import lazyimport
+lazyimport(globals(),
+ 'portage.exception:PortageException',
+ 'portage.util:apply_permissions',
+)
+del lazyimport
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class FsBased(template.database):
+ """template wrapping fs needed options, and providing _ensure_access as a way to
+ attempt to ensure files have the specified owners/perms"""
+
+ def __init__(self, *args, **config):
+
+ for x, y in (("gid", -1), ("perms", -1)):
+ if x in config:
+ # Since Python 3.4, chown requires int type (no proxies).
+ setattr(self, "_" + x, int(config[x]))
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+ def _ensure_access(self, path, mtime=-1):
+ """returns true or false if it's able to ensure that path is properly chmod'd and chowned.
+ if mtime is specified, attempts to ensure that's correct also"""
+ try:
+ apply_permissions(path, gid=self._gid, mode=self._perms)
+ if mtime != -1:
+ mtime=long(mtime)
+ os.utime(path, (mtime, mtime))
+ except (PortageException, EnvironmentError):
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+ for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+ base = os.path.join(base,dir)
+ if not os.path.exists(base):
+ if self._perms != -1:
+ um = os.umask(0)
+ try:
+ perms = self._perms
+ if perms == -1:
+ perms = 0
+ perms |= 0o755
+ os.mkdir(base, perms)
+ if self._gid != -1:
+ os.chown(base, -1, self._gid)
+ finally:
+ if self._perms != -1:
+ os.umask(um)
+
+ def _prune_empty_dirs(self):
+ all_dirs = []
+ for parent, dirs, files in os.walk(self.location):
+ for x in dirs:
+ all_dirs.append(_os.path.join(parent, x))
+ while all_dirs:
+ try:
+ _os.rmdir(all_dirs.pop())
+ except OSError:
+ pass
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
+
diff --git a/usr/lib/portage/pym/portage/cache/mappings.py b/usr/lib/portage/pym/portage/cache/mappings.py
new file mode 100644
index 0000000..cd39a6e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/mappings.py
@@ -0,0 +1,485 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
+ "LazyLoad", "slot_dict_class"]
+
+import sys
+import weakref
+
+class Mapping(object):
+ """
+ In python-3.0, the UserDict.DictMixin class has been replaced by
+ Mapping and MutableMapping from the collections module, but 2to3
+ doesn't currently account for this change:
+
+ http://bugs.python.org/issue2876
+
+ As a workaround for the above issue, use this class as a substitute
+ for UserDict.DictMixin so that code converted via 2to3 will run.
+ """
+
+ __slots__ = ()
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def keys(self):
+ return list(self.__iter__())
+
+ def __contains__(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+
+ def iterkeys(self):
+ return self.__iter__()
+
+ def itervalues(self):
+ for _, v in self.items():
+ yield v
+
+ def values(self):
+ return [v for _, v in self.iteritems()]
+
+ def items(self):
+ return list(self.iteritems())
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __repr__(self):
+ return repr(dict(self.items()))
+
+ def __len__(self):
+ return len(list(self))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+class MutableMapping(Mapping):
+ """
+ A mutable version of the Mapping class.
+ """
+
+ __slots__ = ()
+
+ def clear(self):
+ for key in list(self):
+ del self[key]
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError("pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = next(iter(self.items()))
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+class UserDict(MutableMapping):
+ """
+ Use this class as a substitute for UserDict.UserDict so that
+ code converted via 2to3 will run:
+
+ http://bugs.python.org/issue2876
+ """
+
+ __slots__ = ('data',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.data = {}
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __contains__(self, key):
+ return key in self.data
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def clear(self):
+ self.data.clear()
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class OrderedDict(UserDict):
+
+ __slots__ = ('_order',)
+
+ def __init__(self, *args, **kwargs):
+ self._order = []
+ UserDict.__init__(self, *args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._order)
+
+ def __setitem__(self, key, item):
+ new_key = key not in self
+ UserDict.__setitem__(self, key, item)
+ if new_key:
+ self._order.append(key)
+
+ def __delitem__(self, key):
+ UserDict.__delitem__(self, key)
+ self._order.remove(key)
+
+ def clear(self):
+ UserDict.clear(self)
+ del self._order[:]
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class ProtectedDict(MutableMapping):
+ """
+ given an initial dict, this wraps that dict, storing changes in a
+ secondary dict and protecting the underlying dict from modification
+ """
+ __slots__=("orig","new","blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+
+ def __iter__(self):
+ for k in self.new:
+ yield k
+ for k in self.orig:
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+ def __contains__(self, key):
+ return key in self.new or (key not in self.blacklist and key in self.orig)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
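+
+# Illustrative usage (hypothetical values): writes and deletions are
+# overlaid on the wrapped dict without modifying it.
+#
+#   orig = {"x": 1}
+#   p = ProtectedDict(orig)
+#   p["y"] = 2
+#   del p["x"]
+#   sorted(p)  # ["y"]
+#   orig       # {"x": 1}, unchanged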
+
+class LazyLoad(Mapping):
+ """
+ Lazy loading of values for a dict
+ """
+ __slots__=("pull", "d")
+
+ def __init__(self, pull_items_func, initial_items=[]):
+ self.d = {}
+ for k, v in initial_items:
+ self.d[k] = v
+ self.pull = pull_items_func
+
+ def __getitem__(self, key):
+ if key in self.d:
+ return self.d[key]
+ elif self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d[key]
+
+ def __iter__(self):
+ if self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return iter(self.d)
+
+ def __contains__(self, key):
+ if key in self.d:
+ return True
+ elif self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return key in self.d
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
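+
+# Illustrative usage (hypothetical pull function): pull_items_func is
+# called at most once, on the first miss, and its items are then cached.
+#
+#   def pull():
+#       return {"a": 1, "b": 2}
+#   d = LazyLoad(pull, initial_items=[("c", 3)])
+#   d["c"]  # 3, pull() not called yet
+#   d["a"]  # 1, triggers the single pull() call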
+
+_slot_dict_classes = weakref.WeakValueDictionary()
+
+def slot_dict_class(keys, prefix="_val_"):
+ """
+ Generates mapping classes that behave similar to a dict but store values
+ as object attributes that are allocated via __slots__. Instances of these
+ objects have a smaller memory footprint than a normal dict object.
+
+ @param keys: Fixed set of allowed keys
+ @type keys: Iterable
+ @param prefix: a prefix to use when mapping
+ attribute names from keys
+ @type prefix: String
+ @rtype: SlotDict
+ @return: A class that constructs SlotDict instances
+ having the specified keys.
+ """
+ if isinstance(keys, frozenset):
+ keys_set = keys
+ else:
+ keys_set = frozenset(keys)
+ v = _slot_dict_classes.get((keys_set, prefix))
+ if v is None:
+
+ class SlotDict(object):
+
+ allowed_keys = keys_set
+ _prefix = prefix
+ __slots__ = ("__weakref__",) + \
+ tuple(prefix + k for k in allowed_keys)
+
+ def __init__(self, *args, **kwargs):
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __iter__(self):
+ for k, v in self.iteritems():
+ yield k
+
+ def __len__(self):
+ l = 0
+ for i in self.iteritems():
+ l += 1
+ return l
+
+ def keys(self):
+ return list(self)
+
+ def iteritems(self):
+ prefix = self._prefix
+ for k in self.allowed_keys:
+ try:
+ yield (k, getattr(self, prefix + k))
+ except AttributeError:
+ pass
+
+ def items(self):
+ return list(self.iteritems())
+
+ def itervalues(self):
+ for k, v in self.iteritems():
+ yield v
+
+ def values(self):
+ return list(self.itervalues())
+
+ def __delitem__(self, k):
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def __setitem__(self, k, v):
+ setattr(self, self._prefix + k, v)
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+ def __getitem__(self, k):
+ try:
+ return getattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __contains__(self, k):
+ return hasattr(self, self._prefix + k)
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = next(self.iteritems())
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def copy(self):
+ c = self.__class__()
+ c.update(self)
+ return c
+
+ def clear(self):
+ for k in self.allowed_keys:
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ pass
+
+ def __str__(self):
+ return str(dict(self.iteritems()))
+
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+ v = SlotDict
+ _slot_dict_classes[(keys_set, prefix)] = v
+ return v
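+
+# Usage sketch (illustrative): generated classes are cached per (keys, prefix)
+# pair, so repeated calls with the same arguments return the same class.
+#
+#     PkgData = slot_dict_class(("cpv", "slot"))
+#     pkg = PkgData(cpv="sys-apps/portage-2.2.14", slot="0")
+#     pkg["slot"]                                 # -> "0"
+#     assert PkgData is slot_dict_class(("cpv", "slot"))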
diff --git a/usr/lib/portage/pym/portage/cache/metadata.py b/usr/lib/portage/pym/portage/cache/metadata.py
new file mode 100644
index 0000000..0c588bd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/metadata.py
@@ -0,0 +1,158 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import errno
+import re
+import stat
+import sys
+from operator import attrgetter
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache import cache_errors, flat_hash
+import portage.eclass_cache
+from portage.cache.template import reconstruct_eclasses
+from portage.cache.mappings import ProtectedDict
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+# This is the old cache format, flat_list; its fixed line count is maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES',
+ 'DEFINED_PHASES', 'HDEPEND')
+
+ autocommits = True
+ serialize_eclasses = False
+
+ _hashed_re = re.compile('^(\\w+)=([^\n]*)')
+
+ def __init__(self, location, *args, **config):
+ loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.location = os.path.join(loc, "metadata","cache")
+ self.ec = None
+ self.raise_stat_collision = False
+
+ def _parse_data(self, data, cpv):
+ _hashed_re_match = self._hashed_re.match
+ d = {}
+
+ for line in data:
+ hashed = False
+ hashed_match = _hashed_re_match(line)
+ if hashed_match is None:
+ d.clear()
+ try:
+ for i, key in enumerate(self.auxdbkey_order):
+ d[key] = data[i]
+ except IndexError:
+ pass
+ break
+ else:
+ d[hashed_match.group(1)] = hashed_match.group(2)
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ if self.ec is None:
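+ # strip the trailing "/metadata/cache" component to get the repo root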
+ self.ec = portage.eclass_cache.cache(self.location[:-15])
+ getter = attrgetter(self.validation_chf)
+ try:
+ ec_data = self.ec.get_eclass_data(d["INHERITED"].split())
+ d["_eclasses_"] = dict((k, (v.eclass_dir, getter(v)))
+ for k,v in ec_data.items())
+ except KeyError as e:
+ # INHERITED contains a non-existent eclass.
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ d["_eclasses_"] = {}
+ elif isinstance(d["_eclasses_"], basestring):
+ # We skip this if flat_hash.database._parse_data() was called above
+ # because it calls reconstruct_eclasses() internally.
+ d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+ return d
+
+ def _setitem(self, cpv, values):
+ if "_eclasses_" in values:
+ values = ProtectedDict(values)
+ values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
+
+ new_content = []
+ for k in self.auxdbkey_order:
+ new_content.append(values.get(k, ''))
+ new_content.append('\n')
+ for i in range(magic_line_count - len(self.auxdbkey_order)):
+ new_content.append('\n')
+ new_content = ''.join(new_content)
+ new_content = _unicode_encode(new_content,
+ _encodings['repo.content'], errors='backslashreplace')
+
+ new_fp = os.path.join(self.location, cpv)
+ try:
+ f = open(_unicode_encode(new_fp,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ try:
+ existing_st = os.fstat(f.fileno())
+ existing_content = f.read()
+ finally:
+ f.close()
+ except EnvironmentError:
+ pass
+ else:
+ existing_mtime = existing_st[stat.ST_MTIME]
+ if values['_mtime_'] == existing_mtime and \
+ existing_content == new_content:
+ return
+
+ if self.raise_stat_collision and \
+ values['_mtime_'] == existing_mtime and \
+ len(new_content) == existing_st.st_size:
+ raise cache_errors.StatCollision(cpv, new_fp,
+ existing_mtime, existing_st.st_size)
+
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],
+ ".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ myf.write(new_content)
+ finally:
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+ try:
+ os.rename(fp, new_fp)
+ except EnvironmentError as e:
+ try:
+ os.unlink(fp)
+ except EnvironmentError:
+ pass
+ raise cache_errors.CacheCorruption(cpv, e)
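+
+# On-disk format sketch (illustrative): each file under metadata/cache is
+# either the hashed "KEY=value" form or the legacy flat_list form with one
+# value per line in auxdbkey_order, padded with blank lines up to
+# magic_line_count; _parse_data() accepts both.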
diff --git a/usr/lib/portage/pym/portage/cache/sql_template.py b/usr/lib/portage/pym/portage/cache/sql_template.py
new file mode 100644
index 0000000..d023b1b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/sql_template.py
@@ -0,0 +1,301 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template, cache_errors
+from portage.cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all exceptions thrown from the derived RDBMS are derived
+ from.
+
+ SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified depending on the RDBMS, as should
+ SCHEMA_PACKAGE_CREATE; basically you need to deal with creation of a unique pkgid. If the dbapi2
+ rdbms class has a method of recovering that id, then modify _insert_cpv to remove the extra select.
+
+ Creation of a derived class involves supplying _initdb_con and _table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self, config):
+ """ensure needed tables are in place.
+ If the derived class needs a different set of table creation commands, overload the appropriate
+ SCHEMA_ attributes. If it needs additional setup beyond that, override this method."""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try:
+ self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try:
+ self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try:
+ self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+ # yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+ if "db" in self.__dict__ and self.db != None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try:
+ pkgid = self._insert_cpv(cpv)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if key in values and values[key]:
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try:
+ self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ try:
+ self.db.rollback()
+ except self._BaseError:
+ pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+ returns the cpv's new pkgid.
+ note this doesn't commit the transaction. The caller is expected to."""
+
+ cpv = self._sfilter(cpv)
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+ else:
+ # just delete it.
+ try:
+ del self[cpv]
+ except (cache_errors.CacheCorruption, KeyError):
+ pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+ raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+ " %i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def __iter__(self):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+# return [ row[0] for row in self.con.fetchall() ]
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try:
+ self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ oldcpv = None
+ l = []
+ for x, y, v in self.con.fetchall():
+ if oldcpv != x:
+ if oldcpv is not None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+ yield oldcpv, d
+ del l[:]
+ oldcpv = x
+ l.append((y,v))
+ if oldcpv is not None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+ yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
+ try:
+ self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
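+
+# Derivation sketch (illustrative, not a working backend; "some_dbapi" is a
+# hypothetical DB-API 2.0 module): a subclass mainly supplies the error base
+# class, a connection factory, and _table_exists().
+#
+#     class database(SQLDatabase):
+#         _BaseError = some_dbapi.Error
+#         _dbClass = some_dbapi.connect
+#
+#         def _table_exists(self, tbl):
+#             self.con.execute(...)  # query the backend's catalog for tbl
+#             return self.con.rowcount == 1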
diff --git a/usr/lib/portage/pym/portage/cache/sqlite.py b/usr/lib/portage/pym/portage/cache/sqlite.py
new file mode 100644
index 0000000..310ac94
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/sqlite.py
@@ -0,0 +1,280 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, unicode_literals
+
+import re
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _unicode_decode
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class database(fs_template.FsBased):
+
+ autocommits = False
+ synchronous = False
+ # cache_bytes is used together with page_size (set at sqlite build time)
+ # to calculate the number of pages requested, according to the following
+ # equation: cache_bytes = page_bytes * page_count
+ cache_bytes = 1024 * 1024 * 10
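+ # (with a 4096-byte page size, for example, this comes to 2560 pages)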
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self._import_sqlite()
+ self._allowed_keys = ["_mtime_", "_eclasses_"]
+ self._allowed_keys.extend(self._known_keys)
+ self._allowed_keys.sort()
+ self._allowed_keys_set = frozenset(self._allowed_keys)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ config.setdefault("autocommit", self.autocommits)
+ config.setdefault("cache_bytes", self.cache_bytes)
+ config.setdefault("synchronous", self.synchronous)
+ # Set longer timeout for throwing a "database is locked" exception.
+ # Default timeout in sqlite3 module is 5.0 seconds.
+ config.setdefault("timeout", 15)
+ self._db_init_connection(config)
+ self._db_init_structures()
+
+ def _import_sqlite(self):
+ # sqlite3 is optional with >=python-2.5
+ try:
+ import sqlite3 as db_module
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ self._db_module = db_module
+ self._db_error = db_module.Error
+
+ def _db_escape_string(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ if not isinstance(s, basestring):
+ # Avoid potential UnicodeEncodeError in python-2.x by
+ # only calling str() when it's absolutely necessary.
+ s = str(s)
+ return "'%s'" % s.replace("'", "''")
+
+ def _db_init_connection(self, config):
+ self._dbpath = self.location + ".sqlite"
+ #if os.path.exists(self._dbpath):
+ # os.unlink(self._dbpath)
+ connection_kwargs = {}
+ connection_kwargs["timeout"] = config["timeout"]
+ try:
+ if not self.readonly:
+ self._ensure_dirs()
+ self._db_connection = self._db_module.connect(
+ database=_unicode_decode(self._dbpath), **connection_kwargs)
+ self._db_cursor = self._db_connection.cursor()
+ self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+ if not self.readonly and not self._ensure_access(self._dbpath):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self._db_init_cache_size(config["cache_bytes"])
+ self._db_init_synchronous(config["synchronous"])
+ except self._db_error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _db_init_structures(self):
+ self._db_table = {}
+ self._db_table["packages"] = {}
+ mytable = "portage_packages"
+ self._db_table["packages"]["table_name"] = mytable
+ self._db_table["packages"]["package_id"] = "internal_db_package_id"
+ self._db_table["packages"]["package_key"] = "portage_package_key"
+ create_statement = []
+ create_statement.append("CREATE TABLE")
+ create_statement.append(mytable)
+ create_statement.append("(")
+ table_parameters = []
+ table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+ table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+ for k in self._allowed_keys:
+ table_parameters.append("%s TEXT" % k)
+ table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+ create_statement.append(",".join(table_parameters))
+ create_statement.append(")")
+
+ self._db_table["packages"]["create"] = " ".join(create_statement)
+
+ cursor = self._db_cursor
+ for k, v in self._db_table.items():
+ if self._db_table_exists(v["table_name"]):
+ create_statement = self._db_table_get_create(v["table_name"])
+ table_ok, missing_keys = self._db_validate_create_statement(create_statement)
+ if table_ok:
+ if missing_keys:
+ for k in sorted(missing_keys):
+ cursor.execute("ALTER TABLE %s ADD COLUMN %s TEXT" %
+ (self._db_table["packages"]["table_name"], k))
+ else:
+ writemsg(_("sqlite: dropping old table: %s\n") % v["table_name"])
+ cursor.execute("DROP TABLE %s" % v["table_name"])
+ cursor.execute(v["create"])
+ else:
+ cursor.execute(v["create"])
+
+ def _db_table_exists(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+ self._db_escape_string(table_name))
+ return len(cursor.fetchall()) == 1
+
+ def _db_table_get_create(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+ self._db_escape_string(table_name))
+ return cursor.fetchall()[0][0]
+
+ def _db_validate_create_statement(self, statement):
+ missing_keys = None
+ if statement == self._db_table["packages"]["create"]:
+ return True, missing_keys
+
+ m = re.match(r'^\s*CREATE\s*TABLE\s*%s\s*\(\s*%s\s*INTEGER\s*PRIMARY\s*KEY\s*AUTOINCREMENT\s*,(.*)\)\s*$' %
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_id"]),
+ statement)
+ if m is None:
+ return False, missing_keys
+
+ unique_constraints = set([self._db_table["packages"]["package_key"]])
+ missing_keys = set(self._allowed_keys)
+ unique_re = re.compile(r'^\s*UNIQUE\s*\(\s*(\w*)\s*\)\s*$')
+ column_re = re.compile(r'^\s*(\w*)\s*TEXT\s*$')
+ for x in m.group(1).split(","):
+ m = column_re.match(x)
+ if m is not None:
+ missing_keys.discard(m.group(1))
+ continue
+ m = unique_re.match(x)
+ if m is not None:
+ unique_constraints.discard(m.group(1))
+ continue
+
+ if unique_constraints:
+ return False, missing_keys
+
+ return True, missing_keys
+
+ def _db_init_cache_size(self, cache_bytes):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA page_size")
+ page_size = int(cursor.fetchone()[0])
+ # number of pages, sqlite default is 2000
+ cache_size = cache_bytes // page_size
+ cursor.execute("PRAGMA cache_size = %d" % cache_size)
+ cursor.execute("PRAGMA cache_size")
+ actual_cache_size = int(cursor.fetchone()[0])
+ del cursor
+ if actual_cache_size != cache_size:
+ raise cache_errors.InitializationError(self.__class__, "actual cache_size = %s does not match requested size of %s" % (actual_cache_size, cache_size))
+
+ def _db_init_synchronous(self, synchronous):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA synchronous = %d" % synchronous)
+ cursor.execute("PRAGMA synchronous")
+ actual_synchronous = int(cursor.fetchone()[0])
+ del cursor
+ if actual_synchronous != synchronous:
+ raise cache_errors.InitializationError(self.__class__, "actual synchronous = %s does not match requested value of %s" % (actual_synchronous, synchronous))
+
+ def _getitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("select * from %s where %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+ result = cursor.fetchall()
+ if len(result) == 1:
+ pass
+ elif len(result) == 0:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ result = result[0]
+ d = {}
+ allowed_keys_set = self._allowed_keys_set
+ for column_index, column_info in enumerate(cursor.description):
+ k = column_info[0]
+ if k in allowed_keys_set:
+ v = result[column_index]
+ if v is None:
+ # This happens after a new empty column has been added.
+ v = ""
+ d[k] = v
+
+ return d
+
+ def _setitem(self, cpv, values):
+ update_statement = []
+ update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+ update_statement.append("(")
+ update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+ update_statement.append(")")
+ update_statement.append("VALUES")
+ update_statement.append("(")
+ values_parameters = []
+ values_parameters.append(self._db_escape_string(cpv))
+ for k in self._allowed_keys:
+ values_parameters.append(self._db_escape_string(values.get(k, '')))
+ update_statement.append(",".join(values_parameters))
+ update_statement.append(")")
+ cursor = self._db_cursor
+ try:
+ s = " ".join(update_statement)
+ cursor.execute(s)
+ except self._db_error as e:
+ writemsg("%s: %s\n" % (cpv, str(e)))
+ raise
+
+ def commit(self):
+ self._db_connection.commit()
+
+ def _delitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+
+ def __contains__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute(" ".join(
+ ["SELECT %s FROM %s" %
+ (self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["table_name"]),
+ "WHERE %s=%s" % (
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv))]))
+ result = cursor.fetchall()
+ if len(result) == 0:
+ return False
+ elif len(result) == 1:
+ return True
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT %s FROM %s" % \
+ (self._db_table["packages"]["package_key"],
+ self._db_table["packages"]["table_name"]))
+ result = cursor.fetchall()
+ key_list = [x[0] for x in result]
+ del result
+ while key_list:
+ yield key_list.pop()
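+
+# Selection sketch (illustrative): this backend is typically enabled from the
+# user's modules file with a line such as
+#     portdbapi.auxdbmodule = portage.cache.sqlite.database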
diff --git a/usr/lib/portage/pym/portage/cache/template.py b/usr/lib/portage/pym/portage/cache/template.py
new file mode 100644
index 0000000..bc81b86
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/template.py
@@ -0,0 +1,312 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from portage.cache import cache_errors
+from portage.cache.cache_errors import InvalidRestriction
+from portage.cache.mappings import ProtectedDict
+import sys
+import warnings
+import operator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+ long = int
+else:
+ _unicode = unicode
+
+class database(object):
+ # this is for metadata/cache transfer.
+ # basically it flags that the cache needs to be updated when transferring cache to cache.
+ # leave this.
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+ validation_chf = 'mtime'
+ store_eclass_paths = True
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+ """set a cpv to values
+ This shouldn't be overriden in derived classes since it handles the __eclasses__ conversion.
+ that said, if the class handles it, they can override it."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d = self._getitem(cpv)
+ if self.serialize_eclasses and "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"],
+ self.validation_chf, paths=self.store_eclass_paths)
+ elif "_eclasses_" not in d:
+ d["_eclasses_"] = {}
+ # Never return INHERITED, since portdbapi.aux_get() will
+ # generate it automatically from _eclasses_, and we want
+ # to omit it in comparisons between cache entries like
+ # those that egencache uses to avoid redundant writes.
+ d.pop("INHERITED", None)
+ mtime = d.get('_mtime_')
+ if mtime is None:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ field is missing')
+ try:
+ mtime = long(mtime)
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ conversion to long failed: %s' % (mtime,))
+ d['_mtime_'] = mtime
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+ override this in derived classes"""
+ raise NotImplementedError
+
+ @staticmethod
+ def _internal_eclasses(extern_ec_dict, chf_type, paths):
+ """
+ When serialize_eclasses is False, we have to convert an external
+ eclass dict containing hashed_path objects into an appropriate
+ internal dict containing values of chf_type (and eclass dirs
+ if store_eclass_paths is True).
+ """
+ if not extern_ec_dict:
+ return extern_ec_dict
+ chf_getter = operator.attrgetter(chf_type)
+ if paths:
+ intern_ec_dict = dict((k, (v.eclass_dir, chf_getter(v)))
+ for k, v in extern_ec_dict.items())
+ else:
+ intern_ec_dict = dict((k, chf_getter(v))
+ for k, v in extern_ec_dict.items())
+ return intern_ec_dict
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ d = None
+ if self.cleanse_keys:
+ d=ProtectedDict(values)
+ for k, v in list(d.items()):
+ if not v:
+ del d[k]
+ if "_eclasses_" in values:
+ if d is None:
+ d = ProtectedDict(values)
+ if self.serialize_eclasses:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"],
+ self.validation_chf, paths=self.store_eclass_paths)
+ else:
+ d["_eclasses_"] = self._internal_eclasses(d["_eclasses_"],
+ self.validation_chf, self.store_eclass_paths)
+ elif d is None:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+ note _eclassees_ key *must* be handled"""
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return list(self)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for x in self:
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if rate == 0:
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError(self)
+
+ def __del__(self):
+ # This used to be handled by an atexit hook that called
+ # close_portdbapi_caches() for all portdbapi instances, but that was
+ # prone to memory leaks for API consumers that needed to create/destroy
+ # many portdbapi instances. So, instead we rely on __del__.
+ self.sync()
+
+ def __contains__(self, cpv):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override has_key instead. It
+ will automatically raise a NotImplementedError if has_key has not been
+ overridden."""
+ if self.has_key is database.has_key:
+ # prevent a possible recursive loop
+ raise NotImplementedError
+ warnings.warn("portage.cache.template.database.has_key() is "
+ "deprecated, override __contains__ instead",
+ DeprecationWarning)
+ return self.has_key(cpv)
+
+ def __iter__(self):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override iterkeys instead. It
+ will automatically raise a NotImplementedError if iterkeys has not been
+ overridden."""
+ if self.iterkeys is database.iterkeys:
+ # prevent a possible recursive loop
+ raise NotImplementedError(self)
+ return iter(self.keys())
+
+ def get(self, k, x=None):
+ try:
+ return self[k]
+ except KeyError:
+ return x
+
+ def validate_entry(self, entry, ebuild_hash, eclass_db):
+ hash_key = '_%s_' % self.validation_chf
+ try:
+ entry_hash = entry[hash_key]
+ except KeyError:
+ return False
+ else:
+ if entry_hash != getattr(ebuild_hash, self.validation_chf):
+ return False
+ update = eclass_db.validate_and_rewrite_cache(entry['_eclasses_'], self.validation_chf,
+ self.store_eclass_paths)
+ if update is None:
+ return False
+ if update:
+ entry['_eclasses_'] = update
+ return True
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+ can implement a faster method than pulling each cpv:values, and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.items():
+ # XXX this sucks.
+ try:
+ if isinstance(match, basestring):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+ except re.error as e:
+ raise InvalidRestriction(key, match, e)
+ if key not in self._known_keys:
+ raise InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self:
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.items():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+ yield cpv
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
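+# Subclassing sketch (illustrative): a concrete backend supplies at least
+# _getitem, _setitem and _delitem, plus __contains__ and __iter__; see
+# portage.cache.volatile in this import for a minimal in-memory example.
+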
+_keysorter = operator.itemgetter(0)
+
+def serialize_eclasses(eclass_dict, chf_type='mtime', paths=True):
+ """takes a dict, returns a string representing said dict"""
+ """The "new format", which causes older versions of <portage-2.1.2 to
+ traceback with a ValueError due to failed long() conversion. This format
+ isn't currently written, but the the capability to read it is already built
+ in.
+ return "\t".join(["%s\t%s" % (k, str(v)) \
+ for k, v in eclass_dict.iteritems()])
+ """
+ if not eclass_dict:
+ return ""
+ getter = operator.attrgetter(chf_type)
+ if paths:
+ return "\t".join("%s\t%s\t%s" % (k, v.eclass_dir, getter(v))
+ for k, v in sorted(eclass_dict.items(), key=_keysorter))
+ return "\t".join("%s\t%s" % (k, getter(v))
+ for k, v in sorted(eclass_dict.items(), key=_keysorter))
+
+
+def reconstruct_eclasses(cpv, eclass_string, chf_type='mtime', paths=True):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+ eclasses = eclass_string.strip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ converter = _unicode
+ if chf_type == 'mtime':
+ converter = long
+
+ if paths:
+ if len(eclasses) % 3 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ elif len(eclasses) % 2 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ try:
+ i = iter(eclasses)
+ if paths:
+ # The old format contains paths that will be discarded.
+ for name, path, val in zip(i, i, i):
+ d[name] = (path, converter(val))
+ else:
+ for name, val in zip(i, i):
+ d[name] = converter(val)
+ except IndexError:
+ raise cache_errors.CacheCorruption(cpv,
+ "_eclasses_ was of invalid len %i" % len(eclasses))
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ mtime conversion to long failed")
+ del eclasses
+ return d
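+
+# Format sketch (illustrative): with paths=True each eclass contributes three
+# tab-separated fields, e.g. "eutils\t/usr/portage/eclass\t1424954327"; with
+# paths=False the eclass_dir field is omitted. reconstruct_eclasses() inverts
+# this encoding.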
diff --git a/usr/lib/portage/pym/portage/cache/volatile.py b/usr/lib/portage/pym/portage/cache/volatile.py
new file mode 100644
index 0000000..5516745
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cache/volatile.py
@@ -0,0 +1,30 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.cache import template
+
+class database(template.database):
+
+ autocommits = True
+ serialize_eclasses = False
+ store_eclass_paths = False
+
+ def __init__(self, *args, **config):
+ config.pop("gid", None)
+ config.pop("perms", None)
+ super(database, self).__init__(*args, **config)
+ self._data = {}
+ self._delitem = self._data.__delitem__
+
+ def _setitem(self, name, values):
+ self._data[name] = copy.deepcopy(values)
+
+ def __getitem__(self, cpv):
+ return copy.deepcopy(self._data[cpv])
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __contains__(self, key):
+ return key in self._data
diff --git a/usr/lib/portage/pym/portage/checksum.py b/usr/lib/portage/pym/portage/checksum.py
new file mode 100644
index 0000000..f24a90f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/checksum.py
@@ -0,0 +1,427 @@
+# checksum.py -- core Portage functionality
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.const import PRELINK_BINARY, HASHING_BLOCKSIZE
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import errno
+import stat
+import subprocess
+import tempfile
+
+#dict of all available hash functions
+hashfunc_map = {}
+hashorigin_map = {}
+
+def _open_file(filename):
+ try:
+ return open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except IOError as e:
+ func_call = "open('%s')" % filename
+ if e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ else:
+ raise
+
+class _generate_hash_function(object):
+
+ __slots__ = ("_hashobject",)
+
+ def __init__(self, hashtype, hashobject, origin="unknown"):
+ self._hashobject = hashobject
+ hashfunc_map[hashtype] = self
+ hashorigin_map[hashtype] = origin
+
+ def __call__(self, filename):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @return: The hash and size of the data
+ """
+ with _open_file(filename) as f:
+ blocksize = HASHING_BLOCKSIZE
+ size = 0
+ checksum = self._hashobject()
+ data = f.read(blocksize)
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+
+ return (checksum.hexdigest(), size)
+
+# Define hash functions, try to use the best module available. Later definitions
+# override earlier ones
+
+# Use the internal modules as last fallback
+try:
+ from hashlib import md5 as _new_md5
+except ImportError:
+ from md5 import new as _new_md5
+
+md5hash = _generate_hash_function("MD5", _new_md5, origin="internal")
+
+try:
+ from hashlib import sha1 as _new_sha1
+except ImportError:
+ from sha import new as _new_sha1
+
+sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
+
+# Try to use mhash if available
+# mhash does not release the GIL presently, so it gets less priority than hashlib and
+# pycrypto. However, it might be the only accelerated implementation of
+# WHIRLPOOL available.
+try:
+ import mhash, functools
+ md5hash = _generate_hash_function("MD5", functools.partial(mhash.MHASH, mhash.MHASH_MD5), origin="mhash")
+ sha1hash = _generate_hash_function("SHA1", functools.partial(mhash.MHASH, mhash.MHASH_SHA1), origin="mhash")
+ sha256hash = _generate_hash_function("SHA256", functools.partial(mhash.MHASH, mhash.MHASH_SHA256), origin="mhash")
+ sha512hash = _generate_hash_function("SHA512", functools.partial(mhash.MHASH, mhash.MHASH_SHA512), origin="mhash")
+ for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")):
+ if hasattr(mhash, 'MHASH_%s' % hash_name.upper()):
+ globals()['%shash' % local_name] = \
+ _generate_hash_function(local_name.upper(), \
+ functools.partial(mhash.MHASH, getattr(mhash, 'MHASH_%s' % hash_name.upper())), \
+ origin='mhash')
+except ImportError:
+ pass
+
+# Use pycrypto when available, prefer it over the internal fallbacks
+# Check for 'new' attributes, since they can be missing if the module
+# is broken somehow.
+try:
+ from Crypto.Hash import SHA256, RIPEMD
+ sha256hash = getattr(SHA256, 'new', None)
+ if sha256hash is not None:
+ sha256hash = _generate_hash_function("SHA256",
+ sha256hash, origin="pycrypto")
+ rmd160hash = getattr(RIPEMD, 'new', None)
+ if rmd160hash is not None:
+ rmd160hash = _generate_hash_function("RMD160",
+ rmd160hash, origin="pycrypto")
+except ImportError:
+ pass
+
+# Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks.
+# Need special handling for RMD160/WHIRLPOOL as they may not always be provided by hashlib.
+try:
+ import hashlib, functools
+
+ md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib")
+ sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
+ sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
+ sha512hash = _generate_hash_function("SHA512", hashlib.sha512, origin="hashlib")
+ for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")):
+ try:
+ hashlib.new(hash_name)
+ except ValueError:
+ pass
+ else:
+ globals()['%shash' % local_name] = \
+ _generate_hash_function(local_name.upper(), \
+ functools.partial(hashlib.new, hash_name), \
+ origin='hashlib')
+
+except ImportError:
+ pass
+
+_whirlpool_unaccelerated = False
+if "WHIRLPOOL" not in hashfunc_map:
+ # Bundled WHIRLPOOL implementation
+ _whirlpool_unaccelerated = True
+ from portage.util.whirlpool import new as _new_whirlpool
+ whirlpoolhash = _generate_hash_function("WHIRLPOOL", _new_whirlpool, origin="bundled")
+
+# Use python-fchksum if available, prefer it over all other MD5 implementations
+try:
+ from fchksum import fmd5t as md5hash
+ hashfunc_map["MD5"] = md5hash
+ hashorigin_map["MD5"] = "python-fchksum"
+
+except ImportError:
+ pass
+
+# There is only one implementation for size
+def getsize(filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ cmd = [PRELINK_BINARY, "--version"]
+ cmd = [_unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ proc.communicate()
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ prelink_capable = True
+ del cmd, proc, status
+
+def is_prelinkable_elf(filename):
+ f = _open_file(filename)
+ try:
+ magic = f.read(17)
+ finally:
+ f.close()
+ return (len(magic) == 17 and magic.startswith(b'\x7fELF') and
+ magic[16:17] in (b'\x02', b'\x03')) # 2=ET_EXEC, 3=ET_DYN
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def _perform_md5_merge(x, **kwargs):
+ return perform_md5(_unicode_encode(x,
+ encoding=_encodings['merge'], errors='strict'), **kwargs)
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_map:
+ mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+ return mydict
+
+def get_valid_checksum_keys():
+ return list(hashfunc_map)
+
+def get_hash_origin(hashtype):
+ if hashtype not in hashfunc_map:
+ raise KeyError(hashtype)
+ return hashorigin_map.get(hashtype, "unknown")
+
+def _filter_unaccelarated_hashes(digests):
+ """
+ If multiple digests are available and some are unaccelerated,
+ then return a new dict that omits the unaccelerated ones. This
+ allows extreme performance problems like bug #425046 to be
+ avoided whenever practical, especially for cases like stage
+ builds where acceleration may not be available for some hashes
+ due to minimization of dependencies.
+ """
+ if _whirlpool_unaccelerated and "WHIRLPOOL" in digests:
+ verifiable_hash_types = set(digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if len(verifiable_hash_types) > 1:
+ digests = dict(digests)
+ digests.pop("WHIRLPOOL")
+
+ return digests
+
+class _hash_filter(object):
+ """
+ Implements filtering for PORTAGE_CHECKSUM_FILTER.
+ """
+
+ __slots__ = ('transparent', '_tokens',)
+
+ def __init__(self, filter_str):
+ tokens = filter_str.upper().split()
+ if not tokens or tokens[-1] == "*":
+ del tokens[:]
+ self.transparent = not tokens
+ tokens.reverse()
+ self._tokens = tuple(tokens)
+
+ def __call__(self, hash_name):
+ if self.transparent:
+ return True
+ matches = ("*", hash_name)
+ for token in self._tokens:
+ if token in matches:
+ return True
+ elif token[:1] == "-":
+ if token[1:] in matches:
+ return False
+ return False
+
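+# Behavior sketch (illustrative): tokens are evaluated right to left, so
+# PORTAGE_CHECKSUM_FILTER="-* sha512" rejects everything except SHA512.
+#
+#     hf = _hash_filter("-* sha512")
+#     hf("SHA512")   # True
+#     hf("MD5")      # False
+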
+def _apply_hash_filter(digests, hash_filter):
+ """
+ Return a new dict containing the filtered digests, or the same
+ dict if no changes are necessary. This will always preserve at
+ least one digest, in order to ensure that they are not all
+ discarded.
+ @param digests: dictionary of digests
+ @type digests: dict
+ @param hash_filter: A callable that takes a single hash name
+ argument, and returns True if the hash is to be used or
+ False otherwise
+ @type hash_filter: callable
+ """
+
+ verifiable_hash_types = set(digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ modified = False
+ if len(verifiable_hash_types) > 1:
+ for k in list(verifiable_hash_types):
+ if not hash_filter(k):
+ modified = True
+ verifiable_hash_types.remove(k)
+ if len(verifiable_hash_types) == 1:
+ break
+
+ if modified:
+ digests = dict((k, v) for (k, v) in digests.items()
+ if k == "size" or k in verifiable_hash_types)
+
+ return digests
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+ 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+ 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+ 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict["size"] != mysize:
+ return False,(_("Filesize does not match recorded size"), mysize, mydict["size"])
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+
+ verifiable_hash_types = set(mydict).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = list(expected)
+ expected.sort()
+ expected = " ".join(expected)
+ got = set(mydict)
+ got.discard("size")
+ got = list(got)
+ got.sort()
+ got = " ".join(got)
+ return False, (_("Insufficient data for checksum verification"), got, expected)
+
+ for x in sorted(mydict):
+ if x == "size":
+ continue
+ elif x in hashfunc_map:
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+ raise portage.exception.DigestException(
+ ("Failed to verify '$(file)s' on " + \
+ "checksum type '%(type)s'") % \
+ {"file" : filename, "type" : x})
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash, mydict[x])
+ break
+
+ return file_is_ok, reason
+
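+# Call sketch (illustrative; the path and digest values are hypothetical):
+# the digest dict normally comes from a Manifest entry and must contain
+# "size" plus at least one hash type known to this module.
+#
+#     ok, reason = verify_all("/var/distfiles/foo-1.0.tar.gz",
+#         {"size": 12345, "SHA256": "9f86d08..."})
+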
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file. The filename can
+ be either unicode or an encoded byte string. If filename
+ is unicode then a UnicodeDecodeError will be raised if
+ necessary.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ global prelink_capable
+ # Make sure filename is encoded with the correct encoding before
+ # it is passed to spawn (for prelink) and/or the hash function.
+ filename = _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ myfilename = filename
+ prelink_tmpfile = None
+ try:
+ if (calc_prelink and prelink_capable and
+ is_prelinkable_elf(filename)):
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ try:
+ tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
+ try:
+ retval = portage.process.spawn([PRELINK_BINARY,
+ "--verify", filename], fd_pipes={1:tmpfile_fd})
+ finally:
+ os.close(tmpfile_fd)
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ except portage.exception.CommandNotFound:
+ # This happens during uninstallation of prelink.
+ prelink_capable = False
+ try:
+ if hashname not in hashfunc_map:
+ raise portage.exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname](myfilename)
+ except (OSError, IOError) as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ raise portage.exception.FileNotFound(myfilename)
+ elif e.errno == portage.exception.PermissionDenied.errno:
+ raise portage.exception.PermissionDenied(myfilename)
+ raise
+ return myhash, mysize
+ finally:
+ if prelink_tmpfile:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+ @type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: A dictionary in the form:
+ return_value[hash_name] = hash_result
+ for each given checksum
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_map:
+ raise portage.exception.DigestException(x+" hash function not available (needs dev-python/pycrypto or >=dev-lang/python-2.5)")
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
diff --git a/usr/lib/portage/pym/portage/const.py b/usr/lib/portage/pym/portage/const.py
new file mode 100644
index 0000000..681e9c4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/const.py
@@ -0,0 +1,306 @@
+# portage: Constants
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+# ===========================================================================
+# autotool supplied constants.
+# ===========================================================================
+from portage.const_autotool import *
+
+import os
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+EPREFIX_LSTRIP = EPREFIX.lstrip(os.path.sep)
+
+# There are two types of variables here which can easily be confused,
+# resulting in arbitrary bugs, mainly exposed with an offset
+# installation (Prefix). The two types relate to the usage of
+# config_root or target_root.
+# The first, config_root (PORTAGE_CONFIGROOT), can be a path somewhere,
+# from which all derived paths need to be relative (e.g.
+# USER_CONFIG_PATH) without EPREFIX prepended in Prefix. This means
+# config_root can for instance be set to "$HOME/my/config". Obviously,
+# in such case it is not appropriate to prepend EPREFIX to derived
+# constants. The default value of config_root is EPREFIX (in non-Prefix
+# the empty string) -- overriding the value loses the EPREFIX as one
+# would expect.
+# Second there is target_root (ROOT) which is used to install somewhere
+# else entirely; in Prefix it is of limited use. Because this is an offset
+# always given, the EPREFIX should always be applied in it, hence the
+# code always prefixes them with EROOT.
+# The variables in this file are grouped by config_root, target_root.
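+# For example (illustrative): with EPREFIX=/home/user/prefix and config_root
+# left at its default, USER_CONFIG_PATH resolves on disk to
+# /home/user/prefix/etc/portage.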
+
+# variables used with config_root (these need to be relative)
+USER_CONFIG_PATH = "etc/portage"
+MAKE_CONF_FILE = USER_CONFIG_PATH + "/make.conf"
+MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
+EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+PROFILE_PATH = USER_CONFIG_PATH + "/make.profile"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
+DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
+
+# variables used with targetroot (these need to be absolute, but not
+# have a leading '/' since they are used directly with os.path.join on EROOT)
+VDB_PATH = "var/db/pkg"
+CACHE_PATH = "var/cache/edb"
+PRIVATE_PATH = "var/lib/portage"
+WORLD_FILE = PRIVATE_PATH + "/world"
+WORLD_SETS_FILE = PRIVATE_PATH + "/world_sets"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+NEWS_LIB_PATH = "var/lib/gentoo"
+
+# these variables get EPREFIX prepended automagically when they are
+# translated into their lowercase variants
+DEPCACHE_PATH = "/var/cache/edb/dep"
+GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
+
+# these variables are not used with target_root or config_root
+PORTAGE_BASE_PATH = PORTAGE_BASE
+# NOTE: Use realpath(__file__) so that python module symlinks in site-packages
+# are followed back to the real location of the whole portage installation.
+#PREFIX: below should work, but I'm not sure how it affects other places
+# NOTE: Please keep PORTAGE_BASE_PATH in one line to help substitutions.
+#PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(__file__.rstrip("co")).split(os.sep)[:-3]))
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
+PORTAGE_PYM_PATH = os.path.realpath(os.path.join(__file__, '../..'))
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = EPREFIX + "/usr/bin/sandbox"
+FAKEROOT_BINARY = EPREFIX + "/usr/bin/fakeroot"
+BASH_BINARY = PORTAGE_BASH
+MOVE_BINARY = PORTAGE_MV
+PRELINK_BINARY = "/usr/sbin/prelink"
+MACOSSANDBOX_BINARY = "/usr/bin/sandbox-exec"
+MACOSSANDBOX_PROFILE = '''(version 1)
+(allow default)
+(deny file-write*)
+(allow file-write*
+@@MACOSSANDBOX_PATHS@@)
+(allow file-write-data
+@@MACOSSANDBOX_PATHS_CONTENT_ONLY@@)'''
+
+PORTAGE_GROUPNAME = portagegroup
+PORTAGE_USERNAME = portageuser
+
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+MERGING_IDENTIFIER = "-MERGING-"
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
+LIBC_PACKAGE_ATOM = "virtual/libc"
+OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+CVS_PACKAGE_ATOM = "dev-vcs/cvs"
+GIT_PACKAGE_ATOM = "dev-vcs/git"
+RSYNC_PACKAGE_ATOM = "net-misc/rsync"
+
+INCREMENTALS = (
+ "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT",
+ "CONFIG_PROTECT_MASK",
+ "FEATURES",
+ "IUSE_IMPLICIT",
+ "PRELINK_PATH",
+ "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES",
+ "USE",
+ "USE_EXPAND",
+ "USE_EXPAND_HIDDEN",
+ "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED",
+)
+EBUILD_PHASES = (
+ "pretend",
+ "setup",
+ "unpack",
+ "prepare",
+ "configure",
+ "compile",
+ "test",
+ "install",
+ "package",
+ "preinst",
+ "postinst",
+ "prerm",
+ "postrm",
+ "nofetch",
+ "config",
+ "info",
+ "other",
+)
+SUPPORTED_FEATURES = frozenset([
+ "assume-digests",
+ "binpkg-logs",
+ "buildpkg",
+ "buildsyspkg",
+ "candy",
+ "case-insensitive-fs",
+ "ccache",
+ "cgroup",
+ "chflags",
+ "clean-logs",
+ "collision-protect",
+ "compress-build-logs",
+ "compressdebug",
+ "compress-index",
+ "config-protect-if-modified",
+ "digest",
+ "distcc",
+ "distcc-pump",
+ "distlocks",
+ "downgrade-backup",
+ "ebuild-locks",
+ "fail-clean",
+ "fakeroot",
+ "fixlafiles",
+ "force-mirror",
+ "force-prefix",
+ "getbinpkg",
+ "installsources",
+ "ipc-sandbox",
+ "keeptemp",
+ "keepwork",
+ "lmirror",
+ "merge-sync",
+ "metadata-transfer",
+ "mirror",
+ "multilib-strict",
+ "network-sandbox",
+ "news",
+ "noauto",
+ "noclean",
+ "nodoc",
+ "noinfo",
+ "noman",
+ "nostrip",
+ "notitles",
+ "parallel-fetch",
+ "parallel-install",
+ "prelink-checksums",
+ "preserve-libs",
+ "protect-owned",
+ "python-trace",
+ "sandbox",
+ "selinux",
+ "sesandbox",
+ "sfperms",
+ "sign",
+ "skiprocheck",
+ "splitdebug",
+ "split-elog",
+ "split-log",
+ "strict",
+ "stricter",
+ "suidctl",
+ "test",
+ "test-fail-continue",
+ "unknown-features-filter",
+ "unknown-features-warn",
+ "unmerge-backup",
+ "unmerge-logs",
+ "unmerge-orphans",
+ "userfetch",
+ "userpriv",
+ "usersandbox",
+ "usersync",
+ "webrsync-gpg",
+ "xattr",
+])
+
+EAPI = 5
+
+HASHING_BLOCKSIZE = 32768
+MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
+MANIFEST1_REQUIRED_HASH = "MD5"
+
+# Past events:
+#
+# 20120704 - After WHIRLPOOL is supported in stable portage:
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+# manifest-hashes = SHA256 SHA512 WHIRLPOOL
+# - Add SHA512 and WHIRLPOOL to MANIFEST2_HASH_DEFAULTS.
+# - Remove SHA1 and RMD160 from MANIFEST2_HASH_*.
+#
+# Future events:
+#
+# After WHIRLPOOL is supported in stable portage for at least 1 year:
+# - Change MANIFEST2_REQUIRED_HASH to WHIRLPOOL.
+# - Remove SHA256 from MANIFEST2_HASH_*.
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+# manifest-hashes = SHA512 WHIRLPOOL
+#
+# After SHA-3 is approved:
+# - Add new hashes to MANIFEST2_HASH_*.
+#
+# After SHA-3 is supported in stable portage:
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+# manifest-hashes = SHA3 SHA512 WHIRLPOOL
+#
+# After layout.conf settings correspond to defaults in stable portage:
+# - Remove redundant settings from gentoo-x86/metadata/layout.conf.
+
+MANIFEST2_HASH_FUNCTIONS = ("SHA256", "SHA512", "WHIRLPOOL")
+MANIFEST2_HASH_DEFAULTS = frozenset(["SHA256", "SHA512", "WHIRLPOOL"])
+MANIFEST2_REQUIRED_HASH = "SHA256"
+
+MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
+
+# The EPREFIX for the current install is hardcoded here, but access to this
+# constant should be minimal, in favor of access via the EPREFIX setting of
+# a config instance (since it's possible to construct a config instance with
+# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used
+# in the definition of any other constants within this file.
+# PREFIX LOCAL: rely on EPREFIX from autotools
+#EPREFIX = ""
+# END PREFIX LOCAL
+
+# pick up EPREFIX from the environment if set
+if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
+ EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
+ if EPREFIX:
+ EPREFIX = os.path.normpath(EPREFIX)
+ if EPREFIX == os.sep:
+ EPREFIX = ""
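+# (Examples, assuming the override is set: "/foo/" is normalized to
+# "/foo", and a bare "/" collapses to "" so that root means no offset.)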
+
+VCS_DIRS = ("CVS", "RCS", "SCCS", ".bzr", ".git", ".hg", ".svn")
+
+# List of known live eclasses. Keep it in sync with cnf/sets/portage.conf
+LIVE_ECLASSES = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "git-r3",
+ "mercurial",
+ "subversion",
+ "tla",
+])
+
+SUPPORTED_BINPKG_FORMATS = ("tar", "rpm")
+
+# Time formats used in various places like metadata.chk.
+TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000" # to be used with time.gmtime()
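+# (e.g. time.strftime(TIMESTAMP_FORMAT, time.gmtime()) yields something
+# like "Sun, 31 May 2015 09:15:46 +0000".)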
+
+# Top-level names of Python packages installed by Portage.
+PORTAGE_PYM_PACKAGES = ("_emerge", "portage", "repoman")
+
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
+
+# Private constants for use in conditional code in order to minimize the diff
+# between branches.
+_DEPCLEAN_LIB_CHECK_DEFAULT = True
+_ENABLE_SET_CONFIG = True
diff --git a/usr/lib/portage/pym/portage/const_autotool.py b/usr/lib/portage/pym/portage/const_autotool.py
new file mode 100644
index 0000000..c113247
--- /dev/null
+++ b/usr/lib/portage/pym/portage/const_autotool.py
@@ -0,0 +1,23 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# all vars that are to wind up in portage_const must have their name listed in __all__
+
+__all__ = ["EPREFIX", "SYSCONFDIR", "PORTAGE_BASE",
+ "portageuser", "portagegroup", "rootuser", "rootuid", "rootgid",
+ "PORTAGE_BASH", "PORTAGE_MV"]
+
+from os import path
+
+EPREFIX = "/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir"
+SYSCONFDIR = "/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/etc"
+PORTAGE_BASE = "/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/lib/portage"
+
+portagegroup = "eng"
+portageuser = "vapier"
+rootuser = "vapier"
+rootuid = 145691
+rootgid = 5000
+
+PORTAGE_BASH = "/bin/bash"
+PORTAGE_MV = "/bin/mv"
diff --git a/usr/lib/portage/pym/portage/cvstree.py b/usr/lib/portage/pym/portage/cvstree.py
new file mode 100644
index 0000000..4a3afae
--- /dev/null
+++ b/usr/lib/portage/pym/portage/cvstree.py
@@ -0,0 +1,315 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import re
+import stat
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# [D]/Name/Version/Date/Flags/Tags
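+# (Example Entries lines, hypothetical names: a file entry looks like
+# "/ChangeLog/1.5/Mon Apr  6 12:00:00 2015//" and a directory entry
+# like "D/files////".)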
+
+def pathdata(entries, path):
+ """Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit = path.split("/")
+ myentries = entries
+ mytarget = mysplit[-1]
+ mysplit = mysplit[:-1]
+ for mys in mysplit:
+ if mys in myentries["dirs"]:
+ myentries = myentries["dirs"][mys]
+ else:
+ return None
+ if mytarget in myentries["dirs"]:
+ return myentries["dirs"][mytarget]
+ elif mytarget in myentries["files"]:
+ return myentries["files"][mytarget]
+ else:
+ return None
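+# (Usage sketch: pathdata(entries, "dir/sub/file") descends through
+# entries["dirs"]["dir"]["dirs"]["sub"] and returns the entry found in
+# its "dirs" or "files" dict, or None when any component is missing.)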
+
+def fileat(entries, path):
+ return pathdata(entries, path)
+
+def isadded(entries, path):
+ """Returns True if the path exists and is added to the cvs tree."""
+ mytarget = pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir = os.path.dirname(path)
+ filename = os.path.basename(path)
+
+ try:
+ myfile = io.open(
+ _unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ except IOError:
+ return 0
+ mylines = myfile.readlines()
+ myfile.close()
+
+	rep = re.compile(r"^/%s/" % re.escape(filename))
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findnew(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findoption(entries, pattern, recursive=0, basedir=""):
+ """Iterate over paths of cvs entries for which the pattern.search() method
+ finds a match. Returns a list of paths, optionally prepended with a
+ basedir.
+ """
+	if basedir and not basedir.endswith("/"):
+		basedir += "/"
+
+ for myfile, mydata in entries["files"].items():
+ if "cvs" in mydata["status"]:
+ if pattern.search(mydata["flags"]):
+ yield basedir + myfile
+
+ if recursive:
+ for mydir, mydata in entries["dirs"].items():
+ for x in findoption(mydata, pattern,
+ recursive, basedir + mydir):
+ yield x
+
+def findchanged(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"] != "0":
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findchanged(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findmissing(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findmissing(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findunadded(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ # Ignore what cvs ignores.
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findunadded(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findremoved(entries, recursive=0, basedir=""):
+	"""Recurses the entries tree to find all elements that are flagged for
+	cvs removal. Returns a list of paths, optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findremoved(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+	"""Recurses the entries tree to find all new, changed, missing, unadded,
+	and removed entities. Returns a 5 element list of lists as returned from
+	each find*().
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+ mynew = findnew(entries, recursive, basedir)
+ mychanged = findchanged(entries, recursive, basedir)
+ mymissing = findmissing(entries, recursive, basedir)
+ myunadded = findunadded(entries, recursive, basedir)
+ myremoved = findremoved(entries, recursive, basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile(r"(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
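+# (Names matched by ignore_list include, for example: "core", "foo~",
+# ".#merge", "patch.rej" and "config.old".)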
+def apply_cvsignore_filter(mylist):
+	x = 0
+	while x < len(mylist):
+		if ignore_list.match(mylist[x].split("/")[-1]):
+			mylist.pop(x)
+		else:
+			x += 1
+	return mylist
+
+def getentries(mydir, recursive=0):
+ """Scans the given directory and returns a datadict of all the entries in
+ the directory separated as a dirs dict and a files dict.
+ """
+ myfn = mydir + "/CVS/Entries"
+ # entries=[dirs, files]
+ entries = {"dirs":{}, "files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile = io.open(_unicode_encode(myfn,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ mylines = myfile.readlines()
+ myfile.close()
+	except SystemExit:
+ raise
+ except:
+ mylines = []
+
+ for line in mylines:
+ if line and line[-1] == "\n":
+ line = line[:-1]
+ if not line:
+ continue
+ if line == "D": # End of entries file
+ break
+ mysplit = line.split("/")
+ if len(mysplit) != 6:
+ print("Confused:", mysplit)
+ continue
+ if mysplit[0] == "D":
+ entries["dirs"][mysplit[1]] = {"dirs":{}, "files":{}, "status":[]}
+ entries["dirs"][mysplit[1]]["status"] = ["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"] += ["exists"]
+ entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
+ if recursive:
+ rentries = getentries(mydir + "/" + mysplit[1], recursive)
+ entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"] = rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]] = {}
+ entries["files"][mysplit[1]]["revision"] = mysplit[2]
+ entries["files"][mysplit[1]]["date"] = mysplit[3]
+ entries["files"][mysplit[1]]["flags"] = mysplit[4]
+ entries["files"][mysplit[1]]["tags"] = mysplit[5]
+ entries["files"][mysplit[1]]["status"] = ["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0] == "-":
+ entries["files"][mysplit[1]]["status"] += ["removed"]
+
+ for file in os.listdir(mydir):
+ if file == "CVS":
+ continue
+ if os.path.isdir(mydir + "/" + file):
+ if file not in entries["dirs"]:
+ if ignore_list.match(file) is not None:
+ continue
+ entries["dirs"][file] = {"dirs":{}, "files":{}}
+ # It's normal for a directory to be unlisted in Entries
+ # when checked out without -P (see bug #257660).
+ rentries = getentries(mydir + "/" + file, recursive)
+ entries["dirs"][file]["dirs"] = rentries["dirs"]
+ entries["dirs"][file]["files"] = rentries["files"]
+ if "status" in entries["dirs"][file]:
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"] += ["exists"]
+ else:
+ entries["dirs"][file]["status"] = ["exists"]
+ elif os.path.isfile(mydir + "/" + file):
+ if file not in entries["files"]:
+ if ignore_list.match(file) is not None:
+ continue
+ entries["files"][file] = {"revision":"", "date":"", "flags":"", "tags":""}
+ if "status" in entries["files"][file]:
+ if "exists" not in entries["files"][file]["status"]:
+ entries["files"][file]["status"] += ["exists"]
+ else:
+ entries["files"][file]["status"] = ["exists"]
+ try:
+ mystat = os.stat(mydir + "/" + file)
+ mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
+ if "status" not in entries["files"][file]:
+ entries["files"][file]["status"] = []
+ if mytime == entries["files"][file]["date"]:
+ entries["files"][file]["status"] += ["current"]
+			except SystemExit:
+ raise
+ except Exception as e:
+ print("failed to stat", file)
+ print(e)
+ return
+
+ elif ignore_list.match(file) is not None:
+ pass
+ else:
+ print()
+ print("File of unknown type:", mydir + "/" + file)
+ print()
+
+ return entries
diff --git a/usr/lib/portage/pym/portage/data.py b/usr/lib/portage/pym/portage/data.py
new file mode 100644
index 0000000..f4bbb44
--- /dev/null
+++ b/usr/lib/portage/pym/portage/data.py
@@ -0,0 +1,281 @@
+# data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, pwd, grp, platform, sys
+from portage.const import PORTAGE_GROUPNAME, PORTAGE_USERNAME, EPREFIX
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+ 'subprocess'
+)
+from portage.localization import _
+
+ostype = platform.system()
+userland = None
+# Prefix always has USERLAND=GNU, even on
+# FreeBSD, OpenBSD and Darwin (thank the lord!).
+# Hopefully this entire USERLAND hack can go away some day.
+if EPREFIX == "" and (ostype == "DragonFly" or ostype.endswith("BSD")):
+ userland = "BSD"
+else:
+ userland = "GNU"
+
+lchown = getattr(os, "lchown", None)
+
+if not lchown:
+ if ostype == "Darwin":
+ def lchown(*_args, **_kwargs):
+ pass
+ else:
+ def lchown(*_args, **_kwargs):
+ writemsg(colorize("BAD", "!!!") + _(
+ " It seems that os.lchown does not"
+ " exist. Please rebuild python.\n"), noiselevel=-1)
+ lchown()
+
+lchown = portage._unicode_func_wrapper(lchown)
+
+def portage_group_warning():
+ warn_prefix = colorize("BAD", "*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
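+#
+# A minimal sketch of how this table maps onto the checks performed
+# lazily in _get_global() below (Prefix installs additionally get
+# secpass = 2):
+#
+#   secpass = 0
+#   if os.getuid() == 0:
+#       secpass = 2
+#   elif portage_gid in os.getgroups():
+#       secpass = 1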
+
+uid = os.getuid()
+wheelgid = 0
+try:
+ wheelgid = grp.getgrnam("wheel")[2]
+except KeyError:
+ pass
+
+# The portage_uid and portage_gid global constants, and others that
+# depend on them are initialized lazily, in order to allow configuration
+# via make.conf. Eventually, these constants may be deprecated in favor
+# of config attributes, since it's conceivable that multiple
+# configurations with different constants could be used simultaneously.
+_initialized_globals = set()
+
+def _get_global(k):
+ if k in _initialized_globals:
+ return globals()[k]
+
+ if k in ('portage_gid', 'portage_uid', 'secpass'):
+ global portage_gid, portage_uid, secpass
+ secpass = 0
+ if uid == 0:
+ secpass = 2
+ elif portage.const.EPREFIX:
+ secpass = 2
+ #Discover the uid and gid of the portage user/group
+ keyerror = False
+ try:
+ username = str(_get_global('_portage_username'))
+ portage_uid = pwd.getpwnam(username).pw_uid
+ except KeyError:
+ # PREFIX LOCAL: some sysadmins are insane, bug #344307
+ if username.isdigit():
+ portage_uid = int(username)
+ else:
+ keyerror = True
+ portage_uid = 0
+ # END PREFIX LOCAL
+
+ try:
+ grpname = str(_get_global('_portage_grpname'))
+ portage_gid = grp.getgrnam(grpname).gr_gid
+ except KeyError:
+ # PREFIX LOCAL: some sysadmins are insane, bug #344307
+ if grpname.isdigit():
+ portage_gid = int(grpname)
+ else:
+ keyerror = True
+ portage_gid = 0
+ # END PREFIX LOCAL
+
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass = 1
+
+ # Suppress this error message if both PORTAGE_GRPNAME and
+ # PORTAGE_USERNAME are set to "root", for things like
+ # Android (see bug #454060).
+ if keyerror and not (_get_global('_portage_username') == "root" and
+ _get_global('_portage_grpname') == "root"):
+ # PREFIX LOCAL: we need to fix this one day to distinguish prefix vs non-prefix
+ writemsg(colorize("BAD",
+ _("portage: '%s' user or '%s' group missing." % (_get_global('_portage_username'), _get_global('_portage_grpname')))) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD",
+ _(" In Prefix Portage this is quite dramatic")) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD",
+ _(" since it means you have thrown away yourself.")) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD",
+ _(" Re-add yourself or re-bootstrap Gentoo Prefix.")) + "\n", noiselevel=-1)
+ # END PREFIX LOCAL
+ portage_group_warning()
+
+ _initialized_globals.add('portage_gid')
+ _initialized_globals.add('portage_uid')
+ _initialized_globals.add('secpass')
+
+ if k == 'portage_gid':
+ return portage_gid
+ elif k == 'portage_uid':
+ return portage_uid
+ elif k == 'secpass':
+ return secpass
+ else:
+ raise AssertionError('unknown name: %s' % k)
+
+ elif k == 'userpriv_groups':
+ v = [portage_gid]
+ if secpass >= 2:
+ # Get a list of group IDs for the portage user. Do not use
+ # grp.getgrall() since it is known to trigger spurious
+ # SIGPIPE problems with nss_ldap.
+ cmd = ["id", "-G", _portage_username]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+ cmd[0] = fullname
+
+ encoding = portage._encodings['content']
+ cmd = [portage._unicode_encode(x,
+ encoding=encoding, errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ myoutput = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ for x in portage._unicode_decode(myoutput,
+ encoding=encoding, errors='strict').split():
+ try:
+ v.append(int(x))
+ except ValueError:
+ pass
+ v = sorted(set(v))
+
+ # Avoid instantiating portage.settings when the desired
+ # variable is set in os.environ.
+ elif k in ('_portage_grpname', '_portage_username'):
+ v = None
+ if k == '_portage_grpname':
+ env_key = 'PORTAGE_GRPNAME'
+ else:
+ env_key = 'PORTAGE_USERNAME'
+
+ if env_key in os.environ:
+ v = os.environ[env_key]
+ elif hasattr(portage, 'settings'):
+ v = portage.settings.get(env_key)
+ elif portage.const.EPREFIX:
+ # For prefix environments, default to the UID and GID of
+ # the top-level EROOT directory. The config class has
+ # equivalent code, but we also need to do it here if
+ # _disable_legacy_globals() has been called.
+ eroot = os.path.join(os.environ.get('ROOT', os.sep),
+ portage.const.EPREFIX.lstrip(os.sep))
+ try:
+ eroot_st = os.stat(eroot)
+ except OSError:
+ pass
+ else:
+ if k == '_portage_grpname':
+ try:
+ grp_struct = grp.getgrgid(eroot_st.st_gid)
+ except KeyError:
+ v = eroot_st.st_gid
+ else:
+ v = grp_struct.gr_name
+ else:
+ try:
+ pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+ except KeyError:
+ v = eroot_st.st_uid
+ else:
+ v = pwd_struct.pw_name
+
+ if v is None:
+			# PREFIX LOCAL: use var instead of hardwired 'portage'
+ if k == '_portage_grpname':
+ v = PORTAGE_GROUPNAME
+ else:
+ v = PORTAGE_USERNAME
+ # END PREFIX LOCAL
+ else:
+ raise AssertionError('unknown name: %s' % k)
+
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+
+class _GlobalProxy(portage.proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ portage.proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ return _get_global(object.__getattribute__(self, '_name'))
+
+for k in ('portage_gid', 'portage_uid', 'secpass', 'userpriv_groups',
+ '_portage_grpname', '_portage_username'):
+ globals()[k] = _GlobalProxy(k)
+del k
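+
+# (Illustration: reading e.g. portage.data.secpass now resolves through
+# _GlobalProxy._get_target() into _get_global('secpass'), so the pwd/grp
+# lookups above only happen on first access.)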
+
+def _init(settings):
+ """
+ Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to
+ initialize global variables. This allows settings to come from make.conf
+ instead of requiring them to be set in the calling environment.
+ """
+ if '_portage_grpname' not in _initialized_globals and \
+ '_portage_username' not in _initialized_globals:
+
+ # Prevents "TypeError: expected string" errors
+ # from grp.getgrnam() with PyPy
+ native_string = platform.python_implementation() == 'PyPy'
+
+		# PREFIX LOCAL: use var instead of hardwired 'portage'
+ v = settings.get('PORTAGE_GRPNAME', PORTAGE_GROUPNAME)
+ # END PREFIX LOCAL
+ if native_string:
+ v = portage._native_string(v)
+ globals()['_portage_grpname'] = v
+ _initialized_globals.add('_portage_grpname')
+
+		# PREFIX LOCAL: use var instead of hardwired 'portage'
+ v = settings.get('PORTAGE_USERNAME', PORTAGE_USERNAME)
+ # END PREFIX LOCAL
+ if native_string:
+ v = portage._native_string(v)
+ globals()['_portage_username'] = v
+ _initialized_globals.add('_portage_username')
diff --git a/usr/lib/portage/pym/portage/dbapi/_MergeProcess.py b/usr/lib/portage/pym/portage/dbapi/_MergeProcess.py
new file mode 100644
index 0000000..956dbb9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/_MergeProcess.py
@@ -0,0 +1,279 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import platform
+import signal
+import sys
+import traceback
+
+import errno
+import fcntl
+import portage
+from portage import os, _unicode_decode
+from portage.util._ctypes import find_library
+import portage.elog.messages
+from portage.util._async.ForkProcess import ForkProcess
+
+class MergeProcess(ForkProcess):
+ """
+ Merge packages in a subprocess, so the Scheduler can run in the main
+ thread while files are moved or copied asynchronously.
+ """
+
+ __slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
+ 'vartree', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+ 'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id',
+ '_buf', '_elog_keys', '_locked_vdb')
+
+ def _start(self):
+ # Portage should always call setcpv prior to this
+ # point, but here we have a fallback as a convenience
+ # for external API consumers. It's important that
+ # this metadata access happens in the parent process,
+ # since closing of file descriptors in the subprocess
+ # can prevent access to open database connections such
+ # as that used by the sqlite metadata cache module.
+ cpv = "%s/%s" % (self.mycat, self.mypkg)
+ settings = self.settings
+ if cpv != settings.mycpv or \
+ "EAPI" not in settings.configdict["pkg"]:
+ settings.reload()
+ settings.reset()
+ settings.setcpv(cpv, mydb=self.mydbapi)
+
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if platform.system() == "Linux" and \
+ "merge-sync" in settings.features:
+ find_library("c")
+
+ # Inherit stdin by default, so that the pdb SIGUSR1
+ # handler is usable for the subprocess.
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
+
+ super(MergeProcess, self)._start()
+
+ def _lock_vdb(self):
+ """
+ Lock the vdb if FEATURES=parallel-install is NOT enabled,
+ otherwise do nothing. This is implemented with
+ vardbapi.lock(), which supports reentrance by the
+ subprocess that we spawn.
+ """
+ if "parallel-install" not in self.settings.features:
+ self.vartree.dbapi.lock()
+ self._locked_vdb = True
+
+ def _unlock_vdb(self):
+ """
+ Unlock the vdb if we hold a lock, otherwise do nothing.
+ """
+ if self._locked_vdb:
+ self.vartree.dbapi.unlock()
+ self._locked_vdb = False
+
+ def _elog_output_handler(self, fd, event):
+ output = None
+ if event & self.scheduler.IO_IN:
+ try:
+ output = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN, errno.EINTR):
+ raise
+ if output:
+ lines = _unicode_decode(output).split('\n')
+ if len(lines) == 1:
+ self._buf += lines[0]
+ else:
+ lines[0] = self._buf + lines[0]
+ self._buf = lines.pop()
+ out = io.StringIO()
+ for line in lines:
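+					# Each complete line has the form (hypothetical
+					# values): "einfo postinst sys-apps/foo-1.0 text",
+					# i.e. funcname, phase, key and the message.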
+ funcname, phase, key, msg = line.split(' ', 3)
+ self._elog_keys.add(key)
+ reporter = getattr(portage.elog.messages, funcname)
+ reporter(msg, phase=phase, key=key, out=out)
+
+ if event & self.scheduler.IO_HUP:
+ self.scheduler.source_remove(self._elog_reg_id)
+ self._elog_reg_id = None
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ return False
+
+ return True
+
+ def _spawn(self, args, fd_pipes, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call
+ dblink.merge(). TODO: Share code with ForkProcess.
+ """
+
+ elog_reader_fd, elog_writer_fd = os.pipe()
+
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ blockers = None
+ if self.blockers is not None:
+ # Query blockers in the main process, since closing
+ # of file descriptors in the subprocess can prevent
+ # access to open database connections such as that
+ # used by the sqlite metadata cache module.
+ blockers = self.blockers()
+ mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
+ treetype=self.treetype, vartree=self.vartree,
+ blockers=blockers, pipe=elog_writer_fd)
+ fd_pipes[elog_writer_fd] = elog_writer_fd
+ self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
+ self._registered_events, self._elog_output_handler)
+
+ # If a concurrent emerge process tries to install a package
+ # in the same SLOT as this one at the same time, there is an
+ # extremely unlikely chance that the COUNTER values will not be
+ # ordered correctly unless we lock the vdb here.
+ # FEATURES=parallel-install skips this lock in order to
+ # improve performance, and the risk is practically negligible.
+ self._lock_vdb()
+ counter = None
+ if not self.unmerge:
+ counter = self.vartree.dbapi.counter_tick()
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+ # Discard messages which will be collected by the subprocess,
+ # in order to avoid duplicates (bug #446136).
+ portage.elog.messages.collect_messages(key=mylink.mycpv)
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ return [pid]
+
+ os.close(elog_reader_fd)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+			# Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ if not self.unmerge:
+ # Populate the vardbapi cache for the new package
+ # while its inodes are still hot.
+ try:
+ self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
+ except KeyError:
+ pass
+
+ self._unlock_vdb()
+ if self._elog_reg_id is not None:
+ self.scheduler.source_remove(self._elog_reg_id)
+ self._elog_reg_id = None
+ if self._elog_reader_fd is not None:
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ if self._elog_keys is not None:
+ for key in self._elog_keys:
+ portage.elog.elog_process(key, self.settings,
+ phasefilter=("prerm", "postrm"))
+ self._elog_keys = None
+
+ super(MergeProcess, self)._unregister()
diff --git a/usr/lib/portage/pym/portage/dbapi/_SyncfsProcess.py b/usr/lib/portage/pym/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 0000000..7518214
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+ """
+ Isolate ctypes usage in a subprocess, in order to avoid
+ potential problems with stale cached libraries as
+ described in bug #448858, comment #14 (also see
+ http://bugs.python.org/issue14597).
+ """
+
+ __slots__ = ('paths',)
+
+ @staticmethod
+ def _get_syncfs():
+
+ filename = find_library("c")
+ if filename is not None:
+ library = LoadLibrary(filename)
+ if library is not None:
+ try:
+ return library.syncfs
+ except AttributeError:
+ pass
+
+ return None
+
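+	# (Note: syncfs(2) is Linux-specific and its libc wrapper only
+	# appeared in glibc 2.14, so the AttributeError fallback above is a
+	# normal outcome on older or non-GNU systems.)
+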
+ def _run(self):
+
+ syncfs_failed = False
+ syncfs = self._get_syncfs()
+
+ if syncfs is not None:
+ for path in self.paths:
+ try:
+ fd = os.open(path, os.O_RDONLY)
+ except OSError:
+ pass
+ else:
+ try:
+ if syncfs(fd) != 0:
+ # Happens with PyPy (bug #446610)
+ syncfs_failed = True
+ finally:
+ os.close(fd)
+
+ if syncfs is None or syncfs_failed:
+ return 1
+ return os.EX_OK
diff --git a/usr/lib/portage/pym/portage/dbapi/__init__.py b/usr/lib/portage/pym/portage/dbapi/__init__.py
new file mode 100644
index 0000000..34dfaa7
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/__init__.py
@@ -0,0 +1,387 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["dbapi"]
+
+import re
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi.dep_expand:dep_expand@_dep_expand',
+ 'portage.dep:Atom,match_from_list,_match_slot',
+ 'portage.output:colorize',
+ 'portage.util:cmp_sort_key,writemsg',
+ 'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
+)
+
+from portage.const import MERGING_IDENTIFIER
+
+from portage import os
+from portage import auxdbkeys
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from portage.localization import _
+from _emerge.Package import Package
+
+class dbapi(object):
+ _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
+ _categories = None
+ _use_mutable = False
+ _known_keys = frozenset(x for x in auxdbkeys
+ if not x.startswith("UNUSED_0"))
+ _pkg_str_aux_keys = ("EAPI", "KEYWORDS", "SLOT", "repository")
+
+ def __init__(self):
+ pass
+
+ @property
+ def categories(self):
+ """
+ Use self.cp_all() to generate a category list. Mutable instances
+ can delete the self._categories attribute in cases when the cached
+ categories become invalid and need to be regenerated.
+ """
+ if self._categories is not None:
+ return self._categories
+ self._categories = tuple(sorted(set(catsplit(x)[0] \
+ for x in self.cp_all())))
+ return self._categories
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self, cp, use_cache=1):
+ raise NotImplementedError(self)
+
+ @staticmethod
+ def _cmp_cpv(cpv1, cpv2):
+ return vercmp(cpv1.version, cpv2.version)
+
+ @staticmethod
+ def _cpv_sort_ascending(cpv_list):
+ """
+ Use this to sort self.cp_list() results in ascending
+ order. It sorts in place and returns None.
+ """
+ if len(cpv_list) > 1:
+ # If the cpv includes explicit -r0, it has to be preserved
+ # for consistency in findname and aux_get calls, so use a
+ # dict to map strings back to their original values.
+ cpv_list.sort(key=cmp_sort_key(dbapi._cmp_cpv))
+
+ def cpv_all(self):
+ """Return all CPVs in the db
+ Args:
+ None
+ Returns:
+ A list of Strings, 1 per CPV
+
+		This function relies on a subclass implementing cp_all; this is why the hasattr check is there
+ """
+
+ if not hasattr(self, "cp_all"):
+ raise NotImplementedError
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def cp_all(self):
+ """ Implement this in a child class
+		Args:
+ None
+ Returns:
+ A list of strings 1 per CP in the datastore
+ """
+		raise NotImplementedError
+
+ def aux_get(self, mycpv, mylist, myrepo=None):
+ """Return the metadata keys in mylist for mycpv
+ Args:
+ mycpv - "sys-apps/foo-1.0"
+ mylist - ["SLOT","DEPEND","HOMEPAGE"]
+ myrepo - The repository name.
+ Returns:
+ a list of results, in order of keys in mylist, such as:
+			["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
+ """
+ raise NotImplementedError
+
+ def aux_update(self, cpv, metadata_updates):
+ """
+ Args:
+ cpv - "sys-apps/foo-1.0"
+ metadata_updates = { key : newvalue }
+ Returns:
+ None
+ """
+ raise NotImplementedError
+
+ def match(self, origdep, use_cache=1):
+ """Given a dependency, try to find packages that match
+ Args:
+ origdep - Depend atom
+ use_cache - Boolean indicating if we should use the cache or not
+ NOTE: Do we ever not want the cache?
+ Returns:
+ a list of packages that match origdep
+ """
+ mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+
+ def _iter_match(self, atom, cpv_iter):
+ cpv_iter = iter(match_from_list(atom, cpv_iter))
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter)
+ if atom.slot:
+ cpv_iter = self._iter_match_slot(atom, cpv_iter)
+ if atom.unevaluated_atom.use:
+ cpv_iter = self._iter_match_use(atom, cpv_iter)
+ return cpv_iter
+
+ def _pkg_str(self, cpv, repo):
+ """
+		This is used to construct _pkg_str instances on-demand during
+ matching. If cpv is a _pkg_str instance with slot attribute,
+ then simply return it. Otherwise, fetch metadata and construct
+ a _pkg_str instance. This may raise KeyError or InvalidData.
+ """
+ try:
+ cpv.slot
+ except AttributeError:
+ pass
+ else:
+ return cpv
+
+ metadata = dict(zip(self._pkg_str_aux_keys,
+ self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
+
+ return _pkg_str(cpv, metadata=metadata, settings=self.settings)
+
+ def _iter_match_repo(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if pkg_str.repo == atom.repo:
+ yield pkg_str
+
+ def _iter_match_slot(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if _match_slot(atom, pkg_str):
+ yield pkg_str
+
+ def _iter_match_use(self, atom, cpv_iter):
+ """
+ 1) Check for required IUSE intersection (need implicit IUSE here).
+ 2) Check enabled/disabled flag states.
+ """
+
+ aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
+ for cpv in cpv_iter:
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys, myrepo=atom.repo)))
+ except KeyError:
+ continue
+
+ try:
+ cpv.slot
+ except AttributeError:
+ try:
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
+ if not self._match_use(atom, cpv, metadata):
+ continue
+
+ yield cpv
+
+ def _match_use(self, atom, pkg, metadata):
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ usealiases = self.settings._use_manager.getUseAliases(pkg)
+ iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
+
+ for x in atom.unevaluated_atom.use.required:
+ if iuse.get_real_flag(x) is None:
+ return False
+
+ if atom.use is None:
+ pass
+
+ elif not self._use_mutable:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
+ disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
+
+ if enabled:
+ if any(x in enabled for x in missing_disabled):
+ return False
+ need_enabled = enabled.difference(use)
+ if need_enabled:
+ if any(x not in missing_enabled for x in need_enabled):
+ return False
+
+ if disabled:
+ if any(x in disabled for x in missing_enabled):
+ return False
+ need_disabled = disabled.intersection(use)
+ if need_disabled:
+ if any(x not in missing_disabled for x in need_disabled):
+ return False
+
+ elif not self.settings.local_config:
+ # Check masked and forced flags for repoman.
+ usemask = self.settings._getUseMask(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in usemask for x in atom.use.enabled):
+ return False
+
+ useforce = self.settings._getUseForce(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in useforce and x not in usemask
+ for x in atom.use.disabled):
+ return False
+
+ # Check unsatisfied use-default deps
+ if atom.use.enabled:
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.enabled for x in missing_disabled):
+ return False
+ if atom.use.disabled:
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.disabled for x in missing_enabled):
+ return False
+
+ return True
+
+ def invalidentry(self, mypath):
+ if "/" + MERGING_IDENTIFIER in mypath:
+ if os.path.exists(mypath):
+ writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
+ noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+ def update_ents(self, updates, onProgress=None, onUpdate=None):
+ """
+ Update metadata of all packages for package moves.
+ @param updates: A list of move commands, or dict of {repo_name: list}
+ @type updates: list or dict
+ @param onProgress: A progress callback function
+ @type onProgress: a callable that takes 2 integer arguments: maxval and curval
+ @param onUpdate: A progress callback function called only
+ for packages that are modified by updates.
+ @type onUpdate: a callable that takes 2 integer arguments:
+ maxval and curval
+ """
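+		# (Example shapes, hypothetical atoms: a plain list such as
+		#   [["move", Atom("app-misc/old"), Atom("app-misc/new")]]
+		# or a per-repository dict such as
+		#   {"gentoo": [...], "DEFAULT": [...]} -- see repo_dict below.)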
+ cpv_all = self.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ aux_get = self.aux_get
+ aux_update = self.aux_update
+ update_keys = Package._dep_keys + ("PROVIDE",)
+ meta_keys = update_keys + self._pkg_str_aux_keys
+ repo_dict = None
+ if isinstance(updates, dict):
+ repo_dict = updates
+ if onUpdate:
+ onUpdate(maxval, 0)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in update_keys)
+ if repo_dict is None:
+ updates_list = updates
+ else:
+ try:
+ updates_list = repo_dict[pkg.repo]
+ except KeyError:
+ try:
+ updates_list = repo_dict['DEFAULT']
+ except KeyError:
+ continue
+
+ if not updates_list:
+ continue
+
+ metadata_updates = \
+ portage.update_dbentries(updates_list, metadata, parent=pkg)
+ if metadata_updates:
+ aux_update(cpv, metadata_updates)
+ if onUpdate:
+ onUpdate(maxval, i+1)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ def move_slot_ent(self, mylist, repo_match=None):
+ """This function takes a sequence:
+ Args:
+			mylist: a sequence of the form
+				("slotmove", atom, originalslot, newslot)
+			repo_match: callable that takes a single repo_name argument
+				and returns True if the update should be applied
+ Returns:
+ The number of slotmoves this function did
+ """
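+		# (e.g. mylist = ["slotmove", Atom("dev-libs/foo"), "0", "1"],
+		# matching the mylist[1:4] accesses below.)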
+ atom = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+
+ try:
+ atom.with_slot
+ except AttributeError:
+ atom = Atom(atom).with_slot(origslot)
+ else:
+ atom = atom.with_slot(origslot)
+
+ origmatches = self.match(atom)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self._pkg_str(mycpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match is not None and not repo_match(mycpv.repo):
+ continue
+ moves += 1
+ if "/" not in newslot and \
+ mycpv.sub_slot and \
+ mycpv.sub_slot not in (mycpv.slot, newslot):
+ newslot = "%s/%s" % (newslot, mycpv.sub_slot)
+ mydata = {"SLOT": newslot+"\n"}
+ self.aux_update(mycpv, mydata)
+ return moves
diff --git a/usr/lib/portage/pym/portage/dbapi/_expand_new_virt.py b/usr/lib/portage/pym/portage/dbapi/_expand_new_virt.py
new file mode 100644
index 0000000..9aa603d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/_expand_new_virt.py
@@ -0,0 +1,81 @@
+# Copyright 2011-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import portage
+from portage.dep import Atom, _get_useflag_re
+from portage.eapi import _get_eapi_attrs
+
+def expand_new_virt(vardb, atom):
+ """
+ Iterate over the recursively expanded RDEPEND atoms of
+ a new-style virtual. If atom is not a new-style virtual
+ or it does not match an installed package then it is
+ yielded without any expansion.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ traversed = set()
+ stack = [atom]
+
+ while stack:
+ atom = stack.pop()
+ if atom.blocker or \
+ not atom.cp.startswith("virtual/"):
+ yield atom
+ continue
+
+ matches = vardb.match(atom)
+ if not (matches and matches[-1].startswith("virtual/")):
+ yield atom
+ continue
+
+ virt_cpv = matches[-1]
+ if virt_cpv in traversed:
+ continue
+
+ traversed.add(virt_cpv)
+ eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
+ ["EAPI", "IUSE", "RDEPEND", "USE"])
+ if not portage.eapi_is_supported(eapi):
+ yield atom
+ continue
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+		# Validate IUSE and USE, for early detection of vardb corruption.
+ useflag_re = _get_useflag_re(eapi)
+ valid_iuse = []
+ for x in iuse.split():
+ if x[:1] in ("+", "-"):
+ x = x[1:]
+ if useflag_re.match(x) is not None:
+ valid_iuse.append(x)
+ valid_iuse = frozenset(valid_iuse)
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = vardb.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+
+ valid_use = []
+ for x in use.split():
+ if x in valid_iuse or iuse_implicit_match(x):
+ valid_use.append(x)
+ valid_use = frozenset(valid_use)
+
+ success, atoms = portage.dep_check(rdepend,
+ None, vardb.settings, myuse=valid_use,
+ myroot=vardb.settings['EROOT'],
+ trees={vardb.settings['EROOT']:{"porttree":vardb.vartree,
+ "vartree":vardb.vartree}})
+
+ if success:
+ stack.extend(atoms)
+ else:
+ yield atom
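+
+# (Usage sketch, hypothetical atom: iterating
+# expand_new_virt(vardb, "virtual/udev") yields the provider atoms from
+# the installed virtual's RDEPEND, or the atom itself when no matching
+# new-style virtual is installed.)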
diff --git a/usr/lib/portage/pym/portage/dbapi/_similar_name_search.py b/usr/lib/portage/pym/portage/dbapi/_similar_name_search.py
new file mode 100644
index 0000000..b6e4a1f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/_similar_name_search.py
@@ -0,0 +1,57 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+
+from portage.versions import catsplit
+
+def similar_name_search(dbs, atom):
+
+ cp_lower = atom.cp.lower()
+ cat, pkg = catsplit(cp_lower)
+ if cat == "null":
+ cat = None
+
+ all_cp = set()
+ for db in dbs:
+ all_cp.update(db.cp_all())
+
+ # discard dir containing no ebuilds
+ all_cp.discard(atom.cp)
+
+ orig_cp_map = {}
+ for cp_orig in all_cp:
+ orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+ all_cp = set(orig_cp_map)
+
+ if cat:
+ matches = difflib.get_close_matches(cp_lower, all_cp)
+ else:
+ pkg_to_cp = {}
+ for other_cp in list(all_cp):
+ other_pkg = catsplit(other_cp)[1]
+ if other_pkg == pkg:
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if catsplit(cp_orig)[1] != \
+ catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
+ pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+
+ pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+ matches = []
+ for pkg_match in pkg_matches:
+ matches.extend(pkg_to_cp[pkg_match])
+
+ matches_orig_case = []
+ for cp in matches:
+ matches_orig_case.extend(orig_cp_map[cp])
+
+ return matches_orig_case
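+
+# (Usage sketch, hypothetical typo: for an atom like "sys-apps/portge",
+# similar_name_search(dbs, atom) would return close matches such as
+# ["sys-apps/portage"], with original upper/lower case restored.)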
diff --git a/usr/lib/portage/pym/portage/dbapi/bintree.py b/usr/lib/portage/pym/portage/dbapi/bintree.py
new file mode 100644
index 0000000..45e8614
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/bintree.py
@@ -0,0 +1,1500 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
+ 'portage.output:EOutput,colorize',
+ 'portage.locks:lockfile,unlockfile',
+ 'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
+ 'portage.update:update_dbentries',
+ 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+ 'writemsg,writemsg_stdout',
+ 'portage.util.listdir:listdir',
+ 'portage.util._urlopen:urlopen@_urlopen',
+ 'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
+ ParseError, PermissionDenied, PortageException
+from portage.const import EAPI
+from portage.localization import _
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import traceback
+import warnings
+from gzip import GzipFile
+from itertools import chain
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+ long = int
+else:
+ _unicode = unicode
+
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
+
+class bindbapi(fakedbapi):
+ _known_keys = frozenset(list(fakedbapi._known_keys) + \
+ ["CHOST", "repository", "USE"])
+ def __init__(self, mybintree=None, **kwargs):
+ fakedbapi.__init__(self, **kwargs)
+ self.bintree = mybintree
+ self.move_ent = mybintree.move_ent
+ self.cpvdict={}
+ self.cpdict={}
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "EPREFIX"
+ ])
+ self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+ self._aux_cache = {}
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def cpv_exists(self, cpv, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_exists(self, cpv)
+
+ def cpv_inject(self, cpv, **kwargs):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_inject(self, cpv, **kwargs)
+
+ def cpv_remove(self, cpv):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_remove(self, cpv)
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ cache_me = False
+ if not self._known_keys.intersection(
+ wants).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in wants]
+ cache_me = True
+ mysplit = mycpv.split("/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.bintree._remotepkgs or \
+ not self.bintree.isremote(mycpv):
+ tbz2_path = self.bintree.getname(mycpv)
+ if not os.path.exists(tbz2_path):
+ raise KeyError(mycpv)
+ metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+ def getitem(k):
+ v = metadata_bytes.get(_unicode_encode(k,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ if v is not None:
+ v = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+ return v
+ else:
+ getitem = self.bintree._remotepkgs[mycpv].get
+ mydata = {}
+ mykeys = wants
+ if cache_me:
+ mykeys = self._aux_cache_keys.union(wants)
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
+
+ if cache_me:
+ aux_cache = self._aux_cache_slot_dict()
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, '')
+ self._aux_cache[mycpv] = aux_cache
+ return [mydata.get(x, '') for x in wants]
+
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ for k, v in values.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata[k] = v
+
+ for k, v in list(mydata.items()):
+ if not v:
+ del mydata[k]
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ # inject will clear stale caches via cpv_inject.
+ self.bintree.inject(cpv)
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cp_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_all(self)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+ def getfetchsizes(self, pkg):
+ """
+ This will raise MissingSignature if SIZE signature is not available,
+ or InvalidSignature if SIZE signature is invalid.
+ """
+
+ if not self.bintree.populated:
+ self.bintree.populate()
+
+ pkg = getattr(pkg, 'cpv', pkg)
+
+ filesdict = {}
+		if self.bintree.isremote(pkg):
+			metadata = self.bintree._remotepkgs[pkg]
+ try:
+ size = int(metadata["SIZE"])
+ except KeyError:
+ raise portage.exception.MissingSignature("SIZE")
+ except ValueError:
+ raise portage.exception.InvalidSignature(
+ "SIZE: %s" % metadata["SIZE"])
+ else:
+ filesdict[os.path.basename(self.bintree.getname(pkg))] = size
+
+ return filesdict
+
+def _pkgindex_cpv_map_latest_build(pkgindex):
+ """
+	Given a PackageIndex instance, create a cpv -> metadata map.
+	If multiple packages have identical CPV values, prefer the package
+	with the latest BUILD_TIME value.
+	@param pkgindex: A PackageIndex instance.
+	@type pkgindex: PackageIndex
+	@rtype: dict
+	@return: a dict mapping each cpv to the metadata of its latest build.
+ """
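+	# Illustrative sketch (hypothetical entries): given
+	#   {"CPV": "app-misc/foo-1", "BUILD_TIME": "100"}
+	#   {"CPV": "app-misc/foo-1", "BUILD_TIME": "200"}
+	# only the entry with BUILD_TIME "200" survives in the returned map; an
+	# entry whose BUILD_TIME does not parse as an integer never displaces
+	# one that does.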
+ cpv_map = {}
+
+ for d in pkgindex.packages:
+ cpv = d["CPV"]
+
+ try:
+ cpv = _pkg_str(cpv)
+ except InvalidData:
+ writemsg(_("!!! Invalid remote binary package: %s\n") % cpv,
+ noiselevel=-1)
+ continue
+
+ btime = d.get('BUILD_TIME', '')
+ try:
+ btime = int(btime)
+ except ValueError:
+ btime = None
+
+ other_d = cpv_map.get(cpv)
+ if other_d is not None:
+ other_btime = other_d.get('BUILD_TIME', '')
+ try:
+ other_btime = int(other_btime)
+ except ValueError:
+ other_btime = None
+ if other_btime and (not btime or other_btime > btime):
+ continue
+
+		# cpv was already converted to _pkg_str above.
+		cpv_map[cpv] = d
+
+ return cpv_map
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
+ virtual=DeprecationWarning, settings=None):
+
+ if pkgdir is None:
+ raise TypeError("pkgdir parameter is required")
+
+ if settings is None:
+ raise TypeError("settings parameter is required")
+
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ if True:
+ self.pkgdir = normalize_path(pkgdir)
+ self.dbapi = bindbapi(self, settings=settings)
+ self.update_ents = self.dbapi.update_ents
+ self.move_slot_ent = self.dbapi.move_slot_ent
+ self.populated = 0
+ self.tree = {}
+ self._remote_has_index = False
+ self._remotepkgs = None # remote metadata indexed by cpv
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+ self._pkgindex_uri = {}
+ self._populating = False
+ self._all_directory = os.path.isdir(
+ os.path.join(self.pkgdir, "All"))
+ self._pkgindex_version = 0
+ self._pkgindex_hashes = ["MD5","SHA1"]
+ self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+ self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+ self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
+ self._pkgindex_aux_keys = \
+ ["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RESTRICT", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "BASE_URI", "EPREFIX"]
+ self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+ self._pkgindex_use_evaluated_keys = \
+ ("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RESTRICT")
+ self._pkgindex_header_keys = set([
+ "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED",
+ "EPREFIX"])
+ self._pkgindex_default_pkg_data = {
+ "BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
+ "DEPEND" : "",
+ "EAPI" : "0",
+ "HDEPEND" : "",
+ "IUSE" : "",
+ "KEYWORDS": "",
+ "LICENSE" : "",
+ "PATH" : "",
+ "PDEPEND" : "",
+ "PROPERTIES" : "",
+ "PROVIDE" : "",
+ "RDEPEND" : "",
+ "RESTRICT": "",
+ "SLOT" : "0",
+ "USE" : "",
+ }
+ self._pkgindex_inherited_keys = ["CHOST", "repository", "EPREFIX"]
+
+ # Populate the header with appropriate defaults.
+ self._pkgindex_default_header_data = {
+ "CHOST" : self.settings.get("CHOST", ""),
+ "repository" : "",
+ }
+
+ # It is especially important to populate keys like
+ # "repository" that save space when entries can
+ # inherit them from the header. If an existing
+ # pkgindex header already defines these keys, then
+ # they will appropriately override our defaults.
+ main_repo = self.settings.repositories.mainRepo()
+ if main_repo is not None and not main_repo.missing_repo_name:
+ self._pkgindex_default_header_data["repository"] = \
+ main_repo.name
+
+ self._pkgindex_translated_keys = (
+ ("DESCRIPTION" , "DESC"),
+ ("repository" , "REPO"),
+ )
+
+ self._pkgindex_allowed_pkg_keys = set(chain(
+ self._pkgindex_keys,
+ self._pkgindex_aux_keys,
+ self._pkgindex_hashes,
+ self._pkgindex_default_pkg_data,
+ self._pkgindex_inherited_keys,
+ chain(*self._pkgindex_translated_keys)
+ ))
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.bintree.binarytree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def move_ent(self, mylist, repo_match=None):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ mynewcat = catsplit(newcp)[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self.dbapi._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
+ mycpv_cp = portage.cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
+ myoldpkg = catsplit(mycpv)[1]
+ mynewpkg = catsplit(mynewcpv)[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+ noiselevel=-1)
+ continue
+
+ moves += 1
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
+ mydata.update(updated_items)
+ mydata[b'PF'] = \
+ _unicode_encode(mynewpkg + "\n",
+ encoding=_encodings['repo.content'])
+ mydata[b'CATEGORY'] = \
+ _unicode_encode(mynewcat + "\n",
+ encoding=_encodings['repo.content'])
+ if mynewpkg != myoldpkg:
+ ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+ encoding=_encodings['repo.content']), None)
+ if ebuild_data is not None:
+ mydata[_unicode_encode(mynewpkg + '.ebuild',
+ encoding=_encodings['repo.content'])] = ebuild_data
+
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[mycpv]
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[mynewcpv] = os.path.join(
+ *new_path.split(os.path.sep)[-2:])
+			if new_path != tbz2path:
+ self._ensure_dir(os.path.dirname(new_path))
+ _movefile(tbz2path, new_path, mysettings=self.settings)
+ self._remove_symlink(mycpv)
+ if new_path.split(os.path.sep)[-2] == "All":
+ self._create_symlink(mynewcpv)
+ self.inject(mynewcpv)
+
+ return moves
+
+ def _remove_symlink(self, cpv):
+ """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+ the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
+ removed if os.path.islink() returns False."""
+ mycat, mypkg = catsplit(cpv)
+ mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ if os.path.islink(mylink):
+			# Only remove it if it's really a link, so that this method never
+			# removes a real package that was placed here to avoid a collision.
+ os.unlink(mylink)
+ try:
+ os.rmdir(os.path.join(self.pkgdir, mycat))
+ except OSError as e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST):
+ raise
+ del e
+
+ def _create_symlink(self, cpv):
+ """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+ ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
+ exist in the location of the symlink will first be removed."""
+ mycat, mypkg = catsplit(cpv)
+ full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ self._ensure_dir(os.path.dirname(full_path))
+ try:
+ os.unlink(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
+ def prevent_collision(self, cpv):
+ """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+ use for a given cpv. If a collision will occur with an existing
+ package from another category, the existing package will be bumped to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
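+		# For example (hypothetical): if sys-apps/foo-1 already occupies
+		# ${PKGDIR}/All/foo-1.tbz2, prevent_collision("app-misc/foo-1") first
+		# bumps the existing package to ${PKGDIR}/sys-apps/foo-1.tbz2.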
+ if not self._all_directory:
+ return
+
+ # Copy group permissions for new directories that
+ # may have been created.
+ for path in ("All", catsplit(cpv)[0]):
+ path = os.path.join(self.pkgdir, path)
+ self._ensure_dir(path)
+ if not os.access(path, os.W_OK):
+ raise PermissionDenied("access('%s', W_OK)" % path)
+
+ full_path = self.getname(cpv)
+ if "All" == full_path.split(os.path.sep)[-2]:
+ return
+		# Move a colliding package if it exists. Code below this point only
+		# executes in rare cases.
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join("All", myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+
+ try:
+ st = os.lstat(dest_path)
+ except OSError:
+ st = None
+ else:
+ if stat.S_ISLNK(st.st_mode):
+ st = None
+ try:
+ os.unlink(dest_path)
+ except OSError:
+ if os.path.exists(dest_path):
+ raise
+
+ if st is not None:
+ # For invalid packages, other_cat could be None.
+ other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
+ if other_cat:
+ other_cat = _unicode_decode(other_cat,
+ encoding=_encodings['repo.content'], errors='replace')
+ other_cat = other_cat.strip()
+ other_cpv = other_cat + "/" + mypkg
+ self._move_from_all(other_cpv)
+ self.inject(other_cpv)
+ self._move_to_all(cpv)
+
+ def _ensure_dir(self, path):
+ """
+ Create the specified directory. Also, copy gid and group mode
+ bits from self.pkgdir if possible.
+		@param path: Absolute path of the directory to be created.
+		@type path: String
+ """
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ ensure_dirs(path)
+ return
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+ try:
+ ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ if not os.path.isdir(path):
+ raise
+
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
+ def _move_to_all(self, cpv):
+ """If the file exists, move it. Whether or not it exists, update state
+ for future getname() calls."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ self._pkg_paths[cpv] = os.path.join("All", myfile)
+ src_path = os.path.join(self.pkgdir, mycat, myfile)
+ try:
+ mystat = os.lstat(src_path)
+		except OSError:
+ mystat = None
+ if mystat and stat.S_ISREG(mystat.st_mode):
+ self._ensure_dir(os.path.join(self.pkgdir, "All"))
+ dest_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._create_symlink(cpv)
+ self.inject(cpv)
+
+ def _move_from_all(self, cpv):
+ """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+		${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future
+		getname() calls."""
+ self._remove_symlink(cpv)
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join(mycat, myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ self._ensure_dir(os.path.dirname(dest_path))
+ src_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._pkg_paths[cpv] = mypath
+
+ def populate(self, getbinpkgs=0):
+ "populates the binarytree"
+
+ if self._populating:
+ return
+
+ pkgindex_lock = None
+ try:
+ if os.access(self.pkgdir, os.W_OK):
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ self._populating = True
+ self._populate(getbinpkgs)
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+ self._populating = False
+
+ def _populate(self, getbinpkgs=0):
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+
+ # Clear all caches in case populate is called multiple times
+ # as may be the case when _global_updates calls populate()
+ # prior to performing package moves since it only wants to
+ # operate on local packages (getbinpkgs=0).
+ self._remotepkgs = None
+ self.dbapi._clear_cache()
+ self.dbapi._aux_cache.clear()
+ if True:
+ pkg_paths = {}
+ self._pkg_paths = pkg_paths
+ dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+ if "All" in dirs:
+ dirs.remove("All")
+ dirs.sort()
+ dirs.insert(0, "All")
+ pkgindex = self._load_pkgindex()
+ pf_index = None
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+ header = pkgindex.header
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+ update_pkgindex = False
+ for mydir in dirs:
+ for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+ if not myfile.endswith(".tbz2"):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ s = os.lstat(full_path)
+ if stat.S_ISLNK(s.st_mode):
+ continue
+
+ # Validate data from the package index and try to avoid
+ # reading the xpak if possible.
+ if mydir != "All":
+ possibilities = None
+ d = metadata.get(mydir+"/"+myfile[:-5])
+ if d:
+ possibilities = [d]
+ else:
+ if pf_index is None:
+ pf_index = {}
+ for mycpv in metadata:
+ mycat, mypf = catsplit(mycpv)
+ pf_index.setdefault(
+ mypf, []).append(metadata[mycpv])
+ possibilities = pf_index.get(myfile[:-5])
+ if possibilities:
+ match = None
+ for d in possibilities:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ continue
+ except (KeyError, ValueError):
+ continue
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ continue
+ except (KeyError, ValueError):
+ continue
+ if not self._pkgindex_keys.difference(d):
+ match = d
+ break
+ if match:
+ mycpv = match["CPV"]
+ if mycpv in pkg_paths:
+ # discard duplicates (All/ is preferred)
+ continue
+ mycpv = _pkg_str(mycpv)
+ pkg_paths[mycpv] = mypath
+ # update the path if the package has been moved
+ oldpath = d.get("PATH")
+ if oldpath and oldpath != mypath:
+ update_pkgindex = True
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ if not oldpath:
+ update_pkgindex = True
+ else:
+ d.pop("PATH", None)
+ if oldpath:
+ update_pkgindex = True
+ self.dbapi.cpv_inject(mycpv)
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+ continue
+ if not os.access(full_path, os.R_OK):
+ writemsg(_("!!! Permission denied to read " \
+ "binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ self.invalids.append(myfile[:-5])
+ continue
+ metadata_bytes = portage.xpak.tbz2(full_path).get_data()
+ mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypkg = myfile[:-5]
+ if not mycat or not mypf or not slot:
+ #old-style or corrupt package
+ writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ missing_keys = []
+ if not mycat:
+ missing_keys.append("CATEGORY")
+ if not mypf:
+ missing_keys.append("PF")
+ if not slot:
+ missing_keys.append("SLOT")
+ msg = []
+ if missing_keys:
+ missing_keys.sort()
+ msg.append(_("Missing metadata key(s): %s.") % \
+ ", ".join(missing_keys))
+ msg.append(_(" This binary package is not " \
+ "recoverable and should be deleted."))
+ for line in textwrap.wrap("".join(msg), 72):
+ writemsg("!!! %s\n" % line, noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+ mycat = mycat.strip()
+ slot = slot.strip()
+ if mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if mycpv in pkg_paths:
+ # All is first, so it's preferred.
+ continue
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Binary package has an " \
+ "unrecognized category: '%s'\n") % full_path,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ mycpv = _pkg_str(mycpv)
+ pkg_paths[mycpv] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ update_pkgindex = True
+ d = metadata.get(mycpv, {})
+ if d:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+ if d:
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+
+ d["CPV"] = mycpv
+ d["SLOT"] = slot
+ d["MTIME"] = str(s[stat.ST_MTIME])
+ d["SIZE"] = str(s.st_size)
+
+ d.update(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
+ try:
+ self._eval_use_flags(mycpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(mycpv), noiselevel=-1)
+ self.dbapi.cpv_remove(mycpv)
+ del pkg_paths[mycpv]
+
+ # record location if it's non-default
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ else:
+ d.pop("PATH", None)
+ metadata[mycpv] = d
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+
+ for cpv in list(metadata):
+ if cpv not in pkg_paths:
+ del metadata[cpv]
+
+ # Do not bother to write the Packages index if $PKGDIR/All/ exists
+ # since it will provide no benefit due to the need to read CATEGORY
+ # from xpak.
+ if update_pkgindex and os.access(self.pkgdir, os.W_OK):
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(iter(metadata.values()))
+ self._update_pkgindex_header(pkgindex.header)
+ self._pkgindex_write(pkgindex)
+
+ if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+ writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+
+ if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
+ self.populated=1
+ return
+ self._remotepkgs = {}
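+		# PORTAGE_BINHOST may hold several space-separated URIs, e.g.
+		# (hypothetical): "http://example.org/packages ssh://user@host/pkgs".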
+ for base_url in self.settings["PORTAGE_BINHOST"].split():
+ parsed_url = urlparse(base_url)
+ host = parsed_url.netloc
+ port = parsed_url.port
+ user = None
+ passwd = None
+ user_passwd = ""
+ if "@" in host:
+ user, host = host.split("@", 1)
+ user_passwd = user + "@"
+ if ":" in user:
+ user, passwd = user.split(":", 1)
+ if port is not None:
+ port_str = ":%s" % (port,)
+ if host.endswith(port_str):
+ host = host[:-len(port_str)]
+ pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+ host, parsed_url.path.lstrip("/"), "Packages")
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ remote_timestamp = None
+ rmt_idx = self._new_pkgindex()
+ proc = None
+ tmp_filename = None
+ try:
+ # urlparse.urljoin() only works correctly with recognized
+ # protocols and requires the base url to have a trailing
+ # slash, so join manually...
+ url = base_url.rstrip("/") + "/Packages"
+ f = None
+
+ # Don't use urlopen for https, since it doesn't support
+ # certificate/hostname verification (bug #469888).
+ if parsed_url.scheme not in ('https',):
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+					except IOError as err:
+						if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+							raise UseCachedCopyOfRemoteIndex()
+
+						if parsed_url.scheme in ('ftp', 'http', 'https'):
+							# This protocol is supposedly supported by urlopen,
+							# so apparently there's a problem with the url
+							# or a bug in urlopen.
+							if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+								traceback.print_exc()
+
+							raise
+					except ValueError:
+						raise ParseError("Invalid Portage BINHOST value '%s'"
+							% url.lstrip())
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
+ if port is not None:
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
+ stdout=subprocess.PIPE)
+ f = proc.stdout
+ else:
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = self.settings.get(setting)
+ if not fcmd:
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
+ fd, tmp_filename = tempfile.mkstemp()
+ tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+ os.close(fd)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = self.settings[k]
+ except KeyError:
+ pass
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
+ if not success:
+ raise EnvironmentError("%s failed" % (setting,))
+ f = open(tmp_filename, 'rb')
+
+ f_dec = codecs.iterdecode(f,
+ _encodings['repo.content'], errors='replace')
+ try:
+ rmt_idx.readHeader(f_dec)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp:
+ # no timestamp in the header, something's wrong
+ pkgindex = None
+						writemsg(_("\n\n!!! Binhost package index " \
+							"has no TIMESTAMP field.\n"), noiselevel=-1)
+ else:
+ if not self._pkgindex_version_supported(rmt_idx):
+ writemsg(_("\n\n!!! Binhost package index version" \
+ " is not supported: '%s'\n") % \
+ rmt_idx.header.get("VERSION"), noiselevel=-1)
+ pkgindex = None
+ elif local_timestamp != remote_timestamp:
+ rmt_idx.readBody(f_dec)
+ pkgindex = rmt_idx
+ finally:
+ # Timeout after 5 seconds, in case close() blocks
+ # indefinitely (see bug #350139).
+ try:
+ try:
+ AlarmSignal.register(5)
+ f.close()
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("\n\n!!! %s\n" % \
+ _("Timed out while closing connection to binhost"),
+ noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
+ except EnvironmentError as e:
+ writemsg(_("\n\n!!! Error fetching binhost package" \
+ " info from '%s'\n") % _hide_url_passwd(base_url))
+ writemsg("!!! %s\n\n" % str(e))
+ del e
+ pkgindex = None
+ if proc is not None:
+ if proc.poll() is None:
+ proc.kill()
+ proc.wait()
+ proc = None
+ if tmp_filename is not None:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+ if pkgindex is rmt_idx:
+ pkgindex.modified = False # don't update the header
+ try:
+ ensure_dirs(os.path.dirname(pkgindex_file))
+ f = atomic_ofstream(pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+ except (IOError, PortageException):
+ if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+ raise
+ # The current user doesn't have permission to cache the
+ # file, but that's alright.
+ if pkgindex:
+ # Organize remote package list as a cpv -> metadata map.
+ remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
+ remote_base_uri = pkgindex.header.get("URI", base_url)
+ for cpv, remote_metadata in remotepkgs.items():
+ remote_metadata["BASE_URI"] = remote_base_uri
+ self._pkgindex_uri[cpv] = url
+ self._remotepkgs.update(remotepkgs)
+ self._remote_has_index = True
+ for cpv in remotepkgs:
+ self.dbapi.cpv_inject(cpv)
+			# Remote package instances override local package
+			# instances if they are not identical.
+			hash_names = ["SIZE"] + self._pkgindex_hashes
+			for cpv, local_metadata in metadata.items():
+				remote_metadata = self._remotepkgs.get(cpv)
+				if remote_metadata is None:
+					continue
+				# Use digests to compare identity.
+				identical = True
+				for hash_name in hash_names:
+					local_value = local_metadata.get(hash_name)
+					if local_value is None:
+						continue
+					remote_value = remote_metadata.get(hash_name)
+					if remote_value is None:
+						continue
+					if local_value != remote_value:
+						identical = False
+						break
+				if identical:
+					del self._remotepkgs[cpv]
+				else:
+					# Override the local package in the aux_get cache.
+					self.dbapi._aux_cache[cpv] = remote_metadata
+
+ self.populated=1
+
+ def inject(self, cpv, filename=None):
+ """Add a freshly built package to the database. This updates
+ $PKGDIR/Packages with the new package metadata (including MD5).
+ @param cpv: The cpv of the new package to inject
+ @type cpv: string
+ @param filename: File path of the package to inject, or None if it's
+ already in the location returned by getname()
+ @type filename: string
+ @rtype: None
+ """
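+		# Typical usage after a build completes (paths are hypothetical):
+		#   bintree.inject("app-misc/foo-1",
+		#       filename="/var/tmp/binpkgs/foo-1.tbz2")
+		# which moves the file into PKGDIR and refreshes the Packages index.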
+ mycat, mypkg = catsplit(cpv)
+ if not self.populated:
+ self.populate()
+ if filename is None:
+ full_path = self.getname(cpv)
+ else:
+ full_path = filename
+ try:
+ s = os.stat(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ mytbz2 = portage.xpak.tbz2(full_path)
+ slot = mytbz2.getfile("SLOT")
+ if slot is None:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ slot = slot.strip()
+ self.dbapi.cpv_inject(cpv)
+
+ # Reread the Packages index (in case it's been changed by another
+		# process) and then update it, all while holding a lock.
+ pkgindex_lock = None
+ created_symlink = False
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ if filename is not None:
+ new_filename = self.getname(cpv)
+ try:
+ samefile = os.path.samefile(filename, new_filename)
+ except OSError:
+ samefile = False
+ if not samefile:
+ self._ensure_dir(os.path.dirname(new_filename))
+ _movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ self._file_permissions(full_path)
+
+ if self._all_directory and \
+ self.getname(cpv).split(os.path.sep)[-2] == "All":
+ self._create_symlink(cpv)
+ created_symlink = True
+ pkgindex = self._load_pkgindex()
+
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ # Discard remote metadata to ensure that _pkgindex_entry
+ # gets the local metadata. This also updates state for future
+ # isremote calls.
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(cpv, None)
+
+ # Discard cached metadata to ensure that _pkgindex_entry
+ # doesn't return stale metadata.
+ self.dbapi._aux_cache.pop(cpv, None)
+
+ try:
+ d = self._pkgindex_entry(cpv)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(cpv), noiselevel=-1)
+ self.dbapi.cpv_remove(cpv)
+ del self._pkg_paths[cpv]
+ return
+
+ # If found, remove package(s) with duplicate path.
+ path = d.get("PATH", "")
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d2 = pkgindex.packages[i]
+ if path and path == d2.get("PATH"):
+ # Handle path collisions in $PKGDIR/All
+ # when CPV is not identical.
+ del pkgindex.packages[i]
+ elif cpv == d2.get("CPV"):
+ if path == d2.get("PATH", ""):
+ del pkgindex.packages[i]
+ elif created_symlink and not d2.get("PATH", ""):
+ # Delete entry for the package that was just
+ # overwritten by a symlink to this package.
+ del pkgindex.packages[i]
+
+ pkgindex.packages.append(d)
+
+ self._update_pkgindex_header(pkgindex.header)
+ self._pkgindex_write(pkgindex)
+
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ def _pkgindex_write(self, pkgindex):
+ contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
+ pkgindex.write(contents)
+ contents = contents.getvalue()
+ atime = mtime = long(pkgindex.header["TIMESTAMP"])
+ output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
+ self._pkgindex_file, None)]
+
+ if "compress-index" in self.settings.features:
+ gz_fname = self._pkgindex_file + ".gz"
+ fileobj = atomic_ofstream(gz_fname, mode="wb")
+ output_files.append((GzipFile(filename='', mode="wb",
+ fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
+
+ for f, fname, f_close in output_files:
+ f.write(contents)
+ f.close()
+ if f_close is not None:
+ f_close.close()
+ self._file_permissions(fname)
+ # some seconds might have elapsed since TIMESTAMP
+ os.utime(fname, (atime, mtime))
+
+ def _pkgindex_entry(self, cpv):
+ """
+ Performs checksums and evaluates USE flag conditionals.
+ Raises InvalidDependString if necessary.
+ @rtype: dict
+		@return: a dict containing the entry for the given cpv.
+ """
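+		# Illustrative result (hypothetical values):
+		#   {"CPV": "app-misc/foo-1", "SLOT": "0", "MTIME": "1433062500",
+		#    "SIZE": "1048576", "MD5": "<hexdigest>", ...}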
+
+ pkg_path = self.getname(cpv)
+
+ d = dict(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
+
+ d.update(perform_multiple_checksums(
+ pkg_path, hashes=self._pkgindex_hashes))
+
+ d["CPV"] = cpv
+ st = os.stat(pkg_path)
+ d["MTIME"] = str(st[stat.ST_MTIME])
+ d["SIZE"] = str(st.st_size)
+
+ rel_path = self._pkg_paths[cpv]
+ # record location if it's non-default
+ if rel_path != cpv + ".tbz2":
+ d["PATH"] = rel_path
+
+ self._eval_use_flags(cpv, d)
+ return d
+
+ def _new_pkgindex(self):
+ return portage.getbinpkg.PackageIndex(
+ allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+ default_header_data=self._pkgindex_default_header_data,
+ default_pkg_data=self._pkgindex_default_pkg_data,
+ inherited_keys=self._pkgindex_inherited_keys,
+ translated_keys=self._pkgindex_translated_keys)
+
+ def _update_pkgindex_header(self, header):
+ portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+ profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+ if self.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(self.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ header["PROFILE"] = profile_path
+ header["VERSION"] = str(self._pkgindex_version)
+ base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+ if base_uri:
+ header["URI"] = base_uri
+ else:
+ header.pop("URI", None)
+ for k in self._pkgindex_header_keys:
+ v = self.settings.get(k, None)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ def _pkgindex_version_supported(self, pkgindex):
+ version = pkgindex.header.get("VERSION")
+ if version:
+ try:
+ if int(version) <= self._pkgindex_version:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _eval_use_flags(self, cpv, metadata):
+ use = frozenset(metadata["USE"].split())
+ for k in self._pkgindex_use_evaluated_keys:
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ try:
+ deps = metadata[k]
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
+ deps = paren_enclose(deps)
+ except portage.exception.InvalidDependString as e:
+ writemsg("%s: %s\n" % (k, str(e)),
+ noiselevel=-1)
+ raise
+ metadata[k] = deps
+
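+	# Illustrative: with USE="ssl", _eval_use_flags() reduces a conditional
+	# dependency string such as "ssl? ( dev-libs/openssl )" to
+	# "dev-libs/openssl" before it is written to the Packages index.
+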
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, pkgname):
+ """Returns a file location for this package. The default location is
+ ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+ in the rare event of a collision. The prevent_collision() method can
+ be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+ specific cpv."""
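+		# For example (hypothetical), with an All/ layout:
+		#   getname("app-misc/foo-1") -> ${PKGDIR}/All/foo-1.tbz2
+		# unless another category already claimed All/foo-1.tbz2, in which
+		# case it returns ${PKGDIR}/app-misc/foo-1.tbz2.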
+ if not self.populated:
+ self.populate()
+ mycpv = pkgname
+ mypath = self._pkg_paths.get(mycpv, None)
+ if mypath:
+ return os.path.join(self.pkgdir, mypath)
+ mycat, mypkg = catsplit(mycpv)
+ if self._all_directory:
+ mypath = os.path.join("All", mypkg + ".tbz2")
+ if mypath in self._pkg_paths.values():
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ else:
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ self._pkg_paths[mycpv] = mypath # cache for future lookups
+ return os.path.join(self.pkgdir, mypath)
+
+ def isremote(self, pkgname):
+ """Returns true if the package is kept remotely and it has not been
+ downloaded (or it is only partially downloaded)."""
+ if self._remotepkgs is None or pkgname not in self._remotepkgs:
+ return False
+ # Presence in self._remotepkgs implies that it's remote. When a
+ # package is downloaded, state is updated by self.inject().
+ return True
+
+ def get_pkgindex_uri(self, pkgname):
+ """Returns the URI to the Packages file for a given package."""
+ return self._pkgindex_uri.get(pkgname)
+
+
+ def gettbz2(self, pkgname):
+ """Fetches the package from a remote site, if necessary. Attempts to
+ resume if the file appears to be partially downloaded."""
+ tbz2_path = self.getname(pkgname)
+ tbz2name = os.path.basename(tbz2_path)
+ resume = False
+ if os.path.exists(tbz2_path):
+ if tbz2name[:-5] not in self.invalids:
+ return
+ else:
+ resume = True
+ writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+ noiselevel=-1)
+
+ mydest = os.path.dirname(self.getname(pkgname))
+ self._ensure_dir(mydest)
+ # urljoin doesn't work correctly with unrecognized protocols like sftp
+ if self._remote_has_index:
+ rel_url = self._remotepkgs[pkgname].get("PATH")
+ if not rel_url:
+ rel_url = pkgname+".tbz2"
+ remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
+ url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+ else:
+ url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+ protocol = urlparse(url)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = self.settings.get(fcmd_prefix)
+ success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+ if not success:
+ try:
+ os.unlink(self.getname(pkgname))
+ except OSError:
+ pass
+ raise portage.exception.FileNotFound(mydest)
+ self.inject(pkgname)
+
+ def _load_pkgindex(self):
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(self._pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ return pkgindex
+
+ def _get_digests(self, pkg):
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ digests = {}
+ metadata = None
+ if self._remotepkgs is None or cpv not in self._remotepkgs:
+ for d in self._load_pkgindex().packages:
+ if d["CPV"] == cpv:
+ metadata = d
+ break
+ else:
+ metadata = self._remotepkgs[cpv]
+ if metadata is None:
+ return digests
+
+ for k in hashfunc_map:
+ v = metadata.get(k)
+ if not v:
+ continue
+ digests[k] = v
+
+ if "SIZE" in metadata:
+ try:
+ digests["size"] = int(metadata["SIZE"])
+ except ValueError:
+ writemsg(_("!!! Malformed SIZE attribute in remote " \
+ "metadata for '%s'\n") % cpv)
+
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
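+		# Illustrative: the local .tbz2 is checked against the size and hash
+		# values (e.g. MD5, SHA1) recorded in the Packages metadata; a
+		# mismatch raises DigestException rather than returning False.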
+
+ digests = self._get_digests(pkg)
+
+ if not digests:
+ return False
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+ eout = EOutput()
+ eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+ if not ok:
+ ok, reason = verify_all(pkg_path, digests)
+ if not ok:
+ raise portage.exception.DigestException(
+ (pkg_path,) + tuple(reason))
+
+ return True
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ pass
+ return myslot
diff --git a/usr/lib/portage/pym/portage/dbapi/cpv_expand.py b/usr/lib/portage/pym/portage/dbapi/cpv_expand.py
new file mode 100644
index 0000000..70ee782
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/cpv_expand.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["cpv_expand"]
+
+import portage
+from portage.exception import AmbiguousPackageName
+from portage.localization import _
+from portage.util import writemsg
+from portage.versions import _pkgsplit
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+ """Given a string (packagename or virtual) expand it into a valid
+	cat/package string. For virtuals, mydb is used to determine which
+	provided virtual is a valid choice; the first element is used as a
+	default when there are no installed/available candidates."""
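+	# For example (hypothetical tree): cpv_expand("bash-4.2", mydb=portdb)
+	# returns "app-shells/bash-4.2" when exactly one category provides bash,
+	# "null/bash-4.2" when no category does, and raises AmbiguousPackageName
+	# when the match is ambiguous.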
+ myslash=mycpv.split("/")
+ mysplit = _pkgsplit(myslash[-1])
+ if settings is None:
+ try:
+ settings = mydb.settings
+ except AttributeError:
+ settings = portage.settings
+ if len(myslash)>2:
+		# this is an illegal case (more than one slash).
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. Therefore, only call getvirtuals()
+ # if the atom category is "virtual" and cp_list()
+ # returns nothing.
+ if mykey.startswith("virtual/") and \
+ hasattr(mydb, "cp_list") and \
+ not mydb.cp_list(mykey, use_cache=use_cache):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts = settings.getvirtuals().get(mykey)
+ if virts:
+ mykey_orig = mykey
+ for vkey in virts:
+ # The virtuals file can contain a versioned atom, so
+ # it may be necessary to remove the operator and
+ # version from the atom before it is passed into
+ # dbapi.cp_list().
+ if mydb.cp_list(vkey.cp):
+ mykey = str(vkey)
+ break
+ if mykey == mykey_orig:
+ mykey = str(virts[0])
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, ie. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb and hasattr(mydb, "categories"):
+ for x in mydb.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if len(matches) > 1:
+ virtual_name_collision = False
+ if len(matches) == 2:
+ for x in matches:
+ if not x.startswith("virtual/"):
+ # Assume that the non-virtual is desired. This helps
+ # avoid the ValueError for invalid deps that come from
+ # installed packages (during reverse blocker detection,
+ # for example).
+ mykey = x
+ else:
+ virtual_name_collision = True
+ if not virtual_name_collision:
+ # AmbiguousPackageName inherits from ValueError,
+ # for backward compatibility with calling code
+ # that already handles ValueError.
+ raise AmbiguousPackageName(matches)
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and not isinstance(mydb, list):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts_p = settings.get_virts_p().get(myp)
+ if virts_p:
+ mykey = virts_p[0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
diff --git a/usr/lib/portage/pym/portage/dbapi/dep_expand.py b/usr/lib/portage/pym/portage/dbapi/dep_expand.py
new file mode 100644
index 0000000..3de5d8f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/dep_expand.py
@@ -0,0 +1,58 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["dep_expand"]
+
+import re
+
+from portage.dbapi.cpv_expand import cpv_expand
+from portage.dep import Atom, isvalidatom
+from portage.exception import InvalidAtom
+from portage.versions import catsplit
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+ '''
+ @rtype: Atom
+ '''
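+	# For example (hypothetical tree): dep_expand(">=bash-4.2", mydb=portdb)
+	# would return the Atom ">=app-shells/bash-4.2".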
+ orig_dep = mydep
+ if isinstance(orig_dep, Atom):
+ has_cat = True
+ else:
+ if not mydep:
+ return mydep
+ if mydep[0] == "*":
+ mydep = mydep[1:]
+ orig_dep = mydep
+ has_cat = '/' in orig_dep.split(':')[0]
+ if not has_cat:
+ alphanum = re.search(r'\w', orig_dep)
+ if alphanum:
+ mydep = orig_dep[:alphanum.start()] + "null/" + \
+ orig_dep[alphanum.start():]
+ try:
+ mydep = Atom(mydep, allow_repo=True)
+ except InvalidAtom:
+ # Missing '=' prefix is allowed for backward compatibility.
+ if not isvalidatom("=" + mydep, allow_repo=True):
+ raise
+ mydep = Atom('=' + mydep, allow_repo=True)
+ orig_dep = '=' + orig_dep
+ if not has_cat:
+ null_cat, pn = catsplit(mydep.cp)
+ mydep = pn
+
+ if has_cat:
+ # Optimize most common cases to avoid calling cpv_expand.
+ if not mydep.cp.startswith("virtual/"):
+ return mydep
+ if not hasattr(mydb, "cp_list") or \
+ mydb.cp_list(mydep.cp):
+ return mydep
+ # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
+ mydep = mydep.cp
+
+ expanded = cpv_expand(mydep, mydb=mydb,
+ use_cache=use_cache, settings=settings)
+ return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
diff --git a/usr/lib/portage/pym/portage/dbapi/porttree.py b/usr/lib/portage/pym/portage/dbapi/porttree.py
new file mode 100644
index 0000000..590e3c5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/porttree.py
@@ -0,0 +1,1229 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ "close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
+]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum',
+ 'portage.data:portage_gid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce,_match_slot',
+ 'portage.package.ebuild.doebuild:doebuild',
+ 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str',
+)
+
+from portage.cache import volatile
+from portage.cache.cache_errors import CacheError
+from portage.cache.mappings import Mapping
+from portage.dbapi import dbapi
+from portage.exception import PortageException, \
+ FileNotFound, InvalidAtom, InvalidData, \
+ InvalidDependString, InvalidPackageName
+from portage.localization import _
+
+from portage import eclass_cache, \
+ eapi_is_supported, \
+ _eapi_is_deprecated
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import OrderedDict
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+
+import os as _os
+import sys
+import traceback
+import warnings
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+def close_portdbapi_caches():
+ # The python interpreter does _not_ guarantee that destructors are
+ # called for objects that remain when the interpreter exits, so we
+ # use an atexit hook to call destructors for any global portdbapi
+ # instances that may have been constructed.
+ try:
+ portage._legacy_globals_constructed
+ except AttributeError:
+ pass
+ else:
+ if "db" in portage._legacy_globals_constructed:
+ try:
+ db = portage.db
+ except AttributeError:
+ pass
+ else:
+ if isinstance(db, dict):
+ for x in db.values():
+ try:
+ if "porttree" in x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ x = x.pop("porttree").dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(x, portdbapi):
+ continue
+ x.close_caches()
+
+portage.process.atexit_register(close_portdbapi_caches)
+
+# It used to be necessary for API consumers to remove portdbapi instances
+# from portdbapi_instances, in order to avoid having accumulated instances
+# consume memory. Now, portdbapi_instances is just an empty dummy list, so
+# for backward compatibility, ignore ValueError for removal on non-existent
+# items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy portdbapi_instances.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = _dummy_list()
+ _use_mutable = True
+
+ @property
+ def _categories(self):
+ return self.settings.categories
+
+ @property
+ def porttree_root(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.porttree_root is deprecated in favor of portage.repository.config.RepoConfig.location "
+ "(available as repositories[repo_name].location attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
+ return self.settings.repositories.mainRepoLocation()
+
+ @property
+ def eclassdb(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.eclassdb is deprecated in favor of portage.repository.config.RepoConfig.eclass_db "
+ "(available as repositories[repo_name].eclass_db attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
+ main_repo = self.repositories.mainRepo()
+ if main_repo is None:
+ return None
+ return main_repo.eclass_db
+
+ def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
+ """
+ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
+ @type _unused_param: None
+ @param mysettings: an immutable config instance
+ @type mysettings: portage.config
+ """
+
+ from portage import config
+ if mysettings:
+ self.settings = mysettings
+ else:
+ from portage import settings
+ self.settings = config(clone=settings)
+
+ if _unused_param is not DeprecationWarning:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.porttree.portdbapi" + \
+ " constructor is unused since portage-2.1.8. " + \
+ "mysettings['PORTDIR'] is used instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.repositories = self.settings.repositories
+ self.treemap = self.repositories.treemap
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.settings)
+ self.depcachedir = os.path.realpath(self.settings.depcachedir)
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # Make api consumers exempt from sandbox violations
+ # when doing metadata cache updates.
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ if self.depcachedir not in sandbox_write:
+ sandbox_write.append(self.depcachedir)
+ os.environ["SANDBOX_WRITE"] = \
+ ":".join(filter(None, sandbox_write))
+
+ self.porttrees = list(self.settings.repositories.repoLocationList())
+
+ # This is used as sanity check for aux_get(). If there is no
+ # root eclass dir, we assume that PORTDIR is invalid or
+ # missing. This check allows aux_get() to detect a missing
+ # portage tree and return early by raising a KeyError.
+ self._have_root_eclass_dir = os.path.isdir(
+ os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
+
+		# If the portdbapi is "frozen", then we assume that we can cache
+		# everything, i.e. that no updates to it are happening.
+ self.xcache = {}
+ self.frozen = 0
+
+ #Keep a list of repo names, sorted by priority (highest priority first).
+ self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
+
+ self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._pregen_auxdb = {}
+ # If the current user doesn't have depcachedir write permission,
+ # then the depcachedir cache is kept here read-only access.
+ self._ro_auxdb = {}
+ self._init_cache_dirs()
+ try:
+ depcachedir_st = os.stat(self.depcachedir)
+ depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+ except OSError:
+ depcachedir_st = None
+ depcachedir_w_ok = False
+
+ cache_kwargs = {}
+
+ depcachedir_unshared = False
+ if portage.data.secpass < 1 and \
+ depcachedir_w_ok and \
+ depcachedir_st is not None and \
+ os.getuid() == depcachedir_st.st_uid and \
+ os.getgid() == depcachedir_st.st_gid:
+ # If this user owns depcachedir and is not in the
+ # portage group, then don't bother to set permissions
+ # on cache entries. This makes it possible to run
+ # egencache without any need to be a member of the
+ # portage group.
+ depcachedir_unshared = True
+ else:
+ cache_kwargs.update(portage._native_kwargs({
+ 'gid' : portage_gid,
+ 'perms' : 0o664
+ }))
+
+ # If secpass < 1, we don't want to write to the cache
+ # since then we won't be able to apply group permissions
+ # to the cache entries/directories.
+ if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
+ for x in self.porttrees:
+ self.auxdb[x] = volatile.database(
+ self.depcachedir, x, self._known_keys,
+ **cache_kwargs)
+ try:
+ self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
+ self._known_keys, readonly=True, **cache_kwargs)
+ except CacheError:
+ pass
+ else:
+ for x in self.porttrees:
+ if x in self.auxdb:
+ continue
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, self._known_keys, **cache_kwargs)
+ if "metadata-transfer" not in self.settings.features:
+ for x in self.porttrees:
+ if x in self._pregen_auxdb:
+ continue
+ cache = self._create_pregen_cache(x)
+ if cache is not None:
+ self._pregen_auxdb[x] = cache
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["DEPEND", "EAPI", "HDEPEND",
+ "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
+ "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
+
+ self._aux_cache = {}
+ self._broken_ebuilds = set()
+
+ @property
+ def _event_loop(self):
+ if portage._internal_caller:
+ # For internal portage usage, the global_event_loop is safe.
+ return global_event_loop()
+ else:
+ # For external API consumers, use a local EventLoop, since
+ # we don't want to assume that it's safe to override the
+ # global SIGCHLD handler.
+ return EventLoop(main=False)
+
+ def _create_pregen_cache(self, tree):
+ conf = self.repositories.get_repo_for_location(tree)
+ cache = conf.get_pregenerated_cache(
+ self._known_keys, readonly=True)
+ if cache is not None:
+ try:
+ cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
+ except AttributeError:
+ pass
+
+ if not cache.complete_eclass_entries:
+ warnings.warn(
+ ("Repository '%s' used deprecated 'pms' cache format. "
+ "Please migrate to 'md5-dict' format.") % (conf.name,),
+ DeprecationWarning)
+
+ return cache
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 0o2070
+ modemask = 0o2
+
+ try:
+ ensure_dirs(self.depcachedir, gid=portage_gid,
+ mode=dirmode, mask=modemask)
+ except PortageException:
+ pass
+
+ def close_caches(self):
+ if not hasattr(self, "auxdb"):
+ # unhandled exception thrown from constructor
+ return
+ for x in self.auxdb:
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def findLicensePath(self, license_name):
+ for x in reversed(self.porttrees):
+ license_path = os.path.join(x, "licenses", license_name)
+ if os.access(license_path, os.R_OK):
+ return license_path
+ return None
+
+ def findname(self,mycpv, mytree = None, myrepo = None):
+ return self.findname2(mycpv, mytree, myrepo)[0]
+
+ def getRepositoryPath(self, repository_id):
+ """
+		This function is required for GLEP 42 compliance; given a valid
+		repository ID it must return a path to the repository.
+		TreeMap = { id: path }
+ """
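+		# e.g. (hypothetical): getRepositoryPath("gentoo") -> "/usr/portage",
+		# with the actual path determined by the local repository configuration.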
+ return self.treemap.get(repository_id)
+
+ def getRepositoryName(self, canonical_repo_path):
+ """
+ This is the inverse of getRepositoryPath().
+ @param canonical_repo_path: the canonical path of a repository, as
+ resolved by os.path.realpath()
+ @type canonical_repo_path: String
+ @return: The repo_name for the corresponding repository, or None
+ if the path does not correspond a known repository
+ @rtype: String or None
+ """
+ try:
+ return self.repositories.get_name_for_location(canonical_repo_path)
+ except KeyError:
+ return None
+
+ def getRepositories(self):
+ """
+ This function is required for GLEP 42 compliance; it will return a list of
+ repository IDs
+ TreeMap = {id: path}
+ """
+ return self._ordered_repo_name_list
+
+ def getMissingRepoNames(self):
+ """
+ Returns a list of repository paths that lack profiles/repo_name.
+ """
+ return self.settings.repositories.missing_repo_names
+
+ def getIgnoredRepos(self):
+ """
+ Returns a list of repository paths that have been ignored, because
+ another repo with the same name exists.
+ """
+ return self.settings.repositories.ignored_repos
+
+ def findname2(self, mycpv, mytree=None, myrepo = None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+ Searches overlays first, then PORTDIR; this allows us to return the first
+		matching file. If we searched PORTDIR first, we would still have to
+		search the overlays exhaustively to know whether one of them
+		overrides the file we found.
+		If myrepo is not None, it will find packages only from this
+		repository (overlay).
+ """
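+		# e.g. (hypothetical): findname2("app-shells/bash-4.2") might return
+		# ("/usr/portage/app-shells/bash/bash-4.2.ebuild", "/usr/portage"),
+		# depending on the configured repositories.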
+ if not mycpv:
+ return (None, 0)
+
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return (None, 0)
+
+ mysplit = mycpv.split("/")
+ psplit = pkgsplit(mysplit[1])
+ if psplit is None or len(mysplit) != 2:
+ raise InvalidPackageName(mycpv)
+
+		# For optimal performance in this hot spot, we do manual unicode
+ # handling here instead of using the wrapped os module.
+ encoding = _encodings['fs']
+ errors = 'strict'
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = reversed(self.porttrees)
+
+ relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
+ mysplit[1] + ".ebuild"
+
+ for x in mytrees:
+ filename = x + _os.sep + relative_path
+ if _os.access(_unicode_encode(filename,
+ encoding=encoding, errors=errors), _os.R_OK):
+ return (filename, x)
+ return (None, 0)
+
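+ # Sketch of the precedence rule implemented above (hypothetical paths):
+ # with porttrees = ['/usr/portage', '/var/overlay'], findname2() scans
+ # reversed(porttrees), so an ebuild present in both trees resolves to
+ # the overlay copy:
+ #
+ #     filename, tree = pdb.findname2('sys-apps/foo-1.0')
+ #     # => ('/var/overlay/sys-apps/foo/foo-1.0.ebuild', '/var/overlay')
+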
+ def _write_cache(self, cpv, repo_path, metadata, ebuild_hash):
+
+ try:
+ cache = self.auxdb[repo_path]
+ chf = cache.validation_chf
+ metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+ cache = None
+
+ if cache is not None:
+ try:
+ cache[cpv] = metadata
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+
+ def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
+ try:
+ ebuild_hash = eclass_cache.hashed_path(ebuild_path)
+ # snag mtime since we use it later, and to trigger stat failure
+ # if it doesn't exist
+ ebuild_hash.mtime
+ except FileNotFound:
+ writemsg(_("!!! aux_get(): ebuild for " \
+ "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
+ writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
+ raise KeyError(cpv)
+
+ # Pull pre-generated metadata from the metadata/cache/
+ # directory if it exists and is valid, otherwise fall
+ # back to the normal writable cache.
+ auxdbs = []
+ pregen_auxdb = self._pregen_auxdb.get(repo_path)
+ if pregen_auxdb is not None:
+ auxdbs.append(pregen_auxdb)
+ ro_auxdb = self._ro_auxdb.get(repo_path)
+ if ro_auxdb is not None:
+ auxdbs.append(ro_auxdb)
+ auxdbs.append(self.auxdb[repo_path])
+ eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db
+
+ for auxdb in auxdbs:
+ try:
+ metadata = auxdb[cpv]
+ except KeyError:
+ continue
+ except CacheError:
+ if not auxdb.readonly:
+ try:
+ del auxdb[cpv]
+ except (KeyError, CacheError):
+ pass
+ continue
+ eapi = metadata.get('EAPI', '').strip()
+ if not eapi:
+ eapi = '0'
+ metadata['EAPI'] = eapi
+ if not eapi_is_supported(eapi):
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we disregard cache entries
+ # for unsupported EAPIs.
+ continue
+ if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
+ break
+ else:
+ metadata = None
+
+ return (metadata, ebuild_hash)
+
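+ # The lookup above layers caches in priority order: any pregenerated
+ # cache shipped with the repo, then a read-only cache, then the normal
+ # writable cache under /var/cache/edb/dep; the first entry that
+ # validates against the ebuild hash and eclass db wins. A minimal
+ # caller sketch (illustrative only):
+ #
+ #     metadata, ebuild_hash = pdb._pull_valid_cache(cpv, path, repo_path)
+ #     if metadata is None:
+ #         pass  # no valid entry; regenerate, as aux_get() does below
+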
+ def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
+ "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+ cache_me = False
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ raise KeyError(myrepo)
+
+ if mytree is not None and len(self.porttrees) == 1 \
+ and mytree == self.porttrees[0]:
+ # mytree matches our only tree, so it's safe to
+ # ignore mytree and cache the result
+ mytree = None
+ myrepo = None
+
+ if mytree is None:
+ cache_me = True
+ if mytree is None and not self._known_keys.intersection(
+ mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in mylist]
+ cache_me = True
+
+ try:
+ cat, pkg = mycpv.split("/", 1)
+ except ValueError:
+ # Missing slash. Can't find ebuild so raise KeyError.
+ raise KeyError(mycpv)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): %s\n" % \
+ _("ebuild not found for '%s'") % mycpv, noiselevel=1)
+ raise KeyError(mycpv)
+
+ mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
+ doregen = mydata is None
+
+ if doregen:
+ if myebuild in self._broken_ebuilds:
+ raise KeyError(mycpv)
+
+ proc = EbuildMetadataPhase(cpv=mycpv,
+ ebuild_hash=ebuild_hash, portdb=self,
+ repo_path=mylocation, scheduler=self._event_loop,
+ settings=self.doebuild_settings)
+
+ proc.start()
+ proc.wait()
+
+ if proc.returncode != os.EX_OK:
+ self._broken_ebuilds.add(myebuild)
+ raise KeyError(mycpv)
+
+ mydata = proc.metadata
+
+ mydata["repository"] = self.repositories.get_name_for_location(mylocation)
+ mydata["_mtime_"] = ebuild_hash.mtime
+ eapi = mydata.get("EAPI")
+ if not eapi:
+ eapi = "0"
+ mydata["EAPI"] = eapi
+ if eapi_is_supported(eapi):
+ mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = [mydata.get(x, "") for x in mylist]
+
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ return returnme
+
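+ # Usage sketch matching the docstring above (hypothetical cpv):
+ #
+ #     try:
+ #         slot, depend = pdb.aux_get('sys-apps/foo-1.0',
+ #             ['SLOT', 'DEPEND'])
+ #     except KeyError:
+ #         pass  # ebuild missing, or metadata generation failed
+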
+ def getFetchMap(self, mypkg, useflags=None, mytree=None):
+ """
+ Get the SRC_URI metadata as a dict which maps each file name to a
+ set of alternative URIs.
+
+ @param mypkg: cpv for an ebuild
+ @type mypkg: String
+ @param useflags: a collection of enabled USE flags, for evaluation of
+ conditionals
+ @type useflags: set, or None to enable all conditionals
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+ @type mypkg: String
+ @return: A dict which maps each file name to a set of alternative
+ URIs.
+ @rtype: dict
+ """
+
+ try:
+ eapi, myuris = self.aux_get(mypkg,
+ ["EAPI", "SRC_URI"], mytree=mytree)
+ except KeyError:
+ # Convert this to an InvalidDependString exception since callers
+ # already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
+
+ if not eapi_is_supported(eapi):
+ # Convert this to an InvalidDependString exception
+ # since callers already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
+ (mypkg, eapi))
+
+ return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
+ use=useflags)
+
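+ # Example of the returned shape (hypothetical SRC_URI with two mirrors):
+ #
+ #     pdb.getFetchMap('sys-apps/foo-1.0')
+ #     # => {'foo-1.0.tar.gz': ('http://a.example/foo-1.0.tar.gz',
+ #     #                        'http://b.example/foo-1.0.tar.gz')}
+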
+ def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
+ # returns a filename:size dictionary of remaining downloads
+ myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
+ pkgdir, self.settings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug:
+ writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
+ return {}
+ filesdict={}
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ try:
+ fetch_size = int(checksums[myfile]["size"])
+ except (KeyError, ValueError):
+ if debug:
+ writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
+ continue
+ file_path = os.path.join(self.settings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError:
+ pass
+ if mystat is None:
+ existing_size = 0
+ ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
+ if ro_distdirs is not None:
+ for x in shlex_split(ro_distdirs):
+ try:
+ mystat = os.stat(os.path.join(x, myfile))
+ except OSError:
+ pass
+ else:
+ if mystat.st_size == fetch_size:
+ existing_size = fetch_size
+ break
+ else:
+ existing_size = mystat.st_size
+ remaining_size = fetch_size - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
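+ # The remaining-size rules above, summarized (digest size vs. what is
+ # already in DISTDIR or a matching PORTAGE_RO_DISTDIRS entry):
+ #
+ #     remaining > 0   -> report the shortfall (download is resumable)
+ #     remaining < 0   -> report the full digest size (file is corrupt)
+ #     remaining == 0  -> omit the file (nothing left to fetch)
+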
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
+ """
+ TODO: account for PORTAGE_RO_DISTDIRS
+ """
+ if all:
+ useflags = None
+ elif useflags is None:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return False
+ else:
+ mytree = None
+
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = _("digest missing")
+ else:
+ try:
+ ok, reason = portage.checksum.verify_all(
+ os.path.join(self.settings["DISTDIR"], x), mysums[x])
+ except FileNotFound as e:
+ ok = False
+ reason = _("File Not Found: '%s'") % (e,)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2 = mykey.split("/")
+ cps = catpkgsplit(mykey, silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self, categories=None, trees=None, reverse=False):
+ """
+ This returns a list of all keys in our tree or trees
+ @param categories: optional list of categories to search or
+ defaults to self.settings.categories
+ @param trees: optional list of trees to search the categories in or
+ defaults to self.porttrees
+ @param reverse: reverse sort order (default is False)
+ @rtype list of [cat/pkg,...]
+ """
+ d = {}
+ if categories is None:
+ categories = self.settings.categories
+ if trees is None:
+ trees = self.porttrees
+ for x in categories:
+ for oroot in trees:
+ for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ try:
+ atom = Atom("%s/%s" % (x, y))
+ except InvalidAtom:
+ continue
+ if atom != atom.cp:
+ continue
+ d[atom.cp] = None
+ l = list(d)
+ l.sort(reverse=reverse)
+ return l
+
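+ # Example result shape (hypothetical tree contents):
+ #
+ #     pdb.cp_all(categories=['sys-apps'])
+ #     # => ['sys-apps/baselayout', 'sys-apps/portage', ...]
+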
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ if self.frozen and mytree is not None \
+ and len(self.porttrees) == 1 \
+ and mytree == self.porttrees[0]:
+ # mytree matches our only tree, so it's safe to
+ # ignore mytree and cache the result
+ mytree = None
+
+ if self.frozen and mytree is None:
+ cachelist = self.xcache["cp-list"].get(mycp)
+ if cachelist is not None:
+ # Try to propagate this to the match-all cache here for
+ # repoman since he uses separate match-all caches for each
+ # profile (due to differences in _get_implicit_iuse).
+ self.xcache["match-all"][(mycp, mycp)] = cachelist
+ return cachelist[:]
+ mysplit = mycp.split("/")
+ invalid_category = mysplit[0] not in self._categories
+ d={}
+ if mytree is not None:
+ if isinstance(mytree, basestring):
+ mytrees = [mytree]
+ else:
+ # assume it's iterable
+ mytrees = mytree
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ try:
+ file_list = os.listdir(os.path.join(oroot, mycp))
+ except OSError:
+ continue
+ for x in file_list:
+ pf = None
+ if x[-7:] == '.ebuild':
+ pf = x[:-7]
+
+ if pf is not None:
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ if ps[0] != mysplit[1]:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ ver_match = ver_regexp.match("-".join(ps[1:]))
+ if ver_match is None or not ver_match.groups():
+ writemsg(_("\nInvalid ebuild version: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ d[_pkg_str(mysplit[0]+"/"+pf)] = None
+ if invalid_category and d:
+ writemsg(_("\n!!! '%s' has a category that is not listed in " \
+ "%setc/portage/categories\n") % \
+ (mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
+ mylist = []
+ else:
+ mylist = list(d)
+ # Always sort in ascending order here since it's handy
+ # and the result can be easily cached and reused.
+ self._cpv_sort_ascending(mylist)
+ if self.frozen and mytree is None:
+ cachelist = mylist[:]
+ self.xcache["cp-list"][mycp] = cachelist
+ self.xcache["match-all"][(mycp, mycp)] = cachelist
+ return mylist
+
+ def freeze(self):
+ for x in "bestmatch-visible", "cp-list", "match-all", \
+ "match-all-cpv-only", "match-visible", "minimum-all", \
+ "minimum-visible":
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache = {}
+ self.frozen = 0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ if level == "list-visible":
+ level = "match-visible"
+ warnings.warn("The 'list-visible' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch "
+ "has been renamed to match-visible",
+ DeprecationWarning, stacklevel=2)
+
+ if mydep is None:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey = mydep.cp
+
+ #if no updates are being made to the tree, we can consult our xcache...
+ cache_key = None
+ if self.frozen:
+ cache_key = (mydep, mydep.unevaluated_atom)
+ try:
+ return self.xcache[level][cache_key][:]
+ except KeyError:
+ pass
+
+ myval = None
+ mytree = None
+ if mydep.repo is not None:
+ mytree = self.treemap.get(mydep.repo)
+ if mytree is None:
+ if level.startswith("match-"):
+ myval = []
+ else:
+ myval = ""
+
+ if myval is not None:
+ # Unknown repo, empty result.
+ pass
+ elif level == "match-all-cpv-only":
+ # match *all* packages, only against the cpv, in order
+ # to bypass unnecessary cache access for things like IUSE
+ # and SLOT.
+ if mydep == mykey:
+ # Share cache with match-all/cp_list when the result is the
+ # same. Note that this requires that mydep.repo is None and
+ # thus mytree is also None.
+ level = "match-all"
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ elif level in ("bestmatch-visible", "match-all", "match-visible",
+ "minimum-all", "minimum-visible"):
+ # Find the minimum matching visible version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ mylist = self.cp_list(mykey, mytree=mytree)
+ else:
+ mylist = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ visibility_filter = level not in ("match-all", "minimum-all")
+ single_match = level not in ("match-all", "match-visible")
+ myval = []
+ aux_keys = list(self._aux_cache_keys)
+ if level == "bestmatch-visible":
+ iterfunc = reversed
+ else:
+ iterfunc = iter
+
+ if mydep.repo is not None:
+ repos = [mydep.repo]
+ else:
+ # We iterate over self.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in reversed(self.porttrees):
+ repos.append(self.repositories.get_name_for_location(tree))
+
+ for cpv in iterfunc(mylist):
+ for repo in repos:
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys, myrepo=repo)))
+ except KeyError:
+ # ebuild not in this repo, or masked by corruption
+ continue
+
+ try:
+ pkg_str = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
+ if visibility_filter and not self._visible(pkg_str, metadata):
+ continue
+
+ if mydep.slot is not None and \
+ not _match_slot(mydep, pkg_str):
+ continue
+
+ if mydep.unevaluated_atom.use is not None and \
+ not self._match_use(mydep, pkg_str, metadata):
+ continue
+
+ myval.append(pkg_str)
+ # only yield a given cpv once
+ break
+
+ if myval and single_match:
+ break
+
+ if single_match:
+ if myval:
+ myval = myval[0]
+ else:
+ myval = ""
+
+ elif level == "bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ warnings.warn("The 'bestmatch-list' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+ DeprecationWarning, stacklevel=2)
+ myval = best(list(self._iter_match(mydep, mylist)))
+ elif level == "match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ warnings.warn("The 'match-list' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+ DeprecationWarning, stacklevel=2)
+ myval = list(self._iter_match(mydep, mylist))
+ else:
+ raise AssertionError(
+ "Invalid level argument: '%s'" % level)
+
+ if self.frozen:
+ xcache_this_level = self.xcache.get(level)
+ if xcache_this_level is not None:
+ xcache_this_level[cache_key] = myval
+ if not isinstance(myval, _pkg_str):
+ myval = myval[:]
+
+ return myval
+
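+ # Usage sketch for the xmatch() levels handled above (hypothetical
+ # atoms; list levels return lists, single-match levels return a cpv
+ # string or ''):
+ #
+ #     pdb.xmatch('match-all', 'sys-apps/foo')      # all versions
+ #     pdb.xmatch('match-visible', 'sys-apps/foo')  # unmasked versions
+ #     pdb.xmatch('bestmatch-visible', '>=sys-apps/foo-1.0')
+ #     # => highest visible matching cpv, or '' if none match
+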
+ def match(self, mydep, use_cache=1):
+ return self.xmatch("match-visible", mydep)
+
+ def gvisible(self, mylist):
+ warnings.warn("The 'gvisible' method of "
+ "portage.dbapi.porttree.portdbapi "
+ "is deprecated",
+ DeprecationWarning, stacklevel=2)
+ return list(self._iter_visible(iter(mylist)))
+
+ def visible(self, cpv_iter):
+ warnings.warn("The 'visible' method of "
+ "portage.dbapi.porttree.portdbapi "
+ "is deprecated",
+ DeprecationWarning, stacklevel=2)
+ if cpv_iter is None:
+ return []
+ return list(self._iter_visible(iter(cpv_iter)))
+
+ def _iter_visible(self, cpv_iter, myrepo=None):
+ """
+ Yield only the visible packages from cpv_iter.
+ """
+ aux_keys = list(self._aux_cache_keys)
+ metadata = {}
+
+ if myrepo is not None:
+ repos = [myrepo]
+ else:
+ # We iterate over self.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in reversed(self.porttrees):
+ repos.append(self.repositories.get_name_for_location(tree))
+
+ for mycpv in cpv_iter:
+ for repo in repos:
+ metadata.clear()
+ try:
+ metadata.update(zip(aux_keys,
+ self.aux_get(mycpv, aux_keys, myrepo=repo)))
+ except KeyError:
+ continue
+ except PortageException as e:
+ writemsg("!!! Error: aux_get('%s', %s)\n" %
+ (mycpv, aux_keys), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ del e
+ continue
+
+ if not self._visible(mycpv, metadata):
+ continue
+
+ yield mycpv
+ # only yield a given cpv once
+ break
+
+ def _visible(self, cpv, metadata):
+ eapi = metadata["EAPI"]
+ if not eapi_is_supported(eapi):
+ return False
+ if _eapi_is_deprecated(eapi):
+ return False
+ if not metadata["SLOT"]:
+ return False
+
+ settings = self.settings
+ if settings._getMaskAtom(cpv, metadata):
+ return False
+ if settings._getMissingKeywords(cpv, metadata):
+ return False
+ if settings.local_config:
+ metadata['CHOST'] = settings.get('CHOST', '')
+ if not settings._accept_chost(cpv, metadata):
+ return False
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or \
+ "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(cpv, mydb=metadata)
+ metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+ try:
+ if settings._getMissingLicenses(cpv, metadata):
+ return False
+ if settings._getMissingProperties(cpv, metadata):
+ return False
+ if settings._getMissingRestrict(cpv, metadata):
+ return False
+ except InvalidDependString:
+ return False
+
+ return True
+
+class portagetree(object):
+ def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
+ settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: deprecated, defaults to settings['ROOT']
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if root is not DeprecationWarning:
+ warnings.warn("The root parameter of the " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.porttree.portagetree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ self.portroot = settings["PORTDIR"]
+ self.__virtual = virtual
+ self.dbapi = portdbapi(mysettings=settings)
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ @property
+ def virtual(self):
+ warnings.warn("The 'virtual' attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated.",
+ DeprecationWarning, stacklevel=3)
+ return self.__virtual
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def getname(self, pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit = pkgname.split("/")
+ psplit = pkgsplit(mysplit[1])
+ return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ pass
+ return myslot
+
+class FetchlistDict(Mapping):
+ """
+ This provides a mapping interface for retrieving fetch lists. It's used
+ to allow portage.manifest.Manifest to access fetch lists via a standard
+ mapping interface rather than using the dbapi directly.
+ """
+ def __init__(self, pkgdir, settings, mydbapi):
+ """pkgdir is a directory containing ebuilds and settings is passed into
+ portdbapi.getfetchlist for __getitem__ calls."""
+ self.pkgdir = pkgdir
+ self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+ self.settings = settings
+ self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+ self.portdb = mydbapi
+
+ def __getitem__(self, pkg_key):
+ """Returns the complete fetch list for a given package."""
+ return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
+
+ def __contains__(self, cpv):
+ return cpv in self.__iter__()
+
+ def has_key(self, pkg_key):
+ """Returns true if the given package exists within pkgdir."""
+ warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
+ "deprecated, use the 'in' operator instead",
+ DeprecationWarning, stacklevel=2)
+ return pkg_key in self
+
+ def __iter__(self):
+ return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def keys(self):
+ """Returns keys for all packages within pkgdir"""
+ return self.portdb.cp_list(self.cp, mytree=self.mytree)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+def _parse_uri_map(cpv, metadata, use=None):
+
+ myuris = use_reduce(metadata.get('SRC_URI', ''),
+ uselist=use, matchall=(use is None),
+ is_src_uri=True,
+ eapi=metadata['EAPI'])
+
+ uri_map = OrderedDict()
+
+ myuris.reverse()
+ while myuris:
+ uri = myuris.pop()
+ if myuris and myuris[-1] == "->":
+ myuris.pop()
+ distfile = myuris.pop()
+ else:
+ distfile = os.path.basename(uri)
+ if not distfile:
+ raise portage.exception.InvalidDependString(
+ ("getFetchMap(): '%s' SRC_URI has no file " + \
+ "name: '%s'") % (cpv, uri))
+
+ uri_set = uri_map.get(distfile)
+ if uri_set is None:
+ # Use OrderedDict to preserve order from SRC_URI
+ # while ensuring uniqueness.
+ uri_set = OrderedDict()
+ uri_map[distfile] = uri_set
+
+ # SRC_URI may contain a file name with no scheme, and in
+ # this case it does not belong in uri_set.
+ if urlparse(uri).scheme:
+ uri_set[uri] = True
+
+ # Convert OrderedDicts to tuples.
+ for k, v in uri_map.items():
+ uri_map[k] = tuple(v)
+
+ return uri_map
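+# Example of the SRC_URI "->" rename syntax handled above (hypothetical
+# metadata): "http://example.org/v1.tar.gz -> foo-1.0.tar.gz" maps the
+# distfile name to its fetch location:
+#
+#     {'foo-1.0.tar.gz': ('http://example.org/v1.tar.gz',)}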
diff --git a/usr/lib/portage/pym/portage/dbapi/vartree.py b/usr/lib/portage/pym/portage/dbapi/vartree.py
new file mode 100644
index 0000000..a0881a2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/vartree.py
@@ -0,0 +1,5326 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, unicode_literals
+
+__all__ = [
+ "vardbapi", "vartree", "dblink"] + \
+ ["write_contents", "tar_contents"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:_perform_md5_merge@perform_md5',
+ 'portage.data:portage_gid,portage_uid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dbapi._SyncfsProcess:SyncfsProcess',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
+ 'use_reduce,_slot_separator,_repo_separator',
+ 'portage.eapi:_get_eapi_attrs',
+ 'portage.elog:collect_ebuild_messages,collect_messages,' + \
+ 'elog_process,_merge_logentries',
+ 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_merge_unicode_error', '_spawn_phase',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
+ 'portage.process:find_binary',
+ 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
+ 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
+ 'grabdict,normalize_path,new_protect_filename',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.listdir:dircache,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.writeable_check:get_ro_checker',
+ 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
+ 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.util._dyn_libs.LinkageMapMachO:LinkageMapMachO',
+ 'portage.util._dyn_libs.LinkageMapPeCoff:LinkageMapPeCoff',
+ 'portage.util._dyn_libs.LinkageMapXCoff:LinkageMapXCoff',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
+ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
+ 'subprocess',
+ 'tarfile',
+)
+
+from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
+ MERGING_IDENTIFIER, PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH, EPREFIX, EPREFIX_LSTRIP, BASH_BINARY
+from portage.dbapi import dbapi
+from portage.exception import CommandNotFound, \
+ InvalidData, InvalidLocation, InvalidPackageName, \
+ FileNotFound, PermissionDenied, UnsupportedAPIException
+from portage.localization import _
+
+from portage import abssymlink, _movefile, bsd_chflags
+
+# This is a special version of the os module, wrapped for unicode support.
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _os_merge
+from portage import _selinux_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.SpawnProcess import SpawnProcess
+
+import errno
+import fnmatch
+import gc
+import grp
+import io
+from itertools import chain
+import logging
+import os as _os
+import platform
+import pwd
+import re
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+
+class vardbapi(dbapi):
+
+ _excluded_dirs = ["CVS", "lost+found"]
+ _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
+ _excluded_dirs = re.compile(r'^(\..*|' + MERGING_IDENTIFIER + '.*|' + \
+ "|".join(_excluded_dirs) + r')$')
+
+ _aux_cache_version = "1"
+ _owners_cache_version = "1"
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _aux_cache_threshold = 5
+
+ _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
+ _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
+
+ def __init__(self, _unused_param=DeprecationWarning,
+ categories=None, settings=None, vartree=None):
+ """
+ The categories parameter is unused since the dbapi class
+ now has a categories property that is generated from the
+ available packages.
+ """
+
+ # Used by emerge to check whether any packages
+ # have been added or removed.
+ self._pkgs_changed = False
+
+ # The _aux_cache_threshold doesn't work as designed
+ # if the cache is flushed from a subprocess, so we
+ # use this to avoid wasteful vdb cache updates.
+ self._flush_cache_enabled = True
+
+ #cache for category directory mtimes
+ self.mtdircache = {}
+
+ #cache for dependency checks
+ self.matchcache = {}
+
+ #cache for cp_list results
+ self.cpcache = {}
+
+ self.blockers = None
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if _unused_param is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
+ "portage.dbapi.vartree.vardbapi"
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ self._eroot = settings['EROOT']
+ self._dbroot = self._eroot + VDB_PATH
+ self._lock = None
+ self._lock_count = 0
+
+ self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
+ self._fs_lock_obj = None
+ self._fs_lock_count = 0
+
+ if vartree is None:
+ vartree = portage.db[settings['EROOT']]['vartree']
+ self.vartree = vartree
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
+ "EAPI", "HDEPEND", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
+ "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
+ ])
+ self._aux_cache_obj = None
+ self._aux_cache_filename = os.path.join(self._eroot,
+ CACHE_PATH, "vdb_metadata.pickle")
+ self._counter_path = os.path.join(self._eroot,
+ CACHE_PATH, "counter")
+
+ self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
+ os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
+ self._linkmap = LinkageMap(self)
+ chost = self.settings.get('CHOST')
+ if not chost:
+ chost = 'lunix?' # this happens when profiles are not available
+ if chost.find('darwin') >= 0:
+ self._linkmap = LinkageMapMachO(self)
+ elif chost.find('interix') >= 0 or chost.find('winnt') >= 0:
+ self._linkmap = LinkageMapPeCoff(self)
+ elif chost.find('aix') >= 0:
+ self._linkmap = LinkageMapXCoff(self)
+ else:
+ self._linkmap = LinkageMap(self)
+ self._owners = self._owners_db(self)
+
+ self._cached_counter = None
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.vartree.vardbapi"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def getpath(self, mykey, filename=None):
+ # This is an optimized hotspot, so don't use unicode-wrapped
+ # os module and don't use os.path.join().
+ rValue = self._eroot + VDB_PATH + _os.sep + mykey
+ if filename is not None:
+ # If filename is always relative, we can do just
+ # rValue += _os.sep + filename
+ rValue = _os.path.join(rValue, filename)
+ return rValue
+
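+ # Path layout produced by getpath(), with VDB_PATH = 'var/db/pkg'
+ # (illustrative values):
+ #
+ #     vardb.getpath('sys-apps/foo-1.0')
+ #     # => <eroot>/var/db/pkg/sys-apps/foo-1.0
+ #     vardb.getpath('sys-apps/foo-1.0', filename='COUNTER')
+ #     # => <eroot>/var/db/pkg/sys-apps/foo-1.0/COUNTER
+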
+ def lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes. State is inherited by subprocesses, allowing subprocesses
+ to reenter a lock that was acquired by a parent process. However,
+ a lock can be released only by the same process that acquired it.
+ """
+ if self._lock_count:
+ self._lock_count += 1
+ else:
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ # At least the parent needs to exist for the lock file.
+ ensure_dirs(self._dbroot)
+ self._lock = lockdir(self._dbroot)
+ self._lock_count += 1
+
+ def unlock(self):
+ """
+ Release a lock, decrementing the recursion level. Each unlock() call
+ must be matched with a prior lock() call; an AssertionError is raised
+ if unlock() is called while not locked.
+ """
+ if self._lock_count > 1:
+ self._lock_count -= 1
+ else:
+ if self._lock is None:
+ raise AssertionError("not locked")
+ self._lock_count = 0
+ unlockdir(self._lock)
+ self._lock = None
+
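+ # Sketch of the reentrant locking discipline documented above: every
+ # lock() must be paired with an unlock() in the same process.
+ #
+ #     vardb.lock()
+ #     try:
+ #         pass  # inspect or modify the vdb
+ #     finally:
+ #         vardb.unlock()
+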
+ def _fs_lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes.
+ """
+ if self._fs_lock_count < 1:
+ if self._fs_lock_obj is not None:
+ raise AssertionError("already locked")
+ try:
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ self._fs_lock_count += 1
+
+ def _fs_unlock(self):
+ """
+ Release a lock, decrementing the recursion level.
+ """
+ if self._fs_lock_count <= 1:
+ if self._fs_lock_obj is None:
+ raise AssertionError("not locked")
+ unlockfile(self._fs_lock_obj)
+ self._fs_lock_obj = None
+ self._fs_lock_count -= 1
+
+ def _bump_mtime(self, cpv):
+ """
+ This is called before and after any modifications, so that consumers
+ can use directory mtimes to validate caches. See bug #290428.
+ """
+ base = self._eroot + VDB_PATH
+ cat = catsplit(cpv)[0]
+ catdir = base + _os.sep + cat
+ t = time.time()
+ t = (t, t)
+ try:
+ for x in (catdir, base):
+ os.utime(x, t)
+ except OSError:
+ ensure_dirs(catdir)
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.getpath(mykey))
+
+ def cpv_counter(self, mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+ except (KeyError, ValueError):
+ pass
+ writemsg_level(_("portage: COUNTER for %s was corrupted; " \
+ "resetting to value of 0\n") % (mycpv,),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ def cpv_inject(self, mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ ensure_dirs(self.getpath(mycpv))
+ counter = self.counter_tick(mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+ def isInjected(self, mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+ return True
+ if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+ return True
+ return False
+
+ def move_ent(self, mylist, repo_match=None):
+ origcp = mylist[1]
+ newcp = mylist[2]
+
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ origmatches = self.match(origcp, use_cache=0)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
+ mycpv_cp = cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
+ mynewcat = catsplit(newcp)[0]
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+ moves += 1
+ if not os.path.exists(self.getpath(mynewcat)):
+ #create the directory
+ ensure_dirs(self.getpath(mynewcat))
+ newpath = self.getpath(mynewcpv)
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ _movefile(origpath, newpath, mysettings=self.settings)
+ self._clear_pkg_cache(self._dblink(mycpv))
+ self._clear_pkg_cache(self._dblink(mynewcpv))
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+
+ return moves
+
+ def cp_list(self, mycp, use_cache=1):
+ mysplit=catsplit(mycp)
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ if sys.hexversion >= 0x3030000:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
+ else:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ except OSError:
+ mystat = 0
+ if use_cache and mycp in self.cpcache:
+ cpc = self.cpcache[mycp]
+ if cpc[0] == mystat:
+ return cpc[1][:]
+ cat_dir = self.getpath(mysplit[0])
+ try:
+ dir_list = os.listdir(cat_dir)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(cat_dir)
+ del e
+ dir_list = []
+
+ returnme = []
+ for x in dir_list:
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ ps = pkgsplit(x)
+ if not ps:
+ self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+ continue
+ if len(mysplit) > 1:
+ if ps[0] == mysplit[1]:
+ returnme.append(_pkg_str(mysplit[0]+"/"+x))
+ self._cpv_sort_ascending(returnme)
+ if use_cache:
+ self.cpcache[mycp] = [mystat, returnme[:]]
+ elif mycp in self.cpcache:
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self, use_cache=1):
+ """
+ Set use_cache=0 to bypass the portage.cachedir() cache in cases
+ when the accuracy of mtime staleness checks should not be trusted
+ (generally this is only necessary in critical sections that
+ involve merge or unmerge of packages).
+ """
+ returnme = []
+ basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
+
+ if use_cache:
+ from portage import listdir
+ else:
+ def listdir(p, **kwargs):
+ try:
+ return [x for x in os.listdir(p) \
+ if os.path.isdir(os.path.join(p, x))]
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(p)
+ del e
+ return []
+
+ for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ if not self._category_re.match(x):
+ continue
+ for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
+ if self._excluded_dirs.match(y) is not None:
+ continue
+ subpath = x + "/" + y
+ # -MERGING- should never be a cpv, nor should files.
+ try:
+ if catpkgsplit(subpath) is None:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ except InvalidData:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ returnme.append(subpath)
+
+ return returnme
+
+ def cp_all(self, use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ try:
+ mysplit = catpkgsplit(y)
+ except InvalidData:
+ self.invalidentry(self.getpath(y))
+ continue
+ if not mysplit:
+ self.invalidentry(self.getpath(y))
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return list(d)
+
+ def checkblockers(self, origdep):
+ pass
+
+ def _clear_cache(self):
+ self.mtdircache.clear()
+ self.matchcache.clear()
+ self.cpcache.clear()
+ self._aux_cache_obj = None
+
+ def _add(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _remove(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _clear_pkg_cache(self, pkg_dblink):
+ # Due to 1 second mtime granularity in <python-2.5, mtime checks
+ # are not always sufficient to invalidate vardbapi caches. Therefore,
+ # the caches need to be actively invalidated here.
+ self.mtdircache.pop(pkg_dblink.cat, None)
+ self.matchcache.pop(pkg_dblink.cat, None)
+ self.cpcache.pop(pkg_dblink.mysplit[0], None)
+ dircache.pop(pkg_dblink.dbcatdir, None)
+
+ def match(self, origdep, use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ cache_key = (mydep, mydep.unevaluated_atom)
+ mykey = dep_getkey(mydep)
+ mycat = catsplit(mykey)[0]
+ if not use_cache:
+ if mycat in self.matchcache:
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ try:
+ if sys.hexversion >= 0x3030000:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
+ else:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ except (IOError, OSError):
+ curmtime=0
+
+ if mycat not in self.matchcache or \
+ self.mtdircache[mycat] != curmtime:
+ # clear cache entry
+ self.mtdircache[mycat] = curmtime
+ self.matchcache[mycat] = {}
+ if mydep not in self.matchcache[mycat]:
+ mymatch = list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ self.matchcache[mycat][cache_key] = mymatch
+ return self.matchcache[mycat][cache_key][:]
+
+ def findname(self, mycpv, myrepo=None):
+ return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._flush_cache_enabled and \
+ self._aux_cache is not None and \
+ len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
+ secpass >= 2:
+ self._owners.populate() # index any unindexed contents
+ valid_nodes = set(self.cpv_all())
+ for cpv in list(self._aux_cache["packages"]):
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ try:
+ f = atomic_ofstream(self._aux_cache_filename, 'wb')
+ pickle.dump(self._aux_cache, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(
+ self._aux_cache_filename, gid=portage_gid, mode=0o644)
+ except (IOError, OSError) as e:
+ pass
+ self._aux_cache["modified"] = set()
+
+ @property
+ def _aux_cache(self):
+ if self._aux_cache_obj is None:
+ self._aux_cache_init()
+ return self._aux_cache_obj
+
+ def _aux_cache_init(self):
+ aux_cache = None
+ open_kwargs = {}
+ if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
+ # Buffered io triggers extreme performance issues in
+ # Unpickler.load() (problem observed with python-3.0.1).
+ # Unfortunately, performance is still poor relative to
+ # python-2.x, but buffering makes it much worse (problem
+ # appears to be solved in Python >=3.2 at least).
+ open_kwargs["buffering"] = 0
+ try:
+ with open(_unicode_encode(self._aux_cache_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb', **open_kwargs) as f:
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
+ if isinstance(e, EnvironmentError) and \
+ getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (self._aux_cache_filename, e), noiselevel=-1)
+ del e
+
+ if not aux_cache or \
+ not isinstance(aux_cache, dict) or \
+ aux_cache.get("version") != self._aux_cache_version or \
+ not aux_cache.get("packages"):
+ aux_cache = {"version": self._aux_cache_version}
+ aux_cache["packages"] = {}
+
+ owners = aux_cache.get("owners")
+ if owners is not None:
+ if not isinstance(owners, dict):
+ owners = None
+ elif "version" not in owners:
+ owners = None
+ elif owners["version"] != self._owners_cache_version:
+ owners = None
+ elif "base_names" not in owners:
+ owners = None
+ elif not isinstance(owners["base_names"], dict):
+ owners = None
+
+ if owners is None:
+ owners = {
+ "base_names" : {},
+ "version" : self._owners_cache_version
+ }
+ aux_cache["owners"] = owners
+
+ aux_cache["modified"] = set()
+ self._aux_cache_obj = aux_cache
+
+ def aux_get(self, mycpv, wants, myrepo = None):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+ {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
+
+ If an error occurs while loading the cache pickle or the version is
+ unrecognized, the cache will simply be recreated from scratch (it is
+ completely disposable).
+ """
+ cache_these_wants = self._aux_cache_keys.intersection(wants)
+ for x in wants:
+ if self._aux_cache_keys_re.match(x) is not None:
+ cache_these_wants.add(x)
+
+ if not cache_these_wants:
+ mydata = self._aux_get(mycpv, wants)
+ return [mydata[x] for x in wants]
+
+ cache_these = set(self._aux_cache_keys)
+ cache_these.update(cache_these_wants)
+
+ mydir = self.getpath(mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ # Use float mtime when available.
+ mydir_mtime = mydir_stat.st_mtime
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ pull_me = cache_these.union(wants)
+ mydata = {"_mtime_" : mydir_mtime}
+ cache_valid = False
+ cache_incomplete = False
+ cache_mtime = None
+ metadata = None
+ if pkg_data is not None:
+ if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
+ pkg_data = None
+ else:
+ cache_mtime, metadata = pkg_data
+ if not isinstance(cache_mtime, (float, long, int)) or \
+ not isinstance(metadata, dict):
+ pkg_data = None
+
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ if isinstance(cache_mtime, float):
+ cache_valid = cache_mtime == mydir_stat.st_mtime
+ else:
+ # Cache may contain integer mtime.
+ cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
+
+ if cache_valid:
+ # Migrate old metadata to unicode.
+ for k, v in metadata.items():
+ metadata[k] = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+
+ mydata.update(metadata)
+ pull_me.difference_update(mydata)
+
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
+ if not cache_valid or cache_these.difference(metadata):
+ cache_data = {}
+ if cache_valid and metadata:
+ cache_data.update(metadata)
+ for aux_key in cache_these:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][_unicode(mycpv)] = \
+ (mydir_mtime, cache_data)
+ self._aux_cache["modified"].add(mycpv)
+
+ eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
+ if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
+ # Empty or invalid slot triggers InvalidAtom exceptions when
+ # generating slot atoms for packages, so translate it to '0' here.
+ mydata['SLOT'] = '0'
+
+ return [mydata[x] for x in wants]
+
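+ # Usage sketch (hypothetical cpv): cached keys such as SLOT come from
+ # vdb_metadata.pickle while the package directory mtime still matches;
+ # anything else is read from the files in the package's vdb directory.
+ #
+ #     counter, slot = vardb.aux_get('sys-apps/foo-1.0',
+ #         ['COUNTER', 'SLOT'])
+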
+ def _aux_get(self, mycpv, wants, st=None):
+ mydir = self.getpath(mycpv)
+ if st is None:
+ try:
+ st = os.stat(mydir)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(mycpv)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mydir)
+ else:
+ raise
+ if not stat.S_ISDIR(st.st_mode):
+ raise KeyError(mycpv)
+ results = {}
+ env_keys = []
+ for x in wants:
+ if x == "_mtime_":
+ results[x] = st[stat.ST_MTIME]
+ continue
+ try:
+ with io.open(
+ _unicode_encode(os.path.join(mydir, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ myd = f.read()
+ except IOError:
+ if x not in self._aux_cache_keys and \
+ self._aux_cache_keys_re.match(x) is None:
+ env_keys.append(x)
+ continue
+ myd = ''
+
+ # Preserve \n for metadata that is known to
+ # contain multiple lines.
+ if self._aux_multi_line_re.match(x) is None:
+ myd = " ".join(myd.split())
+
+ results[x] = myd
+
+ if env_keys:
+ env_results = self._aux_env_search(mycpv, env_keys)
+ for k in env_keys:
+ v = env_results.get(k)
+ if v is None:
+ v = ''
+ if self._aux_multi_line_re.match(k) is None:
+ v = " ".join(v.split())
+ results[k] = v
+
+ if results.get("EAPI") == "":
+ results["EAPI"] = '0'
+
+ return results
+
+ def _aux_env_search(self, cpv, variables):
+ """
+ Search environment.bz2 for the specified variables. Returns
+ a dict mapping variables to values; variables not found in the
+ environment are omitted from the dict.
+ This is useful for querying variables like ${SRC_URI} and
+ ${A}, which are not saved in separate files but are available
+ in environment.bz2 (see bug #395463).
+ """
+ env_file = self.getpath(cpv, filename="environment.bz2")
+ if not os.path.isfile(env_file):
+ return {}
+ bunzip2_cmd = portage.util.shlex_split(
+ self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
+ if not bunzip2_cmd:
+ bunzip2_cmd = portage.util.shlex_split(
+ self.settings["PORTAGE_BZIP2_COMMAND"])
+ bunzip2_cmd.append("-d")
+ args = bunzip2_cmd + ["-c", env_file]
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise portage.exception.CommandNotFound(args[0])
+
+ # Parts of the following code are borrowed from
+ # filter-bash-environment.py (keep them in sync).
+ var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
+ close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+ def have_end_quote(quote, line):
+ close_quote_match = close_quote_re.search(line)
+ return close_quote_match is not None and \
+ close_quote_match.group(1) == quote
+
+ variables = frozenset(variables)
+ results = {}
+ for line in proc.stdout:
+ line = _unicode_decode(line,
+ encoding=_encodings['content'], errors='replace')
+ var_assign_match = var_assign_re.match(line)
+ if var_assign_match is not None:
+ key = var_assign_match.group(2)
+ quote = var_assign_match.group(3)
+ if quote is not None:
+ if have_end_quote(quote,
+ line[var_assign_match.end(2)+2:]):
+ value = var_assign_match.group(4)
+ else:
+ value = [var_assign_match.group(4)]
+ for line in proc.stdout:
+ line = _unicode_decode(line,
+ encoding=_encodings['content'],
+ errors='replace')
+ value.append(line)
+ if have_end_quote(quote, line):
+ break
+ value = ''.join(value)
+ # remove trailing quote and whitespace
+ value = value.rstrip()[:-1]
+ else:
+ value = var_assign_match.group(4).rstrip()
+
+ if key in variables:
+ results[key] = value
+
+ proc.wait()
+ proc.stdout.close()
+ return results
+
+ def aux_update(self, cpv, values):
+ mylink = self._dblink(cpv)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ self._bump_mtime(cpv)
+ self._clear_pkg_cache(mylink)
+ for k, v in values.items():
+ if v:
+ mylink.setfile(k, v)
+ else:
+ try:
+ os.unlink(os.path.join(self.getpath(cpv), k))
+ except EnvironmentError:
+ pass
+ self._bump_mtime(cpv)
+
+ def counter_tick(self, myroot=None, mycpv=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ return self.counter_tick_core(incrementing=1, mycpv=mycpv)
+
+ def get_counter_tick_core(self, myroot=None, mycpv=None):
+ """
+ Use this method to retrieve the counter instead of
+ trusting the value of a global counter file, which
+ can lead to invalid COUNTER generation. When the
+ cache is valid, the package COUNTER
+ files are not read and we rely on the timestamp of
+ the package directory to validate cache. The stat
+ calls should only take a short time, so performance
+ is sufficient without having to rely on a potentially
+ corrupt global counter file.
+
+ The global counter file located at
+ $CACHE_PATH/counter serves to record the
+ counter of the last installed package and
+ it also corresponds to the total number of
+ installation actions that have occurred in
+ the history of this package database.
+
+ @param myroot: ignored, self._eroot is used instead
+ """
+ del myroot
+ counter = -1
+ try:
+ with io.open(
+ _unicode_encode(self._counter_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ try:
+ counter = long(f.readline().strip())
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ except EnvironmentError as e:
+ # Silently allow ENOENT since files under
+ # /var/cache/ are allowed to disappear.
+ if e.errno != errno.ENOENT:
+ writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+
+ if self._cached_counter == counter:
+ max_counter = counter
+ else:
+ # We must ensure that we return a counter
+ # value that is at least as large as the
+ # highest one from the installed packages,
+ # since having a corrupt value that is too low
+ # can trigger incorrect AUTOCLEAN behavior due
+ # to newly installed packages having lower
+ # COUNTERs than the previous version in the
+ # same slot.
+ max_counter = counter
+ for cpv in self.cpv_all():
+ try:
+ pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
+ except (KeyError, OverflowError, ValueError):
+ continue
+ if pkg_counter > max_counter:
+ max_counter = pkg_counter
+
+ return max_counter + 1
+
+ def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
+ """
+ This method will grab the next COUNTER value and record it back
+ to the global file. Note that every package install must have
+ a unique counter, since a slotmove update can move two packages
+ into the same SLOT and in that case it's important that both
+ packages have different COUNTER metadata.
+
+ @param myroot: ignored, self._eroot is used instead
+ @param mycpv: ignored
+ @rtype: int
+ @return: new counter value
+ """
+ myroot = None
+ mycpv = None
+ self.lock()
+ try:
+ counter = self.get_counter_tick_core() - 1
+ if incrementing:
+ #increment counter
+ counter += 1
+ # update new global counter file
+ try:
+ write_atomic(self._counter_path, str(counter))
+ except InvalidLocation:
+ self.settings._init_dirs()
+ write_atomic(self._counter_path, str(counter))
+ self._cached_counter = counter
+
+ # Since we hold a lock, this is a good opportunity
+ # to flush the cache. Note that this will only
+ # flush the cache periodically in the main process
+ # when _aux_cache_threshold is exceeded.
+ self.flush_cache()
+ finally:
+ self.unlock()
+
+ return counter
+
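+ # Usage sketch: each install takes a fresh counter under the vdb lock,
+ # so values stay unique and monotonic across slots:
+ #
+ #     counter = vardb.counter_tick()        # increments and persists
+ #     peek = vardb.get_counter_tick_core()  # next value, no write
+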
+ def _dblink(self, cpv):
+ category, pf = catsplit(cpv)
+ return dblink(category, pf, settings=self.settings,
+ vartree=self.vartree, treetype="vartree")
+
+ def removeFromContents(self, pkg, paths, relative_paths=True):
+ """
+ @param pkg: cpv for an installed package
+ @type pkg: string
+ @param paths: paths of files to remove from contents
+ @type paths: iterable
+ """
+ if not hasattr(pkg, "getcontents"):
+ pkg = self._dblink(pkg)
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ new_contents = pkg.getcontents().copy()
+ removed = 0
+
+ for filename in paths:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+ filename = normalize_path(filename)
+ if relative_paths:
+ relative_filename = filename
+ else:
+ relative_filename = filename[root_len:]
+ contents_key = pkg._match_contents(relative_filename)
+ if contents_key:
+ # It's possible for two different paths to refer to the same
+ # contents_key, due to directory symlinks. Therefore, pass a
+ # default value to pop, in order to avoid a KeyError which
+ # could otherwise be triggered (see bug #454400).
+ new_contents.pop(contents_key, None)
+ removed += 1
+
+ if removed:
+ self.writeContentsToContentsFile(pkg, new_contents)
+
+ def writeContentsToContentsFile(self, pkg, new_contents):
+ """
+ @param pkg: package to write contents file for
+ @type pkg: dblink
+ @param new_contents: contents to write to CONTENTS file
+ @type new_contents: contents dictionary of the form
+ {u'/path/to/file' : (contents_attribute 1, ...), ...}
+ """
+ root = self.settings['ROOT']
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
+
+ class _owners_cache(object):
+ """
+		This class maintains a hash table that serves to index package
+		contents by mapping the basename of a file to a list of possible
+		packages that own it. This is used to optimize owner lookups
+		by narrowing the search down to a smaller number of packages.
+ """
+ try:
+ from hashlib import md5 as _new_hash
+ except ImportError:
+ from md5 import new as _new_hash
+
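+		# Only the low 16 bits (4 hex digits) of each digest are kept,
+		# trading occasional collisions for compact cache keys.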
+ _hash_bits = 16
+ _hex_chars = _hash_bits // 4
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def add(self, cpv):
+ eroot_len = len(self._vardb._eroot)
+ contents = self._vardb._dblink(cpv).getcontents()
+
+ if "case-insensitive-fs" in self._vardb.settings.features:
+ contents = dict((k.lower(), v)
+ for k, v in contents.items())
+
+ pkg_hash = self._hash_pkg(cpv)
+ if not contents:
+ # Empty path is a code used to represent empty contents.
+ self._add_path("", pkg_hash)
+
+ for x in contents:
+ self._add_path(x[eroot_len:], pkg_hash)
+
+ self._vardb._aux_cache["modified"].add(cpv)
+
+ def _add_path(self, path, pkg_hash):
+ """
+ Empty path is a code that represents empty contents.
+ """
+ if path:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ if not name:
+ return
+ else:
+ name = path
+ name_hash = self._hash_str(name)
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ pkgs = base_names.get(name_hash)
+ if pkgs is None:
+ pkgs = {}
+ base_names[name_hash] = pkgs
+ pkgs[pkg_hash] = None
+
+ def _hash_str(self, s):
+ h = self._new_hash()
+ # Always use a constant utf_8 encoding here, since
+ # the "default" encoding can change.
+ h.update(_unicode_encode(s,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ h = h.hexdigest()
+ h = h[-self._hex_chars:]
+ h = int(h, 16)
+ return h
+
+ def _hash_pkg(self, cpv):
+ counter, mtime = self._vardb.aux_get(
+ cpv, ["COUNTER", "_mtime_"])
+ try:
+ counter = int(counter)
+ except ValueError:
+ counter = 0
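+			# COUNTER and _mtime_ change whenever a package is re-merged,
+			# so this tuple doubles as a cache invalidation key.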
+ return (_unicode(cpv), counter, mtime)
+
+ class _owners_db(object):
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def populate(self):
+ self._populate()
+
+ def _populate(self):
+ owners_cache = vardbapi._owners_cache(self._vardb)
+ cached_hashes = set()
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ # Take inventory of all cached package hashes.
+ for name, hash_values in list(base_names.items()):
+ if not isinstance(hash_values, dict):
+ del base_names[name]
+ continue
+ cached_hashes.update(hash_values)
+
+ # Create sets of valid package hashes and uncached packages.
+ uncached_pkgs = set()
+ hash_pkg = owners_cache._hash_pkg
+ valid_pkg_hashes = set()
+ for cpv in self._vardb.cpv_all():
+ hash_value = hash_pkg(cpv)
+ valid_pkg_hashes.add(hash_value)
+ if hash_value not in cached_hashes:
+ uncached_pkgs.add(cpv)
+
+ # Cache any missing packages.
+ for cpv in uncached_pkgs:
+ owners_cache.add(cpv)
+
+ # Delete any stale cache.
+ stale_hashes = cached_hashes.difference(valid_pkg_hashes)
+ if stale_hashes:
+ for base_name_hash, bucket in list(base_names.items()):
+ for hash_value in stale_hashes.intersection(bucket):
+ del bucket[hash_value]
+ if not bucket:
+ del base_names[base_name_hash]
+
+ return owners_cache
+
+ def get_owners(self, path_iter):
+ """
+			@return: the owners as a dblink -> set(files) mapping.
+ """
+ owners = {}
+ for owner, f in self.iter_owners(path_iter):
+ owned_files = owners.get(owner)
+ if owned_files is None:
+ owned_files = set()
+ owners[owner] = owned_files
+ owned_files.add(f)
+ return owners
+
+ def getFileOwnerMap(self, path_iter):
+ owners = self.get_owners(path_iter)
+ file_owners = {}
+ for pkg_dblink, files in owners.items():
+ for f in files:
+ owner_set = file_owners.get(f)
+ if owner_set is None:
+ owner_set = set()
+ file_owners[f] = owner_set
+ owner_set.add(pkg_dblink)
+ return file_owners
+
+ def iter_owners(self, path_iter):
+ """
+			Iterate over tuples of (dblink, path). To avoid holding
+			resources for long periods, they are only allocated for the
+			duration of a given iter_owners() call. Therefore, to maximize
+			reuse of resources when searching for multiple files, it's best
+			to search for them all in a single call.
+ """
+
+ if not isinstance(path_iter, list):
+ path_iter = list(path_iter)
+ owners_cache = self._populate()
+ vardb = self._vardb
+ root = vardb._eroot
+ hash_pkg = owners_cache._hash_pkg
+ hash_str = owners_cache._hash_str
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ case_insensitive = "case-insensitive-fs" \
+ in vardb.settings.features
+
+ dblink_cache = {}
+
+ def dblink(cpv):
+ x = dblink_cache.get(cpv)
+ if x is None:
+ if len(dblink_cache) > 20:
+ # Ensure that we don't run out of memory.
+ raise StopIteration()
+ x = self._vardb._dblink(cpv)
+ dblink_cache[cpv] = x
+ return x
+
+ while path_iter:
+
+ path = path_iter.pop()
+ if case_insensitive:
+ path = path.lower()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+
+ if not name:
+ continue
+
+ name_hash = hash_str(name)
+ pkgs = base_names.get(name_hash)
+ owners = []
+ if pkgs is not None:
+ try:
+ for hash_value in pkgs:
+ if not isinstance(hash_value, tuple) or \
+ len(hash_value) != 3:
+ continue
+ cpv, counter, mtime = hash_value
+ if not isinstance(cpv, basestring):
+ continue
+ try:
+ current_hash = hash_pkg(cpv)
+ except KeyError:
+ continue
+
+ if current_hash != hash_value:
+ continue
+
+ if is_basename:
+ for p in dblink(cpv).getcontents():
+ if case_insensitive:
+ p = p.lower()
+ if os.path.basename(p) == name:
+ owners.append((cpv, p[len(root):]))
+ else:
+ if dblink(cpv).isowner(path):
+ owners.append((cpv, path))
+
+ except StopIteration:
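+						# The dblink cache hit its size limit; put the current
+						# path back and finish the search in low-memory mode.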
+ path_iter.append(path)
+ del owners[:]
+ dblink_cache.clear()
+ gc.collect()
+ for x in self._iter_owners_low_mem(path_iter):
+ yield x
+ return
+ else:
+ for cpv, p in owners:
+ yield (dblink(cpv), p)
+
+ def _iter_owners_low_mem(self, path_list):
+ """
+			This implementation will make a short-lived dblink instance (and
+			parse CONTENTS) for every single installed package. This is
+			slower but uses less memory than the method which uses the
+			basename cache.
+ """
+
+ if not path_list:
+ return
+
+ case_insensitive = "case-insensitive-fs" \
+ in self._vardb.settings.features
+ path_info_list = []
+ for path in path_list:
+ if case_insensitive:
+ path = path.lower()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ path_info_list.append((path, name, is_basename))
+
+ # Do work via the global event loop, so that it can be used
+ # for indication of progress during the search (bug #461412).
+ event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ root = self._vardb._eroot
+
+ def search_pkg(cpv):
+ dblnk = self._vardb._dblink(cpv)
+ for path, name, is_basename in path_info_list:
+ if is_basename:
+ for p in dblnk.getcontents():
+ if case_insensitive:
+ p = p.lower()
+ if os.path.basename(p) == name:
+ search_pkg.results.append((dblnk, p[len(root):]))
+ else:
+ if dblnk.isowner(path):
+ search_pkg.results.append((dblnk, path))
+ search_pkg.complete = True
+ return False
+
+ search_pkg.results = []
+
+ for cpv in self._vardb.cpv_all():
+ del search_pkg.results[:]
+ search_pkg.complete = False
+ event_loop.idle_add(search_pkg, cpv)
+ while not search_pkg.complete:
+ event_loop.iteration()
+ for result in search_pkg.results:
+ yield result
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
+ settings=None):
+
+ if settings is None:
+ settings = portage.settings
+
+ if root is not None and root != settings['ROOT']:
+ warnings.warn("The 'root' parameter of the "
+ "portage.dbapi.vartree.vartree"
+ " constructor is now unused. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.vartree.vartree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.dbapi = vardbapi(settings=settings, vartree=self)
+ self.populated = 1
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.vartree.vartree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def getpath(self, mykey, filename=None):
+ return self.dbapi.getpath(mykey, filename=filename)
+
+ def zap(self, mycpv):
+ return
+
+ def inject(self, mycpv):
+ return
+
+ def get_provide(self, mycpv):
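+		# Expand USE-conditional PROVIDE entries and reduce each
+		# one to a category/package key.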
+ myprovides = []
+ mylines = None
+ try:
+ mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
+ if mylines:
+ myuse = myuse.split()
+ mylines = use_reduce(mylines, uselist=myuse, flat=True)
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = myprovide.split("/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mydir = self.dbapi.getpath(mycpv)
+ writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
+ noiselevel=-1)
+ if mylines:
+ writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
+ noiselevel=-1)
+ writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if mykey in myprovides:
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self, mydep, use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self, mydep, use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch = match(mydep,self.dbapi)
+ mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self, cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getebuildpath(self, fullpackage):
+ cat, package = catsplit(fullpackage)
+ return self.getpath(fullpackage, filename=package+".ebuild")
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ return ""
+
+ def populate(self):
+ self.populated=1
+
+class dblink(object):
+ """
+	This class provides an interface to the installed package database.
+	At present this is implemented as a text backend in /var/db/pkg.
+ """
+
+ import re
+ _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
+
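+	# CONTENTS entries come in three forms: "dir|dev|fif <path>",
+	# "obj <path> <md5> <mtime>" and "sym <path> -> <target> <mtime>"
+	# (plus a legacy stat-tuple variant for sym, handled below).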
+ _contents_re = re.compile(r'^(' + \
+ r'(?P<dir>(dev|dir|fif) (.+))|' + \
+ r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
+ r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
+ r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
+ r')$'
+ )
+
+ # These files are generated by emerge, so we need to remove
+ # them when they are the only thing left in a directory.
+ _infodir_cleanup = frozenset(["dir", "dir.old"])
+
+ _ignored_unlink_errnos = (
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR)
+
+ _ignored_rmdir_errnos = (
+ errno.EEXIST, errno.ENOTEMPTY,
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR,
+ errno.EPERM)
+
+ def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
+ vartree=None, blockers=None, scheduler=None, pipe=None):
+ """
+ Creates a DBlink object for a given CPV.
+ The given CPV may not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: ignored, settings['ROOT'] is used instead
+ @type myroot: String (Path)
+ @param settings: Typically portage.settings
+ @type settings: portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ self._eroot = mysettings['EROOT']
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat + "/" + self.pkg
+ if self.mycpv == settings.mycpv and \
+ isinstance(settings.mycpv, _pkg_str):
+ self.mycpv = settings.mycpv
+ else:
+ self.mycpv = _pkg_str(self.mycpv)
+ self.mysplit = list(self.mycpv.cpv_split[1:])
+ self.mysplit[0] = self.mycpv.cp
+ self.treetype = treetype
+ if vartree is None:
+ vartree = portage.db[self._eroot]["vartree"]
+ self.vartree = vartree
+ self._blockers = blockers
+ self._scheduler = scheduler
+ self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/"+MERGING_IDENTIFIER+pkg
+ self.dbdir = self.dbpkgdir
+ self.settings = mysettings
+ self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
+
+ self.myroot = self.settings['ROOT']
+ self._installed_instance = None
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+ self._linkmap_broken = False
+ self._device_path_map = {}
+ self._hardlink_merge_map = {}
+ self._hash_key = (self._eroot, self.mycpv)
+ self._protect_obj = None
+ self._pipe = pipe
+
+ # When necessary, this attribute is modified for
+ # compliance with RESTRICT=preserve-libs.
+ self._preserve_libs = "preserve-libs" in mysettings.features
+
+ def __hash__(self):
+ return hash(self._hash_key)
+
+ def __eq__(self, other):
+ return isinstance(other, dblink) and \
+ self._hash_key == other._hash_key
+
+ def _get_protect_obj(self):
+
+ if self._protect_obj is None:
+ self._protect_obj = ConfigProtect(self._eroot,
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT", "")),
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT_MASK", "")),
+ case_insensitive = ("case-insensitive-fs"
+ in self.settings.features))
+
+ return self._protect_obj
+
+ def isprotected(self, obj):
+ return self._get_protect_obj().isprotected(obj)
+
+ def updateprotect(self):
+ self._get_protect_obj().updateprotect()
+
+ def lockdb(self):
+ self.vartree.dbapi.lock()
+
+ def unlockdb(self):
+ self.vartree.dbapi.unlock()
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ try:
+ os.lstat(self.dbdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
+ raise
+ return
+
+ # Check validity of self.dbdir before attempting to remove it.
+ if not self.dbdir.startswith(self.dbroot):
+ writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
+ self.dbdir, noiselevel=-1)
+ return
+
+ shutil.rmtree(self.dbdir)
+ # If empty, remove parent category directory.
+ try:
+ os.rmdir(os.path.dirname(self.dbdir))
+ except OSError:
+ pass
+ self.vartree.dbapi._remove(self)
+
+ # Use self.dbroot since we need an existing path for syncfs.
+ try:
+ self._merged_path(self.dbroot, os.lstat(self.dbroot))
+ except OSError:
+ pass
+
+ self._post_merge_sync()
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ self.lockdb()
+ try:
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+ finally:
+ self.unlockdb()
+
+ def _clear_contents_cache(self):
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
+ """
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ if self.contentscache is not None:
+ return self.contentscache
+ pkgfiles = {}
+ try:
+ with io.open(_unicode_encode(contents_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ mylines = f.readlines()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ null_byte = "\0"
+ normalize_needed = self._normalize_needed
+ contents_re = self._contents_re
+ obj_index = contents_re.groupindex['obj']
+ dir_index = contents_re.groupindex['dir']
+ sym_index = contents_re.groupindex['sym']
+ # The old symlink format may exist on systems that have packages
+ # which were installed many years ago (see bug #351814).
+ oldsym_index = contents_re.groupindex['oldsym']
+ # CONTENTS files already contain EPREFIX
+ myroot = self.settings['ROOT']
+ if myroot == os.path.sep:
+ myroot = None
+ # used to generate parent dir entries
+ dir_entry = ("dir",)
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+ pos = 0
+ errors = []
+ for pos, line in enumerate(mylines):
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
+ continue
+ line = line.rstrip("\n")
+ m = contents_re.match(line)
+ if m is None:
+ errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
+ continue
+
+ if m.group(obj_index) is not None:
+ base = obj_index
+ #format: type, mtime, md5sum
+ data = (m.group(base+1), m.group(base+4), m.group(base+3))
+ elif m.group(dir_index) is not None:
+ base = dir_index
+ #format: type
+ data = (m.group(base+1),)
+ elif m.group(sym_index) is not None:
+ base = sym_index
+ if m.group(oldsym_index) is None:
+ mtime = m.group(base+5)
+ else:
+ mtime = m.group(base+8)
+ #format: type, mtime, dest
+ data = (m.group(base+1), mtime, m.group(base+3))
+ else:
+				# This won't happen as long as the regular expression
+				# is written to only match valid entries.
+ raise AssertionError(_("required group not found " + \
+ "in CONTENTS entry: '%s'") % line)
+
+ path = m.group(base+2)
+ if normalize_needed.search(path) is not None:
+ path = normalize_path(path)
+ if not path.startswith(os.path.sep):
+ path = os.path.sep + path
+
+ if myroot is not None:
+ path = os.path.join(myroot, path.lstrip(os.path.sep))
+
+ # Implicitly add parent directories, since we can't necessarily
+ # assume that they are explicitly listed in CONTENTS, and it's
+ # useful for callers if they can rely on parent directory entries
+ # being generated here (crucial for things like dblink.isowner()).
+ path_split = path.split(os.sep)
+ path_split.pop()
+ while len(path_split) > eroot_split_len:
+ parent = os.sep.join(path_split)
+ if parent in pkgfiles:
+ break
+ pkgfiles[parent] = dir_entry
+ path_split.pop()
+
+ pkgfiles[path] = data
+
+ if errors:
+ writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
+ for pos, e in errors:
+ writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ def _prune_plib_registry(self, unmerge=False,
+ needed=None, preserve_paths=None):
+ # remove preserved libraries that don't have any consumers left
+ if not (self._linkmap_broken or
+ self.vartree.dbapi._linkmap is None or
+ self.vartree.dbapi._plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry = self.vartree.dbapi._plib_registry
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ unmerge_with_replacement = \
+ unmerge and preserve_paths is not None
+ if unmerge_with_replacement:
+ # If self.mycpv is about to be unmerged and we
+ # have a replacement package, we want to exclude
+ # the irrelevant NEEDED data that belongs to
+ # files which are being unmerged now.
+ exclude_pkgs = (self.mycpv,)
+ else:
+ exclude_pkgs = None
+
+ self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
+ include_file=needed, preserve_paths=preserve_paths)
+
+ if unmerge:
+ unmerge_preserve = None
+ if not unmerge_with_replacement:
+ unmerge_preserve = \
+ self._find_libs_to_preserve(unmerge=True)
+ counter = self.vartree.dbapi.cpv_counter(self.mycpv)
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
+ plib_registry.unregister(self.mycpv, slot, counter)
+ if unmerge_preserve:
+ for path in sorted(unmerge_preserve):
+ contents_key = self._match_contents(path)
+ if not contents_key:
+ continue
+ obj_type = self.getcontents()[contents_key][0]
+ self._display_merge(_(">>> needed %s %s\n") % \
+ (obj_type, contents_key), noiselevel=-1)
+ plib_registry.register(self.mycpv,
+ slot, counter, unmerge_preserve)
+ # Remove the preserved files from our contents
+ # so that they won't be unmerged.
+ self.vartree.dbapi.removeFromContents(self,
+ unmerge_preserve)
+
+ unmerge_no_replacement = \
+ unmerge and not unmerge_with_replacement
+ cpv_lib_map = self._find_unused_preserved_libs(
+ unmerge_no_replacement)
+ if cpv_lib_map:
+ self._remove_preserved_libs(cpv_lib_map)
+ self.vartree.dbapi.lock()
+ try:
+ for cpv, removed in cpv_lib_map.items():
+ if not self.vartree.dbapi.cpv_exists(cpv):
+ continue
+ self.vartree.dbapi.removeFromContents(cpv, removed)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
+ ldpath_mtimes=None, others_in_slot=None, needed=None,
+ preserve_paths=None):
+ """
+ Calls prerm
+ Unmerges a given package (CPV)
+		Calls postrm
+		Calls cleanrm
+		Calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Unused
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @param needed: Filename containing libraries needed after unmerge.
+ @type needed: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ @rtype: Integer
+ @return:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+ """
+
+ if trimworld is not None:
+ warnings.warn("The trimworld parameter of the " + \
+ "portage.dbapi.vartree.dblink.unmerge()" + \
+ " method is now unused.",
+ DeprecationWarning, stacklevel=2)
+
+ background = False
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ if self._scheduler is None:
+ # We create a scheduler instance and use it to
+ # log unmerge output separately from merge output.
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
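+		# When unmerge runs in a subprocess, PORTAGE_BACKGROUND_UNMERGE
+		# decides whether this phase is treated as a background task.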
+ if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
+ if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
+ self.settings["PORTAGE_BACKGROUND"] = "1"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ background = True
+ elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
+ self.settings["PORTAGE_BACKGROUND"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ elif self.settings.get("PORTAGE_BACKGROUND") == "1":
+ background = True
+
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ showMessage = self._display_merge
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+
+ # When others_in_slot is not None, the backup has already been
+ # handled by the caller.
+ caller_handles_backup = others_in_slot is not None
+
+ # When others_in_slot is supplied, the security check has already been
+ # done for this slot, so it shouldn't be repeated until the next
+ # replacement or unmerge operation.
+ if others_in_slot is None:
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings, vartree=self.vartree,
+ treetype="vartree", pipe=self._pipe))
+
+ retval = self._security_check([self] + others_in_slot)
+ if retval:
+ return retval
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ failures = 0
+ ebuild_phase = "prerm"
+ mystuff = os.listdir(self.dbdir)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ if self.mycpv != self.settings.mycpv or \
+ "EAPI" not in self.settings.configdict["pkg"]:
+ # We avoid a redundant setcpv call here when
+ # the caller has already taken care of it.
+ self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
+
+ eapi_unsupported = False
+ try:
+ doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException as e:
+ eapi_unsupported = e
+
+ if self._preserve_libs and "preserve-libs" in \
+ self.settings["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+
+ builddir_lock = None
+ scheduler = self._scheduler
+ retval = os.EX_OK
+ try:
+ # Only create builddir_lock if the caller
+ # has not already acquired the lock.
+ if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=scheduler,
+ settings=self.settings)
+ builddir_lock.lock()
+ prepare_build_dirs(settings=self.settings, cleanup=True)
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ # Do this before the following _prune_plib_registry call, since
+ # that removes preserved libraries from our CONTENTS, and we
+ # may want to backup those libraries first.
+ if not caller_handles_backup:
+ retval = self._pre_unmerge_backup(background)
+ if retval != os.EX_OK:
+ showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+ return retval
+
+ self._prune_plib_registry(unmerge=True, needed=needed,
+ preserve_paths=preserve_paths)
+
+ # Log the error after PORTAGE_LOG_FILE is initialized
+ # by prepare_build_dirs above.
+ if eapi_unsupported:
+ # Sometimes this happens due to corruption of the EAPI file.
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % \
+ os.path.join(self.dbdir, "EAPI"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("%s\n" % (eapi_unsupported,),
+ level=logging.ERROR, noiselevel=-1)
+ elif os.path.isfile(myebuildpath):
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ self.vartree.dbapi._fs_lock()
+ try:
+ self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+ self._clear_contents_cache()
+
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ ebuild_phase = "postrm"
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED postrm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ finally:
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ try:
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ if retval != os.EX_OK:
+ msg_lines = []
+ msg = _("The '%(ebuild_phase)s' "
+ "phase of the '%(cpv)s' package "
+ "has failed with exit value %(retval)s.") % \
+ {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
+ "retval":retval}
+ from textwrap import wrap
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ ebuild_name = os.path.basename(myebuildpath)
+ ebuild_dir = os.path.dirname(myebuildpath)
+ msg = _("The problem occurred while executing "
+ "the ebuild file named '%(ebuild_name)s' "
+ "located in the '%(ebuild_dir)s' directory. "
+ "If necessary, manually remove "
+ "the environment.bz2 file and/or the "
+ "ebuild file located in that directory.") % \
+ {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ msg = _("Removal "
+ "of the environment.bz2 file is "
+ "preferred since it may allow the "
+ "removal phases to execute successfully. "
+ "The ebuild will be "
+ "sourced and the eclasses "
+ "from the current portage tree will be used "
+ "when necessary. Removal of "
+ "the ebuild file will cause the "
+ "pkg_prerm() and pkg_postrm() removal "
+ "phases to be skipped entirely.")
+ msg_lines.extend(wrap(msg, 72))
+
+ self._eerror(ebuild_phase, msg_lines)
+
+ self._elog_process(phasefilter=("prerm", "postrm"))
+
+ if retval == os.EX_OK:
+ try:
+ doebuild_environment(myebuildpath, "cleanrm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException:
+ pass
+ phase = EbuildPhase(background=background,
+ phase="cleanrm", scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+
+ if log_path is not None:
+
+ if not failures and 'unmerge-logs' not in self.settings.features:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ try:
+ st = os.stat(log_path)
+ except OSError:
+ pass
+ else:
+ if st.st_size == 0:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ if log_path is not None and os.path.exists(log_path):
+ # Restore this since it gets lost somewhere above and it
+ # needs to be set for _display_merge() to be able to log.
+			# Note that the log isn't necessarily supposed to exist, since
+			# if PORT_LOGDIR is unset it's a temp file that has already
+			# been cleaned up above.
+ self.settings["PORTAGE_LOG_FILE"] = log_path
+ else:
+ self.settings.pop("PORTAGE_LOG_FILE", None)
+
+ env_update(target_root=self.settings['ROOT'],
+ prev_mtimes=ldpath_mtimes,
+ contents=contents, env=self.settings,
+ writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
+
+ unmerge_with_replacement = preserve_paths is not None
+ if not unmerge_with_replacement:
+ # When there's a replacement package which calls us via treewalk,
+ # treewalk will automatically call _prune_plib_registry for us.
+ # Otherwise, we need to call _prune_plib_registry ourselves.
+ # Don't pass in the "unmerge=True" flag here, since that flag
+ # is intended to be used _prior_ to unmerge, not after.
+ self._prune_plib_registry()
+
+ return os.EX_OK
+
+ def _display_merge(self, msg, level=0, noiselevel=0):
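+		# Informational output is suppressed unless verbose mode is
+		# enabled; warnings and forced messages (noiselevel < 0) always
+		# pass through.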
+ if not self._verbose and noiselevel >= 0 and level < logging.WARN:
+ return
+ if self._scheduler is None:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+
+ if background and log_path is None:
+ if level >= logging.WARN:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ self._scheduler.output(msg,
+ log_path=log_path, background=background,
+ level=level, noiselevel=noiselevel)
+
+ def _show_unmerge(self, zing, desc, file_type, file_name):
+ self._display_merge("%s %s %s %s\n" % \
+ (zing, desc.ljust(8), file_type, file_name))
+
+ def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+ """
+
+ Unmerges the contents of a package from the liveFS
+ Removes the VDB entry for self
+
+ @param pkgfiles: typically self.getcontents()
+ @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @rtype: None
+ """
+
+ os = _os_merge
+ perf_md5 = perform_md5
+ showMessage = self._display_merge
+ show_unmerge = self._show_unmerge
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+ ignored_rmdir_errnos = self._ignored_rmdir_errnos
+
+ if not pkgfiles:
+ showMessage(_("No package files given... Grabbing a set.\n"))
+ pkgfiles = self.getcontents()
+
+ if others_in_slot is None:
+ others_in_slot = []
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings,
+ vartree=self.vartree, treetype="vartree", pipe=self._pipe))
+
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ stale_confmem = []
+ protected_symlinks = {}
+
+ unmerge_orphans = "unmerge-orphans" in self.settings.features
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ if pkgfiles:
+ self.updateprotect()
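+			# Process entries in reverse-sorted order so that files inside
+			# a directory are visited before the directory itself.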
+ mykeys = list(pkgfiles)
+ mykeys.sort()
+ mykeys.reverse()
+
+ #process symlinks second-to-last, directories last.
+ mydirs = set()
+
+ uninstall_ignore = portage.util.shlex_split(
+ self.settings.get("UNINSTALL_IGNORE", ""))
+
+ def unlink(file_name, lstatobj):
+ if bsd_chflags:
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(file_name, 0)
+ parent_name = os.path.dirname(file_name)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ if not stat.S_ISLNK(lstatobj.st_mode):
+ # Remove permissions to ensure that any hardlinks to
+ # suid/sgid files are rendered harmless.
+ os.chmod(file_name, 0)
+ os.unlink(file_name)
+ except OSError as ose:
+ # If the chmod or unlink fails, you are in trouble.
+ # With Prefix this can be because the file is owned
+ # by someone else (a screwup by root?), on a normal
+ # system maybe filesystem corruption. In any case,
+ # if we backtrace and die here, we leave the system
+ # in a totally undefined state, hence we just bleed
+ # like hell and continue to hopefully finish all our
+ # administrative and pkg_postinst stuff.
+ self._eerror("postrm",
+ ["Could not chmod or unlink '%s': %s" % \
+ (file_name, ose)])
+ else:
+
+ # Even though the file no longer exists, we log it
+ # here so that _unmerge_dirs can see that we've
+ # removed a file from this device, and will record
+ # the parent directory for a syncfs call.
+ self._merged_path(file_name, lstatobj, exists=False)
+
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
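+			# Short status tags shown next to each entry as it is skipped
+			# or removed (e.g. "!mtime" means the on-disk mtime no longer
+			# matches the recorded CONTENTS entry).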
+ unmerge_desc = {}
+ unmerge_desc["cfgpro"] = _("cfgpro")
+ unmerge_desc["replaced"] = _("replaced")
+ unmerge_desc["!dir"] = _("!dir")
+ unmerge_desc["!empty"] = _("!empty")
+ unmerge_desc["!fif"] = _("!fif")
+ unmerge_desc["!found"] = _("!found")
+ unmerge_desc["!md5"] = _("!md5")
+ unmerge_desc["!mtime"] = _("!mtime")
+ unmerge_desc["!obj"] = _("!obj")
+ unmerge_desc["!sym"] = _("!sym")
+ unmerge_desc["!prefix"] = _("!prefix")
+
+ real_root = self.settings['ROOT']
+ real_root_len = len(real_root) - 1
+ eroot = self.settings["EROOT"]
+
+ infodirs = frozenset(infodir for infodir in chain(
+ self.settings.get("INFOPATH", "").split(":"),
+ self.settings.get("INFODIR", "").split(":")) if infodir)
+ infodirs_inodes = set()
+ for infodir in infodirs:
+ infodir = os.path.join(real_root, infodir.lstrip(os.sep))
+ try:
+ statobj = os.stat(infodir)
+ except OSError:
+ pass
+ else:
+ infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
+
+ for i, objkey in enumerate(mykeys):
+
+ obj = normalize_path(objkey)
+ if os is _os_merge:
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ perf_md5 = portage.checksum.perform_md5
+
+ file_data = pkgfiles[objkey]
+ file_type = file_data[0]
+
+ # don't try to unmerge the prefix offset itself
+ if len(obj) <= len(eroot) or not obj.startswith(eroot):
+ show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
+ continue
+
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if lstatobj is None:
+ show_unmerge("---", unmerge_desc["!found"], file_type, obj)
+ continue
+
+ f_match = obj[len(eroot)-1:]
+ ignore = False
+ for pattern in uninstall_ignore:
+ if fnmatch.fnmatch(f_match, pattern):
+ ignore = True
+ break
+
+ if not ignore:
+ if islink and f_match in \
+ ("/lib", "/usr/lib", "/usr/local/lib"):
+ # Ignore libdir symlinks for bug #423127.
+ ignore = True
+
+ if ignore:
+ show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+ continue
+
+ # don't use EROOT, CONTENTS entries already contain EPREFIX
+ if obj.startswith(real_root):
+ relative_path = obj[real_root_len:]
+ is_owned = False
+ for dblnk in others_in_slot:
+ if dblnk.isowner(relative_path):
+ is_owned = True
+ break
+
+ if is_owned and islink and \
+ file_type in ("sym", "dir") and \
+ statobj and stat.S_ISDIR(statobj.st_mode):
+ # A new instance of this package claims the file, so
+ # don't unmerge it. If the file is symlink to a
+ # directory and the unmerging package installed it as
+ # a symlink, but the new owner has it listed as a
+ # directory, then we'll produce a warning since the
+ # symlink is a sort of orphan in this case (see
+ # bug #326685).
+ symlink_orphan = False
+ for dblnk in others_in_slot:
+ parent_contents_key = \
+ dblnk._match_contents(relative_path)
+ if not parent_contents_key:
+ continue
+ if not parent_contents_key.startswith(
+ real_root):
+ continue
+ if dblnk.getcontents()[
+ parent_contents_key][0] == "dir":
+ symlink_orphan = True
+ break
+
+ if symlink_orphan:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+
+ if is_owned:
+ show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
+ continue
+ elif relative_path in cfgfiledict:
+ stale_confmem.append(relative_path)
+
+ # Don't unlink symlinks to directories here since that can
+ # remove /lib and /usr/lib symlinks.
+ if unmerge_orphans and \
+ lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
+ not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
+ not self.isprotected(obj):
+ try:
+ unlink(obj, lstatobj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ continue
+
+ lmtime = str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+ show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
+ continue
+
+ if file_type == "dir" and not islink:
+ if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
+ show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
+ continue
+ mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
+ elif file_type == "sym" or (file_type == "dir" and islink):
+ if not islink:
+ show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
+ continue
+
+ # If this symlink points to a directory then we don't want
+ # to unmerge it if there are any other packages that
+ # installed files into the directory via this symlink
+ # (see bug #326685).
+ # TODO: Resolving a symlink to a directory will require
+ # simulation if $ROOT != / and the link is not relative.
+ if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
+ and obj.startswith(real_root):
+
+ relative_path = obj[real_root_len:]
+ try:
+ target_dir_contents = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ if target_dir_contents:
+ # If all the children are regular files owned
+ # by this package, then the symlink should be
+ # safe to unmerge.
+ all_owned = True
+ for child in target_dir_contents:
+ child = os.path.join(relative_path, child)
+ if not self.isowner(child):
+ all_owned = False
+ break
+ try:
+ child_lstat = os.lstat(os.path.join(
+ real_root, child.lstrip(os.sep)))
+ except OSError:
+ continue
+
+ if not stat.S_ISREG(child_lstat.st_mode):
+ # Nested symlinks or directories make
+ # the issue very complex, so just
+ # preserve the symlink in order to be
+ # on the safe side.
+ all_owned = False
+ break
+
+ if not all_owned:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+ show_unmerge("---", unmerge_desc["!empty"],
+ file_type, obj)
+ continue
+
+ # Go ahead and unlink symlinks to directories here when
+ # they're actually recorded as symlinks in the contents.
+ # Normally, symlinks such as /lib -> lib64 are not recorded
+ # as symlinks in the contents of a package. If a package
+ # installs something into ${D}/lib/, it is recorded in the
+ # contents as a directory even if it happens to correspond
+ # to a symlink when it's merged to the live filesystem.
+ try:
+ unlink(obj, lstatobj)
+ show_unmerge("<<<", "", file_type, obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+ mymd5 = None
+ try:
+ mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
+ except FileNotFound as e:
+ # the file has disappeared between now and our stat call
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+
+					# The lower() call is needed for backwards compatibility
+					# with old db entries that recorded MD5 sums in upper-case.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
+ continue
+ try:
+ unlink(obj, lstatobj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
+ continue
+ show_unmerge("---", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "dev":
+ show_unmerge("---", "", file_type, obj)
+
+ self._unmerge_dirs(mydirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+ mydirs.clear()
+
+ if protected_symlinks:
+ self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+
+ if protected_symlinks:
+ msg = "One or more symlinks to directories have been " + \
+ "preserved in order to ensure that files installed " + \
+ "via these symlinks remain accessible. " + \
+ "This indicates that the mentioned symlink(s) may " + \
+ "be obsolete remnants of an old install, and it " + \
+ "may be appropriate to replace a given symlink " + \
+ "with the directory that it points to."
+ lines = textwrap.wrap(msg, 72)
+ lines.append("")
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+ for f in flat_list:
+ lines.append("\t%s" % (os.path.join(real_root,
+ f.lstrip(os.sep))))
+ lines.append("")
+ self._elog("elog", "postrm", lines)
+
+ # Remove stale entries from config memory.
+ if stale_confmem:
+ for filename in stale_confmem:
+ del cfgfiledict[filename]
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ self.vartree.zap(self.mycpv)
+
+ def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os):
+
+ real_root = self.settings['ROOT']
+ show_unmerge = self._show_unmerge
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+
+ for f in flat_list:
+ for dblnk in others_in_slot:
+ if dblnk.isowner(f):
+ # If another package in the same slot installed
+ # a file via a protected symlink, return early
+ # and don't bother searching for any other owners.
+ return
+
+ msg = []
+ msg.append("")
+ msg.append(_("Directory symlink(s) may need protection:"))
+ msg.append("")
+
+ for f in flat_list:
+ msg.append("\t%s" % \
+ os.path.join(real_root, f.lstrip(os.path.sep)))
+
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for files installed via above symlink(s)..."))
+ msg.append("")
+ self._elog("elog", "postrm", msg)
+
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(flat_list)
+ self.vartree.dbapi.flush_cache()
+ finally:
+ self.unlockdb()
+
+ for owner in list(owners):
+ if owner.mycpv == self.mycpv:
+ owners.pop(owner, None)
+
+ if not owners:
+ msg = []
+ msg.append(_("The above directory symlink(s) are all "
+ "safe to remove. Removing them now..."))
+ msg.append("")
+ self._elog("elog", "postrm", msg)
+ dirs = set()
+ for unmerge_syms in protected_symlinks.values():
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ parent = os.path.dirname(obj)
+ while len(parent) > len(self._eroot):
+ try:
+ lstatobj = os.lstat(parent)
+ except OSError:
+ break
+ else:
+ dirs.add((parent,
+ (lstatobj.st_dev, lstatobj.st_ino)))
+ parent = os.path.dirname(parent)
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+
+ protected_symlinks.clear()
+ self._unmerge_dirs(dirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+ dirs.clear()
+
+ def _unmerge_dirs(self, dirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os):
+
+ show_unmerge = self._show_unmerge
+ infodir_cleanup = self._infodir_cleanup
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+ ignored_rmdir_errnos = self._ignored_rmdir_errnos
+ real_root = self.settings['ROOT']
+
+ dirs = sorted(dirs)
+ dirs.reverse()
+
+ for obj, inode_key in dirs:
+ # Treat any directory named "info" as a candidate here,
+ # since it might have been in INFOPATH previously even
+ # though it may not be there now.
+ if inode_key in infodirs_inodes or \
+ os.path.basename(obj) == "info":
+ try:
+ remaining = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ cleanup_info_dir = ()
+ if remaining and \
+ len(remaining) <= len(infodir_cleanup):
+ if not set(remaining).difference(infodir_cleanup):
+ cleanup_info_dir = remaining
+
+ for child in cleanup_info_dir:
+ child = os.path.join(obj, child)
+ try:
+ lstatobj = os.lstat(child)
+ if stat.S_ISREG(lstatobj.st_mode):
+ unlink(child, lstatobj)
+ show_unmerge("<<<", "", "obj", child)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "obj", child)
+
+ try:
+ parent_name = os.path.dirname(obj)
+ parent_stat = os.stat(parent_name)
+
+ if bsd_chflags:
+ lstatobj = os.lstat(obj)
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(obj, 0)
+
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = parent_stat.st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ os.rmdir(obj)
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
+ # Record the parent directory for use in syncfs calls.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real parent directory resides.
+ self._merged_path(os.path.realpath(parent_name), parent_stat)
+
+ show_unmerge("<<<", "", "dir", obj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_rmdir_errnos:
+ raise
+ if e.errno != errno.ENOENT:
+ show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+
+ # Since we didn't remove this directory, record the directory
+ # itself for use in syncfs calls, if we have removed another
+ # file from the same device.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real directory resides.
+ try:
+ dir_stat = os.stat(obj)
+ except OSError:
+ pass
+ else:
+ if dir_stat.st_dev in self._device_path_map:
+ self._merged_path(os.path.realpath(obj), dir_stat)
+
+ else:
+ # When a directory is successfully removed, there's
+ # no need to protect symlinks that point to it.
+ unmerge_syms = protected_symlinks.pop(inode_key, None)
+ if unmerge_syms is not None:
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+
+ def isowner(self, filename, destroot=None):
+ """
+ Check if a file belongs to this package. This may
+ result in a stat call for the parent directory of
+ every installed file, since the inode numbers are
+ used to work around the problem of ambiguous paths
+ caused by symlinked directories. The results of
+ stat calls are cached to optimize multiple calls
+ to this method.
+
+		@param filename: path of the file to check, relative to ROOT
+		@type filename: String
+		@param destroot: ignored, self.settings['EROOT'] is used instead
+		@type destroot: String
+ @rtype: Boolean
+ @return:
+ 1. True if this package owns the file.
+ 2. False if this package does not own the file.
+ """
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink.isowner()" + \
+ " is now unused. Instead " + \
+ "self.settings['EROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ return bool(self._match_contents(filename))
+
+ def _match_contents(self, filename, destroot=None):
+ """
+ The matching contents entry is returned, which is useful
+ since the path may differ from the one given by the caller,
+ due to symlinks.
+
+ @rtype: String
+ @return: the contents entry corresponding to the given path, or False
+ if the file is not owned by this package.
+ """
+
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink._match_contents()" + \
+ " is now unused. Instead " + \
+ "self.settings['ROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ # don't use EROOT here, image already contains EPREFIX
+ destroot = self.settings['ROOT']
+
+		# The given filename argument might have a different encoding than
+		# the filenames contained in the contents, so use separate wrapped os
+ # modules for each. The basename is more likely to contain non-ascii
+ # characters than the directory path, so use os_filename_arg for all
+ # operations involving the basename of the filename arg.
+ os_filename_arg = _os_merge
+ os = _os_merge
+
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os_filename_arg = portage.os
+
+ destfile = normalize_path(
+ os_filename_arg.path.join(destroot,
+ filename.lstrip(os_filename_arg.path.sep)))
+
+ pkgfiles = self.getcontents()
+
+ preserve_case = None
+ if "case-insensitive-fs" in self.settings.features:
+ destfile = destfile.lower()
+ preserve_case = dict((k.lower(), k) for k in pkgfiles)
+ pkgfiles = dict((k.lower(), v) for k, v in pkgfiles.items())
+
+ if pkgfiles and destfile in pkgfiles:
+ if preserve_case is not None:
+ return preserve_case[destfile]
+ return destfile
+ if pkgfiles:
+ basename = os_filename_arg.path.basename(destfile)
+ if self._contents_basenames is None:
+
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_basenames = set(
+ os.path.basename(x) for x in pkgfiles)
+ if basename not in self._contents_basenames:
+ # This is a shortcut that, in most cases, allows us to
+ # eliminate this package as an owner without the need
+ # to examine inode numbers of parent directories.
+ return False
+
+ # Use stat rather than lstat since we want to follow
+ # any symlinks to the real parent directory.
+ parent_path = os_filename_arg.path.dirname(destfile)
+ try:
+ parent_stat = os_filename_arg.stat(parent_path)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return False
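+		# Fall back to comparing device/inode pairs of parent
+		# directories, so that paths which reach the same location
+		# through symlinked directories still match.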
+ if self._contents_inodes is None:
+
+ if os is _os_merge:
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_inodes = {}
+ parent_paths = set()
+ for x in pkgfiles:
+ p_path = os.path.dirname(x)
+ if p_path in parent_paths:
+ continue
+ parent_paths.add(p_path)
+ try:
+ s = os.stat(p_path)
+ except OSError:
+ pass
+ else:
+ inode_key = (s.st_dev, s.st_ino)
+ # Use lists of paths in case multiple
+ # paths reference the same inode.
+ p_path_list = self._contents_inodes.get(inode_key)
+ if p_path_list is None:
+ p_path_list = []
+ self._contents_inodes[inode_key] = p_path_list
+ if p_path not in p_path_list:
+ p_path_list.append(p_path)
+
+ p_path_list = self._contents_inodes.get(
+ (parent_stat.st_dev, parent_stat.st_ino))
+ if p_path_list:
+ for p_path in p_path_list:
+ x = os_filename_arg.path.join(p_path, basename)
+ if x in pkgfiles:
+ if preserve_case is not None:
+ return preserve_case[x]
+ return x
+
+ return False
+
+ def _linkmap_rebuild(self, **kwargs):
+ """
+ Rebuild the self._linkmap if it's not broken due to missing
+ scanelf binary. Also, return early if preserve-libs is disabled
+ and the preserve-libs registry is empty.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ ("preserve-libs" not in self.settings.features and \
+ not self.vartree.dbapi._plib_registry.hasEntries()):
+ return
+ try:
+ self.vartree.dbapi._linkmap.rebuild(**kwargs)
+ except CommandNotFound as e:
+ self._linkmap_broken = True
+ self._display_merge(_("!!! Disabling preserve-libs " \
+ "due to error: Command Not Found: %s\n") % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _find_libs_to_preserve(self, unmerge=False):
+ """
+ Get set of relative paths for libraries to be preserved. When
+ unmerge is False, file paths to preserve are selected from
+ self._installed_instance. Otherwise, paths are selected from
+ self.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ (not unmerge and self._installed_instance is None) or \
+ not self._preserve_libs:
+ return set()
+
+ os = _os_merge
+ linkmap = self.vartree.dbapi._linkmap
+ if unmerge:
+ installed_instance = self
+ else:
+ installed_instance = self._installed_instance
+ old_contents = installed_instance.getcontents()
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ lib_graph = digraph()
+ path_node_map = {}
+
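+		# Merge alternative paths (e.g. hardlinks) that resolve to the same
+		# object key into a single graph node, collecting them in alt_paths.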
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = linkmap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ consumer_map = {}
+ provider_nodes = set()
+ # Create provider nodes and add them to the graph.
+ for f_abs in old_contents:
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ f = f_abs[root_len:]
+ if not unmerge and self.isowner(f):
+				# We have an identically named replacement file,
+ # so we don't try to preserve the old copy.
+ continue
+ try:
+ consumers = linkmap.findConsumers(f,
+ exclude_providers=(installed_instance.isowner,))
+ except KeyError:
+ continue
+ if not consumers:
+ continue
+ provider_node = path_to_node(f)
+ lib_graph.add(provider_node, None)
+ provider_nodes.add(provider_node)
+ consumer_map[provider_node] = consumers
+
+ # Create consumer nodes and add them to the graph.
+ # Note that consumers can also be providers.
+ for provider_node, consumers in consumer_map.items():
+ for c in consumers:
+ consumer_node = path_to_node(c)
+ if installed_instance.isowner(c) and \
+ consumer_node not in provider_nodes:
+ # This is not a provider, so it will be uninstalled.
+ continue
+ lib_graph.add(provider_node, consumer_node)
+
+ # Locate nodes which should be preserved. They consist of all
+ # providers that are reachable from consumers that are not
+ # providers themselves.
+ preserve_nodes = set()
+ for consumer_node in lib_graph.root_nodes():
+ if consumer_node in provider_nodes:
+ continue
+ # Preserve all providers that are reachable from this consumer.
+ node_stack = lib_graph.child_nodes(consumer_node)
+ while node_stack:
+ provider_node = node_stack.pop()
+ if provider_node in preserve_nodes:
+ continue
+ preserve_nodes.add(provider_node)
+ node_stack.extend(lib_graph.child_nodes(provider_node))
+
+ preserve_paths = set()
+ for preserve_node in preserve_nodes:
+ # Preserve the library itself, and also preserve the
+ # soname symlink which is the only symlink that is
+ # strictly required.
+ hardlinks = set()
+ soname_symlinks = set()
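+			# Every path in alt_paths refers to the same object, so any one
+			# of them can be used to look up the soname.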
+ soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
+ for f in preserve_node.alt_paths:
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ try:
+ if stat.S_ISREG(os.lstat(f_abs).st_mode):
+ hardlinks.add(f)
+ elif os.path.basename(f) == soname:
+ soname_symlinks.add(f)
+ except OSError:
+ pass
+
+ if hardlinks:
+ preserve_paths.update(hardlinks)
+ preserve_paths.update(soname_symlinks)
+
+ return preserve_paths
+
+ def _add_preserve_libs_to_contents(self, preserve_paths):
+ """
+ Preserve libs returned from _find_libs_to_preserve().
+ """
+
+ if not preserve_paths:
+ return
+
+ os = _os_merge
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ # Copy contents entries from the old package to the new one.
+ new_contents = self.getcontents().copy()
+ old_contents = self._installed_instance.getcontents()
+ for f in sorted(preserve_paths):
+ f = _unicode_decode(f,
+ encoding=_encodings['content'], errors='strict')
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ contents_entry = old_contents.get(f_abs)
+ if contents_entry is None:
+ # This will probably never happen, but it might if one of the
+ # paths returned from findConsumers() refers to one of the libs
+ # that should be preserved yet the path is not listed in the
+ # contents. Such a path might belong to some other package, so
+ # it shouldn't be preserved here.
+ showMessage(_("!!! File '%s' will not be preserved "
+ "due to missing contents entry\n") % (f_abs,),
+ level=logging.ERROR, noiselevel=-1)
+ preserve_paths.remove(f)
+ continue
+ new_contents[f_abs] = contents_entry
+ obj_type = contents_entry[0]
+ showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
+ noiselevel=-1)
+ # Add parent directories to contents if necessary.
+ parent_dir = os.path.dirname(f_abs)
+ while len(parent_dir) > len(root):
+ new_contents[parent_dir] = ["dir"]
+ prev = parent_dir
+ parent_dir = os.path.dirname(parent_dir)
+ if prev == parent_dir:
+ break
+ outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
+ write_contents(new_contents, root, outfile)
+ outfile.close()
+ self._clear_contents_cache()
+
+ def _find_unused_preserved_libs(self, unmerge_no_replacement):
+ """
+ Find preserved libraries that don't have any consumers left.
+ """
+
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ not self.vartree.dbapi._plib_registry.hasEntries():
+ return {}
+
+ # Since preserved libraries can be consumers of other preserved
+ # libraries, use a graph to track consumer relationships.
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ linkmap = self.vartree.dbapi._linkmap
+ lib_graph = digraph()
+ preserved_nodes = set()
+ preserved_paths = set()
+ path_cpv_map = {}
+ path_node_map = {}
+ root = self.settings['ROOT']
+
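+		# Prefix portage provides platform-specific LinkageMap flavors, so
+		# pick the _LibGraphNode class matching CHOST; the node keys must
+		# be compatible with the linkmap implementation in use.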
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+				chost = self.settings.get('CHOST', '')
+ if chost.find('darwin') >= 0:
+ node = LinkageMapMachO._LibGraphNode(linkmap._obj_key(path))
+ elif chost.find('interix') >= 0 or chost.find('winnt') >= 0:
+ node = LinkageMapPeCoff._LibGraphNode(linkmap._obj_key(path))
+ elif chost.find('aix') >= 0:
+ node = LinkageMapXCoff._LibGraphNode(linkmap._obj_key(path))
+ else:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ for cpv, plibs in plib_dict.items():
+ for f in plibs:
+ path_cpv_map[f] = cpv
+ preserved_node = path_to_node(f)
+ if not preserved_node.file_exists():
+ continue
+ lib_graph.add(preserved_node, None)
+ preserved_paths.add(f)
+ preserved_nodes.add(preserved_node)
+ for c in self.vartree.dbapi._linkmap.findConsumers(f):
+ consumer_node = path_to_node(c)
+ if not consumer_node.file_exists():
+ continue
+ # Note that consumers may also be providers.
+ lib_graph.add(preserved_node, consumer_node)
+
+ # Eliminate consumers having providers with the same soname as an
+ # installed library that is not preserved. This eliminates
+ # libraries that are erroneously preserved due to a move from one
+ # directory to another.
+ # Also eliminate consumers that are going to be unmerged if
+ # unmerge_no_replacement is True.
+ provider_cache = {}
+ for preserved_node in preserved_nodes:
+ soname = linkmap.getSoname(preserved_node)
+ for consumer_node in lib_graph.parent_nodes(preserved_node):
+ if consumer_node in preserved_nodes:
+ continue
+ if unmerge_no_replacement:
+ will_be_unmerged = True
+ for path in consumer_node.alt_paths:
+ if not self.isowner(path):
+ will_be_unmerged = False
+ break
+ if will_be_unmerged:
+ # This consumer is not preserved and it is
+ # being unmerged, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ continue
+
+ providers = provider_cache.get(consumer_node)
+ if providers is None:
+ providers = linkmap.findProviders(consumer_node)
+ provider_cache[consumer_node] = providers
+ providers = providers.get(soname)
+ if providers is None:
+ continue
+ for provider in providers:
+ if provider in preserved_paths:
+ continue
+ provider_node = path_to_node(provider)
+ if not provider_node.file_exists():
+ continue
+ if provider_node in preserved_nodes:
+ continue
+ # An alternative provider seems to be
+ # installed, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ break
+
+ cpv_lib_map = {}
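+		# Repeatedly strip preserved root nodes (libs with no remaining
+		# consumers) from the graph; each pass may expose new roots, so
+		# this walks the consumer graph in topological order.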
+ while lib_graph:
+ root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
+ if not root_nodes:
+ break
+ lib_graph.difference_update(root_nodes)
+ unlink_list = set()
+ for node in root_nodes:
+ unlink_list.update(node.alt_paths)
+ unlink_list = sorted(unlink_list)
+ for obj in unlink_list:
+ cpv = path_cpv_map.get(obj)
+ if cpv is None:
+ # This means that a symlink is in the preserved libs
+ # registry, but the actual lib it points to is not.
+ self._display_merge(_("!!! symlink to lib is preserved, "
+ "but not the lib itself:\n!!! '%s'\n") % (obj,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ removed = cpv_lib_map.get(cpv)
+ if removed is None:
+ removed = set()
+ cpv_lib_map[cpv] = removed
+ removed.add(obj)
+
+ return cpv_lib_map
+
+ def _remove_preserved_libs(self, cpv_lib_map):
+ """
+ Remove files returned from _find_unused_preserved_libs().
+ """
+
+ os = _os_merge
+
+ files_to_remove = set()
+ for files in cpv_lib_map.values():
+ files_to_remove.update(files)
+ files_to_remove = sorted(files_to_remove)
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ parent_dirs = set()
+ for obj in files_to_remove:
+ obj = os.path.join(root, obj.lstrip(os.sep))
+ parent_dirs.add(os.path.dirname(obj))
+ if os.path.islink(obj):
+ obj_type = _("sym")
+ else:
+ obj_type = _("obj")
+ try:
+ os.unlink(obj)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ else:
+ showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
+ noiselevel=-1)
+
+ # Remove empty parent directories if possible.
+ while parent_dirs:
+ x = parent_dirs.pop()
+ while True:
+ try:
+ os.rmdir(x)
+ except OSError:
+ break
+ prev = x
+ x = os.path.dirname(x)
+ if x == prev:
+ break
+
+ self.vartree.dbapi._plib_registry.pruneNonExisting()
+
+ def _collision_protect(self, srcroot, destroot, mypkglist,
+ file_list, symlink_list):
+
+ os = _os_merge
+
+ collision_ignore = []
+ for x in portage.util.shlex_split(
+ self.settings.get("COLLISION_IGNORE", "")):
+ if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
+ x = normalize_path(x)
+ x += "/*"
+ collision_ignore.append(x)
+
+ # For collisions with preserved libraries, the current package
+ # will assume ownership and the libraries will be unregistered.
+ if self.vartree.dbapi._plib_registry is None:
+ # preserve-libs is entirely disabled
+ plib_cpv_map = None
+ plib_paths = None
+ plib_inodes = {}
+ else:
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ plib_cpv_map = {}
+ plib_paths = set()
+ for cpv, paths in plib_dict.items():
+ plib_paths.update(paths)
+ for f in paths:
+ plib_cpv_map[f] = cpv
+ plib_inodes = self._lstat_inode_map(plib_paths)
+
+ plib_collisions = {}
+
+ showMessage = self._display_merge
+ stopmerge = False
+ collisions = []
+ symlink_collisions = []
+ destroot = self.settings['ROOT']
+ showMessage(_(" %s checking %d files for package collisions\n") % \
+ (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
+ for i, (f, f_type) in enumerate(chain(
+ ((f, "reg") for f in file_list),
+ ((f, "sym") for f in symlink_list))):
+ if i % 1000 == 0 and i != 0:
+ showMessage(_("%d files checked ...\n") % i)
+
+ dest_path = normalize_path(
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+ try:
+ dest_lstat = os.lstat(dest_path)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOENT:
+ del e
+ continue
+ elif e.errno == errno.ENOTDIR:
+ del e
+ # A non-directory is in a location where this package
+ # expects to have a directory.
+ dest_lstat = None
+ parent_path = dest_path
+ while len(parent_path) > len(destroot):
+ parent_path = os.path.dirname(parent_path)
+ try:
+ dest_lstat = os.lstat(parent_path)
+ break
+ except EnvironmentError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ del e
+ if not dest_lstat:
+ raise AssertionError(
+ "unable to find non-directory " + \
+ "parent for '%s'" % dest_path)
+ dest_path = parent_path
+ f = os.path.sep + dest_path[len(destroot):]
+ if f in collisions:
+ continue
+ else:
+ raise
+ if f[0] != "/":
+ f="/"+f
+
+ if stat.S_ISDIR(dest_lstat.st_mode):
+ if f_type == "sym":
+ # This case is explicitly banned
+ # by PMS (see bug #326685).
+ symlink_collisions.append(f)
+ collisions.append(f)
+ continue
+
+ plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
+ if plibs:
+ for path in plibs:
+ cpv = plib_cpv_map[path]
+ paths = plib_collisions.get(cpv)
+ if paths is None:
+ paths = set()
+ plib_collisions[cpv] = paths
+ paths.add(path)
+ # The current package will assume ownership and the
+ # libraries will be unregistered, so exclude this
+ # path from the normal collisions.
+ continue
+
+ isowned = False
+ full_path = os.path.join(destroot, f.lstrip(os.path.sep))
+ for ver in mypkglist:
+ if ver.isowner(f):
+ isowned = True
+ break
+ if not isowned and self.isprotected(full_path):
+ isowned = True
+ if not isowned:
+ f_match = full_path[len(self._eroot)-1:]
+ stopmerge = True
+ for pattern in collision_ignore:
+ if fnmatch.fnmatch(f_match, pattern):
+ stopmerge = False
+ break
+ if stopmerge:
+ collisions.append(f)
+ return collisions, symlink_collisions, plib_collisions
+
+ def _lstat_inode_map(self, path_iter):
+ """
+ Use lstat to create a map of the form:
+ {(st_dev, st_ino) : set([path1, path2, ...])}
+ Multiple paths may reference the same inode due to hardlinks.
+ All lstat() calls are relative to self.myroot.
+ """
+
+ os = _os_merge
+
+ root = self.settings['ROOT']
+ inode_map = {}
+ for f in path_iter:
+ path = os.path.join(root, f.lstrip(os.sep))
+ try:
+ st = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ key = (st.st_dev, st.st_ino)
+ paths = inode_map.get(key)
+ if paths is None:
+ paths = set()
+ inode_map[key] = paths
+ paths.add(f)
+ return inode_map
+
+ def _security_check(self, installed_instances):
+ if not installed_instances:
+ return 0
+
+ os = _os_merge
+
+ showMessage = self._display_merge
+
+ file_paths = set()
+ for dblnk in installed_instances:
+ file_paths.update(dblnk.getcontents())
+ inode_map = {}
+ real_paths = set()
+ for i, path in enumerate(file_paths):
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ try:
+ s = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ if not stat.S_ISREG(s.st_mode):
+ continue
+ path = os.path.realpath(path)
+ if path in real_paths:
+ continue
+ real_paths.add(path)
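+			# Group multiply-linked suid/sgid files by inode so that the link
+			# count can be compared with the number of owned paths below.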
+ if s.st_nlink > 1 and \
+ s.st_mode & (stat.S_ISUID | stat.S_ISGID):
+ k = (s.st_dev, s.st_ino)
+ inode_map.setdefault(k, []).append((path, s))
+ suspicious_hardlinks = []
+ for path_list in inode_map.values():
+ path, s = path_list[0]
+ if len(path_list) == s.st_nlink:
+ # All hardlinks seem to be owned by this package.
+ continue
+ suspicious_hardlinks.append(path_list)
+ if not suspicious_hardlinks:
+ return 0
+
+ msg = []
+ msg.append(_("suid/sgid file(s) "
+ "with suspicious hardlink(s):"))
+ msg.append("")
+ for path_list in suspicious_hardlinks:
+ for path, s in path_list:
+ msg.append("\t%s" % path)
+ msg.append("")
+ msg.append(_("See the Gentoo Security Handbook "
+ "guide for advice on how to proceed."))
+
+ self._eerror("preinst", msg)
+
+ return 1
+
+ def _eqawarn(self, phase, lines):
+ self._elog("eqawarn", phase, lines)
+
+ def _eerror(self, phase, lines):
+ self._elog("eerror", phase, lines)
+
+ def _elog(self, funcname, phase, lines):
+ func = getattr(portage.elog.messages, funcname)
+ if self._scheduler is None:
+ for l in lines:
+ func(l, phase=phase, key=self.mycpv)
+ else:
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ out = io.StringIO()
+ for line in lines:
+ func(line, phase=phase, key=self.mycpv, out=out)
+ msg = out.getvalue()
+ self._scheduler.output(msg,
+ background=background, log_path=log_path)
+
+ def _elog_process(self, phasefilter=None):
+ cpv = self.mycpv
+ if self._pipe is None:
+ elog_process(cpv, self.settings, phasefilter=phasefilter)
+ else:
+ logdir = os.path.join(self.settings["T"], "logging")
+ ebuild_logentries = collect_ebuild_messages(logdir)
+ # phasefilter is irrelevant for the above collect_ebuild_messages
+ # call, since this package instance has a private logdir. However,
+ # it may be relevant for the following collect_messages call.
+ py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
+ logentries = _merge_logentries(py_logentries, ebuild_logentries)
+ funcnames = {
+ "INFO": "einfo",
+ "LOG": "elog",
+ "WARN": "ewarn",
+ "QA": "eqawarn",
+ "ERROR": "eerror"
+ }
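+			# When merging through a pipe, serialize each log entry as a
+			# "funcname phase cpv line" record, one record per line.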
+ str_buffer = []
+ for phase, messages in logentries.items():
+ for key, lines in messages:
+ funcname = funcnames[key]
+ if isinstance(lines, basestring):
+ lines = [lines]
+ for line in lines:
+ for line in line.split('\n'):
+ fields = (funcname, phase, cpv, line)
+ str_buffer.append(' '.join(fields))
+ str_buffer.append('\n')
+ if str_buffer:
+ str_buffer = _unicode_encode(''.join(str_buffer))
+ while str_buffer:
+ str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
+
+ def _emerge_log(self, msg):
+ emergelog(False, msg)
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+
+ This function does the following:
+
+ calls get_ro_checker to retrieve a function for checking whether Portage
+ will write to a read-only filesystem, then runs it against the directory list
+ calls self._preserve_libs if FEATURES=preserve-libs
+ calls self._collision_protect if FEATURES=collision-protect
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: ignored, self.settings['ROOT'] is used instead
+ @type destroot: String (Path)
+ @param inforoot: root of the vardb entry ?
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+ @type mydbapi: portdbapi instance
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+		@rtype: Integer
+ @return:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+
+ os = _os_merge
+
+ srcroot = _unicode_decode(srcroot,
+ encoding=_encodings['content'], errors='strict')
+ destroot = self.settings['ROOT']
+ inforoot = _unicode_decode(inforoot,
+ encoding=_encodings['content'], errors='strict')
+ myebuild = _unicode_decode(myebuild,
+ encoding=_encodings['content'], errors='strict')
+
+ showMessage = self._display_merge
+ srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+
+ if not os.path.isdir(srcroot):
+ showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
+ slot = ''
+ for var_name in ('CHOST', 'SLOT'):
+ if var_name == 'CHOST' and self.cat == 'virtual':
+ try:
+ os.unlink(os.path.join(inforoot, var_name))
+ except OSError:
+ pass
+ continue
+
+ try:
+ with io.open(_unicode_encode(
+ os.path.join(inforoot, var_name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ val = f.readline().strip()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ val = ''
+
+ if var_name == 'SLOT':
+ slot = val
+
+ if not slot.strip():
+ slot = self.settings.get(var_name, '')
+ if not slot.strip():
+ showMessage(_("!!! SLOT is undefined\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ write_atomic(os.path.join(inforoot, var_name), slot + '\n')
+
+ # This check only applies when built from source, since
+ # inforoot values are written just after src_install.
+ if not is_binpkg and val != self.settings.get(var_name, ''):
+ self._eqawarn('preinst',
+ [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
+ {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
+
+ def eerror(lines):
+ self._eerror("preinst", lines)
+
+ if not os.path.exists(self.dbcatdir):
+ ensure_dirs(self.dbcatdir)
+
+ # NOTE: We use SLOT obtained from the inforoot
+ # directory, in order to support USE=multislot.
+ # Use _pkg_str discard the sub-slot part if necessary.
+ slot = _pkg_str(self.mycpv, slot=slot).slot
+ cp = self.mysplit[0]
+ slot_atom = "%s:%s" % (cp, slot)
+
+ self.lockdb()
+ try:
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged,
+ # since we need it to have private ${T} etc... for things
+ # like elog.
+ settings_clone = portage.config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
+ settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
+ if self._preserve_libs and "preserve-libs" in \
+ settings_clone["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+ finally:
+ self.unlockdb()
+
+ # If any instance has RESTRICT=preserve-libs, then
+ # restrict it for all instances.
+ if not self._preserve_libs:
+ for dblnk in others_in_slot:
+ dblnk._preserve_libs = False
+
+ retval = self._security_check(others_in_slot)
+ if retval:
+ return retval
+
+ if slot_matches:
+ # Used by self.isprotected().
+ max_dblnk = None
+ max_counter = -1
+ for dblnk in others_in_slot:
+ cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
+ if cur_counter > max_counter:
+ max_counter = cur_counter
+ max_dblnk = dblnk
+ self._installed_instance = max_dblnk
+
+ if self.settings.get("INSTALL_MASK") or \
+ "nodoc" in self.settings.features or \
+ "noinfo" in self.settings.features or \
+ "noman" in self.settings.features:
+ # Apply INSTALL_MASK before collision-protect, since it may
+ # be useful to avoid collisions in some scenarios.
+ phase = MiscFunctionsProcess(background=False,
+ commands=["preinst_mask"], phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+
+ # We check for unicode encoding issues after src_install. However,
+ # the check must be repeated here for binary packages (it's
+ # inexpensive since we call os.walk() here anyway).
+ unicode_errors = []
+ line_ending_re = re.compile('[\n\r]')
+ srcroot_len = len(srcroot)
+ ed_len = len(self.settings["ED"])
+
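+		# Restart the walk from scratch whenever an entry had to be renamed
+		# due to a unicode error (or os.walk raised a transient EAGAIN), so
+		# that each pass operates on a consistent tree.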
+ while True:
+
+ unicode_error = False
+ eagain_error = False
+
+ filelist = []
+ dirlist = []
+ linklist = []
+ paths_with_newlines = []
+ def onerror(e):
+ raise
+ walk_iter = os.walk(srcroot, onerror=onerror)
+ while True:
+ try:
+ parent, dirs, files = next(walk_iter)
+ except StopIteration:
+ break
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ # Observed with PyPy 1.8.
+ eagain_error = True
+ break
+
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding='ascii', errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[ed_len:])
+ break
+
+ relative_path = parent[srcroot_len:]
+ dirlist.append(os.path.join(destroot, relative_path))
+
+ for fname in files:
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = portage._os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding='ascii', errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[ed_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ relative_path = fpath[srcroot_len:]
+
+ if line_ending_re.search(relative_path) is not None:
+ paths_with_newlines.append(relative_path)
+
+ file_mode = os.lstat(fpath).st_mode
+ if stat.S_ISREG(file_mode):
+ filelist.append(relative_path)
+ elif stat.S_ISLNK(file_mode):
+ # Note: os.walk puts symlinks to directories in the "dirs"
+ # list and it does not traverse them since that could lead
+ # to an infinite recursion loop.
+ linklist.append(relative_path)
+
+ myto = _unicode_decode(
+ _os.readlink(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict')),
+ encoding=_encodings['merge'], errors='replace')
+ if line_ending_re.search(myto) is not None:
+ paths_with_newlines.append(relative_path)
+
+ if unicode_error:
+ break
+
+ if not (unicode_error or eagain_error):
+ break
+
+ if unicode_errors:
+ self._elog("eqawarn", "preinst",
+ _merge_unicode_error(unicode_errors))
+
+ if paths_with_newlines:
+ msg = []
+ msg.append(_("This package installs one or more files containing line ending characters:"))
+ msg.append("")
+ paths_with_newlines.sort()
+ for f in paths_with_newlines:
+ msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ eerror(msg)
+ return 1
+
+ # If there are no files to merge, and an installed package in the same
+ # slot has files, it probably means that something went wrong.
+ if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
+ not filelist and not linklist and others_in_slot:
+ installed_files = None
+ for other_dblink in others_in_slot:
+ installed_files = other_dblink.getcontents()
+ if not installed_files:
+ continue
+ from textwrap import wrap
+ wrap_width = 72
+ msg = []
+ d = {
+ "new_cpv":self.mycpv,
+ "old_cpv":other_dblink.mycpv
+ }
+ msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
+ "any files, but the currently installed '%(old_cpv)s'"
+ " package has the following files: ") % d, wrap_width))
+ msg.append("")
+ msg.extend(sorted(installed_files))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ msg.extend(wrap(
+ _("Manually run `emerge --unmerge =%s` if you "
+ "really want to remove the above files. Set "
+ "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
+ "/etc/portage/make.conf if you do not want to "
+ "abort in cases like this.") % other_dblink.mycpv,
+ wrap_width))
+ eerror(msg)
+ if installed_files:
+ return 1
+
+ # Make sure the ebuild environment is initialized and that ${T}/elog
+ # exists for logging of collision-protect eerror messages.
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ doebuild_environment(myebuild, "preinst",
+ settings=self.settings, db=mydbapi)
+ self.settings["REPLACING_VERSIONS"] = " ".join(
+ [portage.versions.cpv_getversion(other.mycpv)
+ for other in others_in_slot])
+ prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+
+ # Check for read-only filesystems.
+ ro_checker = get_ro_checker()
+ rofilesystems = ro_checker(dirlist)
+
+ if rofilesystems:
+ msg = _("One or more files installed to this package are "
+ "set to be installed to read-only filesystems. "
+ "Please mount the following filesystems as read-write "
+ "and retry.")
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in rofilesystems:
+ msg.append("\t%s" % f)
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ msg = _("Package '%s' NOT merged due to read-only file systems.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ msg = textwrap.wrap(msg, 70)
+ eerror(msg)
+ return 1
+
+ # check for package collisions
+ blockers = self._blockers
+ if blockers is None:
+ blockers = []
+ collisions, symlink_collisions, plib_collisions = \
+ self._collision_protect(srcroot, destroot,
+ others_in_slot + blockers, filelist, linklist)
+
+ if symlink_collisions:
+ # Symlink collisions need to be distinguished from other types
+ # of collisions, in order to avoid confusion (see bug #409359).
+ msg = _("Package '%s' has one or more collisions "
+ "between symlinks and directories, which is explicitly "
+ "forbidden by PMS section 13.4 (see bug #326685):") % \
+ (self.settings.mycpv,)
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in symlink_collisions:
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ if collisions:
+ collision_protect = "collision-protect" in self.settings.features
+ protect_owned = "protect-owned" in self.settings.features
+ msg = _("This package will overwrite one or more files that"
+ " may belong to other packages (see list below).")
+ if not (collision_protect or protect_owned):
+ msg += _(" Add either \"collision-protect\" or"
+ " \"protect-owned\" to FEATURES in"
+ " make.conf if you would like the merge to abort"
+ " in cases like this. See the make.conf man page for"
+ " more information about these features.")
+ if self.settings.get("PORTAGE_QUIET") != "1":
+ msg += _(" You can use a command such as"
+ " `portageq owners / <filename>` to identify the"
+ " installed package that owns a file. If portageq"
+ " reports that only one package owns a file then do NOT"
+ " file a bug report. A bug report is only useful if it"
+ " identifies at least two or more packages that are known"
+ " to install the same file(s)."
+ " If a collision occurs and you"
+ " can not explain where the file came from then you"
+ " should simply ignore the collision since there is not"
+ " enough information to determine if a real problem"
+ " exists. Please do NOT file a bug report at"
+ " http://bugs.gentoo.org unless you report exactly which"
+ " two packages install the same file(s). See"
+ " http://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
+ " for tips on how to solve the problem. And once again,"
+ " please do NOT file a bug report unless you have"
+ " completely understood the above message.")
+
+ self.settings["EBUILD_PHASE"] = "preinst"
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ if collision_protect:
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.settings.mycpv)
+ msg.append("")
+ msg.append(_("Detected file collision(s):"))
+ msg.append("")
+
+ for f in collisions:
+ msg.append("\t%s" % \
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+
+ eerror(msg)
+
+ owners = None
+ if collision_protect or protect_owned or symlink_collisions:
+ msg = []
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for file collisions..."))
+ msg.append("")
+ msg.append(_("Press Ctrl-C to Stop"))
+ msg.append("")
+ eerror(msg)
+
+ if len(collisions) > 20:
+ # get_owners is slow for large numbers of files, so
+ # don't look them all up.
+ collisions = collisions[:20]
+
+ pkg_info_strs = {}
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(collisions)
+ self.vartree.dbapi.flush_cache()
+
+ for pkg in owners:
+ pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
+ pkg_info_str = "%s%s%s" % (pkg,
+ _slot_separator, pkg.slot)
+ if pkg.repo != _unknown_repo:
+ pkg_info_str += "%s%s" % (_repo_separator,
+ pkg.repo)
+ pkg_info_strs[pkg] = pkg_info_str
+
+ finally:
+ self.unlockdb()
+
+ for pkg, owned_files in owners.items():
+ msg = []
+ msg.append(pkg_info_strs[pkg.mycpv])
+ for f in sorted(owned_files):
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ eerror(msg)
+
+ if not owners:
+ eerror([_("None of the installed"
+ " packages claim the file(s)."), ""])
+
+			symlink_abort_msg = _("Package '%s' NOT merged since it has "
+ "one or more collisions between symlinks and directories, "
+ "which is explicitly forbidden by PMS section 13.4 "
+ "(see bug #326685).")
+
+ # The explanation about the collision and how to solve
+ # it may not be visible via a scrollback buffer, especially
+ # if the number of file collisions is large. Therefore,
+ # show a summary at the end.
+ abort = False
+ if symlink_collisions:
+ abort = True
+ msg = symlink_abort_msg % (self.settings.mycpv,)
+ elif collision_protect:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif protect_owned and owners:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ else:
+ msg = _("Package '%s' merged despite file collisions.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ eerror(wrap(msg, 70))
+
+ if abort:
+ return 1
+
+ # The merge process may move files out of the image directory,
+ # which causes invalidation of the .installed flag.
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
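+		# Stage the new vdb entry in dbtmpdir; it is moved over to
+		# dbpkgdir only after the merge has succeeded.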
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ ensure_dirs(self.dbtmpdir)
+
+ downgrade = False
+ if self._installed_instance is not None and \
+ vercmp(self.mycpv.version,
+ self._installed_instance.mycpv.version) < 0:
+ downgrade = True
+
+ if self._installed_instance is not None:
+ rval = self._pre_merge_backup(self._installed_instance, downgrade)
+ if rval != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ") +
+ "quickpkg: %s\n" % rval,
+ level=logging.ERROR, noiselevel=-1)
+ return rval
+
+ # run preinst script
+ showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
+ {"cpv":self.mycpv, "destroot":destroot})
+ phase = EbuildPhase(background=False, phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in os.listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # write local package counter for recording
+ if counter is None:
+ counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
+ with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace') as f:
+ f.write("%s" % counter)
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ self.vartree.dbapi._fs_lock()
+ try:
+ # Always behave like --noconfmem is enabled for downgrades
+ # so that people who don't know about this option are less
+ # likely to get confused when doing upgrade/downgrade cycles.
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ if "NOCONFMEM" in self.settings or downgrade:
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ rval = self._merge_contents(srcroot, destroot, cfgfiledict)
+ if rval != os.EX_OK:
+ return rval
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # These caches are populated during collision-protect and the data
+ # they contain is now invalid. It's very important to invalidate
+ # the contents_inodes cache so that FEATURES=unmerge-orphans
+ # doesn't unmerge anything that belongs to this package that has
+ # just been merged.
+ for dblnk in others_in_slot:
+ dblnk._clear_contents_cache()
+ self._clear_contents_cache()
+
+ linkmap = self.vartree.dbapi._linkmap
+ plib_registry = self.vartree.dbapi._plib_registry
+ # We initialize preserve_paths to an empty set rather
+ # than None here because it plays an important role
+ # in prune_plib_registry logic by serving to indicate
+ # that we have a replacement for a package that's
+ # being unmerged.
+
+ preserve_paths = set()
+ needed = None
+ if not (self._linkmap_broken or linkmap is None or
+ plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+ needed = os.path.join(inforoot, linkmap._needed_aux_key)
+ self._linkmap_rebuild(include_file=needed)
+
+ # Preserve old libs if they are still in use
+ # TODO: Handle cases where the previous instance
+ # has already been uninstalled but it still has some
+ # preserved libraries in the registry that we may
+ # want to preserve here.
+ preserve_paths = self._find_libs_to_preserve()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ if preserve_paths:
+ self._add_preserve_libs_to_contents(preserve_paths)
+
+ # If portage is reinstalling itself, remove the old
+ # version now since we want to use the temporary
+ # PORTAGE_BIN_PATH that will be removed when we return.
+ reinstall_self = False
+ if self.myroot == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
+ reinstall_self = True
+
+ emerge_log = self._emerge_log
+
+ # If we have any preserved libraries then autoclean
+ # is forced so that preserve-libs logic doesn't have
+ # to account for the additional complexity of the
+ # AUTOCLEAN=no mode.
+ autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
+ or preserve_paths
+
+ if autoclean:
+ emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
+
+ others_in_slot.append(self) # self has just been merged
+ for dblnk in list(others_in_slot):
+ if dblnk is self:
+ continue
+ if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
+ continue
+ showMessage(_(">>> Safely unmerging already-installed instance...\n"))
+ emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
+ others_in_slot.remove(dblnk) # dblnk will unmerge itself now
+ dblnk._linkmap_broken = self._linkmap_broken
+ dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
+ dblnk.settings.backup_changes("REPLACED_BY_VERSION")
+ unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
+ others_in_slot=others_in_slot, needed=needed,
+ preserve_paths=preserve_paths)
+ dblnk.settings.pop("REPLACED_BY_VERSION", None)
+
+ if unmerge_rval == os.EX_OK:
+ emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
+ else:
+ emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
+
+ self.lockdb()
+ try:
+ # TODO: Check status and abort if necessary.
+ dblnk.delete()
+ finally:
+ self.unlockdb()
+ showMessage(_(">>> Original instance of package unmerged safely.\n"))
+
+ if len(others_in_slot) > 1:
+ showMessage(colorize("WARN", _("WARNING:"))
+ + _(" AUTOCLEAN is disabled. This can cause serious"
+ " problems due to overlapping packages.\n"),
+ level=logging.WARN, noiselevel=-1)
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.lockdb()
+ try:
+ self.delete()
+ _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
+ finally:
+ self.unlockdb()
+
+ # Check for file collisions with blocking packages
+ # and remove any colliding files from their CONTENTS
+ # since they now belong to this package.
+ self._clear_contents_cache()
+ contents = self.getcontents()
+ destroot_len = len(destroot) - 1
+ self.lockdb()
+ try:
+ for blocker in blockers:
+ self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+ relative_paths=False)
+ finally:
+ self.unlockdb()
+
+ plib_registry = self.vartree.dbapi._plib_registry
+ if plib_registry:
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ if preserve_paths:
+ # keep track of the libs we preserved
+ plib_registry.register(self.mycpv, slot, counter,
+ sorted(preserve_paths))
+
+ # Unregister any preserved libs that this package has overwritten
+ # and update the contents of the packages that owned them.
+ plib_dict = plib_registry.getPreservedLibs()
+ for cpv, paths in plib_collisions.items():
+ if cpv not in plib_dict:
+ continue
+ has_vdb_entry = False
+ if cpv != self.mycpv:
+ # If we've replaced another instance with the
+ # same cpv then the vdb entry no longer belongs
+ # to it, so we'll have to get the slot and counter
+ # from plib_registry._data instead.
+ self.vartree.dbapi.lock()
+ try:
+ try:
+ slot = self.vartree.dbapi._pkg_str(cpv, None).slot
+ counter = self.vartree.dbapi.cpv_counter(cpv)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ has_vdb_entry = True
+ self.vartree.dbapi.removeFromContents(
+ cpv, paths)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ if not has_vdb_entry:
+ # It's possible for previously unmerged packages
+ # to have preserved libs in the registry, so try
+ # to retrieve the slot and counter from there.
+ has_registry_entry = False
+ for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
+ plib_registry._data.items():
+ if plib_cpv != cpv:
+ continue
+ try:
+ cp, slot = plib_cps.split(":", 1)
+ except ValueError:
+ continue
+ counter = plib_counter
+ has_registry_entry = True
+ break
+
+ if not has_registry_entry:
+ continue
+
+ remaining = [f for f in plib_dict[cpv] if f not in paths]
+ plib_registry.register(cpv, slot, counter, remaining)
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ self.vartree.dbapi._add(self)
+ contents = self.getcontents()
+
+ #do postinst script
+ self.settings["PORTAGE_UPDATE_ENV"] = \
+ os.path.join(self.dbpkgdir, "environment.bz2")
+ self.settings.backup_changes("PORTAGE_UPDATE_ENV")
+ try:
+ phase = EbuildPhase(background=False, phase="postinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+ if a == os.EX_OK:
+ showMessage(_(">>> %s merged.\n") % self.mycpv)
+ finally:
+ self.settings.pop("PORTAGE_UPDATE_ENV", None)
+
+ if a != os.EX_OK:
+ # It's stupid to bail out here, so keep going regardless of
+ # phase return code.
+ showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(
+ target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+ contents=contents, env=self.settings,
+ writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
+
+		# For gcc upgrades, preserved libs have to be removed after the
+		# library path has been updated.
+ self._prune_plib_registry()
+ self._post_merge_sync()
+
+ return os.EX_OK
+
+ def _new_backup_path(self, p):
+ """
+		This works for any type of path, such as a regular file, symlink,
+ or directory. The parent directory is assumed to exist.
+ The returned filename is of the form p + '.backup.' + x, where
+ x guarantees that the returned path does not exist yet.
+ """
+ os = _os_merge
+
+ x = -1
+ while True:
+ x += 1
+ backup_p = '%s.backup.%04d' % (p, x)
+ try:
+ os.lstat(backup_p)
+ except OSError:
+ break
+
+ return backup_p
+
+ def _merge_contents(self, srcroot, destroot, cfgfiledict):
+
+ cfgfiledict_orig = cfgfiledict.copy()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ # Use atomic_ofstream for automatic coercion of raw bytes to
+ # unicode, in order to prevent TypeError when writing raw bytes
+ # to TextIOWrapper with python2.
+ outfile = atomic_ofstream(_unicode_encode(
+ os.path.join(self.dbtmpdir, 'CONTENTS'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+
+		# Don't bump mtimes on merge since some applications require
+		# preservation of timestamps. This means that the unmerge phase must
+		# check to see if a file belongs to an installed instance in the same
+ # slot.
+ mymtime = None
+
+ # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
+ return 1
+
+		# now, it's time for dealing with our second hand; we'll loop until we can't merge anymore. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen = 0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand = []
+ if self.mergeme(srcroot, destroot, outfile, thirdhand,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #swap hands
+ lastlen = len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ if self.mergeme(srcroot, destroot, outfile, None,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ # write out our collection of md5sums
+ if cfgfiledict != cfgfiledict_orig:
+ cfgfiledict.pop("IGNORE", None)
+ try:
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ return os.EX_OK
+
+ def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+		@param secondhand: A list of items to merge in pass two (usually
+		symlinks that point to non-existing files that may get merged later)
+ @type secondhand: List
+		@param stufftomerge: Either a directory to merge, or a list of items.
+ @type stufftomerge: String or List
+ @param cfgfiledict: { File:mtime } mapping for config_protected files
+ @type cfgfiledict: Dictionary
+ @param thismtime: None or new mtime for merged files (expressed in seconds
+ in Python <3.3 and nanoseconds in Python >=3.3)
+ @type thismtime: None or Int
+ @rtype: None or Boolean
+ @return:
+ 1. True on failure
+ 2. None otherwise
+
+ """
+
+ showMessage = self._display_merge
+ writemsg = self._display_merge
+
+ os = _os_merge
+ sep = os.sep
+ join = os.path.join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ protect_if_modified = \
+ "config-protect-if-modified" in self.settings.features and \
+ self._installed_instance is not None
+
+ # this is supposed to merge a list of files. There will be 2 forms of argument passing.
+ if isinstance(stufftomerge, basestring):
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = [join(stufftomerge, child) for child in \
+ os.listdir(join(srcroot, stufftomerge))]
+ else:
+ mergelist = stufftomerge[:]
+
+ while mergelist:
+
+ relative_path = mergelist.pop()
+ mysrc = join(srcroot, relative_path)
+ mydest = join(destroot, relative_path)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, relative_path)
+ # stat file once, test using S_* macros many times (faster that way)
+ mystat = os.lstat(mysrc)
+ mymode = mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydstat = os.lstat(mydest)
+ mydmode = mydstat.st_mode
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ #dest file doesn't exist
+ mydstat = None
+ mydmode = None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ # The file name of mysrc and the actual file that it points to
+ # will have earlier been forcefully converted to the 'merge'
+ # encoding if necessary, but the content of the symbolic link
+ # may need to be forcefully converted here.
+ myto = _os.readlink(_unicode_encode(mysrc,
+ encoding=_encodings['merge'], errors='strict'))
+ try:
+ myto = _unicode_decode(myto,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ myto = _unicode_decode(myto, encoding=_encodings['merge'],
+ errors='replace')
+ myto = _unicode_encode(myto, encoding='ascii',
+ errors='backslashreplace')
+ myto = _unicode_decode(myto, encoding=_encodings['merge'],
+ errors='replace')
+ os.unlink(mysrc)
+ os.symlink(myto, mysrc)
+
+ # Pass in the symlink target in order to bypass the
+ # os.readlink() call inside abssymlink(), since that
+ # call is unsafe if the merge encoding is not ascii
+ # or utf_8 (see bug #382021).
+ myabsto = abssymlink(mysrc, target=myto)
+
+ if myabsto.startswith(srcroot):
+ myabsto = myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
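+				# Rewrite symlink targets that point into ${D} so that they
+				# refer to the corresponding live filesystem location instead.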
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto = myto[len(self.settings["D"])-1:]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode!=None:
+ #destination exists
+ if stat.S_ISDIR(mydmode):
+ # we can't merge a symlink over a directory
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a symlink is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This symlink will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif not stat.S_ISLNK(mydmode):
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ try:
+ newmd5 = perform_md5(join(srcroot, myabsto))
+ except FileNotFound:
+ # Maybe the target is merged already.
+ try:
+ newmd5 = perform_md5(myrealto)
+ except FileNotFound:
+ newmd5 = None
+ mydest = new_protect_filename(mydest, newmd5=newmd5)
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand != None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge'])
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ if mymtime != None:
+ # Use lexists, since if the target happens to be a broken
+ # symlink then that should trigger an independent warning.
+ if not (os.path.lexists(myrealto) or
+ os.path.lexists(join(srcroot, myabsto))):
+ self._eqawarn('preinst',
+ [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
+ % (relative_path, myabsto)])
+
+ showMessage(">>> %s -> %s\n" % (mydest, myto))
+ if sys.hexversion >= 0x3030000:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ showMessage(_("!!! Failed to move file.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("!!! %s -> %s\n" % (mydest, myto),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode != None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags = mydstat.st_flags
+ if dflags != 0:
+ bsd_chflags.lchflags(mydest, 0)
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
+ writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
+ writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg(_("!!! And finish by running this: env-update\n\n"))
+ return 1
+
+ if stat.S_ISDIR(mydmode) or \
+ (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
+ # a symlink to an existing directory will work for us; keep it:
+ showMessage("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ backup_dest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a directory is blocked by a file:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be renamed to a different name:"))
+ msg.append(" '%s'" % backup_dest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ if movefile(mydest, backup_dest,
+ mysettings=self.settings,
+ encoding=_encodings['merge']) is None:
+ return 1
+ showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
+ level=logging.ERROR, noiselevel=-1)
+ #now create our directory
+ try:
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ else:
+ try:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ mergelist.extend(join(relative_path, child) for child in
+ os.listdir(join(srcroot, relative_path)))
+
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
+ # calculate config file protection stuff
+ mydestdir = os.path.dirname(mydest)
+ moveme = 1
+ zing = "!!!"
+ mymtime = None
+ protected = self.isprotected(mydest)
+ if mydmode is not None and stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a regular file is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif mydmode is None or stat.S_ISREG(mydmode) or \
+ (stat.S_ISLNK(mydmode) and os.path.exists(mydest)
+ and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ destmd5 = None
+ if protected and mydmode is not None:
+ destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
+ if protect_if_modified:
+ contents_key = \
+ self._installed_instance._match_contents(myrealdest)
+ if contents_key:
+ inst_info = self._installed_instance.getcontents()[contents_key]
+ if inst_info[0] == "obj" and inst_info[2] == destmd5:
+ protected = False
+
+ if protected:
+ # we have a protection path; enable config file management.
+ cfgprot = 0
+ cfgprot_force = False
+ if mydmode is None:
+ if self._installed_instance is not None and \
+ self._installed_instance._match_contents(
+ myrealdest) is not False:
+ # If the file doesn't exist, then it may
+ # have been deleted or renamed by the
+ # admin. Therefore, force the file to be
+ # merged with a ._cfg name, so that the
+ # admin will be prompted for this update
+ # (see bug #523684).
+ cfgprot_force = True
+ moveme = True
+ cfgprot = True
+ elif mymd5 == destmd5:
+ #file already in place; simply update mtimes of destination
+ moveme = 1
+ else:
+ if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """ An identical update has previously been
+ merged. Skip it unless the user has chosen
+ --noconfmem."""
+ moveme = cfgfiledict["IGNORE"]
+ cfgprot = cfgfiledict["IGNORE"]
+ if not moveme:
+ zing = "---"
+ if sys.hexversion >= 0x3030000:
+ mymtime = mystat.st_mtime_ns
+ else:
+ mymtime = mystat[stat.ST_MTIME]
+ else:
+ moveme = 1
+ cfgprot = 1
+ if moveme:
+ # Merging a new file, so update confmem.
+ cfgfiledict[myrealdest] = [mymd5]
+ elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """A previously remembered update has been
+ accepted, so it is removed from confmem."""
+ del cfgfiledict[myrealdest]
+
+ if cfgprot:
+ mydest = new_protect_filename(mydest,
+ newmd5=mymd5,
+ force=cfgprot_force)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ # Create hardlinks only for source files that already exist
+ # as hardlinks (having identical st_dev and st_ino).
+ hardlink_key = (mystat.st_dev, mystat.st_ino)
+
+ hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
+ if hardlink_candidates is None:
+ hardlink_candidates = []
+ self._hardlink_merge_map[hardlink_key] = hardlink_candidates
+
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ hardlink_candidates=hardlink_candidates,
+ encoding=_encodings['merge'])
+ if mymtime is None:
+ return 1
+ hardlink_candidates.append(mydest)
+ zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ if mymtime is not None:
+ if sys.hexversion >= 0x3030000:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ showMessage("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing = "!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge']) is not None:
+ zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ else:
+ return 1
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ showMessage(zing + " " + mydest + "\n")
+
+ def _merged_path(self, path, lstatobj, exists=True):
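+ # Track the shortest merged path seen on each device (st_dev) so that
+ # _post_merge_sync() can later sync each affected filesystem just once.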
+ previous_path = self._device_path_map.get(lstatobj.st_dev)
+ if previous_path is None or previous_path is False or \
+ (exists and len(path) < len(previous_path)):
+ if exists:
+ self._device_path_map[lstatobj.st_dev] = path
+ else:
+ # This entry is used to indicate that we've unmerged
+ # a file from this device, and later, this entry is
+ # replaced by a parent directory.
+ self._device_path_map[lstatobj.st_dev] = False
+
+ def _post_merge_sync(self):
+ """
+ Call this after merge or unmerge, in order to sync relevant files to
+ disk and avoid data-loss in the event of a power failure. This method
+ does nothing if FEATURES=merge-sync is disabled.
+ """
+ if not self._device_path_map or \
+ "merge-sync" not in self.settings.features:
+ return
+
+ returncode = None
+ if platform.system() == "Linux":
+
+ paths = []
+ for path in self._device_path_map.values():
+ if path is not False:
+ paths.append(path)
+ paths = tuple(paths)
+
+ proc = SyncfsProcess(paths=paths,
+ scheduler=(self._scheduler or
+ portage._internal_caller and global_event_loop() or
+ EventLoop(main=False)))
+ proc.start()
+ returncode = proc.wait()
+
+ if returncode is None or returncode != os.EX_OK:
+ try:
+ proc = subprocess.Popen(["sync"])
+ except EnvironmentError:
+ pass
+ else:
+ proc.wait()
+
+ def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ retval = -1
+ parallel_install = "parallel-install" in self.settings.features
+ if not parallel_install:
+ self.lockdb()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if self._scheduler is None:
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ try:
+ retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
+ counter=counter)
+
+ # If PORTAGE_BUILDDIR doesn't exist, then it probably means
+ # fail-clean is enabled, and the success/die hooks have
+ # already been called by EbuildPhase.
+ if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+
+ if retval == os.EX_OK:
+ phase = 'success_hooks'
+ else:
+ phase = 'die_hooks'
+
+ ebuild_phase = MiscFunctionsProcess(
+ background=False, commands=[phase],
+ scheduler=self._scheduler, settings=self.settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self._elog_process()
+
+ if 'noclean' not in self.settings.features and \
+ (retval == os.EX_OK or \
+ 'fail-clean' in self.settings.features):
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+
+ doebuild_environment(myebuild, "clean",
+ settings=self.settings, db=mydbapi)
+ phase = EbuildPhase(background=False, phase="clean",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+ finally:
+ self.settings.pop('REPLACING_VERSIONS', None)
+ if self.vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ self.vartree.dbapi._linkmap._clear_cache()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if not parallel_install:
+ self.unlockdb()
+ return retval
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ with io.open(
+ _unicode_encode(os.path.join(self.dbdir, name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ mydata = f.read().split()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ return f.read()
+
+ def setfile(self,fname,data):
+ kwargs = {}
+ if fname == 'environment.bz2' or not isinstance(data, basestring):
+ kwargs['mode'] = 'wb'
+ else:
+ kwargs['mode'] = 'w'
+ kwargs['encoding'] = _encodings['repo.content']
+ write_atomic(os.path.join(self.dbdir, fname), data,
+ **portage._native_kwargs(kwargs))
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ with io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ mylines = f.readlines()
+ myreturn = []
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ with io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace') as f:
+ for x in mylist:
+ f.write("%s\n" % x)
+
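+ # Illustrative round-trip (sketch; "PROVIDE" is just an example name):
+ # setelements(["virtual/editor"], "PROVIDE") writes one element per
+ # line, and getelements("PROVIDE") reads them back as
+ # ["virtual/editor"], splitting on whitespace.
+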
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+
+ def _pre_merge_backup(self, backup_dblink, downgrade):
+
+ if ("unmerge-backup" in self.settings.features or
+ (downgrade and "downgrade-backup" in self.settings.features)):
+ return self._quickpkg_dblink(backup_dblink, False, None)
+
+ return os.EX_OK
+
+ def _pre_unmerge_backup(self, background):
+
+ if "unmerge-backup" in self.settings.features :
+ logfile = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return self._quickpkg_dblink(self, background, logfile)
+
+ return os.EX_OK
+
+ def _quickpkg_dblink(self, backup_dblink, background, logfile):
+
+ trees = QueryCommand.get_db()[self.settings["EROOT"]]
+ bintree = trees["bintree"]
+ binpkg_path = bintree.getname(backup_dblink.mycpv)
+ if os.path.exists(binpkg_path) and \
+ catsplit(backup_dblink.mycpv)[1] not in bintree.invalids:
+ return os.EX_OK
+
+ self.lockdb()
+ try:
+
+ if not backup_dblink.exists():
+ # It got unmerged by a concurrent process.
+ return os.EX_OK
+
+ # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and stuff.
+ quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
+ "quickpkg")
+
+ if not os.access(quickpkg_binary, os.X_OK):
+ # If not running from the source tree, use PATH.
+ quickpkg_binary = find_binary("quickpkg")
+ if quickpkg_binary is None:
+ self._display_merge(
+ _("%s: command not found") % "quickpkg",
+ level=logging.ERROR, noiselevel=-1)
+ return 127
+
+ # Let quickpkg inherit the global vartree config's env.
+ env = dict(self.vartree.settings.items())
+ env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
+
+ pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
+ if not pythonpath or \
+ not os.path.samefile(pythonpath[0], portage._pym_path):
+ pythonpath.insert(0, portage._pym_path)
+ env['PYTHONPATH'] = ":".join(pythonpath)
+
+ quickpkg_proc = SpawnProcess(
+ args=[portage._python_interpreter, quickpkg_binary,
+ "=%s" % (backup_dblink.mycpv,)],
+ background=background, env=env,
+ scheduler=self._scheduler, logfile=logfile)
+ quickpkg_proc.start()
+
+ return quickpkg_proc.wait()
+
+ finally:
+ self.unlockdb()
+
+def merge(mycat, mypkg, pkgloc, infloc,
+ myroot=None, settings=None, myebuild=None,
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
+ scheduler=None, fd_pipes=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ if not os.access(settings['EROOT'], os.W_OK):
+ writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
+ noiselevel=-1)
+ return errno.EACCES
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=mytree, vartree=vartree,
+ scheduler=(scheduler or portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ background=background, blockers=blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
+ fd_pipes=fd_pipes)
+ merge_task.start()
+ retcode = merge_task.wait()
+ return retcode
+
+def unmerge(cat, pkg, myroot=None, settings=None,
+ mytrimworld=None, vartree=None,
+ ldpath_mtimes=None, scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ @param mytrimworld: ignored
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
+ vartree=vartree, scheduler=scheduler)
+ vartree = mylink.vartree
+ parallel_install = "parallel-install" in settings.features
+ if not parallel_install:
+ mylink.lockdb()
+ try:
+ if mylink.exists():
+ retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
+ if retval == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ return retval
+ return os.EX_OK
+ finally:
+ if vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ vartree.dbapi._linkmap._clear_cache()
+ if not parallel_install:
+ mylink.unlockdb()
+
+def write_contents(contents, root, f):
+ """
+ Write contents to any file-like object. The file will be left open.
+ """
+ root_len = len(root) - 1
+ for filename in sorted(contents):
+ entry_data = contents[filename]
+ entry_type = entry_data[0]
+ relative_filename = filename[root_len:]
+ if entry_type == "obj":
+ entry_type, mtime, md5sum = entry_data
+ line = "%s %s %s %s\n" % \
+ (entry_type, relative_filename, md5sum, mtime)
+ elif entry_type == "sym":
+ entry_type, mtime, link = entry_data
+ line = "%s %s -> %s %s\n" % \
+ (entry_type, relative_filename, link, mtime)
+ else: # dir, dev, fif
+ line = "%s %s\n" % (entry_type, relative_filename)
+ f.write(line)
+
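+# Example of the CONTENTS-style lines emitted above (illustrative values,
+# not taken from a real vdb):
+#   dir /usr/bin
+#   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1433064000
+#   sym /usr/bin/foo.sh -> foo 1433064000
+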
+def tar_contents(contents, root, tar, protect=None, onProgress=None):
+ os = _os_merge
+ encoding = _encodings['merge']
+
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ encoding = _encodings['fs']
+
+ tar.encoding = encoding
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ id_strings = {}
+ maxval = len(contents)
+ curval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ paths = list(contents)
+ paths.sort()
+ for path in paths:
+ curval += 1
+ try:
+ lst = os.lstat(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if onProgress:
+ onProgress(maxval, curval)
+ continue
+ contents_type = contents[path][0]
+ if path.startswith(root):
+ arcname = "./" + path[len(root):]
+ else:
+ raise ValueError("invalid root argument: '%s'" % root)
+ live_path = path
+ if 'dir' == contents_type and \
+ not stat.S_ISDIR(lst.st_mode) and \
+ os.path.isdir(live_path):
+ # Even though this was a directory in the original ${D}, it exists
+ # as a symlink to a directory in the live filesystem. It must be
+ # recorded as a real directory in the tar file to ensure that tar
+ # can properly extract its children.
+ live_path = os.path.realpath(live_path)
+ lst = os.lstat(live_path)
+
+ # Since os.lstat() inside TarFile.gettarinfo() can trigger a
+ # UnicodeEncodeError when python has something other than utf_8
+ # return from sys.getfilesystemencoding() (as in bug #388773),
+ # we implement the needed functionality here, using the result
+ # of our successful lstat call. An alternative to this would be
+ # to pass in the fileobj argument to TarFile.gettarinfo(), so
+ # that it could use fstat instead of lstat. However, that would
+ # have the unwanted effect of dereferencing symlinks.
+
+ tarinfo = tar.tarinfo()
+ tarinfo.name = arcname
+ tarinfo.mode = lst.st_mode
+ tarinfo.uid = lst.st_uid
+ tarinfo.gid = lst.st_gid
+ tarinfo.size = 0
+ tarinfo.mtime = lst.st_mtime
+ tarinfo.linkname = ""
+ if stat.S_ISREG(lst.st_mode):
+ inode = (lst.st_ino, lst.st_dev)
+ if (lst.st_nlink > 1 and
+ inode in tar.inodes and
+ arcname != tar.inodes[inode]):
+ tarinfo.type = tarfile.LNKTYPE
+ tarinfo.linkname = tar.inodes[inode]
+ else:
+ tar.inodes[inode] = arcname
+ tarinfo.type = tarfile.REGTYPE
+ tarinfo.size = lst.st_size
+ elif stat.S_ISDIR(lst.st_mode):
+ tarinfo.type = tarfile.DIRTYPE
+ elif stat.S_ISLNK(lst.st_mode):
+ tarinfo.type = tarfile.SYMTYPE
+ tarinfo.linkname = os.readlink(live_path)
+ else:
+ continue
+ try:
+ tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+ except KeyError:
+ pass
+ try:
+ tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+ except KeyError:
+ pass
+
+ if stat.S_ISREG(lst.st_mode):
+ if protect and protect(path):
+ # Create an empty file as a place holder in order to avoid
+ # potential collision-protect issues.
+ f = tempfile.TemporaryFile()
+ f.write(_unicode_encode(
+ "# empty file because --include-config=n " + \
+ "when `quickpkg` was used\n"))
+ f.flush()
+ f.seek(0)
+ tarinfo.size = os.fstat(f.fileno()).st_size
+ tar.addfile(tarinfo, f)
+ f.close()
+ else:
+ with open(_unicode_encode(path,
+ encoding=encoding,
+ errors='strict'), 'rb') as f:
+ tar.addfile(tarinfo, f)
+
+ else:
+ tar.addfile(tarinfo)
+ if onProgress:
+ onProgress(maxval, curval)
diff --git a/usr/lib/portage/pym/portage/dbapi/virtual.py b/usr/lib/portage/pym/portage/dbapi/virtual.py
new file mode 100644
index 0000000..ba9745c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dbapi/virtual.py
@@ -0,0 +1,167 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+from portage.versions import cpv_getkey, _pkg_str
+
+class fakedbapi(dbapi):
+ """A fake dbapi that allows consumers to inject/remove packages to/from it
+ portage.settings is required to maintain the dbAPI.
+ """
+ def __init__(self, settings=None, exclusive_slots=True):
+ """
+ @param exclusive_slots: When True, injecting a package with SLOT
+ metadata causes an existing package in the same slot to be
+ automatically removed (default is True).
+ @type exclusive_slots: Boolean
+ """
+ self._exclusive_slots = exclusive_slots
+ self.cpvdict = {}
+ self.cpdict = {}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ atom = dep_expand(origdep, mydb=self, settings=self.settings)
+ cache_key = (atom, atom.unevaluated_atom)
+ result = self._match_cache.get(cache_key)
+ if result is not None:
+ return result[:]
+ result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+ self._match_cache[cache_key] = result
+ return result[:]
+
+ def cpv_exists(self, mycpv, myrepo=None):
+ return mycpv in self.cpvdict
+
+ def cp_list(self, mycp, use_cache=1, myrepo=None):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ cache_key = (mycp, mycp)
+ cachelist = self._match_cache.get(cache_key)
+ if cachelist is not None:
+ return cachelist[:]
+ cpv_list = self.cpdict.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ self._cpv_sort_ascending(cpv_list)
+ self._match_cache[cache_key] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self.cpdict)
+
+ def cpv_all(self):
+ return list(self.cpvdict)
+
+ def cpv_inject(self, mycpv, metadata=None):
+ """Adds a cpv to the list of available packages. See the
+ exclusive_slots constructor parameter for behavior with
+ respect to SLOT metadata.
+ @param mycpv: cpv for the package to inject
+ @type mycpv: str
+ @param metadata: dictionary of raw metadata for aux_get() calls
+ @type metadata: dict
+ """
+ self._clear_cache()
+
+ try:
+ mycp = mycpv.cp
+ except AttributeError:
+ mycp = None
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ myslot = None
+
+ if mycp is None or \
+ (myslot is None and metadata is not None and metadata.get('SLOT')):
+ if metadata is None:
+ mycpv = _pkg_str(mycpv)
+ else:
+ mycpv = _pkg_str(mycpv, metadata=metadata,
+ settings=self.settings)
+
+ mycp = mycpv.cp
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ pass
+
+ self.cpvdict[mycpv] = metadata
+ if not self._exclusive_slots:
+ myslot = None
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if mycpv != cpv:
+ try:
+ other_slot = cpv.slot
+ except AttributeError:
+ pass
+ else:
+ if myslot == other_slot:
+ self.cpv_remove(cpv)
+ break
+
+ cp_list = self.cpdict.get(mycp)
+ if cp_list is None:
+ cp_list = []
+ self.cpdict[mycp] = cp_list
+ try:
+ cp_list.remove(mycpv)
+ except ValueError:
+ pass
+ cp_list.append(mycpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ if mycpv in self.cpvdict:
+ del self.cpvdict[mycpv]
+ if mycp not in self.cpdict:
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if not self.cpv_exists(mycpv):
+ raise KeyError(mycpv)
+ metadata = self.cpvdict[mycpv]
+ if not metadata:
+ return ["" for x in wants]
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ self.cpvdict[cpv].update(values)
+
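+# Minimal illustrative usage of fakedbapi (sketch; 'mysettings' stands in
+# for a real config instance):
+#   fakedb = fakedbapi(settings=mysettings)
+#   fakedb.cpv_inject("sys-apps/portage-2.2.14", metadata={"SLOT": "0"})
+#   fakedb.match("sys-apps/portage")  # -> ["sys-apps/portage-2.2.14"]
+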
+class testdbapi(object):
+ """A dbapi instance with completely fake functions to get by hitting disk
+ TODO(antarus):
+ This class really needs to be rewritten to have better stubs; but these work for now.
+ The dbapi classes themselves need unit tests...and that will be a lot of work.
+ """
+
+ def __init__(self):
+ self.cpvs = {}
+ def f(*args, **kwargs):
+ return True
+ fake_api = dir(dbapi)
+ for call in fake_api:
+ if not hasattr(self, call):
+ setattr(self, call, f)
diff --git a/usr/lib/portage/pym/portage/debug.py b/usr/lib/portage/pym/portage/debug.py
new file mode 100644
index 0000000..d5a8cfb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/debug.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage.const
+from portage.util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
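+# Illustrative usage (sketch): wrap a suspect code path with tracing.
+#   from portage.debug import set_trace
+#   set_trace(True)
+#   ...  # traced events are reported via portage.util.writemsg()
+#   set_trace(False)
+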
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x) == "python%s.%s" % sys.version_info[:2]:
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage.const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, _arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, _frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, _event, _arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, _event, _arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.items():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
diff --git a/usr/lib/portage/pym/portage/dep/__init__.py b/usr/lib/portage/pym/portage/dep/__init__.py
new file mode 100644
index 0000000..c457df0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dep/__init__.py
@@ -0,0 +1,2821 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'Atom', 'best_match_to_list', 'cpvequal',
+ 'dep_getcpv', 'dep_getkey', 'dep_getslot',
+ 'dep_getusedeps', 'dep_opconvert', 'flatten',
+ 'get_operator', 'isjustname', 'isspecific',
+ 'isvalidatom', 'match_from_list', 'match_to_list',
+ 'paren_enclose', 'paren_normalize', 'paren_reduce',
+ 'remove_slot', 'strip_empty', 'use_reduce',
+ '_repo_separator', '_slot_separator',
+]
+
+import re, sys
+import warnings
+from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:cmp_sort_key,writemsg',
+)
+
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString
+from portage.localization import _
+from portage.versions import catpkgsplit, catsplit, \
+ vercmp, ververify, _cp, _cpv, _pkg_str, _slot, _unknown_repo, _vr
+import portage.cache.mappings
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ _unicode = str
+else:
+ _unicode = unicode
+
+# \w is [a-zA-Z0-9_]
+
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot_separator = ":"
+# loosely match SLOT, which may have an optional ABI part
+_slot_loose = r'([\w+./*=-]+)'
+
+_use = r'\[.*\]'
+_op = r'([=~]|[><]=?)'
+
+_repo_separator = "::"
+_repo_name = r'[\w][\w-]*'
+_repo_name_re = re.compile('^' + _repo_name + '$', re.UNICODE)
+_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
+
+_extended_cat = r'[\w+*][\w+.*-]*'
+
+_slot_dep_re_cache = {}
+
+def _get_slot_dep_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_dep_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'?(\*|=|/' + _slot + r'=?)?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_dep_re_cache[cache_key] = slot_re
+ return slot_re
+
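+# With slot_operator support, the pattern above accepts values such as
+# "2", "2/2.1", "2=", "2/2.1=", "=" and "*" (the leading ":" separator is
+# stripped before matching); without it, only a plain slot name is allowed.
+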
+def _match_slot(atom, pkg):
+ if pkg.slot == atom.slot:
+ if not atom.sub_slot:
+ return True
+ elif atom.sub_slot == pkg.sub_slot:
+ return True
+ return False
+
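+# Illustrative behavior of _match_slot (sketch): an atom with slot "2" and
+# no sub_slot matches any package in slot "2"; an atom with slot "2" and
+# sub_slot "2.1" additionally requires the package's sub_slot to be "2.1".
+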
+_atom_re_cache = {}
+
+def _get_atom_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ atom_re = _atom_re_cache.get(cache_key)
+ if atom_re is not None:
+ return atom_re
+
+ if eapi_attrs.dots_in_PN:
+ cp_re = _cp['dots_allowed_in_PN']
+ cpv_re = _cpv['dots_allowed_in_PN']
+ else:
+ cp_re = _cp['dots_disallowed_in_PN']
+ cpv_re = _cpv['dots_disallowed_in_PN']
+
+ atom_re = re.compile('^(?P<without_use>(?:' +
+ '(?P<op>' + _op + cpv_re + ')|' +
+ '(?P<star>=' + cpv_re + r'\*)|' +
+ '(?P<simple>' + cp_re + '))' +
+ '(' + _slot_separator + _slot_loose + ')?' +
+ _repo + ')(' + _use + ')?$', re.VERBOSE | re.UNICODE)
+
+ _atom_re_cache[cache_key] = atom_re
+ return atom_re
+
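+# Forms matched by the compiled atom pattern include (illustrative):
+#   "cat/pkg"        -> 'simple' group
+#   ">=cat/pkg-1.0"  -> 'op' group
+#   "=cat/pkg-1.0*"  -> 'star' group
+# each optionally followed by ":slot", "::repo" and a "[use]" suffix.
+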
+_atom_wildcard_re_cache = {}
+
+def _get_atom_wildcard_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ atom_re = _atom_wildcard_re_cache.get(cache_key)
+ if atom_re is not None:
+ return atom_re
+
+ if eapi_attrs.dots_in_PN:
+ pkg_re = r'[\w+*][\w+.*-]*?'
+ else:
+ pkg_re = r'[\w+*][\w+*-]*?'
+
+ atom_re = re.compile(r'((?P<simple>(' +
+ _extended_cat + r')/(' + pkg_re + r'(-' + _vr + ')?))' + \
+ '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\w+\*)))' + \
+ '(:(?P<slot>' + _slot_loose + r'))?(' +
+ _repo_separator + r'(?P<repo>' + _repo_name + r'))?$', re.UNICODE)
+
+ _atom_wildcard_re_cache[cache_key] = atom_re
+ return atom_re
+
+_usedep_re_cache = {}
+
+def _get_usedep_re(eapi_attrs):
+ """
+ @param eapi_attrs: The EAPI attributes from _get_eapi_attrs
+ @type eapi_attrs: _eapi_attrs
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE deps for the
+ given eapi.
+ """
+ cache_key = eapi_attrs.dots_in_use_flags
+ usedep_re = _usedep_re_cache.get(cache_key)
+ if usedep_re is not None:
+ return usedep_re
+
+ if eapi_attrs.dots_in_use_flags:
+ _flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
+ else:
+ _flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
+
+ usedep_re = re.compile(r'^(?P<prefix>[!-]?)(?P<flag>' +
+ _flag_re + r')(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$')
+
+ _usedep_re_cache[cache_key] = usedep_re
+ return usedep_re
+
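+# Illustrative tokens matched by the pattern above:
+#   "flag"     -> prefix='',  flag='flag', default='',    suffix=''
+#   "-flag"    -> prefix='-', flag='flag', default='',    suffix=''
+#   "!flag?"   -> prefix='!', flag='flag', default='',    suffix='?'
+#   "flag(+)=" -> prefix='',  flag='flag', default='(+)', suffix='='
+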
+_useflag_re_cache = {}
+
+def _get_useflag_re(eapi):
+ """
+ When eapi is None then validation is not as strict, since we want the
+ same to work for multiple EAPIs that may have slightly different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE flags for the
+ given eapi.
+ """
+ eapi_attrs = _get_eapi_attrs(eapi)
+ cache_key = eapi_attrs.dots_in_use_flags
+ useflag_re = _useflag_re_cache.get(cache_key)
+ if useflag_re is not None:
+ return useflag_re
+
+ if eapi_attrs.dots_in_use_flags:
+ flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
+ else:
+ flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
+
+ useflag_re = re.compile(r'^' + flag_re + r'$')
+
+ _useflag_re_cache[cache_key] = useflag_re
+ return useflag_re
+
+def cpvequal(cpv1, cpv2):
+ """
+
+ @param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv1: String
+ @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv2: String
+ @rtype: Boolean
+ @return:
+ 1. True if cpv1 = cpv2
+ 2. False Otherwise
+ 3. Throws PortageException if cpv1 or cpv2 is not a CPV
+
+ Example Usage:
+ >>> from portage.dep import cpvequal
+ >>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
+ True
+
+ """
+
+ try:
+ try:
+ split1 = cpv1.cpv_split
+ except AttributeError:
+ cpv1 = _pkg_str(cpv1)
+ split1 = cpv1.cpv_split
+
+ try:
+ split2 = cpv2.cpv_split
+ except AttributeError:
+ cpv2 = _pkg_str(cpv2)
+ split2 = cpv2.cpv_split
+
+ except InvalidData:
+ raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
+
+ if split1[0] != split2[0] or \
+ split1[1] != split2[1]:
+ return False
+
+ return vercmp(cpv1.version, cpv2.version) == 0
+
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
+ """
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
+ return [x for x in myarr if x]
+
+def paren_reduce(mystr, _deprecation_warn=True):
+ """
+ Take a string and convert all paren enclosed entities into sublists and
+ split the list elements by spaces. All redundant brackets are removed.
+
+ Example usage:
+ >>> paren_reduce('foobar foo? ( bar baz )')
+ ['foobar', 'foo?', ['bar', 'baz']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ if portage._internal_caller and _deprecation_warn:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ else:
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return stack[0]
+
+class paren_normalize(list):
+ """Take a dependency structure as returned by paren_reduce or use_reduce
+ and generate an equivalent structure that has no redundant lists."""
+ def __init__(self, src):
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
+ list.__init__(self)
+ self._zap_parens(src, self)
+
+ def _zap_parens(self, src, dest, disjunction=False):
+ if not src:
+ return dest
+ i = iter(src)
+ for x in i:
+ if isinstance(x, basestring):
+ if x in ('||', '^^'):
+ y = self._zap_parens(next(i), [], disjunction=True)
+ if len(y) == 1:
+ dest.append(y[0])
+ else:
+ dest.append(x)
+ dest.append(y)
+ elif x.endswith("?"):
+ dest.append(x)
+ dest.append(self._zap_parens(next(i), []))
+ else:
+ dest.append(x)
+ else:
+ if disjunction:
+ x = self._zap_parens(x, [])
+ if len(x) == 1:
+ dest.append(x[0])
+ else:
+ dest.append(x)
+ else:
+ self._zap_parens(x, dest)
+ return dest
+
+def paren_enclose(mylist, unevaluated_atom=False, opconvert=False):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ if opconvert and x and x[0] == "||":
+ mystrparts.append("%s ( %s )" % (x[0], paren_enclose(x[1:])))
+ else:
+ mystrparts.append("( %s )" % paren_enclose(x))
+ else:
+ if unevaluated_atom:
+ x = getattr(x, 'unevaluated_atom', x)
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
+ eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
+ """
+ Takes a dep string and reduces the use? conditionals out, leaving an array
+ with subarrays. All redundant brackets are removed.
+
+ @param depstr: depstring
+ @type depstr: String
+ @param uselist: List of use enabled flags
+ @type uselist: List
+ @param masklist: List of masked flags (always treated as disabled)
+ @type masklist: List
+ @param matchall: Treat all conditionals as active. Used by repoman.
+ @type matchall: Bool
+ @param excludeall: List of flags for which negated conditionals are always treated as inactive.
+ @type excludeall: List
+ @param is_src_uri: Indicates if depstr represents a SRC_URI
+ @type is_src_uri: Bool
+ @param eapi: Indicates the EAPI the dep string has to comply to
+ @type eapi: String
+ @param opconvert: Put every operator as first element into its argument list
+ @type opconvert: Bool
+ @param flat: Create a flat list of all tokens
+ @type flat: Bool
+ @param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
+ @type is_valid_flag: Function
+ @param token_class: Convert all non operator tokens into this class
+ @type token_class: Class
+ @param matchnone: Treat all conditionals as inactive. Used by digestgen().
+ @type matchnone: Bool
+ @rtype: List
+ @return: The use reduced depend array
+ """
+ if isinstance(depstr, list):
+ if portage._internal_caller:
+ warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
+ "Pass the original dep string instead.") % \
+ ('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
+ depstr = paren_enclose(depstr)
+
+ if opconvert and flat:
+ raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
+
+ if matchall and matchnone:
+ raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ useflag_re = _get_useflag_re(eapi)
+
+ def is_active(conditional):
+ """
+ Decides if a given use conditional is active.
+ """
+ if conditional.startswith("!"):
+ flag = conditional[1:-1]
+ is_negated = True
+ else:
+ flag = conditional[:-1]
+ is_negated = False
+
+ if is_valid_flag:
+ if not is_valid_flag(flag):
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' is not in IUSE") \
+ % (flag, conditional)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+ else:
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
+
+ if is_negated and flag in excludeall:
+ return False
+
+ if flag in masklist:
+ return is_negated
+
+ if matchall:
+ return True
+
+ if matchnone:
+ return False
+
+ return (flag in uselist and not is_negated) or \
+ (flag not in uselist and is_negated)
+
+ def missing_white_space_check(token, pos):
+ """
+ Used to generate good error messages for invalid tokens.
+ """
+ for x in (")", "(", "||"):
+ if token.startswith(x) or token.endswith(x):
+ raise InvalidDependString(
+ _("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
+
+ mysplit = depstr.split()
+ #Count the bracket level.
+ level = 0
+ #We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
+ #When we hit a ')', the last list in the stack is merged with list one level up.
+ stack = [[]]
+ #Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
+ #that need_bracket is not True.
+ need_bracket = False
+ #Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
+ #that need_simple_token is not True.
+ need_simple_token = False
+
+ for pos, token in enumerate(mysplit):
+ if token == "(":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
+ raise InvalidDependString(
+ _("expected: dependency string, got: ')', token %s") % (pos+1,))
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+
+ is_single = len(l) == 1 or \
+ (opconvert and l and l[0] == "||") or \
+ (not opconvert and len(l)==2 and l[0] == "||")
+ ignore = False
+
+ if flat:
+ #In 'flat' mode, we simply merge all lists into a single large one.
+ if stack[level] and stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional. The conditional is removed in any case.
+ #Merge the current list if needed.
+ if is_active(stack[level][-1]):
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ stack[level].pop()
+ else:
+ stack[level].extend(l)
+ continue
+
+ if stack[level]:
+ if stack[level][-1] == "||" and not l:
+ #Optimize: || ( ) -> .
+ stack[level].pop()
+ elif stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional, remove it and decide if we
+ #have to keep the current list.
+ if not is_active(stack[level][-1]):
+ ignore = True
+ stack[level].pop()
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def starts_with_any_of_dep(k):
+ #'ends_in_any_of_dep' for opconvert
+ return k>=0 and stack[k] and stack[k][0] == "||"
+
+ def last_any_of_operator_level(k):
+ #Returns the level of the last || operator if it is in effect for
+ #the current level. It is not in effect, if there is a level, that
+ #ends in a non-operator. This is almost equivalent to stack[level][-1]=="||",
+ #except that it skips empty levels.
+ while k>=0:
+ if stack[k]:
+ if stack[k][-1] == "||":
+ return k
+ elif stack[k][-1][-1] != "?":
+ return -1
+ k -= 1
+ return -1
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single:
+ #Either [A], [[...]] or [|| [...]]
+ if l[0] == "||" and ends_in_any_of_dep(level-1):
+ if opconvert:
+ stack[level].extend(l[1:])
+ else:
+ stack[level].extend(l[1])
+ elif len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ last = last_any_of_operator_level(level-1)
+ if last == -1:
+ if opconvert and isinstance(l[0], list) \
+ and l[0] and l[0][0] == '||':
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l[0])
+ else:
+ if opconvert and l[0] and l[0][0] == "||":
+ stack[level].extend(l[0][1:])
+ else:
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ if opconvert and stack[level] and stack[level][-1] == '||':
+ stack[level][-1] = ['||'] + l
+ else:
+ stack[level].append(l)
+
+ if l and not ignore:
+ #The current list is not empty and we don't want to ignore it because
+ #of an inactive use conditional.
+ if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif is_single and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A, || ( || ( ... ) ) -> || ( ... )
+ stack[level].pop()
+ special_append()
+ elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
+ #Optimize: || ( A || ( B C ) ) -> || ( A B C )
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ if opconvert and ends_in_any_of_dep(level):
+ #In opconvert mode, we have to move the operator from the level
+ #above into the current list.
+ stack[level].pop()
+ stack[level].append(["||"] + l)
+ else:
+ special_append()
+
+ else:
+ raise InvalidDependString(
+ _("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
+ elif token == "||":
+ if is_src_uri:
+ raise InvalidDependString(
+ _("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ need_bracket = True
+ stack[level].append(token)
+ elif token == "->":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if not is_src_uri:
+ raise InvalidDependString(
+ _("SRC_URI arrow are only allowed in SRC_URI: token %s") % (pos+1,))
+ if not eapi_attrs.src_uri_arrows:
+ raise InvalidDependString(
+ _("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
+ need_simple_token = True
+ stack[level].append(token)
+ else:
+ missing_white_space_check(token, pos)
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+
+ if need_simple_token and "/" in token:
+ #The last token was a SRC_URI arrow, make sure we have a simple file name.
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+
+ if token[-1] == "?":
+ need_bracket = True
+ else:
+ need_simple_token = False
+ if token_class and not is_src_uri:
+ #Add a hack for SRC_URI here, to avoid conditional code at the consumer level
+ try:
+ token = token_class(token, eapi=eapi,
+ is_valid_flag=is_valid_flag)
+ except InvalidAtom as e:
+ raise InvalidDependString(
+ _("Invalid atom (%s), token %s") \
+ % (e, pos+1), errors=(e,))
+ except SystemExit:
+ raise
+ except Exception as e:
+ raise InvalidDependString(
+ _("Invalid token '%s', token %s") % (token, pos+1))
+
+ if not matchall and \
+ hasattr(token, 'evaluate_conditionals'):
+ token = token.evaluate_conditionals(uselist)
+
+ stack[level].append(token)
+
+ if level != 0:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % (")",))
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % ("(",))
+
+ if need_simple_token:
+ raise InvalidDependString(
+ _("Missing file name at end of string"))
+
+ return stack[0]
+
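+# Illustrative results (sketch):
+#   use_reduce("a? ( b ) c", uselist=["a"])  -> ['b', 'c']
+#   use_reduce("a? ( b ) c")                 -> ['c']
+#   use_reduce("|| ( a b )", opconvert=True) -> [['||', 'a', 'b']]
+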
+def dep_opconvert(deplist):
+ """
+ Iterate recursively through a list of deps; if the
+ dep is a '||' or '&&' operator, combine it with the
+ list of deps that follows.
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+ @type deplist: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
+ ('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def flatten(mylist):
+ """
+ Recursively traverse nested lists and return a single list containing
+ all non-list elements that are found.
+
+ Example usage:
+ >>> flatten([1, [2, 3, [4]]])
+ [1, 2, 3, 4]
+
+ @param mylist: A list containing nested lists and non-list elements.
+ @type mylist: List
+ @rtype: List
+ @return: A single list containing only non-list elements.
+ """
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
+
+ newlist = []
+ for x in mylist:
+ if isinstance(x, list):
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+class _use_dep(object):
+
+ __slots__ = ("_eapi_attrs", "conditional", "missing_enabled", "missing_disabled",
+ "disabled", "enabled", "tokens", "required")
+
+ class _conditionals_class(object):
+ __slots__ = ("enabled", "disabled", "equal", "not_equal")
+
+ def items(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield (k, v)
+
+ def values(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield v
+
+ # used in InvalidAtom messages
+ _conditional_strings = {
+ 'enabled' : '%s?',
+ 'disabled': '!%s?',
+ 'equal': '%s=',
+ 'not_equal': '!%s=',
+ }
+
+ def __init__(self, use, eapi_attrs, enabled_flags=None, disabled_flags=None, missing_enabled=None,
+ missing_disabled=None, conditional=None, required=None):
+
+ self._eapi_attrs = eapi_attrs
+
+ if enabled_flags is not None:
+ #A shortcut for the class's own methods.
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(required)
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ return
+
+ enabled_flags = set()
+ disabled_flags = set()
+ missing_enabled = set()
+ missing_disabled = set()
+ no_default = set()
+
+ conditional = {}
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in use:
+ m = usedep_re.match(x)
+ if m is None:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ if default:
+ if default == "(+)":
+ if flag in missing_disabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_enabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_disabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in missing_disabled:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ no_default.add(flag)
+
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(no_default)
+
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ def __bool__(self):
+ return bool(self.tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __str__(self):
+ if not self.tokens:
+ return ""
+ return "[%s]" % (",".join(self.tokens),)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ def __repr__(self):
+ return "portage.dep._use_dep(%s)" % repr(self.tokens)
+
+ def evaluate_conditionals(self, use):
+ """
+ Create a new instance with conditionals evaluated.
+
+ Conditional evaluation behavior:
+
+ parent state conditional result
+
+ x x? x
+ -x x?
+ x !x?
+ -x !x? -x
+
+ x x= x
+ -x x= -x
+ x !x= -x
+ -x !x= x
+
+ Conditional syntax examples:
+
+ Compact Form Equivalent Expanded Form
+
+ foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+ foo[!bar?] bar? ( foo ) !bar? ( foo[-bar] )
+ foo[bar=] bar? ( foo[bar] ) !bar? ( foo[-bar] )
+ foo[!bar=] bar? ( foo[-bar] ) !bar? ( foo[bar] )
+
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+
+ tokens = []
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ else:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "!?":
+ if flag not in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create a new instance with satisfied use deps removed.
+ """
+ if parent_use is None and self.conditional:
+ raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
+ " parameter for conditional flags.")
+
+ enabled_flags = set()
+ disabled_flags = set()
+
+ conditional = {}
+ tokens = []
+
+ all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))
+
+ def validate_flag(flag):
+ return is_valid_flag(flag) or flag in all_defaults
+
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+
+ if not validate_flag(flag):
+ tokens.append(x)
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+
+ continue
+
+ if not operator:
+ if flag not in other_use:
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ enabled_flags.add(flag)
+ elif operator == "-":
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ else:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ elif operator == "?":
+ if flag not in parent_use or flag in other_use:
+ continue
+
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ if flag in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ if flag not in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif flag in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ if flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag) and flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
+ conditional=conditional, required=self.required)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ """
+ For repoman, evaluate all possible combinations within the constraints
+ of the given use.force and use.mask settings. The result may seem
+ ambiguous in the sense that the same flag can be in both the enabled
+	and disabled sets, but this is useful within the context of how it's
+	intended to be used by repoman. It is assumed that the caller has
+ already ensured that there is no intersection between the given
+ use_mask and use_force sets when necessary.
+ """
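+	# Illustrative sketch: for tokens ['bar='] with 'bar' neither masked
+	# nor forced, the result contains both 'bar' and '-bar', reflecting
+	# the intentional ambiguity described above.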
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+ missing_enabled = self.missing_enabled
+ missing_disabled = self.missing_disabled
+
+ tokens = []
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag not in use_force:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_mask:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!?":
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
+
+class Atom(_unicode):
+
+ """
+ For compatibility with existing atom string manipulation code, this
+ class emulates most of the str methods that are useful with atoms.
+ """
+
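+	# Illustrative usage (a sketch; values follow from the parsing logic
+	# in __init__ below):
+	#   >>> a = Atom('>=dev-libs/openssl-1.0:0')
+	#   >>> (a.cp, a.operator, a.slot)
+	#   ('dev-libs/openssl', '>=', '0')
+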
+ class _blocker(object):
+ __slots__ = ("overlap",)
+
+ class _overlap(object):
+ __slots__ = ("forbid",)
+
+ def __init__(self, forbid=False):
+ self.forbid = forbid
+
+ def __init__(self, forbid_overlap=False):
+ self.overlap = self._overlap(forbid=forbid_overlap)
+
+ def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
+ _use=None, eapi=None, is_valid_flag=None):
+ return _unicode.__new__(cls, s)
+
+ def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
+ _use=None, eapi=None, is_valid_flag=None):
+ if isinstance(s, Atom):
+ # This is an efficiency assertion, to ensure that the Atom
+ # constructor is not called redundantly.
+ raise TypeError(_("Expected %s, got %s") % \
+ (_unicode, type(s)))
+
+ if not isinstance(s, _unicode):
+ # Avoid TypeError from _unicode.__init__ with PyPy.
+ s = _unicode_decode(s)
+
+ _unicode.__init__(s)
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ atom_re = _get_atom_re(eapi_attrs)
+
+ self.__dict__['eapi'] = eapi
+ if eapi is not None:
+ # Ignore allow_repo when eapi is specified.
+ allow_repo = eapi_attrs.repo_deps
+ else:
+ if allow_repo is None:
+ allow_repo = True
+
+ blocker_prefix = ""
+ if "!" == s[:1]:
+ blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
+ if blocker.overlap.forbid:
+ blocker_prefix = s[:2]
+ s = s[2:]
+ else:
+ blocker_prefix = s[:1]
+ s = s[1:]
+ else:
+ blocker = False
+ self.__dict__['blocker'] = blocker
+ m = atom_re.match(s)
+ extended_syntax = False
+ extended_version = None
+ if m is None:
+ if allow_wildcard:
+ atom_re = _get_atom_wildcard_re(eapi_attrs)
+ m = atom_re.match(s)
+ if m is None:
+ raise InvalidAtom(self)
+ gdict = m.groupdict()
+ if m.group('star') is not None:
+ op = '=*'
+ base = atom_re.groupindex['star']
+ cp = m.group(base + 1)
+ cpv = m.group('star')[1:]
+ extended_version = m.group(base + 4)
+ else:
+ op = None
+ cpv = cp = m.group('simple')
+ if m.group(atom_re.groupindex['simple'] + 3) is not None:
+ raise InvalidAtom(self)
+ if cpv.find("**") != -1:
+ raise InvalidAtom(self)
+ slot = m.group('slot')
+ repo = m.group('repo')
+ use_str = None
+ extended_syntax = True
+ else:
+ raise InvalidAtom(self)
+ elif m.group('op') is not None:
+ base = atom_re.groupindex['op']
+ op = m.group(base + 1)
+ cpv = m.group(base + 2)
+ cp = m.group(base + 3)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ if m.group(base + 4) is not None:
+ raise InvalidAtom(self)
+ elif m.group('star') is not None:
+ base = atom_re.groupindex['star']
+ op = '=*'
+ cpv = m.group(base + 1)
+ cp = m.group(base + 2)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ if m.group(base + 3) is not None:
+ raise InvalidAtom(self)
+ elif m.group('simple') is not None:
+ op = None
+ cpv = cp = m.group(atom_re.groupindex['simple'] + 1)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ if m.group(atom_re.groupindex['simple'] + 2) is not None:
+ raise InvalidAtom(self)
+
+ else:
+ raise AssertionError(_("required group not found in atom: '%s'") % self)
+ self.__dict__['cp'] = cp
+ try:
+ self.__dict__['cpv'] = _pkg_str(cpv)
+ self.__dict__['version'] = self.cpv.version
+ except InvalidData:
+ # plain cp, wildcard, or something
+ self.__dict__['cpv'] = cpv
+ self.__dict__['version'] = extended_version
+ self.__dict__['repo'] = repo
+ if slot is None:
+ self.__dict__['slot'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
+ else:
+ slot_re = _get_slot_dep_re(eapi_attrs)
+ slot_match = slot_re.match(slot)
+ if slot_match is None:
+ raise InvalidAtom(self)
+ if eapi_attrs.slot_operator:
+ self.__dict__['slot'] = slot_match.group(1)
+ sub_slot = slot_match.group(2)
+ if sub_slot is not None:
+ sub_slot = sub_slot.lstrip("/")
+ if sub_slot in ("*", "="):
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = sub_slot
+ else:
+ slot_operator = None
+ if sub_slot is not None and sub_slot[-1:] == "=":
+ slot_operator = sub_slot[-1:]
+ sub_slot = sub_slot[:-1]
+ self.__dict__['sub_slot'] = sub_slot
+ self.__dict__['slot_operator'] = slot_operator
+ if self.slot is not None and self.slot_operator == "*":
+ raise InvalidAtom(self)
+ else:
+ self.__dict__['slot'] = slot
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
+ self.__dict__['operator'] = op
+ self.__dict__['extended_syntax'] = extended_syntax
+
+ if not (repo is None or allow_repo):
+ raise InvalidAtom(self)
+
+ if use_str is not None:
+ if _use is not None:
+ use = _use
+ else:
+ use = _use_dep(use_str[1:-1].split(","), eapi_attrs)
+ without_use = Atom(blocker_prefix + m.group('without_use'),
+ allow_repo=allow_repo)
+ else:
+ use = None
+ if unevaluated_atom is not None and \
+ unevaluated_atom.use is not None:
+ # unevaluated_atom.use is used for IUSE checks when matching
+ # packages, so it must not propagate to without_use
+ without_use = Atom(_unicode(self),
+ allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo,
+ eapi=eapi)
+ else:
+ without_use = self
+
+ self.__dict__['use'] = use
+ self.__dict__['without_use'] = without_use
+
+ if unevaluated_atom:
+ self.__dict__['unevaluated_atom'] = unevaluated_atom
+ else:
+ self.__dict__['unevaluated_atom'] = self
+
+ if eapi is not None:
+ if not isinstance(eapi, basestring):
+ raise TypeError('expected eapi argument of ' + \
+ '%s, got %s: %s' % (basestring, type(eapi), eapi,))
+ if self.slot and not eapi_attrs.slot_deps:
+ raise InvalidAtom(
+ _("Slot deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if self.use:
+ if not eapi_attrs.use_deps:
+ raise InvalidAtom(
+ _("Use deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ elif not eapi_attrs.use_dep_defaults and \
+ (self.use.missing_enabled or self.use.missing_disabled):
+ raise InvalidAtom(
+ _("Use dep defaults are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if is_valid_flag is not None and self.use.conditional:
+ invalid_flag = None
+ try:
+ for conditional_type, flags in \
+ self.use.conditional.items():
+ for flag in flags:
+ if not is_valid_flag(flag):
+ invalid_flag = (conditional_type, flag)
+ raise StopIteration()
+ except StopIteration:
+ pass
+ if invalid_flag is not None:
+ conditional_type, flag = invalid_flag
+ conditional_str = _use_dep._conditional_strings[conditional_type]
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' in atom '%s' is not in IUSE") \
+ % (flag, conditional_str % flag, self)
+ raise InvalidAtom(msg, category='IUSE.missing')
+ if self.blocker and self.blocker.overlap.forbid and not eapi_attrs.strong_blocks:
+ raise InvalidAtom(
+ _("Strong blocks are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+
+ @property
+ def slot_operator_built(self):
+ """
+ Returns True if slot_operator == "=" and sub_slot is not None.
+ NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
+ is built and returns True.
+ """
+ return self.slot_operator == "=" and self.sub_slot is not None
+
+ @property
+ def without_repo(self):
+ if self.repo is None:
+ return self
+ return Atom(self.replace(_repo_separator + self.repo, '', 1),
+ allow_wildcard=True)
+
+ @property
+ def without_slot(self):
+ if self.slot is None and self.slot_operator is None:
+ return self
+ atom = remove_slot(self)
+ if self.repo is not None:
+ atom += _repo_separator + self.repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom,
+ allow_repo=True, allow_wildcard=True)
+
+ def with_repo(self, repo):
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ atom += _repo_separator + repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom, allow_repo=True, allow_wildcard=True)
+
+ def with_slot(self, slot):
+ atom = remove_slot(self) + _slot_separator + slot
+ if self.repo is not None:
+ atom += _repo_separator + self.repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom, allow_repo=True, allow_wildcard=True)
+
+ def __setattr__(self, name, value):
+ raise AttributeError("Atom instances are immutable",
+ self.__class__, name, value)
+
+ def intersects(self, other):
+ """
+ Atoms with different cpv, operator or use attributes cause this method
+ to return False even though there may actually be some intersection.
+ TODO: Detect more forms of intersection.
+ @param other: The package atom to match
+ @type other: Atom
+ @rtype: Boolean
+ @return: True if this atom and the other atom intersect,
+ False otherwise.
+ """
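+	# Illustrative sketch: Atom('dev-libs/foo:1').intersects(
+	# Atom('dev-libs/foo')) is True, since only one of the atoms
+	# specifies a slot; two differing explicit slots return False.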
+ if not isinstance(other, Atom):
+ raise TypeError("expected %s, got %s" % \
+ (Atom, type(other)))
+
+ if self == other:
+ return True
+
+ if self.cp != other.cp or \
+ self.use != other.use or \
+ self.operator != other.operator or \
+ self.cpv != other.cpv:
+ return False
+
+ if self.slot is None or \
+ other.slot is None or \
+ self.slot == other.slot:
+ return True
+
+ return False
+
+ def evaluate_conditionals(self, use):
+ """
+ Create an atom instance with any USE conditionals evaluated.
+ @param use: The set of enabled USE flags
+ @type use: set
+ @rtype: Atom
+ @return: an atom instance with any USE conditionals evaluated
+ """
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use.evaluate_conditionals(use)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+	Create an atom instance with any USE conditionals that are satisfied
+	by other_use removed.
+ @param other_use: The set of enabled USE flags
+ @type other_use: set
+ @param is_valid_flag: Function that decides if a use flag is referenceable in use deps
+ @type is_valid_flag: function
+ @param parent_use: Set of enabled use flags of the package requiring this atom
+ @type parent_use: set
+ @rtype: Atom
+ @return: an atom instance with any satisfied USE conditionals removed
+ """
+ if not self.use:
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def __copy__(self):
+ """Immutable, so returns self."""
+ return self
+
+ def __deepcopy__(self, memo=None):
+ """Immutable, so returns self."""
+ memo[id(self)] = self
+ return self
+
+_extended_cp_re_cache = {}
+
+def extended_cp_match(extended_cp, other_cp):
+ """
+	Checks if an extended-syntax cp matches a non-extended cp.
+ """
+ # Escape special '+' and '.' characters which are allowed in atoms,
+ # and convert '*' to regex equivalent.
+ global _extended_cp_re_cache
+ extended_cp_re = _extended_cp_re_cache.get(extended_cp)
+ if extended_cp_re is None:
+ extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
+ r'\*', '[^/]*') + "$", re.UNICODE)
+ _extended_cp_re_cache[extended_cp] = extended_cp_re
+ return extended_cp_re.match(other_cp) is not None
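+
+# Illustrative sketch: extended_cp_match('dev-libs/*', 'dev-libs/foo') is
+# True, while extended_cp_match('dev-libs/*', 'dev-util/foo') is False.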
+
+class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
+ """
+ dict() wrapper that supports extended atoms as keys and allows lookup
+ of a normal cp against other normal cp and extended cp.
+ The value type has to be given to __init__ and is assumed to be the same
+ for all values.
+ """
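+	# Illustrative sketch of the merge behavior (assuming value_class=list):
+	#   d = ExtendedAtomDict(list)
+	#   d['dev-libs/*'] = ['a']; d['dev-libs/foo'] = ['b']
+	#   d['dev-libs/foo'] -> ['b', 'a']  (normal match first, then extended)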
+
+ __slots__ = ('_extended', '_normal', '_value_class')
+
+ def __init__(self, value_class):
+ self._extended = {}
+ self._normal = {}
+ self._value_class = value_class
+
+ def copy(self):
+ result = self.__class__(self._value_class)
+ result._extended.update(self._extended)
+ result._normal.update(self._normal)
+ return result
+
+ def __iter__(self):
+ for k in self._normal:
+ yield k
+ for k in self._extended:
+ yield k
+
+ def iteritems(self):
+ try:
+ for item in self._normal.items():
+ yield item
+ for item in self._extended.items():
+ yield item
+ except AttributeError:
+ pass # FEATURES=python-trace
+
+ def __delitem__(self, cp):
+ if "*" in cp:
+ return self._extended.__delitem__(cp)
+ else:
+ return self._normal.__delitem__(cp)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+ def __len__(self):
+ return len(self._normal) + len(self._extended)
+
+ def setdefault(self, cp, default=None):
+ if "*" in cp:
+ return self._extended.setdefault(cp, default)
+ else:
+ return self._normal.setdefault(cp, default)
+
+ def __getitem__(self, cp):
+
+ if not isinstance(cp, basestring):
+ raise KeyError(cp)
+
+ if '*' in cp:
+ return self._extended[cp]
+
+ ret = self._value_class()
+ normal_match = self._normal.get(cp)
+ match = False
+
+ if normal_match is not None:
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(normal_match)
+ elif hasattr(ret, "extend"):
+ ret.extend(normal_match)
+ else:
+ raise NotImplementedError()
+
+ for extended_cp in self._extended:
+ if extended_cp_match(extended_cp, cp):
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(self._extended[extended_cp])
+ elif hasattr(ret, "extend"):
+ ret.extend(self._extended[extended_cp])
+ else:
+ raise NotImplementedError()
+
+ if not match:
+ raise KeyError(cp)
+
+ return ret
+
+ def __setitem__(self, cp, val):
+ if "*" in cp:
+ self._extended[cp] = val
+ else:
+ self._normal[cp] = val
+
+ def __eq__(self, other):
+ return self._value_class == other._value_class and \
+ self._extended == other._extended and \
+ self._normal == other._normal
+
+ def clear(self):
+ self._extended.clear()
+ self._normal.clear()
+
+
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage.dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.operator
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.cpv
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ slot = getattr(mydep, "slot", False)
+ if slot is not False:
+ return slot
+
+ #remove repo_name if present
+ mydep = mydep.split(_repo_separator)[0]
+
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+1:]
+ else:
+ return mydep[colon+1:bracket]
+ return None
+
+def dep_getrepo(mydep):
+ """
+ Retrieve the repo on a depend.
+
+ Example usage:
+ >>> dep_getrepo('app-misc/test::repository')
+ 'repository'
+
+ @param mydep: The depstring to retrieve the repository of
+ @type mydep: String
+ @rtype: String
+ @return: The repository name
+ """
+ repo = getattr(mydep, "repo", False)
+ if repo is not False:
+ return repo
+
+ metadata = getattr(mydep, "metadata", False)
+ if metadata:
+ repo = metadata.get('repository', False)
+ if repo is not False:
+ return repo
+
+ colon = mydep.find(_repo_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+2:]
+ else:
+ return mydep[colon+2:bracket]
+ return None
+
+def remove_slot(mydep):
+ """
+ Removes dep components from the right side of an atom:
+ * slot
+ * use
+ * repo
+ And repo_name from the left side.
+ """
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ mydep = mydep[:colon]
+ else:
+ bracket = mydep.find("[")
+ if bracket != -1:
+ mydep = mydep[:bracket]
+ return mydep
+
+def dep_getusedeps(depend):
+ """
+ Pull a listing of USE Dependencies out of a dep atom.
+
+ Example usage:
+ >>> dep_getusedeps('app-misc/test:3[foo,-bar]')
+ ('foo', '-bar')
+
+ @param depend: The depstring to process
+ @type depend: String
+	@rtype: Tuple
+	@return: Tuple of use flags (or an empty tuple if no flags exist)
+ """
+ use_list = []
+ open_bracket = depend.find('[')
+ # -1 = failure (think c++ string::npos)
+ comma_separated = False
+ bracket_count = 0
+	while open_bracket != -1:
+ bracket_count += 1
+ if bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency with more "
+ "than one set of brackets: %s") % (depend,))
+	close_bracket = depend.find(']', open_bracket)
+	if close_bracket == -1:
+	raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend)
+ use = depend[open_bracket + 1: close_bracket]
+ # foo[1:1] may return '' instead of None, we don't want '' in the result
+ if not use:
+ raise InvalidAtom(_("USE Dependency with "
+	"no use flag ([]): %s") % depend)
+ if not comma_separated:
+ comma_separated = "," in use
+
+ if comma_separated and bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency contains a mixture of "
+	"comma and bracket separators: %s") % depend)
+
+ if comma_separated:
+ for x in use.split(","):
+ if x:
+ use_list.append(x)
+ else:
+ raise InvalidAtom(_("USE Dependency with no use "
+	"flag next to comma: %s") % depend)
+ else:
+ use_list.append(use)
+
+ # Find next use flag
+	open_bracket = depend.find('[', open_bracket + 1)
+ return tuple(use_list)
+
+def isvalidatom(atom, allow_blockers=False, allow_wildcard=False,
+ allow_repo=False, eapi=None):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ False
+ >>> isvalidatom('>=media-libs/test-3.0')
+ True
+
+ @param atom: The depend atom to check against
+ @type atom: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the atom is invalid
+ 2) True if the atom is valid
+ """
+
+ if eapi is not None and isinstance(atom, Atom) and atom.eapi != eapi:
+ # We'll construct a new atom with the given eapi.
+ atom = _unicode(atom)
+
+ try:
+ if not isinstance(atom, Atom):
+ atom = Atom(atom, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi)
+ if not allow_blockers and atom.blocker:
+ return False
+ return True
+ except InvalidAtom:
+ return False
+
+def isjustname(mypkg):
+ """
+ Checks to see if the atom is only the package name (no version parts).
+
+ Example usage:
+ >>> isjustname('=media-libs/test-3.0')
+ False
+ >>> isjustname('media-libs/test')
+ True
+
+ @param mypkg: The package atom to check
+	@type mypkg: String or Atom
+	@rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not just the package name
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg == mypkg.cp
+ except InvalidAtom:
+ pass
+
+ for x in mypkg.split('-')[-2:]:
+ if ververify(x):
+ return False
+ return True
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in =category/package-version or
+ package-version format.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ False
+ >>> isspecific('=media-libs/test-3.0')
+ True
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not specific
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg != mypkg.cp
+ except InvalidAtom:
+ pass
+
+ # Fall back to legacy code for backward compatibility.
+ return not isjustname(mypkg)
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('=media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package category/package-name
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ return mydep.cp
+
+def match_to_list(mypkg, mylist):
+ """
+	Searches the list for entries that match the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+	@type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
+ """
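+	# Illustrative sketch: match_to_list('dev-libs/foo-1.0',
+	# [Atom('dev-libs/foo'), Atom('dev-libs/bar')]) returns
+	# [Atom('dev-libs/foo')].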
+ matches = set()
+ result = []
+ pkgs = [mypkg]
+ for x in mylist:
+ if x not in matches and match_from_list(x, pkgs):
+ matches.add(x)
+ result.append(x)
+ return result
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ - cp:slot with extended syntax 0
+ - cp with extended syntax -1
+ """
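+	# Illustrative sketch: for mypkg 'dev-libs/foo-1.0' and mylist
+	# [Atom('dev-libs/foo'), Atom('=dev-libs/foo-1.0')], the '=' atom
+	# wins (value 6) over the bare cp atom (value 1).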
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = -99
+ bestm = None
+ mypkg_cpv = None
+ for x in match_to_list(mypkg, mylist):
+ if x.extended_syntax:
+ if x.operator == '=*':
+ if maxvalue < 0:
+ maxvalue = 0
+ bestm = x
+ elif x.slot is not None:
+ if maxvalue < -1:
+ maxvalue = -1
+ bestm = x
+ else:
+ if maxvalue < -2:
+ maxvalue = -2
+ bestm = x
+ continue
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ op_val = operator_values[x.operator]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ elif op_val == maxvalue and op_val == 2:
+ # For >, <, >=, and <=, the one with the version
+ # closest to mypkg is the best match.
+ if mypkg_cpv is None:
+ try:
+ mypkg_cpv = mypkg.cpv
+ except AttributeError:
+ mypkg_cpv = _pkg_str(remove_slot(mypkg))
+ if bestm.cpv == mypkg_cpv or bestm.cpv == x.cpv:
+ pass
+ elif x.cpv == mypkg_cpv:
+ bestm = x
+ else:
+ # Sort the cpvs to find the one closest to mypkg_cpv
+ cpv_list = [bestm.cpv, mypkg_cpv, x.cpv]
+ def cmp_cpv(cpv1, cpv2):
+ return vercmp(cpv1.version, cpv2.version)
+ cpv_list.sort(key=cmp_sort_key(cmp_cpv))
+ if cpv_list[0] is mypkg_cpv or cpv_list[-1] is mypkg_cpv:
+ if cpv_list[1] is x.cpv:
+ bestm = x
+ else:
+ # TODO: handle the case where mypkg_cpv is in the middle
+ pass
+
+ return bestm
+
+def match_from_list(mydep, candidate_list):
+ """
+	Searches the list for entries that match the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+	@type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
+ """
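+	# Illustrative sketch: match_from_list(Atom('>=dev-libs/foo-1.0'),
+	# ['dev-libs/foo-1.2', 'dev-libs/foo-0.9']) returns ['dev-libs/foo-1.2'].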
+
+ if not candidate_list:
+ return []
+
+ if "!" == mydep[:1]:
+ if "!" == mydep[1:2]:
+ mydep = mydep[2:]
+ else:
+ mydep = mydep[1:]
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ mycpv = mydep.cpv
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = mydep.slot
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError(_("Specific key requires an operator"
+ " (%s) (try adding an '=')") % (mydep))
+
+ if ver and rev:
+ operator = mydep.operator
+ if not operator:
+ writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if mydep.extended_syntax:
+
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mycpv or extended_cp_match(mydep.cp, cp):
+ mylist.append(x)
+
+ if mylist and mydep.operator == "=*":
+
+ candidate_list = mylist
+ mylist = []
+ # Currently, only \*\w+\* is supported.
+ ver = mydep.version[1:-1]
+
+ for x in candidate_list:
+ x_ver = getattr(x, "version", None)
+ if x_ver is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ continue
+ x_ver = "-".join(xs[-2:])
+ if ver in x_ver:
+ mylist.append(x)
+
+ elif operator is None:
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mydep.cp:
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ for x in candidate_list:
+ xcpv = getattr(x, "cpv", None)
+ if xcpv is None:
+ xcpv = remove_slot(x)
+ if not cpvequal(xcpv, mycpv):
+ continue
+ mylist.append(x)
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ myver = mycpv_cps[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ if myver == mycpv_cps[2]:
+ mycpv_cmp = mycpv
+ else:
+ # Use replace to preserve the revision part if it exists
+	# (mycpv_cps[3] can't be trusted because it contains r0
+ # even when the input has no revision part).
+ mycpv_cmp = mycpv.replace(
+ mydep.cp + "-" + mycpv_cps[2],
+ mydep.cp + "-" + myver, 1)
+ for x in candidate_list:
+ try:
+ x.cp
+ except AttributeError:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+ else:
+ pkg = x
+
+ xs = pkg.cpv_split
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ if myver == xs[2]:
+ xcpv = pkg.cpv
+ else:
+ # Use replace to preserve the revision part if it exists.
+ xcpv = pkg.cpv.replace(
+ pkg.cp + "-" + xs[2],
+ pkg.cp + "-" + myver, 1)
+ if xcpv.startswith(mycpv_cmp):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ for x in candidate_list:
+ if hasattr(x, 'cp'):
+ pkg = x
+ else:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+
+ if pkg.cp != mydep.cp:
+ continue
+ try:
+ result = vercmp(pkg.version, mydep.version)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+
+ if mydep.slot is not None:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ x_pkg = None
+ try:
+ x.cpv
+ except AttributeError:
+ xslot = dep_getslot(x)
+ if xslot is not None:
+ try:
+ x_pkg = _pkg_str(remove_slot(x), slot=xslot)
+ except InvalidData:
+ continue
+ else:
+ x_pkg = x
+
+ if x_pkg is None:
+ mylist.append(x)
+ else:
+ try:
+ x_pkg.slot
+ except AttributeError:
+ mylist.append(x)
+ else:
+ if _match_slot(mydep, x_pkg):
+ mylist.append(x)
+
+ if mydep.unevaluated_atom.use:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ use = getattr(x, "use", None)
+ if use is not None:
+ if mydep.unevaluated_atom.use and \
+ not x.iuse.is_valid_flag(
+ mydep.unevaluated_atom.use.required):
+ continue
+
+ if mydep.use:
+ is_valid_flag = x.iuse.is_valid_flag
+ missing_enabled = frozenset(flag for flag in
+ mydep.use.missing_enabled if not is_valid_flag(flag))
+ missing_disabled = frozenset(flag for flag in
+ mydep.use.missing_disabled if not is_valid_flag(flag))
+
+ if mydep.use.enabled:
+ if any(f in mydep.use.enabled for f in missing_disabled):
+ continue
+ need_enabled = mydep.use.enabled.difference(use.enabled)
+ if need_enabled:
+ if any(f not in missing_enabled for f in need_enabled):
+ continue
+
+ if mydep.use.disabled:
+ if any(f in mydep.use.disabled for f in missing_enabled):
+ continue
+ need_disabled = mydep.use.disabled.intersection(use.enabled)
+ if need_disabled:
+ if any(f not in missing_disabled for f in need_disabled):
+ continue
+
+ mylist.append(x)
+
+ if mydep.repo:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ repo = getattr(x, "repo", False)
+ if repo is False:
+ repo = dep_getrepo(x)
+ if repo is not None and repo != _unknown_repo and \
+ repo != mydep.repo:
+ continue
+ mylist.append(x)
+
+ return mylist
+
+def human_readable_required_use(required_use):
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of").replace("??", "at-most-one-of")
+
+def get_required_use_flags(required_use, eapi=None):
+ """
+ Returns a set of use flags that are used in the given REQUIRED_USE string
+
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @rtype: Set
+ @return: Set of use flags that are used in the given REQUIRED_USE string
+ """
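+	# Illustrative sketch:
+	#   get_required_use_flags('^^ ( ssl gnutls )')
+	#   -> frozenset({'ssl', 'gnutls'})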
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ used_flags = set()
+
+ def register_token(token):
+ if token.endswith("?"):
+ token = token[:-1]
+ if token.startswith("!"):
+ token = token[1:]
+ used_flags.add(token)
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ ignore = False
+ if stack[level]:
+ if stack[level][-1] in valid_operators or \
+ (not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?"):
+ ignore = True
+ stack[level].pop()
+ stack[level].append(True)
+
+ if l and not ignore:
+ stack[level].append(all(x for x in l))
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in valid_operators:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ stack[level].append(True)
+
+ register_token(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ return frozenset(used_flags)
+
+class _RequiredUseLeaf(object):
+
+ __slots__ = ('_satisfied', '_token')
+
+ def __init__(self, token, satisfied):
+ self._token = token
+ self._satisfied = satisfied
+
+ def tounicode(self):
+ return self._token
+
+class _RequiredUseBranch(object):
+
+ __slots__ = ('_children', '_operator', '_parent', '_satisfied')
+
+ def __init__(self, operator=None, parent=None):
+ self._children = []
+ self._operator = operator
+ self._parent = parent
+ self._satisfied = False
+
+ def __bool__(self):
+ return self._satisfied
+
+ def tounicode(self):
+
+ include_parens = self._parent is not None
+ tokens = []
+ if self._operator is not None:
+ tokens.append(self._operator)
+
+ if include_parens:
+ tokens.append("(")
+
+ complex_nesting = False
+ node = self
+	while node is not None and not complex_nesting:
+ if node._operator in ("||", "^^", "??"):
+ complex_nesting = True
+ else:
+ node = node._parent
+
+ if complex_nesting:
+ for child in self._children:
+ tokens.append(child.tounicode())
+ else:
+ for child in self._children:
+ if not child._satisfied:
+ tokens.append(child.tounicode())
+
+ if include_parens:
+ tokens.append(")")
+
+ return " ".join(tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+def check_required_use(required_use, use, iuse_match, eapi=None):
+ """
+	Checks if the use flags listed in 'use' satisfy all
+	constraints specified in 'required_use'.
+
+	@param required_use: REQUIRED_USE string
+	@type required_use: String
+	@param use: Enabled use flags
+	@type use: List
+	@param iuse_match: Callable that takes a single flag argument and returns
+	True if the flag is matched, False otherwise.
+	@type iuse_match: Callable
+	@rtype: _RequiredUseBranch
+	@return: A tree whose truth value indicates whether the REQUIRED_USE
+	constraints are satisfied
+ """
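+	# Illustrative sketch (the lambda is a stand-in iuse_match):
+	#   ok = check_required_use('^^ ( ssl gnutls )', ['ssl'],
+	#       lambda flag: flag in ('ssl', 'gnutls'))
+	#   bool(ok) -> True; with use=['ssl', 'gnutls'] it would be False.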
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
+ def is_active(token):
+ if token.startswith("!"):
+ flag = token[1:]
+ is_negated = True
+ else:
+ flag = token
+ is_negated = False
+
+ if not flag or not iuse_match(flag):
+ if not eapi_attrs.required_use_at_most_one_of and flag == "?":
+ msg = _("Operator '??' is not supported with EAPI '%s'") \
+ % (eapi,)
+ e = InvalidData(msg, category='EAPI.incompatible')
+ raise InvalidDependString(msg, errors=(e,))
+ msg = _("USE flag '%s' is not in IUSE") \
+ % (flag,)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+
+ return (flag in use and not is_negated) or \
+ (flag not in use and is_negated)
+
+ def is_satisfied(operator, argument):
+ if not argument:
+ #|| ( ) -> True
+ return True
+
+ if operator == "||":
+ return (True in argument)
+ elif operator == "^^":
+ return (argument.count(True) == 1)
+ elif operator == "??":
+ return (argument.count(True) <= 1)
+ elif operator[-1] == "?":
+ return (False not in argument)
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ tree = _RequiredUseBranch()
+ node = tree
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ if not need_bracket:
+ child = _RequiredUseBranch(parent=node)
+ node._children.append(child)
+ node = child
+
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ op = None
+ if stack[level]:
+ if stack[level][-1] in valid_operators:
+ op = stack[level].pop()
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+
+ elif not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?":
+ op = stack[level].pop()
+ if is_active(op[:-1]):
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+ else:
+ node._satisfied = True
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node = node._parent
+ continue
+
+ if op is None:
+ satisfied = False not in l
+ node._satisfied = satisfied
+ if l:
+ stack[level].append(satisfied)
+
+ if len(node._children) <= 1 or \
+ node._parent._operator not in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ elif not node._children:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+
+ elif len(node._children) == 1 and op in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node._parent._children.append(node._children[0])
+ if isinstance(node._children[0], _RequiredUseBranch):
+ node._children[0]._parent = node._parent
+ node = node._children[0]
+ if node._operator is None and \
+ node._parent._operator not in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ node = node._parent
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in valid_operators:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ satisfied = is_active(token)
+ stack[level].append(satisfied)
+ node._children.append(_RequiredUseLeaf(token, satisfied))
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ tree._satisfied = False not in stack[0]
+ return tree
+
+def extract_affecting_use(mystr, atom, eapi=None):
+ """
+ Take a dep string and an atom and return the use flags
+ that decide if the given atom is in effect.
+
+ Example usage:
+ >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
+ !minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
+ {'cxx', 'minimal', 'sasl'}
+
+ @param mystr: The dependency string
+ @type mystr: String
+ @param atom: The atom to get into effect
+ @type atom: String
+ @rtype: Set of strings
+ @return: Set of use flags affecting given atom
+ """
+ useflag_re = _get_useflag_re(eapi)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+ affecting_use = set()
+
+ def flag(conditional):
+ if conditional[0] == "!":
+ flag = conditional[1:-1]
+ else:
+ flag = conditional[:-1]
+
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % \
+ (flag, conditional))
+
+ return flag
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+	# An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ if l[0][-1] == "?":
+ affecting_use.add(flag(l[0]))
+ else:
+ if stack[level] and stack[level][-1][-1] == "?":
+ affecting_use.add(flag(stack[level][-1]))
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ elif token == atom:
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return affecting_use
+
+def extract_unpack_dependencies(src_uri, unpackers):
+ """
+ Return unpack dependencies string for given SRC_URI string.
+
+ @param src_uri: SRC_URI string
+ @type src_uri: String
+ @param unpackers: Dictionary mapping archive suffixes to dependency strings
+ @type unpackers: Dictionary
+ @rtype: String
+ @return: Dependency string specifying packages required to unpack archives.
+ """
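+	# Illustrative sketch (hypothetical unpackers mapping):
+	#   extract_unpack_dependencies('mirror://foo/foo-1.tar.gz',
+	#       {'.tar.gz': 'app-arch/tar app-arch/gzip'})
+	#   -> 'app-arch/tar app-arch/gzip'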
+ src_uri = src_uri.split()
+
+ depend = []
+ for i in range(len(src_uri)):
+ if src_uri[i][-1] == "?" or src_uri[i] in ("(", ")"):
+ depend.append(src_uri[i])
+ elif (i+1 < len(src_uri) and src_uri[i+1] == "->") or src_uri[i] == "->":
+ continue
+ else:
+ for suffix in sorted(unpackers, key=lambda x: len(x), reverse=True):
+ suffix = suffix.lower()
+ if src_uri[i].lower().endswith(suffix):
+ depend.append(unpackers[suffix])
+ break
+
+ while True:
+ cleaned_depend = depend[:]
+ for i in range(len(cleaned_depend)):
+ if cleaned_depend[i] is None:
+ continue
+ elif cleaned_depend[i] == "(" and cleaned_depend[i+1] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ elif cleaned_depend[i][-1] == "?" and cleaned_depend[i+1] == "(" and cleaned_depend[i+2] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ cleaned_depend[i+2] = None
+ if depend == cleaned_depend:
+ break
+ else:
+ depend = [x for x in cleaned_depend if x is not None]
+
+ return " ".join(depend)
diff --git a/usr/lib/portage/pym/portage/dep/_slot_operator.py b/usr/lib/portage/pym/portage/dep/_slot_operator.py
new file mode 100644
index 0000000..8b67fc5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dep/_slot_operator.py
@@ -0,0 +1,106 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.dep import Atom, paren_enclose, use_reduce
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from _emerge.Package import Package
+
+def find_built_slot_operator_atoms(pkg):
+ atoms = {}
+ for k in Package._dep_keys:
+ atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k],
+ uselist=pkg.use.enabled, eapi=pkg.eapi,
+ token_class=Atom)))
+ if atom_list:
+ atoms[k] = atom_list
+ return atoms
+
+def _find_built_slot_operator(dep_struct):
+ for x in dep_struct:
+ if isinstance(x, list):
+ for atom in _find_built_slot_operator(x):
+ yield atom
+ elif isinstance(x, Atom) and x.slot_operator_built:
+ yield x
+
+def ignore_built_slot_operator_deps(dep_struct):
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ ignore_built_slot_operator_deps(x)
+ elif isinstance(x, Atom) and x.slot_operator_built:
+ # There's no way of knowing here whether the SLOT
+ # part of the slot/sub-slot pair should be kept, so we
+ # ignore both parts.
+ dep_struct[i] = x.without_slot
+
+def evaluate_slot_operator_equal_deps(settings, use, trees):
+
+ metadata = settings.configdict['pkg']
+ eapi = metadata['EAPI']
+ eapi_attrs = _get_eapi_attrs(eapi)
+ running_vardb = trees[trees._running_eroot]["vartree"].dbapi
+ target_vardb = trees[trees._target_eroot]["vartree"].dbapi
+ vardbs = [target_vardb]
+ deps = {}
+ for k in Package._dep_keys:
+ deps[k] = use_reduce(metadata[k],
+ uselist=use, eapi=eapi, token_class=Atom)
+
+ for k in Package._runtime_keys:
+ _eval_deps(deps[k], vardbs)
+
+ if eapi_attrs.hdepend:
+ _eval_deps(deps["HDEPEND"], [running_vardb])
+ _eval_deps(deps["DEPEND"], [target_vardb])
+ else:
+ if running_vardb is not target_vardb:
+ vardbs.append(running_vardb)
+ _eval_deps(deps["DEPEND"], vardbs)
+
+ result = {}
+ for k, v in deps.items():
+ result[k] = paren_enclose(v)
+
+ return result
+
+def _eval_deps(dep_struct, vardbs):
+	# TODO: we want better || () handling; i.e. with || ( A:= B:= ) and both
+	# A and B installed, the subslot should be recorded on A only, since the
+	# package is supposed to link against A anyway, and we have no guarantee
+	# that B has a matching ABI.
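+	# Illustrative sketch: an atom like dev-libs/foo:= is rewritten to
+	# dev-libs/foo:0/1.2= when the best installed match reports slot '0'
+	# and sub_slot '1.2'.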
+
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ _eval_deps(x, vardbs)
+ elif isinstance(x, Atom) and x.slot_operator == "=":
+ for vardb in vardbs:
+ best_version = vardb.match(x)
+ if best_version:
+ best_version = best_version[-1]
+ try:
+ best_version = \
+ vardb._pkg_str(best_version, None)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ slot_part = "%s/%s=" % \
+ (best_version.slot, best_version.sub_slot)
+ x = x.with_slot(slot_part)
+ dep_struct[i] = x
+ break
+ else:
+ # this dep could not be resolved, possibilities include:
+ # 1. unsatisfied branch of || () dep,
+ # 2. package.provided,
+ # 3. --nodeps.
+ #
+ # just leave it as-is for now. this does not cause any special
+ # behavior while keeping the information in vdb -- necessary
+ # e.g. for @changed-deps to work properly.
+ #
+ # TODO: make it actually cause subslot rebuilds when switching
+ # || () branches.
+ pass
diff --git a/usr/lib/portage/pym/portage/dep/dep_check.py b/usr/lib/portage/pym/portage/dep/dep_check.py
new file mode 100644
index 0000000..4386b5e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dep/dep_check.py
@@ -0,0 +1,711 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
+
+import logging
+import operator
+
+import portage
+from portage.dep import Atom, match_from_list, use_reduce
+from portage.exception import InvalidDependString, ParseError
+from portage.localization import _
+from portage.util import writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
+from portage.versions import vercmp, _pkg_str
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+ trees=None, use_mask=None, use_force=None, **kwargs):
+ """
+ In order to solve bug #141118, recursively expand new-style virtuals so
+ as to collapse one or more levels of indirection, generating an expanded
+ search space. In dep_zapdeps, new-style virtuals will be assigned
+ zero cost regardless of whether or not they are currently installed. Virtual
+ blockers are supported but only when the virtual expands to a single
+ atom because it wouldn't necessarily make sense to block all the components
+ of a compound virtual. When more than one new-style virtual is matched,
+ the matches are sorted from highest to lowest versions and the atom is
+ expanded to || ( highest match ... lowest match )."""
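+	# Illustrative sketch: an atom 'virtual/foo' matched by two new-style
+	# virtual packages would be replaced by a structure like
+	# || ( [RDEPEND of foo-2 plus =virtual/foo-2] [RDEPEND of foo-1 plus
+	# =virtual/foo-1] ), with higher versions preferred.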
+ newsplit = []
+ mytrees = trees[myroot]
+ portdb = mytrees["porttree"].dbapi
+ pkg_use_enabled = mytrees.get("pkg_use_enabled")
+ # Atoms are stored in the graph as (atom, id(atom)) tuples
+ # since each atom is considered to be a unique entity. For
+ # example, atoms that appear identical may behave differently
+ # in USE matching, depending on their unevaluated form. Also,
+ # specially generated virtual atoms may appear identical while
+ # having different _orig_atom attributes.
+ atom_graph = mytrees.get("atom_graph")
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ graph_parent = None
+ if parent is not None:
+ if virt_parent is not None:
+ graph_parent = virt_parent
+ parent = virt_parent
+ else:
+ graph_parent = parent
+ repoman = not mysettings.local_config
+ if kwargs["use_binaries"]:
+ portdb = trees[myroot]["bintree"].dbapi
+ pprovideddict = mysettings.pprovideddict
+ myuse = kwargs["myuse"]
+ for x in mysplit:
+ if x == "||":
+ newsplit.append(x)
+ continue
+ elif isinstance(x, list):
+ newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
+ mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
+ use_force=use_force, **kwargs))
+ continue
+
+ if not isinstance(x, Atom):
+ raise ParseError(
+ _("invalid token: '%s'") % x)
+
+ if repoman:
+ x = x._eval_qa_conditionals(use_mask, use_force)
+
+ mykey = x.cp
+ if not mykey.startswith("virtual/"):
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if x.blocker:
+ # Virtual blockers are no longer expanded here since
+ # the un-expanded virtual atom is more useful for
+ # maintaining a cache of blocker atoms.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if repoman or not hasattr(portdb, 'match_pkgs') or \
+ pkg_use_enabled is None:
+ if portdb.cp_list(x.cp):
+ newsplit.append(x)
+ else:
+ a = []
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+ for y in mychoices:
+ a.append(Atom(x.replace(x.cp, y.cp, 1)))
+ if not a:
+ newsplit.append(x)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+ continue
+
+ pkgs = []
+ # Ignore USE deps here, since otherwise we might not
+ # get any matches. Choices with correct USE settings
+ # will be preferred in dep_zapdeps().
+ matches = portdb.match_pkgs(x.without_use)
+ # Use descending order to prefer higher versions.
+ matches.reverse()
+ for pkg in matches:
+ # only use new-style matches
+ if pkg.cp.startswith("virtual/"):
+ pkgs.append(pkg)
+
+ mychoices = []
+ if not pkgs and not portdb.cp_list(x.cp):
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+
+ if not (pkgs or mychoices):
+ # This one couldn't be expanded as a new-style virtual. Old-style
+ # virtuals have already been expanded by dep_virtual, so this one
+ # is unavailable and dep_zapdeps will identify it as such. The
+ # atom is not eliminated here since it may still represent a
+ # dependency that needs to be satisfied.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ a = []
+ for pkg in pkgs:
+ virt_atom = '=' + pkg.cpv
+ if x.unevaluated_atom.use:
+ virt_atom += str(x.unevaluated_atom.use)
+ virt_atom = Atom(virt_atom)
+ if parent is None:
+ if myuse is None:
+ virt_atom = virt_atom.evaluate_conditionals(
+ mysettings.get("PORTAGE_USE", "").split())
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(myuse)
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(
+ pkg_use_enabled(parent))
+ else:
+ virt_atom = Atom(virt_atom)
+
+ # Allow the depgraph to map this atom back to the
+ # original, in order to avoid distortion in places
+ # like display or conflict resolution code.
+ virt_atom.__dict__['_orig_atom'] = x
+
+ # According to GLEP 37, RDEPEND is the only dependency
+ # type that is valid for new-style virtuals. Repoman
+ # should enforce this.
+ depstring = pkg._metadata['RDEPEND']
+ pkg_kwargs = kwargs.copy()
+ pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
+ if edebug:
+ writemsg_level(_("Virtual Parent: %s\n") \
+ % (pkg,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_("Virtual Depstring: %s\n") \
+ % (depstring,), noiselevel=-1, level=logging.DEBUG)
+
+ # Set EAPI used for validation in dep_check() recursion.
+ mytrees["virt_parent"] = pkg
+
+ try:
+ mycheck = dep_check(depstring, mydbapi, mysettings,
+ myroot=myroot, trees=trees, **pkg_kwargs)
+ finally:
+ # Restore previous EAPI after recursion.
+ if virt_parent is not None:
+ mytrees["virt_parent"] = virt_parent
+ else:
+ del mytrees["virt_parent"]
+
+ if not mycheck[0]:
+ raise ParseError("%s: %s '%s'" % \
+ (pkg, mycheck[1], depstring))
+
+ # pull in the new-style virtual
+ mycheck[1].append(virt_atom)
+ a.append(mycheck[1])
+ if atom_graph is not None:
+ virt_atom_node = (virt_atom, id(virt_atom))
+ atom_graph.add(virt_atom_node, graph_parent)
+ atom_graph.add(pkg, virt_atom_node)
+
+ if not a and mychoices:
+ # Check for a virtual package.provided match.
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ if match_from_list(new_atom,
+ pprovideddict.get(new_atom.cp, [])):
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)), graph_parent)
+
+ if not a:
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+
+ return newsplit
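+
+# Illustrative effect of _expand_new_virtuals() (an assumption about a
+# typical case, not a guarantee): an Atom('virtual/pkgconfig') entry in the
+# input list may come back as an OR group such as
+#   ['||', [Atom('=virtual/pkgconfig-0-r1'), ...]]
+# collapsing one level of virtual indirection, with the original atom
+# still reachable through the _orig_atom attribute set above.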
+
+def dep_eval(deplist):
+ if not deplist:
+ return 1
+ if deplist[0]=="||":
+ # OR list; we just need one "1"
+ for x in deplist[1:]:
+ if isinstance(x, list):
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+ #XXX: unless there are no available atoms in the list,
+ #in which case we need to assume that everything is
+ #okay, as some ebuilds rely on an old bug.
+ if len(deplist) == 1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if isinstance(x, list):
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
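+
+# Worked examples (illustrative), assuming the boolean/integer leaves
+# produced by dep_wordreduce, where a true value means satisfied:
+#   dep_eval([1, ['||', 0, 1]]) -> 1  (every AND member is satisfiable)
+#   dep_eval([1, ['||', 0, 0]]) -> 0  (the OR group has no satisfied item)
+#   dep_eval(['||'])            -> 1  (empty OR; see the XXX note above)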
+
+class _dep_choice(SlotObject):
+ __slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
+ 'all_installed_slots')
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
+ """
+ Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies.
+ """
+ if trees is None:
+ trees = portage.db
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if not reduced or unreduced == ["||"] or dep_eval(reduced):
+ return []
+
+ if unreduced[0] != "||":
+ unresolved = []
+ for x, satisfied in zip(unreduced, reduced):
+ if isinstance(x, list):
+ unresolved += dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ elif not satisfied:
+ unresolved.append(x)
+ return unresolved
+
+ # We're at a ( || atom ... ) type level and need to make a choice
+ deps = unreduced[1:]
+ satisfieds = reduced[1:]
+
+ # Our preference order is for the first item that:
+ # a) contains all unmasked packages with the same key as installed packages
+ # b) contains all unmasked packages
+ # c) contains masked installed packages
+ # d) is the first item
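+ #
+ # For example (illustrative): given || ( app-misc/a app-misc/b ) where
+ # only app-misc/b is installed and unmasked, the app-misc/b choice is
+ # binned as preferred_installed and wins even though app-misc/a is
+ # listed first.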
+
+ preferred_installed = []
+ preferred_in_graph = []
+ preferred_any_slot = []
+ preferred_non_installed = []
+ unsat_use_in_graph = []
+ unsat_use_installed = []
+ unsat_use_non_installed = []
+ other_installed = []
+ other_installed_some = []
+ other_installed_any_slot = []
+ other = []
+
+ # unsat_use_* must come after preferred_non_installed
+ # for correct ordering in cases like || ( foo[a] foo[b] ).
+ choice_bins = (
+ preferred_in_graph,
+ preferred_installed,
+ preferred_any_slot,
+ preferred_non_installed,
+ unsat_use_in_graph,
+ unsat_use_installed,
+ unsat_use_non_installed,
+ other_installed,
+ other_installed_some,
+ other_installed_any_slot,
+ other,
+ )
+
+ # Alias the trees we'll be checking availability against
+ parent = trees[myroot].get("parent")
+ priority = trees[myroot].get("priority")
+ graph_db = trees[myroot].get("graph_db")
+ graph = trees[myroot].get("graph")
+ want_update_pkg = trees[myroot].get("want_update_pkg")
+ vardb = None
+ if "vartree" in trees[myroot]:
+ vardb = trees[myroot]["vartree"].dbapi
+ if use_binaries:
+ mydbapi = trees[myroot]["bintree"].dbapi
+ else:
+ mydbapi = trees[myroot]["porttree"].dbapi
+
+ try:
+ mydbapi_match_pkgs = mydbapi.match_pkgs
+ except AttributeError:
+ def mydbapi_match_pkgs(atom):
+ return [mydbapi._pkg_str(cpv, atom.repo)
+ for cpv in mydbapi.match(atom)]
+
+ # Sort the deps into three groups: installed; not installed but
+ # already in the graph; and other (not installed and not in the
+ # graph), with values of [[required_atom], availability]
+ for x, satisfied in zip(deps, satisfieds):
+ if isinstance(x, list):
+ atoms = dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ else:
+ atoms = [x]
+ if vardb is None:
+ # When called by repoman, we can simply return the first choice
+ # because dep_eval() handles preference selection.
+ return atoms
+
+ all_available = True
+ all_use_satisfied = True
+ slot_map = {}
+ cp_map = {}
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ # Ignore USE dependencies here since we don't want USE
+ # settings to adversely affect || preference evaluation.
+ avail_pkg = mydbapi_match_pkgs(atom.without_use)
+ if avail_pkg:
+ avail_pkg = avail_pkg[-1] # highest (ascending order)
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
+ if not avail_pkg:
+ all_available = False
+ all_use_satisfied = False
+ break
+
+ if atom.use:
+ avail_pkg_use = mydbapi_match_pkgs(atom)
+ if not avail_pkg_use:
+ all_use_satisfied = False
+ else:
+ # highest (ascending order)
+ avail_pkg_use = avail_pkg_use[-1]
+ if avail_pkg_use != avail_pkg:
+ avail_pkg = avail_pkg_use
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
+
+ slot_map[avail_slot] = avail_pkg
+ highest_cpv = cp_map.get(avail_pkg.cp)
+ if highest_cpv is None or \
+ vercmp(avail_pkg.version, highest_cpv.version) > 0:
+ cp_map[avail_pkg.cp] = avail_pkg
+
+ this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
+ cp_map=cp_map, all_available=all_available,
+ all_installed_slots=False)
+ if all_available:
+ # The "all installed" criterion is not version or slot specific.
+ # If any version of a package is already in the graph then we
+ # assume that it is preferred over other possible package choices.
+ all_installed = True
+ for atom in set(Atom(atom.cp) for atom in atoms \
+ if not atom.blocker):
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(atom) and not atom.startswith("virtual/"):
+ all_installed = False
+ break
+ all_installed_slots = False
+ if all_installed:
+ all_installed_slots = True
+ for slot_atom in slot_map:
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(slot_atom) and \
+ not slot_atom.startswith("virtual/"):
+ all_installed_slots = False
+ break
+ this_choice.all_installed_slots = all_installed_slots
+ if graph_db is None:
+ if all_use_satisfied:
+ if all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ else:
+ all_in_graph = True
+ for atom in atoms:
+ # New-style virtuals have zero cost to install.
+ if atom.blocker or atom.cp.startswith("virtual/"):
+ continue
+ # We check if the matched package has actually been
+ # added to the digraph, in order to distinguish between
+ # those packages and installed packages that may need
+ # to be uninstalled in order to resolve blockers.
+ if not any(pkg in graph for pkg in
+ graph_db.match_pkgs(atom)):
+ all_in_graph = False
+ break
+ circular_atom = None
+ if all_in_graph:
+ if parent is None or priority is None:
+ pass
+ elif priority.buildtime and \
+ not (priority.satisfied or priority.optional):
+ # Check if the atom would result in a direct circular
+ # dependency and try to avoid that if it seems likely
+ # to be unresolvable. This is only relevant for
+ # buildtime deps that aren't already satisfied by an
+ # installed package.
+ cpv_slot_list = [parent]
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ if vardb.match(atom):
+ # If the atom is satisfied by an installed
+ # version then it's not a circular dep.
+ continue
+ if atom.cp != parent.cp:
+ continue
+ if match_from_list(atom, cpv_slot_list):
+ circular_atom = atom
+ break
+ if circular_atom is not None:
+ other.append(this_choice)
+ else:
+ if all_use_satisfied:
+ if all_in_graph:
+ preferred_in_graph.append(this_choice)
+ elif all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ elif parent is None or want_update_pkg is None:
+ preferred_any_slot.append(this_choice)
+ else:
+ # When appropriate, prefer a slot that is not
+ # installed yet for bug #478188.
+ want_update = True
+ for slot_atom, avail_pkg in slot_map.items():
+ if avail_pkg in graph:
+ continue
+ # New-style virtuals have zero cost to install.
+ if slot_atom.startswith("virtual/") or \
+ vardb.match(slot_atom):
+ continue
+ if not want_update_pkg(parent, avail_pkg):
+ want_update = False
+ break
+
+ if want_update:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if all_in_graph:
+ unsat_use_in_graph.append(this_choice)
+ elif all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ else:
+ all_installed = True
+ some_installed = False
+ for atom in atoms:
+ if not atom.blocker:
+ if vardb.match(atom):
+ some_installed = True
+ else:
+ all_installed = False
+
+ if all_installed:
+ this_choice.all_installed_slots = True
+ other_installed.append(this_choice)
+ elif some_installed:
+ other_installed_some.append(this_choice)
+
+ # Use Atom(atom.cp) for a somewhat "fuzzy" match, since
+ # the whole atom may be too specific. For example, see
+ # bug #522652, where using the whole atom leads to an
+ # unsatisfiable choice.
+ elif any(vardb.match(Atom(atom.cp)) for atom in atoms
+ if not atom.blocker):
+ other_installed_any_slot.append(this_choice)
+ else:
+ other.append(this_choice)
+
+ # Prefer choices which contain upgrades to higher slots. This helps
+ # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
+ # atom which matches the higher version rather than the atom furthest
+ # to the left. Sorting is done separately for each of choice_bins, so
+ # as not to interfere with the ordering of the bins. Because of the
+ # bin separation, the main function of this code is to allow
+ # --depclean to remove old slots (rather than to pull in new slots).
+ for choices in choice_bins:
+ if len(choices) < 2:
+ continue
+ # Prefer choices with all_installed_slots for bug #480736.
+ choices.sort(key=operator.attrgetter('all_installed_slots'),
+ reverse=True)
+ for choice_1 in choices[1:]:
+ cps = set(choice_1.cp_map)
+ for choice_2 in choices:
+ if choice_1 is choice_2:
+ # choice_1 will not be promoted, so move on
+ break
+ intersecting_cps = cps.intersection(choice_2.cp_map)
+ if not intersecting_cps:
+ continue
+ has_upgrade = False
+ has_downgrade = False
+ for cp in intersecting_cps:
+ version_1 = choice_1.cp_map[cp]
+ version_2 = choice_2.cp_map[cp]
+ difference = vercmp(version_1.version, version_2.version)
+ if difference != 0:
+ if difference > 0:
+ has_upgrade = True
+ else:
+ has_downgrade = True
+ break
+ if has_upgrade and not has_downgrade:
+ # promote choice_1 in front of choice_2
+ choices.remove(choice_1)
+ index_2 = choices.index(choice_2)
+ choices.insert(index_2, choice_1)
+ break
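+ # Illustrative promotion (an assumption about a typical case): with
+ # choices for foo-1 and foo-2 in the same bin, in that order, the
+ # foo-2 choice is moved in front of the foo-1 choice because the
+ # comparison finds an upgrade and no downgrade among the
+ # intersecting cp_map entries.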
+
+ for allow_masked in (False, True):
+ for choices in choice_bins:
+ for choice in choices:
+ if choice.all_available or allow_masked:
+ return choice.atoms
+
+ assert(False) # This point should not be reachable
+
+def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
+ use_cache=1, use_binaries=0, myroot=None, trees=None):
+ """
+ Takes a depend string, parses it, and selects atoms.
+ The myroot parameter is unused (use mysettings['EROOT'] instead).
+ """
+ myroot = mysettings['EROOT']
+ edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
+ #check_config_instance(mysettings)
+ if trees is None:
+ trees = globals()["db"]
+ if use=="yes":
+ if myuse is None:
+ #default behavior
+ myusesplit = mysettings["PORTAGE_USE"].split()
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ mymasks = set()
+ useforce = set()
+ if use == "all":
+ # This is only for repoman, in order to constrain the use_reduce
+ # matchall behavior to account for profile use.mask/force. The
+ # ARCH/archlist code here may be redundant, since the profile
+ # really should be handling ARCH masking/forcing itself.
+ mymasks.update(mysettings.usemask)
+ mymasks.update(mysettings.archlist())
+ mymasks.discard(mysettings["ARCH"])
+ useforce.add(mysettings["ARCH"])
+ useforce.update(mysettings.useforce)
+ useforce.difference_update(mymasks)
+
+ # eapi code borrowed from _expand_new_virtuals()
+ mytrees = trees[myroot]
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ current_parent = None
+ eapi = None
+ if parent is not None:
+ if virt_parent is not None:
+ current_parent = virt_parent
+ else:
+ current_parent = parent
+
+ if current_parent is not None:
+ # Don't pass the eapi argument to use_reduce() for installed packages,
+ # since previous validation will have already marked them as invalid
+ # when necessary. At this point we're more interested in evaluating
+ # dependencies, so that things like --depclean work as well as possible
+ # in spite of partial invalidity.
+ if not current_parent.installed:
+ eapi = current_parent.eapi
+
+ if isinstance(depstring, list):
+ mysplit = depstring
+ else:
+ try:
+ mysplit = use_reduce(depstring, uselist=myusesplit,
+ masklist=mymasks, matchall=(use=="all"), excludeall=useforce,
+ opconvert=True, token_class=Atom, eapi=eapi)
+ except InvalidDependString as e:
+ return [0, "%s" % (e,)]
+
+ if mysplit == []:
+ #dependencies were reduced to nothing
+ return [1,[]]
+
+ # Recursively expand new-style virtuals so as to
+ # collapse one or more levels of indirection.
+ try:
+ mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
+ use=use, mode=mode, myuse=myuse,
+ use_force=useforce, use_mask=mymasks, use_cache=use_cache,
+ use_binaries=use_binaries, myroot=myroot, trees=trees)
+ except ParseError as e:
+ return [0, "%s" % (e,)]
+
+ mysplit2 = dep_wordreduce(mysplit,
+ mysettings, mydbapi, mode, use_cache=use_cache)
+ if mysplit2 is None:
+ return [0, _("Invalid token")]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+
+ selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
+ use_binaries=use_binaries, trees=trees)
+
+ return [1, selected_atoms]
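+
+# Usage sketch (an assumed call shape, not upstream code): the return
+# value is a two-item list, [success, payload]:
+#   success, result = dep_check(depstring, mydbapi, mysettings, trees=trees)
+#   # result is an error string when success is 0; otherwise it is the
+#   # selected atom list produced by dep_zapdeps().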
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
+ deplist=mydeplist[:]
+ for mypos, token in enumerate(deplist):
+ if isinstance(deplist[mypos], list):
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ elif token[:1] == "!":
+ deplist[mypos] = False
+ else:
+ mykey = deplist[mypos].cp
+ if mysettings and mykey in mysettings.pprovideddict and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ elif mydbapi is None:
+ # Assume nothing is satisfied. This forces dep_zapdeps to
+ # return all of the deps that have been selected
+ # (excluding those satisfied by package.provided).
+ deplist[mypos] = False
+ else:
+ if mode:
+ x = mydbapi.xmatch(mode, deplist[mypos])
+ if mode.startswith("minimum-"):
+ mydep = []
+ if x:
+ mydep.append(x)
+ else:
+ mydep = x
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep is not None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ tmp=False
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ return deplist
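+
+# Sketch of the reduction (illustrative atoms): the nested structure is
+# preserved while atoms collapse to booleans based on mydbapi matches, e.g.
+#   [Atom('a/b'), ['||', Atom('c/d'), Atom('e/f')]]
+#   -> [True, ['||', False, True]]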
diff --git a/usr/lib/portage/pym/portage/dispatch_conf.py b/usr/lib/portage/pym/portage/dispatch_conf.py
new file mode 100644
index 0000000..bf6c2ca
--- /dev/null
+++ b/usr/lib/portage/pym/portage/dispatch_conf.py
@@ -0,0 +1,212 @@
+# dispatch_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from __future__ import print_function
+
+import os, shutil, subprocess, sys
+
+import portage
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.localization import _
+from portage.util import shlex_split, varexpand
+from portage.const import EPREFIX
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
+
+DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
+
+def diffstatusoutput(cmd, file1, file2):
+ """
+ Execute the string cmd in a shell with getstatusoutput() and return a
+ 2-tuple (status, output).
+ """
+ # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
+ # raise a UnicodeDecodeError which makes the output inaccessible.
+ args = shlex_split(cmd % (file1, file2))
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [portage._unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0])
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
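+
+# Example call (mirrors the use in file_archive() below):
+#   status, output = diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)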
+
+def read_config(mandatory_opts):
+ eprefix = portage.settings["EPREFIX"]
+ if portage._not_installed:
+ config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
+ else:
+ config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
+ loader = KeyValuePairFileLoader(config_path, None)
+ opts, _errors = loader.load()
+ if not opts:
+ print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+ sys.exit(1)
+
+ # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
+ quotes = "\"'"
+ for k, v in opts.items():
+ if v[:1] in quotes and v[:1] == v[-1:]:
+ opts[k] = v[1:-1]
+
+ for key in mandatory_opts:
+ if key not in opts:
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+ else:
+ print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+
+ # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
+ variables = {"EPREFIX": eprefix}
+ opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ # Use restrictive permissions by default, in order to protect
+ # against vulnerabilities (like bug #315603 involving rcs).
+ os.chmod(opts['archive-dir'], 0o700)
+ elif not os.path.isdir(opts['archive-dir']):
+ print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+ sys.exit(1)
+
+ return opts
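+
+# Illustrative dispatch-conf.conf entries consumed here (the archive-dir
+# value is assumed; the merge value is the default hardcoded above):
+#   archive-dir="${EPREFIX}/etc/config-archive"
+#   merge="sdiff --suppress-common-lines --output='%s' '%s' '%s'"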
+
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+ os.rename(archive, archive + '.dist.new')
+
+ return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if (os.path.exists(archive) and
+ len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+ return ret
+
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ if os.path.exists(archive + '.dist.new'):
+ os.rename(archive + '.dist.new', archive + '.dist')
diff --git a/usr/lib/portage/pym/portage/eapi.py b/usr/lib/portage/pym/portage/eapi.py
new file mode 100644
index 0000000..4f77910
--- /dev/null
+++ b/usr/lib/portage/pym/portage/eapi.py
@@ -0,0 +1,144 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage import eapi_is_supported
+
+def eapi_has_iuse_defaults(eapi):
+ return eapi != "0"
+
+def eapi_has_iuse_effective(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_has_slot_deps(eapi):
+ return eapi != "0"
+
+def eapi_has_slot_operator(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python")
+
+def eapi_has_src_uri_arrows(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_use_deps(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_strong_blocks(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_src_prepare_and_src_configure(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_supports_prefix(eapi):
+ return eapi not in ("0", "1", "2")
+
+def eapi_exports_AA(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_KV(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_merge_type(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_replace_vars(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_EBUILD_PHASE_FUNC(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_exports_REPOSITORY(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_pkg_pretend(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_implicit_rdepend(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_dosed_dohard(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_required_use(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_required_use_at_most_one_of(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_has_use_dep_defaults(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_repo_deps(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_allows_dots_in_PN(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_allows_dots_in_use_flags(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_supports_stable_use_forcing_and_masking(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_use_aliases(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_automatic_unpack_dependencies(eapi):
+ return eapi in ("5-progress",)
+
+def eapi_has_hdepend(eapi):
+ return eapi in ("5-hdepend",)
+
+def eapi_has_targetroot(eapi):
+ return eapi in ("5-hdepend",)
+
+_eapi_attrs = collections.namedtuple('_eapi_attrs',
+ 'dots_in_PN dots_in_use_flags exports_EBUILD_PHASE_FUNC '
+ 'feature_flag_test feature_flag_targetroot '
+ 'hdepend iuse_defaults iuse_effective '
+ 'repo_deps required_use required_use_at_most_one_of slot_operator slot_deps '
+ 'src_uri_arrows strong_blocks use_deps use_dep_defaults')
+
+_eapi_attrs_cache = {}
+
+def _get_eapi_attrs(eapi):
+ """
+ When eapi is None then validation is not as strict, since we want the
+ same code to work for multiple EAPIs that may have slightly different rules.
+ An unsupported eapi is handled the same as when eapi is None, which may
+ be helpful for handling of corrupt EAPI metadata in essential functions
+ such as pkgsplit.
+ """
+ eapi_attrs = _eapi_attrs_cache.get(eapi)
+ if eapi_attrs is not None:
+ return eapi_attrs
+
+ orig_eapi = eapi
+ if eapi is not None and not eapi_is_supported(eapi):
+ eapi = None
+
+ eapi_attrs = _eapi_attrs(
+ dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
+ dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
+ exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
+ feature_flag_test = True,
+ feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
+ hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
+ iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
+ iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
+ repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
+ required_use = (eapi is None or eapi_has_required_use(eapi)),
+ required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
+ slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
+ slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
+ src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
+ strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
+ use_deps = (eapi is None or eapi_has_use_deps(eapi)),
+ use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
+ )
+
+ _eapi_attrs_cache[orig_eapi] = eapi_attrs
+ return eapi_attrs
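+
+# Usage sketch (illustrative): a single cached attribute lookup replaces
+# repeated eapi_has_*() calls on hot paths:
+#   attrs = _get_eapi_attrs("5")
+#   if attrs.slot_operator:
+#       pass  # EAPI "5" supports slot operator dependencies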
diff --git a/usr/lib/portage/pym/portage/eclass_cache.py b/usr/lib/portage/pym/portage/eclass_cache.py
new file mode 100644
index 0000000..2988d25
--- /dev/null
+++ b/usr/lib/portage/pym/portage/eclass_cache.py
@@ -0,0 +1,187 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+
+from __future__ import unicode_literals
+
+__all__ = ["cache"]
+
+import stat
+import sys
+import operator
+import warnings
+from portage.util import normalize_path
+import errno
+from portage.exception import FileNotFound, PermissionDenied
+from portage import os
+from portage import checksum
+from portage import _shell_quote
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+
+class hashed_path(object):
+
+ def __init__(self, location):
+ self.location = location
+
+ def __getattr__(self, attr):
+ if attr == 'mtime':
+ # use stat.ST_MTIME; accessing .st_mtime gets you a float
+ # depending on the python version, and long(float) introduces
+ # some rounding issues that aren't present for people using
+ # the straight c api.
+ # thus use the de facto python compatibility workaround:
+ # access via index, which guarantees you get the raw long.
+ try:
+ self.mtime = obj = os.stat(self.location)[stat.ST_MTIME]
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ raise FileNotFound(self.location)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(self.location)
+ raise
+ return obj
+ if not attr.islower():
+ # we don't care to allow .mD5 as an alias for .md5
+ raise AttributeError(attr)
+ hashname = attr.upper()
+ if hashname not in checksum.hashfunc_map:
+ raise AttributeError(attr)
+ val = checksum.perform_checksum(self.location, hashname)[0]
+ setattr(self, attr, val)
+ return val
+
+ def __repr__(self):
+ return "<portage.eclass_cache.hashed_path('%s')>" % (self.location,)
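+
+# Example (illustrative path): attributes are computed lazily on first
+# access and then cached on the instance:
+#   hp = hashed_path('/usr/portage/eclass/eutils.eclass')
+#   hp.mtime  # integer mtime via os.stat()[stat.ST_MTIME]
+#   hp.md5    # checksum computed once by perform_checksum(), then cached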
+
+class cache(object):
+ """
+ Maintains the cache information about eclasses used in ebuild.
+ """
+ def __init__(self, porttree_root, overlays=None):
+ if overlays is not None:
+ warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
+ DeprecationWarning, stacklevel=2)
+
+ self.eclasses = {} # {"Name": hashed_path}
+ self._eclass_locations = {}
+ self._eclass_locations_str = None
+
+ # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+ # ~harring
+ if porttree_root:
+ self.porttree_root = porttree_root
+ self.porttrees = (normalize_path(self.porttree_root),)
+ self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
+ self.update_eclasses()
+ else:
+ self.porttree_root = None
+ self.porttrees = ()
+ self._master_eclass_root = None
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ result = self.__class__(None)
+ result.eclasses = self.eclasses.copy()
+ result._eclass_locations = self._eclass_locations.copy()
+ result.porttree_root = self.porttree_root
+ result.porttrees = self.porttrees
+ result._master_eclass_root = self._master_eclass_root
+ return result
+
+ def append(self, other):
+ """
+ Append another instance to this instance. This will cause eclasses
+ from the other instance to override any eclasses from this instance
+ that have the same name.
+ """
+ if not isinstance(other, self.__class__):
+ raise TypeError(
+ "expected type %s, got %s" % (self.__class__, type(other)))
+ self.porttrees = self.porttrees + other.porttrees
+ self.eclasses.update(other.eclasses)
+ self._eclass_locations.update(other._eclass_locations)
+ self._eclass_locations_str = None
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ self._eclass_locations = {}
+ master_eclasses = {}
+ eclass_len = len(".eclass")
+ ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
+ for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+ try:
+ eclass_filenames = os.listdir(x)
+ except OSError as e:
+ if e.errno in ignored_listdir_errnos:
+ del e
+ continue
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(x)
+ raise
+ for y in eclass_filenames:
+ if not y.endswith(".eclass"):
+ continue
+ obj = hashed_path(os.path.join(x, y))
+ obj.eclass_dir = x
+ try:
+ mtime = obj.mtime
+ except FileNotFound:
+ continue
+ ys = y[:-eclass_len]
+ if x == self._master_eclass_root:
+ master_eclasses[ys] = mtime
+ self.eclasses[ys] = obj
+ self._eclass_locations[ys] = x
+ continue
+
+ master_mtime = master_eclasses.get(ys)
+ if master_mtime is not None:
+ if master_mtime == mtime:
+ # It appears to be identical to the master,
+ # so prefer the master entry.
+ continue
+
+ self.eclasses[ys] = obj
+ self._eclass_locations[ys] = x
+
+ def validate_and_rewrite_cache(self, ec_dict, chf_type, stores_paths):
+ """
+ to be empty; therefore, callers must take care to distinguish
+ to be empty, therefore callers must take care to distinguish
+ between empty dict and None return values.
+ """
+ if not isinstance(ec_dict, dict):
+ return None
+ our_getter = operator.attrgetter(chf_type)
+ cache_getter = lambda x:x
+ if stores_paths:
+ cache_getter = operator.itemgetter(1)
+ d = {}
+ for eclass, ec_data in ec_dict.items():
+ cached_data = self.eclasses.get(eclass)
+ if cached_data is None:
+ return None
+ if cache_getter(ec_data) != our_getter(cached_data):
+ return None
+ d[eclass] = cached_data
+ return d
+
+ def get_eclass_data(self, inherits):
+ ec_dict = {}
+ for x in inherits:
+ ec_dict[x] = self.eclasses[x]
+
+ return ec_dict
+
+ @property
+ def eclass_locations_string(self):
+ if self._eclass_locations_str is None:
+ self._eclass_locations_str = " ".join(_shell_quote(x)
+ for x in reversed(self.porttrees))
+ return self._eclass_locations_str
diff --git a/usr/lib/portage/pym/portage/elog/__init__.py b/usr/lib/portage/pym/portage/elog/__init__.py
new file mode 100644
index 0000000..cc08612
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/__init__.py
@@ -0,0 +1,191 @@
+# elog/__init__.py - elog core functions
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.exception import AlarmSignal, PortageException
+from portage.process import atexit_register
+from portage.elog.messages import collect_ebuild_messages, collect_messages
+from portage.elog.filtering import filter_loglevels
+from portage.localization import _
+from portage import os
+
+def _preload_elog_modules(settings):
+ logsystems = settings.get("PORTAGE_ELOG_SYSTEM", "").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+def _merge_logentries(a, b):
+ rValue = {}
+ phases = set(a)
+ phases.update(b)
+ for p in phases:
+ merged_msgs = []
+ rValue[p] = merged_msgs
+ for d in a, b:
+ msgs = d.get(p)
+ if msgs:
+ merged_msgs.extend(msgs)
+ return rValue
+
+def _combine_logentries(logentries):
+ # generate a single string with all log messages
+ rValue = []
+ for phase in EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ previous_type = None
+ for msgtype, msgcontent in logentries[phase]:
+ if previous_type != msgtype:
+ previous_type = msgtype
+ rValue.append("%s: %s" % (msgtype, phase))
+ if isinstance(msgcontent, basestring):
+ rValue.append(msgcontent.rstrip("\n"))
+ else:
+ for line in msgcontent:
+ rValue.append(line.rstrip("\n"))
+ if rValue:
+ rValue.append("")
+ return "\n".join(rValue)
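+
+# Illustrative result (assumed input): for logentries such as
+#   {"postinst": [("LOG", ["installation finished"])]}
+# the combined text is "LOG: postinst\ninstallation finished\n".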
+
+_elog_mod_imports = {}
+def _load_mod(name):
+ global _elog_mod_imports
+ m = _elog_mod_imports.get(name)
+ if m is None:
+ m = __import__(name)
+ for comp in name.split(".")[1:]:
+ m = getattr(m, comp)
+ _elog_mod_imports[name] = m
+ return m
+
+_elog_listeners = []
+def add_listener(listener):
+ '''
+ Listeners should accept four arguments: settings, key, logentries and logtext
+ '''
+ _elog_listeners.append(listener)
+
+def remove_listener(listener):
+ '''
+ Remove previously added listener
+ '''
+ _elog_listeners.remove(listener)
+
+_elog_atexit_handlers = []
+
+def elog_process(cpv, mysettings, phasefilter=None):
+ global _elog_atexit_handlers
+
+ logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM","").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+ if "T" in mysettings:
+ ebuild_logentries = collect_ebuild_messages(
+ os.path.join(mysettings["T"], "logging"))
+ else:
+ # A build dir isn't necessarily required since the messages.e*
+ # functions allow messages to be generated in-memory.
+ ebuild_logentries = {}
+ all_logentries = collect_messages(key=cpv, phasefilter=phasefilter)
+ if cpv in all_logentries:
+ # Messages generated by the python elog implementation are assumed
+ # to come first. For example, this ensures correct order for einfo
+ # messages that are generated prior to the setup phase.
+ all_logentries[cpv] = \
+ _merge_logentries(all_logentries[cpv], ebuild_logentries)
+ else:
+ all_logentries[cpv] = ebuild_logentries
+
+ my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
+ logsystems = {}
+ for token in mysettings.get("PORTAGE_ELOG_SYSTEM", "").split():
+ if ":" in token:
+ s, levels = token.split(":", 1)
+ levels = levels.split(",")
+ else:
+ s = token
+ levels = ()
+ levels_set = logsystems.get(s)
+ if levels_set is None:
+ levels_set = set()
+ logsystems[s] = levels_set
+ levels_set.update(levels)
+
+ for key in all_logentries:
+ default_logentries = filter_loglevels(all_logentries[key], my_elog_classes)
+
+ # in case the filters matched all messages and no module overrides exist
+ if len(default_logentries) == 0 and (not ":" in mysettings.get("PORTAGE_ELOG_SYSTEM", "")):
+ continue
+
+ default_fulllog = _combine_logentries(default_logentries)
+
+ # call listeners
+ for listener in _elog_listeners:
+ listener(mysettings, str(key), default_logentries, default_fulllog)
+
+ # pass the processing to the individual modules
+ for s, levels in logsystems.items():
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if levels:
+ mod_logentries = filter_loglevels(all_logentries[key], levels)
+ mod_fulllog = _combine_logentries(mod_logentries)
+ else:
+ mod_logentries = default_logentries
+ mod_fulllog = default_fulllog
+ if len(mod_logentries) == 0:
+ continue
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ m = _load_mod("portage.elog.mod_" + s)
+ # Timeout after one minute (in case something like the mail
+ # module hangs).
+ try:
+ AlarmSignal.register(60)
+ m.process(mysettings, str(key), mod_logentries, mod_fulllog)
+ finally:
+ AlarmSignal.unregister()
+ if hasattr(m, "finalize") and m.finalize not in _elog_atexit_handlers:
+ _elog_atexit_handlers.append(m.finalize)
+ atexit_register(m.finalize)
+ except (ImportError, AttributeError) as e:
+ writemsg(_("!!! Error while importing logging modules "
+ "while loading \"mod_%s\":\n") % str(s))
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ except AlarmSignal:
+ writemsg("Timeout in elog_process for system '%s'\n" % s,
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
diff --git a/usr/lib/portage/pym/portage/elog/filtering.py b/usr/lib/portage/pym/portage/elog/filtering.py
new file mode 100644
index 0000000..82181a4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/filtering.py
@@ -0,0 +1,15 @@
+# elog/messages.py - elog core functions
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def filter_loglevels(logentries, loglevels):
+ # remove unwanted entries from all logentries
+ rValue = {}
+ loglevels = [x.upper() for x in loglevels]
+ for phase in logentries:
+ for msgtype, msgcontent in logentries[phase]:
+ if msgtype.upper() in loglevels or "*" in loglevels:
+ if phase not in rValue:
+ rValue[phase] = []
+ rValue[phase].append((msgtype, msgcontent))
+ return rValue
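+
+# Example (illustrative): keep only warnings and errors, matched
+# case-insensitively:
+#   filter_loglevels(logentries, ("warn", "error"))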
diff --git a/usr/lib/portage/pym/portage/elog/messages.py b/usr/lib/portage/pym/portage/elog/messages.py
new file mode 100644
index 0000000..a4897d8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/messages.py
@@ -0,0 +1,190 @@
+# elog/messages.py - elog core functions
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+
+import io
+import sys
+
+_log_levels = frozenset([
+ "ERROR",
+ "INFO",
+ "LOG",
+ "QA",
+ "WARN",
+])
+
+def collect_ebuild_messages(path):
+ """ Collect elog messages generated by the bash logging function stored
+ at 'path'.
+ """
+ mylogfiles = None
+ try:
+ mylogfiles = os.listdir(path)
+ except OSError:
+ pass
+ # shortcut for packages without any messages
+ if not mylogfiles:
+ return {}
+ # exploit listdir() file order so we process log entries in chronological order
+ mylogfiles.reverse()
+ logentries = {}
+ for msgfunction in mylogfiles:
+ filename = os.path.join(path, msgfunction)
+ if msgfunction not in EBUILD_PHASES:
+ writemsg(_("!!! can't process invalid log file: %s\n") % filename,
+ noiselevel=-1)
+ continue
+ if msgfunction not in logentries:
+ logentries[msgfunction] = []
+ lastmsgtype = None
+ msgcontent = []
+ f = io.open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ # Use split('\n') since normal line iteration or readlines() will
+ # split on \r characters as shown in bug #390833.
+ for l in f.read().split('\n'):
+ if not l:
+ continue
+ try:
+ msgtype, msg = l.split(" ", 1)
+ if msgtype not in _log_levels:
+ raise ValueError(msgtype)
+ except ValueError:
+ writemsg(_("!!! malformed entry in "
+ "log file: '%s': %s\n") % (filename, l), noiselevel=-1)
+ continue
+
+ if lastmsgtype is None:
+ lastmsgtype = msgtype
+
+ if msgtype == lastmsgtype:
+ msgcontent.append(msg)
+ else:
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+ msgcontent = [msg]
+ lastmsgtype = msgtype
+ f.close()
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+
+ # clean logfiles to avoid repetitions
+ for f in mylogfiles:
+ try:
+ os.unlink(os.path.join(path, f))
+ except OSError:
+ pass
+ return logentries
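+
+# Each line in those bash-side log files has the form "<LEVEL> <message>",
+# e.g. "WARN something went wrong" (message text illustrative); lines with
+# an unrecognized level are reported as malformed and skipped.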
+
+_msgbuffer = {}
+def _elog_base(level, msg, phase="other", key=None, color=None, out=None):
+ """ Backend for the other messaging functions, should not be called
+ directly.
+ """
+
+ # TODO: Have callers pass in a more unique 'key' parameter than a plain
+ # cpv, in order to ensure that messages are properly grouped together
+ # for a given package instance, and also to ensure that each elog module's
+ # process() function is only called once for each unique package. This is
+ # needed not only when building packages in parallel, but also to preserve
+ # continuity in messages when a package is simply updated, since we don't
+ # want the elog_process() call from the uninstall of the old version to
+ # cause discontinuity in the elog messages of the new one being installed.
+
+ global _msgbuffer
+
+ if out is None:
+ out = sys.stdout
+
+ if color is None:
+ color = "GOOD"
+
+ msg = _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ formatted_msg = colorize(color, " * ") + msg + "\n"
+
+ # avoid potential UnicodeEncodeError
+ if out in (sys.stdout, sys.stderr):
+ formatted_msg = _unicode_encode(formatted_msg,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+
+ out.write(formatted_msg)
+
+ if key not in _msgbuffer:
+ _msgbuffer[key] = {}
+ if phase not in _msgbuffer[key]:
+ _msgbuffer[key][phase] = []
+ _msgbuffer[key][phase].append((level, msg))
+
+ #raise NotImplementedError()
+
+def collect_messages(key=None, phasefilter=None):
+ global _msgbuffer
+
+ if key is None:
+ rValue = _msgbuffer
+ _reset_buffer()
+ else:
+ rValue = {}
+ if key in _msgbuffer:
+ if phasefilter is None:
+ rValue[key] = _msgbuffer.pop(key)
+ else:
+ rValue[key] = {}
+ for phase in phasefilter:
+ try:
+ rValue[key][phase] = _msgbuffer[key].pop(phase)
+ except KeyError:
+ pass
+ if not _msgbuffer[key]:
+ del _msgbuffer[key]
+ return rValue
+
+def _reset_buffer():
+ """ Reset the internal message buffer when it has been processed,
+ should not be called directly.
+ """
+ global _msgbuffer
+
+ _msgbuffer = {}
+
+# creating and exporting the actual messaging functions
+_functions = { "einfo": ("INFO", "GOOD"),
+ "elog": ("LOG", "GOOD"),
+ "ewarn": ("WARN", "WARN"),
+ "eqawarn": ("QA", "WARN"),
+ "eerror": ("ERROR", "BAD"),
+}
+
+class _make_msgfunction(object):
+ __slots__ = ('_color', '_level')
+ def __init__(self, level, color):
+ self._level = level
+ self._color = color
+ def __call__(self, msg, phase="other", key=None, out=None):
+ """
+ Display and log a message assigned to the given key/cpv.
+ """
+ _elog_base(self._level, msg, phase=phase,
+ key=key, color=self._color, out=out)
+
+for f in _functions:
+ setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1]))
+del f, _functions
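+
+# Result (illustrative): module-level einfo/elog/ewarn/eqawarn/eerror
+# callables, e.g. einfo("Configuration saved", key=cpv) buffers an INFO
+# entry for the given key and prints it with the GOOD color.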
diff --git a/usr/lib/portage/pym/portage/elog/mod_custom.py b/usr/lib/portage/pym/portage/elog/mod_custom.py
new file mode 100644
index 0000000..e1a5223
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_custom.py
@@ -0,0 +1,19 @@
+# elog/mod_custom.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.elog.mod_save, portage.process, portage.exception
+
+def process(mysettings, key, logentries, fulltext):
+ elogfilename = portage.elog.mod_save.process(mysettings, key, logentries, fulltext)
+
+ if not mysettings.get("PORTAGE_ELOG_COMMAND"):
+ raise portage.exception.MissingParameter("!!! Custom logging requested but PORTAGE_ELOG_COMMAND is not defined")
+ else:
+ mylogcmd = mysettings["PORTAGE_ELOG_COMMAND"]
+ mylogcmd = mylogcmd.replace("${LOGFILE}", elogfilename)
+ mylogcmd = mylogcmd.replace("${PACKAGE}", key)
+ retval = portage.process.spawn_bash(mylogcmd)
+ if retval != 0:
+ raise portage.exception.PortageException("!!! PORTAGE_ELOG_COMMAND failed with exitcode %d" % retval)
+ return
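+
+# Example PORTAGE_ELOG_COMMAND (hypothetical hook script; the ${PACKAGE}
+# and ${LOGFILE} placeholders are the ones substituted above):
+#   PORTAGE_ELOG_COMMAND="/usr/local/bin/elog-hook '${PACKAGE}' '${LOGFILE}'"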
diff --git a/usr/lib/portage/pym/portage/elog/mod_echo.py b/usr/lib/portage/pym/portage/elog/mod_echo.py
new file mode 100644
index 0000000..f9cc537
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_echo.py
@@ -0,0 +1,60 @@
+# elog/mod_echo.py - elog dispatch module
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+from portage.output import EOutput, colorize
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_items = []
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ _items.append((mysettings["ROOT"], key, logentries))
+
+def finalize():
+ # For consistency, send all message types to stdout.
+ sys.stdout.flush()
+ sys.stderr.flush()
+ stderr = sys.stderr
+ try:
+ sys.stderr = sys.stdout
+ _finalize()
+ finally:
+ sys.stderr = stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+def _finalize():
+ global _items
+ printer = EOutput()
+ for root, key, logentries in _items:
+ print()
+ if root == "/":
+ printer.einfo(_("Messages for package %s:") %
+ colorize("INFORM", key))
+ else:
+ printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
+ {"pkg": colorize("INFORM", key), "root": root})
+ print()
+ for phase in EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ fmap = {"INFO": printer.einfo,
+ "WARN": printer.ewarn,
+ "ERROR": printer.eerror,
+ "LOG": printer.einfo,
+ "QA": printer.ewarn}
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ fmap[msgtype](line.strip("\n"))
+ _items = []
+ return
diff --git a/usr/lib/portage/pym/portage/elog/mod_mail.py b/usr/lib/portage/pym/portage/elog/mod_mail.py
new file mode 100644
index 0000000..086c683
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_mail.py
@@ -0,0 +1,43 @@
+# elog/mod_mail.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.mail, socket
+from portage.exception import PortageException
+from portage.localization import _
+from portage.util import writemsg
+
+def process(mysettings, key, logentries, fulltext):
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+ mysubject = mysubject.replace("${PACKAGE}", key)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ # look at the phases listed in our logentries to figure out what action was performed
+ action = _("merged")
+ for phase in logentries:
+ # if we found a *rm phase assume that the package was unmerged
+ if phase in ["postrm", "prerm"]:
+ action = _("unmerged")
+ # if we think that the package was unmerged, make sure there was no unexpected
+ # phase recorded to avoid misinformation
+ if action == _("unmerged"):
+ for phase in logentries:
+ if phase not in ["postrm", "prerm", "other"]:
+ action = _("unknown")
+
+ mysubject = mysubject.replace("${ACTION}", action)
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
+ try:
+ portage.mail.send_mail(mysettings, mymessage)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
+ return
diff --git a/usr/lib/portage/pym/portage/elog/mod_mail_summary.py b/usr/lib/portage/pym/portage/elog/mod_mail_summary.py
new file mode 100644
index 0000000..0bd67f2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_mail_summary.py
@@ -0,0 +1,89 @@
+# elog/mod_mail_summary.py - elog dispatch module
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.exception import AlarmSignal, PortageException
+from portage.localization import _
+from portage.util import writemsg
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+import socket
+import time
+
+_config_keys = ('PORTAGE_ELOG_MAILURI', 'PORTAGE_ELOG_MAILFROM',
+ 'PORTAGE_ELOG_MAILSUBJECT',)
+_items = {}
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ time_str = _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S %Z", time.localtime(time.time())),
+ encoding=_encodings['content'], errors='replace')
+ header = _(">>> Messages generated for package %(pkg)s by process %(pid)d on %(time)s:\n\n") % \
+ {"pkg": key, "pid": os.getpid(), "time": time_str}
+ config_root = mysettings["PORTAGE_CONFIGROOT"]
+
+ # Copy needed variables from the config instance,
+ # since we don't need to hold a reference for the
+ # whole thing. This also makes it possible to
+ # rely on per-package variable settings that may
+ # have come from /etc/portage/package.env, since
+ # we'll be isolated from any future mutations of
+ # mysettings.
+ config_dict = {}
+ for k in _config_keys:
+ v = mysettings.get(k)
+ if v is not None:
+ config_dict[k] = v
+
+ config_dict, items = _items.setdefault(config_root, (config_dict, {}))
+ items[key] = header + fulltext
+
+def finalize():
+ global _items
+ for mysettings, items in _items.values():
+ _finalize(mysettings, items)
+ _items.clear()
+
+def _finalize(mysettings, items):
+ if len(items) == 0:
+ return
+ elif len(items) == 1:
+ count = _("one package")
+ else:
+ count = _("multiple packages")
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings.get("PORTAGE_ELOG_MAILFROM", "")
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings.get("PORTAGE_ELOG_MAILSUBJECT", "")
+ mysubject = mysubject.replace("${PACKAGE}", count)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ mybody = _("elog messages for the following packages generated by "
+ "process %(pid)d on host %(host)s:\n") % {"pid": os.getpid(), "host": socket.getfqdn()}
+ for key in items:
+ mybody += "- %s\n" % key
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
+ mybody, attachments=list(items.values()))
+
+ # Timeout after one minute in case send_mail() blocks indefinitely.
+ try:
+ try:
+ AlarmSignal.register(60)
+ portage.mail.send_mail(mysettings, mymessage)
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("Timeout in finalize() for elog system 'mail_summary'\n",
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % (e,), noiselevel=-1)
+
+ return
diff --git a/usr/lib/portage/pym/portage/elog/mod_save.py b/usr/lib/portage/pym/portage/elog/mod_save.py
new file mode 100644
index 0000000..c81e93d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_save.py
@@ -0,0 +1,86 @@
+# elog/mod_save.py - elog dispatch module
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+from portage.const import EPREFIX_LSTRIP
+
+def process(mysettings, key, logentries, fulltext):
+
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+ "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ uid = -1
+ if portage.data.secpass >= 2:
+ uid = portage_uid
+ ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)
+
+ cat = mysettings['CATEGORY']
+ pf = mysettings['PF']
+
+ elogfilename = pf + ":" + _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
+ encoding=_encodings['content'], errors='replace') + ".log"
+
+ if "split-elog" in mysettings.features:
+ log_subdir = os.path.join(logdir, "elog", cat)
+ elogfilename = os.path.join(log_subdir, elogfilename)
+ else:
+ log_subdir = os.path.join(logdir, "elog")
+ elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
+ _ensure_log_subdirs(logdir, log_subdir)
+
+ try:
+ with io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['content'],
+ errors='backslashreplace') as elogfile:
+ elogfile.write(_unicode_decode(fulltext))
+ except IOError as e:
+ func_call = "open('%s', 'w')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
+
+ # Copy group permission bits from parent directory.
+ elogdir_st = os.stat(log_subdir)
+ elogdir_gid = elogdir_st.st_gid
+ elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+
+ # Copy the uid from the parent directory if we have privileges
+ # to do so, for compatibility with our default logrotate
+ # config (see bug 378451). With the "su portage portage"
+ # directive and logrotate-3.8.0, logrotate's chown call during
+ # the compression phase will only succeed if the log file's uid
+ # is portage_uid.
+ logfile_uid = -1
+ if portage.data.secpass >= 2:
+ logfile_uid = elogdir_st.st_uid
+ apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
+ mode=elogdir_grp_mode, mask=0)
+
+ return elogfilename
diff --git a/usr/lib/portage/pym/portage/elog/mod_save_summary.py b/usr/lib/portage/pym/portage/elog/mod_save_summary.py
new file mode 100644
index 0000000..786f894
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_save_summary.py
@@ -0,0 +1,92 @@
+# elog/mod_save_summary.py - elog dispatch module
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.localization import _
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
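+	# Use PORT_LOGDIR when set, falling back to ${EPREFIX}/var/log/portage.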
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+ "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ logdir_uid = -1
+ if portage.data.secpass >= 2:
+ logdir_uid = portage_uid
+ ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)
+
+ elogdir = os.path.join(logdir, "elog")
+ _ensure_log_subdirs(logdir, elogdir)
+
+ # TODO: Locking
+	elogfilename = os.path.join(elogdir, "summary.log")
+ try:
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ except IOError as e:
+ func_call = "open('%s', 'a')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
+
+ # Copy group permission bits from parent directory.
+ elogdir_st = os.stat(elogdir)
+ elogdir_gid = elogdir_st.st_gid
+ elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+
+ # Copy the uid from the parent directory if we have privileges
+ # to do so, for compatibility with our default logrotate
+ # config (see bug 378451). With the "su portage portage"
+ # directive and logrotate-3.8.0, logrotate's chown call during
+ # the compression phase will only succeed if the log file's uid
+ # is portage_uid.
+ logfile_uid = -1
+ if portage.data.secpass >= 2:
+ logfile_uid = elogdir_st.st_uid
+ apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
+ mode=elogdir_grp_mode, mask=0)
+
+ time_fmt = "%Y-%m-%d %H:%M:%S %Z"
+ if sys.hexversion < 0x3000000:
+ time_fmt = _unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %Z may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ elogfile.write(_(">>> Messages generated by process "
+ "%(pid)d on %(time)s for package %(pkg)s:\n\n") %
+ {"pid": os.getpid(), "time": time_str, "pkg": key})
+ elogfile.write(_unicode_decode(fulltext))
+ elogfile.write("\n")
+ elogfile.close()
+
+ return elogfilename
diff --git a/usr/lib/portage/pym/portage/elog/mod_syslog.py b/usr/lib/portage/pym/portage/elog/mod_syslog.py
new file mode 100644
index 0000000..8b26ffa
--- /dev/null
+++ b/usr/lib/portage/pym/portage/elog/mod_syslog.py
@@ -0,0 +1,37 @@
+# elog/mod_syslog.py - elog dispatch module
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import syslog
+from portage.const import EBUILD_PHASES
+from portage import _encodings
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_pri = {
+ "INFO" : syslog.LOG_INFO,
+ "WARN" : syslog.LOG_WARNING,
+ "ERROR" : syslog.LOG_ERR,
+ "LOG" : syslog.LOG_NOTICE,
+ "QA" : syslog.LOG_WARNING
+}
+
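+# Forward each ebuild phase's log entries to syslog, mapping elog message
+# types to syslog priorities via _pri.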
+def process(mysettings, key, logentries, fulltext):
+ syslog.openlog("portage", syslog.LOG_ERR | syslog.LOG_WARNING | syslog.LOG_INFO | syslog.LOG_NOTICE, syslog.LOG_LOCAL5)
+ for phase in EBUILD_PHASES:
+		if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ line = "%s: %s: %s" % (key, phase, line)
+ if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
+ # Avoid TypeError from syslog.syslog()
+ line = line.encode(_encodings['content'],
+ 'backslashreplace')
+ syslog.syslog(_pri[msgtype], line.rstrip("\n"))
+ syslog.closelog()
diff --git a/usr/lib/portage/pym/portage/emaint/__init__.py b/usr/lib/portage/pym/portage/emaint/__init__.py
new file mode 100644
index 0000000..48bc6e2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""System health checks and maintenance utilities.
+"""
diff --git a/usr/lib/portage/pym/portage/emaint/defaults.py b/usr/lib/portage/pym/portage/emaint/defaults.py
new file mode 100644
index 0000000..30f36af
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/defaults.py
@@ -0,0 +1,25 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# parser option data
+CHECK = {"short": "-c", "long": "--check",
+ "help": "Check for problems (a default option for most modules)",
+ 'status': "Checking %s for problems",
+ 'action': 'store_true',
+ 'func': 'check'
+ }
+
+FIX = {"short": "-f", "long": "--fix",
+ "help": "Attempt to fix problems (a default option for most modules)",
+ 'status': "Attempting to fix %s",
+ 'action': 'store_true',
+ 'func': 'fix'
+ }
+
+VERSION = {"long": "--version",
+ "help": "show program's version number and exit",
+ 'action': 'store_true',
+ }
+
+# parser options
+DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION}
diff --git a/usr/lib/portage/pym/portage/emaint/main.py b/usr/lib/portage/pym/portage/emaint/main.py
new file mode 100644
index 0000000..646883d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/main.py
@@ -0,0 +1,225 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.module import Modules
+from portage.emaint.progress import ProgressBar
+from portage.emaint.defaults import DEFAULT_OPTIONS
+from portage.util._argparse import ArgumentParser
+
+class OptionItem(object):
+ """class to hold module ArgumentParser options data
+ """
+
+ def __init__(self, opt):
+ """
+ @type opt: dictionary
+ @param opt: options parser options
+ """
+ self.short = opt.get('short')
+ self.long = opt.get('long')
+		# '-' is not allowed in python identifiers,
+		# so store the sanitized target variable name
+ self.target = self.long[2:].replace('-','_')
+ self.help = opt.get('help')
+ self.status = opt.get('status')
+ self.func = opt.get('func')
+ self.action = opt.get('action')
+ self.type = opt.get('type')
+ self.dest = opt.get('dest')
+
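+	# The option strings (e.g. "-c", "--check") to pass to ArgumentParser.add_argument().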
+ @property
+ def pargs(self):
+ pargs = []
+ if self.short is not None:
+ pargs.append(self.short)
+ if self.long is not None:
+ pargs.append(self.long)
+ return pargs
+
+ @property
+ def kwargs(self):
+ # Support for keyword arguments varies depending on the action,
+ # so only pass in the keywords that are needed, in order
+ # to avoid a TypeError.
+ kwargs = {}
+ if self.help is not None:
+ kwargs['help'] = self.help
+ if self.action is not None:
+ kwargs['action'] = self.action
+ if self.type is not None:
+ kwargs['type'] = self.type
+ if self.dest is not None:
+ kwargs['dest'] = self.dest
+ return kwargs
+
+def usage(module_controller):
+ _usage = "usage: emaint [options] COMMAND"
+
+ desc = "The emaint program provides an interface to system health " + \
+ "checks and maintenance. See the emaint(1) man page " + \
+ "for additional information about the following commands:"
+
+ _usage += "\n\n"
+ for line in textwrap.wrap(desc, 65):
+ _usage += "%s\n" % line
+ _usage += "\nCommands:\n"
+ _usage += " %s" % "all".ljust(15) + \
+ "Perform all supported commands\n"
+ for mod in module_controller.module_names:
+ desc = textwrap.wrap(module_controller.get_description(mod), 65)
+ _usage += " %s%s\n" % (mod.ljust(15), desc[0])
+ for d in desc[1:]:
+ _usage += " %s%s\n" % (' '.ljust(15), d)
+ return _usage
+
+
+def module_opts(module_controller, module):
+ _usage = " %s module options:\n" % module
+ opts = module_controller.get_func_descriptions(module)
+ if opts == {}:
+ opts = DEFAULT_OPTIONS
+ for opt in sorted(opts):
+ optd = opts[opt]
+ opto = " %s, %s" % (optd['short'], optd['long'])
+ _usage += '%s %s\n' % (opto.ljust(15), optd['help'])
+ _usage += '\n'
+ return _usage
+
+
+class TaskHandler(object):
+ """Handles the running of the tasks it is given"""
+
+ def __init__(self, show_progress_bar=True, verbose=True, callback=None, module_output=None):
+ self.show_progress_bar = show_progress_bar
+ self.verbose = verbose
+ self.callback = callback
+ self.module_output = module_output
+ self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+ self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27)
+
+ def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
+ """Runs the module tasks"""
+ if tasks is None or func is None:
+ return
+ for task in tasks:
+ inst = task()
+ show_progress = self.show_progress_bar and self.isatty
+			# check whether the function supports a progress bar
+			# and disable it if it does not
+ if show_progress and hasattr(inst, 'can_progressbar'):
+ show_progress = inst.can_progressbar(func)
+ if show_progress:
+ self.progress_bar.reset()
+ self.progress_bar.set_label(func + " " + inst.name())
+ onProgress = self.progress_bar.start()
+ else:
+ onProgress = None
+ kwargs = {
+ 'onProgress': onProgress,
+ 'module_output': self.module_output,
+ # pass in a copy of the options so a module can not pollute or change
+ # them for other tasks if there is more to do.
+ 'options': options.copy()
+ }
+ result = getattr(inst, func)(**kwargs)
+ if show_progress:
+ # make sure the final progress is displayed
+ self.progress_bar.display()
+ print()
+ self.progress_bar.stop()
+ if self.callback:
+ self.callback(result)
+
+
+def print_results(results):
+ if results:
+ print()
+ print("\n".join(results))
+ print("\n")
+
+
+def emaint_main(myargv):
+
+ # Similar to emerge, emaint needs a default umask so that created
+ # files (such as the world file) have sane permissions.
+ os.umask(0o22)
+
+ module_controller = Modules(namepath="portage.emaint.modules")
+ module_names = module_controller.module_names[:]
+ module_names.insert(0, "all")
+
+
+ parser = ArgumentParser(usage=usage(module_controller))
+ # add default options
+ parser_options = []
+ for opt in DEFAULT_OPTIONS:
+ parser_options.append(OptionItem(DEFAULT_OPTIONS[opt]))
+ for mod in module_names[1:]:
+ desc = module_controller.get_func_descriptions(mod)
+ if desc:
+ for opt in desc:
+ parser_options.append(OptionItem(desc[opt]))
+ for opt in parser_options:
+ parser.add_argument(*opt.pargs, **opt.kwargs)
+
+ options, args = parser.parse_known_args(args=myargv)
+
+ if options.version:
+ print(portage.VERSION)
+ return os.EX_OK
+
+ if len(args) != 1:
+ parser.error("Incorrect number of arguments")
+ if args[0] not in module_names:
+ parser.error("%s target is not a known target" % args[0])
+
+ check_opt = None
+ func = status = long_action = None
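+	# Determine which action option (e.g. --check, --fix) was supplied;
+	# only one action may be selected per invocation.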
+ for opt in parser_options:
+ if opt.long == '--check':
+ # Default action
+ check_opt = opt
+ if opt.status and getattr(options, opt.target, False):
+ if long_action is not None:
+ parser.error("--%s and %s are exclusive options" %
+ (long_action, opt.long))
+ status = opt.status
+ func = opt.func
+ long_action = opt.long.lstrip('-')
+
+ if long_action is None:
+ #print("DEBUG: long_action is None: setting to 'check'")
+ long_action = 'check'
+ func = check_opt.func
+ status = check_opt.status
+
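+	# For the special 'all' target, queue every module that implements
+	# the selected function.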
+ if args[0] == "all":
+ tasks = []
+ for m in module_names[1:]:
+ #print("DEBUG: module: %s, functions: " % (m, str(module_controller.get_functions(m))))
+ if func in module_controller.get_functions(m):
+ tasks.append(module_controller.get_class(m))
+ elif func in module_controller.get_functions(args[0]):
+		tasks = [module_controller.get_class(args[0])]
+ else:
+ portage.util.writemsg(
+ "\nERROR: module '%s' does not have option '--%s'\n\n" %
+ (args[0], long_action), noiselevel=-1)
+ portage.util.writemsg(module_opts(module_controller, args[0]),
+ noiselevel=-1)
+ sys.exit(1)
+
+ # need to pass the parser options dict to the modules
+ # so they are available if needed.
+ task_opts = options.__dict__
+ taskmaster = TaskHandler(callback=print_results, module_output=sys.stdout)
+ taskmaster.run_tasks(tasks, func, status, options=task_opts)
diff --git a/usr/lib/portage/pym/portage/emaint/module.py b/usr/lib/portage/pym/portage/emaint/module.py
new file mode 100644
index 0000000..07a0cb7
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/module.py
@@ -0,0 +1,194 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from __future__ import print_function
+
+from portage import os
+from portage.exception import PortageException
+from portage.cache.mappings import ProtectedDict
+
+
+class InvalidModuleName(PortageException):
+ """An invalid or unknown module name."""
+
+
+class Module(object):
+ """Class to define and hold our plug-in module
+
+ @type name: string
+ @param name: the module name
+	@type namepath: string
+	@param namepath: the python import path to the module
+ """
+
+ def __init__(self, name, namepath):
+		"""Initialize instance variables"""
+ self.name = name
+ self._namepath = namepath
+ self.kids_names = []
+ self.kids = {}
+ self.initialized = self._initialize()
+
+ def _initialize(self):
+ """Initialize the plug-in module
+
+ @rtype: boolean
+ """
+ self.valid = False
+ try:
+ mod_name = ".".join([self._namepath, self.name])
+ self._module = __import__(mod_name, [], [], ["not empty"])
+ self.valid = True
+ except ImportError as e:
+			print("MODULE: failed to import", mod_name, "error was:", e)
+ return False
+ self.module_spec = self._module.module_spec
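+		# Register each submodule declared in module_spec['provides'];
+		# the actual import is deferred until get_class() is called.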
+ for submodule in self.module_spec['provides']:
+ kid = self.module_spec['provides'][submodule]
+ kidname = kid['name']
+ kid['module_name'] = '.'.join([mod_name, self.name])
+ kid['is_imported'] = False
+ self.kids[kidname] = kid
+ self.kids_names.append(kidname)
+ return True
+
+ def get_class(self, name):
+ if not name or name not in self.kids_names:
+			raise InvalidModuleName("Module name '%s' was invalid or not "
+				"part of the module '%s'" % (name, self.name))
+ kid = self.kids[name]
+ if kid['is_imported']:
+ module = kid['instance']
+ else:
+ try:
+ module = __import__(kid['module_name'], [], [], ["not empty"])
+ kid['instance'] = module
+ kid['is_imported'] = True
+ except ImportError:
+ raise
+ mod_class = getattr(module, kid['class'])
+ return mod_class
+
+
+class Modules(object):
+ """Dynamic modules system for loading and retrieving any of the
+	installed emaint modules and/or the classes they provide
+
+ @param path: Optional path to the "modules" directory or
+ defaults to the directory of this file + '/modules'
+ @param namepath: Optional python import path to the "modules" directory or
+ defaults to the directory name of this file + '.modules'
+ """
+
+ def __init__(self, path=None, namepath=None):
+ if path:
+ self._module_path = path
+ else:
+ self._module_path = os.path.join((
+ os.path.dirname(os.path.realpath(__file__))), "modules")
+ if namepath:
+ self._namepath = namepath
+ else:
+			self._namepath = '.'.join([os.path.dirname(
+				os.path.realpath(__file__)), "modules"])
+ self._modules = self._get_all_modules()
+ self.modules = ProtectedDict(self._modules)
+ self.module_names = sorted(self._modules)
+ #self.modules = {}
+ #for mod in self.module_names:
+ #self.module[mod] = LazyLoad(
+
+ def _get_all_modules(self):
+ """scans the emaint modules dir for loadable modules
+
+ @rtype: dictionary of module_plugins
+ """
+ module_dir = self._module_path
+ importables = []
+ names = os.listdir(module_dir)
+ for entry in names:
+ # skip any __init__ or __pycache__ files or directories
+ if entry.startswith('__'):
+ continue
+ try:
+				# stat the entry's __init__.py to ensure it is a real
+				# module directory; skip the entry if the stat fails
+ os.lstat(os.path.join(module_dir, entry, '__init__.py'))
+ importables.append(entry)
+ except EnvironmentError:
+ pass
+ kids = {}
+ for entry in importables:
+ new_module = Module(entry, self._namepath)
+ for module_name in new_module.kids:
+ kid = new_module.kids[module_name]
+ kid['parent'] = new_module
+ kids[kid['name']] = kid
+ return kids
+
+ def get_module_names(self):
+		"""Convenience function to return the list of installed modules
+ available
+
+ @rtype: list
+ @return: the installed module names available
+ """
+ return self.module_names
+
+ def get_class(self, modname):
+		"""Retrieves the desired module class
+
+ @type modname: string
+ @param modname: the module class name
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['parent'].get_class(modname)
+ else:
+			raise InvalidModuleName("Module name '%s' was invalid or not "
+				"found" % modname)
+ return mod
+
+ def get_description(self, modname):
+		"""Retrieves the module class description
+
+		@type modname: string
+		@param modname: the module class name
+		@rtype: string
+		@return: the module class's description
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['description']
+ else:
+			raise InvalidModuleName("Module name '%s' was invalid or not "
+				"found" % modname)
+ return mod
+
+ def get_functions(self, modname):
+ """Retrieves the module class exported function names
+
+ @type modname: string
+ @param modname: the module class name
+		@rtype: list
+		@return: the module class's exported function names
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['functions']
+ else:
+			raise InvalidModuleName("Module name '%s' was invalid or not "
+				"found" % modname)
+ return mod
+
+ def get_func_descriptions(self, modname):
+		"""Retrieves the module class's exported function descriptions
+
+ @type modname: string
+ @param modname: the module class name
+		@rtype: dictionary
+		@return: the module class's exported function descriptions
+ """
+ if modname and modname in self.module_names:
+ desc = self._modules[modname]['func_desc']
+ else:
+			raise InvalidModuleName("Module name '%s' was invalid or not "
+				"found" % modname)
+ return desc
diff --git a/usr/lib/portage/pym/portage/emaint/modules/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/__init__.py
new file mode 100644
index 0000000..f67197d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Plug-in modules for system health checks and maintenance.
+"""
diff --git a/usr/lib/portage/pym/portage/emaint/modules/binhost/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/binhost/__init__.py
new file mode 100644
index 0000000..f2220e9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/binhost/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Scan and generate metadata indexes for binary packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'binhost',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "binhost",
+ 'class': "BinhostHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/binhost/binhost.py b/usr/lib/portage/pym/portage/emaint/modules/binhost/binhost.py
new file mode 100644
index 0000000..1138a8c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/binhost/binhost.py
@@ -0,0 +1,165 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import stat
+
+import portage
+from portage import os
+from portage.util import writemsg
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class BinhostHandler(object):
+
+ short_desc = "Generate a metadata index for binary packages"
+
+	@staticmethod
+	def name():
+		return "binhost"
+
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ self._bintree = portage.db[eroot]["bintree"]
+ self._bintree.populate()
+ self._pkgindex_file = self._bintree._pkgindex_file
+ self._pkgindex = self._bintree._load_pkgindex()
+
+ def _need_update(self, cpv, data):
+
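+		# An index entry is stale if MD5/SIZE/MTIME are missing or if the
+		# recorded size/mtime no longer match the package file on disk.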
+ if "MD5" not in data:
+ return True
+
+ size = data.get("SIZE")
+ if size is None:
+ return True
+
+ mtime = data.get("MTIME")
+ if mtime is None:
+ return True
+
+ pkg_path = self._bintree.getname(cpv)
+ try:
+ s = os.lstat(pkg_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ # We can't update the index for this one because
+ # it disappeared.
+ return False
+
+ try:
+ if long(mtime) != s[stat.ST_MTIME]:
+ return True
+ if long(size) != long(s.st_size):
+ return True
+ except ValueError:
+ return True
+
+ return False
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ missing = []
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ if onProgress:
+ onProgress(maxval, 0)
+ pkgindex = self._pkgindex
+ missing = []
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+ for i, cpv in enumerate(cpv_all):
+ d = metadata.get(cpv)
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+ if onProgress:
+ onProgress(maxval, i+1)
+ errors = ["'%s' is not in Packages" % cpv for cpv in missing]
+ stale = set(metadata).difference(cpv_all)
+ for cpv in stale:
+ errors.append("'%s' is not in the repository" % cpv)
+ return errors
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ bintree = self._bintree
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+ missing = []
+ maxval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ pkgindex = self._pkgindex
+ missing = []
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+
+ for i, cpv in enumerate(cpv_all):
+ d = metadata.get(cpv)
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+
+ stale = set(metadata).difference(cpv_all)
+ if missing or stale:
+ from portage import locks
+ pkgindex_lock = locks.lockfile(
+ self._pkgindex_file, wantnewlockfile=1)
+ try:
+ # Repopulate with lock held.
+ bintree._populate()
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+
+ pkgindex = bintree._load_pkgindex()
+ self._pkgindex = pkgindex
+
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+
+ # Recount missing packages, with lock held.
+ del missing[:]
+ for i, cpv in enumerate(cpv_all):
+ d = metadata.get(cpv)
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+
+ maxval = len(missing)
+ for i, cpv in enumerate(missing):
+ try:
+ metadata[cpv] = bintree._pkgindex_entry(cpv)
+ except portage.exception.InvalidDependString:
+ writemsg("!!! Invalid binary package: '%s'\n" % \
+ bintree.getname(cpv), noiselevel=-1)
+
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ for cpv in set(metadata).difference(
+ self._bintree.dbapi.cpv_all()):
+ del metadata[cpv]
+
+ # We've updated the pkgindex, so set it to
+ # repopulate when necessary.
+ bintree.populated = False
+
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(metadata.values())
+ bintree._pkgindex_write(self._pkgindex)
+
+ finally:
+ locks.unlockfile(pkgindex_lock)
+
+ if onProgress:
+ if maxval == 0:
+ maxval = 1
+ onProgress(maxval, maxval)
+ return None
diff --git a/usr/lib/portage/pym/portage/emaint/modules/config/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/config/__init__.py
new file mode 100644
index 0000000..0277d39
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/config/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and clean the config tracker list for uninstalled packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'config',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "cleanconfmem",
+ 'class': "CleanConfig",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/config/config.py b/usr/lib/portage/pym/portage/emaint/modules/config/config.py
new file mode 100644
index 0000000..dad024b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/config/config.py
@@ -0,0 +1,79 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import PRIVATE_PATH
+from portage.util import grabdict, writedict
+
+class CleanConfig(object):
+
+ short_desc = "Discard any no longer installed configs from emerge's tracker list"
+
+ def __init__(self):
+ self._root = portage.settings["ROOT"]
+ self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config')
+
+	@staticmethod
+	def name():
+		return "cleanconfmem"
+
+ def load_configlist(self):
+ return grabdict(self.target)
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ configs = self.load_configlist()
+ messages = []
+ maxval = len(configs)
+ if onProgress:
+ onProgress(maxval, 0)
+ i = 0
+ keys = sorted(configs)
+ for config in keys:
+ if not os.path.exists(config):
+ messages.append(" %s" % config)
+ if onProgress:
+ onProgress(maxval, i+1)
+ i += 1
+ return self._format_output(messages)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ configs = self.load_configlist()
+ messages = []
+ maxval = len(configs)
+ if onProgress:
+ onProgress(maxval, 0)
+ i = 0
+
+ root = self._root
+ if root == "/":
+ root = None
+ modified = False
+ for config in sorted(configs):
+ if root is None:
+ full_path = config
+ else:
+ full_path = os.path.join(root, config.lstrip(os.sep))
+ if not os.path.exists(full_path):
+ modified = True
+ configs.pop(config)
+ messages.append(" %s" % config)
+ if onProgress:
+ onProgress(maxval, i+1)
+ i += 1
+ if modified:
+ writedict(configs, self.target)
+ return self._format_output(messages, True)
+
+ def _format_output(self, messages=[], cleaned=False):
+ output = []
+ if messages:
+ output.append('Not Installed:')
+ output += messages
+ tot = '------------------------------------\n Total %i Not installed'
+ if cleaned:
+ tot += ' ...Cleaned'
+ output.append(tot % len(messages))
+ return output
diff --git a/usr/lib/portage/pym/portage/emaint/modules/logs/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/logs/__init__.py
new file mode 100644
index 0000000..a7891fd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/logs/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and clean old logs in the PORT_LOGDIR."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'logs',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "logs",
+ 'class': "CleanLogs",
+ 'description': doc,
+ 'functions': ['check','clean'],
+ 'func_desc': {
+ 'clean': {
+ "short": "-C", "long": "--clean",
+ "help": "Cleans out logs more than 7 days old (cleanlogs only)" + \
+ " module-options: -t, -p",
+ 'status': "Cleaning %s",
+ 'action': 'store_true',
+ 'func': 'clean',
+ },
+ 'time': {
+ "short": "-t", "long": "--time",
+				"help": "(cleanlogs only): -t, --time Delete logs older than NUM days",
+ 'status': "",
+ 'type': int,
+ 'dest': 'NUM',
+ 'func': 'clean'
+ },
+ 'pretend': {
+ "short": "-p", "long": "--pretend",
+ "help": "(cleanlogs only): -p, --pretend Output logs that would be deleted",
+ 'status': "",
+ 'action': 'store_true',
+ 'dest': 'pretend',
+ 'func': 'clean'
+ }
+ }
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/logs/logs.py b/usr/lib/portage/pym/portage/emaint/modules/logs/logs.py
new file mode 100644
index 0000000..fe65cf5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/logs/logs.py
@@ -0,0 +1,103 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.util import shlex_split, varexpand
+
+## default clean command from make.globals
+## PORT_LOGDIR_CLEAN = 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +7 -delete'
+
+class CleanLogs(object):
+
+ short_desc = "Clean PORT_LOGDIR logs"
+
+	@staticmethod
+	def name():
+		return "logs"
+
+
+ def can_progressbar(self, func):
+ return False
+
+
+ def check(self, **kwargs):
+ if kwargs:
+ options = kwargs.get('options', None)
+ if options:
+ options['pretend'] = True
+ return self.clean(**kwargs)
+
+
+ def clean(self, **kwargs):
+ """Log directory cleaning function
+
+ @param **kwargs: optional dictionary of values used in this function are:
+ settings: portage settings instance: defaults to portage.settings
+ "PORT_LOGDIR": directory to clean
+ "PORT_LOGDIR_CLEAN": command for cleaning the logs.
+ options: dict:
+ 'NUM': int: number of days
+ 'pretend': boolean
+ """
+ messages = []
+ num_of_days = None
+ pretend = False
+ if kwargs:
+			# convoluted, I know, but the 'settings' kwarg is not passed
+			# in when called from _emerge.main.clean_logs()
+ settings = kwargs.get('settings', None)
+ if not settings:
+ settings = portage.settings
+ options = kwargs.get('options', None)
+ if options:
+ num_of_days = options.get('NUM', None)
+ pretend = options.get('pretend', False)
+
+ clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+ if clean_cmd:
+ clean_cmd = shlex_split(clean_cmd)
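+			# Adjust the find(1) expression from PORT_LOGDIR_CLEAN: replace
+			# the -mtime value with the requested number of days (dropping
+			# it entirely when NUM is 0), and strip -delete for a pretend run.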
+ if '-mtime' in clean_cmd and num_of_days is not None:
+ if num_of_days == 0:
+ i = clean_cmd.index('-mtime')
+ clean_cmd.remove('-mtime')
+ clean_cmd.pop(i)
+ else:
+ clean_cmd[clean_cmd.index('-mtime') +1] = \
+ '+%s' % str(num_of_days)
+ if pretend:
+ if "-delete" in clean_cmd:
+ clean_cmd.remove("-delete")
+
+ if not clean_cmd:
+ return []
+ rval = self._clean_logs(clean_cmd, settings)
+ messages += self._convert_errors(rval)
+ return messages
+
+
+ @staticmethod
+ def _clean_logs(clean_cmd, settings):
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ variables = {"PORT_LOGDIR" : logdir}
+ cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ rval = 127
+ return rval
+
+
+ @staticmethod
+ def _convert_errors(rval):
+ msg = []
+ if rval != os.EX_OK:
+ msg.append("PORT_LOGDIR_CLEAN command returned %s"
+ % ("%d" % rval if rval else "None"))
+ msg.append("See the make.conf(5) man page for "
+ "PORT_LOGDIR_CLEAN usage instructions.")
+ return msg
diff --git a/usr/lib/portage/pym/portage/emaint/modules/merges/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/merges/__init__.py
new file mode 100644
index 0000000..bcb2ac8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/merges/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Scan for failed merges and fix them."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'merges',
+ 'description': doc,
+ 'provides': {
+ 'merges': {
+ 'name': "merges",
+ 'class': "MergesHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix', 'purge'],
+ 'func_desc': {
+ 'purge': {
+ 'short': '-P', 'long': '--purge-tracker',
+ 'help': 'Removes the list of previously failed merges.' +
+ ' WARNING: Only use this option if you plan on' +
+				' manually fixing them or do not want them' +
+				' re-installed.',
+ 'status': "Removing %s",
+ 'action': 'store_true',
+ 'func': 'purge'
+ }
+ }
+ }
+ }
+}
diff --git a/usr/lib/portage/pym/portage/emaint/modules/merges/merges.py b/usr/lib/portage/pym/portage/emaint/modules/merges/merges.py
new file mode 100644
index 0000000..1a67cb5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/merges/merges.py
@@ -0,0 +1,290 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.actions import load_emerge_config
+
+import portage
+from portage import os, _unicode_encode
+from portage.const import MERGING_IDENTIFIER, PORTAGE_BIN_PATH, PRIVATE_PATH, \
+ VDB_PATH
+from portage.dep import isvalidatom
+
+import shutil
+import subprocess
+import sys
+import time
+
+class TrackingFile(object):
+ """File for keeping track of failed merges."""
+
+
+ def __init__(self, tracking_path):
+ """
+ Create a TrackingFile object.
+
+ @param tracking_path: file path used to keep track of failed merges
+ @type tracking_path: String
+ """
+ self._tracking_path = _unicode_encode(tracking_path)
+
+
+ def save(self, failed_pkgs):
+ """
+ Save the specified packages that failed to merge.
+
+ @param failed_pkgs: dictionary of failed packages
+ @type failed_pkgs: dict
+ """
+ tracking_path = self._tracking_path
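+		# One line per failed merge, in the form "<category/package> <mtime>".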
+ lines = ['%s %s' % (pkg, mtime) for pkg, mtime in failed_pkgs.items()]
+ portage.util.write_atomic(tracking_path, '\n'.join(lines))
+
+
+ def load(self):
+ """
+ Load previously failed merges.
+
+ @rtype: dict
+ @return: dictionary of packages that failed to merge
+ """
+ tracking_path = self._tracking_path
+ if not self.exists():
+ return {}
+ failed_pkgs = {}
+ with open(tracking_path, 'r') as tracking_file:
+ for failed_merge in tracking_file:
+ pkg, mtime = failed_merge.strip().split()
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def exists(self):
+ """
+ Check if tracking file exists.
+
+ @rtype: bool
+ @return: true if tracking file exists, false otherwise
+ """
+ return os.path.exists(self._tracking_path)
+
+
+ def purge(self):
+ """Delete previously saved tracking file if one exists."""
+ if self.exists():
+ os.remove(self._tracking_path)
+
+
+ def __iter__(self):
+ """
+		Provide an iterator over failed merges.
+
+ @return: iterator of packages that failed to merge
+ """
+		return iter(self.load().items())
+
+
+class MergesHandler(object):
+ """Handle failed package merges."""
+
+ short_desc = "Remove failed merges"
+
+ @staticmethod
+ def name():
+ return "merges"
+
+
+ def __init__(self):
+ """Create MergesHandler object."""
+ eroot = portage.settings['EROOT']
+		tracking_path = os.path.join(eroot, PRIVATE_PATH, 'failed-merges')
+ self._tracking_file = TrackingFile(tracking_path)
+ self._vardb_path = os.path.join(eroot, VDB_PATH)
+
+
+ def can_progressbar(self, func):
+ return func == 'check'
+
+
+ def _scan(self, onProgress=None):
+ """
+ Scan the file system for failed merges and return any found.
+
+ @param onProgress: function to call for updating progress
+ @type onProgress: Function
+ @rtype: dict
+		@return: dictionary of packages that failed to merge
+ """
+ failed_pkgs = {}
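+		# Walk each category directory in the vardb, collecting entries that
+		# still contain MERGING_IDENTIFIER from an interrupted merge.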
+ for cat in os.listdir(self._vardb_path):
+ pkgs_path = os.path.join(self._vardb_path, cat)
+ if not os.path.isdir(pkgs_path):
+ continue
+ pkgs = os.listdir(pkgs_path)
+ maxval = len(pkgs)
+ for i, pkg in enumerate(pkgs):
+ if onProgress:
+ onProgress(maxval, i+1)
+ if MERGING_IDENTIFIER in pkg:
+ mtime = int(os.stat(os.path.join(pkgs_path, pkg)).st_mtime)
+ pkg = os.path.join(cat, pkg)
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def _failed_pkgs(self, onProgress=None):
+ """
+ Return failed packages from both the file system and tracking file.
+
+ @rtype: dict
+		@return: dictionary of packages that failed to merge
+ """
+ failed_pkgs = self._scan(onProgress)
+ for pkg, mtime in self._tracking_file:
+ if pkg not in failed_pkgs:
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def _remove_failed_dirs(self, failed_pkgs):
+ """
+ Remove the directories of packages that failed to merge.
+
+ @param failed_pkgs: failed packages whose directories to remove
+		@type failed_pkgs: dict
+ """
+ for failed_pkg in failed_pkgs:
+ pkg_path = os.path.join(self._vardb_path, failed_pkg)
+ # delete failed merge directory if it exists (it might not exist
+ # if loaded from tracking file)
+ if os.path.exists(pkg_path):
+ shutil.rmtree(pkg_path)
+ # TODO: try removing package CONTENTS to prevent orphaned
+ # files
+
+
+ def _get_pkg_atoms(self, failed_pkgs, pkg_atoms, pkg_invalid_entries):
+ """
+ Get the package atoms for the specified failed packages.
+
+ @param failed_pkgs: failed packages to iterate
+ @type failed_pkgs: dict
+ @param pkg_atoms: add package atoms to this set
+ @type pkg_atoms: set
+ @param pkg_invalid_entries: add any packages that are invalid to this set
+ @type pkg_invalid_entries: set
+ """
+
+ emerge_config = load_emerge_config()
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ for failed_pkg in failed_pkgs:
+ # validate pkg name
+ pkg_name = '%s' % failed_pkg.replace(MERGING_IDENTIFIER, '')
+ pkg_atom = '=%s' % pkg_name
+
+ if not isvalidatom(pkg_atom):
+ pkg_invalid_entries.add("'%s' is an invalid package atom."
+ % pkg_atom)
+ if not portdb.cpv_exists(pkg_name):
+ pkg_invalid_entries.add(
+ "'%s' does not exist in the portage tree." % pkg_name)
+ pkg_atoms.add(pkg_atom)
+
+
+ def _emerge_pkg_atoms(self, module_output, pkg_atoms):
+ """
+		Emerge the specified package atoms.
+
+		@param module_output: object that output will be written to
+		@type module_output: Class
+		@param pkg_atoms: package atoms to emerge
+ @type pkg_atoms: set
+ @rtype: list
+ @return: List of results
+ """
+ # TODO: rewrite code to use portage's APIs instead of a subprocess
+ env = {
+ "FEATURES" : "-collision-protect -protect-owned",
+ "PATH" : os.environ["PATH"]
+ }
+ emerge_cmd = (
+ portage._python_interpreter,
+ '-b',
+ os.path.join(PORTAGE_BIN_PATH, 'emerge'),
+ '--quiet',
+ '--oneshot',
+ '--complete-graph=y'
+ )
+ results = []
+ msg = 'Re-Emerging packages that failed to merge...\n'
+ if module_output:
+ module_output.write(msg)
+ else:
+ module_output = subprocess.PIPE
+ results.append(msg)
+ proc = subprocess.Popen(emerge_cmd + tuple(pkg_atoms), env=env,
+ stdout=module_output, stderr=sys.stderr)
+ output = proc.communicate()[0]
+ if output:
+ results.append(output)
+ if proc.returncode != os.EX_OK:
+ emerge_status = "Failed to emerge '%s'" % (' '.join(pkg_atoms))
+ else:
+ emerge_status = "Successfully emerged '%s'" % (' '.join(pkg_atoms))
+ results.append(emerge_status)
+ return results
+
+
+ def check(self, **kwargs):
+ """Check for failed merges."""
+ onProgress = kwargs.get('onProgress', None)
+ failed_pkgs = self._failed_pkgs(onProgress)
+ errors = []
+ for pkg, mtime in failed_pkgs.items():
+ mtime_str = time.ctime(int(mtime))
+ errors.append("'%s' failed to merge on '%s'" % (pkg, mtime_str))
+ return errors
+
+
+ def fix(self, **kwargs):
+ """Attempt to fix any failed merges."""
+ module_output = kwargs.get('module_output', None)
+ failed_pkgs = self._failed_pkgs()
+ if not failed_pkgs:
+ return ['No failed merges found.']
+
+ pkg_invalid_entries = set()
+ pkg_atoms = set()
+ self._get_pkg_atoms(failed_pkgs, pkg_atoms, pkg_invalid_entries)
+ if pkg_invalid_entries:
+ return pkg_invalid_entries
+
+ try:
+ self._tracking_file.save(failed_pkgs)
+ except IOError as ex:
+ errors = ['Unable to save failed merges to tracking file: %s\n'
+ % str(ex)]
+ errors.append(', '.join(sorted(failed_pkgs)))
+ return errors
+ self._remove_failed_dirs(failed_pkgs)
+ results = self._emerge_pkg_atoms(module_output, pkg_atoms)
+ # list any new failed merges
+ for pkg in sorted(self._scan()):
+ results.append("'%s' still found as a failed merge." % pkg)
+ # reload config and remove successful packages from tracking file
+ emerge_config = load_emerge_config()
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ still_failed_pkgs = {}
+ for pkg, mtime in failed_pkgs.items():
+ pkg_name = '%s' % pkg.replace(MERGING_IDENTIFIER, '')
+ if not vardb.cpv_exists(pkg_name):
+ still_failed_pkgs[pkg] = mtime
+ self._tracking_file.save(still_failed_pkgs)
+ return results
+
+
+ def purge(self, **kwargs):
+ """Attempt to remove previously saved tracking file."""
+ if not self._tracking_file.exists():
+ return ['Tracking file not found.']
+ self._tracking_file.purge()
+ return ['Removed tracking file.']
diff --git a/usr/lib/portage/pym/portage/emaint/modules/move/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/move/__init__.py
new file mode 100644
index 0000000..5162430
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/move/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Perform package move updates for installed and binary packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'move',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "moveinst",
+ 'class': "MoveInstalled",
+ 'description': doc,
+ 'options': ['check', 'fix'],
+ 'functions': ['check', 'fix'],
+ 'func_desc': {
+ }
+ },
+ 'module2':{
+ 'name': "movebin",
+ 'class': "MoveBinary",
+ 'description': "Perform package move updates for binary packages",
+ 'functions': ['check', 'fix'],
+ 'func_desc': {
+ }
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/move/move.py b/usr/lib/portage/pym/portage/emaint/modules/move/move.py
new file mode 100644
index 0000000..41ca167
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/move/move.py
@@ -0,0 +1,181 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import InvalidData
+from _emerge.Package import Package
+from portage.versions import _pkg_str
+
+class MoveHandler(object):
+
+ def __init__(self, tree, porttree):
+ self._tree = tree
+ self._portdb = porttree.dbapi
+ self._update_keys = Package._dep_keys + ("PROVIDE",)
+ self._master_repo = self._portdb.repositories.mainRepo()
+ if self._master_repo is not None:
+ self._master_repo = self._master_repo.name
+
+ def _grab_global_updates(self):
+ from portage.update import grab_updates, parse_updates
+ retupdates = {}
+ errors = []
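+		# Collect update commands from each repository's profiles/updates
+		# directory, keyed by repository name.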
+
+ for repo_name in self._portdb.getRepositories():
+ repo = self._portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+				commands, errors1 = parse_updates(mycontent)
+				upd_commands.extend(commands)
+				errors.extend(errors1)
+ retupdates[repo_name] = upd_commands
+
+ if self._master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[self._master_repo]
+
+ return retupdates, errors
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ allupdates, errors = self._grab_global_updates()
+ # Matching packages and moving them is relatively fast, so the
+ # progress bar is updated in indeterminate mode.
+ match = self._tree.dbapi.match
+ aux_get = self._tree.dbapi.aux_get
+ pkg_str = self._tree.dbapi._pkg_str
+ settings = self._tree.dbapi.settings
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ origcp, newcp = update_cmd[1:]
+ for cpv in match(origcp):
+ try:
+ cpv = pkg_str(cpv, origcp.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
+ errors.append("'%s' moved to '%s'" % (cpv, newcp))
+ elif update_cmd[0] == "slotmove":
+ pkg, origslot, newslot = update_cmd[1:]
+ atom = pkg.with_slot(origslot)
+ for cpv in match(atom):
+ try:
+ cpv = pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
+ errors.append("'%s' slot moved from '%s' to '%s'" % \
+ (cpv, origslot, newslot))
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ cpv_all = self._tree.dbapi.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in self._update_keys)
+ try:
+ updates = allupdates[pkg.repo]
+ except KeyError:
+ try:
+ updates = allupdates['DEFAULT']
+ except KeyError:
+ continue
+ if not updates:
+ continue
+ metadata_updates = \
+ portage.update_dbentries(updates, metadata, parent=pkg)
+ if metadata_updates:
+ errors.append("'%s' has outdated metadata" % cpv)
+ if onProgress:
+ onProgress(maxval, i+1)
+ return errors
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ allupdates, errors = self._grab_global_updates()
+ # Matching packages and moving them is relatively fast, so the
+ # progress bar is updated in indeterminate mode.
+ move = self._tree.dbapi.move_ent
+ slotmove = self._tree.dbapi.move_slot_ent
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ move(update_cmd, repo_match=repo_match)
+ elif update_cmd[0] == "slotmove":
+ slotmove(update_cmd, repo_match=repo_match)
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ self._tree.dbapi.update_ents(allupdates, onProgress=onProgress)
+ return errors
+
+class MoveInstalled(MoveHandler):
+
+ short_desc = "Perform package move updates for installed packages"
+
+	@staticmethod
+	def name():
+		return "moveinst"
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ MoveHandler.__init__(self, portage.db[eroot]["vartree"], portage.db[eroot]["porttree"])
+
+class MoveBinary(MoveHandler):
+
+ short_desc = "Perform package move updates for binary packages"
+
+	@staticmethod
+	def name():
+		return "movebin"
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ MoveHandler.__init__(self, portage.db[eroot]["bintree"], portage.db[eroot]['porttree'])
diff --git a/usr/lib/portage/pym/portage/emaint/modules/resume/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/resume/__init__.py
new file mode 100644
index 0000000..ebe4a37
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/resume/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and fix problems in the resume and/or resume_backup files."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'resume',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "cleanresume",
+ 'class': "CleanResume",
+ 'description': "Discard emerge --resume merge lists",
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/resume/resume.py b/usr/lib/portage/pym/portage/emaint/modules/resume/resume.py
new file mode 100644
index 0000000..1bada52
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/resume/resume.py
@@ -0,0 +1,58 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+
+class CleanResume(object):
+
+ short_desc = "Discard emerge --resume merge lists"
+
+	@staticmethod
+	def name():
+		return "cleanresume"
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ messages = []
+ mtimedb = portage.mtimedb
+ resume_keys = ("resume", "resume_backup")
+ maxval = len(resume_keys)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, k in enumerate(resume_keys):
+ try:
+ d = mtimedb.get(k)
+ if d is None:
+ continue
+ if not isinstance(d, dict):
+ messages.append("unrecognized resume list: '%s'" % k)
+ continue
+ mergelist = d.get("mergelist")
+ if mergelist is None or not hasattr(mergelist, "__len__"):
+ messages.append("unrecognized resume list: '%s'" % k)
+ continue
+ messages.append("resume list '%s' contains %d packages" % \
+ (k, len(mergelist)))
+ finally:
+ if onProgress:
+ onProgress(maxval, i+1)
+ return messages
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ delete_count = 0
+ mtimedb = portage.mtimedb
+ resume_keys = ("resume", "resume_backup")
+ maxval = len(resume_keys)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, k in enumerate(resume_keys):
+ try:
+ if mtimedb.pop(k, None) is not None:
+ delete_count += 1
+ finally:
+ if onProgress:
+ onProgress(maxval, i+1)
+ if delete_count:
+ mtimedb.commit()
diff --git a/usr/lib/portage/pym/portage/emaint/modules/world/__init__.py b/usr/lib/portage/pym/portage/emaint/modules/world/__init__.py
new file mode 100644
index 0000000..0af73d4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/world/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and fix problems in the world file."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'world',
+ 'description': doc,
+ 'provides':{
+ 'module1':{
+ 'name': "world",
+ 'class': "WorldHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/usr/lib/portage/pym/portage/emaint/modules/world/world.py b/usr/lib/portage/pym/portage/emaint/modules/world/world.py
new file mode 100644
index 0000000..2c9dbff
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/modules/world/world.py
@@ -0,0 +1,89 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+
+
+class WorldHandler(object):
+
+ short_desc = "Fix problems in the world file"
+
+	@staticmethod
+	def name():
+		return "world"
+
+ def __init__(self):
+ self.invalid = []
+ self.not_installed = []
+ self.okay = []
+ from portage._sets import load_default_config
+ setconfig = load_default_config(portage.settings,
+ portage.db[portage.settings['EROOT']])
+ self._sets = setconfig.getSets()
+
+ def _check_world(self, onProgress):
+ eroot = portage.settings['EROOT']
+ self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
+ self.found = os.access(self.world_file, os.R_OK)
+ vardb = portage.db[eroot]["vartree"].dbapi
+
+ from portage._sets import SETPREFIX
+ sets = self._sets
+ world_atoms = list(sets["selected"])
+ maxval = len(world_atoms)
+ if onProgress:
+ onProgress(maxval, 0)
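+		# Classify each atom from the 'selected' set: entries starting with
+		# SETPREFIX must name a defined set, package atoms must match an
+		# installed package, and anything else is invalid.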
+ for i, atom in enumerate(world_atoms):
+ if not isinstance(atom, portage.dep.Atom):
+ if atom.startswith(SETPREFIX):
+ s = atom[len(SETPREFIX):]
+ if s in sets:
+ self.okay.append(atom)
+ else:
+ self.not_installed.append(atom)
+ else:
+ self.invalid.append(atom)
+ if onProgress:
+ onProgress(maxval, i+1)
+ continue
+ okay = True
+ if not vardb.match(atom):
+ self.not_installed.append(atom)
+ okay = False
+ if okay:
+ self.okay.append(atom)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ self._check_world(onProgress)
+ errors = []
+ if self.found:
+ errors += ["'%s' is not a valid atom" % x for x in self.invalid]
+ errors += ["'%s' is not installed" % x for x in self.not_installed]
+ else:
+ errors.append(self.world_file + " could not be opened for reading")
+ return errors
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ world_set = self._sets["selected"]
+ world_set.lock()
+ try:
+ world_set.load() # maybe it's changed on disk
+ before = set(world_set)
+ self._check_world(onProgress)
+ after = set(self.okay)
+ errors = []
+ if before != after:
+ try:
+ world_set.replace(self.okay)
+ except portage.exception.PortageException:
+ errors.append("%s could not be opened for writing" % \
+ self.world_file)
+ return errors
+ finally:
+ world_set.unlock()
+
diff --git a/usr/lib/portage/pym/portage/emaint/progress.py b/usr/lib/portage/pym/portage/emaint/progress.py
new file mode 100644
index 0000000..e43c2af
--- /dev/null
+++ b/usr/lib/portage/pym/portage/emaint/progress.py
@@ -0,0 +1,61 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+import signal
+
+import portage
+
+
+class ProgressHandler(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.curval = 0
+ self.maxval = 0
+ self.last_update = 0
+ self.min_display_latency = 0.2
+
+ def onProgress(self, maxval, curval):
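+		# Record the progress values and redraw at most once per
+		# min_display_latency seconds.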
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self.last_update >= self.min_display_latency:
+ self.last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
+
+class ProgressBar(ProgressHandler):
+ """Class to set up and return a Progress Bar"""
+
+ def __init__(self, isatty, **kwargs):
+ self.isatty = isatty
+ self.kwargs = kwargs
+ ProgressHandler.__init__(self)
+ self.progressBar = None
+
+ def start(self):
+ if self.isatty:
+ self.progressBar = portage.output.TermProgressBar(**self.kwargs)
+ signal.signal(signal.SIGWINCH, self.sigwinch_handler)
+ else:
+ self.onProgress = None
+ return self.onProgress
+
+ def set_label(self, _label):
+ self.kwargs['label'] = _label
+
+ def display(self):
+ self.progressBar.set(self.curval, self.maxval)
+
+ def sigwinch_handler(self, signum, frame):
+ lines, self.progressBar.term_columns = \
+ portage.output.get_term_size()
+
+ def stop(self):
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
diff --git a/usr/lib/portage/pym/portage/env/__init__.py b/usr/lib/portage/pym/portage/env/__init__.py
new file mode 100644
index 0000000..17b66d1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/env/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2007 Gentoo Foundation
+# License: GPL2
+
diff --git a/usr/lib/portage/pym/portage/env/config.py b/usr/lib/portage/pym/portage/env/config.py
new file mode 100644
index 0000000..865d835
--- /dev/null
+++ b/usr/lib/portage/pym/portage/env/config.py
@@ -0,0 +1,105 @@
+# config.py -- Portage Config
+# Copyright 2007-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["ConfigLoaderKlass", "GenericFile", "PackageKeywordsFile",
+ "PackageUseFile", "PackageMaskFile", "PortageModulesFile"]
+
+from portage.cache.mappings import UserDict
+from portage.env.loaders import KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader
+
+class ConfigLoaderKlass(UserDict):
+ """
+ A base class stub for things to inherit from.
+ Users may want a non-file backend.
+ """
+
+ def __init__(self, loader):
+ """
+ @param loader: a loader instance whose load() returns two dicts:
+ the first being a data dict, the second a dict of errors.
+ """
+ UserDict.__init__(self)
+ self._loader = loader
+
+ def load(self):
+ """
+ Load the data from the loader.
+
+ @throws LoaderError:
+ """
+
+ self.data, self.errors = self._loader.load()
+
+class GenericFile(UserDict):
+ """
+ Like ConfigLoaderKlass, but tries each known loader in turn until
+ one of them returns data without errors. This is slow, but useful
+ when the type of the file being loaded is not known in advance.
+ """
+
+ loaders = [KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader]
+
+ def __init__(self, filename):
+ UserDict.__init__(self)
+ self.filename = filename
+
+ def load(self):
+ for loader in self.loaders:
+ l = loader(self.filename, None)
+ data, errors = l.load()
+ if len(data) and not len(errors):
+ (self.data, self.errors) = (data, errors)
+ return
+
+
+class PackageKeywordsFile(ConfigLoaderKlass):
+ """
+ Inherits from ConfigLoaderKlass; implements a file-based backend.
+ """
+
+ default_loader = KeyListFileLoader
+
+ def __init__(self, filename):
+ super(PackageKeywordsFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageUseFile(ConfigLoaderKlass):
+ """
+ Inherits from ConfigLoaderKlass; implements a file-based backend. Doesn't handle recursion yet.
+ """
+
+ default_loader = KeyListFileLoader
+ def __init__(self, filename):
+ super(PackageUseFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageMaskFile(ConfigLoaderKlass):
+ """
+ A class that implements a file-based package.mask
+
+ Entries in package.mask are of the form:
+ atom1
+ atom2
+ or optionally
+ -atom3
+ to revert a previous mask; this only works when masking files are stacked
+ """
+
+ default_loader = ItemFileLoader
+
+ def __init__(self, filename):
+ super(PackageMaskFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PortageModulesFile(ConfigLoaderKlass):
+ """
+ File Class for /etc/portage/modules
+ """
+
+ default_loader = KeyValuePairFileLoader
+
+ def __init__(self, filename):
+ super(PortageModulesFile, self).__init__(
+ self.default_loader(filename, validator=None))
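+
+# A minimal usage sketch (the path is only an example):
+#
+#   kw = PackageKeywordsFile("/etc/portage/package.keywords")
+#   kw.load()
+#   # kw.data maps each atom to a list of keywords;
+#   # kw.errors maps filenames to lists of parse errors.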
diff --git a/usr/lib/portage/pym/portage/env/loaders.py b/usr/lib/portage/pym/portage/env/loaders.py
new file mode 100644
index 0000000..f869884
--- /dev/null
+++ b/usr/lib/portage/pym/portage/env/loaders.py
@@ -0,0 +1,327 @@
+# loaders.py -- Portage Config File Loaders
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import stat
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+
+class LoaderError(Exception):
+
+ def __init__(self, resource, error_msg):
+ """
+ @param resource: Resource that failed to load (file/sql/etc)
+ @type resource: String
+ @param error_msg: Error from underlying Loader system
+ @type error_msg: String
+ """
+
+ self.resource = resource
+ self.error_msg = error_msg
+
+ def __str__(self):
+ return "Failed while loading resource: %s, error was: %s" % (
+ self.resource, self.error_msg)
+
+
+def RecursiveFileLoader(filename):
+ """
+ If filename is a file, return a generator that yields filename;
+ if filename is a directory, return a generator that yields the
+ files in that directory.
+
+ Ignore files beginning with . or ending in ~.
+ Prune CVS directories.
+
+ @param filename: name of a file/directory to traverse
+ @rtype: generator
+ @return: generator yielding the files to process
+ """
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return
+ if stat.S_ISDIR(st.st_mode):
+ for root, dirs, files in os.walk(filename):
+ for d in list(dirs):
+ if d[:1] == '.' or d == 'CVS':
+ dirs.remove(d)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == '.' or f[-1:] == '~':
+ continue
+ yield os.path.join(root, f)
+ else:
+ yield filename
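+
+# For example, for a directory /etc/portage/package.use containing the
+# files "common" and ".hidden", this yields only ".../common" (paths
+# are illustrative):
+#
+#   for path in RecursiveFileLoader("/etc/portage/package.use"):
+#       print(path)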
+
+
+class DataLoader(object):
+
+ def __init__(self, validator):
+ f = validator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._validate = f
+
+ def load(self):
+ """
+ Function to do the actual work of a Loader
+ """
+ raise NotImplementedError("Please override in a subclass")
+
+class EnvLoader(DataLoader):
+ """ Class to access data in the environment """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+
+ def load(self):
+ return os.environ
+
+class TestTextLoader(DataLoader):
+ """ You give it some data, it 'loads' it for you, no filesystem access
+ """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+ self.data = {}
+ self.errors = {}
+
+ def setData(self, text):
+ """Explicitly set the data field
+ Args:
+ text - a dict of data typical of Loaders
+ Returns:
+ None
+ """
+ if isinstance(text, dict):
+ self.data = text
+ else:
+ raise ValueError("setData requires a dict argument")
+
+ def setErrors(self, errors):
+ self.errors = errors
+
+ def load(self):
+ return (self.data, self.errors)
+
+
+class FileLoader(DataLoader):
+ """ Class to access data in files """
+
+ def __init__(self, filename, validator):
+ """
+ Args:
+ filename : Name of file or directory to open
+ validator : class with validate() method to validate data.
+ """
+ DataLoader.__init__(self, validator)
+ self.fname = filename
+
+ def load(self):
+ """
+ Return the {source: {key: value}} pairs from a file
+ Return the {source: [list of errors]} pairs from a load
+
+ If self.fname is a directory, every file found by
+ RecursiveFileLoader(self.fname) is loaded.
+ @rtype: tuple
+ @return:
+ Returns (data, errors); both may be empty dicts or populated.
+ """
+ data = {}
+ errors = {}
+ # Look up lineParser once instead of on every line, since the
+ # attribute lookup may be expensive when it digs into child classes.
+ func = self.lineParser
+ for fn in RecursiveFileLoader(self.fname):
+ try:
+ with io.open(_unicode_encode(fn,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ except EnvironmentError as e:
+ if e.errno == errno.EACCES:
+ writemsg(_("Permission denied: '%s'\n") % fn, noiselevel=-1)
+ del e
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ del e
+ else:
+ raise
+ else:
+ for line_num, line in enumerate(lines):
+ func(line, line_num, data, errors)
+ return (data, errors)
+
+ def lineParser(self, line, line_num, data, errors):
+ """ This function parses 1 line at a time
+ Args:
+ line: a string representing 1 line of a file
+ line_num: an integer representing what line we are processing
+ data: a dict that contains the data we have extracted from the file
+ already
+ errors: a dict representing parse errors.
+ Returns:
+ Nothing (None). Writes to data and errors
+ """
+ raise NotImplementedError("Please over-ride this in a child class")
+
+class ItemFileLoader(FileLoader):
+ """
+ Class to load data from a file full of items one per line
+
+ >>> item1
+ >>> item2
+ >>> item3
+ >>> item1
+
+ becomes { 'item1':None, 'item2':None, 'item3':None }
+ Note that due to the data store being a dict, duplicates
+ are removed.
+ """
+
+ def __init__(self, filename, validator):
+ FileLoader.__init__(self, filename, validator)
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if not len(split):
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ data[key] = None
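+
+# A sketch of the result for a file containing a comment line and the
+# line "sys-apps/foo" (filename is hypothetical):
+#
+#   data, errors = ItemFileLoader("/etc/portage/package.mask", None).load()
+#   # data == {'sys-apps/foo': None}; errors is empty on success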
+
+class KeyListFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key [list] tuples
+
+ >>>>key foo1 foo2 foo3
+ becomes
+ {'key':['foo1','foo2','foo3']}
+ """
+
+ def __init__(self, filename, validator=None, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if len(split) < 1:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ value = split[1:]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ if key in data:
+ # extend, not append, so the value stays a flat list of strings
+ data[key].extend(value)
+ else:
+ data[key] = value
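+
+# A sketch for a file containing the line "sys-apps/foo flag1 flag2":
+#
+#   data, errors = KeyListFileLoader("/etc/portage/package.use").load()
+#   # data == {'sys-apps/foo': ['flag1', 'flag2']}; a repeated key
+#   # extends the existing list (see lineParser above)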
+
+
+class KeyValuePairFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key=value pairs
+
+ >>>>key=value
+ >>>>foo=bar
+ becomes:
+ {'key':'value',
+ 'foo':'bar'}
+ """
+
+ def __init__(self, filename, validator, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split('=', 1)
+ if len(split) < 2:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data %s")
+ % (line_num + 1, line))
+ return
+ key = split[0].strip()
+ value = split[1].strip()
+ if not key:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed key at line: %s, key %s")
+ % (line_num + 1, key))
+ return
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ data[key] = value
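+
+# A sketch for a file containing the line "portdbapi.auxdbmodule = foo.bar"
+# (filename and value are illustrative):
+#
+#   data, errors = KeyValuePairFileLoader("/etc/portage/modules", None).load()
+#   # data == {'portdbapi.auxdbmodule': 'foo.bar'}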
diff --git a/usr/lib/portage/pym/portage/env/validators.py b/usr/lib/portage/pym/portage/env/validators.py
new file mode 100644
index 0000000..4d11d69
--- /dev/null
+++ b/usr/lib/portage/pym/portage/env/validators.py
@@ -0,0 +1,20 @@
+# validators.py Portage File Loader Code
+# Copyright 2007 Gentoo Foundation
+
+from portage.dep import isvalidatom
+
+ValidAtomValidator = isvalidatom
+
+def PackagesFileValidator(atom):
+ """ This function mutates atoms that begin with - or *
+ It then checks to see if that atom is valid, and if
+ so returns True, else it returns False.
+
+ Args:
+ atom: a string representing an atom such as sys-apps/portage-2.1
+ """
+ if atom.startswith("*") or atom.startswith("-"):
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ return False
+ return True
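+
+# For example (atoms shown are illustrative):
+#
+#   PackagesFileValidator("*>=sys-apps/portage-2.1")  # True: "*" stripped
+#   PackagesFileValidator("-sys-apps/portage")        # True: "-" stripped
+#   PackagesFileValidator("not an atom")              # False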
diff --git a/usr/lib/portage/pym/portage/exception.py b/usr/lib/portage/pym/portage/exception.py
new file mode 100644
index 0000000..ef62e7a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/exception.py
@@ -0,0 +1,204 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ if sys.hexversion >= 0x3000000:
+ def __init__(self, value):
+ self.value = value[:]
+
+ def __str__(self):
+ if isinstance(self.value, str):
+ return self.value
+ else:
+ return repr(self.value)
+ else:
+ def __init__(self, value):
+ self.value = value[:]
+ if isinstance(self.value, basestring):
+ self.value = _unicode_decode(self.value,
+ encoding=_encodings['content'], errors='replace')
+
+ def __unicode__(self):
+ if isinstance(self.value, unicode):
+ return self.value
+ else:
+ return _unicode_decode(repr(self.value),
+ encoding=_encodings['content'], errors='replace')
+
+ def __str__(self):
+ if isinstance(self.value, unicode):
+ return _unicode_encode(self.value,
+ encoding=_encodings['content'], errors='backslashreplace')
+ else:
+ return repr(self.value)
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+ def __init__(self, value, errors=None):
+ PortageException.__init__(self, value)
+ self.errors = errors
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+
+class IncorrectParameter(PortageException):
+ """A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+ """A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+ """An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+ def __init__(self, value, category=None):
+ PortageException.__init__(self, value)
+ self.category = category
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+
+class IsADirectory(PortageException):
+ """A directory was found when it was expected to be a file"""
+ from errno import EISDIR as errno
+
+class OperationNotPermitted(PortageException):
+ """An operation was not permitted operating system"""
+ from errno import EPERM as errno
+
+class OperationNotSupported(PortageException):
+ """Operation not supported"""
+ from errno import EOPNOTSUPP as errno
+
+class PermissionDenied(PortageException):
+ """Permission denied"""
+ from errno import EACCES as errno
+
+class TryAgain(PortageException):
+ """Try again"""
+ from errno import EAGAIN as errno
+
+class TimeoutException(PortageException):
+ """Operation timed out"""
+ # NOTE: ETIME is undefined on FreeBSD (bug #336875)
+ #from errno import ETIME as errno
+
+class AlarmSignal(TimeoutException):
+ def __init__(self, value, signum=None, frame=None):
+ TimeoutException.__init__(self, value)
+ self.signum = signum
+ self.frame = frame
+
+ @classmethod
+ def register(cls, time):
+ signal.signal(signal.SIGALRM, cls._signal_handler)
+ signal.alarm(time)
+
+ @classmethod
+ def unregister(cls):
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+ @classmethod
+ def _signal_handler(cls, signum, frame):
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+ raise AlarmSignal("alarm signal",
+ signum=signum, frame=frame)
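+
+# A typical use, bounding a blocking operation (sketch; do_blocking_io
+# is hypothetical):
+#
+#   try:
+#       AlarmSignal.register(30)  # raise AlarmSignal after 30 seconds
+#       try:
+#           do_blocking_io()
+#       finally:
+#           AlarmSignal.unregister()
+#   except AlarmSignal:
+#       print("operation timed out")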
+
+class ReadOnlyFileSystem(PortageException):
+ """Read-only file system"""
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+
+class AmbiguousPackageName(ValueError, PortageException):
+ """Raised by portage.cpv_expand() when the package name is ambiguous due
+ to the existence of multiple matches in different categories. This inherits
+ from ValueError, for backward compatibility with calling code that already
+ handles ValueError."""
+ def __str__(self):
+ return ValueError.__str__(self)
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+
+class PackageSetNotFound(PortagePackageException):
+ """Missing package set"""
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+ """Malformed atom spec"""
+ def __init__(self, value, category=None):
+ PortagePackageException.__init__(self, value)
+ self.category = category
+
+class UnsupportedAPIException(PortagePackageException):
+ """Unsupported API"""
+ def __init__(self, cpv, eapi):
+ self.cpv, self.eapi = cpv, eapi
+ def __str__(self):
+ eapi = self.eapi
+ if not isinstance(eapi, basestring):
+ eapi = str(eapi)
+ eapi = eapi.lstrip("-")
+ msg = _("Unable to do any operations on '%(cpv)s', since "
+ "its EAPI is higher than this portage version's. Please upgrade"
+ " to a portage version that supports EAPI '%(eapi)s'.") % \
+ {"cpv": self.cpv, "eapi": eapi}
+ return _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+class SignatureException(PortageException):
+ """Signature was not present in the checked file"""
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+
diff --git a/usr/lib/portage/pym/portage/getbinpkg.py b/usr/lib/portage/pym/portage/getbinpkg.py
new file mode 100644
index 0000000..997cd2e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/getbinpkg.py
@@ -0,0 +1,935 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.output import colorize
+from portage.cache.mappings import slot_dict_class
+from portage.localization import _
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.package.ebuild.fetch import _hide_url_passwd
+from _emerge.Package import _all_metadata_keys
+
+import sys
+import socket
+import time
+import tempfile
+import base64
+from portage.const import CACHE_PATH
+import warnings
+
+_all_errors = [NotImplementedError, ValueError, socket.error]
+
+try:
+ from html.parser import HTMLParser as html_parser_HTMLParser
+except ImportError:
+ from HTMLParser import HTMLParser as html_parser_HTMLParser
+
+try:
+ from urllib.parse import unquote as urllib_parse_unquote
+except ImportError:
+ from urllib2 import unquote as urllib_parse_unquote
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ import ftplib
+except ImportError as e:
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT FTPLIB: ") + str(e) + "\n")
+else:
+ _all_errors.extend(ftplib.all_errors)
+
+try:
+ try:
+ from http.client import HTTPConnection as http_client_HTTPConnection
+ from http.client import BadStatusLine as http_client_BadStatusLine
+ from http.client import ResponseNotReady as http_client_ResponseNotReady
+ from http.client import error as http_client_error
+ except ImportError:
+ from httplib import HTTPConnection as http_client_HTTPConnection
+ from httplib import BadStatusLine as http_client_BadStatusLine
+ from httplib import ResponseNotReady as http_client_ResponseNotReady
+ from httplib import error as http_client_error
+except ImportError as e:
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT HTTP.CLIENT: ") + str(e) + "\n")
+else:
+ _all_errors.append(http_client_error)
+
+_all_errors = tuple(_all_errors)
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+def make_metadata_dict(data):
+
+ warnings.warn("portage.getbinpkg.make_metadata_dict() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myid, _myglob = data
+
+ mydict = {}
+ for k_bytes in portage.xpak.getindex_mem(myid):
+ k = _unicode_decode(k_bytes,
+ encoding=_encodings['repo.content'], errors='replace')
+ if k not in _all_metadata_keys and k != "CATEGORY":
+ continue
+ v = _unicode_decode(portage.xpak.getitem(data, k_bytes),
+ encoding=_encodings['repo.content'], errors='replace')
+ mydict[k] = v
+
+ return mydict
+
+class ParseLinks(html_parser_HTMLParser):
+ """Parser class that overrides HTMLParser to grab all anchors from an html
+ page and provide suffix and prefix limitors"""
+ def __init__(self):
+
+ warnings.warn("portage.getbinpkg.ParseLinks is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ self.PL_anchors = []
+ html_parser_HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self, prefix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.startswith(prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self, suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.endswith(suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self, tag):
+ pass
+
+ def handle_starttag(self, tag, attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(urllib_parse_unquote(x[1]))
+
+
+def create_conn(baseurl, conn=None):
+ """Takes a protocol://site:port/address url, and an
+ optional connection. If connection is already active, it is passed on.
+ baseurl is reduced to address and is returned in tuple (conn,address)"""
+
+ warnings.warn("portage.getbinpkg.create_conn() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ parts = baseurl.split("://", 1)
+ if len(parts) != 2:
+ raise ValueError(_("Provided URI does not "
+ "contain protocol identifier. '%s'") % baseurl)
+ protocol, url_parts = parts
+ del parts
+
+ url_parts = url_parts.split("/")
+ host = url_parts[0]
+ if len(url_parts) < 2:
+ address = "/"
+ else:
+ address = "/"+"/".join(url_parts[1:])
+ del url_parts
+
+ userpass_host = host.split("@", 1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = userpass_host[0].split(":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError(_("Unable to interpret username/password provided."))
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ try:
+ encodebytes = base64.encodebytes
+ except AttributeError:
+ # Python 2
+ encodebytes = base64.encodestring
+ http_headers = {
+ b"Authorization": "Basic %s" % \
+ encodebytes(_unicode_encode("%s:%s" % (username, password))).replace(
+ b"\012",
+ b""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ # Use local import since https typically isn't needed, and
+ # this way we can usually avoid triggering the global scope
+ # http.client ImportError handler (like during stage1 -> stage2
+ # builds where USE=ssl is disabled for python).
+ try:
+ try:
+ from http.client import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ from httplib import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ raise NotImplementedError(
+ _("python must have ssl enabled for https support"))
+ conn = http_client_HTTPSConnection(host)
+ elif protocol == "http":
+ conn = http_client_HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username, password)
+ else:
+ sys.stderr.write(colorize("WARN",
+ _(" * No password provided for username")) + " '%s'" % \
+ (username,) + "\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ elif protocol == "sftp":
+ try:
+ import paramiko
+ except ImportError:
+ raise NotImplementedError(
+ _("paramiko must be installed for sftp support"))
+ t = paramiko.Transport(host)
+ t.connect(username=username, password=password)
+ conn = paramiko.SFTPClient.from_transport(t)
+ else:
+ raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
+
+ return (conn, protocol, address, http_params, http_headers)
+
+def make_ftp_request(conn, address, rest=None, dest=None):
+ """Uses the |conn| object to request the data
+ from address and issuing a rest if it is passed."""
+
+ warnings.warn("portage.getbinpkg.make_ftp_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR %s" % str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR %s" % str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+ # bytes written to dest since this request started
+ data_size = dest.tell() - fstart_pos
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata, (fsize != data_size), ""
+
+ except ValueError as e:
+ return None, int(str(e)[:4]), str(e)
+
+
+def make_http_request(conn, address, _params={}, headers={}, dest=None):
+ """Uses the |conn| object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ warnings.warn("portage.getbinpkg.make_http_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if rc != 0:
+ conn = create_conn(address)[0]
+ conn.request("GET", address, body=None, headers=headers)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ return None, None, "Server request failed: %s" % str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+ # 301 (moved permanently) and 302 (moved temporarily) carry the new address in the Location header.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in str(response.msg).split("\n"):
+ parts = x.split(": ", 1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(colorize("BAD",
+ _("Location has moved: ")) + str(parts[1]) + "\n")
+ if (rc == 302):
+ sys.stderr.write(colorize("BAD",
+ _("Location has temporarily moved: ")) + \
+ str(parts[1]) + "\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ return None, rc, "Server did not respond successfully (%s: %s)" % (str(response.status), str(response.reason))
+
+ if dest:
+ dest.write(response.read())
+ return "", 0, ""
+
+ return response.read(), 0, ""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+
+ warnings.warn("portage.getbinpkg.match_in_array() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+ if not allow_overlap: # Don't allow prefix and suffix to overlap.
+ if len(x) >= (len(prefix)+len(suffix)):
+ pass
+ else:
+ continue # Too short to match.
+ else:
+ pass # Overlap is allowed, so no length check is needed.
+
+ if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ myarray.append(x) # It matches
+ else:
+ continue # Doesn't match.
+
+ return myarray
+
+
+def dir_get_list(baseurl, conn=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.dir_get_list() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ if not address.endswith("/"):
+ # http servers can return a 400 error here
+ # if the address doesn't end with a slash.
+ address += "/"
+ page, rc, msg = make_http_request(conn, address, params, headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(_unicode_decode(page))
+ del page
+ listing = parser.get_anchors()
+ else:
+ import portage.exception
+ raise portage.exception.PortageException(
+ _("Unable to get listing: %s %s") % (rc,msg))
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ elif protocol == "sftp":
+ listing = conn.listdir(address)
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
+
+def file_get_metadata(baseurl, conn=None, chunk_size=3000):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.file_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-%s" % str(chunk_size)
+ data, _x, _x = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data, _x, _x = make_ftp_request(conn, address, -chunk_size)
+ elif protocol == "sftp":
+ f = conn.open(address)
+ try:
+ f.seek(-chunk_size, 2)
+ data = f.read()
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if data:
+ xpaksize = portage.xpak.decodeint(data[-8:-4])
+ if (xpaksize + 8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, xpaksize + 8)
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data) - (xpaksize + 8):-8]
+ del data
+
+ myid = portage.xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None, None
+ del xpak_data
+ else:
+ myid = None, None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
+
+
+def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None,
+ fcmd_vars=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+
+ warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
+ "parameter is deprecated", DeprecationWarning, stacklevel=2)
+
+ return file_get_lib(baseurl, dest, conn)
+
+ variables = {}
+
+ if fcmd_vars is not None:
+ variables.update(fcmd_vars)
+
+ if "DISTDIR" not in variables:
+ if dest is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "DISTDIR"))
+ variables["DISTDIR"] = dest
+
+ if "URI" not in variables:
+ if baseurl is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "URI"))
+ variables["URI"] = baseurl
+
+ if "FILE" not in variables:
+ if filename is None:
+ filename = os.path.basename(variables["URI"])
+ variables["FILE"] = filename
+
+ from portage.util import varexpand
+ from portage.process import spawn
+ myfetch = portage.util.shlex_split(fcmd)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ fd_pipes = {
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stdout__.fileno(),
+ 2: sys.__stdout__.fileno()
+ }
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
+ if retval != os.EX_OK:
+ sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
+ return 0
+ return 1
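+
+# A sketch of the fcmd-based path (FETCHCOMMAND-style template; the URL
+# and paths are illustrative):
+#
+#   fcmd = 'wget -O "${DISTDIR}/${FILE}" "${URI}"'
+#   ok = file_get(baseurl="http://example.org/packages/Packages",
+#       dest="/tmp", fcmd=fcmd)
+#   # returns 1 on success, 0 if the fetcher exited with a failure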
+
+def file_get_lib(baseurl, dest, conn=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.file_get_lib() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
+ if protocol in ["http", "https"]:
+ data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data, rc, _msg = make_ftp_request(conn, address, dest=dest)
+ elif protocol == "sftp":
+ rc = 0
+ try:
+ f = conn.open(address)
+ except SystemExit:
+ raise
+ except Exception:
+ rc = 1
+ else:
+ try:
+ if dest:
+ bufsize = 8192
+ while True:
+ data = f.read(bufsize)
+ if not data:
+ break
+ dest.write(data)
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+
+ warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ cache_path = CACHE_PATH
+ metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')
+
+ if makepickle is None:
+ makepickle = CACHE_PATH+"/metadata.idx.most_recent"
+
+ try:
+ conn = create_conn(baseurl, conn)[0]
+ except _all_errors as e:
+ # ftplib.FTP(host) can raise errors like this:
+ # socket.error: (111, 'Connection refused')
+ sys.stderr.write("!!! %s\n" % (e,))
+ return {}
+
+ out = sys.stdout
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(metadatafile)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ metadata = mypickle.load()
+ out.write(_("Loaded metadata pickle.\n"))
+ out.flush()
+ metadatafile.close()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception:
+ metadata = {}
+ if baseurl not in metadata:
+ metadata[baseurl] = {}
+ if "indexname" not in metadata[baseurl]:
+ metadata[baseurl]["indexname"] = ""
+ if "timestamp" not in metadata[baseurl]:
+ metadata[baseurl]["timestamp"] = 0
+ if "unmodified" not in metadata[baseurl]:
+ metadata[baseurl]["unmodified"] = 0
+ if "data" not in metadata[baseurl]:
+ metadata[baseurl]["data"] = {}
+
+ if not os.access(cache_path, os.W_OK):
+ sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
+ sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
+ return metadata[baseurl]["data"]
+
+ import portage.exception
+ try:
+ filelist = dir_get_list(baseurl, conn)
+ except portage.exception.PortageException as e:
+ sys.stderr.write(_("!!! Error connecting to '%s'.\n") %
+ _hide_url_passwd(baseurl))
+ sys.stderr.write("!!! %s\n" % str(e))
+ del e
+ return metadata[baseurl]["data"]
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))):
+ # Try to download new cache until we succeed on one.
+ data = ""
+ for trynum in [1, 2, 3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl + "/" + mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError as e:
+ sys.stderr.write("--- %s\n" % str(e))
+ if trynum < 3:
+ sys.stderr.write(_("Retrying...\n"))
+ sys.stderr.flush()
+ mytempfile.close()
+ continue
+ if match_in_array([mfile], suffix=".gz"):
+ out.write("gzip'd\n")
+ out.flush()
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile)
+ data = gzindex.read()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mytempfile.close()
+ sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
+ sys.stderr.flush()
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = pickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ out.write(_("Pickle loaded.\n"))
+ out.flush()
+ break
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to read data from index: ") + str(mfile) + "\n")
+ sys.stderr.write("!!! %s" % str(e))
+ sys.stderr.flush()
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! %s\n" % str(e))
+ sys.stderr.flush()
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+
+ class CacheStats(object):
+ from time import time
+ def __init__(self, out):
+ self.misses = 0
+ self.hits = 0
+ self.last_update = 0
+ self.out = out
+ self.min_display_latency = 0.2
+ def update(self):
+ cur_time = self.time()
+ if cur_time - self.last_update >= self.min_display_latency:
+ self.last_update = cur_time
+ self.display()
+ def display(self):
+ self.out.write("\r"+colorize("WARN",
+ _("cache miss: '") + str(self.misses) + "'") + \
+ " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
+ self.out.flush()
+
+ cache_stats = CacheStats(out)
+ have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
+ if have_tty:
+ cache_stats.display()
+ binpkg_filenames = set()
+ for x in tbz2list:
+ x = os.path.basename(x)
+ binpkg_filenames.add(x)
+ if x not in metadata[baseurl]["data"]:
+ cache_stats.misses += 1
+ if have_tty:
+ cache_stats.update()
+ metadata[baseurl]["modified"] = 1
+ myid = None
+ for _x in range(3):
+ try:
+ myid = file_get_metadata(
+ "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+ conn, chunk_size)
+ break
+ except http_client_BadStatusLine:
+ # Sometimes this error is thrown from conn.getresponse() in
+ # make_http_request(). The docstring for this error in
+ # httplib.py says "Presumably, the server closed the
+ # connection before sending a valid response".
+ conn = create_conn(baseurl)[0]
+ except http_client_ResponseNotReady:
+ # With some http servers this error is known to be thrown
+ # from conn.getresponse() in make_http_request() when the
+ # remote file does not have appropriate read permissions.
+ # Maybe it's possible to recover from this exception in
+ # some cases though, so retry.
+ conn = create_conn(baseurl)[0]
+
+ if myid and myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(colorize("BAD",
+ _("!!! Failed to retrieve metadata on: ")) + str(x) + "\n")
+ sys.stderr.flush()
+ else:
+ cache_stats.hits += 1
+ if have_tty:
+ cache_stats.update()
+ cache_stats.display()
+ # Cleanse stale cache for files that don't exist on the server anymore.
+ stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+ if stale_cache:
+ for x in stale_cache:
+ del metadata[baseurl]["data"][x]
+ metadata[baseurl]["modified"] = 1
+ del stale_cache
+ del binpkg_filenames
+ out.write("\n")
+ out.flush()
+
+ try:
+ if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(_unicode_encode(makepickle,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.flush()
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
+
+def _cmp_cpv(d1, d2):
+ cpv1 = d1["CPV"]
+ cpv2 = d2["CPV"]
+ if cpv1 > cpv2:
+ return 1
+ elif cpv1 == cpv2:
+ return 0
+ else:
+ return -1
+
+class PackageIndex(object):
+
+ def __init__(self,
+ allowed_pkg_keys=None,
+ default_header_data=None,
+ default_pkg_data=None,
+ inherited_keys=None,
+ translated_keys=None):
+
+ self._pkg_slot_dict = None
+ if allowed_pkg_keys is not None:
+ self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
+
+ self._default_header_data = default_header_data
+ self._default_pkg_data = default_pkg_data
+ self._inherited_keys = inherited_keys
+ self._write_translation_map = {}
+ self._read_translation_map = {}
+ if translated_keys:
+ self._write_translation_map.update(translated_keys)
+ self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
+ self.header = {}
+ if self._default_header_data:
+ self.header.update(self._default_header_data)
+ self.packages = []
+ self.modified = True
+
+ def _readpkgindex(self, pkgfile, pkg_entry=True):
+
+ allowed_keys = None
+ if self._pkg_slot_dict is None or not pkg_entry:
+ d = {}
+ else:
+ d = self._pkg_slot_dict()
+ allowed_keys = d.allowed_keys
+
+ for line in pkgfile:
+ line = line.rstrip("\n")
+ if not line:
+ break
+ line = line.split(":", 1)
+ if len(line) != 2:
+ continue
+ k, v = line
+ if v:
+ v = v[1:]
+ k = self._read_translation_map.get(k, k)
+ if allowed_keys is not None and \
+ k not in allowed_keys:
+ continue
+ d[k] = v
+ return d
+
+ def _writepkgindex(self, pkgfile, items):
+ for k, v in items:
+ pkgfile.write("%s: %s\n" % \
+ (self._write_translation_map.get(k, k), v))
+ pkgfile.write("\n")
+
+ def read(self, pkgfile):
+ self.readHeader(pkgfile)
+ self.readBody(pkgfile)
+
+ def readHeader(self, pkgfile):
+ self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
+
+ def readBody(self, pkgfile):
+ while True:
+ d = self._readpkgindex(pkgfile)
+ if not d:
+ break
+ mycpv = d.get("CPV")
+ if not mycpv:
+ continue
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ d.setdefault(k, v)
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None:
+ d.setdefault(k, v)
+ self.packages.append(d)
+
+ def write(self, pkgfile):
+ if self.modified:
+ self.header["TIMESTAMP"] = str(long(time.time()))
+ self.header["PACKAGES"] = str(len(self.packages))
+ keys = list(self.header)
+ keys.sort()
+ self._writepkgindex(pkgfile, [(k, self.header[k]) \
+ for k in keys if self.header[k]])
+ for metadata in sorted(self.packages,
+ key=portage.util.cmp_sort_key(_cmp_cpv)):
+ metadata = metadata.copy()
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None and v == metadata.get(k):
+ del metadata[k]
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ if metadata.get(k) == v:
+ metadata.pop(k, None)
+ keys = list(metadata)
+ keys.sort()
+ self._writepkgindex(pkgfile,
+ [(k, metadata[k]) for k in keys if metadata[k]])
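+
+# A read sketch (the index path is illustrative):
+#
+#   index = PackageIndex()
+#   with open("/usr/portage/packages/Packages") as f:
+#       index.read(f)
+#   # index.header holds the global metadata; index.packages is a list
+#   # of per-package dicts keyed by fields such as "CPV"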
diff --git a/usr/lib/portage/pym/portage/glsa.py b/usr/lib/portage/pym/portage/glsa.py
new file mode 100644
index 0000000..1b19fb1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/glsa.py
@@ -0,0 +1,726 @@
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import absolute_import, unicode_literals
+
+import io
+import sys
+try:
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urllib import urlopen as urllib_request_urlopen
+import codecs
+import re
+import operator
+import xml.dom.minidom
+from io import StringIO
+from functools import reduce
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.versions import pkgsplit, vercmp
+from portage.util import grabfile
+from portage.const import PRIVATE_PATH
+from portage.localization import _
+from portage.dep import _slot_separator
+
+# Note: the leading space for rgt and rlt is important!
+# match() relies on atom[2] == "~" to detect these revision operators.
+# FIXME: use slot deps instead, requires GLSA format versioning
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
+ "rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
+NEWLINE_ESCAPE = "!;\\n" # some random string to mark newlines that should be preserved
+SPACE_ESCAPE = "!;_" # some random string to mark spaces that should be preserved
+
+def get_applied_glsas(settings):
+ """
+ Return a list of applied or injected GLSA IDs
+
+ @type settings: portage.config
+ @param settings: portage config instance
+ @rtype: list
+ @return: list of glsa IDs
+ """
+ return grabfile(os.path.join(settings["EROOT"], PRIVATE_PATH, "glsa_injected"))
+
+
+# TODO: use the textwrap module instead
+def wrap(text, width, caption=""):
+ """
+ Wraps the given text at column I{width}, optionally indenting
+ it so that no text is under I{caption}. It's possible to encode
+ hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
+
+ @type text: String
+ @param text: the text to be wrapped
+ @type width: Integer
+ @param width: the column at which the text should be wrapped
+ @type caption: String
+ @param caption: this string is inserted at the beginning of the
+ return value and the paragraph is indented up to
+ C{len(caption)}.
+ @rtype: String
+ @return: the wrapped and indented paragraph
+ """
+ rValue = ""
+ line = caption
+ text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
+ words = text.split()
+ indentLevel = len(caption)+1
+
+ for w in words:
+ if line != "" and line[-1] == "\n":
+ rValue += line
+ line = " "*indentLevel
+ if len(line)+len(w.replace(NEWLINE_ESCAPE, ""))+1 > width:
+ rValue += line+"\n"
+ line = " "*indentLevel+w.replace(NEWLINE_ESCAPE, "\n")
+ elif w.find(NEWLINE_ESCAPE) >= 0:
+ if len(line.strip()) > 0:
+ rValue += line+" "+w.replace(NEWLINE_ESCAPE, "\n")
+ else:
+ rValue += line+w.replace(NEWLINE_ESCAPE, "\n")
+ line = " "*indentLevel
+ else:
+ if len(line.strip()) > 0:
+ line += " "+w
+ else:
+ line += w
+ if len(line) > 0:
+ rValue += line.replace(NEWLINE_ESCAPE, "\n")
+ rValue = rValue.replace(SPACE_ESCAPE, " ")
+ return rValue
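+
+# For example:
+#
+#   wrap("one two three four", 12, caption="Title:")
+#   # returns "Title: one\n       two\n       three\n       four"
+#   # (continuation lines indented to len(caption) + 1 columns)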
+
+def get_glsa_list(myconfig):
+ """
+ Returns a list of all available GLSAs in the given repository
+ by comparing the filelist there with the pattern described in
+ the config.
+
+ @type myconfig: portage.config
+ @param myconfig: Portage settings instance
+
+ @rtype: List of Strings
+ @return: a list of GLSA IDs in this repository
+ """
+ rValue = []
+
+ if "GLSA_DIR" in myconfig:
+ repository = myconfig["GLSA_DIR"]
+ else:
+ repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")
+
+ if not os.access(repository, os.R_OK):
+ return []
+ dirlist = os.listdir(repository)
+ prefix = "glsa-"
+ suffix = ".xml"
+
+ for f in dirlist:
+ try:
+ if f[:len(prefix)] == prefix and f[-1*len(suffix):] == suffix:
+ rValue.append(f[len(prefix):-1*len(suffix)])
+ except IndexError:
+ pass
+ return rValue
+
+def getListElements(listnode):
+ """
+ Get all <li> elements for a given <ol> or <ul> node.
+
+ @type listnode: xml.dom.Node
+ @param listnode: <ul> or <ol> list to get the elements for
+ @rtype: List of Strings
+ @return: a list that contains the value of the <li> elements
+ """
+ if listnode.nodeName not in ["ul", "ol"]:
+ raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
+ rValue = [getText(li, format="strip") \
+ for li in listnode.childNodes \
+ if li.nodeType == xml.dom.Node.ELEMENT_NODE]
+ return rValue
+
+def getText(node, format, textfd = None):
+ """
+ This is the main parser function. It takes a node and traverses
+ recursively over the subnodes, getting the text of each (and the
+ I{link} attribute for <uri> and <mail>). Depending on the I{format}
+ parameter the text might be formatted by adding/removing newlines,
+ tabs and spaces. This function is only useful for the GLSA DTD,
+ it's not applicable for other DTDs.
+
+ @type node: xml.dom.Node
+ @param node: the root node to start with the parsing
+ @type format: String
+ @param format: this should be either I{strip}, I{keep} or I{xml}
+ I{keep} just gets the text and does no formatting.
+ I{strip} replaces newlines and tabs with spaces and
+ replaces multiple spaces with one space.
+ I{xml} does some more formatting, depending on the
+ type of the encountered nodes.
+ @type textfd: writable file-like object
+ @param textfd: the file-like object to write the output to
+ @rtype: String
+ @return: the (formatted) content of the node and its subnodes
+ except when textfd is given, in which case None is returned
+ """
+ if not textfd:
+ textfd = StringIO()
+ returnNone = False
+ else:
+ returnNone = True
+ if format in ["strip", "keep"]:
+ if node.nodeName in ["uri", "mail"]:
+ textfd.write(node.childNodes[0].data+": "+node.getAttribute("link"))
+ else:
+ for subnode in node.childNodes:
+ if subnode.nodeName == "#text":
+ textfd.write(subnode.data)
+ else:
+ getText(subnode, format, textfd)
+ else: # format = "xml"
+ for subnode in node.childNodes:
+ if subnode.nodeName == "p":
+ for p_subnode in subnode.childNodes:
+ if p_subnode.nodeName == "#text":
+ textfd.write(p_subnode.data.strip())
+ elif p_subnode.nodeName in ["uri", "mail"]:
+ textfd.write(p_subnode.childNodes[0].data)
+ textfd.write(" ( "+p_subnode.getAttribute("link")+" )")
+ textfd.write(NEWLINE_ESCAPE)
+ elif subnode.nodeName == "ul":
+ for li in getListElements(subnode):
+ textfd.write("-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
+ elif subnode.nodeName == "ol":
+ i = 0
+ for li in getListElements(subnode):
+ i = i+1
+ textfd.write(str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
+ elif subnode.nodeName == "code":
+ textfd.write(getText(subnode, format="keep").lstrip().replace("\n", NEWLINE_ESCAPE))
+ textfd.write(NEWLINE_ESCAPE)
+ elif subnode.nodeName == "#text":
+ textfd.write(subnode.data)
+ else:
+ raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+ if returnNone:
+ return None
+ rValue = textfd.getvalue()
+ if format == "strip":
+ rValue = rValue.strip(" \n\t")
+ rValue = re.sub(r"\s{2,}", " ", rValue)
+ return rValue
+
+def getMultiTagsText(rootnode, tagname, format):
+ """
+ Returns a list with the text of all subnodes of type I{tagname}
+ under I{rootnode} (which itself is not parsed) using the given I{format}.
+
+ @type rootnode: xml.dom.Node
+ @param rootnode: the node to search for I{tagname}
+ @type tagname: String
+ @param tagname: the name of the tags to search for
+ @type format: String
+ @param format: see L{getText}
+ @rtype: List of Strings
+ @return: a list containing the text of all I{tagname} childnodes
+ """
+ rValue = [getText(e, format) \
+ for e in rootnode.getElementsByTagName(tagname)]
+ return rValue
+
+def makeAtom(pkgname, versionNode):
+ """
+ Creates a (syntactically) valid portage atom from the given
+ package name and the information in I{versionNode}.
+
+ @type pkgname: String
+ @param pkgname: the name of the package for this atom
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the portage atom
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + pkgname \
+ + "-" + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return str(rValue)
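+
+# A sketch: for a node like <unaffected range="ge" slot="1">2.1</unaffected>
+# and pkgname "dev-lang/python", this returns ">=dev-lang/python-2.1:1";
+# without a slot attribute it would return ">=dev-lang/python-2.1".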
+
+def makeVersion(versionNode):
+ """
+ Creates a version string (format <op><version>) from the
+ information in I{versionNode}.
+
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the version string
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return rValue
+
+def match(atom, dbapi, match_type="default"):
+ """
+ wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
+ the given atom.
+
+ @type atom: string
+ @param atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to dbapi.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if atom[2] == "~":
+ return revisionMatch(atom, dbapi, match_type=match_type)
+ elif match_type == "default" or not hasattr(dbapi, "xmatch"):
+ return dbapi.match(atom)
+ else:
+ return dbapi.xmatch(match_type, atom)
+
+def revisionMatch(revisionAtom, dbapi, match_type="default"):
+ """
+ handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
+ as > and < except that they are limited to the same version, the range only
+ applies to the revision part.
+
+ @type revisionAtom: string
+ @param revisionAtom: a <~ or >~ atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to portdb.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if match_type == "default" or not hasattr(dbapi, "xmatch"):
+ if ":" in revisionAtom:
+ mylist = dbapi.match(re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.match(re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ else:
+ if ":" in revisionAtom:
+ mylist = dbapi.xmatch(match_type, re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.xmatch(match_type, re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ rValue = []
+ for v in mylist:
+ r1 = pkgsplit(v)[-1][1:]
+ r2 = pkgsplit(revisionAtom[3:])[-1][1:]
+ if eval(r1+" "+revisionAtom[0:2]+" "+r2):
+ rValue.append(v)
+ return rValue
+
+
+def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
+ """
+ Checks if the system state matches an atom in
+ I{vulnerableList} and returns a list of [vulnerable, update] pairs,
+ where each update is the lowest version matching an atom in
+ I{unaffectedList} that is greater than the currently installed
+ version (an empty string when no upgrade is possible), or None
+ if the system is not affected.
+ Both I{vulnerableList} and I{unaffectedList} should have the
+ same base package.
+
+ @type vulnerableList: List of Strings
+ @param vulnerableList: atoms matching vulnerable package versions
+ @type unaffectedList: List of Strings
+ @param unaffectedList: atoms matching unaffected package versions
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: Ebuild repository
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: Installed package repository
+ @type minimize: Boolean
+ @param minimize: True for a least-change upgrade, False for emerge-like algorithm
+
+	@rtype: List | None
+	@return: a list of [vulnerable, update] pairs, or None if the
+		system is not affected
+ """
+ rValue = ""
+ v_installed = reduce(operator.add, [match(v, vardbapi) for v in vulnerableList], [])
+ u_installed = reduce(operator.add, [match(u, vardbapi) for u in unaffectedList], [])
+
+ # remove all unaffected atoms from vulnerable list
+ v_installed = list(set(v_installed).difference(set(u_installed)))
+
+ if not v_installed:
+ return None
+
+	# this list pairs each installed vulnerable atom with its upgrade atom
+ vuln_update = []
+ avail_updates = set()
+ for u in unaffectedList:
+ # TODO: This had match_type="match-all" before. I don't think it should
+ # since we disregarded masked items later anyway (match(=rValue, "porttree"))
+ avail_updates.update(match(u, portdbapi))
+ # if an atom is already installed, we should not consider it for upgrades
+ avail_updates.difference_update(u_installed)
+
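+	# For each installed vulnerable package, pick one update candidate:
+	# with minimize=True the XOR below keeps the lowest newer candidate
+	# (least change); with minimize=False it keeps the highest (emerge-like)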
+ for vuln in v_installed:
+ update = ""
+ for c in avail_updates:
+ c_pv = portage.catpkgsplit(c)
+ if vercmp(c.version, vuln.version) > 0 \
+ and (update == "" \
+ or (minimize ^ (vercmp(c.version, update.version) > 0))) \
+ and portdbapi._pkg_str(c, None).slot == vardbapi._pkg_str(vuln, None).slot:
+ update = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+ if c_pv[3] != "r0": # we don't like -r0 for display
+ update += "-"+c_pv[3]
+ update = portdbapi._pkg_str(update, None)
+ vuln_update.append([vuln, update])
+
+ return vuln_update
+
+def format_date(datestr):
+ """
+	Takes a date string (announced or revised) from a GLSA and formats
+	it as readable text (e.g. "January 01, 2008").
+
+	@type datestr: String
+	@param datestr: the date string to reformat
+ @rtype: String
+ @return: a reformatted string, or the original string
+ if it cannot be reformatted.
+ """
+ splitdate = datestr.split("-", 2)
+ if len(splitdate) != 3:
+ return datestr
+
+	# A generator expression is lazy: the int() conversions run (and may
+	# raise ValueError) inside the date() call below, where it is caught.
+	splitdate = (int(x) for x in splitdate)
+
+ from datetime import date
+ try:
+ d = date(*splitdate)
+ except ValueError:
+ return datestr
+
+ # TODO We could format to local date format '%x' here?
+ return _unicode_decode(d.strftime("%B %d, %Y"),
+ encoding=_encodings['content'], errors='replace')
+
+# simple Exception classes to catch specific errors
+class GlsaTypeException(Exception):
+ def __init__(self, doctype):
+ Exception.__init__(self, "wrong DOCTYPE: %s" % doctype)
+
+class GlsaFormatException(Exception):
+ pass
+
+class GlsaArgumentException(Exception):
+ pass
+
+# GLSA xml data wrapper class
+class Glsa:
+ """
+ This class is a wrapper for the XML data and provides methods to access
+ and display the contained data.
+ """
+ def __init__(self, myid, myconfig, vardbapi, portdbapi):
+ """
+		Simple constructor that sets the ID, stores the config and fetches
+		the XML data by calling C{self.read()}.
+
+ @type myid: String
+ @param myid: String describing the id for the GLSA object (standard
+ GLSAs have an ID of the form YYYYMM-nn) or an existing
+ filename containing a GLSA.
+ @type myconfig: portage.config
+ @param myconfig: the config that should be used for this object.
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: installed package repository
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: ebuild repository
+ """
+ myid = _unicode_decode(myid,
+ encoding=_encodings['content'], errors='strict')
+ if re.match(r'\d{6}-\d{2}', myid):
+ self.type = "id"
+ elif os.path.exists(myid):
+ self.type = "file"
+ else:
+ raise GlsaArgumentException(_("Given ID %s isn't a valid GLSA ID or filename.") % myid)
+ self.nr = myid
+ self.config = myconfig
+ self.vardbapi = vardbapi
+ self.portdbapi = portdbapi
+ self.read()
+
+ def read(self):
+ """
+ Here we build the filename from the config and the ID and pass
+ it to urllib to fetch it from the filesystem or a remote server.
+
+ @rtype: None
+ @return: None
+ """
+ if "GLSA_DIR" in self.config:
+ repository = "file://" + self.config["GLSA_DIR"]+"/"
+ else:
+ repository = "file://" + self.config["PORTDIR"] + "/metadata/glsa/"
+ if self.type == "file":
+ myurl = "file://"+self.nr
+ else:
+ myurl = repository + "glsa-%s.xml" % str(self.nr)
+
+ f = urllib_request_urlopen(myurl)
+ try:
+ self.parse(f)
+ finally:
+ f.close()
+
+ return None
+
+ def parse(self, myfile):
+ """
+ This method parses the XML file and sets up the internal data
+ structures by calling the different helper functions in this
+ module.
+
+ @type myfile: String
+ @param myfile: Filename to grab the XML data from
+ @rtype: None
+ @return: None
+ """
+ self.DOM = xml.dom.minidom.parse(myfile)
+ if not self.DOM.doctype:
+ raise GlsaTypeException(None)
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa.dtd":
+ self.dtdversion = 0
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa-2.dtd":
+ self.dtdversion = 2
+ else:
+ raise GlsaTypeException(self.DOM.doctype.systemId)
+ myroot = self.DOM.getElementsByTagName("glsa")[0]
+ if self.type == "id" and myroot.getAttribute("id") != self.nr:
+ raise GlsaFormatException(_("filename and internal id don't match:") + myroot.getAttribute("id") + " != " + self.nr)
+
+ # the simple (single, required, top-level, #PCDATA) tags first
+ self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
+ self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
+ self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
+
+ # Support both formats of revised:
+ # <revised>December 30, 2007: 02</revised>
+ # <revised count="2">2007-12-30</revised>
+ revisedEl = myroot.getElementsByTagName("revised")[0]
+ self.revised = getText(revisedEl, format="strip")
+ count = revisedEl.attributes.get("count")
+ if count is None:
+ if self.revised.find(":") >= 0:
+ (self.revised, count) = self.revised.split(":")
+ else:
+ count = 1
+
+ self.revised = format_date(self.revised)
+
+ try:
+ self.count = int(count)
+ except ValueError:
+ # TODO should this raise a GlsaFormatException?
+ self.count = 1
+
+ # now the optional and 0-n toplevel, #PCDATA tags and references
+ try:
+ self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
+ except IndexError:
+ self.access = ""
+ self.bugs = getMultiTagsText(myroot, "bug", format="strip")
+ self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
+
+ # and now the formatted text elements
+ self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
+ self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
+ self.resolution = getText(myroot.getElementsByTagName("resolution")[0], format="xml")
+ self.impact_text = getText(myroot.getElementsByTagName("impact")[0], format="xml")
+ self.impact_type = myroot.getElementsByTagName("impact")[0].getAttribute("type")
+ try:
+ self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
+ except IndexError:
+ self.background = ""
+
+ # finally the interesting tags (product, affected, package)
+ self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
+ self.product = getText(myroot.getElementsByTagName("product")[0], format="strip")
+ self.affected = myroot.getElementsByTagName("affected")[0]
+ self.packages = {}
+ for p in self.affected.getElementsByTagName("package"):
+ name = p.getAttribute("name")
+ try:
+ name = portage.dep.Atom(name)
+ except portage.exception.InvalidAtom:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ if name != name.cp:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ name = name.cp
+ if name not in self.packages:
+ self.packages[name] = []
+ tmp = {}
+ tmp["arch"] = p.getAttribute("arch")
+ tmp["auto"] = (p.getAttribute("auto") == "yes")
+ tmp["vul_vers"] = [makeVersion(v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_vers"] = [makeVersion(v) for v in p.getElementsByTagName("unaffected")]
+ tmp["vul_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("unaffected")]
+ self.packages[name].append(tmp)
+ # TODO: services aren't really used yet
+ self.services = self.affected.getElementsByTagName("service")
+ return None
+
+ def dump(self, outstream=sys.stdout, encoding="utf-8"):
+ """
+		Dumps a plaintext representation of this GLSA to I{outstream} or
+		B{stdout} if it is omitted. You can specify an alternate
+		I{encoding} if needed (default is utf-8).
+
+		@type outstream: File
+		@param outstream: Stream that should be used for writing
+			(defaults to sys.stdout)
+ """
+ outstream = getattr(outstream, "buffer", outstream)
+ outstream = codecs.getwriter(encoding)(outstream)
+ width = 76
+ outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
+ outstream.write((width*"=")+"\n")
+ outstream.write(wrap(self.synopsis, width, caption=_("Synopsis: "))+"\n")
+ outstream.write(_("Announced on: %s\n") % self.announced)
+ outstream.write(_("Last revised on: %s : %02d\n\n") % (self.revised, self.count))
+ if self.glsatype == "ebuild":
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ vul_vers = "".join(path["vul_vers"])
+ unaff_vers = "".join(path["unaff_vers"])
+ outstream.write(_("Affected package: %s\n") % k)
+ outstream.write(_("Affected archs: "))
+ if path["arch"] == "*":
+ outstream.write(_("All\n"))
+ else:
+ outstream.write("%s\n" % path["arch"])
+ outstream.write(_("Vulnerable: %s\n") % vul_vers)
+ outstream.write(_("Unaffected: %s\n\n") % unaff_vers)
+ elif self.glsatype == "infrastructure":
+ pass
+ if len(self.bugs) > 0:
+ outstream.write(_("\nRelated bugs: "))
+ outstream.write(", ".join(self.bugs))
+ outstream.write("\n")
+ if self.background:
+ outstream.write("\n"+wrap(self.background, width, caption=_("Background: ")))
+ outstream.write("\n"+wrap(self.description, width, caption=_("Description: ")))
+ outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact: ")))
+ outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround: ")))
+ outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution: ")))
+ myreferences = " ".join(r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE for r in self.references)
+ outstream.write("\n"+wrap(myreferences, width, caption=_("References: ")))
+ outstream.write("\n")
+
+ def isVulnerable(self):
+ """
+ Tests if the system is affected by this GLSA by checking if any
+ vulnerable package versions are installed. Also checks for affected
+ architectures.
+
+ @rtype: Boolean
+ @return: True if the system is affected, False if not
+ """
+ rValue = False
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ if path["arch"] == "*" or self.config["ARCH"] in path["arch"].split():
+ for v in path["vul_atoms"]:
+ rValue = rValue \
+ or (len(match(v, self.vardbapi)) > 0 \
+ and None != getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ self.portdbapi, self.vardbapi))
+ return rValue
+
+ def isInjected(self):
+ """
+		Checks whether the GLSA ID is listed in the GLSA checkfile,
+		i.e. whether this GLSA should be considered already applied.
+
+		@rtype: Boolean
+		@return: True if the GLSA is in the inject file, False if not
+ """
+ if not os.access(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"), os.R_OK):
+ return False
+ return (self.nr in get_applied_glsas(self.config))
+
+ def inject(self):
+ """
+ Puts the ID of this GLSA into the GLSA checkfile, so it won't
+ show up on future checks. Should be called after a GLSA is
+ applied or on explicit user request.
+
+ @rtype: None
+ @return: None
+ """
+ if not self.isInjected():
+ checkfile = io.open(
+ _unicode_encode(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a+', encoding=_encodings['content'], errors='strict')
+ checkfile.write(_unicode_decode(self.nr + "\n"))
+ checkfile.close()
+ return None
+
+ def getMergeList(self, least_change=True):
+ """
+ Returns the list of package-versions that have to be merged to
+ apply this GLSA properly. The versions are as low as possible
+ while avoiding downgrades (see L{getMinUpgrade}).
+
+ @type least_change: Boolean
+ @param least_change: True if the smallest possible upgrade should be selected,
+ False for an emerge-like algorithm
+ @rtype: List of Strings
+ @return: list of package-versions that have to be merged
+ """
+ return list(set(update for (vuln, update) in self.getAffectionTable(least_change) if update))
+
+ def getAffectionTable(self, least_change=True):
+ """
+		Builds a list that pairs each installed atom affected by this
+		GLSA with the atom that is its minimal upgrade
+		(see L{getMinUpgrade}).
+ """
+ systemAffection = []
+ for pkg in self.packages.keys():
+ for path in self.packages[pkg]:
+ update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"],
+ self.portdbapi, self.vardbapi, minimize=least_change)
+ if update:
+ systemAffection.extend(update)
+ return systemAffection
diff --git a/usr/lib/portage/pym/portage/localization.py b/usr/lib/portage/pym/portage/localization.py
new file mode 100644
index 0000000..2db4b7a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/localization.py
@@ -0,0 +1,42 @@
+# localization.py -- Code to manage/help portage localization.
+# Copyright 2004-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import locale
+import math
+
+from portage import _encodings, _unicode_decode
+
+# We define this stub to make a future transition to real localization easier.
+def _(mystr):
+ """
+ Always returns unicode, regardless of the input type. This is
+ helpful for avoiding UnicodeDecodeError from __str__() with
+ Python 2, by ensuring that string format operations invoke
+ __unicode__() instead of __str__().
+ """
+ return _unicode_decode(mystr)
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print(_("You can use this string for translating."))
+ print(_("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"})
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1, 2, 3, 4]
+ print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") %
+ {"a": a_value, "b": b_value, "c": c_value})
+
+def localized_size(num_bytes):
+ """
+	Return a pretty, localized size string for num_bytes (given in
+	bytes). The output is expressed in kibibytes.
+ """
+
+ # always round up, so that small files don't end up as '0 KiB'
+ num_kib = math.ceil(num_bytes / 1024)
+ formatted_num = locale.format('%d', num_kib, grouping=True)
+ return (_unicode_decode(formatted_num, encoding=_encodings['stdio']) + ' KiB')
diff --git a/usr/lib/portage/pym/portage/locks.py b/usr/lib/portage/pym/portage/locks.py
new file mode 100644
index 0000000..0789f89
--- /dev/null
+++ b/usr/lib/portage/pym/portage/locks.py
@@ -0,0 +1,557 @@
+# portage: Lock management code
+# Copyright 2004-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
+ "hardlock_name", "hardlink_is_mine", "hardlink_lockfile", \
+ "unhardlink_lockfile", "hardlock_cleanup"]
+
+import errno
+import fcntl
+import platform
+import sys
+import time
+import warnings
+
+import portage
+from portage import os, _encodings, _unicode_decode
+from portage.exception import DirectoryNotFound, FileNotFound, \
+ InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+HARDLINK_FD = -2
+_HARDLINK_POLL_LATENCY = 3 # seconds
+_default_lock_fn = fcntl.lockf
+
+if platform.python_implementation() == 'PyPy':
+ # workaround for https://bugs.pypy.org/issue747
+ _default_lock_fn = fcntl.flock
+
+# Used by emerge in order to disable the "waiting for lock" message
+# so that it doesn't interfere with the status display.
+_quiet = False
+
+
+_open_fds = set()
+
+def _close_fds():
+ """
+ This is intended to be called after a fork, in order to close file
+ descriptors for locks held by the parent process. This can be called
+ safely after a fork without exec, unlike the _setup_pipes close_fds
+ behavior.
+ """
+ while _open_fds:
+ os.close(_open_fds.pop())
+
+def lockdir(mydir, flags=0):
+ return lockfile(mydir, wantnewlockfile=1, flags=flags)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
+ waiting_msg=None, flags=0):
+ """
+ If wantnewlockfile is True then this creates a lockfile in the parent
+ directory as the file: '.' + basename + '.portage_lockfile'.
+ """
+
+ if not mypath:
+ raise InvalidData(_("Empty path given"))
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
+ # Support for file object or integer file descriptor parameters is
+ # deprecated due to ambiguity in whether or not it's safe to close
+ # the file descriptor, making it prone to "Bad file descriptor" errors
+ # or file descriptor leaks.
+ if isinstance(mypath, basestring) and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ lockfilename_path = mypath
+ if hasattr(mypath, 'fileno'):
+ warnings.warn("portage.locks.lockfile() support for "
+ "file object parameters is deprecated. Use a file path instead.",
+ DeprecationWarning, stacklevel=2)
+ lockfilename_path = getattr(mypath, 'name', None)
+ mypath = mypath.fileno()
+ if isinstance(mypath, int):
+ warnings.warn("portage.locks.lockfile() support for integer file "
+ "descriptor parameters is deprecated. Use a file path instead.",
+ DeprecationWarning, stacklevel=2)
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ base, tail = os.path.split(mypath)
+ lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+ lockfilename_path = lockfilename
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if isinstance(mypath, basestring):
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise DirectoryNotFound(os.path.dirname(mypath))
+ preexisting = os.path.exists(lockfilename)
+ old_mask = os.umask(000)
+ try:
+ try:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+ except OSError as e:
+ func_call = "open('%s')" % lockfilename
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ else:
+ raise
+
+ if not preexisting:
+ try:
+ if os.stat(lockfilename).st_gid != portage_gid:
+ os.chown(lockfilename, -1, portage_gid)
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return lockfile(mypath,
+ wantnewlockfile=wantnewlockfile,
+ unlinkfile=unlinkfile, waiting_msg=waiting_msg,
+ flags=flags)
+ else:
+ writemsg("%s: chown('%s', -1, %d)\n" % \
+ (e, lockfilename, portage_gid), noiselevel=-1)
+ writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+ lockfilename, noiselevel=-1)
+ writemsg(_("Group IDs of current user: %s\n") % \
+ " ".join(str(n) for n in os.getgroups()),
+ noiselevel=-1)
+ finally:
+ os.umask(old_mask)
+
+ elif isinstance(mypath, int):
+ myfd = mypath
+
+ else:
+ raise ValueError(_("Unknown type passed in '%s': '%s'") % \
+ (type(mypath), mypath))
+
+	# Try a non-blocking lock first; if it is already held, print a message
+	# saying we're waiting on the lockfile, then fall back to a blocking attempt.
+ locking_method = _default_lock_fn
+ try:
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ raise IOError(errno.ENOSYS, "Function not implemented")
+ locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError as e:
+ if not hasattr(e, "errno"):
+ raise
+ if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
+			# resource temporarily unavailable; e.g., someone beat us to the lock
+ if flags & os.O_NONBLOCK:
+ os.close(myfd)
+ raise TryAgain(mypath)
+
+ global _quiet
+ if _quiet:
+ out = None
+ else:
+ out = portage.output.EOutput()
+ if waiting_msg is None:
+ if isinstance(mypath, int):
+ waiting_msg = _("waiting for lock on fd %i") % myfd
+ else:
+ waiting_msg = _("waiting for lock on %s") % lockfilename
+ if out is not None:
+ out.ebegin(waiting_msg)
+ # try for the exclusive lock now.
+ enolock_msg_shown = False
+ while True:
+ try:
+ locking_method(myfd, fcntl.LOCK_EX)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOLCK:
+ # This is known to occur on Solaris NFS (see
+ # bug #462694). Assume that the error is due
+ # to temporary exhaustion of record locks,
+ # and loop until one becomes available.
+ if not enolock_msg_shown:
+ enolock_msg_shown = True
+ if isinstance(mypath, int):
+ context_desc = _("Error while waiting "
+ "to lock fd %i") % myfd
+ else:
+ context_desc = _("Error while waiting "
+ "to lock '%s'") % lockfilename
+ writemsg("\n!!! %s: %s\n" % (context_desc, e),
+ noiselevel=-1)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+ continue
+
+ if out is not None:
+ out.eend(1, str(e))
+ raise
+ else:
+ break
+
+ if out is not None:
+ out.eend(os.EX_OK)
+ elif e.errno in (errno.ENOSYS,):
+ # We're not allowed to lock on this FS.
+ if not isinstance(lockfilename, int):
+ # If a file object was passed in, it's not safe
+ # to close the file descriptor because it may
+ # still be in use.
+ os.close(myfd)
+ lockfilename_path = _unicode_decode(lockfilename_path,
+ encoding=_encodings['fs'], errors='strict')
+ if not isinstance(lockfilename_path, basestring):
+ raise
+ link_success = hardlink_lockfile(lockfilename_path,
+ waiting_msg=waiting_msg, flags=flags)
+ if not link_success:
+ raise
+ lockfilename = lockfilename_path
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if isinstance(lockfilename, basestring) and \
+ myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ writemsg(_("lockfile recurse\n"), 1)
+ lockfilename, myfd, unlinkfile, locking_method = lockfile(
+ mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
+ waiting_msg=waiting_msg, flags=flags)
+
+ if myfd != HARDLINK_FD:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(myfd, fcntl.F_SETFD,
+ fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ _open_fds.add(myfd)
+
+ writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
+ return (lockfilename, myfd, unlinkfile, locking_method)
+
+def _fstat_nlink(fd):
+ """
+ @param fd: an open file descriptor
+ @type fd: Integer
+ @rtype: Integer
+ @return: the current number of hardlinks to the file
+ """
+ try:
+ return os.fstat(fd).st_nlink
+ except EnvironmentError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ # Some filesystems such as CIFS return
+ # ENOENT which means st_nlink == 0.
+ return 0
+ raise
+
+def unlockfile(mytuple):
+
+	#XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename, myfd, unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename, myfd, unlinkfile, locking_method = mytuple
+ else:
+ raise InvalidData
+
+	if myfd == HARDLINK_FD:
+ unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile)
+ return True
+
+ # myfd may be None here due to myfd = mypath in lockfile()
+ if isinstance(lockfilename, basestring) and \
+ not os.path.exists(lockfilename):
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+ if myfd is not None:
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ return False
+
+ try:
+ if myfd is None:
+ myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
+ unlinkfile = 1
+ locking_method(myfd, fcntl.LOCK_UN)
+ except OSError:
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+		# the sleep here adds more time than is saved overall, so it is
+		# commented out until proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ writemsg(_("Got the lockfile...\n"), 1)
+ if _fstat_nlink(myfd) == 1:
+ os.unlink(lockfilename)
+ writemsg(_("Unlinked lockfile...\n"), 1)
+ locking_method(myfd, fcntl.LOCK_UN)
+ else:
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ return False
+ except SystemExit:
+ raise
+ except Exception as e:
+ writemsg(_("Failed to get lock... someone took it.\n"), 1)
+ writemsg(str(e) + "\n", 1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+ _open_fds.remove(myfd)
+
+ return True
+
+
+def hardlock_name(path):
+ base, tail = os.path.split(path)
+ return os.path.join(base, ".%s.hardlock-%s-%s" %
+ (tail, os.uname()[1], os.getpid()))
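+# e.g. (sketch) hardlock_name("/var/db/pkg/lock") returns
+# "/var/db/pkg/.lock.hardlock-<hostname>-<pid>", unique per host and PID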
+
+def hardlink_is_mine(link, lock):
+ try:
+ lock_st = os.stat(lock)
+ if lock_st.st_nlink == 2:
+ link_st = os.stat(link)
+ return lock_st.st_ino == link_st.st_ino and \
+ lock_st.st_dev == link_st.st_dev
+ except OSError:
+ pass
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
+ waiting_msg=None, flags=0):
+ """Does the NFS, hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE hardlink to the real lockfile, that is just a
+ placeholder on the disk.
+	If our file has 2 references (a link count of 2), then we have the lock. :)
+	Otherwise we lather, rinse, and repeat.
+ """
+
+ if max_wait is not DeprecationWarning:
+ warnings.warn("The 'max_wait' parameter of "
+ "portage.locks.hardlink_lockfile() is now unused. Use "
+ "flags=os.O_NONBLOCK instead.",
+ DeprecationWarning, stacklevel=2)
+
+ global _quiet
+ out = None
+ displayed_waiting_msg = False
+ preexisting = os.path.exists(lockfilename)
+ myhardlock = hardlock_name(lockfilename)
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
+	# myhardlock must not exist prior to our link() call, and we can
+	# safely unlink it since its file name is unique to our host and PID
+ try:
+ os.unlink(myhardlock)
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ pass
+ else:
+ func_call = "unlink('%s')" % myhardlock
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ else:
+ raise
+
+ while True:
+ # create lockfilename if it doesn't exist yet
+ try:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+ except OSError as e:
+ func_call = "open('%s')" % lockfilename
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ else:
+ raise
+ else:
+ myfd_st = None
+ try:
+ myfd_st = os.fstat(myfd)
+ if not preexisting:
+ # Don't chown the file if it is preexisting, since we
+ # want to preserve existing permissions in that case.
+ if myfd_st.st_gid != portage_gid:
+ os.fchown(myfd, -1, portage_gid)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ writemsg("%s: fchown('%s', -1, %d)\n" % \
+ (e, lockfilename, portage_gid), noiselevel=-1)
+ writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+ lockfilename, noiselevel=-1)
+ writemsg(_("Group IDs of current user: %s\n") % \
+ " ".join(str(n) for n in os.getgroups()),
+ noiselevel=-1)
+ else:
+ # another process has removed the file, so we'll have
+ # to create it again
+ continue
+ finally:
+ os.close(myfd)
+
+ # If fstat shows more than one hardlink, then it's extremely
+ # unlikely that the following link call will result in a lock,
+ # so optimize away the wasteful link call and sleep or raise
+ # TryAgain.
+ if myfd_st is not None and myfd_st.st_nlink < 2:
+ try:
+ os.link(lockfilename, myhardlock)
+ except OSError as e:
+ func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ elif e.errno in (errno.ESTALE, errno.ENOENT):
+ # another process has removed the file, so we'll have
+ # to create it again
+ continue
+ else:
+ raise
+ else:
+ if hardlink_is_mine(myhardlock, lockfilename):
+ if out is not None:
+ out.eend(os.EX_OK)
+ break
+
+ try:
+ os.unlink(myhardlock)
+ except OSError as e:
+ # This should not happen, since the file name of
+ # myhardlock is unique to our host and PID,
+ # and the above link() call succeeded.
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ raise FileNotFound(myhardlock)
+
+ if flags & os.O_NONBLOCK:
+ raise TryAgain(lockfilename)
+
+ if out is None and not _quiet:
+ out = portage.output.EOutput()
+ if out is not None and not displayed_waiting_msg:
+ displayed_waiting_msg = True
+ if waiting_msg is None:
+ waiting_msg = _("waiting for lock on %s\n") % lockfilename
+ out.ebegin(waiting_msg)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+
+ return True
+
+def unhardlink_lockfile(lockfilename, unlinkfile=True):
+ myhardlock = hardlock_name(lockfilename)
+ if unlinkfile and hardlink_is_mine(myhardlock, lockfilename):
+ # Make sure not to touch lockfilename unless we really have a lock.
+ try:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ try:
+ os.unlink(myhardlock)
+ except OSError:
+ pass
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path + "/" + x):
+ parts = x.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0][1:]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+
+ if filename not in mylist:
+ mylist[filename] = {}
+ if host not in mylist[filename]:
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append(_("Found %(count)s locks") % {"count": mycount})
+
+ for x in mylist:
+ if myhost in mylist[x] or remove_all_locks:
+ mylockname = hardlock_name(path + "/" + x)
+ if hardlink_is_mine(mylockname, path + "/" + x) or \
+ not os.path.exists(path + "/" + x) or \
+ remove_all_locks:
+ for y in mylist[x]:
+ for z in mylist[x][y]:
+ filename = path + "/." + x + ".hardlock-" + y + "-" + z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except OSError:
+ pass
+ try:
+ os.unlink(path + "/" + x)
+ results.append(_("Unlinked: ") + path + "/" + x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+
+ return results
+
diff --git a/usr/lib/portage/pym/portage/mail.py b/usr/lib/portage/pym/portage/mail.py
new file mode 100644
index 0000000..11923ee
--- /dev/null
+++ b/usr/lib/portage/pym/portage/mail.py
@@ -0,0 +1,177 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Since python ebuilds remove the 'email' module when USE=build
+# is enabled, use a local import so that
+# portage.proxy.lazyimport._preload_portage_submodules()
+# can load this module even though the 'email' module is missing.
+# The elog mail modules won't work, but at least an ImportError
+# won't cause portage to crash during stage builds. Since the
+# 'smtplib' module imports the 'email' module, that is imported
+# locally as well.
+
+import socket
+import sys
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.localization import _
+import portage
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+ def _force_ascii_if_necessary(s):
+ # Force ascii encoding in order to avoid UnicodeEncodeError
+ # from smtplib.sendmail with python3 (bug #291331).
+ s = _unicode_encode(s,
+ encoding='ascii', errors='backslashreplace')
+ s = _unicode_decode(s,
+ encoding='ascii', errors='replace')
+ return s
+
+else:
+
+ def _force_ascii_if_necessary(s):
+ return s
+
+def TextMessage(_text):
+ from email.mime.text import MIMEText
+ mimetext = MIMEText(_text)
+ mimetext.set_charset("UTF-8")
+ return mimetext
+
+def create_message(sender, recipient, subject, body, attachments=None):
+
+ from email.header import Header
+ from email.mime.base import MIMEBase as BaseMessage
+ from email.mime.multipart import MIMEMultipart as MultipartMessage
+ from email.utils import formatdate
+
+ if sys.hexversion < 0x3000000:
+ sender = _unicode_encode(sender,
+ encoding=_encodings['content'], errors='strict')
+ recipient = _unicode_encode(recipient,
+ encoding=_encodings['content'], errors='strict')
+ subject = _unicode_encode(subject,
+ encoding=_encodings['content'], errors='backslashreplace')
+ body = _unicode_encode(body,
+ encoding=_encodings['content'], errors='backslashreplace')
+
+	if attachments is None:
+ mymessage = TextMessage(body)
+ else:
+ mymessage = MultipartMessage()
+ mymessage.attach(TextMessage(body))
+ for x in attachments:
+ if isinstance(x, BaseMessage):
+ mymessage.attach(x)
+ elif isinstance(x, basestring):
+ if sys.hexversion < 0x3000000:
+ x = _unicode_encode(x,
+ encoding=_encodings['content'],
+ errors='backslashreplace')
+ mymessage.attach(TextMessage(x))
+ else:
+ raise portage.exception.PortageException(_("Can't handle type of attachment: %s") % type(x))
+
+ mymessage.set_unixfrom(sender)
+ mymessage["To"] = recipient
+ mymessage["From"] = sender
+
+ # Use Header as a workaround so that long subject lines are wrapped
+ # correctly by <=python-2.6 (gentoo bug #263370, python issue #1974).
+ # Also, need to force ascii for python3, in order to avoid
+ # UnicodeEncodeError with non-ascii characters:
+ # File "/usr/lib/python3.1/email/header.py", line 189, in __init__
+ # self.append(s, charset, errors)
+ # File "/usr/lib/python3.1/email/header.py", line 262, in append
+ # input_bytes = s.encode(input_charset, errors)
+ #UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-9: ordinal not in range(128)
+ mymessage["Subject"] = Header(_force_ascii_if_necessary(subject))
+ mymessage["Date"] = formatdate(localtime=True)
+
+ return mymessage
+
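+# Usage sketch (hypothetical addresses):
+#   msg = create_message("portage@localhost", "root@localhost",
+#       "elog summary", "log body")
+#   send_mail(mysettings, msg)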
+def send_mail(mysettings, message):
+
+ import smtplib
+
+ mymailhost = "localhost"
+ mymailport = 25
+ mymailuser = ""
+ mymailpasswd = ""
+ myrecipient = "root@localhost"
+
+ # Syntax for PORTAGE_ELOG_MAILURI (if defined):
+ # address [[user:passwd@]mailserver[:port]]
+ # where address: recipient address
+ # user: username for smtp auth (defaults to none)
+ # passwd: password for smtp auth (defaults to none)
+ # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+ # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+ # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
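+	# Example (hypothetical values):
+	#   PORTAGE_ELOG_MAILURI="root@localhost user:pass@smtp.example.com:25"
+	# (a port of 100587 would request STARTTLS on port 587)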
+ if " " in mysettings.get("PORTAGE_ELOG_MAILURI", ""):
+ myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+ if "@" in mymailuri:
+ myauthdata, myconndata = mymailuri.rsplit("@", 1)
+ try:
+ mymailuser, mymailpasswd = myauthdata.split(":")
+ except ValueError:
+ print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
+ else:
+ myconndata = mymailuri
+ if ":" in myconndata:
+ mymailhost, mymailport = myconndata.split(":")
+ else:
+ mymailhost = myconndata
+ else:
+ myrecipient = mysettings.get("PORTAGE_ELOG_MAILURI", "")
+
+ myfrom = message.get("From")
+
+ if sys.hexversion < 0x3000000:
+ myrecipient = _unicode_encode(myrecipient,
+ encoding=_encodings['content'], errors='strict')
+ mymailhost = _unicode_encode(mymailhost,
+ encoding=_encodings['content'], errors='strict')
+ mymailport = _unicode_encode(mymailport,
+ encoding=_encodings['content'], errors='strict')
+ myfrom = _unicode_encode(myfrom,
+ encoding=_encodings['content'], errors='strict')
+ mymailuser = _unicode_encode(mymailuser,
+ encoding=_encodings['content'], errors='strict')
+ mymailpasswd = _unicode_encode(mymailpasswd,
+ encoding=_encodings['content'], errors='strict')
+
+ # user wants to use a sendmail binary instead of smtp
+ if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+ fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+ fd.write(_force_ascii_if_necessary(message.as_string()))
+		if fd.close() is not None:
+ sys.stderr.write(_("!!! %s returned with a non-zero exit code. This generally indicates an error.\n") % mymailhost)
+ else:
+ try:
+ if int(mymailport) > 100000:
+ myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+ myconn.ehlo()
+ if not myconn.has_extn("STARTTLS"):
+ raise portage.exception.PortageException(_("!!! TLS support requested for logmail but not supported by server"))
+ myconn.starttls()
+ myconn.ehlo()
+ else:
+ myconn = smtplib.SMTP(mymailhost, mymailport)
+ if mymailuser != "" and mymailpasswd != "":
+ myconn.login(mymailuser, mymailpasswd)
+
+ message_str = _force_ascii_if_necessary(message.as_string())
+ myconn.sendmail(myfrom, myrecipient, message_str)
+ myconn.quit()
+ except smtplib.SMTPException as e:
+ raise portage.exception.PortageException(_("!!! An error occurred while trying to send logmail:\n")+str(e))
+ except socket.error as e:
+ raise portage.exception.PortageException(_("!!! A network error occurred while trying to send logmail:\n%s\nSure you configured PORTAGE_ELOG_MAILURI correctly?") % str(e))
+ return
+
diff --git a/usr/lib/portage/pym/portage/manifest.py b/usr/lib/portage/pym/portage/manifest.py
new file mode 100644
index 0000000..3936b9a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/manifest.py
@@ -0,0 +1,650 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import re
+import sys
+import warnings
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_filter_unaccelarated_hashes',
+ 'portage.repository.config:_find_invalid_path_char',
+ 'portage.util:write_atomic',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import DigestException, FileNotFound, \
+ InvalidDataType, MissingParameter, PermissionDenied, \
+ PortageException, PortagePackageException
+from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS,
+ MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH)
+from portage.localization import _
+
+_manifest_re = re.compile(
+ r'^(' + '|'.join(MANIFEST2_IDENTIFIERS) + r') (.*)( \d+( \S+ \S+)+)$',
+ re.UNICODE)
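+# A typical Manifest2 line matched by _manifest_re (digests abbreviated):
+#   DIST foo-1.0.tar.gz 327680 SHA256 deadbeef... WHIRLPOOL cafef00d...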
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+else:
+ _unicode = unicode
+
+class FileNotInManifestException(PortageException):
+ pass
+
+def manifest2AuxfileFilter(filename):
+ filename = filename.strip(os.sep)
+ mysplit = filename.split(os.path.sep)
+ if "CVS" in mysplit:
+ return False
+ for x in mysplit:
+ if x[:1] == '.':
+ return False
+ return not filename[:7] == 'digest-'
+
+def manifest2MiscfileFilter(filename):
+ return not (filename == "Manifest" or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+ """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
+ if filename.startswith("files" + os.sep + "digest-"):
+ return None
+ if filename.startswith("files" + os.sep):
+ return "AUX"
+ elif filename.endswith(".ebuild"):
+ return "EBUILD"
+ elif filename in ["ChangeLog", "metadata.xml"]:
+ return "MISC"
+ else:
+ return "DIST"
+
+def guessThinManifestFileType(filename):
+	ftype = guessManifestFileType(filename)
+	if ftype != "DIST":
+		return None
+	return "DIST"
+
+def parseManifest2(line):
+ if not isinstance(line, basestring):
+ line = ' '.join(line)
+ myentry = None
+ match = _manifest_re.match(line)
+ if match is not None:
+ tokens = match.group(3).split()
+ hashes = dict(zip(tokens[1::2], tokens[2::2]))
+ hashes["size"] = int(tokens[0])
+ myentry = Manifest2Entry(type=match.group(1),
+ name=match.group(2), hashes=hashes)
+ return myentry
+
+class ManifestEntry(object):
+ __slots__ = ("type", "name", "hashes")
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+class Manifest2Entry(ManifestEntry):
+ def __str__(self):
+ myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+ myhashkeys = list(self.hashes)
+ myhashkeys.remove("size")
+ myhashkeys.sort()
+ for h in myhashkeys:
+ myline += " " + h + " " + str(self.hashes[h])
+ return myline
+
+ def __eq__(self, other):
+ if not isinstance(other, Manifest2Entry) or \
+ self.type != other.type or \
+ self.name != other.name or \
+ self.hashes != other.hashes:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['repo.content'], errors='strict')
+
+class Manifest(object):
+ parsers = (parseManifest2,)
+ def __init__(self, pkgdir, distdir=None, fetchlist_dict=None,
+ manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
+ allow_missing=False, allow_create=True, hashes=None,
+ find_invalid_path_char=None):
+ """ Create new Manifest instance for package in pkgdir.
+		Do not parse the Manifest file if from_scratch == True (only for internal use).
+ The fetchlist_dict parameter is required only for generation of
+ a Manifest (not needed for parsing and checking sums).
+ If thin is specified, then the manifest carries only info for
+ distfiles."""
+
+ if manifest1_compat is not DeprecationWarning:
+ warnings.warn("The manifest1_compat parameter of the "
+ "portage.manifest.Manifest constructor is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ if find_invalid_path_char is None:
+ find_invalid_path_char = _find_invalid_path_char
+ self._find_invalid_path_char = find_invalid_path_char
+ self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
+ self.fhashdict = {}
+ self.hashes = set()
+
+ if hashes is None:
+ hashes = MANIFEST2_HASH_DEFAULTS
+
+ self.hashes.update(hashes.intersection(MANIFEST2_HASH_FUNCTIONS))
+ self.hashes.difference_update(hashname for hashname in \
+ list(self.hashes) if hashname not in hashfunc_map)
+ self.hashes.add("size")
+ self.hashes.add(MANIFEST2_REQUIRED_HASH)
+ for t in MANIFEST2_IDENTIFIERS:
+ self.fhashdict[t] = {}
+ if not from_scratch:
+ self._read()
+		if fetchlist_dict is not None:
+ self.fetchlist_dict = fetchlist_dict
+ else:
+ self.fetchlist_dict = {}
+ self.distdir = distdir
+ self.thin = thin
+ if thin:
+ self.guessType = guessThinManifestFileType
+ else:
+ self.guessType = guessManifestFileType
+ self.allow_missing = allow_missing
+ self.allow_create = allow_create
+
+ def getFullname(self):
+ """ Returns the absolute path to the Manifest file for this instance """
+ return os.path.join(self.pkgdir, "Manifest")
+
+ def getDigests(self):
+ """ Compability function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+ rval = {}
+ for t in MANIFEST2_IDENTIFIERS:
+ rval.update(self.fhashdict[t])
+ return rval
+
+ def getTypeDigests(self, ftype):
+ """ Similar to getDigests(), but restricted to files of the given type. """
+ return self.fhashdict[ftype]
+
+ def _readManifest(self, file_path, myhashdict=None, **kwargs):
+ """Parse a manifest. If myhashdict is given then data will be added too it.
+ Otherwise, a new dict will be created and returned."""
+ try:
+ with io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(f, myhashdict=myhashdict, **kwargs)
+ return myhashdict
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+ def _read(self):
+ """ Parse Manifest file for this instance """
+ try:
+ self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+ except FileNotFound:
+ pass
+
+ def _parseManifestLines(self, mylines):
+ """Parse manifest lines and return a list of manifest entries."""
+ for myline in mylines:
+ myentry = None
+ for parser in self.parsers:
+ myentry = parser(myline)
+ if myentry is not None:
+ yield myentry
+ break # go to the next line
+
+ def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+ """Parse manifest entries and store the data in myhashdict. If mytype
+ is specified, it will override the type for all parsed entries."""
+ if myhashdict is None:
+ myhashdict = {}
+ for myentry in self._parseManifestLines(mylines):
+ if mytype is None:
+ myentry_type = myentry.type
+ else:
+ myentry_type = mytype
+ myhashdict.setdefault(myentry_type, {})
+ myhashdict[myentry_type].setdefault(myentry.name, {})
+ myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+ return myhashdict
+
+ def _getDigestData(self, distlist):
+ """create a hash dict for a specific list of files"""
+ myhashdict = {}
+ for myname in distlist:
+ for mytype in self.fhashdict:
+ if myname in self.fhashdict[mytype]:
+ myhashdict.setdefault(mytype, {})
+ myhashdict[mytype].setdefault(myname, {})
+ myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+ return myhashdict
+
+ def _createManifestEntries(self):
+ valid_hashes = set(MANIFEST2_HASH_FUNCTIONS)
+ valid_hashes.add('size')
+ mytypes = list(self.fhashdict)
+ mytypes.sort()
+ for t in mytypes:
+ myfiles = list(self.fhashdict[t])
+ myfiles.sort()
+ for f in myfiles:
+ myentry = Manifest2Entry(
+ type=t, name=f, hashes=self.fhashdict[t][f].copy())
+ for h in list(myentry.hashes):
+ if h not in valid_hashes:
+ del myentry.hashes[h]
+ yield myentry
+
+ def checkIntegrity(self):
+ for t in self.fhashdict:
+ for f in self.fhashdict[t]:
+ if MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
+ raise MissingParameter(_("Missing %s checksum: %s %s") %
+ (MANIFEST2_REQUIRED_HASH, t, f))
+
+ def write(self, sign=False, force=False):
+ """ Write Manifest instance to disk, optionally signing it. Returns
+ True if the Manifest is actually written, and False if the write
+ is skipped due to existing Manifest being identical."""
+ rval = False
+ if not self.allow_create:
+ return rval
+ self.checkIntegrity()
+ try:
+ myentries = list(self._createManifestEntries())
+ update_manifest = True
+ if myentries and not force:
+ try:
+ f = io.open(_unicode_encode(self.getFullname(),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ oldentries = list(self._parseManifestLines(f))
+ f.close()
+ if len(oldentries) == len(myentries):
+ update_manifest = False
+ for i in range(len(oldentries)):
+ if oldentries[i] != myentries[i]:
+ update_manifest = True
+ break
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+
+ if update_manifest:
+ if myentries or not (self.thin or self.allow_missing):
+ # If myentries is empty, don't write an empty manifest
+ # when thin or allow_missing is enabled. Except for
+ # thin manifests with no DIST entries, myentries is
+ # non-empty for all currently known use cases.
+ write_atomic(self.getFullname(), "".join("%s\n" %
+ _unicode(myentry) for myentry in myentries))
+ rval = True
+ else:
+ # With thin manifest, there's no need to have
+ # a Manifest file if there are no DIST entries.
+ try:
+ os.unlink(self.getFullname())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ rval = True
+
+ if sign:
+ self.sign()
+ except (IOError, OSError) as e:
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(str(e))
+ raise
+ return rval
+
+ def sign(self):
+ """ Sign the Manifest """
+ raise NotImplementedError()
+
+ def validateSignature(self):
+ """ Validate signature on Manifest """
+ raise NotImplementedError()
+
+ def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+ """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+ if ftype == "AUX" and not fname.startswith("files/"):
+ fname = os.path.join("files", fname)
+ if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+ raise FileNotFound(fname)
+		if ftype not in MANIFEST2_IDENTIFIERS:
+			raise InvalidDataType(ftype)
+		if ftype == "AUX" and fname.startswith("files"):
+			fname = fname[6:]
+		self.fhashdict[ftype][fname] = {}
+		if hashdict is not None:
+			self.fhashdict[ftype][fname].update(hashdict)
+		if MANIFEST2_REQUIRED_HASH not in self.fhashdict[ftype][fname]:
+ self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+
+ def removeFile(self, ftype, fname):
+ """ Remove given entry from Manifest """
+ del self.fhashdict[ftype][fname]
+
+ def hasFile(self, ftype, fname):
+ """ Return whether the Manifest contains an entry for the given type,filename pair """
+ return (fname in self.fhashdict[ftype])
+
+ def findFile(self, fname):
+ """ Return entrytype of the given file if present in Manifest or None if not present """
+ for t in MANIFEST2_IDENTIFIERS:
+ if fname in self.fhashdict[t]:
+ return t
+ return None
+
+ def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+ assumeDistHashesAlways=False, requiredDistfiles=[]):
+ """ Recreate this Manifest from scratch. This will not use any
+ existing checksums unless assumeDistHashesSometimes or
+ assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+ cause DIST checksums to be reused if the file doesn't exist in
+ DISTDIR). The requiredDistfiles parameter specifies a list of
+ distfiles to raise a FileNotFound exception for (if no file or existing
+ checksums are available), and defaults to all distfiles when not
+ specified."""
+ if not self.allow_create:
+ return
+ if checkExisting:
+ self.checkAllHashes()
+ if assumeDistHashesSometimes or assumeDistHashesAlways:
+ distfilehashes = self.fhashdict["DIST"]
+ else:
+ distfilehashes = {}
+ self.__init__(self.pkgdir, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, from_scratch=True,
+ thin=self.thin, allow_missing=self.allow_missing,
+ allow_create=self.allow_create, hashes=self.hashes,
+ find_invalid_path_char=self._find_invalid_path_char)
+ pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
+ cat = self._pkgdir_category()
+
+ pkgdir = self.pkgdir
+ if self.thin:
+ cpvlist = self._update_thin_pkgdir(cat, pn, pkgdir)
+ else:
+ cpvlist = self._update_thick_pkgdir(cat, pn, pkgdir)
+
+ distlist = set()
+ for cpv in cpvlist:
+ distlist.update(self._getCpvDistfiles(cpv))
+
+ if requiredDistfiles is None:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option (no distfiles are required).
+ requiredDistfiles = set()
+ elif len(requiredDistfiles) == 0:
+ # repoman passes in an empty list, which implies that all distfiles
+ # are required.
+ requiredDistfiles = distlist.copy()
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.add(MANIFEST2_REQUIRED_HASH)
+ for f in distlist:
+ fname = os.path.join(self.distdir, f)
+ mystat = None
+ try:
+ mystat = os.stat(fname)
+ except OSError:
+ pass
+ if f in distfilehashes and \
+ not required_hash_types.difference(distfilehashes[f]) and \
+ ((assumeDistHashesSometimes and mystat is None) or \
+ (assumeDistHashesAlways and mystat is None) or \
+ (assumeDistHashesAlways and mystat is not None and \
+ set(distfilehashes[f]) == set(self.hashes) and \
+ distfilehashes[f]["size"] == mystat.st_size)):
+ self.fhashdict["DIST"][f] = distfilehashes[f]
+ else:
+ try:
+ self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+ except FileNotFound:
+ if f in requiredDistfiles:
+ raise
+
+ def _is_cpv(self, cat, pn, filename):
+ if not filename.endswith(".ebuild"):
+ return None
+ pf = filename[:-7]
+ ps = portage.versions._pkgsplit(pf)
+ cpv = "%s/%s" % (cat, pf)
+ if not ps:
+ raise PortagePackageException(
+ _("Invalid package name: '%s'") % cpv)
+ if ps[0] != pn:
+ raise PortagePackageException(
+ _("Package name does not "
+ "match directory name: '%s'") % cpv)
+ return cpv
+
+ def _update_thin_pkgdir(self, cat, pn, pkgdir):
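+		# os.walk() plus an immediate break yields just the top-level
+		# listing of pkgdir (its direct subdirs and files)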
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+ break
+ cpvlist = []
+ for f in pkgdir_files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == '.':
+ continue
+ pf = self._is_cpv(cat, pn, f)
+ if pf is not None:
+ cpvlist.append(pf)
+ return cpvlist
+
+ def _update_thick_pkgdir(self, cat, pn, pkgdir):
+ cpvlist = []
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+ break
+ for f in pkgdir_files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == ".":
+ continue
+ pf = self._is_cpv(cat, pn, f)
+ if pf is not None:
+ mytype = "EBUILD"
+ cpvlist.append(pf)
+ elif self._find_invalid_path_char(f) == -1 and \
+ manifest2MiscfileFilter(f):
+ mytype = "MISC"
+ else:
+ continue
+ self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+ recursive_files = []
+
+ pkgdir = self.pkgdir
+ cut_len = len(os.path.join(pkgdir, "files") + os.sep)
+ for parentdir, dirs, files in os.walk(os.path.join(pkgdir, "files")):
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ full_path = os.path.join(parentdir, f)
+ recursive_files.append(full_path[cut_len:])
+ for f in recursive_files:
+ if self._find_invalid_path_char(f) != -1 or \
+ not manifest2AuxfileFilter(f):
+ continue
+ self.fhashdict["AUX"][f] = perform_multiple_checksums(
+ os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+ return cpvlist
+
+ def _pkgdir_category(self):
+ return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+ def _getAbsname(self, ftype, fname):
+ if ftype == "DIST":
+ absname = os.path.join(self.distdir, fname)
+ elif ftype == "AUX":
+ absname = os.path.join(self.pkgdir, "files", fname)
+ else:
+ absname = os.path.join(self.pkgdir, fname)
+ return absname
+
+ def checkAllHashes(self, ignoreMissingFiles=False):
+ for t in MANIFEST2_IDENTIFIERS:
+ self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False, hash_filter=None):
+ for f in self.fhashdict[idtype]:
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles,
+ hash_filter=hash_filter)
+
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False, hash_filter=None):
+ digests = _filter_unaccelarated_hashes(self.fhashdict[ftype][fname])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ try:
+ ok, reason = verify_all(self._getAbsname(ftype, fname), digests)
+ if not ok:
+ raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+ return ok, reason
+ except FileNotFound as e:
+ if not ignoreMissing:
+ raise
+ return False, _("File Not Found: '%s'") % str(e)
+
+ def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+ """ check the hashes for all files associated to the given cpv, include all
+ AUX files and optionally all MISC files. """
+ if not onlyDistfiles:
+ self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+ if checkMiscfiles:
+ self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+ if checkDistfiles or onlyDistfiles:
+ for f in self._getCpvDistfiles(cpv):
+ self.checkFileHashes("DIST", f, ignoreMissing=False)
+
+ def _getCpvDistfiles(self, cpv):
+ """ Get a list of all DIST files associated to the given cpv """
+ return self.fetchlist_dict[cpv]
+
+ def getDistfilesSize(self, fetchlist):
+ total_bytes = 0
+ for f in fetchlist:
+ total_bytes += int(self.fhashdict["DIST"][f]["size"])
+ return total_bytes
+
+ def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+ """ Regenerate hashes for the given file """
+ if checkExisting:
+ self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+ if not ignoreMissing and fname not in self.fhashdict[ftype]:
+ raise FileNotInManifestException(fname)
+ if fname not in self.fhashdict[ftype]:
+ self.fhashdict[ftype][fname] = {}
+ myhashkeys = list(self.hashes)
+ if reuseExisting:
+ for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+ myhashkeys.remove(k)
+ myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+ self.fhashdict[ftype][fname].update(myhashes)
+
+ def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files of the given type """
+ for fname in self.fhashdict[idtype]:
+ self.updateFileHashes(idtype, fname, checkExisting)
+
+ def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files in this Manifest. """
+ for idtype in MANIFEST2_IDENTIFIERS:
+ self.updateTypeHashes(idtype, checkExisting=checkExisting,
+ ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+ """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+ files)."""
+ self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+ self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.updateFileHashes("EBUILD", ebuildname, ignoreMissingFiles=ignoreMissingFiles)
+ for f in self._getCpvDistfiles(cpv):
+ self.updateFileHashes("DIST", f, ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateHashesGuessType(self, fname, *args, **kwargs):
+ """ Regenerate hashes for the given file (guesses the type and then
+ calls updateFileHashes)."""
+ mytype = self.guessType(fname)
+ if mytype == "AUX":
+ fname = fname[len("files" + os.sep):]
+ elif mytype is None:
+ return
+ myrealtype = self.findFile(fname)
+ if myrealtype is not None:
+ mytype = myrealtype
+ return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+ def getFileData(self, ftype, fname, key):
+ """ Return the value of a specific (type,filename,key) triple, mainly useful
+ to get the size for distfiles."""
+ return self.fhashdict[ftype][fname][key]
+
+ def getVersions(self):
+ """ Returns a list of manifest versions present in the manifest file. """
+ rVal = []
+ mfname = self.getFullname()
+ if not os.path.exists(mfname):
+ return rVal
+ myfile = io.open(_unicode_encode(mfname,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ lines = myfile.readlines()
+ myfile.close()
+ for l in lines:
+ mysplit = l.split()
+ if len(mysplit) == 4 and mysplit[0] in MANIFEST1_HASH_FUNCTIONS \
+ and 1 not in rVal:
+ rVal.append(1)
+ elif len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS \
+ and ((len(mysplit) - 3) % 2) == 0 and 2 not in rVal:
+ rVal.append(2)
+ return rVal
+
+ def _catsplit(self, pkg_key):
+ """Split a category and package, returning a list of [cat, pkg].
+ This is compatible with portage.catsplit()"""
+ return pkg_key.split("/", 1)
diff --git a/usr/lib/portage/pym/portage/news.py b/usr/lib/portage/pym/portage/news.py
new file mode 100644
index 0000000..0d72b00
--- /dev/null
+++ b/usr/lib/portage/pym/portage/news.py
@@ -0,0 +1,424 @@
+# portage: news management code
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
+ "DisplayProfileRestriction", "DisplayKeywordRestriction",
+ "DisplayInstalledRestriction",
+ "count_unread_news", "display_news_notifications"]
+
+import io
+import logging
+import os as _os
+import re
+import portage
+from portage import OrderedDict
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import NEWS_LIB_PATH
+from portage.util import apply_secpass_permissions, ensure_dirs, \
+ grabfile, normalize_path, write_atomic, writemsg_level
+from portage.data import portage_gid
+from portage.dep import isvalidatom
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.output import colorize
+from portage.exception import InvalidLocation, OperationNotPermitted, \
+ PermissionDenied
+
+class NewsManager(object):
+ """
+ This object manages GLEP 42 style news items. It will cache news items
+ that have previously shown up and notify users when there are relevant news
+ items that apply to their packages that the user has not previously read.
+
+ Creating a news manager requires:
+ root - typically ${ROOT} see man make.conf and man emerge for details
+ news_path - path to news items; usually $REPODIR/metadata/news
+ unread_path - path to the news.repoid.unread file; this helps us track news items
+
+ """
+
+ def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
+ self.news_path = news_path
+ self.unread_path = unread_path
+ self.language_id = language_id
+ self.config = vardb.settings
+ self.vdb = vardb
+ self.portdb = portdb
+
+ # GLEP 42 says:
+ # All news item related files should be root owned and in the
+ # portage group with the group write (and, for directories,
+ # execute) bits set. News files should be world readable.
+ self._uid = int(self.config["PORTAGE_INST_UID"])
+ self._gid = portage_gid
+ self._file_mode = 0o0064
+ self._dir_mode = 0o0074
+ self._mode_mask = 0o0000
+
+ portdir = portdb.repositories.mainRepoLocation()
+ profiles_base = None
+ if portdir is not None:
+ profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
+ profile_path = None
+ if profiles_base is not None and portdb.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(portdb.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ self._profile_path = profile_path
+
+ def _unread_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.unread' % repoid)
+
+ def _skip_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.skip' % repoid)
+
+ def _news_dir(self, repoid):
+ repo_path = self.portdb.getRepositoryPath(repoid)
+ if repo_path is None:
+ raise AssertionError(_("Invalid repoID: %s") % repoid)
+ return os.path.join(repo_path, self.news_path)
+
+ def updateItems(self, repoid):
+ """
+ Figure out which news items from NEWS_PATH are both unread and relevant to
+ the user (according to the GLEP 42 standards of relevancy). Then add these
+ items into the news.repoid.unread file.
+ """
+
+ # Ensure that the unread path exists and is writable.
+
+ try:
+ ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
+ mode=self._dir_mode, mask=self._mode_mask)
+ except (OperationNotPermitted, PermissionDenied):
+ return
+
+ if not os.access(self.unread_path, os.W_OK):
+ return
+
+ news_dir = self._news_dir(repoid)
+ try:
+ news = _os.listdir(_unicode_encode(news_dir,
+ encoding=_encodings['fs'], errors='strict'))
+ except OSError:
+ return
+
+ skip_filename = self._skip_filename(repoid)
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ try:
+ try:
+ unread = set(grabfile(unread_filename))
+ unread_orig = unread.copy()
+ skip = set(grabfile(skip_filename))
+ skip_orig = skip.copy()
+ except PermissionDenied:
+ return
+
+ for itemid in news:
+ try:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg_level(
+ _("!!! Invalid encoding in news item name: '%s'\n") % \
+ itemid, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if itemid in skip:
+ continue
+ filename = os.path.join(news_dir, itemid,
+ itemid + "." + self.language_id + ".txt")
+ if not os.path.isfile(filename):
+ continue
+ item = NewsItem(filename, itemid)
+ if not item.isValid():
+ continue
+ if item.isRelevant(profile=self._profile_path,
+ config=self.config, vardb=self.vdb):
+ unread.add(item.name)
+ skip.add(item.name)
+
+ if unread != unread_orig:
+ write_atomic(unread_filename,
+ "".join("%s\n" % x for x in sorted(unread)))
+ apply_secpass_permissions(unread_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ if skip != skip_orig:
+ write_atomic(skip_filename,
+ "".join("%s\n" % x for x in sorted(skip)))
+ apply_secpass_permissions(skip_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ finally:
+ unlockfile(unread_lock)
+
+ def getUnreadItems(self, repoid, update=False):
+ """
+ Determine if there are unread relevant items in news.repoid.unread.
+ If there are unread items return their number.
+ If update is specified, updateItems(repoid) will be called to
+ check for new items.
+ """
+
+ if update:
+ self.updateItems(repoid)
+
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = None
+ try:
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ except (InvalidLocation, OperationNotPermitted, PermissionDenied):
+ pass
+ try:
+ try:
+ return len(grabfile(unread_filename))
+ except PermissionDenied:
+ return 0
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+_formatRE = re.compile(r"News-Item-Format:\s*([^\s]*)\s*$")
+_installedRE = re.compile(r"Display-If-Installed:(.*)\n")
+_profileRE = re.compile(r"Display-If-Profile:(.*)\n")
+_keywordRE = re.compile(r"Display-If-Keyword:(.*)\n")
+
+class NewsItem(object):
+ """
+ This class encapsulates a GLEP 42 style news item.
+ Its purpose is to wrap parsing of these news items such that portage can determine
+ whether a particular item is 'relevant' or not. This requires parsing the item
+ and determining its 'relevancy restrictions'; these include "display if installed"
+ or "display if arch: x86" and so forth.
+
+ Creation of a news item involves passing in the path to the particular news item.
+ """
+
+ def __init__(self, path, name):
+ """
+ For a given news item, we only care about it if its path is a file.
+ """
+ self.path = path
+ self.name = name
+ self._parsed = False
+ self._valid = True
+
+ def isRelevant(self, vardb, config, profile):
+ """
+ This function takes a dict of keyword arguments; one should pass in any
+ objects needed to do lookups (like what keywords we are on, what profile,
+ and a vardb so we can look at installed packages).
+ Each restriction will pluck out the items that it requires in order to
+ match, or raise a ValueError exception if a required object is not present.
+
+ Restrictions of the form Display-X are OR'd with like restrictions;
+ otherwise restrictions are AND'd. any_match tracks the ORing and
+ all_match the ANDing.
+ """
+
+ if not self._parsed:
+ self.parse()
+
+ if not self.restrictions:
+ return True
+
+ kwargs = \
+ { 'vardb' : vardb,
+ 'config' : config,
+ 'profile' : profile }
+
+ all_match = True
+ for values in self.restrictions.values():
+ any_match = False
+ for restriction in values:
+ if restriction.checkRestriction(
+ **portage._native_kwargs(kwargs)):
+ any_match = True
+ if not any_match:
+ all_match = False
+
+ return all_match
+
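+ # Worked example (not executed) of the semantics above. Suppose an item's
+ # header declares:
+ #
+ # Display-If-Keyword: amd64
+ # Display-If-Keyword: x86
+ # Display-If-Installed: www-servers/apache
+ #
+ # The two keyword restrictions are OR'd (any_match), so either arch
+ # qualifies, but that result is AND'd (all_match) with the installed
+ # restriction, so www-servers/apache must also be installed for the item
+ # to be relevant.
+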
+ def isValid(self):
+ if not self._parsed:
+ self.parse()
+ return self._valid
+
+ def parse(self):
+ f = io.open(_unicode_encode(self.path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ lines = f.readlines()
+ f.close()
+ self.restrictions = {}
+ invalids = []
+ for i, line in enumerate(lines):
+ # Optimization: skip the regex matching on lines that
+ # can never match.
+ format_match = _formatRE.match(line)
+ if format_match is not None and format_match.group(1) != '1.0':
+ invalids.append((i + 1, line.rstrip('\n')))
+ break
+ if not line.startswith('D'):
+ continue
+ restricts = { _installedRE : DisplayInstalledRestriction,
+ _profileRE : DisplayProfileRestriction,
+ _keywordRE : DisplayKeywordRestriction }
+ for regex, restriction in restricts.items():
+ match = regex.match(line)
+ if match:
+ restrict = restriction(match.groups()[0].strip())
+ if not restrict.isValid():
+ invalids.append((i + 1, line.rstrip("\n")))
+ else:
+ self.restrictions.setdefault(
+ id(restriction), []).append(restrict)
+ continue
+ if invalids:
+ self._valid = False
+ msg = []
+ msg.append(_("Invalid news item: %s") % (self.path,))
+ for lineno, line in invalids:
+ msg.append(_(" line %d: %s") % (lineno, line))
+ writemsg_level("".join("!!! %s\n" % x for x in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+ self._parsed = True
+
+class DisplayRestriction(object):
+ """
+ A base restriction object representing a restriction of display.
+ News items may have 'relevancy restrictions' that limit when they are
+ shown. In that case we need a way of figuring out whether a particular
+ item is relevant or not. If any of its restrictions are met, then it
+ is displayed.
+ """
+
+ def isValid(self):
+ return True
+
+ def checkRestriction(self, **kwargs):
+ raise NotImplementedError('Derived class should override this method')
+
+class DisplayProfileRestriction(DisplayRestriction):
+ """
+ A profile restriction where a particular item shall only be displayed
+ if the user is running a specific profile.
+ """
+
+ def __init__(self, profile):
+ self.profile = profile
+
+ def checkRestriction(self, **kwargs):
+ if self.profile == kwargs['profile']:
+ return True
+ return False
+
+class DisplayKeywordRestriction(DisplayRestriction):
+ """
+ A keyword restriction where a particular item shall only be displayed
+ if the user is running a specific keyword.
+ """
+
+ def __init__(self, keyword):
+ self.keyword = keyword
+
+ def checkRestriction(self, **kwargs):
+ if kwargs['config']['ARCH'] == self.keyword:
+ return True
+ return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+ """
+ An Installation restriction where a particular item shall only be displayed
+ if the user has that item installed.
+ """
+
+ def __init__(self, atom):
+ self.atom = atom
+
+ def isValid(self):
+ return isvalidatom(self.atom)
+
+ def checkRestriction(self, **kwargs):
+ vdb = kwargs['vardb']
+ if vdb.match(self.atom):
+ return True
+ return False
+
+def count_unread_news(portdb, vardb, repos=None, update=True):
+ """
+ Returns a dictionary mapping repos to integer counts of unread news items.
+ By default, this will scan all repos and check for new items that have
+ appeared since the last scan.
+
+ @param portdb: a portage tree database
+ @type portdb: portdbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @param repos: names of repos to scan (None means to scan all available repos)
+ @type repos: list or None
+ @param update: check for new items (default is True)
+ @type update: boolean
+ @rtype: dict
+ @return: dictionary mapping repos to integer counts of unread news items
+ """
+
+ NEWS_PATH = os.path.join("metadata", "news")
+ UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
+ news_counts = OrderedDict()
+ if repos is None:
+ repos = portdb.getRepositories()
+
+ permission_msgs = set()
+ for repo in repos:
+ try:
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+ count = manager.getUnreadItems(repo, update=update)
+ except PermissionDenied as e:
+ # NOTE: The NewsManager typically handles permission errors by
+ # returning silently, so PermissionDenied won't necessarily be
+ # raised even if we do trigger a permission error above.
+ msg = "Permission denied: '%s'\n" % (e,)
+ if msg not in permission_msgs:
+ permission_msgs.add(msg)
+ writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
+ news_counts[repo] = 0
+ else:
+ news_counts[repo] = count
+
+ return news_counts
+
+def display_news_notifications(news_counts):
+ """
+ Display a notification for unread news items, using a dictionary mapping
+ repos to integer counts, like that returned from count_unread_news().
+ """
+ newsReaderDisplay = False
+ for repo, count in news_counts.items():
+ if count > 0:
+ if not newsReaderDisplay:
+ newsReaderDisplay = True
+ print()
+ print(colorize("WARN", " * IMPORTANT:"), end=' ')
+ print("%s news items need reading for repository '%s'." % (count, repo))
+
+ if newsReaderDisplay:
+ print(colorize("WARN", " *"), end=' ')
+ print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
+ print()
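+# Usage sketch (illustrative only): count and report unread items for all
+# repos. The portdb/vardb handles are assumed to come from the initialized
+# portage.db legacy globals (hypothetical setup code).
+#
+# eroot = portage.settings["EROOT"]
+# portdb = portage.db[eroot]["porttree"].dbapi
+# vardb = portage.db[eroot]["vartree"].dbapi
+# display_news_notifications(count_unread_news(portdb, vardb))
+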
diff --git a/usr/lib/portage/pym/portage/output.py b/usr/lib/portage/pym/portage/output.py
new file mode 100644
index 0000000..7851687
--- /dev/null
+++ b/usr/lib/portage/pym/portage/output.py
@@ -0,0 +1,844 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+__docformat__ = "epytext"
+
+import errno
+import io
+import formatter
+import re
+import subprocess
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import COLOR_MAP_FILE, EPREFIX
+from portage.exception import CommandNotFound, FileNotFound, \
+ ParseError, PermissionDenied, PortageException
+from portage.localization import _
+
+havecolor = 1
+dotitles = 1
+
+_styles = {}
+"""Maps style class to tuple of attribute names."""
+
+codes = {}
+"""Maps attribute name to ansi code."""
+
+esc_seq = "\x1b["
+
+codes["normal"] = esc_seq + "0m"
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m"
+codes["reverse"] = esc_seq + "07m"
+codes["invisible"] = esc_seq + "08m"
+
+codes["no-attr"] = esc_seq + "22m"
+codes["no-standout"] = esc_seq + "23m"
+codes["no-underline"] = esc_seq + "24m"
+codes["no-blink"] = esc_seq + "25m"
+codes["no-overline"] = esc_seq + "26m"
+codes["no-reverse"] = esc_seq + "27m"
+
+codes["bg_black"] = esc_seq + "40m"
+codes["bg_darkred"] = esc_seq + "41m"
+codes["bg_darkgreen"] = esc_seq + "42m"
+codes["bg_brown"] = esc_seq + "43m"
+codes["bg_darkblue"] = esc_seq + "44m"
+codes["bg_purple"] = esc_seq + "45m"
+codes["bg_teal"] = esc_seq + "46m"
+codes["bg_lightgray"] = esc_seq + "47m"
+codes["bg_default"] = esc_seq + "49m"
+codes["bg_darkyellow"] = codes["bg_brown"]
+
+def color(fg, bg="default", attr=["normal"]):
+ mystr = codes[fg]
+ for x in [bg]+attr:
+ mystr += codes[x]
+ return mystr
+
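+# Illustrative sketch (not executed): composing an ANSI sequence by hand.
+# Note that the default bg="default" has no matching entry in codes as
+# defined here, so an explicit bg_* key is passed; the text renders bold red.
+#
+# seq = color("red", bg="bg_default", attr=["bold"])
+# print(seq + "merge failed" + codes["reset"])
+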
+
+ansi_codes = []
+for x in range(30, 38):
+ ansi_codes.append("%im" % x)
+ ansi_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+ '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+ '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in range(len(rgb_ansi_colors)):
+ codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
+
+del x
+
+codes["black"] = codes["0x000000"]
+codes["darkgray"] = codes["0x555555"]
+
+codes["red"] = codes["0xFF5555"]
+codes["darkred"] = codes["0xAA0000"]
+
+codes["green"] = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"] = codes["0xFFFF55"]
+codes["brown"] = codes["0xAA5500"]
+
+codes["blue"] = codes["0x5555FF"]
+codes["darkblue"] = codes["0x0000AA"]
+
+codes["fuchsia"] = codes["0xFF55FF"]
+codes["purple"] = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"] = codes["0x00AAAA"]
+
+codes["white"] = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"] = codes["turquoise"]
+# Some terminals have darkyellow instead of brown.
+codes["0xAAAA00"] = codes["brown"]
+codes["darkyellow"] = codes["0xAAAA00"]
+
+
+
+# Colors from /etc/init.d/functions.sh
+_styles["NORMAL"] = ( "normal", )
+_styles["GOOD"] = ( "green", )
+_styles["WARN"] = ( "yellow", )
+_styles["BAD"] = ( "red", )
+_styles["HILITE"] = ( "teal", )
+_styles["BRACKET"] = ( "blue", )
+
+# Portage functions
+_styles["INFORM"] = ( "darkgreen", )
+_styles["UNMERGE_WARN"] = ( "red", )
+_styles["SECURITY_WARN"] = ( "red", )
+_styles["MERGE_LIST_PROGRESS"] = ( "yellow", )
+_styles["PKG_BLOCKER"] = ( "red", )
+_styles["PKG_BLOCKER_SATISFIED"] = ( "darkblue", )
+_styles["PKG_MERGE"] = ( "darkgreen", )
+_styles["PKG_MERGE_SYSTEM"] = ( "darkgreen", )
+_styles["PKG_MERGE_WORLD"] = ( "green", )
+_styles["PKG_BINARY_MERGE"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_WORLD"] = ( "fuchsia", )
+_styles["PKG_UNINSTALL"] = ( "red", )
+_styles["PKG_NOMERGE"] = ( "darkblue", )
+_styles["PKG_NOMERGE_SYSTEM"] = ( "darkblue", )
+_styles["PKG_NOMERGE_WORLD"] = ( "blue", )
+_styles["PROMPT_CHOICE_DEFAULT"] = ( "green", )
+_styles["PROMPT_CHOICE_OTHER"] = ( "red", )
+
+def _parse_color_map(config_root='/', onerror=None):
+ """
+ Parse /etc/portage/color.map and update the global codes and _styles
+ dicts in place with any definitions found there.
+
+ @param onerror: an optional callback to handle any ParseError that would
+ otherwise be raised
+ @type onerror: callable
+ @rtype: None
+ @return: None; the codes and _styles dicts are modified in place
+ """
+ global codes, _styles
+ myfile = os.path.join(config_root, COLOR_MAP_FILE)
+ ansi_code_pattern = re.compile("^[0-9;]*m$")
+ quotes = '\'"'
+ def strip_quotes(token):
+ if token[0] in quotes and token[0] == token[-1]:
+ token = token[1:-1]
+ return token
+
+ try:
+ with io.open(_unicode_encode(myfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ for lineno, line in enumerate(lines, 1):
+ commenter_pos = line.find("#")
+ if commenter_pos != -1:
+ line = line[:commenter_pos]
+ line = line.strip()
+
+ if len(line) == 0:
+ continue
+
+ split_line = line.split("=")
+ if len(split_line) != 2:
+ e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
+ (myfile, lineno))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+
+ k = strip_quotes(split_line[0].strip())
+ v = strip_quotes(split_line[1].strip())
+ if not k in _styles and not k in codes:
+ e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
+ (myfile, lineno, k))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+ if ansi_code_pattern.match(v):
+ if k in _styles:
+ _styles[k] = ( esc_seq + v, )
+ elif k in codes:
+ codes[k] = esc_seq + v
+ else:
+ code_list = []
+ for x in v.split():
+ if x in codes:
+ if k in _styles:
+ code_list.append(x)
+ elif k in codes:
+ code_list.append(codes[x])
+ else:
+ e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
+ (myfile, lineno, x))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ if k in _styles:
+ _styles[k] = tuple(code_list)
+ elif k in codes:
+ codes[k] = "".join(code_list)
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(myfile)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(myfile)
+ raise
+
+def nc_len(mystr):
+ # Strip ANSI escape sequences before measuring; note that the '[' in
+ # esc_seq doubles as the opening bracket of the [^m]+ character class.
+ tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+ return len(tmp)
+
+_legal_terms_re = re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix)')
+_disable_xtermTitle = None
+_max_xtermTitle_len = 253
+
+def xtermTitle(mystr, raw=False):
+ global _disable_xtermTitle
+ if _disable_xtermTitle is None:
+ _disable_xtermTitle = not (sys.__stderr__.isatty() and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None)
+
+ if dotitles and not _disable_xtermTitle:
+ # If the title string is too big then the terminal can
+ # misbehave. Therefore, truncate it if it's too big.
+ if len(mystr) > _max_xtermTitle_len:
+ mystr = mystr[:_max_xtermTitle_len]
+ if not raw:
+ mystr = '\x1b]0;%s\x07' % mystr
+
+ # avoid potential UnicodeEncodeError
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ f = sys.stderr
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(mystr)
+ f.flush()
+
+default_xterm_title = None
+
+def xtermTitleReset():
+ global default_xterm_title
+ if default_xterm_title is None:
+ prompt_command = os.environ.get('PROMPT_COMMAND')
+ if prompt_command == "":
+ default_xterm_title = ""
+ elif prompt_command is not None:
+ if dotitles and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None and \
+ sys.__stderr__.isatty():
+ from portage.process import find_binary, spawn
+ shell = os.environ.get("SHELL")
+ if not shell or not os.access(shell, os.EX_OK):
+ shell = find_binary("sh")
+ if shell:
+ spawn([shell, "-c", prompt_command], env=os.environ,
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stderr__.fileno(),
+ 2: sys.__stderr__.fileno()
+ })
+ else:
+ os.system(prompt_command)
+ return
+ else:
+ pwd = os.environ.get('PWD','')
+ home = os.environ.get('HOME', '')
+ if home != '' and pwd.startswith(home):
+ pwd = '~' + pwd[len(home):]
+ default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+ os.environ.get('LOGNAME', ''),
+ os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
+ xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+ "turn off title setting"
+ global dotitles
+ dotitles = 0
+
+def nocolor():
+ "turn off colorization"
+ global havecolor
+ havecolor = 0
+
+def resetColor():
+ return codes["reset"]
+
+def style_to_ansi_code(style):
+ """
+ @param style: A style name
+ @type style: String
+ @rtype: String
+ @return: A string containing one or more ansi escape codes that are
+ used to render the given style.
+ """
+ ret = ""
+ for attr_name in _styles[style]:
+ # allow stuff that has found its way through ansi_code_pattern
+ ret += codes.get(attr_name, attr_name)
+ return ret
+
+def colormap():
+ mycolors = []
+ for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"):
+ mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c)))
+ return "\n".join(mycolors)
+
+def colorize(color_key, text):
+ global havecolor
+ if havecolor:
+ if color_key in codes:
+ return codes[color_key] + text + codes["reset"]
+ elif color_key in _styles:
+ return style_to_ansi_code(color_key) + text + codes["reset"]
+ else:
+ return text
+ else:
+ return text
+
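+# For example (illustrative only), both raw color names and style classes
+# work as keys, and colorization degrades to plain text after nocolor():
+#
+# colorize("green", "ok") # looked up via the codes mapping
+# colorize("GOOD", "ok") # looked up via the _styles mapping
+# nocolor(); colorize("GOOD", "ok") # returns "ok" unchanged
+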
+compat_functions_colors = [
+ "bold", "white", "teal", "turquoise", "darkteal",
+ "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow",
+ "brown", "darkyellow", "red", "darkred",
+]
+
+class create_color_func(object):
+ __slots__ = ("_color_key",)
+ def __init__(self, color_key):
+ self._color_key = color_key
+ def __call__(self, text):
+ return colorize(self._color_key, text)
+
+for c in compat_functions_colors:
+ globals()[c] = create_color_func(c)
+
+class ConsoleStyleFile(object):
+ """
+ A file-like object that behaves something like
+ the colorize() function. Style identifiers
+ passed in via the new_styles() method will be used to
+ apply console codes to output.
+ """
+ def __init__(self, f):
+ self._file = f
+ self._styles = None
+ self.write_listener = None
+
+ def new_styles(self, styles):
+ self._styles = styles
+
+ def write(self, s):
+ # In python-2.6, DumbWriter.send_line_break() can write
+ # non-unicode '\n' which fails with TypeError if self._file
+ # is a text stream such as io.StringIO. Therefore, make sure
+ # input is converted to unicode when necessary.
+ s = _unicode_decode(s)
+ global havecolor
+ if havecolor and self._styles:
+ styled_s = []
+ for style in self._styles:
+ styled_s.append(style_to_ansi_code(style))
+ styled_s.append(s)
+ styled_s.append(codes["reset"])
+ self._write(self._file, "".join(styled_s))
+ else:
+ self._write(self._file, s)
+ if self.write_listener:
+ self._write(self.write_listener, s)
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ if f in (sys.stdout, sys.stderr):
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(s)
+
+ def writelines(self, lines):
+ for s in lines:
+ self.write(s)
+
+ def flush(self):
+ self._file.flush()
+
+ def close(self):
+ self._file.close()
+
+class StyleWriter(formatter.DumbWriter):
+ """
+ This is just a DumbWriter with a hook in the new_styles() method
+ that passes a styles tuple as a single argument to a callable
+ style_listener attribute.
+ """
+ def __init__(self, **kwargs):
+ formatter.DumbWriter.__init__(self, **kwargs)
+ self.style_listener = None
+
+ def new_styles(self, styles):
+ formatter.DumbWriter.new_styles(self, styles)
+ if self.style_listener:
+ self.style_listener(styles)
+
+def get_term_size(fd=None):
+ """
+ Get the number of lines and columns of the tty that is connected to
+ fd. Returns a tuple of (lines, columns) or (0, 0) if an error
+ occurs. The curses module is used if available, otherwise the output of
+ `stty size` is parsed. The lines and columns values are guaranteed to be
+ greater than or equal to zero, since a negative COLUMNS variable is
+ known to prevent some commands from working (see bug #394091).
+ """
+ if fd is None:
+ fd = sys.stdout
+ if not hasattr(fd, 'isatty') or not fd.isatty():
+ return (0, 0)
+ try:
+ import curses
+ try:
+ curses.setupterm(term=os.environ.get("TERM", "unknown"),
+ fd=fd.fileno())
+ return curses.tigetnum('lines'), curses.tigetnum('cols')
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ try:
+ proc = subprocess.Popen(["stty", "size"],
+ stdout=subprocess.PIPE, stderr=fd)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ # stty command not found
+ return (0, 0)
+
+ out = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ out = out.split()
+ if len(out) == 2:
+ try:
+ val = (int(out[0]), int(out[1]))
+ except ValueError:
+ pass
+ else:
+ if val[0] >= 0 and val[1] >= 0:
+ return val
+ return (0, 0)
+
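+# Minimal sketch (illustrative): fall back to 80 columns when no usable tty
+# size is reported, mirroring what EOutput.__init__ does below.
+#
+# lines, columns = get_term_size()
+# if columns <= 0:
+# columns = 80
+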
+def set_term_size(lines, columns, fd):
+ """
+ Set the number of lines and columns for the tty that is connected to fd.
+ For portability, this simply calls `stty rows $lines columns $columns`.
+ """
+ from portage.process import spawn
+ cmd = ["stty", "rows", str(lines), "columns", str(columns)]
+ try:
+ spawn(cmd, env=os.environ, fd_pipes={0:fd})
+ except CommandNotFound:
+ writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
+
+class EOutput(object):
+ """
+ Performs fancy terminal formatting for status and informational messages.
+
+ The provided methods produce identical terminal output to the eponymous
+ functions in the shell script C{/sbin/functions.sh} and also accept
+ identical parameters.
+
+ This is not currently a drop-in replacement however, as the output-related
+ functions in C{/sbin/functions.sh} are oriented for use mainly by system
+ init scripts and ebuilds and their output can be customized via certain
+ C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+ customizable in this manner since it's intended for more general uses.
+ Likewise, no logging is provided.
+
+ @ivar quiet: Specifies if output should be silenced.
+ @type quiet: BooleanType
+ @ivar term_columns: Width of terminal in characters. Defaults to the value
+ specified by the shell's C{COLUMNS} variable, else to the queried tty
+ size, else to C{80}.
+ @type term_columns: IntType
+ """
+
+ def __init__(self, quiet=False):
+ self.__last_e_cmd = ""
+ self.__last_e_len = 0
+ self.quiet = quiet
+ lines, columns = get_term_size()
+ if columns <= 0:
+ columns = 80
+ self.term_columns = columns
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ writemsg(s, noiselevel=-1, fd=f)
+
+ def __eend(self, caller, errno, msg):
+ if errno == 0:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+ else:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+ if msg:
+ if caller == "eend":
+ self.eerror(msg[0])
+ elif caller == "ewend":
+ self.ewarn(msg[0])
+ if self.__last_e_cmd != "ebegin":
+ self.__last_e_len = 0
+ if not self.quiet:
+ out = sys.stdout
+ self._write(out,
+ "%*s%s\n" % ((self.term_columns - self.__last_e_len - 7),
+ "", status_brackets))
+
+ def ebegin(self, msg):
+ """
+ Shows a message indicating the start of a process.
+
+ @param msg: A very brief (shorter than one line) description of the
+ starting process.
+ @type msg: StringType
+ """
+ msg += " ..."
+ if not self.quiet:
+ self.einfon(msg)
+ self.__last_e_len = len(msg) + 3
+ self.__last_e_cmd = "ebegin"
+
+ def eend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{eerror} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} An error message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("eend", errno, msg)
+ self.__last_e_cmd = "eend"
+
+ def eerror(self, msg):
+ """
+ Shows an error message.
+
+ @param msg: A very brief (shorter than one line) error message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("BAD", " * ") + msg + "\n")
+ self.__last_e_cmd = "eerror"
+
+ def einfo(self, msg):
+ """
+ Shows an informative message terminated with a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg + "\n")
+ self.__last_e_cmd = "einfo"
+
+ def einfon(self, msg):
+ """
+ Shows an informative message terminated without a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg)
+ self.__last_e_cmd = "einfon"
+
+ def ewarn(self, msg):
+ """
+ Shows a warning message.
+
+ @param msg: A very brief (shorter than one line) warning message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("WARN", " * ") + msg + "\n")
+ self.__last_e_cmd = "ewarn"
+
+ def ewend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{ewarn} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} A warning message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("ewend", errno, msg)
+ self.__last_e_cmd = "ewend"
+
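+# Usage sketch (illustrative only) of the shell-style status helpers:
+#
+# out = EOutput()
+# out.ebegin("Updating news")
+# out.eend(0) # prints the green [ ok ] bracket at the right margin
+# out.ewarn("No repositories configured")
+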
+class ProgressBar(object):
+ """The interface is copied from the ProgressBar class from the EasyDialogs
+ module (which is Mac only)."""
+ def __init__(self, title=None, maxval=0, label=None, max_desc_length=25):
+ self._title = title or ""
+ self._maxval = maxval
+ self._label = label or ""
+ self._curval = 0
+ self._desc = ""
+ self._desc_max_length = max_desc_length
+ self._set_desc()
+
+ @property
+ def curval(self):
+ """
+ The current value (of type integer or long integer) of the progress
+ bar. The normal access methods coerce curval between 0 and maxval. This
+ attribute should not be altered directly.
+ """
+ return self._curval
+
+ @property
+ def maxval(self):
+ """
+ The maximum value (of type integer or long integer) of the progress
+ bar; the progress bar (thermometer style) is full when curval equals
+ maxval. If maxval is 0, the bar will be indeterminate (barber-pole).
+ This attribute should not be altered directly.
+ """
+ return self._maxval
+
+ def title(self, newstr):
+ """Sets the text in the title bar of the progress dialog to newstr."""
+ self._title = newstr
+ self._set_desc()
+
+ def label(self, newstr):
+ """Sets the text in the progress box of the progress dialog to newstr."""
+ self._label = newstr
+ self._set_desc()
+
+ def _set_desc(self):
+ self._desc = "%s%s" % (
+ "%s: " % self._title if self._title else "",
+ "%s" % self._label if self._label else ""
+ )
+ if len(self._desc) > self._desc_max_length: # truncate if too long
+ self._desc = "%s..." % self._desc[:self._desc_max_length - 3]
+ if len(self._desc):
+ self._desc = self._desc.ljust(self._desc_max_length)
+
+
+ def set(self, value, maxval=None):
+ """
+ Sets the progress bar's curval to value, and also maxval to max if the
+ latter is provided. value is first coerced between 0 and maxval. The
+ thermometer bar is updated to reflect the changes, including a change
+ from indeterminate to determinate or vice versa.
+ """
+ if maxval is not None:
+ self._maxval = maxval
+ if value < 0:
+ value = 0
+ elif value > self._maxval:
+ value = self._maxval
+ self._curval = value
+
+ def inc(self, n=1):
+ """Increments the progress bar's curval by n, or by 1 if n is not
+ provided. (Note that n may be negative, in which case the effect is a
+ decrement.) The progress bar is updated to reflect the change. If the
+ bar is indeterminate, this causes one ``spin'' of the barber pole. The
+ resulting curval is coerced between 0 and maxval if incrementing causes
+ it to fall outside this range.
+ """
+ self.set(self._curval+n)
+
+class TermProgressBar(ProgressBar):
+ """A tty progress bar similar to wget's."""
+ def __init__(self, fd=sys.stdout, **kwargs):
+ ProgressBar.__init__(self, **kwargs)
+ lines, self.term_columns = get_term_size(fd)
+ self.file = fd
+ self._min_columns = 11
+ self._max_columns = 80
+ # for indeterminate mode, ranges from 0.0 to 1.0
+ self._position = 0.0
+
+ def set(self, value, maxval=None):
+ ProgressBar.set(self, value, maxval=maxval)
+ self._display_image(self._create_image())
+
+ def _display_image(self, image):
+ self.file.write('\r')
+ self.file.write(image)
+ self.file.flush()
+
+ def _create_image(self):
+ cols = self.term_columns
+ if cols > self._max_columns:
+ cols = self._max_columns
+ min_columns = self._min_columns
+ curval = self._curval
+ maxval = self._maxval
+ position = self._position
+ percentage_str_width = 5
+ square_brackets_width = 2
+ if cols < percentage_str_width:
+ return ""
+ bar_space = cols - percentage_str_width - square_brackets_width - 1
+ if self._desc:
+ bar_space -= self._desc_max_length
+ if maxval == 0:
+ max_bar_width = bar_space-3
+ _percent = "".ljust(percentage_str_width)
+ if cols < min_columns:
+ return ""
+ if position <= 0.5:
+ offset = 2 * position
+ else:
+ offset = 2 * (1 - position)
+ delta = 0.5 / max_bar_width
+ position += delta
+ if position >= 1.0:
+ position = 0.0
+ # make sure it touches the ends
+ if 1.0 - position < delta:
+ position = 1.0
+ if position < 0.5 and 0.5 - position < delta:
+ position = 0.5
+ self._position = position
+ bar_width = int(offset * max_bar_width)
+ image = "%s%s%s" % (self._desc, _percent,
+ "[" + (bar_width * " ") + \
+ "<=>" + ((max_bar_width - bar_width) * " ") + "]")
+ return image
+ else:
+ percentage = 100 * curval // maxval
+ max_bar_width = bar_space - 1
+ _percent = ("%d%% " % percentage).rjust(percentage_str_width)
+ image = "%s%s" % (self._desc, _percent)
+
+ if cols < min_columns:
+ return image
+ offset = curval / maxval
+ bar_width = int(offset * max_bar_width)
+ image = image + "[" + (bar_width * "=") + \
+ ">" + ((max_bar_width - bar_width) * " ") + "]"
+ return image
+
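+# Usage sketch (illustrative only): a determinate bar driven by a loop;
+# constructing with maxval=0 instead yields the indeterminate barber-pole
+# rendering.
+#
+# bar = TermProgressBar(title="Fetching", maxval=100)
+# for done in range(0, 101, 10):
+# bar.set(done)
+# bar.file.write("\n")
+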
+_color_map_loaded = False
+
+def _init(config_root='/'):
+ """
+ Load color.map from the given config_root. This is called automatically
+ on first access of the codes or _styles attributes (unless it has already
+ been called for some other reason).
+ """
+
+ global _color_map_loaded, codes, _styles
+ if _color_map_loaded:
+ return
+
+ _color_map_loaded = True
+ codes = object.__getattribute__(codes, '_attr')
+ _styles = object.__getattribute__(_styles, '_attr')
+
+ for k, v in codes.items():
+ codes[k] = _unicode_decode(v)
+
+ for k, v in _styles.items():
+ _styles[k] = _unicode_decode(v)
+
+ try:
+ _parse_color_map(config_root=config_root,
+ onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1))
+ except FileNotFound:
+ pass
+ except PermissionDenied as e:
+ writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1)
+ del e
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ del e
+
+class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_attr',)
+
+ def __init__(self, attr):
+ portage.proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_attr', attr)
+
+ def _get_target(self):
+ _init()
+ return object.__getattribute__(self, '_attr')
+
+codes = _LazyInitColorMap(codes)
+_styles = _LazyInitColorMap(_styles)
diff --git a/usr/lib/portage/pym/portage/package/__init__.py b/usr/lib/portage/pym/portage/package/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/package/ebuild/__init__.py b/usr/lib/portage/pym/portage/package/ebuild/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/KeywordsManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/KeywordsManager.py
new file mode 100644
index 0000000..af606f1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -0,0 +1,335 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'KeywordsManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
+from portage.localization import _
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.util import grabdict_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+class KeywordsManager(object):
+ """Manager class to handle keywords processing and validation"""
+
+ def __init__(self, profiles, abs_user_config, user_config=True,
+ global_accept_keywords=""):
+ self._pkeywords_list = []
+ rawpkeywords = [grabdict_package(
+ os.path.join(x.location, "package.keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True) \
+ for x in profiles]
+ for pkeyworddict in rawpkeywords:
+ if not pkeyworddict:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in pkeyworddict.items():
+ cpdict.setdefault(k.cp, {})[k] = v
+ self._pkeywords_list.append(cpdict)
+ self._pkeywords_list = tuple(self._pkeywords_list)
+
+ self._p_accept_keywords = []
+ raw_p_accept_keywords = [grabdict_package(
+ os.path.join(x.location, "package.accept_keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True) \
+ for x in profiles]
+ for d in raw_p_accept_keywords:
+ if not d:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in d.items():
+ cpdict.setdefault(k.cp, {})[k] = tuple(v)
+ self._p_accept_keywords.append(cpdict)
+ self._p_accept_keywords = tuple(self._p_accept_keywords)
+
+ self.pkeywordsdict = ExtendedAtomDict(dict)
+
+ if user_config:
+ pkgdict = grabdict_package(
+ os.path.join(abs_user_config, "package.keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False)
+
+ for k, v in grabdict_package(
+ os.path.join(abs_user_config, "package.accept_keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False).items():
+ pkgdict.setdefault(k, []).extend(v)
+
+ accept_keywords_defaults = global_accept_keywords.split()
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ accept_keywords_defaults if keyword[:1] not in "~-")
+ for k, v in pkgdict.items():
+ # default to ~arch if no specific keyword is given
+ if not v:
+ v = accept_keywords_defaults
+ else:
+ v = tuple(v)
+ self.pkeywordsdict.setdefault(k.cp, {})[k] = v
+
+
+ def getKeywords(self, cpv, slot, keywords, repo):
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+ cp = pkg.cp
+ keywords = [[x for x in keywords.split() if x != "-*"]]
+ for pkeywords_dict in self._pkeywords_list:
+ cpdict = pkeywords_dict.get(cp)
+ if cpdict:
+ pkg_keywords = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_keywords:
+ keywords.extend(pkg_keywords)
+ return stack_lists(keywords, incremental=True)
+
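+ # Worked example (not executed): with KEYWORDS="~amd64 x86" and a profile
+ # package.keywords entry adding "amd64" for a matching atom, the stacked,
+ # incremental result contains ~amd64, x86 and amd64; any "-*" token in
+ # the ebuild's KEYWORDS is filtered out before stacking.
+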
+ def isStable(self, pkg, global_accept_keywords, backuped_accept_keywords):
+ mygroups = self.getKeywords(pkg, None, pkg._metadata["KEYWORDS"], None)
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(pkg, None, None,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ if self._getMissingKeywords(pkg, pgroups, mygroups):
+ return False
+
+ if pkg.cpv._settings.local_config:
+ # If replacing all keywords with unstable variants would mask the
+ # package, then it's considered stable.
+ unstable = []
+ for kw in mygroups:
+ if kw[:1] != "~":
+ kw = "~" + kw
+ unstable.append(kw)
+
+ return bool(self._getMissingKeywords(pkg, pgroups, set(unstable)))
+ else:
+ # For repoman, if the package has an effective stable keyword that
+ # intersects with the effective ACCEPT_KEYWORDS for the current
+ # profile, then consider it stable.
+ for kw in pgroups:
+ if kw[:1] != "~":
+ if kw in mygroups or '*' in mygroups:
+ return True
+ if kw == '*':
+ for x in mygroups:
+ if x[:1] != "~":
+ return True
+ return False
+
+ def getMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords,
+ backuped_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+ and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ # Repoman may modify this attribute as necessary.
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(cpv, slot, repo,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ # Hack: Need to check the env directly here as otherwise stacking
+ # doesn't work properly as negative values are lost in the config
+ # object (bug #139600)
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ def getRawMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ pgroups = global_accept_keywords.split()
+ pgroups = set(pgroups)
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ @staticmethod
+ def _getEgroups(egroups, mygroups):
+ """gets any keywords defined in the environment
+
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+ mygroups = list(mygroups)
+ mygroups.extend(egroups)
+ inc_pgroups = set()
+ for x in mygroups:
+ if x[:1] == "-":
+ if x == "-*":
+ inc_pgroups.clear()
+ else:
+ inc_pgroups.discard(x[1:])
+ else:
+ inc_pgroups.add(x)
+ return inc_pgroups
+
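+ # Worked example (not executed) of the incremental semantics above:
+ #
+ # _getEgroups(["-~x86", "amd64"], ["x86", "~x86"])
+ # -> set(["x86", "amd64"])
+ #
+ # and a bare "-*" token clears everything accumulated before it.
+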
+
+ @staticmethod
+ def _getMissingKeywords(cpv, pgroups, mygroups):
+ """Determines the missing keywords
+
+ @param pgroups: The pkg keywords accepted
+ @type pgroups: list
+ @param mygroups: The ebuild keywords
+ @type mygroups: list
+ """
+ match = False
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp == "*":
+ match = True
+ break
+ elif gp == "~*":
+ hastesting = True
+ for x in pgroups:
+ if x[:1] == "~":
+ match = True
+ break
+ if match:
+ break
+ elif gp in pgroups:
+ match = True
+ break
+ elif gp.startswith("~"):
+ hastesting = True
+ elif not gp.startswith("-"):
+ hasstable = True
+ if not match and \
+ ((hastesting and "~*" in pgroups) or \
+ (hasstable and "*" in pgroups) or "**" in pgroups):
+ match = True
+ if match:
+ missing = []
+ else:
+ if not mygroups:
+ # If KEYWORDS is empty then we still have to return something
+ # in order to distinguish from the case of "none missing".
+ mygroups = ["**"]
+ missing = mygroups
+ return missing
+
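+ # For example (not executed): with pgroups {"amd64"} and ebuild keywords
+ # ["~amd64"], nothing matches and ["~amd64"] is returned; accepting
+ # "~amd64" (or "**") instead yields [], meaning none missing.
+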
+
+ def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
+ """Gets any package.keywords settings for cp for the given
+ cpv, slot and repo
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+
+ pgroups = global_accept_keywords.split()
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ cp = cpv.cp
+
+ unmaskgroups = []
+ if self._p_accept_keywords:
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ pgroups if keyword[:1] not in "~-")
+ for d in self._p_accept_keywords:
+ cpdict = d.get(cp)
+ if cpdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(cpdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ if not x:
+ x = accept_keywords_defaults
+ unmaskgroups.extend(x)
+
+ pkgdict = self.pkeywordsdict.get(cp)
+ if pkgdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(pkgdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ unmaskgroups.extend(x)
+ return unmaskgroups
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/LicenseManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/LicenseManager.py
new file mode 100644
index 0000000..f76e7e2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/LicenseManager.py
@@ -0,0 +1,237 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'LicenseManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, use_reduce
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabdict, grabdict_package, writemsg
+from portage.versions import cpv_getkey, _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+
+class LicenseManager(object):
+
+ def __init__(self, license_group_locations, abs_user_config, user_config=True):
+
+ self._accept_license_str = None
+ self._accept_license = None
+ self._license_groups = {}
+ self._plicensedict = ExtendedAtomDict(dict)
+ self._undef_lic_groups = set()
+
+ if user_config:
+ license_group_locations = list(license_group_locations) + [abs_user_config]
+
+ self._read_license_groups(license_group_locations)
+
+ if user_config:
+ self._read_user_config(abs_user_config)
+
+ def _read_user_config(self, abs_user_config):
+ licdict = grabdict_package(os.path.join(
+ abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in licdict.items():
+ self._plicensedict.setdefault(k.cp, {})[k] = \
+ self.expandLicenseTokens(v)
+
+ def _read_license_groups(self, locations):
+ for loc in locations:
+ for k, v in grabdict(
+ os.path.join(loc, "license_groups")).items():
+ self._license_groups.setdefault(k, []).extend(v)
+
+ for k, v in self._license_groups.items():
+ self._license_groups[k] = frozenset(v)
+
+ def extract_global_changes(self, old=""):
+ ret = old
+ atom_license_map = self._plicensedict.get("*/*")
+ if atom_license_map is not None:
+ v = atom_license_map.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not atom_license_map:
+ #No tokens left in atom_license_map, remove it.
+ del self._plicensedict["*/*"]
+ return ret
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
+ expanded_tokens = []
+ for x in tokens:
+ expanded_tokens.extend(self._expandLicenseToken(x, None))
+ return expanded_tokens
+
+ def _expandLicenseToken(self, token, traversed_groups):
+ negate = False
+ rValue = []
+ if token.startswith("-"):
+ negate = True
+ license_name = token[1:]
+ else:
+ license_name = token
+ if not license_name.startswith("@"):
+ rValue.append(token)
+ return rValue
+ group_name = license_name[1:]
+ if traversed_groups is None:
+ traversed_groups = set()
+ license_group = self._license_groups.get(group_name)
+ if group_name in traversed_groups:
+ writemsg(_("Circular license group reference"
+ " detected in '%s'\n") % group_name, noiselevel=-1)
+ rValue.append("@"+group_name)
+ elif license_group:
+ traversed_groups.add(group_name)
+ for l in license_group:
+ if l.startswith("-"):
+ writemsg(_("Skipping invalid element %s"
+ " in license group '%s'\n") % (l, group_name),
+ noiselevel=-1)
+ else:
+ rValue.extend(self._expandLicenseToken(l, traversed_groups))
+ else:
+ if self._license_groups and \
+ group_name not in self._undef_lic_groups:
+ self._undef_lic_groups.add(group_name)
+ writemsg(_("Undefined license group '%s'\n") % group_name,
+ noiselevel=-1)
+ rValue.append("@"+group_name)
+ if negate:
+ rValue = ["-" + token for token in rValue]
+ return rValue
+
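+ # Worked example (not executed), assuming a license_groups file defining
+ # a hypothetical "@EULA" group: expanding the token "-@EULA" flattens the
+ # group and negates each member (e.g. "-SomeVendor-EULA"), while a plain
+ # token such as "GPL-2" passes through unchanged.
+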
+ def _getPkgAcceptLicense(self, cpv, slot, repo):
+ """
+ Get an ACCEPT_LICENSE list, accounting for package.license.
+ """
+ accept_license = self._accept_license
+ cp = cpv_getkey(cpv)
+ cpdict = self._plicensedict.get(cp)
+ if cpdict:
+ if not hasattr(cpv, "slot"):
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ plicense_list = ordered_by_atom_specificity(cpdict, cpv)
+ if plicense_list:
+ accept_license = list(self._accept_license)
+ for x in plicense_list:
+ accept_license.extend(x)
+ return accept_license
+
+ def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
+ """
+ Generate a pruned version of ACCEPT_LICENSE, by intersection with
+ LICENSE. This is required since otherwise ACCEPT_LICENSE might be
+ too big (bigger than ARG_MAX), causing execve() calls to fail with
+ E2BIG errors as in bug #262647.
+ """
+ try:
+ licenses = set(use_reduce(lic, uselist=use, flat=True))
+ except InvalidDependString:
+ licenses = set()
+ licenses.discard('||')
+
+ accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
+
+ if accept_license:
+ acceptable_licenses = set()
+ for x in accept_license:
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ elif x in licenses:
+ acceptable_licenses.add(x)
+
+ licenses = acceptable_licenses
+ return ' '.join(sorted(licenses))
+
+ def getMissingLicenses(self, cpv, use, lic, slot, repo):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param use: "USE" from the cpv's metadata
+ @type use: String
+ @param lic: "LICENSE" from the cpv's metadata
+ @type lic: String
+		@param slot: "SLOT" from the cpv's metadata
+		@type slot: String
+		@param repo: The package's repository
+		@type repo: String
+		@rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+
+ licenses = set(use_reduce(lic, matchall=1, flat=True))
+ licenses.discard('||')
+
+ acceptable_licenses = set()
+ for x in self._getPkgAcceptLicense(cpv, slot, repo):
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ else:
+ acceptable_licenses.add(x)
+
+ license_str = lic
+ if "?" in license_str:
+ use = use.split()
+ else:
+ use = []
+
+ license_struct = use_reduce(license_str, uselist=use, opconvert=True)
+ return self._getMaskedLicenses(license_struct, acceptable_licenses)
+
+ def _getMaskedLicenses(self, license_struct, acceptable_licenses):
+ if not license_struct:
+ return []
+ if license_struct[0] == "||":
+ ret = []
+ for element in license_struct[1:]:
+ if isinstance(element, list):
+ if element:
+ tmp = self._getMaskedLicenses(element, acceptable_licenses)
+ if not tmp:
+ return []
+ ret.extend(tmp)
+ else:
+ if element in acceptable_licenses:
+ return []
+ ret.append(element)
+ # Return all masked licenses, since we don't know which combination
+ # (if any) the user will decide to unmask.
+ return ret
+
+ ret = []
+ for element in license_struct:
+ if isinstance(element, list):
+ if element:
+ ret.extend(self._getMaskedLicenses(element,
+ acceptable_licenses))
+ else:
+ if element not in acceptable_licenses:
+ ret.append(element)
+ return ret
+
+ def set_accept_license_str(self, accept_license_str):
+ if accept_license_str != self._accept_license_str:
+ self._accept_license_str = accept_license_str
+ self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))
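
A minimal, self-contained sketch of the @-group expansion implemented by _expandLicenseToken above, using a hard-coded group table in place of license_groups files; the group names are illustrative, and unlike the real method this sketch does not warn about '-' members inside a group definition:

def expand(token, groups, seen=None):
    # Resolve one ACCEPT_LICENSE/package.license token against a
    # {group_name: member_tokens} table.
    seen = set() if seen is None else seen
    negate = token.startswith("-")
    name = token[1:] if negate else token
    if not name.startswith("@"):
        return [token]  # plain license name, returned as-is
    group = name[1:]
    if group in seen or group not in groups:
        out = ["@" + group]  # circular or undefined group: keep the literal
    else:
        seen.add(group)
        out = [t for member in groups[group]
               for t in expand(member, groups, seen)]
    return ["-" + t for t in out] if negate else out

groups = {"FSF-APPROVED": ["GPL-2", "GPL-3"], "FREE": ["@FSF-APPROVED", "MIT"]}
print(expand("@FREE", groups))   # ['GPL-2', 'GPL-3', 'MIT']
print(expand("-@FREE", groups))  # ['-GPL-2', '-GPL-3', '-MIT']
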
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/LocationsManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/LocationsManager.py
new file mode 100644
index 0000000..4427f1d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -0,0 +1,308 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'LocationsManager',
+)
+
+import collections
+import io
+import warnings
+
+import portage
+from portage import os, eapi_is_supported, _encodings, _unicode_encode
+from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
+ PROFILE_PATH, USER_CONFIG_PATH
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.exception import DirectoryNotFound, ParseError
+from portage.localization import _
+from portage.util import ensure_dirs, grabfile, \
+ normalize_path, shlex_split, writemsg
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.repository.config import parse_layout_conf, \
+ _portage1_profiles_allow_directories
+
+
+_PORTAGE1_DIRECTORIES = frozenset([
+ 'package.mask', 'package.provided',
+ 'package.use', 'package.use.mask', 'package.use.force',
+ 'use.mask', 'use.force'])
+
+_profile_node = collections.namedtuple('_profile_node',
+ 'location portage1_directories user_config')
+
+_allow_parent_colon = frozenset(
+ ["portage-2"])
+
+class LocationsManager(object):
+
+ def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
+ target_root=None):
+ self.user_profile_dir = None
+ self._local_repo_conf_path = None
+ self.eprefix = eprefix
+ self.config_root = config_root
+ self.target_root = target_root
+ self._user_config = local_config
+
+ if self.eprefix is None:
+ self.eprefix = portage.const.EPREFIX
+ elif self.eprefix:
+ self.eprefix = normalize_path(self.eprefix)
+ if self.eprefix == os.sep:
+ self.eprefix = ""
+
+ if self.config_root is None:
+ self.config_root = portage.const.EPREFIX + os.sep
+
+ self.config_root = normalize_path(os.path.abspath(
+ self.config_root)).rstrip(os.path.sep) + os.path.sep
+
+ self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
+ self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+ self.config_profile_path = config_profile_path
+
+ def load_profiles(self, repositories, known_repository_paths):
+ known_repository_paths = set(os.path.realpath(x)
+ for x in known_repository_paths)
+
+ known_repos = []
+ for x in known_repository_paths:
+ try:
+ layout_data = {"profile-formats":
+ repositories.get_repo_for_location(x).profile_formats}
+ except KeyError:
+ layout_data = parse_layout_conf(x)[0]
+ # force a trailing '/' for ease of doing startswith checks
+ known_repos.append((x + '/', layout_data))
+ known_repos = tuple(known_repos)
+
+ if self.config_profile_path is None:
+ deprecated_profile_path = os.path.join(
+ self.config_root, 'etc', 'make.profile')
+ self.config_profile_path = \
+ os.path.join(self.config_root, PROFILE_PATH)
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ if isdir_raise_eaccess(deprecated_profile_path) and not \
+ os.path.samefile(self.profile_path,
+ deprecated_profile_path):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found 2 make.profile dirs: "
+ "using '%s', ignoring '%s'") %
+ (self.profile_path, deprecated_profile_path),
+ noiselevel=-1)
+ else:
+ self.config_profile_path = deprecated_profile_path
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ else:
+ self.profile_path = None
+ else:
+ # NOTE: repoman may pass in an empty string
+ # here, in order to create an empty profile
+ # for checking dependencies of packages with
+ # empty KEYWORDS.
+ self.profile_path = self.config_profile_path
+
+
+ # The symlink might not exist or might not be a symlink.
+ self.profiles = []
+ self.profiles_complex = []
+ if self.profile_path:
+ try:
+ self._addProfile(os.path.realpath(self.profile_path),
+ repositories, known_repos)
+ except ParseError as e:
+ if not portage._sync_mode:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ self.profiles = []
+ self.profiles_complex = []
+
+ if self._user_config and self.profiles:
+ custom_prof = os.path.join(
+ self.config_root, CUSTOM_PROFILE_PATH)
+ if os.path.exists(custom_prof):
+ self.user_profile_dir = custom_prof
+ self.profiles.append(custom_prof)
+ self.profiles_complex.append(
+ _profile_node(custom_prof, True, True))
+ del custom_prof
+
+ self.profiles = tuple(self.profiles)
+ self.profiles_complex = tuple(self.profiles_complex)
+
+ def _check_var_directory(self, varname, var):
+ if not isdir_raise_eaccess(var):
+ writemsg(_("!!! Error: %s='%s' is not a directory. "
+ "Please correct this.\n") % (varname, var),
+ noiselevel=-1)
+ raise DirectoryNotFound(var)
+
+ def _addProfile(self, currentPath, repositories, known_repos):
+ current_abs_path = os.path.abspath(currentPath)
+ allow_directories = True
+ allow_parent_colon = True
+ repo_loc = None
+ compat_mode = False
+
+ eapi_file = os.path.join(currentPath, "eapi")
+ eapi = "0"
+ f = None
+ try:
+ f = io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ eapi = f.readline().strip()
+ except IOError:
+ pass
+ else:
+ if not eapi_is_supported(eapi):
+ raise ParseError(_(
+ "Profile contains unsupported "
+ "EAPI '%s': '%s'") % \
+ (eapi, os.path.realpath(eapi_file),))
+ finally:
+ if f is not None:
+ f.close()
+
+ intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
+ if intersecting_repos:
+ # protect against nested repositories. Insane configuration, but the longest
+ # path will be the correct one.
+ repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
+ allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+ allow_parent_colon = any(x in _allow_parent_colon
+ for x in layout_data['profile-formats'])
+
+ if compat_mode:
+ offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+ offenders = sorted(x for x in offenders
+ if os.path.isdir(os.path.join(currentPath, x)))
+ if offenders:
+ warnings.warn(_(
+ "\nThe selected profile is implicitly using the 'portage-1' format:\n"
+ "\tprofile = %(profile_path)s\n"
+ "But this repository is not using that format:\n"
+ "\trepo = %(repo_name)s\n"
+ "This will break in the future. Please convert these dirs to files:\n"
+ "\t%(files)s\n"
+ "Or, add this line to the repository's layout.conf:\n"
+ "\tprofile-formats = portage-1")
+ % dict(profile_path=currentPath, repo_name=repo_loc,
+ files='\n\t'.join(offenders)))
+
+ parentsFile = os.path.join(currentPath, "parent")
+ if exists_raise_eaccess(parentsFile):
+ parents = grabfile(parentsFile)
+ if not parents:
+ raise ParseError(
+ _("Empty parent file: '%s'") % parentsFile)
+ for parentPath in parents:
+ abs_parent = parentPath[:1] == os.sep
+ if not abs_parent and allow_parent_colon:
+ parentPath = self._expand_parent_colon(parentsFile,
+ parentPath, repo_loc, repositories)
+
+ # NOTE: This os.path.join() call is intended to ignore
+ # currentPath if parentPath is already absolute.
+ parentPath = normalize_path(os.path.join(
+ currentPath, parentPath))
+
+ if abs_parent or repo_loc is None or \
+ not parentPath.startswith(repo_loc):
+ # It seems that this parent may point outside
+ # of the current repo, so realpath it.
+ parentPath = os.path.realpath(parentPath)
+
+ if exists_raise_eaccess(parentPath):
+ self._addProfile(parentPath, repositories, known_repos)
+ else:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+
+ self.profiles.append(currentPath)
+ self.profiles_complex.append(
+ _profile_node(currentPath, allow_directories, False))
+
+ def _expand_parent_colon(self, parentsFile, parentPath,
+ repo_loc, repositories):
+ colon = parentPath.find(":")
+ if colon == -1:
+ return parentPath
+
+ if colon == 0:
+ if repo_loc is None:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ repo_loc, 'profiles', parentPath[colon+1:]))
+ else:
+ p_repo_name = parentPath[:colon]
+ try:
+ p_repo_loc = repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ p_repo_loc, 'profiles', parentPath[colon+1:]))
+
+ return parentPath
+
+ def set_root_override(self, root_overwrite=None):
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ if self.target_root is None and root_overwrite is not None:
+ self.target_root = root_overwrite
+ if not self.target_root.strip():
+ self.target_root = None
+ if self.target_root is None:
+ self.target_root = "/"
+
+ self.target_root = normalize_path(os.path.abspath(
+ self.target_root)).rstrip(os.path.sep) + os.path.sep
+
+ ensure_dirs(self.target_root)
+ self._check_var_directory("ROOT", self.target_root)
+
+ self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
+
+ self.global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ self.global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ def set_port_dirs(self, portdir, portdir_overlay):
+ self.portdir = portdir
+ self.portdir_overlay = portdir_overlay
+ if self.portdir_overlay is None:
+ self.portdir_overlay = ""
+
+ self.overlay_profiles = []
+ for ov in shlex_split(self.portdir_overlay):
+ ov = normalize_path(ov)
+ profiles_dir = os.path.join(ov, "profiles")
+ if isdir_raise_eaccess(profiles_dir):
+ self.overlay_profiles.append(profiles_dir)
+
+ self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
+ self.profile_and_user_locations = self.profile_locations[:]
+ if self._user_config:
+ self.profile_and_user_locations.append(self.abs_user_config)
+
+ self.profile_locations = tuple(self.profile_locations)
+ self.profile_and_user_locations = tuple(self.profile_and_user_locations)
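
For reference, here is how the parent-colon syntax handled by _expand_parent_colon resolves: a small sketch, assuming a plain repo_locations dict stands in for repositories.get_location_for_name and all paths are hypothetical:

import os

def expand_parent_colon(parent, repo_loc, repo_locations):
    # Mirrors the two colon forms accepted in profile "parent" files
    # when a repository enables them via profile-formats = portage-2.
    colon = parent.find(":")
    if colon == -1:
        return parent  # ordinary relative or absolute path
    if colon == 0:
        base = repo_loc  # ":path" is relative to the current repo
    else:
        base = repo_locations[parent[:colon]]  # "name:path" names a repo
    return os.path.join(base, "profiles", parent[colon + 1:])

repos = {"gentoo": "/usr/portage"}
print(expand_parent_colon("gentoo:targets/desktop", "/my/overlay", repos))
# /usr/portage/profiles/targets/desktop
print(expand_parent_colon(":base", "/my/overlay", repos))
# /my/overlay/profiles/base
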
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/MaskManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/MaskManager.py
new file mode 100644
index 0000000..05913fe
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/MaskManager.py
@@ -0,0 +1,268 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'MaskManager',
+)
+
+import warnings
+
+from portage import os
+from portage.dep import ExtendedAtomDict, match_from_list
+from portage.localization import _
+from portage.util import append_repo, grabfile_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+class MaskManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config,
+ user_config=True, strict_umatched_removal=False):
+ self._punmaskdict = ExtendedAtomDict(list)
+ self._pmaskdict = ExtendedAtomDict(list)
+ # Preserves atoms that are eliminated by negative
+ # incrementals in user_pkgmasklines.
+ self._pmaskdict_raw = ExtendedAtomDict(list)
+
+		#Read profile/package.mask from every repo.
+		#Repositories inherit masks from their parent profiles and
+		#can remove inherited masks with -atoms.
+		#Such a removal affects only the current repo, not the parent.
+		#Add ::repo specs to every atom to make sure atoms only affect
+		#packages from the current repo.
+
+		# Cache the repository-wide package.mask files, since a particular
+		# repo may often be referenced by others as their master.
+ pmask_cache = {}
+
+ def grab_pmask(loc, repo_config):
+ if loc not in pmask_cache:
+ path = os.path.join(loc, 'profiles', 'package.mask')
+ pmask_cache[loc] = grabfile_package(path,
+ recursive=repo_config.portage1_profiles,
+ remember_source_file=True, verify_eapi=True)
+ if repo_config.portage1_profiles_compat and os.path.isdir(path):
+ warnings.warn(_("Repository '%(repo_name)s' is implicitly using "
+ "'portage-1' profile format in its profiles/package.mask, but "
+ "the repository profiles are not marked as that format. This will break "
+ "in the future. Please either convert the following paths "
+ "to files, or add\nprofile-formats = portage-1\nto the "
+ "repository's layout.conf.\n")
+ % dict(repo_name=repo_config.name))
+
+ return pmask_cache[loc]
+
+ repo_pkgmasklines = []
+ for repo in repositories.repos_with_profiles():
+ lines = []
+ repo_lines = grab_pmask(repo.location, repo)
+ removals = frozenset(line[0][1:] for line in repo_lines
+ if line[0][:1] == "-")
+ matched_removals = set()
+ for master in repo.masters:
+ master_lines = grab_pmask(master.location, master)
+ for line in master_lines:
+ if line[0] in removals:
+ matched_removals.add(line[0])
+ # Since we don't stack masters recursively, there aren't any
+ # atoms earlier in the stack to be matched by negative atoms in
+ # master_lines. Also, repo_lines may contain negative atoms
+ # that are intended to negate atoms from a different master
+ # than the one with which we are currently stacking. Therefore,
+ # we disable warn_for_unmatched_removal here (see bug #386569).
+ lines.append(stack_lists([master_lines, repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=False))
+
+ # It's safe to warn for unmatched removal if masters have not
+ # been overridden by the user, which is guaranteed when
+ # user_config is false (when called by repoman).
+ if repo.masters:
+ unmatched_removals = removals.difference(matched_removals)
+ if unmatched_removals and not user_config:
+ source_file = os.path.join(repo.location,
+ "profiles", "package.mask")
+ unmatched_removals = list(unmatched_removals)
+ if len(unmatched_removals) > 3:
+ writemsg(
+ _("--- Unmatched removal atoms in %s: %s and %s more\n") %
+ (source_file,
+ ", ".join("-" + x for x in unmatched_removals[:3]),
+ len(unmatched_removals) - 3), noiselevel=-1)
+ else:
+ writemsg(
+ _("--- Unmatched removal atom(s) in %s: %s\n") %
+ (source_file,
+ ", ".join("-" + x for x in unmatched_removals)),
+ noiselevel=-1)
+
+ else:
+ lines.append(stack_lists([repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=not user_config,
+ strict_warn_for_unmatched_removal=strict_umatched_removal))
+ repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))
+
+ repo_pkgunmasklines = []
+ for repo in repositories.repos_with_profiles():
+ if not repo.portage1_profiles:
+ continue
+ repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
+ recursive=1, remember_source_file=True, verify_eapi=True)
+ lines = stack_lists([repo_lines], incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ repo_pkgunmasklines.extend(append_repo(lines, repo.name, remember_source_file=True))
+
+ #Read package.mask from the user's profile. Stack them in the end
+ #to allow profiles to override masks from their parent profiles.
+ profile_pkgmasklines = []
+ profile_pkgunmasklines = []
+		# PREFIX LOCAL: Prefix has unmasks for stuff in profiles/package.mask.
+		# If we don't consider the repo masks here, those unmasks are
+		# lost, causing lots of issues (e.g. Portage being masked).
+		# For a minimal, concentrated code change, empty repo_pkgmasklines
+		# and repo_pkgunmasklines here so that they don't count double.
+ import collections
+ _profile_node = collections.namedtuple('_profile_node',
+ 'location portage1_directories')
+ repo_pkgmasklines = []
+ repo_pkgunmasklines = []
+ all_profiles = []
+ for repo in repositories.repos_with_profiles():
+ all_profiles.append(_profile_node(
+ os.path.join(repo.location, "profiles"), True))
+ all_profiles.extend(profiles)
+ # END PREFIX LOCAL
+ for x in all_profiles:
+ profile_pkgmasklines.append(grabfile_package(
+ os.path.join(x.location, "package.mask"),
+ recursive=x.portage1_directories,
+ remember_source_file=True, verify_eapi=True))
+ if x.portage1_directories:
+ profile_pkgunmasklines.append(grabfile_package(
+ os.path.join(x.location, "package.unmask"),
+ recursive=x.portage1_directories,
+ remember_source_file=True, verify_eapi=True))
+ profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+
+ #Read /etc/portage/package.mask. Don't stack it to allow the user to
+ #remove mask atoms from everywhere with -atoms.
+ user_pkgmasklines = []
+ user_pkgunmasklines = []
+ if user_config:
+ user_pkgmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.mask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+ user_pkgunmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.unmask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+
+ #Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
+ #Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
+ raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+
+ for x, source_file in raw_pkgmasklines:
+ self._pmaskdict_raw.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgmasklines:
+ self._pmaskdict.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgunmasklines:
+ self._punmaskdict.setdefault(x.cp, []).append(x)
+
+ for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
+ for k, v in d.items():
+ d[k] = tuple(v)
+
+ def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+		@param unmask_atoms: optional list of unmask atoms, e.g. self._punmaskdict.get(cp)
+ @type unmask_atoms: list
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ mask_atoms = self._pmaskdict.get(pkg.cp)
+ if mask_atoms:
+ pkg_list = [pkg]
+ for x in mask_atoms:
+ if not match_from_list(x, pkg_list):
+ continue
+ if unmask_atoms:
+ for y in unmask_atoms:
+ if match_from_list(y, pkg_list):
+ return None
+ return x
+ return None
+
+
+ def getMaskAtom(self, cpv, slot, repo):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ return self._getMaskAtom(pkg, slot, repo,
+ self._punmaskdict.get(pkg.cp))
+
+
+ def getRawMaskAtom(self, cpv, slot, repo):
+ """
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists. Unlike getMaskAtom, this does not consider
+		package.unmask, so the returned atom may have been unmasked by the
+		user. PROVIDE is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ return self._getMaskAtom(cpv, slot, repo)
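
The mask stacking above relies on the incremental semantics of stack_lists(), where a later '-atom' cancels an earlier atom. A minimal model of just that behaviour, ignoring '-*', source-file tracking, and the unmatched-removal warnings; the atoms are hypothetical:

def stack_incremental(*lists):
    # Later lists may cancel atoms from earlier lists with a leading '-'.
    out = []
    for lst in lists:
        for atom in lst:
            if atom.startswith("-"):
                if atom[1:] in out:
                    out.remove(atom[1:])
                # else: an unmatched removal, warned about above
            else:
                out.append(atom)
    return out

masks = stack_incremental(
    [">=sys-apps/foo-2", "sys-apps/bar"],  # master's package.mask
    ["->=sys-apps/foo-2"],                 # repo cancels one inherited mask
)
print(masks)  # ['sys-apps/bar']
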
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/UseManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/UseManager.py
new file mode 100644
index 0000000..1c8c60e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/UseManager.py
@@ -0,0 +1,489 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'UseManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re, _repo_separator
+from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+class UseManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config, is_stable,
+ user_config=True):
+ # file variable
+ #--------------------------------
+ # repositories
+ #--------------------------------
+ # use.mask _repo_usemask_dict
+ # use.stable.mask _repo_usestablemask_dict
+ # use.force _repo_useforce_dict
+ # use.stable.force _repo_usestableforce_dict
+ # use.aliases _repo_usealiases_dict
+ # package.use.mask _repo_pusemask_dict
+ # package.use.stable.mask _repo_pusestablemask_dict
+ # package.use.force _repo_puseforce_dict
+ # package.use.stable.force _repo_pusestableforce_dict
+ # package.use.aliases _repo_pusealiases_dict
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # use.mask _usemask_list
+ # use.stable.mask _usestablemask_list
+ # use.force _useforce_list
+ # use.stable.force _usestableforce_list
+ # package.use.mask _pusemask_list
+ # package.use.stable.mask _pusestablemask_list
+ # package.use _pkgprofileuse
+ # package.use.force _puseforce_list
+ # package.use.stable.force _pusestableforce_list
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # package.use _pusedict
+
+ # Dynamic variables tracked by the config class
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # usemask
+ # useforce
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # puse
+
+ self._user_config = user_config
+ self._is_stable = is_stable
+ self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+ self._repo_usestablemask_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+ self._repo_usestableforce_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+ self._repo_pusestablemask_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+ self._repo_pusestableforce_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
+
+ self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+ self._usestablemask_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+ self._usestableforce_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+ self._pusestablemask_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
+ self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+ self._pusestableforce_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+
+ self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+
+ self._repo_usealiases_dict = self._parse_repository_usealiases(repositories)
+ self._repo_pusealiases_dict = self._parse_repository_packageusealiases(repositories)
+
+ self.repositories = repositories
+
+ def _parse_file_to_tuple(self, file_name, recursive=True, eapi_filter=None):
+ ret = []
+ lines = grabfile(file_name, recursive=recursive)
+ eapi = read_corresponding_eapi_file(file_name)
+ if eapi_filter is not None and not eapi_filter(eapi):
+ if lines:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ()
+ useflag_re = _get_useflag_re(eapi)
+ for prefixed_useflag in lines:
+ if prefixed_useflag[:1] == "-":
+ useflag = prefixed_useflag[1:]
+ else:
+ useflag = prefixed_useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
+ (file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ ret.append(prefixed_useflag)
+ return tuple(ret)
+
+ def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
+ eapi_filter=None, user_config=False):
+ ret = {}
+ location_dict = {}
+ eapi = read_corresponding_eapi_file(file_name, default=None)
+ if eapi is None and not user_config:
+ eapi = "0"
+ if eapi is None:
+ ret = ExtendedAtomDict(dict)
+ else:
+ ret = {}
+ file_dict = grabdict_package(file_name, recursive=recursive,
+ allow_wildcard=(eapi is None), allow_repo=(eapi is None),
+ verify_eapi=(eapi is not None))
+ if eapi is not None and eapi_filter is not None and not eapi_filter(eapi):
+ if file_dict:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ret
+ useflag_re = _get_useflag_re(eapi)
+ for k, v in file_dict.items():
+ useflags = []
+ for prefixed_useflag in v:
+ if prefixed_useflag[:1] == "-":
+ useflag = prefixed_useflag[1:]
+ else:
+ useflag = prefixed_useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
+ (k, file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ useflags.append(prefixed_useflag)
+ location_dict.setdefault(k, []).extend(useflags)
+ for k, v in location_dict.items():
+ if juststrings:
+ v = " ".join(v)
+ else:
+ v = tuple(v)
+ ret.setdefault(k.cp, {})[k] = v
+ return ret
+
+ def _parse_user_files_to_extatomdict(self, file_name, location, user_config):
+ ret = ExtendedAtomDict(dict)
+ if user_config:
+ pusedict = grabdict_package(
+ os.path.join(location, file_name), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in pusedict.items():
+ ret.setdefault(k.cp, {})[k] = tuple(v)
+
+ return ret
+
+ def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
+ return ret
+
+ def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
+ return ret
+
+ def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations,
+ eapi_filter=None):
+ return tuple(self._parse_file_to_tuple(
+ os.path.join(profile.location, file_name),
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter)
+ for profile in locations)
+
+ def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations,
+ juststrings=False, eapi_filter=None):
+ return tuple(self._parse_file_to_dict(
+ os.path.join(profile.location, file_name), juststrings,
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter,
+ user_config=profile.user_config)
+ for profile in locations)
+
+ def _parse_repository_usealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ raw_file_dict = grabdict(file_name, recursive=True)
+ file_dict = {}
+ for real_flag, aliases in raw_file_dict.items():
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in aliases:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
+ (real_flag, file_name, alias), noiselevel=-1)
+ else:
+ if any(alias in v for k, v in file_dict.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
+ (file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
+ def _parse_repository_packageusealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ lines = grabfile(file_name, recursive=True)
+ file_dict = {}
+ for line in lines:
+ elements = line.split()
+ atom = elements[0]
+ try:
+ atom = Atom(atom, eapi=eapi)
+ except InvalidAtom:
+ writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
+ continue
+ if len(elements) == 1:
+ writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
+ continue
+ real_flag = elements[1]
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in elements[2:]:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n") %
+ (real_flag, atom, file_name, alias), noiselevel=-1)
+ else:
+ # Duplicated USE flag aliases in entries for different atoms
+ # matching the same package version are detected in getUseAliases().
+ if any(alias in v for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n") %
+ (atom, file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
+ def _isStable(self, pkg):
+ if self._user_config:
+ try:
+ return pkg.stable
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ try:
+ pkg._metadata
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ # Since repoman uses different config instances for
+ # different profiles, we have to be careful to do the
+ # stable check against the correct profile here.
+ return self._is_stable(pkg)
+
+ def getUseMask(self, pkg=None, stable=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._usemask_list, incremental=True))
+
+ slot = None
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
+ usemask = []
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usemask.append(self._repo_usemask_dict.get(repo, {}))
+ if stable:
+ usemask.append(self._repo_usestablemask_dict.get(repo, {}))
+ cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
+ for i, pusemask_dict in enumerate(self._pusemask_list):
+ if self._usemask_list[i]:
+ usemask.append(self._usemask_list[i])
+ if stable and self._usestablemask_list[i]:
+ usemask.append(self._usestablemask_list[i])
+ cpdict = pusemask_dict.get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._pusestablemask_list[i].get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
+ return frozenset(stack_lists(usemask, incremental=True))
+
+ def getUseForce(self, pkg=None, stable=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._useforce_list, incremental=True))
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
+ useforce = []
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ useforce.append(self._repo_useforce_dict.get(repo, {}))
+ if stable:
+ useforce.append(self._repo_usestableforce_dict.get(repo, {}))
+ cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
+ for i, puseforce_dict in enumerate(self._puseforce_list):
+ if self._useforce_list[i]:
+ useforce.append(self._useforce_list[i])
+ if stable and self._usestableforce_list[i]:
+ useforce.append(self._usestableforce_list[i])
+ cpdict = puseforce_dict.get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._pusestableforce_list[i].get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
+ return frozenset(stack_lists(useforce, incremental=True))
+
+ def getUseAliases(self, pkg):
+ if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
+ return {}
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ usealiases = {}
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usealiases_dict = self._repo_usealiases_dict.get(repo, {})
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+ cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
+ if cp_usealiases_dict:
+ usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
+ for usealiases_dict in usealiases_dict_list:
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+
+ return usealiases
+
+ def getPUSE(self, pkg):
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+ ret = ""
+ cpdict = self._pusedict.get(cp)
+ if cpdict:
+ puse_matches = ordered_by_atom_specificity(cpdict, pkg)
+ if puse_matches:
+ puse_list = []
+ for x in puse_matches:
+ puse_list.extend(x)
+ ret = " ".join(puse_list)
+ return ret
+
+ def extract_global_USE_changes(self, old=""):
+ ret = old
+ cpdict = self._pusedict.get("*/*")
+ if cpdict is not None:
+ v = cpdict.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not cpdict:
+				#No tokens left in cpdict, remove it.
+ del self._pusedict["*/*"]
+ return ret
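
Since getPUSE() joins matches in ascending atom specificity, the most specific package.use entry wins when the result is later stacked incrementally. A small model of that final stacking step, with hypothetical atoms and flags:

def resolve_use(tokens):
    # Minimal model of incremental USE stacking: later tokens override.
    enabled = set()
    for t in tokens.split():
        if t == "-*":
            enabled.clear()
        elif t.startswith("-"):
            enabled.discard(t[1:])
        else:
            enabled.add(t)
    return sorted(enabled)

# package.use:  dev-lang/python          sqlite
#               =dev-lang/python-2.7*    -sqlite tk
# getPUSE() would yield "sqlite -sqlite tk" for dev-lang/python-2.7.9, so:
print(resolve_use("sqlite -sqlite tk"))  # ['tk']
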
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/VirtualsManager.py b/usr/lib/portage/pym/portage/package/ebuild/_config/VirtualsManager.py
new file mode 100644
index 0000000..c4d1e36
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/VirtualsManager.py
@@ -0,0 +1,233 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'VirtualsManager',
+)
+
+from copy import deepcopy
+
+from portage import os
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabdict, stack_dictlist, writemsg
+from portage.versions import cpv_getkey
+
+class VirtualsManager(object):
+
+ def __init__(self, *args, **kwargs):
+ if kwargs.get("_copy"):
+ return
+
+ assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
+ assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
+ ", ".join(kwargs)
+
+ profiles = args[0]
+ self._virtuals = None
+ self._dirVirtuals = None
+ self._virts_p = None
+
+ # Virtuals obtained from the vartree
+ self._treeVirtuals = None
+ # Virtuals added by the depgraph via self.add_depgraph_virtuals().
+ self._depgraphVirtuals = {}
+
+ #Initialise _dirVirtuals.
+ self._read_dirVirtuals(profiles)
+
+ #We could initialise _treeVirtuals here, but some consumers want to
+ #pass their own vartree.
+
+ def _read_dirVirtuals(self, profiles):
+ """
+ Read the 'virtuals' file in all profiles.
+ """
+ virtuals_list = []
+ for x in profiles:
+ virtuals_file = os.path.join(x, "virtuals")
+ virtuals_dict = grabdict(virtuals_file)
+ atoms_dict = {}
+ for k, v in virtuals_dict.items():
+ try:
+ virt_atom = Atom(k)
+ except InvalidAtom:
+ virt_atom = None
+ else:
+ if virt_atom.blocker or \
+ str(virt_atom) != str(virt_atom.cp):
+ virt_atom = None
+ if virt_atom is None:
+ writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
+ (virtuals_file, k), noiselevel=-1)
+ continue
+ providers = []
+ for atom in v:
+ atom_orig = atom
+ if atom[:1] == '-':
+ # allow incrementals
+ atom = atom[1:]
+ try:
+ atom = Atom(atom)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is None:
+ writemsg(_("--- Invalid atom in %s: %s\n") % \
+ (virtuals_file, atom_orig), noiselevel=-1)
+ else:
+ if atom_orig == str(atom):
+ # normal atom, so return as Atom instance
+ providers.append(atom)
+ else:
+ # atom has special prefix, so return as string
+ providers.append(atom_orig)
+ if providers:
+ atoms_dict[virt_atom] = providers
+ if atoms_dict:
+ virtuals_list.append(atoms_dict)
+
+ self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+
+ for virt in self._dirVirtuals:
+ # Preference for virtuals decreases from left to right.
+ self._dirVirtuals[virt].reverse()
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = VirtualsManager(_copy=True)
+ memo[id(self)] = result
+
+ # immutable attributes (internal policy ensures lack of mutation)
+		# _treeVirtuals is initialised by _populate_treeVirtuals().
+ # Before that it's 'None'.
+ result._treeVirtuals = self._treeVirtuals
+ memo[id(self._treeVirtuals)] = self._treeVirtuals
+		# _dirVirtuals is initialised by __init__.
+ result._dirVirtuals = self._dirVirtuals
+ memo[id(self._dirVirtuals)] = self._dirVirtuals
+
+ # mutable attributes (change when add_depgraph_virtuals() is called)
+ result._virtuals = deepcopy(self._virtuals, memo)
+ result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
+ result._virts_p = deepcopy(self._virts_p, memo)
+
+ return result
+
+ def _compile_virtuals(self):
+ """Stack installed and profile virtuals. Preference for virtuals
+ decreases from left to right.
+ Order of preference:
+ 1. installed and in profile
+ 2. installed only
+ 3. profile only
+ """
+
+ assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
+ "any query about virtuals"
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+
+ for virt, installed_list in self._treeVirtuals.items():
+ profile_list = self._dirVirtuals.get(virt, None)
+ if not profile_list:
+ continue
+ for cp in installed_list:
+ if cp in profile_list:
+ ptVirtuals.setdefault(virt, [])
+ ptVirtuals[virt].append(cp)
+
+ virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
+ self._dirVirtuals, self._depgraphVirtuals])
+ self._virtuals = virtuals
+ self._virts_p = None
+
+ def getvirtuals(self):
+ """
+ Computes self._virtuals if necessary and returns it.
+ self._virtuals is only computed on the first call.
+ """
+ if self._virtuals is None:
+ self._compile_virtuals()
+
+ return self._virtuals
+
+ def _populate_treeVirtuals(self, vartree):
+ """
+ Initialize _treeVirtuals from the given vartree.
+ It must not have been initialized already, otherwise
+ our assumptions about immutability don't hold.
+ """
+ assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
+
+ self._treeVirtuals = {}
+
+ for provide, cpv_list in vartree.get_all_provides().items():
+ try:
+ provide = Atom(provide)
+ except InvalidAtom:
+ continue
+ self._treeVirtuals[provide.cp] = \
+ [Atom(cpv_getkey(cpv)) for cpv in cpv_list]
+
+ def populate_treeVirtuals_if_needed(self, vartree):
+ """
+ Initialize _treeVirtuals if it hasn't been done already.
+		This is a hack for consumers that already have a populated vartree.
+ """
+ if self._treeVirtuals is not None:
+ return
+
+ self._populate_treeVirtuals(vartree)
+
+ def add_depgraph_virtuals(self, mycpv, virts):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+
+ #Ensure that self._virtuals is populated.
+ if self._virtuals is None:
+ self.getvirtuals()
+
+ modified = False
+ cp = Atom(cpv_getkey(mycpv))
+ for virt in virts:
+ try:
+ virt = Atom(virt).cp
+ except InvalidAtom:
+ continue
+ providers = self._virtuals.get(virt)
+ if providers and cp in providers:
+ continue
+ providers = self._depgraphVirtuals.get(virt)
+ if providers is None:
+ providers = []
+ self._depgraphVirtuals[virt] = providers
+ if cp not in providers:
+ providers.append(cp)
+ modified = True
+
+ if modified:
+ self._compile_virtuals()
+
+ def get_virts_p(self):
+ if self._virts_p is not None:
+ return self._virts_p
+
+ virts = self.getvirtuals()
+ virts_p = {}
+ for x in virts:
+ vkeysplit = x.split("/")
+ if vkeysplit[1] not in virts_p:
+ virts_p[vkeysplit[1]] = virts[x]
+ self._virts_p = virts_p
+ return virts_p
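
The preference order documented in _compile_virtuals() condenses into a small model; the real code stacks four dicts with stack_dictlist and uses Atom objects, while this sketch uses plain strings and hypothetical atoms:

def compile_virtuals(tree_virtuals, dir_virtuals):
    # Preference: installed-and-in-profile, then installed, then profile.
    out = {}
    for virt in set(tree_virtuals) | set(dir_virtuals):
        installed = tree_virtuals.get(virt, [])
        listed = dir_virtuals.get(virt, [])
        both = [cp for cp in installed if cp in listed]
        merged = []
        for cp in both + installed + listed:
            if cp not in merged:
                merged.append(cp)
        out[virt] = merged
    return out

print(compile_virtuals(
    {"virtual/editor": ["app-editors/vim"]},
    {"virtual/editor": ["app-editors/nano", "app-editors/vim"]},
))  # {'virtual/editor': ['app-editors/vim', 'app-editors/nano']}
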
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/__init__.py b/usr/lib/portage/pym/portage/package/ebuild/_config/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/env_var_validation.py b/usr/lib/portage/pym/portage/package/ebuild/_config/env_var_validation.py
new file mode 100644
index 0000000..d3db545
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/env_var_validation.py
@@ -0,0 +1,23 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.process import find_binary
+from portage.util import shlex_split
+
+def validate_cmd_var(v):
+ """
+	Validate an environment variable value to see if it
+	contains an executable command as the first token.
+	Returns (valid, token_list) where 'valid' is boolean and 'token_list'
+	is the (possibly empty) list of tokens split by shlex.
+ """
+ invalid = False
+ v_split = shlex_split(v)
+ if not v_split:
+ invalid = True
+ elif os.path.isabs(v_split[0]):
+		invalid = not os.access(v_split[0], os.X_OK)
+ elif find_binary(v_split[0]) is None:
+ invalid = True
+ return (not invalid, v_split)
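
Example usage of validate_cmd_var(), assuming a system where /bin/sh exists and 'no-such-cmd' is not on PATH:

from portage.package.ebuild._config.env_var_validation import validate_cmd_var

print(validate_cmd_var("/bin/sh -c 'echo ok'"))
# (True, ['/bin/sh', '-c', 'echo ok'])
print(validate_cmd_var("no-such-cmd --flag"))
# (False, ['no-such-cmd', '--flag'])
print(validate_cmd_var(""))
# (False, [])
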
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/features_set.py b/usr/lib/portage/pym/portage/package/ebuild/_config/features_set.py
new file mode 100644
index 0000000..62236fd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/features_set.py
@@ -0,0 +1,128 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'features_set',
+)
+
+import logging
+
+from portage.const import SUPPORTED_FEATURES
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg_level
+
+class features_set(object):
+ """
+ Provides relevant set operations needed for access and modification of
+ config.features. The FEATURES variable is automatically synchronized
+ upon modification.
+
+ Modifications result in a permanent override that will cause the change
+ to propagate to the incremental stacking mechanism in config.regenerate().
+ This eliminates the need to call config.backup_changes() when FEATURES
+ is modified, since any overrides are guaranteed to persist despite calls
+ to config.reset().
+ """
+
+ def __init__(self, settings):
+ self._settings = settings
+ self._features = set()
+
+ def __contains__(self, k):
+ return k in self._features
+
+ def __iter__(self):
+ return iter(self._features)
+
+ def _sync_env_var(self):
+ self._settings['FEATURES'] = ' '.join(sorted(self._features))
+
+ def add(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append(k)
+ if k not in self._features:
+ self._features.add(k)
+ self._sync_env_var()
+
+ def update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend(values)
+ need_sync = False
+ for k in values:
+ if k in self._features:
+ continue
+ self._features.add(k)
+ need_sync = True
+ if need_sync:
+ self._sync_env_var()
+
+ def difference_update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend('-' + k for k in values)
+ remove_us = self._features.intersection(values)
+ if remove_us:
+ self._features.difference_update(values)
+ self._sync_env_var()
+
+ def remove(self, k):
+ """
+ This never raises KeyError, since it records a permanent override
+ that will prevent the given flag from ever being added again by
+ incremental stacking in config.regenerate().
+ """
+ self.discard(k)
+
+ def discard(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append('-' + k)
+ if k in self._features:
+ self._features.remove(k)
+ self._sync_env_var()
+
+ def _validate(self):
+ """
+ Implements unknown-features-warn and unknown-features-filter.
+ """
+ if 'unknown-features-warn' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ unknown_features = unknown_features.difference(
+ self._settings._unknown_features)
+ if unknown_features:
+ self._settings._unknown_features.update(unknown_features)
+ writemsg_level(colorize("BAD",
+ _("FEATURES variable contains unknown value(s): %s") % \
+ ", ".join(sorted(unknown_features))) \
+ + "\n", level=logging.WARNING, noiselevel=-1)
+
+ if 'unknown-features-filter' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ self.difference_update(unknown_features)
+ self._prune_overrides()
+
+ def _prune_overrides(self):
+ """
+ If there are lots of invalid package.env FEATURES settings
+ then unknown-features-filter can make _features_overrides
+ grow larger and larger, so prune it. This performs incremental
+ stacking with preservation of negative values since they need
+ to persist for future config.regenerate() calls.
+ """
+ overrides_set = set(self._settings._features_overrides)
+ positive = set()
+ negative = set()
+ for x in self._settings._features_overrides:
+ if x[:1] == '-':
+ positive.discard(x[1:])
+ negative.add(x[1:])
+ else:
+ positive.add(x)
+ negative.discard(x)
+ self._settings._features_overrides[:] = \
+ list(positive) + list('-' + x for x in negative)
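
The pruning above keeps a single entry per flag, with the last-seen polarity winning and negatives preserved. A self-contained model (output sorted here for determinism, unlike the set-ordered original):

def prune_overrides(overrides):
    # One entry per flag; negatives survive so they keep masking the
    # flag in future config.regenerate() calls.
    positive, negative = set(), set()
    for x in overrides:
        if x.startswith("-"):
            positive.discard(x[1:])
            negative.add(x[1:])
        else:
            positive.add(x)
            negative.discard(x)
    return sorted(positive) + sorted("-" + x for x in negative)

print(prune_overrides(["test", "-test", "buildpkg", "-bogus1", "-bogus1"]))
# ['buildpkg', '-bogus1', '-test']
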
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/helper.py b/usr/lib/portage/pym/portage/package/ebuild/_config/helper.py
new file mode 100644
index 0000000..ee0c090
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/helper.py
@@ -0,0 +1,64 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ordered_by_atom_specificity', 'prune_incremental',
+)
+
+from _emerge.Package import Package
+from portage.dep import best_match_to_list, _repo_separator
+
+def ordered_by_atom_specificity(cpdict, pkg, repo=None):
+ """
+ Return a list of matched values from the given cpdict,
+ in ascending order by atom specificity. The rationale
+ for this order is that package.* config files are
+	typically written in ChangeLog-like fashion, so it's
+ most friendly if the order that the atoms are written
+ does not matter. Therefore, settings from more specific
+ atoms override those of less specific atoms. Without
+ this behavior, settings from relatively unspecific atoms
+ would (somewhat confusingly) override the settings of
+ more specific atoms, requiring people to make adjustments
+ to the order that atoms are listed in the config file in
+ order to achieve desired results (and thus corrupting
+	the ChangeLog-like ordering of the file).
+ """
+ if not hasattr(pkg, 'repo') and repo and repo != Package.UNKNOWN_REPO:
+ pkg = pkg + _repo_separator + repo
+
+ results = []
+ keys = list(cpdict)
+
+ while keys:
+ bestmatch = best_match_to_list(pkg, keys)
+ if bestmatch:
+ keys.remove(bestmatch)
+ results.append(cpdict[bestmatch])
+ else:
+ break
+
+ if results:
+ # reverse, so the most specific atoms come last
+ results.reverse()
+
+ return results
+
+def prune_incremental(split):
+ """
+ Prune off any parts of an incremental variable that are
+ made irrelevant by the latest occurring * or -*. This
+ could be more aggressive but that might be confusing
+ and the point is just to reduce noise a bit.
+ """
+ for i, x in enumerate(reversed(split)):
+ if x == '*':
+ split = split[-i-1:]
+ break
+ elif x == '-*':
+ if i == 0:
+ split = []
+ else:
+ split = split[-i:]
+ break
+ return split
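+
+# Illustrative only (not part of the upstream file):
+#
+#     >>> prune_incremental(['a', 'b', '-*', 'c', 'd'])
+#     ['c', 'd']
+#     >>> prune_incremental(['a', '*', 'b'])
+#     ['*', 'b']
+#     >>> prune_incremental(['a', 'b'])
+#     ['a', 'b']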
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py b/usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py
new file mode 100644
index 0000000..3abf218
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -0,0 +1,217 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'case_insensitive_vars', 'default_globals', 'env_blacklist',
+ 'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
+)
+
+import re
+
+# Blacklisted variables are internal variables that are never allowed
+# to enter the config instance from the external environment or
+# configuration files.
+env_blacklist = frozenset((
+ "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE",
+ "EBUILD_PHASE_FUNC", "EBUILD_SKIP_MANIFEST",
+ "ED", "EMERGE_FROM", "EPREFIX", "EROOT",
+ "GREP_OPTIONS", "HDEPEND", "HOMEPAGE",
+ "INHERITED", "IUSE", "IUSE_EFFECTIVE",
+ "KEYWORDS", "LICENSE", "MERGE_TYPE",
+ "PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT",
+ "PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
+ "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "REPOSITORY",
+ "RESTRICT", "ROOT", "SLOT", "SRC_URI"
+))
+
+environ_whitelist = []
+
+# Whitelisted variables are always allowed to enter the ebuild
+# environment. Generally, this only includes special portage
+# variables. Ebuilds can unset variables that are not whitelisted
+# and rely on them remaining unset for future phases, without them
+# leaking back in from various locations (bug #189417). It's very
+# important to set our special BASH_ENV variable in the ebuild
+# environment in order to prevent sandbox from sourcing /etc/profile
+# in its bashrc (causing major leakage).
+environ_whitelist += [
+ "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "COLUMNS", "D",
+ "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
+ "EBUILD_FORCE_TEST",
+ "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EMERGE_FROM", "EPREFIX", "EROOT",
+ "FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
+ "PKGDIR",
+ "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
+ "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
+ "PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
+ "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
+ "PORTAGE_BINPKG_TMPFILE",
+ "PORTAGE_BIN_PATH",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
+ "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_COLORMAP", "PORTAGE_COMPRESS",
+ "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
+ "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
+ "PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
+ "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
+ "PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INTERNAL_CALLER",
+ "PORTAGE_INST_GID", "PORTAGE_INST_UID",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
+ "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
+ "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
+ "PORTAGE_SIGPIPE_STATUS",
+ "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
+ "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
+ "REPLACING_VERSIONS", "REPLACED_BY_VERSION",
+ "ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
+ "USE_EXPAND", "USE_ORDER", "WORKDIR",
+ "XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
+ "DEFAULT_PATH", "EXTRA_PATH",
+ "PORTAGE_GROUP", "PORTAGE_USER",
+]
+
+# user config variables
+environ_whitelist += [
+ "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
+]
+
+environ_whitelist += [
+ "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
+]
+
+# misc variables inherited from the calling environment
+environ_whitelist += [
+ "COLORTERM", "DISPLAY", "EDITOR", "LESS",
+ "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
+ "TERM", "TERMCAP", "USER",
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+]
+
+# tempdir settings
+environ_whitelist += [
+ "TMPDIR", "TEMP", "TMP",
+]
+
+# localization settings
+environ_whitelist += [
+ "LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
+ "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
+ "LC_ALL",
+]
+
+# other variables inherited from the calling environment
+# UNIXMODE is necessary for MiNT
+environ_whitelist += [
+ "CVS_RSH", "ECHANGELOG_USER",
+ "GPG_AGENT_INFO",
+ "SSH_AGENT_PID", "SSH_AUTH_SOCK",
+ "STY", "WINDOW", "XAUTHORITY",
+ "UNIXMODE",
+]
+
+environ_whitelist = frozenset(environ_whitelist)
+
+environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
+
+# Filter selected variables in the config.environ() method so that
+# they don't needlessly propagate down into the ebuild environment.
+environ_filter = []
+
+# Exclude anything that could be extremely long here (like SRC_URI)
+# since that could cause execve() calls to fail with E2BIG errors. For
+# example, see bug #262647.
+environ_filter += [
+ 'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
+]
+
+# misc variables inherited from the calling environment
+environ_filter += [
+ "INFOPATH", "MANPATH", "USER",
+ "HOST", "GROUP", "LOGNAME", "MAIL", "REMOTEHOST",
+ "SECURITYSESSIONID",
+ "TERMINFO", "TERM_PROGRAM", "TERM_PROGRAM_VERSION",
+ "VENDOR", "__CF_USER_TEXT_ENCODING",
+]
+
+# variables that break bash
+environ_filter += [
+ "HISTFILE", "POSIXLY_CORRECT",
+]
+
+# portage config variables and variables set directly by portage
+environ_filter += [
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES",
+ "ACCEPT_RESTRICT", "AUTOCLEAN",
+ "CLEAN_DELAY", "COLLISION_IGNORE",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+ "DCO_SIGNED_OFF_BY",
+ "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+ "EMERGE_LOG_DIR",
+ "EMERGE_WARNING_DELAY",
+ "FETCHCOMMAND", "FETCHCOMMAND_FTP",
+ "FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
+ "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
+ "GENTOO_MIRRORS", "NOCONFMEM", "O",
+ "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
+ "PORTAGE_BINHOST", "PORTAGE_BINPKG_FORMAT",
+ "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_CHECKSUM_FILTER",
+ "PORTAGE_ELOG_CLASSES",
+ "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
+ "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
+ "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
+ "PORTAGE_GPG_DIR",
+ "PORTAGE_GPG_KEY", "PORTAGE_GPG_SIGNING_COMMAND",
+ "PORTAGE_IONICE_COMMAND",
+ "PORTAGE_PACKAGE_EMPTY_ABORT",
+ "PORTAGE_REPO_DUPLICATE_WARN",
+ "PORTAGE_RO_DISTDIRS",
+ "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SSH_OPTS", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE",
+ "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+ "QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
+ "RESUMECOMMAND", "RESUMECOMMAND_FTP",
+ "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
+ "RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
+ "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "__PORTAGE_HELPER"
+]
+
+# No longer supported variables
+environ_filter += [
+ "SYNC"
+]
+
+environ_filter = frozenset(environ_filter)
+
+# Variables that are not allowed to have per-repo or per-package
+# settings.
+global_only_vars = frozenset([
+ "CONFIG_PROTECT",
+])
+
+default_globals = {
+ 'ACCEPT_LICENSE': '* -@EULA',
+ 'ACCEPT_PROPERTIES': '*',
+ 'PORTAGE_BZIP2_COMMAND': 'bzip2',
+}
+
+validate_commands = ('PORTAGE_BZIP2_COMMAND', 'PORTAGE_BUNZIP2_COMMAND',)
+
+# To enhance usability, make some vars case insensitive
+# by forcing them to lower case.
+case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
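+
+# Illustrative only (hypothetical helper, not part of this file): the sets
+# above are consumed roughly like this when deciding whether a calling
+# environment variable may enter the ebuild environment:
+#
+#     def may_enter_ebuild_env(var):
+#         return var in environ_whitelist or \
+#             environ_whitelist_re.match(var) is not None
+#
+#     may_enter_ebuild_env('CCACHE_DIR')  # True (matches the regex)
+#     may_enter_ebuild_env('HISTFILE')    # False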
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_config/unpack_dependencies.py b/usr/lib/portage/pym/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 0000000..1375189
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+ repo_dict = {}
+ for repo in repositories.repos_with_profiles():
+ for eapi in _supported_eapis:
+ if eapi_has_automatic_unpack_dependencies(eapi):
+ file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+ lines = grabfile(file_name, recursive=True)
+ for line in lines:
+ elements = line.split()
+ suffix = elements[0].lower()
+ if len(elements) == 1:
+ writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+ depend = " ".join(elements[1:])
+ try:
+ use_reduce(depend, eapi=eapi)
+ except InvalidDependString as e:
+ writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
+ else:
+ repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ for repo_name in [x.name for x in repo.masters] + [repo.name]:
+ for eapi in repo_dict.get(repo_name, {}):
+ for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+ ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ return ret
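+
+# Illustrative only (hypothetical file contents): each line of
+# profiles/unpack_dependencies/<EAPI> maps an archive suffix to the
+# packages needed to unpack it, e.g.:
+#
+#     tar.bz2 app-arch/tar app-arch/bzip2
+#     zip     app-arch/unzip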
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_ipc/ExitCommand.py b/usr/lib/portage/pym/portage/package/ebuild/_ipc/ExitCommand.py
new file mode 100644
index 0000000..f14050b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_ipc/ExitCommand.py
@@ -0,0 +1,27 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+
+class ExitCommand(IpcCommand):
+
+ __slots__ = ('exitcode', 'reply_hook',)
+
+ def __init__(self):
+ IpcCommand.__init__(self)
+ self.reply_hook = None
+ self.exitcode = None
+
+ def __call__(self, argv):
+
+ if self.exitcode is not None:
+ # Ignore all but the first call, since if die is called
+ # then we certainly want to honor that exitcode, even if
+ # the ebuild process manages to send a second exit
+ # command.
+ self.reply_hook = None
+ else:
+ self.exitcode = int(argv[1])
+
+ # (stdout, stderr, returncode)
+ return ('', '', 0)
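+
+# Illustrative only (not part of the upstream file): the first exit
+# command wins, so an exitcode recorded by die cannot be overwritten:
+#
+#     cmd = ExitCommand()
+#     cmd(['exit', '1'])  # records exitcode 1
+#     cmd(['exit', '0'])  # ignored; exitcode stays 1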
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_ipc/IpcCommand.py b/usr/lib/portage/pym/portage/package/ebuild/_ipc/IpcCommand.py
new file mode 100644
index 0000000..efb27f0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_ipc/IpcCommand.py
@@ -0,0 +1,9 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class IpcCommand(object):
+
+ __slots__ = ()
+
+ def __call__(self, argv):
+ raise NotImplementedError(self)
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_ipc/QueryCommand.py b/usr/lib/portage/pym/portage/package/ebuild/_ipc/QueryCommand.py
new file mode 100644
index 0000000..351c956
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -0,0 +1,140 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os
+from portage.dep import Atom, _repo_name_re
+from portage.eapi import eapi_has_repo_deps
+from portage.elog import messages as elog_messages
+from portage.exception import InvalidAtom
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+from portage.util import normalize_path
+from portage.versions import best
+
+class QueryCommand(IpcCommand):
+
+ __slots__ = ('phase', 'settings',)
+
+ _db = None
+
+ @classmethod
+ def get_db(cls):
+ if cls._db is not None:
+ return cls._db
+ return portage.db
+
+ def __init__(self, settings, phase):
+ IpcCommand.__init__(self)
+ self.settings = settings
+ self.phase = phase
+
+ def __call__(self, argv):
+ """
+ @return: tuple of (stdout, stderr, returncode)
+ """
+
+ # Python 3:
+ # cmd, root, *args = argv
+ cmd = argv[0]
+ root = argv[1]
+ args = argv[2:]
+
+ warnings = []
+ warnings_str = ''
+
+ db = self.get_db()
+ eapi = self.settings.get('EAPI')
+
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ if root not in db:
+ return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)
+
+ portdb = db[root]["porttree"].dbapi
+ vardb = db[root]["vartree"].dbapi
+
+ if cmd in ('best_version', 'has_version'):
+ allow_repo = eapi_has_repo_deps(eapi)
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo)
+ except InvalidAtom:
+ return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)
+
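+ # Re-parse with the EAPI so that atom syntax unsupported by
+ # this EAPI is reported as a QA Notice; if this stricter parse
+ # fails, the atom from the permissive parse above is kept.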
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % (cmd, e))
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ use = frozenset(use.split())
+ atom = atom.evaluate_conditionals(use)
+
+ if warnings:
+ warnings_str = self._elog('eqawarn', warnings)
+
+ if cmd == 'has_version':
+ if vardb.match(atom):
+ returncode = 0
+ else:
+ returncode = 1
+ return ('', warnings_str, returncode)
+ elif cmd == 'best_version':
+ m = best(vardb.match(atom))
+ return ('%s\n' % m, warnings_str, 0)
+ elif cmd in ('master_repositories', 'repository_path', 'available_eclasses', 'eclass_path', 'license_path'):
+ repo = _repo_name_re.match(args[0])
+ if repo is None:
+ return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
+ try:
+ repo = portdb.repositories[args[0]]
+ except KeyError:
+ return ('', warnings_str, 1)
+
+ if cmd == 'master_repositories':
+ return ('%s\n' % ' '.join(x.name for x in repo.masters), warnings_str, 0)
+ elif cmd == 'repository_path':
+ return ('%s\n' % repo.location, warnings_str, 0)
+ elif cmd == 'available_eclasses':
+ return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)), warnings_str, 0)
+ elif cmd == 'eclass_path':
+ try:
+ eclass = repo.eclass_db.eclasses[args[1]]
+ except KeyError:
+ return ('', warnings_str, 1)
+ return ('%s\n' % eclass.location, warnings_str, 0)
+ elif cmd == 'license_path':
+ paths = reversed([os.path.join(x.location, 'licenses', args[1]) for x in list(repo.masters) + [repo]])
+ for path in paths:
+ if os.path.exists(path):
+ return ('%s\n' % path, warnings_str, 0)
+ return ('', warnings_str, 1)
+ else:
+ return ('', 'Invalid command: %s\n' % cmd, 3)
+
+ def _elog(self, elog_funcname, lines):
+ """
+ This returns a string, to be returned via ipc and displayed at the
+ appropriate place in the build output. We wouldn't want to open the
+ log here since it is already opened by AbstractEbuildProcess and we
+ don't want to corrupt it, especially if it is being written with
+ compression.
+ """
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ return msg
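+
+# Illustrative only (not part of the upstream file): argv arrives over the
+# IPC pipe as [command, root, args...], e.g.
+#
+#     QueryCommand(settings, 'setup')(['has_version', '/', 'dev-lang/python'])
+#
+# returns a (stdout, stderr, returncode) tuple; the returncode becomes the
+# exit status of the corresponding helper inside the ebuild environment.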
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_ipc/__init__.py b/usr/lib/portage/pym/portage/package/ebuild/_ipc/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_ipc/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_metadata_invalid.py b/usr/lib/portage/pym/portage/package/ebuild/_metadata_invalid.py
new file mode 100644
index 0000000..bcf1f7f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_metadata_invalid.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage.dep import _repo_separator
+from portage.elog import elog_process
+from portage.elog.messages import eerror
+
+def eapi_invalid(self, cpv, repo_name, settings,
+ eapi_var, eapi_parsed, eapi_lineno):
+
+ msg = []
+ msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not"
+ " conform with PMS section 7.3.1 (see bug #402167):") %
+ (cpv, _repo_separator, repo_name), 70))
+
+ if not eapi_parsed:
+ # None means the assignment was not found, while an
+ # empty string indicates an (invalid) empty assignment.
+ msg.append(
+ "\tvalid EAPI assignment must"
+ " occur on or before line: %s" %
+ eapi_lineno)
+ else:
+ msg.append(("\tbash returned EAPI '%s' which does not match "
+ "assignment on line: %s") %
+ (eapi_var, eapi_lineno))
+
+ if portage.data.secpass >= 2:
+ # TODO: improve elog permission error handling (bug #416231)
+ for line in msg:
+ eerror(line, phase="other", key=cpv)
+ elog_process(cpv, settings,
+ phasefilter=("other",))
+
+ else:
+ out = portage.output.EOutput()
+ for line in msg:
+ out.eerror(line)
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
new file mode 100644
index 0000000..44e2576
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import (FileNotFound,
+ PermissionDenied, PortagePackageException)
+from portage.localization import _
+from portage.util._async.ForkProcess import ForkProcess
+
+class ManifestProcess(ForkProcess):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config")
+
+ MODIFIED = 16
+
+ def _run(self):
+ mf = self.repo_config.load_manifest(
+ os.path.join(self.repo_config.location, self.cp),
+ self.distdir, fetchlist_dict=self.fetchlist_dict)
+
+ try:
+ mf.create(assumeDistHashesAlways=True)
+ except FileNotFound as e:
+ portage.writemsg(_("!!! File %s doesn't exist, can't update "
+ "Manifest\n") % e, noiselevel=-1)
+ return 1
+
+ except PortagePackageException as e:
+ portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 1
+
+ try:
+ modified = mf.write(sign=False)
+ except PermissionDenied as e:
+ portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
+ noiselevel=-1)
+ return 1
+ else:
+ if modified:
+ return self.MODIFIED
+ else:
+ return os.EX_OK
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
new file mode 100644
index 0000000..38ac482
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
@@ -0,0 +1,93 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from .ManifestTask import ManifestTask
+
+class ManifestScheduler(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None,
+ gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):
+
+ AsyncScheduler.__init__(self, **kwargs)
+
+ self._portdb = portdb
+
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ self._cp_iter = cp_iter
+ self._gpg_cmd = gpg_cmd
+ self._gpg_vars = gpg_vars
+ self._force_sign_key = force_sign_key
+ self._task_iter = self._iter_tasks()
+
+ def _next_task(self):
+ return next(self._task_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_tasks(self):
+ portdb = self._portdb
+ distdir = portdb.settings["DISTDIR"]
+ disabled_repos = set()
+
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ if self._terminated.is_set():
+ break
+ repo_config = portdb.repositories.get_repo_for_location(mytree)
+ if not repo_config.create_manifest:
+ if repo_config.name not in disabled_repos:
+ disabled_repos.add(repo_config.name)
+ portage.writemsg(
+ _(">>> Skipping creating Manifest for %s%s%s; "
+ "repository is configured to not use them\n") %
+ (cp, _repo_separator, repo_config.name),
+ noiselevel=-1)
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo_config.location])
+ if not cpv_list:
+ continue
+ fetchlist_dict = {}
+ try:
+ for cpv in cpv_list:
+ fetchlist_dict[cpv] = \
+ list(portdb.getFetchMap(cpv, mytree=mytree))
+ except InvalidDependString as e:
+ portage.writemsg(
+ _("!!! %s%s%s: SRC_URI: %s\n") %
+ (cp, _repo_separator, repo_config.name, e),
+ noiselevel=-1)
+ self._error_count += 1
+ continue
+
+ yield ManifestTask(cp=cp, distdir=distdir,
+ fetchlist_dict=fetchlist_dict, repo_config=repo_config,
+ gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars,
+ force_sign_key=self._force_sign_key)
+
+ def _task_exit(self, task):
+
+ if task.returncode != os.EX_OK:
+ if not self._terminated_tasks:
+ portage.writemsg(
+ "Error processing %s%s%s, continuing...\n" %
+ (task.cp, _repo_separator, task.repo_config.name),
+ noiselevel=-1)
+
+ AsyncScheduler._task_exit(self, task)
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
new file mode 100644
index 0000000..0ee2b91
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
@@ -0,0 +1,186 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import subprocess
+
+from portage import os
+from portage import _unicode_encode, _encodings
+from portage.const import MANIFEST2_IDENTIFIERS
+from portage.util import (atomic_ofstream, grablines,
+ shlex_split, varexpand, writemsg)
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PipeReader import PipeReader
+from .ManifestProcess import ManifestProcess
+
+class ManifestTask(CompositeTask):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
+ "gpg_vars", "repo_config", "force_sign_key", "_manifest_path")
+
+ _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
+ _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
+ _gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
+ _gpg_key_id_lengths = (8, 16, 24, 32, 40)
+
+ def _start(self):
+ self._manifest_path = os.path.join(self.repo_config.location,
+ self.cp, "Manifest")
+ manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
+ scheduler=self.scheduler)
+ self._start_task(manifest_proc, self._manifest_proc_exit)
+
+ def _manifest_proc_exit(self, manifest_proc):
+ self._assert_current(manifest_proc)
+ if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
+ self.returncode = manifest_proc.returncode
+ self._current_task = None
+ self.wait()
+ return
+
+ modified = manifest_proc.returncode == manifest_proc.MODIFIED
+ sign = self.gpg_cmd is not None
+
+ if not modified and sign:
+ sign = self._need_signature()
+ if not sign and self.force_sign_key is not None \
+ and os.path.exists(self._manifest_path):
+ self._check_sig_key()
+ return
+
+ if not sign or not os.path.exists(self._manifest_path):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ self._start_gpg_proc()
+
+ def _check_sig_key(self):
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ popen_proc = PopenProcess(proc=subprocess.Popen(
+ ["gpg", "--verify", self._manifest_path],
+ stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader())
+ os.close(null_fd)
+ popen_proc.pipe_reader.input_files = {
+ "producer" : popen_proc.proc.stdout}
+ self._start_task(popen_proc, self._check_sig_key_exit)
+
+ @staticmethod
+ def _parse_gpg_key(output):
+ """
+ Returns the first token which appears to represent a gpg key
+ id, or None if there is no such token.
+ """
+ regex = ManifestTask._gpg_key_id_re
+ lengths = ManifestTask._gpg_key_id_lengths
+ for token in output.split():
+ m = regex.match(token)
+ if m is not None and len(m.group(0)) in lengths:
+ return m.group(0)
+ return None
+
+ @staticmethod
+ def _normalize_gpg_key(key_str):
+ """
+ Strips leading "0x" and trailing "!", and converts to uppercase
+ (intended to be the same format as that in gpg --verify output).
+ """
+ key_str = key_str.upper()
+ if key_str.startswith("0X"):
+ key_str = key_str[2:]
+ key_str = key_str.rstrip("!")
+ return key_str
+
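+ # Illustrative only (not part of the upstream file):
+ #
+ # >>> ManifestTask._parse_gpg_key(
+ # ...     "gpg: Signature made using RSA key ID 96D8BF6D")
+ # '96D8BF6D'
+ # >>> ManifestTask._normalize_gpg_key('0x96d8bf6d!')
+ # '96D8BF6D'
+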
+ def _check_sig_key_exit(self, proc):
+ self._assert_current(proc)
+
+ parsed_key = self._parse_gpg_key(
+ proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
+ if parsed_key is not None and \
+ self._normalize_gpg_key(parsed_key) == \
+ self._normalize_gpg_key(self.force_sign_key):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._strip_sig(self._manifest_path)
+ self._start_gpg_proc()
+
+ @staticmethod
+ def _strip_sig(manifest_path):
+ """
+ Strip an existing signature from a Manifest file.
+ """
+ line_re = ManifestTask._manifest_line_re
+ lines = grablines(manifest_path)
+ f = None
+ try:
+ f = atomic_ofstream(manifest_path)
+ for line in lines:
+ if line_re.match(line) is not None:
+ f.write(line)
+ f.close()
+ f = None
+ finally:
+ if f is not None:
+ f.abort()
+
+ def _start_gpg_proc(self):
+ gpg_vars = self.gpg_vars
+ if gpg_vars is None:
+ gpg_vars = {}
+ else:
+ gpg_vars = gpg_vars.copy()
+ gpg_vars["FILE"] = self._manifest_path
+ gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
+ gpg_cmd = shlex_split(gpg_cmd)
+ gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+ # PipeLogger echoes output and efficiently monitors for process
+ # exit by listening for the stdout EOF event.
+ gpg_proc.pipe_reader = PipeLogger(background=self.background,
+ input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
+ self._start_task(gpg_proc, self._gpg_proc_exit)
+
+ def _gpg_proc_exit(self, gpg_proc):
+ if self._default_exit(gpg_proc) != os.EX_OK:
+ self.wait()
+ return
+
+ rename_args = (self._manifest_path + ".asc", self._manifest_path)
+ try:
+ os.rename(*rename_args)
+ except OSError as e:
+ writemsg("!!! rename('%s', '%s'): %s\n" % rename_args + (e,),
+ noiselevel=-1)
+ try:
+ os.unlink(self._manifest_path + ".asc")
+ except OSError:
+ pass
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ self._current_task = None
+ self.wait()
+
+ def _need_signature(self):
+ try:
+ with open(_unicode_encode(self._manifest_path,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return self._PGP_HEADER not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/__init__.py b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/__init__.py
new file mode 100644
index 0000000..418ad86
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_parallel_manifest/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/package/ebuild/_spawn_nofetch.py b/usr/lib/portage/pym/portage/package/ebuild/_spawn_nofetch.py
new file mode 100644
index 0000000..0fc53c8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -0,0 +1,93 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+import portage
+from portage import os
+from portage import shutil
+from portage.const import EBUILD_PHASES
+from portage.elog import elog_process
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import doebuild_environment
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.EbuildPhase import EbuildPhase
+
+def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
+ """
+ This spawns pkg_nofetch if appropriate. The settings parameter
+ is useful only if setcpv has already been called in order
+ to cache metadata. It will be cloned internally, in order to
+ prevent any changes from interfering with the calling code.
+ If settings is None then a suitable config instance will be
+ acquired from the given portdbapi instance. Do not use the
+ settings parameter unless setcpv has been called on the given
+ instance, since otherwise it's possible to trigger issues like
+ bug #408817 due to fragile assumptions involving the config
+ state inside doebuild_environment().
+
+ A private PORTAGE_BUILDDIR will be created and cleaned up, in
+ order to avoid any interference with any other processes.
+ If PORTAGE_TMPDIR is writable, that will be used, otherwise
+ the default directory for the tempfile module will be used.
+
+ We only call the pkg_nofetch phase if either RESTRICT=fetch
+ is set or the package has explicitly overridden the default
+ pkg_nofetch implementation. This allows specialized messages
+ to be displayed for problematic packages even though they do
+ not set RESTRICT=fetch (bug #336499).
+
+ This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
+ variable is set in the config instance.
+ """
+
+ if settings is None:
+ settings = config(clone=portdb.settings)
+ else:
+ settings = config(clone=settings)
+
+ if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
+ return os.EX_OK
+
+ # We must create our private PORTAGE_TMPDIR before calling
+ # doebuild_environment(), since lots of variables such
+ # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
+ portage_tmpdir = settings.get('PORTAGE_TMPDIR')
+ if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
+ portage_tmpdir = None
+ private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
+ settings['PORTAGE_TMPDIR'] = private_tmpdir
+ settings.backup_changes('PORTAGE_TMPDIR')
+ # private temp dir was just created, so it's not locked yet
+ settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
+
+ try:
+ doebuild_environment(ebuild_path, 'nofetch',
+ settings=settings, db=portdb)
+ restrict = settings['PORTAGE_RESTRICT'].split()
+ defined_phases = settings['DEFINED_PHASES'].split()
+ if not defined_phases:
+ # When DEFINED_PHASES is undefined, assume all
+ # phases are defined.
+ defined_phases = EBUILD_PHASES
+
+ if 'fetch' not in restrict and \
+ 'nofetch' not in defined_phases:
+ return os.EX_OK
+
+ prepare_build_dirs(settings=settings)
+ ebuild_phase = EbuildPhase(background=False,
+ phase='nofetch',
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ fd_pipes=fd_pipes, settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ elog_process(settings.mycpv, settings)
+ finally:
+ shutil.rmtree(private_tmpdir)
+
+ return ebuild_phase.returncode
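+
+# Illustrative only (not part of the upstream file): a sketch of a possible
+# call site, which simply reports failure since the pkg_nofetch output
+# itself explains what must be fetched manually:
+#
+#     if spawn_nofetch(portdb, ebuild_path) != os.EX_OK:
+#         portage.writemsg('!!! pkg_nofetch failed\n', noiselevel=-1)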
diff --git a/usr/lib/portage/pym/portage/package/ebuild/config.py b/usr/lib/portage/pym/portage/package/ebuild/config.py
new file mode 100644
index 0000000..6e578a9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/config.py
@@ -0,0 +1,2739 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'autouse', 'best_from_dict', 'check_config_instance', 'config',
+]
+
+import copy
+from itertools import chain
+import grp
+import logging
+import platform
+import pwd
+import re
+import sys
+import warnings
+
+from _emerge.Package import Package
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.data:portage_gid',
+ 'portage.dbapi.vartree:vartree',
+ 'portage.package.ebuild.doebuild:_phase_func_map',
+)
+from portage import bsd_chflags, \
+ load_mod, os, selinux, _unicode_decode
+from portage.const import CACHE_PATH, \
+ DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
+ MODULES_FILE_PATH, PORTAGE_BASE_PATH, \
+ PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
+ USER_VIRTUALS_FILE
+from portage.dbapi import dbapi
+from portage.dbapi.porttree import portdbapi
+from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
+from portage.eapi import eapi_exports_AA, eapi_exports_merge_type, \
+ eapi_supports_prefix, eapi_exports_replace_vars, _get_eapi_attrs
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.exception import InvalidDependString, IsADirectory, \
+ PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.process import fakeroot_capable, sandbox_capable, macossandbox_capable
+from portage.repository.config import load_repository_config
+from portage.util import ensure_dirs, getconfig, grabdict, \
+ grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
+ normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
+ writemsg, writemsg_level, _eapi_cache
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str
+
+from portage.package.ebuild._config import special_env_vars
+from portage.package.ebuild._config.env_var_validation import validate_cmd_var
+from portage.package.ebuild._config.features_set import features_set
+from portage.package.ebuild._config.KeywordsManager import KeywordsManager
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.package.ebuild._config.UseManager import UseManager
+from portage.package.ebuild._config.LocationsManager import LocationsManager
+from portage.package.ebuild._config.MaskManager import MaskManager
+from portage.package.ebuild._config.VirtualsManager import VirtualsManager
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+from portage.package.ebuild._config.unpack_dependencies import load_unpack_dependencies_configuration
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_feature_flags_cache = {}
+
+def _get_feature_flags(eapi_attrs):
+ cache_key = (eapi_attrs.feature_flag_test, eapi_attrs.feature_flag_targetroot)
+ flags = _feature_flags_cache.get(cache_key)
+ if flags is not None:
+ return flags
+
+ flags = []
+ if eapi_attrs.feature_flag_test:
+ flags.append("test")
+ if eapi_attrs.feature_flag_targetroot:
+ flags.append("targetroot")
+
+ flags = frozenset(flags)
+ _feature_flags_cache[cache_key] = flags
+ return flags
+
+def autouse(myvartree, use_cache=1, mysettings=None):
+ warnings.warn("portage.autouse() is deprecated",
+ DeprecationWarning, stacklevel=2)
+ return ""
+
+def check_config_instance(test):
+ if not isinstance(test, config):
+ raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if x in top_dict and key in top_dict[x]:
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError("Key not found in list; '%s'" % key)
+
+def _lazy_iuse_regex(iuse_implicit):
+ """
+ The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
+ and the value is only used when an ebuild phase needs to be executed
+ (it's used only to generate QA notices).
+ """
+ # Escape anything except ".*" which is supposed to pass through from
+ # _get_implicit_iuse().
+ regex = sorted(re.escape(x) for x in iuse_implicit)
+ regex = "^(%s)$" % "|".join(regex)
+ regex = regex.replace("\\.\\*", ".*")
+ return regex
+
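+# Illustrative only (not part of the upstream file):
+#
+#     >>> _lazy_iuse_regex(['prefix', 'abi_x86_32', '.*'])
+#     '^(.*|abi_x86_32|prefix)$'
+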
+class _iuse_implicit_match_cache(object):
+
+ def __init__(self, settings):
+ self._iuse_implicit_re = re.compile("^(%s)$" % \
+ "|".join(settings._get_implicit_iuse()))
+ self._cache = {}
+
+ def __call__(self, flag):
+ """
+ Returns True if the flag is matched, False otherwise.
+ """
+ try:
+ return self._cache[flag]
+ except KeyError:
+ m = self._iuse_implicit_re.match(flag) is not None
+ self._cache[flag] = m
+ return m
+
+class config(object):
+ """
+ This class encompasses the main portage configuration. Data is pulled from
+ ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
+ parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user-specified
+ overrides.
+
+ Generally, if you need data like USE flags, FEATURES, environment variables,
+ virtuals, etc., you look in here.
+ """
+
+ _constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
+ 'PORTAGE_PYM_PATH', 'PORTAGE_PYTHONPATH'])
+
+ _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI', 'HDEPEND',
+ 'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
+ 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
+ 'repository', 'RESTRICT',)
+
+ _module_aliases = {
+ "cache.metadata_overlay.database" : "portage.cache.flat_hash.database",
+ "portage.cache.metadata_overlay.database" : "portage.cache.flat_hash.database",
+ }
+
+ _case_insensitive_vars = special_env_vars.case_insensitive_vars
+ _default_globals = special_env_vars.default_globals
+ _env_blacklist = special_env_vars.env_blacklist
+ _environ_filter = special_env_vars.environ_filter
+ _environ_whitelist = special_env_vars.environ_whitelist
+ _environ_whitelist_re = special_env_vars.environ_whitelist_re
+ _global_only_vars = special_env_vars.global_only_vars
+
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None,
+ config_incrementals=None, config_root=None, target_root=None,
+ eprefix=None, local_config=True, env=None,
+ _unmatched_removal=False, repositories=None):
+ """
+ @param clone: If provided, init will use deepcopy to copy by value the instance.
+ @type clone: Instance of config class.
+ @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
+ and then calling instance.setcpv(mycpv).
+ @type mycpv: String
+ @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
+ @type config_profile_path: String
+ @param config_incrementals: List of incremental variables
+ (defaults to portage.const.INCREMENTALS)
+ @type config_incrementals: List
+ @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
+ @type config_root: String
+ @param target_root: the target root, which typically corresponds to the
+ value of the $ROOT env variable (default is /)
+ @type target_root: String
+ @param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
+ @type eprefix: String
+ @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
+ ignore local config (keywording and unmasking)
+ @type local_config: Boolean
+ @param env: The calling environment which is used to override settings.
+ Defaults to os.environ if unspecified.
+ @type env: dict
+ @param _unmatched_removal: Enabled by repoman when the
+ --unmatched-removal option is given.
+ @type _unmatched_removal: Boolean
+ @param repositories: Configuration of repositories.
+ Defaults to portage.repository.config.load_repository_config().
+ @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
+ """
+
+ # This is important when config is reloaded after emerge --sync.
+ _eapi_cache.clear()
+
+ # When initializing the global portage.settings instance, avoid
+ # raising exceptions whenever possible since exceptions thrown
+ # from 'import portage' or 'import portage.exception' statements
+ # can practically render the API unusable for API consumers.
+ tolerant = hasattr(portage, '_initializing_globals')
+ self._tolerant = tolerant
+ self._unmatched_removal = _unmatched_removal
+
+ self.locked = 0
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ self._penv = []
+ self.modifiedkeys = []
+ self.uvlist = []
+ self._accept_chost_re = None
+ self._accept_properties = None
+ self._accept_restrict = None
+ self._features_overrides = []
+ self._make_defaults = None
+ self._parent_stable = None
+
+ # _unknown_features records unknown features that
+ # have triggered warning messages, and ensures that
+ # the same warning isn't shown twice.
+ self._unknown_features = set()
+
+ self.local_config = local_config
+
+ if clone:
+ # For immutable attributes, use shallow copy for
+ # speed and memory conservation.
+ self._tolerant = clone._tolerant
+ self._unmatched_removal = clone._unmatched_removal
+ self.categories = clone.categories
+ self.depcachedir = clone.depcachedir
+ self.incrementals = clone.incrementals
+ self.module_priority = clone.module_priority
+ self.profile_path = clone.profile_path
+ self.profiles = clone.profiles
+ self.packages = clone.packages
+ self.repositories = clone.repositories
+ self.unpack_dependencies = clone.unpack_dependencies
+ self._iuse_effective = clone._iuse_effective
+ self._iuse_implicit_match = clone._iuse_implicit_match
+ self._non_user_variables = clone._non_user_variables
+ self._env_d_blacklist = clone._env_d_blacklist
+ self._repo_make_defaults = clone._repo_make_defaults
+ self.usemask = clone.usemask
+ self.useforce = clone.useforce
+ self.puse = clone.puse
+ self.user_profile_dir = clone.user_profile_dir
+ self.local_config = clone.local_config
+ self.make_defaults_use = clone.make_defaults_use
+ self.mycpv = clone.mycpv
+ self._setcpv_args_hash = clone._setcpv_args_hash
+
+ # immutable attributes (internal policy ensures lack of mutation)
+ self._locations_manager = clone._locations_manager
+ self._use_manager = clone._use_manager
+ # force instantiation of lazy immutable objects when cloning, so
+ # that they're not instantiated more than once
+ self._keywords_manager_obj = clone._keywords_manager
+ self._mask_manager_obj = clone._mask_manager
+
+ # shared mutable attributes
+ self._unknown_features = clone._unknown_features
+
+ self.modules = copy.deepcopy(clone.modules)
+ self._penv = copy.deepcopy(clone._penv)
+
+ self.configdict = copy.deepcopy(clone.configdict)
+ self.configlist = [
+ self.configdict['env.d'],
+ self.configdict['repo'],
+ self.configdict['pkginternal'],
+ self.configdict['globals'],
+ self.configdict['defaults'],
+ self.configdict['conf'],
+ self.configdict['pkg'],
+ self.configdict['env'],
+ ]
+ self.lookuplist = self.configlist[:]
+ self.lookuplist.reverse()
+ self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
+ self.backupenv = self.configdict["backupenv"]
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.features = features_set(self)
+ self.features._features = copy.deepcopy(clone.features._features)
+ self._features_overrides = copy.deepcopy(clone._features_overrides)
+
+ #Strictly speaking _license_manager is not immutable. Users need to ensure that
+ #extract_global_changes() is called right after __init__ (if at all).
+ #It also has the mutable member _undef_lic_groups. It is used to track
+ #undefined license groups, to not display an error message for the same
+ #group again and again. Because of this, it's useful to share it between
+ #all LicenseManager instances.
+ self._license_manager = clone._license_manager
+
+ # force instantiation of lazy objects when cloning, so
+ # that they're not instantiated more than once
+ self._virtuals_manager_obj = copy.deepcopy(clone._virtuals_manager)
+
+ self._accept_properties = copy.deepcopy(clone._accept_properties)
+ self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+ self._accept_restrict = copy.deepcopy(clone._accept_restrict)
+ self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
+ self._penvdict = copy.deepcopy(clone._penvdict)
+ self._expand_map = copy.deepcopy(clone._expand_map)
+
+ else:
+ # lazily instantiated objects
+ self._keywords_manager_obj = None
+ self._mask_manager_obj = None
+ self._virtuals_manager_obj = None
+
+ locations_manager = LocationsManager(config_root=config_root,
+ config_profile_path=config_profile_path, eprefix=eprefix,
+ local_config=local_config, target_root=target_root)
+ self._locations_manager = locations_manager
+
+ eprefix = locations_manager.eprefix
+ config_root = locations_manager.config_root
+ abs_user_config = locations_manager.abs_user_config
+ make_conf_paths = [
+ os.path.join(config_root, 'etc', 'make.conf'),
+ os.path.join(config_root, MAKE_CONF_FILE)
+ ]
+ try:
+ if os.path.samefile(*make_conf_paths):
+ make_conf_paths.pop()
+ except OSError:
+ pass
+
+ make_conf_count = 0
+ make_conf = {}
+ for x in make_conf_paths:
+ mygcfg = getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=make_conf, recursive=True)
+ if mygcfg is not None:
+ make_conf.update(mygcfg)
+ make_conf_count += 1
+
+ if make_conf_count == 2:
+ writemsg("!!! %s\n" %
+ _("Found 2 make.conf files, using both '%s' and '%s'") %
+ tuple(make_conf_paths), noiselevel=-1)
+
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ locations_manager.set_root_override(make_conf.get("ROOT"))
+ target_root = locations_manager.target_root
+ eroot = locations_manager.eroot
+ self.global_config_path = locations_manager.global_config_path
+
+ # The expand_map is used for variable substitution
+ # in getconfig() calls, and the getconfig() calls
+ # update expand_map with the value of each variable
+ # assignment that occurs. Variable substitution occurs
+ # in the following order, which corresponds to the
+ # order of appearance in self.lookuplist:
+ #
+ # * env.d
+ # * make.globals
+ # * make.defaults
+ # * make.conf
+ #
+ # Notably absent is "env", since we want to avoid any
+ # interaction with the calling environment that might
+ # lead to unexpected results.
+
+ env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
+ tolerant=tolerant, expand=False) or {}
+ expand_map = env_d.copy()
+ self._expand_map = expand_map
+
+ # Allow make.globals to set default paths relative to ${EPREFIX}.
+ expand_map["EPREFIX"] = eprefix
+
+ if portage._not_installed:
+ make_globals_path = os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals")
+ else:
+ make_globals_path = os.path.join(self.global_config_path, "make.globals")
+ old_make_globals = os.path.join(config_root, "etc", "make.globals")
+ if os.path.isfile(old_make_globals) and \
+ not os.path.samefile(make_globals_path, old_make_globals):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found obsolete make.globals file: "
+ "'%s', (using '%s' instead)") %
+ (old_make_globals, make_globals_path),
+ noiselevel=-1)
+
+ make_globals = getconfig(make_globals_path,
+ tolerant=tolerant, expand=expand_map)
+ if make_globals is None:
+ make_globals = {}
+
+ for k, v in self._default_globals.items():
+ make_globals.setdefault(k, v)
+
+ if config_incrementals is None:
+ self.incrementals = INCREMENTALS
+ else:
+ self.incrementals = config_incrementals
+ if not isinstance(self.incrementals, frozenset):
+ self.incrementals = frozenset(self.incrementals)
+
+ self.module_priority = ("user", "default")
+ self.modules = {}
+ modules_file = os.path.join(config_root, MODULES_FILE_PATH)
+ modules_loader = KeyValuePairFileLoader(modules_file, None, None)
+ modules_dict, modules_errors = modules_loader.load()
+ self.modules["user"] = modules_dict
+ if self.modules["user"] is None:
+ self.modules["user"] = {}
+ user_auxdbmodule = \
+ self.modules["user"].get("portdbapi.auxdbmodule")
+ if user_auxdbmodule is not None and \
+ user_auxdbmodule in self._module_aliases:
+ warnings.warn("'%s' is deprecated: %s" %
+ (user_auxdbmodule, modules_file))
+
+ self.modules["default"] = {
+ "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
+ }
+
+ self.configlist=[]
+
+ # back up our incremental variables:
+ self.configdict={}
+ self._use_expand_dict = {}
+ # configlist will contain: [ env.d, repo, pkginternal, globals, defaults, conf, pkg, env ]
+ self.configlist.append({})
+ self.configdict["env.d"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["repo"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkginternal"] = self.configlist[-1]
+
+ # env_d will be empty if profile.env doesn't exist.
+ if env_d:
+ self.configdict["env.d"].update(env_d)
+
+ # backupenv is used for calculating incremental variables.
+ if env is None:
+ env = os.environ
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
+ for k, v in env.items())
+
+ self.backupenv = env_unicode
+
+ if env_d:
+ # Remove duplicate values so they don't override updated
+ # profile.env values later (profile.env is reloaded in each
+ # call to self.regenerate).
+ for k, v in env_d.items():
+ try:
+ if self.backupenv[k] == v:
+ del self.backupenv[k]
+ except KeyError:
+ pass
+ del k, v
+
+ self.configdict["env"] = LazyItemsDict(self.backupenv)
+
+ self.configlist.append(make_globals)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.make_defaults_use = []
+
+ #Loading Repositories
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self["ROOT"] = target_root
+ self["EPREFIX"] = eprefix
+ self["EROOT"] = eroot
+ known_repos = []
+ portdir = ""
+ portdir_overlay = ""
+ portdir_sync = None
+ for confs in [make_globals, make_conf, self.configdict["env"]]:
+ v = confs.get("PORTDIR")
+ if v is not None:
+ portdir = v
+ known_repos.append(v)
+ v = confs.get("PORTDIR_OVERLAY")
+ if v is not None:
+ portdir_overlay = v
+ known_repos.extend(shlex_split(v))
+ v = confs.get("SYNC")
+ if v is not None:
+ portdir_sync = v
+
+ known_repos = frozenset(known_repos)
+ self["PORTDIR"] = portdir
+ self["PORTDIR_OVERLAY"] = portdir_overlay
+ if portdir_sync:
+ self["SYNC"] = portdir_sync
+ self.lookuplist = [self.configdict["env"]]
+ if repositories is None:
+ self.repositories = load_repository_config(self)
+ else:
+ self.repositories = repositories
+
+ self['PORTAGE_REPOSITORIES'] = self.repositories.config_string()
+ self.backup_changes('PORTAGE_REPOSITORIES')
+
+ #filling PORTDIR and PORTDIR_OVERLAY variables for compatibility
+ main_repo = self.repositories.mainRepo()
+ if main_repo is not None:
+ self["PORTDIR"] = main_repo.user_location
+ self.backup_changes("PORTDIR")
+ expand_map["PORTDIR"] = self["PORTDIR"]
+
+ # repoman controls PORTDIR_OVERLAY via the environment, so no
+ # special cases are needed here.
+ portdir_overlay = list(self.repositories.repoUserLocationList())
+ if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+ portdir_overlay = portdir_overlay[1:]
+
+ new_ov = []
+ if portdir_overlay:
+ for ov in portdir_overlay:
+ ov = normalize_path(ov)
+ if isdir_raise_eaccess(ov) or portage._sync_mode:
+ new_ov.append(portage._shell_quote(ov))
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+ expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
+
+ locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
+ locations_manager.load_profiles(self.repositories, known_repos)
+
+ profiles_complex = locations_manager.profiles_complex
+ self.profiles = locations_manager.profiles
+ self.profile_path = locations_manager.profile_path
+ self.user_profile_dir = locations_manager.user_profile_dir
+
+ try:
+ packages_list = [grabfile_package(os.path.join(x, "packages"),
+ verify_eapi=True) for x in self.profiles]
+ except IOError as e:
+ if e.errno == IsADirectory.errno:
+ raise IsADirectory(os.path.join(self.profile_path,
+ "packages"))
+ # Don't silently swallow unexpected IOErrors, since
+ # packages_list would be undefined below.
+ raise
+
+ self.packages = tuple(stack_lists(packages_list, incremental=1))
+
+ # prevmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ # Negative atoms are filtered by the above stack_lists() call.
+ if not isinstance(x, Atom):
+ x = Atom(x.lstrip('*'))
+ self.prevmaskdict.setdefault(x.cp, []).append(x)
+
+ self.unpack_dependencies = load_unpack_dependencies_configuration(self.repositories)
+
+ mygcfg = {}
+ if profiles_complex:
+ mygcfg_dlists = [getconfig(os.path.join(x.location, "make.defaults"),
+ tolerant=tolerant, expand=expand_map, recursive=x.portage1_directories)
+ for x in profiles_complex]
+ self._make_defaults = mygcfg_dlists
+ mygcfg = stack_dicts(mygcfg_dlists,
+ incrementals=self.incrementals)
+ if mygcfg is None:
+ mygcfg = {}
+ self.configlist.append(mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ mygcfg = {}
+ for x in make_conf_paths:
+ mygcfg.update(getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map, recursive=True) or {})
+
+ # Don't allow the user to override certain variables in make.conf
+ profile_only_variables = self.configdict["defaults"].get(
+ "PROFILE_ONLY_VARIABLES", "").split()
+ profile_only_variables = stack_lists([profile_only_variables])
+ non_user_variables = set()
+ non_user_variables.update(profile_only_variables)
+ non_user_variables.update(self._env_blacklist)
+ non_user_variables.update(self._global_only_vars)
+ non_user_variables = frozenset(non_user_variables)
+ self._non_user_variables = non_user_variables
+
+ self._env_d_blacklist = frozenset(chain(
+ profile_only_variables,
+ self._env_blacklist,
+ ))
+ env_d = self.configdict["env.d"]
+ for k in self._env_d_blacklist:
+ env_d.pop(k, None)
+
+ for k in profile_only_variables:
+ mygcfg.pop(k, None)
+
+ self.configlist.append(mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append(LazyItemsDict())
+ self.configdict["pkg"]=self.configlist[-1]
+
+ self.configdict["backupenv"] = self.backupenv
+
+ # Don't allow the user to override certain variables in the env
+ for k in profile_only_variables:
+ self.backupenv.pop(k, None)
+
+ self.configlist.append(self.configdict["env"])
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ # Blacklist vars that could interfere with portage internals.
+ for blacklisted in self._env_blacklist:
+ for cfg in self.lookuplist:
+ cfg.pop(blacklisted, None)
+ self.backupenv.pop(blacklisted, None)
+ del blacklisted, cfg
+
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self.backup_changes("PORTAGE_CONFIGROOT")
+ self["ROOT"] = target_root
+ self.backup_changes("ROOT")
+ self["EPREFIX"] = eprefix
+ self.backup_changes("EPREFIX")
+ self["EROOT"] = eroot
+ self.backup_changes("EROOT")
+
+ # The prefix of the running portage instance is used in the
+ # ebuild environment to implement the --host-root option for
+ # best_version and has_version.
+ self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
+ self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
+
+ self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+ self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
+ self._penvdict = portage.dep.ExtendedAtomDict(dict)
+
+ self._repo_make_defaults = {}
+ for repo in self.repositories.repos_with_profiles():
+ d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
+ tolerant=tolerant, expand=self.configdict["globals"].copy(), recursive=repo.portage1_profiles) or {}
+ if d:
+ for k in chain(self._env_blacklist,
+ profile_only_variables, self._global_only_vars):
+ d.pop(k, None)
+ self._repo_make_defaults[repo.name] = d
+
+ #Read all USE related files from profiles and optionally from user config.
+ self._use_manager = UseManager(self.repositories, profiles_complex,
+ abs_user_config, self._isStable, user_config=local_config)
+ #Initialize all USE related variables we track ourselves.
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.configdict["conf"]["USE"] = \
+ self._use_manager.extract_global_USE_changes( \
+ self.configdict["conf"].get("USE", ""))
+
+		#Read license_groups from profiles, and optionally license_groups and package.license from user config
+ self._license_manager = LicenseManager(locations_manager.profile_locations, \
+ abs_user_config, user_config=local_config)
+ #Extract '*/*' entries from package.license
+ self.configdict["conf"]["ACCEPT_LICENSE"] = \
+ self._license_manager.extract_global_changes( \
+ self.configdict["conf"].get("ACCEPT_LICENSE", ""))
+
+ if local_config:
+ #package.properties
+ propdict = grabdict_package(os.path.join(
+ abs_user_config, "package.properties"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False)
+ v = propdict.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
+ for k, v in propdict.items():
+ self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+
+ # package.accept_restrict
+ d = grabdict_package(os.path.join(
+ abs_user_config, "package.accept_restrict"),
+ recursive=True, allow_wildcard=True,
+ allow_repo=True, verify_eapi=False)
+ v = d.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_RESTRICT" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
+ for k, v in d.items():
+ self._paccept_restrict.setdefault(k.cp, {})[k] = v
+
+ #package.env
+ penvdict = grabdict_package(os.path.join(
+ abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False)
+ v = penvdict.pop("*/*", None)
+ if v is not None:
+ global_wildcard_conf = {}
+ self._grab_pkg_env(v, global_wildcard_conf)
+ incrementals = self.incrementals
+ conf_configdict = self.configdict["conf"]
+ for k, v in global_wildcard_conf.items():
+ if k in incrementals:
+ if k in conf_configdict:
+ conf_configdict[k] = \
+ conf_configdict[k] + " " + v
+ else:
+ conf_configdict[k] = v
+ else:
+ conf_configdict[k] = v
+ expand_map[k] = v
+
+ for k, v in penvdict.items():
+ self._penvdict.setdefault(k.cp, {})[k] = v
+
+ #getting categories from an external file now
+ self.categories = [grabfile(os.path.join(x, "categories")) \
+ for x in locations_manager.profile_and_user_locations]
+ category_re = dbapi._category_re
+ # categories used to be a tuple, but now we use a frozenset
+		# for hashed category validation in portdbapi.cp_list()
+ self.categories = frozenset(
+ x for x in stack_lists(self.categories, incremental=1)
+ if category_re.match(x) is not None)
+
+ archlist = [grabfile(os.path.join(x, "arch.list")) \
+ for x in locations_manager.profile_and_user_locations]
+ archlist = stack_lists(archlist, incremental=1)
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
+
+ pkgprovidedlines = [grabfile(
+ os.path.join(x.location, "package.provided"),
+ recursive=x.portage1_directories)
+ for x in profiles_complex]
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ has_invalid_data = False
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ myline = pkgprovidedlines[x]
+ if not isvalidatom("=" + myline):
+ writemsg(_("Invalid package name in package.provided: %s\n") % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
+ noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if cpvr[0] == "virtual":
+ writemsg(_("Virtual package in package.provided: %s\n") % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if has_invalid_data:
+ writemsg(_("See portage(5) for correct package.provided usage.\n"),
+ noiselevel=-1)
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ x_split = catpkgsplit(x)
+ if x_split is None:
+ continue
+ mycatpkg = cpv_getkey(x)
+ if mycatpkg in self.pprovideddict:
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ if "USE_ORDER" not in self:
+ self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
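+		# Illustrative sketch (not upstream code): USE_ORDER lists config
+		# layers from highest to lowest priority, so with the default
+		# above, USE from the calling environment overrides package.use
+		# ("pkg"), which overrides make.conf ("conf"), which overrides
+		# profile make.defaults, and so on down to env.d:
+		#
+		#     for layer in "env:pkg:conf:defaults:pkginternal:repo:env.d".split(":"):
+		#         ...  # regenerate() stacks these, highest priority applied last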
+
+ self.depcachedir = DEPCACHE_PATH
+ if portage.const.EPREFIX:
+ self.depcachedir = os.path.join(portage.const.EPREFIX,
+ DEPCACHE_PATH.lstrip(os.sep))
+
+ if self.get("PORTAGE_DEPCACHEDIR", None):
+ self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+ self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
+ self.backup_changes("PORTAGE_DEPCACHEDIR")
+
+ if "CBUILD" not in self and "CHOST" in self:
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ if "USERLAND" not in self:
+ # Set default USERLAND so that our test cases can assume that
+ # it's always set. This allows isolated-functions.sh to avoid
+ # calling uname -s when sourced.
+ system = platform.system()
+ if system is not None and \
+ (system.endswith("BSD") or system == "DragonFly"):
+ self["USERLAND"] = "BSD"
+ else:
+ self["USERLAND"] = "GNU"
+ self.backup_changes("USERLAND")
+
+ default_inst_ids = {
+ "PORTAGE_INST_GID": "0",
+ "PORTAGE_INST_UID": "0",
+ }
+
+		# PREFIX LOCAL: inventing a UID/GID based on a path is a very
+		# bad idea; it breaks almost everything, since group ids don't
+		# have to match when a user is a member of many groups.
+		# In particular, this breaks the configure-set portage
+		# group and user (in portage/data.py).
+ #if eprefix:
+ # # For prefix environments, default to the UID and GID of
+ # # the top-level EROOT directory.
+ # try:
+ # eroot_st = os.stat(eroot)
+ # except OSError:
+ # pass
+ # else:
+ # default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid)
+ # default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid)
+
+ # if "PORTAGE_USERNAME" not in self:
+ # try:
+ # pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+ # except KeyError:
+ # pass
+ # else:
+ # self["PORTAGE_USERNAME"] = pwd_struct.pw_name
+ # self.backup_changes("PORTAGE_USERNAME")
+
+ # if "PORTAGE_GRPNAME" not in self:
+ # try:
+ # grp_struct = grp.getgrgid(eroot_st.st_gid)
+ # except KeyError:
+ # pass
+ # else:
+ # self["PORTAGE_GRPNAME"] = grp_struct.gr_name
+ # self.backup_changes("PORTAGE_GRPNAME")
+ # END PREFIX LOCAL
+
+ for var, default_val in default_inst_ids.items():
+ try:
+ self[var] = str(int(self.get(var, default_val)))
+ except ValueError:
+ writemsg(_("!!! %s='%s' is not a valid integer. "
+ "Falling back to %s.\n") % (var, self[var], default_val),
+ noiselevel=-1)
+ self[var] = default_val
+ self.backup_changes(var)
+
+ if portage._internal_caller:
+ self["PORTAGE_INTERNAL_CALLER"] = "1"
+ self.backup_changes("PORTAGE_INTERNAL_CALLER")
+
+ # initialize self.features
+ self.regenerate()
+
+ if bsd_chflags:
+ self.features.add('chflags')
+
+ self._iuse_effective = self._calc_iuse_effective()
+ self._iuse_implicit_match = _iuse_implicit_match_cache(self)
+
+ self._validate_commands()
+
+ for k in self._case_insensitive_vars:
+ if k in self:
+ self[k] = self[k].lower()
+ self.backup_changes(k)
+
+ # The first constructed config object initializes these modules,
+ # and subsequent calls to the _init() functions have no effect.
+ portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
+ portage.data._init(self)
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ @property
+ def mygcfg(self):
+ warnings.warn("portage.config.mygcfg is deprecated", stacklevel=3)
+ return {}
+
+ def _validate_commands(self):
+ for k in special_env_vars.validate_commands:
+ v = self.get(k)
+ if v is not None:
+ valid, v_split = validate_cmd_var(v)
+
+ if not valid:
+ if v_split:
+ writemsg_level(_("%s setting is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+
+ # before deleting the invalid setting, backup
+ # the default value if available
+ v = self.configdict['globals'].get(k)
+ if v is not None:
+ default_valid, v_split = validate_cmd_var(v)
+ if not default_valid:
+ if v_split:
+ writemsg_level(
+ _("%s setting from make.globals" + \
+ " is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+ # make.globals seems corrupt, so try for
+ # a hardcoded default instead
+ v = self._default_globals.get(k)
+
+ # delete all settings for this key,
+ # including the invalid one
+ del self[k]
+ self.backupenv.pop(k, None)
+ if v:
+ # restore validated default
+ self.configdict['globals'][k] = v
+
+ def _init_dirs(self):
+ """
+ Create a few directories that are critical to portage operation
+ """
+ if not os.access(self["EROOT"], os.W_OK):
+ return
+
+ # gid, mode, mask, preserve_perms
+ dir_mode_map = {
+ "tmp" : ( -1, 0o1777, 0, True),
+ "var/tmp" : ( -1, 0o1777, 0, True),
+ PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
+ CACHE_PATH : (portage_gid, 0o755, 0o2, False)
+ }
+
+ for mypath, (gid, mode, modemask, preserve_perms) \
+ in dir_mode_map.items():
+ mydir = os.path.join(self["EROOT"], mypath)
+ if preserve_perms and os.path.isdir(mydir):
+ # Only adjust permissions on some directories if
+ # they don't exist yet. This gives freedom to the
+ # user to adjust permissions to suit their taste.
+ continue
+ try:
+ ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
+ except PortageException as e:
+ writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e),
+ noiselevel=-1)
+
+ @property
+ def _keywords_manager(self):
+ if self._keywords_manager_obj is None:
+ self._keywords_manager_obj = KeywordsManager(
+ self._locations_manager.profiles_complex,
+ self._locations_manager.abs_user_config,
+ self.local_config,
+ global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
+ return self._keywords_manager_obj
+
+ @property
+ def _mask_manager(self):
+ if self._mask_manager_obj is None:
+ self._mask_manager_obj = MaskManager(self.repositories,
+ self._locations_manager.profiles_complex,
+ self._locations_manager.abs_user_config,
+ user_config=self.local_config,
+ strict_umatched_removal=self._unmatched_removal)
+ return self._mask_manager_obj
+
+ @property
+ def _virtuals_manager(self):
+ if self._virtuals_manager_obj is None:
+ self._virtuals_manager_obj = VirtualsManager(self.profiles)
+ return self._virtuals_manager_obj
+
+ @property
+ def pkeywordsdict(self):
+ result = self._keywords_manager.pkeywordsdict.copy()
+ for k, v in result.items():
+ result[k] = v.copy()
+ return result
+
+ @property
+ def pmaskdict(self):
+ return self._mask_manager._pmaskdict.copy()
+
+ @property
+ def punmaskdict(self):
+ return self._mask_manager._punmaskdict.copy()
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
+ return self._license_manager.expandLicenseTokens(tokens)
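+	# Illustrative example (hypothetical group members, not upstream code):
+	#
+	#     settings.expandLicenseTokens(["@FSF-APPROVED"])
+	#     # -> ["GPL-2", "GPL-3", ...]      (the group's member licenses)
+	#     settings.expandLicenseTokens(["-@FSF-APPROVED"])
+	#     # -> ["-GPL-2", "-GPL-3", ...]    (negation applies to each member)
+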
+
+ def validate(self):
+ """Validate miscellaneous settings and display warnings if necessary.
+ (This code was previously in the global scope of portage.py)"""
+
+ groups = self["ACCEPT_KEYWORDS"].split()
+ archlist = self.archlist()
+ if not archlist:
+ writemsg(_("--- 'profiles/arch.list' is empty or "
+ "not available. Empty portage tree?\n"), noiselevel=1)
+ else:
+ for group in groups:
+ if group not in archlist and \
+ not (group.startswith("-") and group[1:] in archlist) and \
+ group not in ("*", "~*", "**"):
+ writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
+ noiselevel=-1)
+
+ profile_broken = False
+
+ if not self.profile_path:
+ profile_broken = True
+ else:
+ # If any one of these files exists, then
+ # the profile is considered valid.
+ for x in ("make.defaults", "parent",
+ "packages", "use.force", "use.mask"):
+ if exists_raise_eaccess(os.path.join(self.profile_path, x)):
+ break
+ else:
+ profile_broken = True
+
+ if profile_broken and not portage._sync_mode:
+ abs_profile_path = None
+ for x in (PROFILE_PATH, 'etc/make.profile'):
+ x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
+ try:
+ os.lstat(x)
+ except OSError:
+ pass
+ else:
+ abs_profile_path = x
+ break
+
+ if abs_profile_path is None:
+ abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+ PROFILE_PATH)
+
+ writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
+ noiselevel=-1)
+ writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
+ writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+
+ abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
+ USER_VIRTUALS_FILE)
+ if os.path.exists(abs_user_virtuals):
+ writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
+ writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
+ writemsg("!!! this new location.\n\n")
+
+ if not sandbox_capable and not macossandbox_capable and \
+ ("sandbox" in self.features or "usersandbox" in self.features):
+ if self.profile_path is not None and \
+ os.path.realpath(self.profile_path) == \
+ os.path.realpath(os.path.join(
+ self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
+ # Don't show this warning when running repoman and the
+ # sandbox feature came from a profile that doesn't belong
+ # to the user.
+ writemsg(colorize("BAD", _("!!! Problem with sandbox"
+ " binary. Disabling...\n\n")), noiselevel=-1)
+
+ if "fakeroot" in self.features and \
+ not fakeroot_capable:
+ writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
+ "fakeroot binary is not installed.\n"), noiselevel=-1)
+
+ if os.getuid() == 0 and not hasattr(os, "setgroups"):
+ warning_shown = False
+
+ if "userpriv" in self.features:
+ writemsg(_("!!! FEATURES=userpriv is enabled, but "
+ "os.setgroups is not available.\n"), noiselevel=-1)
+ warning_shown = True
+
+ if "userfetch" in self.features:
+ writemsg(_("!!! FEATURES=userfetch is enabled, but "
+ "os.setgroups is not available.\n"), noiselevel=-1)
+ warning_shown = True
+
+ if warning_shown and platform.python_implementation() == 'PyPy':
+ writemsg(_("!!! See https://bugs.pypy.org/issue833 for details.\n"),
+ noiselevel=-1)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ mod = None
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ if best_mod in self._module_aliases:
+ mod = load_mod(self._module_aliases[best_mod])
+ elif not best_mod.startswith("cache."):
+ raise
+ else:
+ best_mod = "portage." + best_mod
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ raise
+ return mod
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception(_("Configuration is locked."))
+
+ def backup_changes(self,key=None):
+ self.modifying()
+ if key and key in self.configdict["env"]:
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError(_("No such key defined in environment: %s") % key)
+
+ def reset(self, keeping_pkg=0, use_cache=None):
+ """
+ Restore environment from self.backupenv, call self.regenerate()
+ @param keeping_pkg: Should we keep the setcpv() data or delete it.
+ @type keeping_pkg: Boolean
+		@rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.reset() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+ self.configdict["env"].clear()
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ del self._penv[:]
+ self.configdict["pkg"].clear()
+ self.configdict["pkginternal"].clear()
+ self.configdict["repo"].clear()
+ self.configdict["defaults"]["USE"] = \
+ " ".join(self.make_defaults_use)
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.regenerate()
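+
+	# Usage note (illustrative, not upstream code): reset() restores the
+	# global view after a setcpv() call, e.g.:
+	#
+	#     settings.setcpv("app-misc/foo-1.0", mydb=portdb)  # hypothetical cpv
+	#     settings.reset()  # drop per-package state, keep backupenv values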
+
+ class _lazy_vars(object):
+
+ __slots__ = ('built_use', 'settings', 'values')
+
+ def __init__(self, built_use, settings):
+ self.built_use = built_use
+ self.settings = settings
+ self.values = None
+
+ def __getitem__(self, k):
+ if self.values is None:
+ self.values = self._init_values()
+ return self.values[k]
+
+ def _init_values(self):
+ values = {}
+ settings = self.settings
+ use = self.built_use
+ if use is None:
+ use = frozenset(settings['PORTAGE_USE'].split())
+
+ values['ACCEPT_LICENSE'] = settings._license_manager.get_prunned_accept_license( \
+ settings.mycpv, use, settings['LICENSE'], settings['SLOT'], settings.get('PORTAGE_REPO_NAME'))
+ values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
+ return values
+
+ def _restrict(self, use, settings):
+ try:
+ restrict = set(use_reduce(settings['RESTRICT'], uselist=use, flat=True))
+ except InvalidDependString:
+ restrict = set()
+ return ' '.join(sorted(restrict))
+
+ class _lazy_use_expand(object):
+ """
+ Lazily evaluate USE_EXPAND variables since they are only needed when
+ an ebuild shell is spawned. Variables values are made consistent with
+		an ebuild shell is spawned. Variable values are made consistent with
+ """
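+
+		# Illustrative behavior (hypothetical flags, not upstream code):
+		# with "LINGUAS" in USE_EXPAND and USE containing
+		# "linguas_de linguas_fr", __getitem__("LINGUAS") yields "de fr",
+		# preserving any ordering given by the raw LINGUAS variable.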
+
+ def __init__(self, settings, unfiltered_use,
+ use, usemask, iuse_implicit,
+ use_expand_split, use_expand_dict):
+ self._settings = settings
+ self._unfiltered_use = unfiltered_use
+ self._use = use
+ self._usemask = usemask
+ self._iuse_implicit = iuse_implicit
+ self._use_expand_split = use_expand_split
+ self._use_expand_dict = use_expand_dict
+
+ def __getitem__(self, key):
+ prefix = key.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in self._use \
+ if x[:prefix_len] == prefix )
+ var_split = self._use_expand_dict.get(key, '').split()
+ # Preserve the order of var_split because it can matter for things
+ # like LINGUAS.
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(expand_flags.difference(var_split))
+ has_wildcard = '*' in expand_flags
+ if has_wildcard:
+ var_split = [ x for x in var_split if x != "*" ]
+ has_iuse = set()
+ for x in self._iuse_implicit:
+ if x[:prefix_len] == prefix:
+ has_iuse.add(x[prefix_len:])
+ if has_wildcard:
+ # * means to enable everything in IUSE that's not masked
+ if has_iuse:
+ usemask = self._usemask
+ for suffix in has_iuse:
+ x = prefix + suffix
+ if x not in usemask:
+ if suffix not in expand_flags:
+ var_split.append(suffix)
+ else:
+ # If there is a wildcard and no matching flags in IUSE then
+ # LINGUAS should be unset so that all .mo files are
+ # installed.
+ var_split = []
+ # Make the flags unique and filter them according to IUSE.
+ # Also, continue to preserve order for things like LINGUAS
+ # and filter any duplicates that variable may contain.
+ filtered_var_split = []
+ remaining = has_iuse.intersection(var_split)
+ for x in var_split:
+ if x in remaining:
+ remaining.remove(x)
+ filtered_var_split.append(x)
+ var_split = filtered_var_split
+
+ if var_split:
+ value = ' '.join(var_split)
+ else:
+ # Don't export empty USE_EXPAND vars unless the user config
+ # exports them as empty. This is required for vars such as
+ # LINGUAS, where unset and empty have different meanings.
+ # The special '*' token is understood by ebuild.sh, which
+ # will unset the variable so that things like LINGUAS work
+ # properly (see bug #459350).
+ if has_wildcard:
+ value = '*'
+ else:
+ if has_iuse:
+ already_set = False
+ # Skip the first 'env' configdict, in order to
+ # avoid infinite recursion here, since that dict's
+ # __getitem__ calls the current __getitem__.
+ for d in self._settings.lookuplist[1:]:
+ if key in d:
+ already_set = True
+ break
+
+ if not already_set:
+ for x in self._unfiltered_use:
+ if x[:prefix_len] == prefix:
+ already_set = True
+ break
+
+ if already_set:
+ value = ''
+ else:
+ value = '*'
+ else:
+ # It's not in IUSE, so just allow the variable content
+ # to pass through if it is defined somewhere. This
+ # allows packages that support LINGUAS but don't
+ # declare it in IUSE to use the variable outside of the
+ # USE_EXPAND context.
+ value = None
+
+ return value
+
+ def setcpv(self, mycpv, use_cache=None, mydb=None):
+ """
+		Load a particular CPV into the config; this lets us see the
+		default USE flags for a particular ebuild as well as the USE
+ flags from package.use.
+
+ @param mycpv: A cpv to load
+ @type mycpv: string
+ @param mydb: a dbapi instance that supports aux_get with the IUSE key.
+ @type mydb: dbapi or derivative.
+ @rtype: None
+ """
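+
+		# Illustrative usage (hypothetical cpv and dbapi, not upstream code):
+		#
+		#     settings.setcpv("sys-apps/foo-1.0", mydb=portdb)
+		#     settings["PORTAGE_USE"]  # USE filtered against this pkg's IUSE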
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.setcpv() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ pkg = None
+ built_use = None
+ explicit_iuse = None
+ if not isinstance(mycpv, basestring):
+ pkg = mycpv
+ mycpv = pkg.cpv
+ mydb = pkg._metadata
+ explicit_iuse = pkg.iuse.all
+ args_hash = (mycpv, id(pkg))
+ if pkg.built:
+ built_use = pkg.use.enabled
+ else:
+ args_hash = (mycpv, id(mydb))
+
+ if args_hash == self._setcpv_args_hash:
+ return
+ self._setcpv_args_hash = args_hash
+
+ has_changed = False
+ self.mycpv = mycpv
+ cat, pf = catsplit(mycpv)
+ cp = cpv_getkey(mycpv)
+ cpv_slot = self.mycpv
+ pkginternaluse = ""
+ iuse = ""
+ pkg_configdict = self.configdict["pkg"]
+ previous_iuse = pkg_configdict.get("IUSE")
+ previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
+ previous_features = pkg_configdict.get("FEATURES")
+ previous_penv = self._penv
+
+ aux_keys = self._setcpv_aux_keys
+
+ # Discard any existing metadata and package.env settings from
+ # the previous package instance.
+ pkg_configdict.clear()
+
+ pkg_configdict["CATEGORY"] = cat
+ pkg_configdict["PF"] = pf
+ repository = None
+ eapi = None
+ if mydb:
+ if not hasattr(mydb, "aux_get"):
+ for k in aux_keys:
+ if k in mydb:
+ # Make these lazy, since __getitem__ triggers
+ # evaluation of USE conditionals which can't
+ # occur until PORTAGE_USE is calculated below.
+ pkg_configdict.addLazySingleton(k,
+ mydb.__getitem__, k)
+ else:
+ # When calling dbapi.aux_get(), grab USE for built/installed
+				# packages since we want to save it as PORTAGE_BUILT_USE for
+ # evaluating conditional USE deps in atoms passed via IPC to
+ # helpers like has_version and best_version.
+ aux_keys = set(aux_keys)
+ if hasattr(mydb, '_aux_cache_keys'):
+ aux_keys = aux_keys.intersection(mydb._aux_cache_keys)
+ aux_keys.add('USE')
+ aux_keys = list(aux_keys)
+ for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
+ pkg_configdict[k] = v
+ built_use = frozenset(pkg_configdict.pop('USE').split())
+ if not built_use:
+ # Empty USE means this dbapi instance does not contain
+ # built packages.
+ built_use = None
+ eapi = pkg_configdict['EAPI']
+
+ repository = pkg_configdict.pop("repository", None)
+ if repository is not None:
+ pkg_configdict["PORTAGE_REPO_NAME"] = repository
+ iuse = pkg_configdict["IUSE"]
+ if pkg is None:
+ self.mycpv = _pkg_str(self.mycpv, metadata=pkg_configdict,
+ settings=self)
+ cpv_slot = self.mycpv
+ else:
+ cpv_slot = pkg
+ pkginternaluse = []
+ for x in iuse.split():
+ if x.startswith("+"):
+ pkginternaluse.append(x[1:])
+ elif x.startswith("-"):
+ pkginternaluse.append(x)
+ pkginternaluse = " ".join(pkginternaluse)
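+			# Illustrative example (not upstream code): IUSE defaults map
+			# to pkginternaluse as follows:
+			#
+			#     IUSE="+gtk -qt doc"  ->  pkginternaluse == "gtk -qt"
+			#
+			# ("doc" carries no default, so it contributes nothing here).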
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+
+ if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
+ self.configdict["pkginternal"]["USE"] = pkginternaluse
+ has_changed = True
+
+ repo_env = []
+ if repository and repository != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[repository].masters)
+ except KeyError:
+ pass
+ repos.append(repository)
+ for repo in repos:
+ d = self._repo_make_defaults.get(repo)
+ if d is None:
+ d = {}
+ else:
+ # make a copy, since we might modify it with
+ # package.use settings
+ d = d.copy()
+ cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
+ if cpdict:
+ repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if repo_puse:
+ for x in repo_puse:
+ d["USE"] = d.get("USE", "") + " " + " ".join(x)
+ if d:
+ repo_env.append(d)
+
+ if repo_env or self.configdict["repo"]:
+ self.configdict["repo"].clear()
+ self.configdict["repo"].update(stack_dicts(repo_env,
+ incrementals=self.incrementals))
+ has_changed = True
+
+ defaults = []
+ for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
+ if self.make_defaults_use[i]:
+ defaults.append(self.make_defaults_use[i])
+ cpdict = pkgprofileuse_dict.get(cp)
+ if cpdict:
+ pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if pkg_defaults:
+ defaults.extend(pkg_defaults)
+ defaults = " ".join(defaults)
+ if defaults != self.configdict["defaults"].get("USE",""):
+ self.configdict["defaults"]["USE"] = defaults
+ has_changed = True
+
+ useforce = self._use_manager.getUseForce(cpv_slot)
+ if useforce != self.useforce:
+ self.useforce = useforce
+ has_changed = True
+
+ usemask = self._use_manager.getUseMask(cpv_slot)
+ if usemask != self.usemask:
+ self.usemask = usemask
+ has_changed = True
+
+ oldpuse = self.puse
+ self.puse = self._use_manager.getPUSE(cpv_slot)
+ if oldpuse != self.puse:
+ has_changed = True
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+
+ if previous_features:
+ # The package from the previous setcpv call had package.env
+ # settings which modified FEATURES. Therefore, trigger a
+ # regenerate() call in order to ensure that self.features
+ # is accurate.
+ has_changed = True
+
+ self._penv = []
+ cpdict = self._penvdict.get(cp)
+ if cpdict:
+ penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if penv_matches:
+ for x in penv_matches:
+ self._penv.extend(x)
+
+ protected_pkg_keys = set(pkg_configdict)
+ protected_pkg_keys.discard('USE')
+
+ # If there are _any_ package.env settings for this package
+ # then it automatically triggers config.reset(), in order
+ # to account for possible incremental interaction between
+ # package.use, package.env, and overrides from the calling
+ # environment (configdict['env']).
+ if self._penv:
+ has_changed = True
+ # USE is special because package.use settings override
+ # it. Discard any package.use settings here and they'll
+ # be added back later.
+ pkg_configdict.pop('USE', None)
+ self._grab_pkg_env(self._penv, pkg_configdict,
+ protected_keys=protected_pkg_keys)
+
+ # Now add package.use settings, which override USE from
+ # package.env
+ if self.puse:
+ if 'USE' in pkg_configdict:
+ pkg_configdict['USE'] = \
+ pkg_configdict['USE'] + " " + self.puse
+ else:
+ pkg_configdict['USE'] = self.puse
+
+ elif previous_penv:
+ has_changed = True
+
+ if has_changed:
+ self.reset(keeping_pkg=1)
+
+ env_configdict = self.configdict['env']
+
+ # Ensure that "pkg" values are always preferred over "env" values.
+ # This must occur _after_ the above reset() call, since reset()
+ # copies values from self.backupenv.
+ for k in protected_pkg_keys:
+ env_configdict.pop(k, None)
+
+ lazy_vars = self._lazy_vars(built_use, self)
+ env_configdict.addLazySingleton('ACCEPT_LICENSE',
+ lazy_vars.__getitem__, 'ACCEPT_LICENSE')
+ env_configdict.addLazySingleton('PORTAGE_RESTRICT',
+ lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
+
+ if built_use is not None:
+ pkg_configdict['PORTAGE_BUILT_USE'] = ' '.join(built_use)
+
+ # If reset() has not been called, it's safe to return
+ # early if IUSE has not changed.
+ if not has_changed and previous_iuse == iuse and \
+			((previous_iuse_effective is not None) == eapi_attrs.iuse_effective):
+ return
+
+ # Filter out USE flags that aren't part of IUSE. This has to
+ # be done for every setcpv() call since practically every
+ # package has different IUSE.
+ use = set(self["USE"].split())
+ unfiltered_use = frozenset(use)
+ if explicit_iuse is None:
+ explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self._iuse_effective_match
+ portage_iuse = set(self._iuse_effective)
+ portage_iuse.update(explicit_iuse)
+ self.configdict["pkg"]["IUSE_EFFECTIVE"] = \
+ " ".join(sorted(portage_iuse))
+ else:
+ iuse_implicit_match = self._iuse_implicit_match
+ portage_iuse = self._get_implicit_iuse()
+ portage_iuse.update(explicit_iuse)
+
+ # PORTAGE_IUSE is not always needed so it's lazily evaluated.
+ self.configdict["env"].addLazySingleton(
+ "PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
+
+ if pkg is None:
+ raw_restrict = pkg_configdict.get("RESTRICT")
+ else:
+ raw_restrict = pkg._raw_metadata["RESTRICT"]
+
+ restrict_test = False
+ if raw_restrict:
+ try:
+ if built_use is not None:
+ restrict = use_reduce(raw_restrict,
+ uselist=built_use, flat=True)
+ else:
+ # Use matchnone=True to ignore USE conditional parts
+ # of RESTRICT, since we want to know whether to mask
+ # the "test" flag _before_ we know the USE values
+ # that would be needed to evaluate the USE
+ # conditionals (see bug #273272).
+ restrict = use_reduce(raw_restrict,
+ matchnone=True, flat=True)
+ except PortageException:
+ pass
+ else:
+ restrict_test = "test" in restrict
+
+ ebuild_force_test = not restrict_test and \
+ self.get("EBUILD_FORCE_TEST") == "1"
+
+ if ebuild_force_test and \
+ not hasattr(self, "_ebuild_force_test_msg_shown"):
+ self._ebuild_force_test_msg_shown = True
+ writemsg(_("Forcing test.\n"), noiselevel=-1)
+
+ if "test" in explicit_iuse or iuse_implicit_match("test"):
+ if "test" not in self.features:
+ use.discard("test")
+ elif restrict_test or \
+ ("test" in self.usemask and not ebuild_force_test):
+ # "test" is in IUSE and USE=test is masked, so execution
+ # of src_test() probably is not reliable. Therefore,
+ # temporarily disable FEATURES=test just for this package.
+ self["FEATURES"] = " ".join(x for x in self.features \
+ if x != "test")
+ use.discard("test")
+ else:
+ use.add("test")
+ if ebuild_force_test and "test" in self.usemask:
+ self.usemask = \
+ frozenset(x for x in self.usemask if x != "test")
+
+ if eapi_attrs.feature_flag_targetroot and \
+ ("targetroot" in explicit_iuse or iuse_implicit_match("targetroot")):
+ if self["ROOT"] != "/":
+ use.add("targetroot")
+ else:
+ use.discard("targetroot")
+
+ # Allow _* flags from USE_EXPAND wildcards to pass through here.
+ use.difference_update([x for x in use \
+ if (x not in explicit_iuse and \
+ not iuse_implicit_match(x)) and x[-2:] != '_*'])
+
+ # Use the calculated USE flags to regenerate the USE_EXPAND flags so
+ # that they are consistent. For optimal performance, use slice
+ # comparison instead of startswith().
+ use_expand_split = set(x.lower() for \
+ x in self.get('USE_EXPAND', '').split())
+ lazy_use_expand = self._lazy_use_expand(
+ self, unfiltered_use, use, self.usemask,
+ portage_iuse, use_expand_split, self._use_expand_dict)
+
+ use_expand_iuses = {}
+ for x in portage_iuse:
+ x_split = x.split('_')
+ if len(x_split) == 1:
+ continue
+ for i in range(len(x_split) - 1):
+ k = '_'.join(x_split[:i+1])
+ if k in use_expand_split:
+ v = use_expand_iuses.get(k)
+ if v is None:
+ v = set()
+ use_expand_iuses[k] = v
+ v.add(x)
+ break
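+
+		# Illustrative grouping (hypothetical flags, not upstream code):
+		# with use_expand_split containing "linguas" and "python_targets",
+		#
+		#     portage_iuse = {"linguas_en_US", "python_targets_python2_7"}
+		#
+		# produces use_expand_iuses ==
+		#     {"linguas": {"linguas_en_US"},
+		#      "python_targets": {"python_targets_python2_7"}}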
+
+ # If it's not in IUSE, variable content is allowed
+ # to pass through if it is defined somewhere. This
+ # allows packages that support LINGUAS but don't
+ # declare it in IUSE to use the variable outside of the
+ # USE_EXPAND context.
+ for k, use_expand_iuse in use_expand_iuses.items():
+ if k + '_*' in use:
+ use.update( x for x in use_expand_iuse if x not in usemask )
+ k = k.upper()
+ self.configdict['env'].addLazySingleton(k,
+ lazy_use_expand.__getitem__, k)
+
+ for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in use ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ # Filtered for the ebuild environment. Store this in a separate
+ # attribute since we still want to be able to see global USE
+ # settings for things like emerge --info.
+
+ self.configdict["env"]["PORTAGE_USE"] = \
+ " ".join(sorted(x for x in use if x[-2:] != '_*'))
+
+ # Clear the eapi cache here rather than in the constructor, since
+ # setcpv triggers lazy instantiation of things like _use_manager.
+ _eapi_cache.clear()
+
+ def _grab_pkg_env(self, penv, container, protected_keys=None):
+ if protected_keys is None:
+ protected_keys = ()
+ abs_user_config = os.path.join(
+ self['PORTAGE_CONFIGROOT'], USER_CONFIG_PATH)
+ non_user_variables = self._non_user_variables
+ # Make a copy since we don't want per-package settings
+ # to pollute the global expand_map.
+ expand_map = self._expand_map.copy()
+ incrementals = self.incrementals
+ for envname in penv:
+ penvfile = os.path.join(abs_user_config, "env", envname)
+ penvconfig = getconfig(penvfile, tolerant=self._tolerant,
+ allow_sourcing=True, expand=expand_map)
+ if penvconfig is None:
+ writemsg("!!! %s references non-existent file: %s\n" % \
+ (os.path.join(abs_user_config, 'package.env'), penvfile),
+ noiselevel=-1)
+ else:
+ for k, v in penvconfig.items():
+ if k in protected_keys or \
+ k in non_user_variables:
+ writemsg("!!! Illegal variable " + \
+ "'%s' assigned in '%s'\n" % \
+ (k, penvfile), noiselevel=-1)
+ elif k in incrementals:
+ if k in container:
+ container[k] = container[k] + " " + v
+ else:
+ container[k] = v
+ else:
+ container[k] = v
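+
+	# Illustrative example (hypothetical files, not upstream code): if
+	# package.env maps a package to the name "no-lto", this method sources
+	# ${PORTAGE_CONFIGROOT}etc/portage/env/no-lto, e.g. containing:
+	#
+	#     CFLAGS="-O2 -pipe"     # non-incremental keys replace
+	#     FEATURES="-distcc"     # incremental keys are appended
+	#
+	# and merges the result into the given container dict.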
+
+ def _iuse_effective_match(self, flag):
+ return flag in self._iuse_effective
+
+ def _calc_iuse_effective(self):
+ """
+ Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
+ """
+ iuse_effective = []
+ iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
+
+ # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
+ # KERNEL, and USERLAND.
+ use_expand_implicit = frozenset(
+ self.get("USE_EXPAND_IMPLICIT", "").split())
+
+ # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
+ # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
+ for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ if v not in use_expand_implicit:
+ continue
+ iuse_effective.extend(
+ self.get("USE_EXPAND_VALUES_" + v, "").split())
+
+ use_expand = frozenset(self.get("USE_EXPAND", "").split())
+ for v in use_expand_implicit:
+ if v not in use_expand:
+ continue
+ lower_v = v.lower()
+ for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
+ iuse_effective.append(lower_v + "_" + x)
+
+ return frozenset(iuse_effective)
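+
+	# Illustrative composition (hypothetical profile values, not upstream
+	# code):
+	#
+	#     IUSE_IMPLICIT="prefix test"
+	#     USE_EXPAND_UNPREFIXED="ARCH"   USE_EXPAND_VALUES_ARCH="amd64 x86"
+	#     USE_EXPAND="ELIBC"             USE_EXPAND_VALUES_ELIBC="glibc"
+	#     USE_EXPAND_IMPLICIT="ARCH ELIBC"
+	#
+	# -> frozenset({"prefix", "test", "amd64", "x86", "elibc_glibc"})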
+
+ def _get_implicit_iuse(self):
+ """
+ Prior to EAPI 5, these flags are considered to
+ be implicit members of IUSE:
+ * Flags derived from ARCH
+ * Flags derived from USE_EXPAND_HIDDEN variables
+ * Masked flags, such as those from {,package}use.mask
+ * Forced flags, such as those from {,package}use.force
+ * build and bootstrap flags used by bootstrap.sh
+ """
+ iuse_implicit = set()
+ # Flags derived from ARCH.
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ iuse_implicit.add(arch)
+ iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
+
+ # Flags derived from USE_EXPAND_HIDDEN variables
+ # such as ELIBC, KERNEL, and USERLAND.
+ use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
+ for x in use_expand_hidden:
+ iuse_implicit.add(x.lower() + "_.*")
+
+ # Flags that have been masked or forced.
+ iuse_implicit.update(self.usemask)
+ iuse_implicit.update(self.useforce)
+
+ # build and bootstrap flags used by bootstrap.sh
+ iuse_implicit.add("build")
+ iuse_implicit.add("bootstrap")
+
+ # Controlled by FEATURES=test. Make this implicit, so handling
+ # of FEATURES=test is consistent regardless of explicit IUSE.
+ # Users may use use.mask/package.use.mask to control
+ # FEATURES=test for all ebuilds, regardless of explicit IUSE.
+ iuse_implicit.add("test")
+
+ return iuse_implicit
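+
+	# Note (illustrative, not upstream code): entries like "elibc_.*" are
+	# regex fragments rather than literal flags; they are matched later via
+	# the lazily built PORTAGE_IUSE pattern (_lazy_iuse_regex), not by
+	# simple set membership.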
+
+ def _getUseMask(self, pkg, stable=None):
+ return self._use_manager.getUseMask(pkg, stable=stable)
+
+ def _getUseForce(self, pkg, stable=None):
+ return self._use_manager.getUseForce(pkg, stable=stable)
+
+ def _getMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+ def _getRawMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getRawMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+
+ def _getProfileMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching profile atom, or None if no
+ such atom exists. Note that a profile atom may or may not have a "*"
+ prefix. PROVIDE is not checked, so atoms will not be found for
+ old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching profile atom string or None if one is not found.
+ """
+
+ warnings.warn("The config._getProfileMaskAtom() method is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ cp = cpv_getkey(cpv)
+ profile_atoms = self.prevmaskdict.get(cp)
+ if profile_atoms:
+ pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
+ repo = metadata.get("repository")
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, repo))
+ pkg_list = [pkg]
+ for x in profile_atoms:
+ if match_from_list(x, pkg_list):
+ continue
+ return x
+ return None
+
+ def _isStable(self, pkg):
+ return self._keywords_manager.isStable(pkg,
+ self.get("ACCEPT_KEYWORDS", ""),
+ self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""))
+
+ def _getKeywords(self, cpv, metadata):
+ return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get("repository"))
+
+ def _getMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+		and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+		# Hack: Need to check the env directly here, since otherwise
+		# stacking doesn't work properly because negative values are
+		# lost in the config object (bug #139600).
+ backuped_accept_keywords = self.configdict["backupenv"].get("ACCEPT_KEYWORDS", "")
+ global_accept_keywords = self["ACCEPT_KEYWORDS"]
+
+ return self._keywords_manager.getMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ global_accept_keywords, backuped_accept_keywords)
+
+ def _getRawMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+ return self._keywords_manager.getRawMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ self.get("ACCEPT_KEYWORDS", ""))
+
+ def _getPKeywords(self, cpv, metadata):
+ global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
+
+ return self._keywords_manager.getPKeywords(cpv, metadata["SLOT"], \
+ metadata.get('repository'), global_accept_keywords)
+
+ def _getMissingLicenses(self, cpv, metadata):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+ return self._license_manager.getMissingLicenses( \
+ cpv, metadata["USE"], metadata["LICENSE"], metadata["SLOT"], metadata.get('repository'))
+
+ def _getMissingProperties(self, cpv, metadata):
+ """
+ Take a PROPERTIES string and return a list of any properties the user
+ may need to accept for the given package. The returned list will not
+ contain any properties that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.properties support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of properties that have not been accepted.
+ """
+ accept_properties = self._accept_properties
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._ppropertiesdict.get(cp)
+ if cpdict:
+ pproperties_list = ordered_by_atom_specificity(cpdict, cpv)
+ if pproperties_list:
+ accept_properties = list(self._accept_properties)
+ for x in pproperties_list:
+ accept_properties.extend(x)
+
+ properties_str = metadata.get("PROPERTIES", "")
+ properties = set(use_reduce(properties_str, matchall=1, flat=True))
+
+ acceptable_properties = set()
+ for x in accept_properties:
+ if x == '*':
+ acceptable_properties.update(properties)
+ elif x == '-*':
+ acceptable_properties.clear()
+ elif x[:1] == '-':
+ acceptable_properties.discard(x[1:])
+ else:
+ acceptable_properties.add(x)
+
+ if "?" in properties_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(properties_str, uselist=use, flat=True)
+ if x not in acceptable_properties]
+
+ def _getMissingRestrict(self, cpv, metadata):
+ """
+ Take a RESTRICT string and return a list of any tokens the user
+ may need to accept for the given package. The returned list will not
+ contain any tokens that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.accept_restrict support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of tokens that have not been accepted.
+ """
+ accept_restrict = self._accept_restrict
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._paccept_restrict.get(cp)
+ if cpdict:
+ paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
+ if paccept_restrict_list:
+ accept_restrict = list(self._accept_restrict)
+ for x in paccept_restrict_list:
+ accept_restrict.extend(x)
+
+ restrict_str = metadata.get("RESTRICT", "")
+ all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
+
+ acceptable_restricts = set()
+ for x in accept_restrict:
+ if x == '*':
+ acceptable_restricts.update(all_restricts)
+ elif x == '-*':
+ acceptable_restricts.clear()
+ elif x[:1] == '-':
+ acceptable_restricts.discard(x[1:])
+ else:
+ acceptable_restricts.add(x)
+
+ if "?" in restrict_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(restrict_str, uselist=use, flat=True)
+ if x not in acceptable_restricts]
+
+ def _accept_chost(self, cpv, metadata):
+ """
+ @return True if pkg CHOST is accepted, False otherwise.
+ """
+ if self._accept_chost_re is None:
+ accept_chost = self.get("ACCEPT_CHOSTS", "").split()
+ if not accept_chost:
+ chost = self.get("CHOST")
+ if chost:
+ accept_chost.append(chost)
+ if not accept_chost:
+ self._accept_chost_re = re.compile(".*")
+ elif len(accept_chost) == 1:
+ try:
+ self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (accept_chost[0], e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+ else:
+ try:
+ self._accept_chost_re = re.compile(
+ r'^(%s)$' % "|".join(accept_chost))
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (" ".join(accept_chost), e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+
+ pkg_chost = metadata.get('CHOST', '')
+ return not pkg_chost or \
+ self._accept_chost_re.match(pkg_chost) is not None
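+
+	# Illustrative behavior (hypothetical values, not upstream code):
+	#
+	#     ACCEPT_CHOSTS="x86_64-pc-linux-gnu i.86-pc-linux-gnu"
+	#
+	# builds the regex ^(x86_64-pc-linux-gnu|i.86-pc-linux-gnu)$, so each
+	# token is itself treated as a regular expression; packages with an
+	# empty CHOST are always accepted.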
+
+ def setinst(self, mycpv, mydbapi):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+ self.modifying()
+
+ # Grab the virtuals this package provides and add them into the tree virtuals.
+ if not hasattr(mydbapi, "aux_get"):
+ provides = mydbapi["PROVIDE"]
+ else:
+ provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+ if not provides:
+ return
+ if isinstance(mydbapi, portdbapi):
+ self.setcpv(mycpv, mydb=mydbapi)
+ myuse = self["PORTAGE_USE"]
+ elif not hasattr(mydbapi, "aux_get"):
+ myuse = mydbapi["USE"]
+ else:
+ myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+ virts = use_reduce(provides, uselist=myuse.split(), flat=True)
+
+ # Ensure that we don't trigger the _treeVirtuals
+ # assertion in VirtualsManager._compile_virtuals().
+ self.getvirtuals()
+ self._virtuals_manager.add_depgraph_virtuals(mycpv, virts)
+
+ def reload(self):
+ """Reload things like /etc/profile.env that can change during runtime."""
+ env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
+ self.configdict["env.d"].clear()
+ env_d = getconfig(env_d_filename,
+ tolerant=self._tolerant, expand=False)
+ if env_d:
+ # env_d will be None if profile.env doesn't exist.
+ for k in self._env_d_blacklist:
+ env_d.pop(k, None)
+ self.configdict["env.d"].update(env_d)
+
+ def regenerate(self, useonly=0, use_cache=None):
+ """
+ Regenerate settings
+		This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
+ re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
+ variables. This also updates the env.d configdict; useful in case an ebuild
+ changes the environment.
+
+ If FEATURES has already stacked, it is not stacked twice.
+
+ @param useonly: Only regenerate USE flags (not any other incrementals)
+ @type useonly: Boolean
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.regenerate() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals = self.incrementals
+ myincrementals = set(myincrementals)
+
+ # Process USE last because it depends on USE_EXPAND which is also
+ # an incremental!
+ myincrementals.discard("USE")
+
+ mydbs = self.configlist[:-1]
+ mydbs.append(self.backupenv)
+
+ # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
+		# used to match all licenses without ever having to explicitly expand
+ # it to all licenses.
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
+ mysplit = prune_incremental(mysplit)
+ accept_license_str = ' '.join(mysplit)
+ self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
+ self._license_manager.set_accept_license_str(accept_license_str)
+ else:
+ # repoman will accept any license
+ self._license_manager.set_accept_license_str("*")
+
+ # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_properties:
+ self._accept_properties = tuple(mysplit)
+ else:
+ # repoman will accept any property
+ self._accept_properties = ('*',)
+
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_RESTRICT', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_RESTRICT'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_restrict:
+ self._accept_restrict = tuple(mysplit)
+ else:
+			# repoman will accept any restrict
+ self._accept_restrict = ('*',)
+
+ increment_lists = {}
+ for k in myincrementals:
+ incremental_list = []
+ increment_lists[k] = incremental_list
+ for curdb in mydbs:
+ v = curdb.get(k)
+ if v is not None:
+ incremental_list.append(v.split())
+
+ if 'FEATURES' in increment_lists:
+ increment_lists['FEATURES'].append(self._features_overrides)
+
+ myflags = set()
+ for mykey, incremental_list in increment_lists.items():
+
+ myflags.clear()
+ for mysplit in incremental_list:
+
+ for x in mysplit:
+ if x=="-*":
+						# "-*" is a special "minus" token that means "unset all
+						# settings", so USE="-* gnome" will have *just* gnome enabled.
+ myflags.clear()
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(colorize("BAD",
+ _("%s values should not start with a '+': %s") % (mykey,x)) \
+ + "\n", noiselevel=-1)
+ x=x[1:]
+ if not x:
+ continue
+
+ if (x[0]=="-"):
+ myflags.discard(x[1:])
+ continue
+
+ # We got here, so add it now.
+ myflags.add(x)
+
+ #store setting in last element of configlist, the original environment:
+ if myflags or mykey in self:
+ self.configlist[-1][mykey] = " ".join(sorted(myflags))
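+
+		# Illustrative stacking (hypothetical tokens, not upstream code):
+		#
+		#     incremental_list = [["gtk", "kde"], ["-*", "gnome"], ["-gnome", "X"]]
+		#
+		# yields myflags == {"X"}: "-*" clears everything accumulated so
+		# far, "gnome" re-adds, "-gnome" removes it, and "X" survives.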
+
+ # Do the USE calculation last because it depends on USE_EXPAND.
+ use_expand = self.get("USE_EXPAND", "").split()
+ use_expand_dict = self._use_expand_dict
+ use_expand_dict.clear()
+ for k in use_expand:
+ v = self.get(k)
+ if v is not None:
+ use_expand_dict[k] = v
+
+ use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
+
+		# In order to best accommodate the long-standing practice of
+ # setting default USE_EXPAND variables in the profile's
+ # make.defaults, we translate these variables into their
+ # equivalent USE flags so that useful incremental behavior
+ # is enabled (for sub-profiles).
+ configdict_defaults = self.configdict['defaults']
+ if self._make_defaults is not None:
+ for i, cfg in enumerate(self._make_defaults):
+ if not cfg:
+ self.make_defaults_use.append("")
+ continue
+ use = cfg.get("USE", "")
+ expand_use = []
+
+ for k in use_expand_unprefixed:
+ v = cfg.get(k)
+ if v is not None:
+ expand_use.extend(v.split())
+
+ for k in use_expand_dict:
+ v = cfg.get(k)
+ if v is None:
+ continue
+ prefix = k.lower() + '_'
+ if k in myincrementals:
+ for x in v.split():
+ if x[:1] == '-':
+ expand_use.append('-' + prefix + x[1:])
+ else:
+ expand_use.append(prefix + x)
+ else:
+ for x in v.split():
+ expand_use.append(prefix + x)
+ if expand_use:
+ expand_use.append(use)
+ use = ' '.join(expand_use)
+ self.make_defaults_use.append(use)
+ self.make_defaults_use = tuple(self.make_defaults_use)
+ configdict_defaults['USE'] = ' '.join(
+ stack_lists([x.split() for x in self.make_defaults_use]))
+ # Set to None so this code only runs once.
+ self._make_defaults = None
+
+ if not self.uvlist:
+ for x in self["USE_ORDER"].split(":"):
+ if x in self.configdict:
+ self.uvlist.append(self.configdict[x])
+ self.uvlist.reverse()
+
+ # For optimal performance, use slice
+ # comparison instead of startswith().
+ iuse = self.configdict["pkg"].get("IUSE")
+ if iuse is not None:
+ iuse = [x.lstrip("+-") for x in iuse.split()]
+ myflags = set()
+ for curdb in self.uvlist:
+
+ for k in use_expand_unprefixed:
+ v = curdb.get(k)
+ if v is None:
+ continue
+ for x in v.split():
+ if x[:1] == "-":
+ myflags.discard(x[1:])
+ else:
+ myflags.add(x)
+
+ cur_use_expand = [x for x in use_expand if x in curdb]
+ mysplit = curdb.get("USE", "").split()
+ if not mysplit and not cur_use_expand:
+ continue
+ for x in mysplit:
+ if x == "-*":
+ myflags.clear()
+ continue
+
+ if x[0] == "+":
+ writemsg(colorize("BAD", _("USE flags should not start "
+ "with a '+': %s\n") % x), noiselevel=-1)
+ x = x[1:]
+ if not x:
+ continue
+
+ if x[0] == "-":
+ if x[-2:] == '_*':
+ prefix = x[1:-1]
+ prefix_len = len(prefix)
+ myflags.difference_update(
+ [y for y in myflags if \
+ y[:prefix_len] == prefix])
+ myflags.discard(x[1:])
+ continue
+
+ if iuse is not None and x[-2:] == '_*':
+ # Expand wildcards here, so that cases like
+ # USE="linguas_* -linguas_en_US" work correctly.
+ prefix = x[:-1]
+ prefix_len = len(prefix)
+ has_iuse = False
+ for y in iuse:
+ if y[:prefix_len] == prefix:
+ has_iuse = True
+ myflags.add(y)
+ if not has_iuse:
+ # There are no matching IUSE, so allow the
+ # wildcard to pass through. This allows
+ # linguas_* to trigger unset LINGUAS in
+ # cases when no linguas_ flags are in IUSE.
+ myflags.add(x)
+ else:
+ myflags.add(x)
+
+ if curdb is configdict_defaults:
+ # USE_EXPAND flags from make.defaults are handled
+ # earlier, in order to provide useful incremental
+ # behavior (for sub-profiles).
+ continue
+
+ for var in cur_use_expand:
+ var_lower = var.lower()
+ is_not_incremental = var not in myincrementals
+ if is_not_incremental:
+ prefix = var_lower + "_"
+ prefix_len = len(prefix)
+ for x in list(myflags):
+ if x[:prefix_len] == prefix:
+ myflags.remove(x)
+ for x in curdb[var].split():
+ if x[0] == "+":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ else:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ x = x[1:]
+ if x[0] == "-":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '-' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ myflags.discard(var_lower + "_" + x[1:])
+ continue
+ myflags.add(var_lower + "_" + x)
+
+ if hasattr(self, "features"):
+ self.features._features.clear()
+ else:
+ self.features = features_set(self)
+ self.features._features.update(self.get('FEATURES', '').split())
+ self.features._sync_env_var()
+ self.features._validate()
+
+ myflags.update(self.useforce)
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ myflags.add(arch)
+
+ myflags.difference_update(self.usemask)
+ self.configlist[-1]["USE"]= " ".join(sorted(myflags))
+
+ if self.mycpv is None:
+ # Generate global USE_EXPAND variables settings that are
+ # consistent with USE, for display by emerge --info. For
+ # package instances, these are instead generated via
+ # setcpv().
+ for k in use_expand:
+ prefix = k.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in myflags \
+ if x[:prefix_len] == prefix )
+ var_split = use_expand_dict.get(k, '').split()
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(sorted(expand_flags.difference(var_split)))
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ for k in use_expand_unprefixed:
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in myflags ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ @property
+ def virts_p(self):
+ warnings.warn("portage config.virts_p attribute " + \
+ "is deprecated, use config.get_virts_p()",
+ DeprecationWarning, stacklevel=2)
+ return self.get_virts_p()
+
+ @property
+ def virtuals(self):
+ warnings.warn("portage config.virtuals attribute " + \
+ "is deprecated, use config.getvirtuals()",
+ DeprecationWarning, stacklevel=2)
+ return self.getvirtuals()
+
+ def get_virts_p(self):
+ # Ensure that we don't trigger the _treeVirtuals
+ # assertion in VirtualsManager._compile_virtuals().
+ self.getvirtuals()
+ return self._virtuals_manager.get_virts_p()
+
+ def getvirtuals(self):
+ if self._virtuals_manager._treeVirtuals is None:
+ # Hack around the fact that VirtualsManager needs a vartree
+ # and vartree needs a config instance.
+ # This code should be part of VirtualsManager.getvirtuals().
+ if self.local_config:
+ temp_vartree = vartree(settings=self)
+ self._virtuals_manager._populate_treeVirtuals(temp_vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ return self._virtuals_manager.getvirtuals()
+
+ def _populate_treeVirtuals_if_needed(self, vartree):
+ """Reduce the provides into a list by CP."""
+ if self._virtuals_manager._treeVirtuals is None:
+ if self.local_config:
+ self._virtuals_manager._populate_treeVirtuals(vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ def __delitem__(self, mykey):
+ self.pop(mykey)
+
+ def __getitem__(self, key):
+ try:
+ return self._getitem(key)
+ except KeyError:
+ return '' # for backward compat, don't raise KeyError
+
+ def _getitem(self, mykey):
+
+ if mykey in self._constant_keys:
+ # These two point to temporary values when
+ # portage plans to update itself.
+ if mykey == "PORTAGE_BIN_PATH":
+ return portage._bin_path
+ elif mykey == "PORTAGE_PYM_PATH":
+ return portage._pym_path
+
+ elif mykey == "PORTAGE_PYTHONPATH":
+ value = [x for x in \
+ self.backupenv.get("PYTHONPATH", "").split(":") if x]
+ need_pym_path = True
+ if value:
+ try:
+ need_pym_path = not os.path.samefile(value[0],
+ portage._pym_path)
+ except OSError:
+ pass
+ if need_pym_path:
+ value.insert(0, portage._pym_path)
+ return ":".join(value)
+
+ elif mykey == "PORTAGE_GID":
+ return "%s" % portage_gid
+
+ for d in self.lookuplist:
+ try:
+ return d[mykey]
+ except KeyError:
+ pass
+
+ raise KeyError(mykey)
+
+ def get(self, k, x=None):
+ try:
+ return self._getitem(k)
+ except KeyError:
+ return x
+
+ def pop(self, key, *args):
+ self.modifying()
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ v = self
+ for d in reversed(self.lookuplist):
+ v = d.pop(key, v)
+ if v is self:
+ if args:
+ return args[0]
+ raise KeyError(key)
+ return v
+
+ def __contains__(self, mykey):
+ """Called to implement membership test operators (in and not in)."""
+ try:
+ self._getitem(mykey)
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def setdefault(self, k, x=None):
+ v = self.get(k)
+ if v is not None:
+ return v
+ else:
+ self[k] = x
+ return x
+
+ def keys(self):
+ return list(self)
+
+ def __iter__(self):
+ keys = set()
+ keys.update(self._constant_keys)
+ for d in self.lookuplist:
+ keys.update(d)
+ return iter(keys)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self._getitem(k))
+
+ def items(self):
+ return list(self.iteritems())
+
+ def __setitem__(self, mykey, myvalue):
+ "set a value; will be thrown away at reset() time"
+ if not isinstance(myvalue, basestring):
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ mykey = _unicode_decode(mykey)
+ myvalue = _unicode_decode(myvalue)
+
+ self.modifying()
+ self.modifiedkeys.append(mykey)
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict = {}
+ environ_filter = self._environ_filter
+
+ eapi = self.get('EAPI')
+ eapi_attrs = _get_eapi_attrs(eapi)
+ phase = self.get('EBUILD_PHASE')
+ filter_calling_env = False
+ if self.mycpv is not None and \
+ phase not in ('clean', 'cleanrm', 'depend', 'fetch'):
+ temp_dir = self.get('T')
+ if temp_dir is not None and \
+ os.path.exists(os.path.join(temp_dir, 'environment')):
+ filter_calling_env = True
+
+ environ_whitelist = self._environ_whitelist
+ for x in self:
+ if x in environ_filter:
+ continue
+ myvalue = self[x]
+ if not isinstance(myvalue, basestring):
+ writemsg(_("!!! Non-string value in config: %s=%s\n") % \
+ (x, myvalue), noiselevel=-1)
+ continue
+ if filter_calling_env and \
+ x not in environ_whitelist and \
+ not self._environ_whitelist_re.match(x):
+ # Do not allow anything to leak into the ebuild
+ # environment unless it is explicitly whitelisted.
+ # This ensures that variables unset by the ebuild
+ # remain unset (bug #189417).
+ continue
+ mydict[x] = myvalue
+ if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ if filter_calling_env:
+ if phase:
+ whitelist = []
+ if "rpm" == phase:
+ whitelist.append("RPMDIR")
+ for k in whitelist:
+ v = self.get(k)
+ if v is not None:
+ mydict[k] = v
+
+ # At some point we may want to stop exporting FEATURES to the ebuild
+ # environment, in order to prevent ebuilds from abusing it. In
+ # preparation for that, export it as PORTAGE_FEATURES so that bashrc
+ # users will be able to migrate any FEATURES conditional code to
+ # use this alternative variable.
+ mydict["PORTAGE_FEATURES"] = self["FEATURES"]
+
+ # Filtered by IUSE and implicit IUSE.
+ mydict["USE"] = self.get("PORTAGE_USE", "")
+
+ # Don't export AA to the ebuild environment in EAPIs that forbid it
+ if not eapi_exports_AA(eapi):
+ mydict.pop("AA", None)
+
+ if not eapi_exports_merge_type(eapi):
+ mydict.pop("MERGE_TYPE", None)
+
+ # Prefix variables are supported beginning with EAPI 3, or when
+ # force-prefix is in FEATURES, since older EAPIs would otherwise be
+ # useless with prefix configurations. This brings compatibility with
+ # the prefix branch of portage, which also supports EPREFIX for all
+ # EAPIs (for obvious reasons).
+ if phase == 'depend' or \
+ ('force-prefix' not in self.features and
+ eapi is not None and not eapi_supports_prefix(eapi)):
+ mydict.pop("ED", None)
+ mydict.pop("EPREFIX", None)
+ mydict.pop("EROOT", None)
+
+ if phase == 'depend':
+ mydict.pop('FILESDIR', None)
+
+ if phase not in ("pretend", "setup", "preinst", "postinst") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACING_VERSIONS", None)
+
+ if phase not in ("prerm", "postrm") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACED_BY_VERSION", None)
+
+ if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
+ phase_func = _phase_func_map.get(phase)
+ if phase_func is not None:
+ mydict["EBUILD_PHASE_FUNC"] = phase_func
+
+ return mydict
+
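+ # Editor's note: a minimal sketch (not upstream code) of how the
+ # environ() result is typically consumed; "settings" is a
+ # hypothetical config instance:
+ #
+ #     env = settings.environ()
+ #     # env["USE"] holds PORTAGE_USE (filtered by IUSE), and
+ #     # env["PORTAGE_FEATURES"] mirrors FEATURES; this dict is
+ #     # what an ebuild phase process inherits.
+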
+ def thirdpartymirrors(self):
+ if getattr(self, "_thirdpartymirrors", None) is None:
+ thirdparty_lists = []
+ for repo_name in reversed(self.repositories.prepos_order):
+ thirdparty_lists.append(grabdict(os.path.join(
+ self.repositories[repo_name].location,
+ "profiles", "thirdpartymirrors")))
+ self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+ return self._thirdpartymirrors
+
+ def archlist(self):
+ _archlist = []
+ for myarch in self["PORTAGE_ARCHLIST"].split():
+ _archlist.append(myarch)
+ _archlist.append("~" + myarch)
+ return _archlist
+
+ def selinux_enabled(self):
+ if getattr(self, "_selinux_enabled", None) is None:
+ self._selinux_enabled = 0
+ if "selinux" in self["USE"].split():
+ if selinux:
+ if selinux.is_selinux_enabled() == 1:
+ self._selinux_enabled = 1
+ else:
+ self._selinux_enabled = 0
+ else:
+ writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
+ noiselevel=-1)
+ self._selinux_enabled = 0
+
+ return self._selinux_enabled
+
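+ # Python 3 renamed the iterator methods, so alias keys()/items()
+ # to the iterator variants defined above.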
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
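+
+# Editor's note: a brief, hypothetical sketch (not upstream code) of
+# the dict-like config interface defined above:
+#
+#     import portage
+#     settings = portage.config(clone=portage.settings)
+#     # (assuming the cloned instance is unlocked)
+#     settings["FOO"] = "bar"        # stored in configdict["env"]
+#     settings.get("NO_SUCH_KEY")    # -> None
+#     settings["NO_SUCH_KEY"]        # -> '' (no KeyError, see above)
+#     "FOO" in settings              # -> True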
diff --git a/usr/lib/portage/pym/portage/package/ebuild/deprecated_profile_check.py b/usr/lib/portage/pym/portage/package/ebuild/deprecated_profile_check.py
new file mode 100644
index 0000000..fdb19b4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/deprecated_profile_check.py
@@ -0,0 +1,83 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['deprecated_profile_check']
+
+import io
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from portage.const import DEPRECATED_PROFILE_FILE
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg
+
+def deprecated_profile_check(settings=None):
+ config_root = None
+ eprefix = None
+ deprecated_profile_file = None
+ if settings is not None:
+ config_root = settings["PORTAGE_CONFIGROOT"]
+ eprefix = settings["EPREFIX"]
+ for x in reversed(settings.profiles):
+ deprecated_profile_file = os.path.join(x, "deprecated")
+ if os.access(deprecated_profile_file, os.R_OK):
+ break
+ else:
+ deprecated_profile_file = None
+
+ if deprecated_profile_file is None:
+ deprecated_profile_file = os.path.join(config_root or "/",
+ DEPRECATED_PROFILE_FILE)
+ if not os.access(deprecated_profile_file, os.R_OK):
+ deprecated_profile_file = os.path.join(config_root or "/",
+ 'etc', 'make.profile', 'deprecated')
+ if not os.access(deprecated_profile_file, os.R_OK):
+ return
+
+ with io.open(_unicode_encode(deprecated_profile_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ dcontent = f.readlines()
+ writemsg(colorize("BAD", _("\n!!! Your current profile is "
+ "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
+ "profile.")) + "\n", noiselevel=-1)
+ if not dcontent:
+ writemsg(colorize("BAD", _("!!! Please refer to the "
+ "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
+ return True
+ newprofile = dcontent[0].rstrip("\n")
+ writemsg(colorize("BAD", _("!!! Please upgrade to the "
+ "following profile if possible:")) + "\n\n", noiselevel=-1)
+ writemsg(8*" " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1)
+ if len(dcontent) > 1:
+ writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
+ for myline in dcontent[1:]:
+ writemsg(myline, noiselevel=-1)
+ writemsg("\n\n", noiselevel=-1)
+ else:
+ writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1)
+ writemsg(8*" " + colorize("INFORM", 'eselect profile set ' +
+ newprofile) + "\n\n", noiselevel=-1)
+
+ if settings is not None:
+ main_repo_loc = settings.repositories.mainRepoLocation()
+ new_profile_path = os.path.join(main_repo_loc,
+ "profiles", newprofile.rstrip("\n"))
+
+ if os.path.isdir(new_profile_path):
+ new_config = portage.config(config_root=config_root,
+ config_profile_path=new_profile_path,
+ eprefix=eprefix)
+
+ if not new_config.profiles:
+ writemsg("\n %s %s\n" % (colorize("WARN", "*"),
+ _("You must update portage before you "
+ "can migrate to the above profile.")), noiselevel=-1)
+ writemsg(" %s %s\n\n" % (colorize("WARN", "*"),
+ _("In order to update portage, "
+ "run 'emerge --oneshot portage'.")),
+ noiselevel=-1)
+
+ return True
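+
+# Editor's note: a minimal usage sketch (not upstream code), assuming
+# an initialized portage API:
+#
+#     import portage
+#     from portage.package.ebuild.deprecated_profile_check import (
+#         deprecated_profile_check)
+#
+#     # Prints upgrade instructions and returns True if the active
+#     # profile is marked deprecated; returns None otherwise.
+#     deprecated_profile_check(settings=portage.settings)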
diff --git a/usr/lib/portage/pym/portage/package/ebuild/digestcheck.py b/usr/lib/portage/pym/portage/package/ebuild/digestcheck.py
new file mode 100644
index 0000000..e207ba8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/digestcheck.py
@@ -0,0 +1,155 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestcheck']
+
+import warnings
+
+from portage import os, _encodings, _unicode_decode
+from portage.checksum import _hash_filter
+from portage.exception import DigestException, FileNotFound
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import writemsg
+
+def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
+ """
+ Verifies checksums. Assumes all files have been downloaded.
+ @rtype: int
+ @return: 1 on success and 0 on failure
+ """
+
+ if justmanifest is not None:
+ warnings.warn("The justmanifest parameter of the " + \
+ "portage.package.ebuild.digestcheck.digestcheck()" + \
+ " function is now unused.",
+ DeprecationWarning, stacklevel=2)
+ justmanifest = None
+
+ if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
+ return 1
+ pkgdir = mysettings["O"]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ if mf is None:
+ mf = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"])
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ try:
+ if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
+ if mf.fhashdict.get("EBUILD"):
+ eout.ebegin(_("checking ebuild checksums ;-)"))
+ mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
+ eout.eend(0)
+ if mf.fhashdict.get("AUX"):
+ eout.ebegin(_("checking auxfile checksums ;-)"))
+ mf.checkTypeHashes("AUX", hash_filter=hash_filter)
+ eout.eend(0)
+ if mf.fhashdict.get("MISC"):
+ eout.ebegin(_("checking miscfile checksums ;-)"))
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True,
+ hash_filter=hash_filter)
+ eout.eend(0)
+ for f in myfiles:
+ eout.ebegin(_("checking %s ;-)") % f)
+ ftype = mf.findFile(f)
+ if ftype is None:
+ if mf.allow_missing:
+ continue
+ eout.eend(1)
+ writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
+ noiselevel=-1)
+ return 0
+ mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
+ eout.eend(0)
+ except FileNotFound as e:
+ eout.eend(1)
+ writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
+ noiselevel=-1)
+ return 0
+ except DigestException as e:
+ eout.eend(1)
+ writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
+ writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
+ writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
+ return 0
+ if mf.thin or mf.allow_missing:
+ # In this case we ignore any missing digests that
+ # would otherwise be detected below.
+ return 1
+ # Make sure that all of the ebuilds are actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(pkgdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ # epatch will just grab all the patches out of a directory, so we have to
+ # make sure there aren't any foreign files that it might grab.
+ filesdir = os.path.join(pkgdir, "files")
+
+ for parent, dirs, files in os.walk(filesdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], parent), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ for d in dirs:
+ d_bytes = d
+ try:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], os.path.join(parent, d)),
+ noiselevel=-1)
+ if strict:
+ return 0
+ dirs.remove(d_bytes)
+ continue
+ if d.startswith(".") or d == "CVS":
+ dirs.remove(d_bytes)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='replace')
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ writemsg(_("!!! File name contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], f), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ file_type = mf.findFile(f)
+ if file_type != "AUX" and not f.startswith("digest-"):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(filesdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ return 1
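+
+# Editor's note: a minimal usage sketch (not upstream code); the path
+# below is hypothetical, and mysettings["O"] must point at the
+# ebuild's directory:
+#
+#     import portage
+#     mysettings = portage.config(clone=portage.settings)
+#     mysettings["O"] = "/usr/portage/app-misc/foo"
+#     if not digestcheck([], mysettings, strict=True):
+#         print("Manifest verification failed")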
diff --git a/usr/lib/portage/pym/portage/package/ebuild/digestgen.py b/usr/lib/portage/pym/portage/package/ebuild/digestgen.py
new file mode 100644
index 0000000..95d02db
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/digestgen.py
@@ -0,0 +1,205 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestgen']
+
+import errno
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+)
+
+from portage import os
+from portage.const import MANIFEST2_REQUIRED_HASH
+from portage.dbapi.porttree import FetchlistDict
+from portage.dep import use_reduce
+from portage.exception import InvalidDependString, FileNotFound, \
+ PermissionDenied, PortagePackageException
+from portage.localization import _
+from portage.output import colorize
+from portage.package.ebuild.fetch import fetch
+from portage.util import writemsg, writemsg_stdout
+from portage.versions import catsplit
+
+def digestgen(myarchives=None, mysettings=None, myportdb=None):
+ """
+ Generates a digest file if missing. Fetches files if necessary.
+ NOTE: myarchives and mysettings used to be positional arguments,
+ so their order must be preserved for backward compatibility.
+ @param mysettings: the ebuild config (mysettings["O"] must correspond
+ to the ebuild's parent directory)
+ @type mysettings: config
+ @param myportdb: a portdbapi instance
+ @type myportdb: portdbapi
+ @rtype: int
+ @return: 1 on success and 0 on failure
+ """
+ if mysettings is None or myportdb is None:
+ raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")
+
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ distfiles_map = {}
+ fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
+ for cpv in fetchlist_dict:
+ try:
+ for myfile in fetchlist_dict[cpv]:
+ distfiles_map.setdefault(myfile, []).append(cpv)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ return 0
+ mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
+ try:
+ mf = mysettings.repositories.get_repo_for_location(mytree)
+ except KeyError:
+ # backward compatibility
+ mytree = os.path.realpath(mytree)
+ mf = mysettings.repositories.get_repo_for_location(mytree)
+
+ mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict)
+
+ if not mf.allow_create:
+ writemsg_stdout(_(">>> Skipping creating Manifest for %s; "
+ "repository is configured to not use them\n") % mysettings["O"])
+ return 1
+
+ # Don't require all hashes since that can trigger excessive
+ # fetches when sufficient digests already exist. To ease transition
+ # while Manifest 1 is being removed, only require hashes that will
+ # exist before and after the transition.
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.add(MANIFEST2_REQUIRED_HASH)
+ dist_hashes = mf.fhashdict.get("DIST", {})
+
+ # To avoid accidental regeneration of digests with the incorrect
+ # files (such as partially downloaded files), trigger the fetch
+ # code if the file exists and its size doesn't match the current
+ # manifest entry. If there really is a legitimate reason for the
+ # digest to change, `ebuild --force digest` can be used to avoid
+ # triggering this code (or else the old digests can be manually
+ # removed from the Manifest).
+ missing_files = []
+ for myfile in distfiles_map:
+ myhashes = dist_hashes.get(myfile)
+ if not myhashes:
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+ if st is None or st.st_size == 0:
+ missing_files.append(myfile)
+ continue
+ size = myhashes.get("size")
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if size == 0:
+ missing_files.append(myfile)
+ continue
+ if required_hash_types.difference(myhashes):
+ missing_files.append(myfile)
+ continue
+ else:
+ if st.st_size == 0 or (size is not None and size != st.st_size):
+ missing_files.append(myfile)
+ continue
+
+ for myfile in missing_files:
+ uris = set()
+ all_restrict = set()
+ for cpv in distfiles_map[myfile]:
+ uris.update(myportdb.getFetchMap(
+ cpv, mytree=mytree)[myfile])
+ restrict = myportdb.aux_get(cpv, ['RESTRICT'], mytree=mytree)[0]
+ # Here we ignore conditional parts of RESTRICT since
+ # they don't apply unconditionally. Assume such
+ # conditionals only apply on the client side where
+ # digestgen() does not need to be called.
+ all_restrict.update(use_reduce(restrict,
+ flat=True, matchnone=True))
+
+ # fetch() uses CATEGORY and PF to display a message
+ # when fetch restriction is triggered.
+ cat, pf = catsplit(cpv)
+ mysettings["CATEGORY"] = cat
+ mysettings["PF"] = pf
+
+ # fetch() uses PORTAGE_RESTRICT to control fetch
+ # restriction, which is only applied to files that
+ # are not fetchable via a mirror:// URI.
+ mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+
+ if not fetch({myfile : uris}, mysettings):
+ myebuild = os.path.join(mysettings["O"],
+ catsplit(cpv)[1] + ".ebuild")
+ spawn_nofetch(myportdb, myebuild)
+ writemsg(_("!!! Fetch failed for %s, can't update Manifest\n")
+ % myfile, noiselevel=-1)
+ if myfile in dist_hashes and \
+ st is not None and st.st_size > 0:
+ # stat result is obtained before calling fetch(),
+ # since fetch may rename the existing file if the
+ # digest does not match.
+ cmd = colorize("INFORM", "ebuild --force %s manifest" %
+ os.path.basename(myebuild))
+ writemsg((_(
+ "!!! If you would like to forcefully replace the existing Manifest entry\n"
+ "!!! for %s, use the following command:\n") % myfile) +
+ "!!! %s\n" % cmd,
+ noiselevel=-1)
+ return 0
+
+ writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
+ try:
+ mf.create(assumeDistHashesSometimes=True,
+ assumeDistHashesAlways=(
+ "assume-digests" in mysettings.features))
+ except FileNotFound as e:
+ writemsg(_("!!! File %s doesn't exist, can't update Manifest\n")
+ % e, noiselevel=-1)
+ return 0
+ except PortagePackageException as e:
+ writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 0
+ try:
+ mf.write(sign=False)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ return 0
+ if "assume-digests" not in mysettings.features:
+ distlist = list(mf.fhashdict.get("DIST", {}))
+ distlist.sort()
+ auto_assumed = []
+ for filename in distlist:
+ if not os.path.exists(
+ os.path.join(mysettings["DISTDIR"], filename)):
+ auto_assumed.append(filename)
+ if auto_assumed:
+ cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
+ pkgs = myportdb.cp_list(cp, mytree=mytree)
+ pkgs.sort()
+ writemsg_stdout(" digest.assumed" + colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for pkg_key in pkgs:
+ fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
+ pv = pkg_key.split("/")[1]
+ for filename in auto_assumed:
+ if filename in fetchlist:
+ writemsg_stdout(
+ " %s::%s\n" % (pv, filename))
+ return 1
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
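+
+# Editor's note: a minimal usage sketch (not upstream code), mirroring
+# what `ebuild foo.ebuild manifest` does internally; the path is
+# hypothetical:
+#
+#     import portage
+#     mysettings = portage.config(clone=portage.settings)
+#     mysettings["O"] = "/usr/portage/app-misc/foo"
+#     if digestgen(mysettings=mysettings, myportdb=portage.portdb):
+#         print("Manifest updated")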
diff --git a/usr/lib/portage/pym/portage/package/ebuild/doebuild.py b/usr/lib/portage/pym/portage/package/ebuild/doebuild.py
new file mode 100644
index 0000000..8e55fe2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/doebuild.py
@@ -0,0 +1,2425 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+
+import grp
+import gzip
+import errno
+import io
+from itertools import chain
+import logging
+import os as _os
+import platform
+import pwd
+import re
+import signal
+import stat
+import sys
+import tempfile
+from textwrap import wrap
+import time
+import warnings
+import zlib
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
+ 'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+ 'portage.util._desktop_entry:validate_desktop_entry',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion'
+)
+
+from portage import auxdbkeys, bsd_chflags, \
+ eapi_is_supported, merge, os, selinux, shutil, \
+ unmerge, _encodings, _os_merge, \
+ _shell_quote, _unicode_decode, _unicode_encode
+from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
+ EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY, PORTAGE_PYM_PACKAGES, EPREFIX, MACOSSANDBOX_PROFILE
+from portage.data import portage_gid, portage_uid, secpass, \
+ uid, userpriv_groups
+from portage.dbapi.porttree import _parse_uri_map
+from portage.dep import Atom, check_required_use, \
+ human_readable_required_use, paren_enclose, use_reduce
+from portage.eapi import eapi_exports_KV, eapi_exports_merge_type, \
+ eapi_exports_replace_vars, eapi_exports_REPOSITORY, \
+ eapi_has_required_use, eapi_has_src_prepare_and_src_configure, \
+ eapi_has_pkg_pretend, _get_eapi_attrs
+from portage.elog import elog_process, _preload_elog_modules
+from portage.elog.messages import eerror, eqawarn
+from portage.exception import DigestException, FileNotFound, \
+ IncorrectParameter, InvalidDependString, PermissionDenied, \
+ UnsupportedAPIException
+from portage.localization import _
+from portage.output import colormap
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, noiselimit, normalize_path, \
+ writemsg, writemsg_stdout, write_atomic
+from portage.util.lafilefixer import rewrite_lafile
+from portage.versions import _pkgsplit
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+
+_unsandboxed_phases = frozenset([
+ "clean", "cleanrm", "config",
+ "help", "info", "postinst",
+ "preinst", "pretend", "postrm",
+ "prerm", "setup"
+])
+
+# phases in which IPC with host is allowed
+_ipc_phases = frozenset([
+ "setup", "pretend",
+ "preinst", "postinst", "prerm", "postrm",
+])
+
+# phases in which networking access is allowed
+_networked_phases = frozenset([
+ # for VCS fetching
+ "unpack",
+ # + for network-bound IPC
+] + list(_ipc_phases))
+
+_phase_func_map = {
+ "config": "pkg_config",
+ "setup": "pkg_setup",
+ "nofetch": "pkg_nofetch",
+ "unpack": "src_unpack",
+ "prepare": "src_prepare",
+ "configure": "src_configure",
+ "compile": "src_compile",
+ "test": "src_test",
+ "install": "src_install",
+ "preinst": "pkg_preinst",
+ "postinst": "pkg_postinst",
+ "prerm": "pkg_prerm",
+ "postrm": "pkg_postrm",
+ "info": "pkg_info",
+ "pretend": "pkg_pretend",
+}
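+
+# Editor's note: config.environ() consults this map to export
+# EBUILD_PHASE_FUNC in EAPIs that support it, e.g.:
+#
+#     _phase_func_map.get("unpack")   # -> "src_unpack"
+#     _phase_func_map.get("depend")   # -> None (no phase function)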
+
+_vdb_use_conditional_keys = Package._dep_keys + \
+ ('LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',)
+
+def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
+ """
+ All proper ebuild phases which execute ebuild.sh are spawned
+ via this function. No exceptions.
+ """
+
+ if phase in _unsandboxed_phases:
+ kwargs['free'] = True
+
+ kwargs['ipc'] = 'ipc-sandbox' not in settings.features or \
+ phase in _ipc_phases
+ kwargs['networked'] = 'network-sandbox' not in settings.features or \
+ phase in _networked_phases
+
+ if phase == 'depend':
+ kwargs['droppriv'] = 'userpriv' in settings.features
+ # It's not necessary to close_fds for this phase, since
+ # it should not spawn any daemons, and close_fds is
+ # best avoided since it can interact badly with some
+ # garbage collectors (see _setup_pipes docstring).
+ kwargs['close_fds'] = False
+
+ if actionmap is not None and phase in actionmap:
+ kwargs.update(actionmap[phase]["args"])
+ cmd = actionmap[phase]["cmd"] % phase
+ else:
+ if phase == 'cleanrm':
+ ebuild_sh_arg = 'clean'
+ else:
+ ebuild_sh_arg = phase
+
+ cmd = "%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))),
+ ebuild_sh_arg)
+
+ settings['EBUILD_PHASE'] = phase
+ try:
+ return spawn(cmd, settings, **portage._native_kwargs(kwargs))
+ finally:
+ settings.pop('EBUILD_PHASE', None)
+
+def _spawn_phase(phase, settings, actionmap=None, returnpid=False,
+ logfile=None, **kwargs):
+
+ if returnpid:
+ return _doebuild_spawn(phase, settings, actionmap=actionmap,
+ returnpid=returnpid, logfile=logfile, **kwargs)
+
+ # The logfile argument is unused here, since EbuildPhase uses
+ # the PORTAGE_LOG_FILE variable if set.
+ ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
+ phase=phase, scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings, **kwargs)
+
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ return ebuild_phase.returncode
+
+def _doebuild_path(settings, eapi=None):
+ """
+ Generate the PATH variable.
+ """
+
+ # Note: PORTAGE_BIN_PATH may differ from the global constant
+ # when portage is reinstalling itself.
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ eprefix = portage.const.EPREFIX
+ prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
+ rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
+ overrides = [x for x in settings.get(
+ "__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x]
+
+ prefixes = []
+ if eprefix:
+ prefixes.append(eprefix)
+ prefixes.append("/")
+
+ path = overrides
+ # PREFIX LOCAL: use DEFAULT_PATH and EXTRA_PATH from make.globals
+ defaultpath = [x for x in settings.get("DEFAULT_PATH", "").split(":") if x]
+ extrapath = [x for x in settings.get("EXTRA_PATH", "").split(":") if x]
+
+ if "xattr" in settings.features:
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "xattr"))
+
+ if eprefix and uid != 0 and "fakeroot" not in settings.features:
+ path.append(os.path.join(portage_bin_path,
+ "ebuild-helpers", "unprivileged"))
+
+ if settings.get("USERLAND", "GNU") != "GNU":
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "bsd"))
+
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers"))
+ path.extend(prerootpath)
+ path.extend(defaultpath)
+ path.extend(rootpath)
+ path.extend(extrapath)
+ # END PREFIX LOCAL
+
+ settings["PATH"] = ":".join(path)
+
+def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
+ debug=False, use_cache=None, db=None):
+ """
+ Create and store environment variables in the config instance
+ that's passed in as the "settings" parameter. This will raise
+ UnsupportedAPIException if the given ebuild has an unsupported
+ EAPI. All EAPI dependent code comes last, so that essential
+ variables like PORTAGE_BUILDDIR are still initialized even in
+ cases when UnsupportedAPIException needs to be raised, which
+ can be useful when uninstalling a package that has corrupt
+ EAPI metadata.
+ The myroot and use_cache parameters are unused.
+ """
+ myroot = None
+ use_cache = None
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ if db is None:
+ raise TypeError("db argument is required")
+
+ mysettings = settings
+ mydbapi = db
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+ mytree = os.path.dirname(os.path.dirname(pkg_dir))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI"))
+ if mysplit is None:
+ raise IncorrectParameter(
+ _("Invalid ebuild path: '%s'") % myebuild)
+
+ if mysettings.mycpv is not None and \
+ mysettings.configdict["pkg"].get("PF") == mypv and \
+ "CATEGORY" in mysettings.configdict["pkg"]:
+ # Assume that PF is enough to assume that we've got
+ # the correct CATEGORY, though this is not really
+ # a solid assumption since it's possible (though
+ # unlikely) that two packages in different
+ # categories have the same PF. Callers should call
+ # setcpv or create a clean clone of a locked config
+ # instance in order to ensure that this assumption
+ # does not fail like in bug #408817.
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ mycpv = mysettings.mycpv
+ elif os.path.basename(pkg_dir) in (mysplit[0], mypv):
+ # portdbapi or vardbapi
+ cat = os.path.basename(os.path.dirname(pkg_dir))
+ mycpv = cat + "/" + mypv
+ else:
+ raise AssertionError("unable to determine CATEGORY")
+
+ # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
+ # so that the caller can override it.
+ tmpdir = mysettings["PORTAGE_TMPDIR"]
+
+ if mydo == 'depend':
+ if mycpv != mysettings.mycpv:
+ # Don't pass in mydbapi here since the resulting aux_get
+ # call would lead to infinite 'depend' phase recursion.
+ mysettings.setcpv(mycpv)
+ else:
+ # If EAPI isn't in configdict["pkg"], it means that setcpv()
+ # hasn't been called with the mydb argument, so we have to
+ # call it here (portage code always calls setcpv properly,
+ # but API consumers might not).
+ if mycpv != mysettings.mycpv or \
+ "EAPI" not in mysettings.configdict["pkg"]:
+ # Reload env.d variables and reset any previous settings.
+ mysettings.reload()
+ mysettings.reset()
+ mysettings.setcpv(mycpv, mydb=mydbapi)
+
+ # config.reset() might have reverted a change made by the caller,
+ # so restore it to its original value. Sandbox needs canonical
+ # paths, so realpath it.
+ mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
+
+ mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
+ mysettings["EBUILD_PHASE"] = mydo
+
+ # Set requested Python interpreter for Portage helpers.
+ mysettings['PORTAGE_PYTHON'] = portage._python_interpreter
+
+ # This is used by assert_sigpipe_ok() that's used by the ebuild
+ # unpack() helper. SIGPIPE is typically 13, but it's better not
+ # to assume that.
+ mysettings['PORTAGE_SIGPIPE_STATUS'] = str(128 + signal.SIGPIPE)
+
+ # We are disabling user-specific bashrc files.
+ mysettings["BASH_ENV"] = INVALID_ENV_FILE
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"] = "1"
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings.configdict["pkg"]["CATEGORY"] = cat
+ mysettings["FILESDIR"] = pkg_dir+"/files"
+ mysettings["PF"] = mypv
+
+ if hasattr(mydbapi, 'repositories'):
+ repo = mydbapi.repositories.get_repo_for_location(mytree)
+ mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
+ mysettings['PORTAGE_ECLASS_LOCATIONS'] = repo.eclass_db.eclass_locations_string
+ mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
+
+ mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+ mysettings.pop("PORTDIR_OVERLAY", None)
+ mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
+ mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if noiselimit < 0:
+ mysettings["PORTAGE_QUIET"] = "1"
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"] = mysplit[1]
+ else:
+ mysettings["PVR"] = mysplit[1] + "-" + mysplit[2]
+
+ # All temporary directories should be subdirectories of
+ # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
+ # to be mounted with the "noexec" option (see bug #346899).
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["PKG_TMPDIR"] = mysettings["BUILD_PREFIX"]+"/._unmerge_"
+
+ # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
+ # locations in order to prevent interference.
+ if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["PKG_TMPDIR"],
+ mysettings["CATEGORY"], mysettings["PF"])
+ else:
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["BUILD_PREFIX"],
+ mysettings["CATEGORY"], mysettings["PF"])
+
+ mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
+ mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
+ mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
+ mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+
+ # Prefix forward compatibility
+ eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep)
+ mysettings["ED"] = os.path.join(
+ mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep
+
+ mysettings["PORTAGE_BASHRC"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
+ mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
+
+ # Allow color.map to control colors associated with einfo, ewarn, etc...
+ mysettings["PORTAGE_COLORMAP"] = colormap()
+
+ if "COLUMNS" not in mysettings:
+ # Set COLUMNS, in order to prevent unnecessary stty calls
+ # inside the set_colors function of isolated-functions.sh.
+ # We cache the result in os.environ, in order to avoid
+ # multiple stty calls in cases when get_term_size() falls
+ # back to stty due to a missing or broken curses module.
+ columns = os.environ.get("COLUMNS")
+ if columns is None:
+ rows, columns = portage.output.get_term_size()
+ if columns < 1:
+ # Force a sane value for COLUMNS, so that tools
+ # like ls don't complain (see bug #394091).
+ columns = 80
+ columns = str(columns)
+ os.environ["COLUMNS"] = columns
+ mysettings["COLUMNS"] = columns
+
+ # EAPI is always known here, even for the "depend" phase, because
+ # EbuildMetadataPhase gets it from _parse_eapi_ebuild_head().
+ eapi = mysettings.configdict['pkg']['EAPI']
+ _doebuild_path(mysettings, eapi=eapi)
+
+ # All EAPI dependent code comes last, so that essential variables like
+ # PATH and PORTAGE_BUILDDIR are still initialized even in cases when
+ # UnsupportedAPIException needs to be raised, which can be useful
+ # when uninstalling a package that has corrupt EAPI metadata.
+ if not eapi_is_supported(eapi):
+ raise UnsupportedAPIException(mycpv, eapi)
+
+ if eapi_exports_REPOSITORY(eapi) and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]:
+ mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"]
+
+ if mydo != "depend":
+ if hasattr(mydbapi, "getFetchMap") and \
+ ("A" not in mysettings.configdict["pkg"] or \
+ "AA" not in mysettings.configdict["pkg"]):
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=mytree)
+ metadata = {
+ "EAPI" : eapi,
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["A"] = ""
+ else:
+ mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
+
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["AA"] = ""
+ else:
+ mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
+
+ ccache = "ccache" in mysettings.features
+ distcc = "distcc" in mysettings.features
+ if ccache or distcc:
+ # Use default ABI libdir in accordance with bug #355283.
+ libdir = None
+ default_abi = mysettings.get("DEFAULT_ABI")
+ if default_abi:
+ libdir = mysettings.get("LIBDIR_" + default_abi)
+ if not libdir:
+ libdir = "lib"
+
+ if distcc:
+ mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip,
+ "usr", libdir, "distcc", "bin") + ":" + mysettings["PATH"]
+
+ if ccache:
+ mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip,
+ "usr", libdir, "ccache", "bin") + ":" + mysettings["PATH"]
+
+ if not eapi_exports_KV(eapi):
+ # Discard KV for EAPIs that don't support it. Cached KV is restored
+ # from the backupenv whenever config.reset() is called.
+ mysettings.pop('KV', None)
+ elif 'KV' not in mysettings and \
+ mydo in ('compile', 'config', 'configure', 'info',
+ 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
+ 'prepare', 'prerm', 'setup', 'test', 'unpack'):
+ mykv, err1 = ExtractKernelVersion(
+ os.path.join(mysettings['EROOT'], "usr/src/linux"))
+ if mykv:
+ # Regular source tree
+ mysettings["KV"] = mykv
+ else:
+ mysettings["KV"] = ""
+ mysettings.backup_changes("KV")
+
+_doebuild_manifest_cache = None
+_doebuild_broken_ebuilds = set()
+_doebuild_broken_manifests = set()
+_doebuild_commands_without_builddir = (
+ 'clean', 'cleanrm', 'depend', 'digest',
+ 'fetch', 'fetchall', 'help', 'manifest'
+)
+
+def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=DeprecationWarning, use_cache=1, fetchall=0, tree=None,
+ mydbapi=None, vartree=None, prev_mtimes=None,
+ fd_pipes=None, returnpid=False):
+ """
+ Wrapper function that invokes specific ebuild phases through the spawning
+ of ebuild.sh
+
+ @param myebuild: name of the ebuild to invoke the phase on (CPV)
+ @type myebuild: String
+ @param mydo: Phase to run
+ @type mydo: String
+ @param _unused: Deprecated (use settings["ROOT"] instead)
+ @type _unused: String
+ @param settings: Portage Configuration
+ @type settings: instance of portage.config
+ @param debug: Turns on various debug information (eg, debug for spawn)
+ @type debug: Boolean
+ @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
+ @type listonly: Boolean
+ @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
+ @type fetchonly: Boolean
+ @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
+ @type cleanup: Boolean
+ @param dbkey: A file path where metadata generated by the 'depend' phase
+ will be written.
+ @type dbkey: String
+ @param use_cache: Enables the cache
+ @type use_cache: Boolean
+ @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
+ @type fetchall: Boolean
+ @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
+ @type tree: String
+ @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
+ @type mydbapi: portdbapi instance
+ @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
+ @type vartree: vartree instance
+ @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
+ @type prev_mtimes: dictionary
+ @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout }
+ for example.
+ @type fd_pipes: Dictionary
+ @param returnpid: Return a list of process IDs for a successful spawn, or
+ an integer value if spawn is unsuccessful. NOTE: This requires the
+ caller clean up all returned PIDs.
+ @type returnpid: Boolean
+ @rtype: Boolean
+ @return:
+ 1. 0 for success
+ 2. 1 for error
+
+ Most errors have an accompanying error message.
+
+ listonly and fetchonly are only really necessary for operations involving 'fetch'.
+ prev_mtimes is only necessary for merge operations.
+ Other variables may not be strictly required; many have defaults that are set inside of doebuild.
+
+ """
+
+ if settings is None:
+ raise TypeError("settings parameter is required")
+ mysettings = settings
+ myroot = settings['EROOT']
+
+ if _unused is not DeprecationWarning:
+ warnings.warn("The third parameter of the "
+ "portage.doebuild() is deprecated. Instead "
+ "settings['EROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if dbkey is not DeprecationWarning:
+ warnings.warn("portage.doebuild() called "
+ "with deprecated dbkey argument.",
+ DeprecationWarning, stacklevel=2)
+
+ if not tree:
+ writemsg("Warning: tree not specified to doebuild\n")
+ tree = "porttree"
+
+ # Dependencies of each phase, chunked out so that the ebuild
+ # binary can use them to collapse targets down.
+ actionmap_deps = {
+ "pretend": [],
+ "setup": ["pretend"],
+ "unpack": ["setup"],
+ "prepare": ["unpack"],
+ "configure": ["prepare"],
+ "compile": ["configure"],
+ "test": ["compile"],
+ "install": ["test"],
+ "rpm": ["install"],
+ "package": ["install"],
+ "merge": ["install"],
+ }
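+ # Editor's note: the phase_stack walk further below expands these
+ # dependencies transitively; e.g. requesting "install" (without
+ # FEATURES=noauto) yields the phase set: pretend, setup, unpack,
+ # prepare, configure, compile, test, install.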
+
+ if mydbapi is None:
+ mydbapi = portage.db[myroot][tree].dbapi
+
+ if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
+ vartree = portage.db[myroot]["vartree"]
+
+ features = mysettings.features
+
+ clean_phases = ("clean", "cleanrm")
+ validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
+ "config", "info", "setup", "depend", "pretend",
+ "fetch", "fetchall", "digest",
+ "unpack", "prepare", "configure", "compile", "test",
+ "install", "rpm", "qmerge", "merge",
+ "package", "unmerge", "manifest", "nofetch"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
+ noiselevel=-1)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ", noiselevel=-1)
+ writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ if returnpid and mydo != 'depend':
+ # This case is not supported, since it bypasses the EbuildPhase class
+ # which implements important functionality (including post phase hooks
+ # and IPC for things like best/has_version and die).
+ warnings.warn("portage.doebuild() called "
+ "with returnpid parameter enabled. This usage will "
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if mydo == "fetchall":
+ fetchall = 1
+ mydo = "fetch"
+
+ if mydo not in clean_phases and not os.path.exists(myebuild):
+ writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
+ noiselevel=-1)
+ return 1
+
+ global _doebuild_manifest_cache
+ pkgdir = os.path.dirname(myebuild)
+ manifest_path = os.path.join(pkgdir, "Manifest")
+ if tree == "porttree":
+ repo_config = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ else:
+ repo_config = None
+
+ mf = None
+ if "strict" in features and \
+ "digest" not in features and \
+ tree == "porttree" and \
+ not repo_config.thin_manifest and \
+ mydo not in ("digest", "manifest", "help") and \
+ not portage._doebuild_manifest_exempt_depend and \
+ not (repo_config.allow_missing_manifest and not os.path.exists(manifest_path)):
+ # Always verify the ebuild checksums before executing it.
+ global _doebuild_broken_ebuilds
+
+ if myebuild in _doebuild_broken_ebuilds:
+ return 1
+
+ # Avoid checking the same Manifest several times in a row during a
+ # regen with an empty cache.
+ if _doebuild_manifest_cache is None or \
+ _doebuild_manifest_cache.getFullname() != manifest_path:
+ _doebuild_manifest_cache = None
+ if not os.path.exists(manifest_path):
+ out = portage.output.EOutput()
+ out.eerror(_("Manifest not found for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"])
+
+ else:
+ mf = _doebuild_manifest_cache
+
+ try:
+ mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
+ except KeyError:
+ if not (mf.allow_missing and
+ os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]):
+ out = portage.output.EOutput()
+ out.eerror(_("Missing digest for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except FileNotFound:
+ out = portage.output.EOutput()
+ out.eerror(_("A file listed in the Manifest "
+ "could not be found: '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except DigestException as e:
+ out = portage.output.EOutput()
+ out.eerror(_("Digest verification failed:"))
+ out.eerror("%s" % e.value[0])
+ out.eerror(_("Reason: %s") % e.value[1])
+ out.eerror(_("Got: %s") % e.value[2])
+ out.eerror(_("Expected: %s") % e.value[3])
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+
+ if mf.getFullname() in _doebuild_broken_manifests:
+ return 1
+
+ if mf is not _doebuild_manifest_cache and not mf.allow_missing:
+
+ # Make sure that all of the ebuilds are
+ # actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ f = os.path.join(pkgdir, f)
+ if f not in _doebuild_broken_ebuilds:
+ out = portage.output.EOutput()
+ out.eerror(_("A file is not listed in the "
+ "Manifest: '%s'") % (f,))
+ _doebuild_broken_manifests.add(manifest_path)
+ return 1
+
+ # We cache it only after all above checks succeed.
+ _doebuild_manifest_cache = mf
+
+ logfile = None
+ builddir_lock = None
+ tmpdir = None
+ tmpdir_orig = None
+
+ try:
+ if mydo in ("digest", "manifest", "help"):
+ # Temporarily exempt the depend phase from manifest checks, in case
+ # aux_get calls trigger cache generation.
+ portage._doebuild_manifest_exempt_depend += 1
+
+ # If we don't need much space and we don't need a constant location,
+ # we can temporarily override PORTAGE_TMPDIR with a random temp dir
+ # so that there's no need for locking and it can be used even if the
+ # user isn't in the portage group.
+ if not returnpid and mydo in ("info",):
+ tmpdir = tempfile.mkdtemp()
+ tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
+ mysettings["PORTAGE_TMPDIR"] = tmpdir
+
+ doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
+ use_cache, mydbapi)
+
+ if mydo in clean_phases:
+ builddir_lock = None
+ if not returnpid and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.lock()
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+
+ # get possible slot information from the deps file
+ if mydo == "depend":
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if returnpid:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ elif dbkey and dbkey is not DeprecationWarning:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = \
+ os.path.join(mysettings.depcachedir, "aux_db_key_temp")
+
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ elif mydo == "nofetch":
+
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+
+ return spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+
+ if tree == "porttree":
+
+ if not returnpid:
+ # Validate dependency metadata here to ensure that ebuilds with
+ # invalid data are never installed via the ebuild command. Skip
+ # this when returnpid is True (assume the caller handled it).
+ rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+ if rval != os.EX_OK:
+ return rval
+
+ else:
+ # FEATURES=noauto only makes sense for porttree, and we don't want
+ # it to trigger redundant sourcing of the ebuild for API consumers
+ # that are using binary packages
+ if "noauto" in mysettings.features:
+ mysettings.features.discard("noauto")
+
+ # If we are not using a private temp dir, then check access
+ # to the global temp dir.
+ if tmpdir is None and \
+ mydo not in _doebuild_commands_without_builddir:
+ rval = _check_temp_dir(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if mydo == "unmerge":
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+ return unmerge(mysettings["CATEGORY"],
+ mysettings["PF"], myroot, mysettings, vartree=vartree)
+
+ phases_to_run = set()
+ if returnpid or \
+ "noauto" in mysettings.features or \
+ mydo not in actionmap_deps:
+ phases_to_run.add(mydo)
+ else:
+ phase_stack = [mydo]
+ while phase_stack:
+ x = phase_stack.pop()
+ if x in phases_to_run:
+ continue
+ phases_to_run.add(x)
+ phase_stack.extend(actionmap_deps.get(x, []))
+ del phase_stack
+
+ alist = set(mysettings.configdict["pkg"].get("A", "").split())
+
+ unpacked = False
+ if tree != "porttree":
+ pass
+ elif "unpack" not in phases_to_run:
+ unpacked = os.path.exists(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".unpacked"))
+ else:
+ try:
+ workdir_st = os.stat(mysettings["WORKDIR"])
+ except OSError:
+ pass
+ else:
+ newstuff = False
+ if not os.path.exists(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".unpacked")):
+ writemsg_stdout(_(
+ ">>> Not marked as unpacked; recreating WORKDIR...\n"))
+ newstuff = True
+ else:
+ for x in alist:
+ writemsg_stdout(">>> Checking %s's mtime...\n" % x)
+ try:
+ x_st = os.stat(os.path.join(
+ mysettings["DISTDIR"], x))
+ except OSError:
+ # file not fetched yet
+ x_st = None
+
+ if x_st is None or x_st.st_mtime > workdir_st.st_mtime:
+ writemsg_stdout(_(">>> Timestamp of "
+ "%s has changed; recreating WORKDIR...\n") % x)
+ newstuff = True
+ break
+
+ if newstuff:
+ if builddir_lock is None and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.lock()
+ try:
+ _spawn_phase("clean", mysettings)
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+ builddir_lock = None
+ else:
+ writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n"))
+ unpacked = True
+
+ # Build directory creation isn't required for any of these.
+ # In the fetch phase, the directory is needed only for RESTRICT=fetch
+ # in order to satisfy the sane $PWD requirement (from bug #239560)
+ # when pkg_nofetch is spawned.
+ have_build_dirs = False
+ if mydo not in ('digest', 'fetch', 'help', 'manifest'):
+ if not returnpid and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.lock()
+ mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
+ if mystatus:
+ return mystatus
+ have_build_dirs = True
+
+ # emerge handles logging externally
+ if not returnpid:
+ # PORTAGE_LOG_FILE is set by the
+ # above prepare_build_dirs() call.
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+
+ if have_build_dirs:
+ rval = _prepare_env_file(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if eapi_exports_merge_type(mysettings["EAPI"]) and \
+ "MERGE_TYPE" not in mysettings.configdict["pkg"]:
+ if tree == "porttree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ elif tree == "bintree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ # NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
+ # and postrm here, since we don't necessarily know what
+ # versions are being installed. This could be a problem
+ # for API consumers if they don't use dblink.treewalk()
+ # to execute prerm and postrm.
+ if eapi_exports_replace_vars(mysettings["EAPI"]) and \
+ (mydo in ("postinst", "preinst", "pretend", "setup") or \
+ ("noauto" not in features and not returnpid and \
+ (mydo in actionmap_deps or mydo in ("merge", "package", "qmerge")))):
+ if not vartree:
+ writemsg("Warning: vartree not given to doebuild. " + \
+ "Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n")
+ else:
+ vardb = vartree.dbapi
+ cpv = mysettings.mycpv
+ cpv_slot = "%s%s%s" % \
+ (cpv.cp, portage.dep._slot_separator, cpv.slot)
+ mysettings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(cpv_slot) + \
+ vardb.match('='+cpv)))
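+			# For example (hypothetical versions): replacing an installed
+			# dev-libs/foo-1.1 with dev-libs/foo-1.2 in the same slot
+			# yields REPLACING_VERSIONS="1.1"; multiple versions can
+			# appear when more than one installed package matches.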
+
+ # if any of these are being called, handle them -- running them out of
+ # the sandbox -- and stop now.
+ if mydo in ("config", "help", "info", "postinst",
+ "preinst", "pretend", "postrm", "prerm"):
+ if mydo in ("preinst", "postinst"):
+ env_file = os.path.join(os.path.dirname(mysettings["EBUILD"]),
+ "environment.bz2")
+ if os.path.isfile(env_file):
+ mysettings["PORTAGE_UPDATE_ENV"] = env_file
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ finally:
+ mysettings.pop("PORTAGE_UPDATE_ENV", None)
+
+ mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
+
+	# Only try to fetch the files if we are going to need them ...
+	# otherwise, if the user has FEATURES=noauto and runs `ebuild clean
+	# unpack compile install`, we would try to fetch 4 times :/
+ need_distfiles = tree == "porttree" and not unpacked and \
+ (mydo in ("fetch", "unpack") or \
+ mydo not in ("digest", "manifest") and "noauto" not in features)
+ if need_distfiles:
+
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=os.path.dirname(os.path.dirname(
+ os.path.dirname(myebuild))))
+ metadata = {
+ "EAPI" : mysettings["EAPI"],
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ aalist = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
+ noiselevel=-1)
+ del e
+ return 1
+
+ if "mirror" in features or fetchall:
+ fetchme = aalist
+ else:
+ fetchme = alist
+
+ dist_digests = None
+ if mf is not None:
+ dist_digests = mf.getTypeDigests("DIST")
+ if not fetch(fetchme, mysettings, listonly=listonly,
+ fetchonly=fetchonly, allow_missing_digests=True,
+ digests=dist_digests):
+ spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+ if listonly:
+ # The convention for listonly mode is to report
+ # success in any case, even though fetch() may
+ # return unsuccessfully in order to trigger the
+ # nofetch phase.
+ return 0
+ return 1
+
+ if need_distfiles:
+ # Files are already checked inside fetch(),
+ # so do not check them again.
+ checkme = []
+ elif unpacked:
+ # The unpack phase is marked as complete, so it
+ # would be wasteful to check distfiles again.
+ checkme = []
+ else:
+ checkme = alist
+
+ if mydo == "fetch" and listonly:
+ return 0
+
+ try:
+		if mydo in ("manifest", "digest"):
+			mf = None
+			_doebuild_manifest_cache = None
+			return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+ elif "digest" in mysettings.features:
+ mf = None
+ _doebuild_manifest_cache = None
+ digestgen(mysettings=mysettings, myportdb=mydbapi)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ if mydo in ("digest", "manifest"):
+ return 1
+
+ if mydo == "fetch":
+ # Return after digestgen for FEATURES=digest support.
+ # Return before digestcheck, since fetch() already
+ # checked any relevant digests.
+ return 0
+
+ # See above comment about fetching only when needed
+ if tree == 'porttree' and \
+ not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
+ return 1
+
+ # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
+ if tree == 'porttree' and \
+ ((mydo != "setup" and "noauto" not in features) \
+ or mydo in ("install", "unpack")):
+ _prepare_fake_distdir(mysettings, alist)
+
+	# Initial dep checks complete; time to process main commands.
+ actionmap = _spawn_actionmap(mysettings)
+
+	# Merge the deps in so we again have a 'full' actionmap;
+	# be glad when this can die.
+ for x in actionmap:
+ if len(actionmap_deps.get(x, [])):
+ actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
+
+ regular_actionmap_phase = mydo in actionmap
+
+ if regular_actionmap_phase:
+ bintree = None
+ if mydo == "package":
+ # Make sure the package directory exists before executing
+ # this phase. This can raise PermissionDenied if
+ # the current user doesn't have write access to $PKGDIR.
+ if hasattr(portage, 'db'):
+ bintree = portage.db[mysettings['EROOT']]['bintree']
+ mysettings["PORTAGE_BINPKG_TMPFILE"] = \
+ bintree.getname(mysettings.mycpv) + \
+ ".%s" % (os.getpid(),)
+ bintree._ensure_dir(os.path.dirname(
+ mysettings["PORTAGE_BINPKG_TMPFILE"]))
+ else:
+ parent_dir = os.path.join(mysettings["PKGDIR"],
+ mysettings["CATEGORY"])
+ portage.util.ensure_dirs(parent_dir)
+ if not os.access(parent_dir, os.W_OK):
+ raise PermissionDenied(
+ "access('%s', os.W_OK)" % parent_dir)
+ retval = spawnebuild(mydo,
+ actionmap, mysettings, debug, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ if returnpid and isinstance(retval, list):
+ return retval
+
+ if retval == os.EX_OK:
+ if mydo == "package" and bintree is not None:
+ bintree.inject(mysettings.mycpv,
+ filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
+ else:
+ if "PORTAGE_BINPKG_TMPFILE" in mysettings:
+ try:
+ os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
+ except OSError:
+ pass
+
+ elif returnpid:
+		writemsg("!!! doebuild: %s\n" %
+			(_("returnpid is not supported for phase '%s'") % mydo,),
+			noiselevel=-1)
+
+ if regular_actionmap_phase:
+ # handled above
+ pass
+ elif mydo == "qmerge":
+		# Check to ensure install was run; this *only* pops up when
+		# users forget it and are using ebuild directly.
+ if not os.path.exists(
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
+ writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
+ noiselevel=-1)
+ return 1
+ # qmerge is a special phase that implies noclean.
+ if "noclean" not in mysettings.features:
+ mysettings.features.add("noclean")
+ _handle_self_update(mysettings, vartree.dbapi)
+		# qmerge is specifically not supposed to do a runtime dep check
+ retval = merge(
+ mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
+ myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+ elif mydo=="merge":
+ retval = spawnebuild("install", actionmap, mysettings, debug,
+ alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
+ returnpid=returnpid)
+ if retval != os.EX_OK:
+ # The merge phase handles this already. Callers don't know how
+ # far this function got, so we have to call elog_process() here
+ # so that it's only called once.
+ elog_process(mysettings.mycpv, mysettings)
+ if retval == os.EX_OK:
+ _handle_self_update(mysettings, vartree.dbapi)
+ retval = merge(mysettings["CATEGORY"], mysettings["PF"],
+ mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, mysettings,
+ myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
+ vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+
+ else:
+ writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
+ return 1
+
+ return retval
+
+ finally:
+
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+ if tmpdir:
+ mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
+ shutil.rmtree(tmpdir)
+
+ mysettings.pop("REPLACING_VERSIONS", None)
+
+		# Make sure that DISTDIR is restored to its normal value before we return!
+ if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
+ mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
+ del mysettings["PORTAGE_ACTUAL_DISTDIR"]
+
+ if logfile and not returnpid:
+ try:
+ if os.stat(logfile).st_size == 0:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if mydo in ("digest", "manifest", "help"):
+		# If necessary, the depend phase has been triggered by aux_get
+		# calls, and the exemption is no longer needed.
+ portage._doebuild_manifest_exempt_depend -= 1
+
+def _check_temp_dir(settings):
+ if "PORTAGE_TMPDIR" not in settings or \
+ not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+ writemsg(_("The directory specified in your "
+ "PORTAGE_TMPDIR variable, '%s',\n"
+ "does not exist. Please create this directory or "
+ "correct your PORTAGE_TMPDIR setting.\n") % \
+ settings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
+ return 1
+
+	# Check the resolved path, since some people use a separate
+	# PORTAGE_TMPDIR mount; the checks below would otherwise be
+	# pointless for them.
+ tmpdir = os.path.realpath(settings["PORTAGE_TMPDIR"])
+ if os.path.exists(os.path.join(tmpdir, "portage")):
+ checkdir = os.path.realpath(os.path.join(tmpdir, "portage"))
+		if ("sandbox" in settings.features or
+			"usersandbox" in settings.features) and \
+			not checkdir.startswith(tmpdir + os.sep):
+ msg = _("The 'portage' subdirectory of the directory "
+ "referenced by the PORTAGE_TMPDIR variable appears to be "
+ "a symlink. In order to avoid sandbox violations (see bug "
+ "#378379), you must adjust PORTAGE_TMPDIR instead of using "
+ "the symlink located at '%s'. A suitable PORTAGE_TMPDIR "
+ "setting would be '%s'.") % \
+ (os.path.join(tmpdir, "portage"), checkdir)
+ lines = []
+ lines.append("")
+ lines.append("")
+ lines.extend(wrap(msg, 72))
+ lines.append("")
+ for line in lines:
+ if line:
+ line = "!!! %s" % (line,)
+ writemsg("%s\n" % (line,), noiselevel=-1)
+ return 1
+ else:
+ checkdir = tmpdir
+
+ if not os.access(checkdir, os.W_OK):
+ writemsg(_("%s is not writable.\n"
+ "Likely cause is that you've mounted it as readonly.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
+ os.chmod(fd.name, 0o755)
+ if not os.access(fd.name, os.X_OK):
+			writemsg(_("Cannot execute files in %s\n"
+ "Likely cause is that you've mounted it with one of the\n"
+ "following mount options: 'noexec', 'user', 'users'\n\n"
+ "Please make sure that portage can execute files in this directory.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+def _prepare_env_file(settings):
+ """
+ Extract environment.bz2 if it exists, but only if the destination
+ environment file doesn't already exist. There are lots of possible
+ states when doebuild() calls this function, and we want to avoid
+ clobbering an existing environment file.
+ """
+
+ env_extractor = BinpkgEnvExtractor(background=False,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings)
+
+ if env_extractor.dest_env_exists():
+ # There are lots of possible states when doebuild()
+ # calls this function, and we want to avoid
+ # clobbering an existing environment file.
+ return os.EX_OK
+
+ if not env_extractor.saved_env_exists():
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+ return os.EX_OK
+
+ env_extractor.start()
+ env_extractor.wait()
+ return env_extractor.returncode
+
+def _prepare_fake_distdir(settings, alist):
+ orig_distdir = settings["DISTDIR"]
+ settings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
+ edpath = settings["DISTDIR"] = \
+ os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
+ portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
+
+ # Remove any unexpected files or directories.
+ for x in os.listdir(edpath):
+ symlink_path = os.path.join(edpath, x)
+ st = os.lstat(symlink_path)
+ if x in alist and stat.S_ISLNK(st.st_mode):
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ shutil.rmtree(symlink_path)
+ else:
+ os.unlink(symlink_path)
+
+ # Check for existing symlinks and recreate if necessary.
+ for x in alist:
+ symlink_path = os.path.join(edpath, x)
+ target = os.path.join(orig_distdir, x)
+ try:
+ link_target = os.readlink(symlink_path)
+ except OSError:
+ os.symlink(target, symlink_path)
+ else:
+ if link_target != target:
+ os.unlink(symlink_path)
+ os.symlink(target, symlink_path)
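+	# A minimal sketch of the resulting layout (hypothetical paths): with
+	# DISTDIR=/usr/portage/distfiles and A="foo-1.0.tar.gz", the build sees
+	#   ${PORTAGE_BUILDDIR}/distdir/foo-1.0.tar.gz
+	#     -> /usr/portage/distfiles/foo-1.0.tar.gz
+	# while PORTAGE_ACTUAL_DISTDIR preserves the original DISTDIR value.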
+
+def _spawn_actionmap(settings):
+ features = settings.features
+ restrict = settings["PORTAGE_RESTRICT"].split()
+ nosandbox = (("userpriv" in features) and \
+ ("usersandbox" not in features) and \
+ "userpriv" not in restrict and \
+ "nouserpriv" not in restrict)
+ if nosandbox and ("userpriv" not in features or \
+ "userpriv" in restrict or \
+ "nouserpriv" in restrict):
+ nosandbox = ("sandbox" not in features and \
+ "usersandbox" not in features)
+
+ if not (portage.process.sandbox_capable or \
+ portage.process.macossandbox_capable):
+ nosandbox = True
+
+ sesandbox = settings.selinux_enabled() and \
+ "sesandbox" in features
+
+ droppriv = "userpriv" in features and \
+ "userpriv" not in restrict and \
+ secpass >= 2
+
+ fakeroot = "fakeroot" in features
+
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ ebuild_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(EBUILD_SH_BINARY))
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(MISC_SH_BINARY))
+ ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
+ misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
+
+	# args are passed to the spawn function
+ actionmap = {
+"pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
+"rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+"package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+ }
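+	# Reading the table (a summary of the values above, with hypothetical
+	# paths): "droppriv" phases may run as portage:portage, "free" phases
+	# skip the sandbox, and e.g. the unpack entry expands to a command like
+	#   /usr/lib/portage/bin/ebuild.sh unpack
+	# spawned with {"droppriv": 1, "free": 0, "sesandbox": 0, "fakeroot": 0}
+	# under a typical FEATURES="userpriv sandbox" configuration.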
+
+ return actionmap
+
+def _validate_deps(mysettings, myroot, mydo, mydbapi):
+
+ invalid_dep_exempt_phases = \
+ set(["clean", "cleanrm", "help", "prerm", "postrm"])
+ all_keys = set(Package.metadata_keys)
+ all_keys.add("SRC_URI")
+ all_keys = tuple(all_keys)
+ metadata = dict(zip(all_keys,
+ mydbapi.aux_get(mysettings.mycpv, all_keys,
+ myrepo=mysettings.get("PORTAGE_REPO_NAME"))))
+
+ class FakeTree(object):
+ def __init__(self, mydb):
+ self.dbapi = mydb
+
+ root_config = RootConfig(mysettings, {"porttree":FakeTree(mydbapi)}, None)
+
+ pkg = Package(built=False, cpv=mysettings.mycpv,
+ metadata=metadata, root_config=root_config,
+ type_name="ebuild")
+
+ msgs = []
+ if pkg.invalid:
+ for k, v in pkg.invalid.items():
+ for msg in v:
+ msgs.append(" %s\n" % (msg,))
+
+ if msgs:
+ portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
+ (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
+ for x in msgs:
+ portage.util.writemsg_level(x,
+ level=logging.ERROR, noiselevel=-1)
+ if mydo not in invalid_dep_exempt_phases:
+ return 1
+
+ if not pkg.built and \
+ mydo not in ("digest", "help", "manifest") and \
+ pkg._metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.eapi):
+ result = check_required_use(pkg._metadata["REQUIRED_USE"],
+ pkg.use.enabled, pkg.iuse.is_valid_flag, eapi=pkg.eapi)
+ if not result:
+ reduced_noise = result.tounicode()
+ writemsg("\n %s\n" % _("The following REQUIRED_USE flag" + \
+ " constraints are unsatisfied:"), noiselevel=-1)
+ writemsg(" %s\n" % reduced_noise,
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg("\n %s\n" % _("The above constraints " + \
+ "are a subset of the following complete expression:"),
+ noiselevel=-1)
+ writemsg(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring, mysettings, debug=False, free=False, droppriv=False,
+ sesandbox=False, fakeroot=False, networked=True, ipc=True, **keywords):
+ """
+	Spawn a subprocess with extra portage-specific options.
+	Options include:
+
+	Sandbox: Sandbox means the spawned process will be limited in its ability to
+	read and write files (normally this means it is restricted to ${D}/)
+	SELinux Sandbox: Enables sandboxing on SELinux
+	Reduced Privileges: Drops privileges such that the process runs as portage:portage
+	instead of as root.
+
+ Notes: os.system cannot be used because it messes with signal handling. Instead we
+ use the portage.process spawn* family of functions.
+
+ This function waits for the process to terminate.
+
+ @param mystring: Command to run
+ @type mystring: String
+ @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
+ @type mysettings: Dictionary or config instance
+ @param debug: Ignored
+ @type debug: Boolean
+	@param free: Run this process free of the sandbox, without
+		filesystem access restrictions
+ @type free: Boolean
+ @param droppriv: Drop to portage:portage when running this command
+ @type droppriv: Boolean
+ @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
+ @type sesandbox: Boolean
+ @param fakeroot: Run this command with faked root privileges
+ @type fakeroot: Boolean
+ @param networked: Run this command with networking access enabled
+ @type networked: Boolean
+ @param ipc: Run this command with host IPC access enabled
+ @type ipc: Boolean
+ @param keywords: Extra options encoded as a dict, to be passed to spawn
+ @type keywords: Dictionary
+ @rtype: Integer
+	@return: The return code of the spawned process.
+ """
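+	# A hedged usage sketch (hypothetical command and config instance):
+	#   rc = spawn("emake -j4", mysettings, droppriv=True)
+	#   if rc != os.EX_OK:
+	#       ... handle build failure ...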
+
+ check_config_instance(mysettings)
+
+ fd_pipes = keywords.get("fd_pipes")
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
+ }
+	# Output to stdout may still be buffered at this point, so flush it
+	# before allowing a child process to use these descriptors, so that
+	# output always shows in the correct order.
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ features = mysettings.features
+
+ # Use Linux namespaces if available
+ if uid == 0 and platform.system() == 'Linux':
+ keywords['unshare_net'] = not networked
+ keywords['unshare_ipc'] = not ipc
+
+ # TODO: Enable fakeroot to be used together with droppriv. The
+ # fake ownership/permissions will have to be converted to real
+ # permissions in the merge phase.
+ fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
+ portage_build_uid = os.getuid()
+ portage_build_gid = os.getgid()
+ if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
+ if droppriv:
+ keywords.update({
+ "uid": portage_uid,
+ "gid": portage_gid,
+ "groups": userpriv_groups,
+ "umask": 0o02
+ })
+
+ # Adjust pty ownership so that subprocesses
+ # can directly access /dev/fd/{1,2}.
+ stdout_fd = fd_pipes.get(1)
+ if stdout_fd is not None:
+ try:
+ subprocess_tty = _os.ttyname(stdout_fd)
+ except OSError:
+ pass
+ else:
+ try:
+ parent_tty = _os.ttyname(sys.__stdout__.fileno())
+ except OSError:
+ parent_tty = None
+
+ if subprocess_tty != parent_tty:
+ _os.chown(subprocess_tty,
+ int(portage_uid), int(portage_gid))
+
+ if "userpriv" in features and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split() and secpass >= 2:
+ # Since Python 3.4, getpwuid and getgrgid
+ # require int type (no proxies).
+ portage_build_uid = int(portage_uid)
+ portage_build_gid = int(portage_gid)
+
+ if "PORTAGE_BUILD_USER" not in mysettings:
+ user = None
+ try:
+ user = pwd.getpwuid(portage_build_uid).pw_name
+ except KeyError:
+ if portage_build_uid == 0:
+ user = "root"
+ elif portage_build_uid == portage_uid:
+ user = portage.data._portage_username
+ else:
+ user = portage_uid
+ if user is not None:
+ mysettings["PORTAGE_BUILD_USER"] = user
+
+ if "PORTAGE_BUILD_GROUP" not in mysettings:
+ group = None
+ try:
+ group = grp.getgrgid(portage_build_gid).gr_name
+ except KeyError:
+ if portage_build_gid == 0:
+ group = "root"
+ elif portage_build_gid == portage_gid:
+ group = portage.data._portage_grpname
+ else:
+ group = portage_gid
+ if group is not None:
+ mysettings["PORTAGE_BUILD_GROUP"] = group
+
+ if not free:
+ free=((droppriv and "usersandbox" not in features) or \
+ (not droppriv and "sandbox" not in features and \
+ "usersandbox" not in features and not fakeroot))
+
+ if not free and not (fakeroot or portage.process.sandbox_capable or \
+ portage.process.macossandbox_capable):
+ free = True
+
+ if mysettings.mycpv is not None:
+ keywords["opt_name"] = "[%s]" % mysettings.mycpv
+ else:
+ keywords["opt_name"] = "[%s/%s]" % \
+ (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
+
+ if free or "SANDBOX_ACTIVE" in os.environ:
+ keywords["opt_name"] += " bash"
+ spawn_func = portage.process.spawn_bash
+ elif fakeroot:
+ keywords["opt_name"] += " fakeroot"
+ keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
+ spawn_func = portage.process.spawn_fakeroot
+ elif "sandbox" in features and platform.system() == 'Darwin':
+ keywords["opt_name"] += " macossandbox"
+ sbprofile = MACOSSANDBOX_PROFILE
+
+		# Determine variable names from the profile: split
+		# "text@@VARNAME@@moretext@@OTHERVAR@@restoftext" into
+		# ("text", "VARNAME", "moretext", "OTHERVAR", "restoftext")
+		# and extract the variable names by reading every second item.
+ variables = []
+ for line in sbprofile.split("\n"):
+ variables.extend(line.split("@@")[1:-1:2])
+
+ for var in variables:
+ paths = ""
+ if var in mysettings:
+ paths = mysettings[var]
+ else:
+ writemsg("Warning: sandbox profile references variable %s "
+ "which is not set.\nThe rule using it will have no "
+ "effect, which is most likely not the intended "
+ "result.\nPlease check make.conf/make.globals.\n" %
+ var)
+
+ # not set or empty value
+ if not paths:
+ sbprofile = sbprofile.replace("@@%s@@" % var, "")
+ continue
+
+ rules_literal = ""
+ rules_regex = ""
+
+ # FIXME: Allow for quoting inside the variable to allow paths with
+ # spaces in them?
+ for path in paths.split(" "):
+ # do a second round of token replacements to be able to
+ # reference settings like EPREFIX or PORTAGE_BUILDDIR.
+ for token in path.split("@@")[1:-1:2]:
+ if token not in mysettings:
+ continue
+
+ path = path.replace("@@%s@@" % token, mysettings[token])
+
+ if "@@" in path:
+ # unreplaced tokens left - silently ignore path - needed
+ # for PORTAGE_ACTUAL_DISTDIR which isn't always set
+ pass
+ elif path[-1] == os.sep:
+ # path ends in slash - make it a regex and allow access
+ # recursively.
+					path = path.replace("+", "\\+")
+					path = path.replace("*", "\\*")
+					path = path.replace("[", "\\[")
+					path = path.replace("]", "\\]")
+ rules_regex += " #\"^%s\"\n" % path
+ else:
+ rules_literal += " #\"%s\"\n" % path
+
+ rules = ""
+ if rules_literal:
+ rules += " (literal\n" + rules_literal + " )\n"
+ if rules_regex:
+ rules += " (regex\n" + rules_regex + " )\n"
+ sbprofile = sbprofile.replace("@@%s@@" % var, rules)
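+		# Sketch of the substitution above (hypothetical variable value):
+		# a profile token @@DISTDIR@@ with DISTDIR="/usr/portage/distfiles"
+		# is replaced by a rules block like
+		#   (literal
+		#    #"/usr/portage/distfiles"
+		#   )
+		# while paths ending in a slash land in a (regex ...) block instead.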
+
+ keywords["profile"] = sbprofile
+ spawn_func = portage.process.spawn_macossandbox
+ else:
+ keywords["opt_name"] += " sandbox"
+ spawn_func = portage.process.spawn_sandbox
+
+ if sesandbox:
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ mysettings["PORTAGE_SANDBOX_T"])
+
+ if keywords.get("returnpid"):
+ return spawn_func(mystring, env=mysettings.environ(),
+ **portage._native_kwargs(keywords))
+
+ proc = EbuildSpawnProcess(
+ background=False, args=mystring,
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ spawn_func=spawn_func,
+ settings=mysettings, **portage._native_kwargs(keywords))
+
+ proc.start()
+ proc.wait()
+
+ return proc.returncode
+
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
+ logfile=None, fd_pipes=None, returnpid=False):
+
+ if returnpid:
+ warnings.warn("portage.spawnebuild() called "
+ "with returnpid parameter enabled. This usage will "
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if not returnpid and \
+ (alwaysdep or "noauto" not in mysettings.features):
+ # process dependency first
+ if "dep" in actionmap[mydo]:
+ retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
+ mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ if retval:
+ return retval
+
+ eapi = mysettings["EAPI"]
+
+ if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(eapi):
+ return os.EX_OK
+
+ if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
+ return os.EX_OK
+
+ if not (mydo == "install" and "noauto" in mysettings.features):
+ check_file = os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip('e'))
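+		# The marker name is derived from the phase name, e.g.
+		# "compile" -> ".compiled", "unpack" -> ".unpacked", and
+		# "configure" -> ".configured" (rstrip('e') drops the trailing 'e').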
+ if os.path.exists(check_file):
+ writemsg_stdout(_(">>> It appears that "
+ "'%(action)s' has already executed for '%(pkg)s'; skipping.\n") %
+ {"action":mydo, "pkg":mysettings["PF"]})
+ writemsg_stdout(_(">>> Remove '%(file)s' to force %(action)s.\n") %
+ {"file":check_file, "action":mydo})
+ return os.EX_OK
+
+ return _spawn_phase(mydo, mysettings,
+ actionmap=actionmap, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+_post_phase_cmds = {
+
+ "install" : [
+ "install_qa_check",
+ "install_symlink_html_docs",
+ "install_hooks"],
+
+ "preinst" : [
+ "preinst_aix",
+ "preinst_sfperms",
+ "preinst_selinux_labels",
+ "preinst_suid_scan",
+ ],
+
+ "postinst" : [
+ "postinst_aix"]
+}
+
+def _post_phase_userpriv_perms(mysettings):
+	if "userpriv" in mysettings.features and secpass >= 2:
+		# Privileged phases may have left files that need to be made
+		# writable to a less privileged user.
+ apply_recursive_permissions(mysettings["T"],
+ uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
+ filemode=0o60, filemask=0)
+
+def _check_build_log(mysettings, out=None):
+ """
+ Search the content of $PORTAGE_LOG_FILE if it exists
+ and generate the following QA Notices when appropriate:
+
+ * Automake "maintainer mode"
+ * command not found
+ * Unrecognized configure options
+ """
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+ if logfile is None:
+ return
+ try:
+ f = open(_unicode_encode(logfile, encoding=_encodings['fs'],
+ errors='strict'), mode='rb')
+ except EnvironmentError:
+ return
+
+ f_real = None
+ if logfile.endswith('.gz'):
+ f_real = f
+ f = gzip.GzipFile(filename='', mode='rb', fileobj=f)
+
+ am_maintainer_mode = []
+ bash_command_not_found = []
+ bash_command_not_found_re = re.compile(
+ r'(.*): line (\d*): (.*): command not found$')
+ command_not_found_exclude_re = re.compile(r'/configure: line ')
+ helper_missing_file = []
+ helper_missing_file_re = re.compile(
+ r'^!!! (do|new).*: .* does not exist$')
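+	# Hypothetical examples of log lines these patterns are meant to catch:
+	#   ebuild.sh: line 42: gtk-update-icon-cache: command not found
+	#   !!! newins: foo.conf does not exist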
+
+ configure_opts_warn = []
+ configure_opts_warn_re = re.compile(
+ r'^configure: WARNING: [Uu]nrecognized options: (.*)')
+
+ qa_configure_opts = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_CONFIGURE_OPTIONS"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_configure_opts_f:
+ qa_configure_opts = qa_configure_opts_f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_configure_opts = qa_configure_opts.split()
+ if qa_configure_opts:
+ if len(qa_configure_opts) > 1:
+ qa_configure_opts = "|".join("(%s)" % x for x in qa_configure_opts)
+ qa_configure_opts = "^(%s)$" % qa_configure_opts
+ else:
+ qa_configure_opts = "^%s$" % qa_configure_opts[0]
+ qa_configure_opts = re.compile(qa_configure_opts)
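+	# Sketch of the regex building above (hypothetical file contents): a
+	# QA_CONFIGURE_OPTIONS file holding "--disable-foo --with-bar" becomes
+	# the pattern ^((--disable-foo)|(--with-bar))$, while a single entry
+	# "--disable-foo" becomes ^--disable-foo$.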
+
+ qa_am_maintainer_mode = []
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_AM_MAINTAINER_MODE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_am_maintainer_mode_f:
+ qa_am_maintainer_mode = [x for x in
+ qa_am_maintainer_mode_f.read().splitlines() if x]
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ if qa_am_maintainer_mode:
+ if len(qa_am_maintainer_mode) > 1:
+ qa_am_maintainer_mode = \
+ "|".join("(%s)" % x for x in qa_am_maintainer_mode)
+ qa_am_maintainer_mode = "^(%s)$" % qa_am_maintainer_mode
+ else:
+ qa_am_maintainer_mode = "^%s$" % qa_am_maintainer_mode[0]
+ qa_am_maintainer_mode = re.compile(qa_am_maintainer_mode)
+
+ # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
+ #
+ #Configuration:
+ # Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
+ am_maintainer_mode_re = re.compile(r'/missing --run ')
+ am_maintainer_mode_exclude_re = \
+ re.compile(r'(/missing --run (autoheader|autotest|help2man|makeinfo)|^\s*Automake:\s)')
+
+ make_jobserver_re = \
+ re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
+ make_jobserver = []
+
+ def _eerror(lines):
+ for line in lines:
+ eerror(line, phase="install", key=mysettings.mycpv, out=out)
+
+ try:
+ for line in f:
+ line = _unicode_decode(line)
+ if am_maintainer_mode_re.search(line) is not None and \
+ am_maintainer_mode_exclude_re.search(line) is None and \
+ (not qa_am_maintainer_mode or
+ qa_am_maintainer_mode.search(line) is None):
+ am_maintainer_mode.append(line.rstrip("\n"))
+
+ if bash_command_not_found_re.match(line) is not None and \
+ command_not_found_exclude_re.search(line) is None:
+ bash_command_not_found.append(line.rstrip("\n"))
+
+ if helper_missing_file_re.match(line) is not None:
+ helper_missing_file.append(line.rstrip("\n"))
+
+ m = configure_opts_warn_re.match(line)
+ if m is not None:
+ for x in m.group(1).split(", "):
+ if not qa_configure_opts or qa_configure_opts.match(x) is None:
+ configure_opts_warn.append(x)
+
+ if make_jobserver_re.match(line) is not None:
+ make_jobserver.append(line.rstrip("\n"))
+
+ except zlib.error as e:
+ _eerror(["portage encountered a zlib error: '%s'" % (e,),
+ "while reading the log file: '%s'" % logfile])
+ finally:
+ f.close()
+
+ def _eqawarn(lines):
+ for line in lines:
+ eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
+ wrap_width = 70
+
+ if am_maintainer_mode:
+ msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
+ msg.append("")
+ msg.extend("\t" + line for line in am_maintainer_mode)
+ msg.append("")
+ msg.extend(wrap(_(
+ "If you patch Makefile.am, "
+ "configure.in, or configure.ac then you "
+ "should use autotools.eclass and "
+ "eautomake or eautoreconf. Exceptions "
+ "are limited to system packages "
+ "for which it is impossible to run "
+ "autotools during stage building. "
+ "See http://www.gentoo.org/p"
+ "roj/en/qa/autofailure.xml for more information."),
+ wrap_width))
+ _eqawarn(msg)
+
+ if bash_command_not_found:
+ msg = [_("QA Notice: command not found:")]
+ msg.append("")
+ msg.extend("\t" + line for line in bash_command_not_found)
+ _eqawarn(msg)
+
+ if helper_missing_file:
+ msg = [_("QA Notice: file does not exist:")]
+ msg.append("")
+ msg.extend("\t" + line[4:] for line in helper_missing_file)
+ _eqawarn(msg)
+
+ if configure_opts_warn:
+ msg = [_("QA Notice: Unrecognized configure options:")]
+ msg.append("")
+ msg.extend("\t%s" % x for x in configure_opts_warn)
+ _eqawarn(msg)
+
+ if make_jobserver:
+ msg = [_("QA Notice: make jobserver unavailable:")]
+ msg.append("")
+ msg.extend("\t" + line for line in make_jobserver)
+ _eqawarn(msg)
+
+	if f_real is not None:
+		f_real.close()
+
+def _post_src_install_write_metadata(settings):
+ """
+ It's possible that the ebuild has changed the
+ CHOST variable, so revert it to the initial
+ setting. Also, revert IUSE in case it's corrupted
+ due to local environment settings like in bug #386829.
+ """
+
+ eapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])
+
+ build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')
+
+ metadata_keys = ['IUSE']
+ if eapi_attrs.iuse_effective:
+ metadata_keys.append('IUSE_EFFECTIVE')
+
+ for k in metadata_keys:
+ v = settings.configdict['pkg'].get(k)
+ if v is not None:
+ write_atomic(os.path.join(build_info_dir, k), v + '\n')
+
+ # The following variables are irrelevant for virtual packages.
+ if settings.get('CATEGORY') != 'virtual':
+
+ for k in ('CHOST',):
+ v = settings.get(k)
+ if v is not None:
+ write_atomic(os.path.join(build_info_dir, k), v + '\n')
+
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write("%.0f\n" % (time.time(),))
+
+ use = frozenset(settings['PORTAGE_USE'].split())
+ for k in _vdb_use_conditional_keys:
+ v = settings.configdict['pkg'].get(k)
+ filename = os.path.join(build_info_dir, k)
+ if v is None:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+
+ if k.endswith('DEPEND'):
+ if eapi_attrs.slot_operator:
+ continue
+ token_class = Atom
+ else:
+ token_class = None
+
+ v = use_reduce(v, uselist=use, token_class=token_class)
+ v = paren_enclose(v)
+ if not v:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ k), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write('%s\n' % v)
+
+ if eapi_attrs.slot_operator:
+ deps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())
+ for k, v in deps.items():
+ filename = os.path.join(build_info_dir, k)
+ if not v:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ k), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write('%s\n' % v)
+
+def _preinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Save all the file flags for restoration later.
+ os.system("mtree -c -p %s -k flags > %s" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+ # Remove all the file flags to avoid EPERM errors.
+ os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
+ (_shell_quote(mysettings["D"]),))
+ os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
+ (_shell_quote(mysettings["D"]),))
+
+
+def _postinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Restore all of the flags saved above.
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["ROOT"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_uid_fix(mysettings, out):
+ """
+ Files in $D with user and group bits that match the "portage"
+ user or group are automatically mapped to PORTAGE_INST_UID and
+ PORTAGE_INST_GID if necessary. The chown system call may clear
+ S_ISUID and S_ISGID bits, so those bits are restored if
+ necessary.
+ """
+
+ os = _os_merge
+
+ inst_uid = int(mysettings["PORTAGE_INST_UID"])
+ inst_gid = int(mysettings["PORTAGE_INST_GID"])
+
+ _preinst_bsdflags(mysettings)
+
+ destdir = mysettings["D"]
+ ed_len = len(mysettings["ED"])
+ unicode_errors = []
+ desktop_file_validate = \
+ portage.process.find_binary("desktop-file-validate") is not None
+ xdg_dirs = mysettings.get('XDG_DATA_DIRS', '/usr/share').split(':')
+ xdg_dirs = tuple(os.path.join(i, "applications") + os.sep
+ for i in xdg_dirs if i)
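+	# For example (hypothetical environment): an XDG_DATA_DIRS of
+	# "/usr/share:/usr/local/share" yields
+	# ("/usr/share/applications/", "/usr/local/share/applications/").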
+
+ qa_desktop_file = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_DESKTOP_FILE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ qa_desktop_file = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_desktop_file = qa_desktop_file.split()
+ if qa_desktop_file:
+ if len(qa_desktop_file) > 1:
+ qa_desktop_file = "|".join("(%s)" % x for x in qa_desktop_file)
+ qa_desktop_file = "^(%s)$" % qa_desktop_file
+ else:
+ qa_desktop_file = "^%s$" % qa_desktop_file[0]
+ qa_desktop_file = re.compile(qa_desktop_file)
+
+ while True:
+
+ unicode_error = False
+ size = 0
+ counted_inodes = set()
+ fixlafiles_announced = False
+ fixlafiles = "fixlafiles" in mysettings.features
+ desktopfile_errors = []
+
+ for parent, dirs, files in os.walk(destdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding='ascii', errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[ed_len:])
+ break
+
+ for fname in chain(dirs, files):
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = _os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding='ascii', errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[ed_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ fpath_relative = fpath[ed_len - 1:]
+ if desktop_file_validate and fname.endswith(".desktop") and \
+ os.path.isfile(fpath) and \
+ fpath_relative.startswith(xdg_dirs) and \
+ not (qa_desktop_file and qa_desktop_file.match(fpath_relative.strip(os.sep)) is not None):
+
+ desktop_validate = validate_desktop_entry(fpath)
+ if desktop_validate:
+ desktopfile_errors.extend(desktop_validate)
+
+ if fixlafiles and \
+ fname.endswith(".la") and os.path.isfile(fpath):
+ f = open(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ mode='rb')
+ has_lafile_header = b'.la - a libtool library file' \
+ in f.readline()
+ f.seek(0)
+ contents = f.read()
+ f.close()
+ try:
+ needs_update, new_contents = rewrite_lafile(contents)
+ except portage.exception.InvalidData as e:
+ needs_update = False
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+
+ # Suppress warnings if the file does not have the
+ # expected header (bug #340725). Even if the header is
+ # missing, we still call rewrite_lafile() since some
+ # valid libtool archives may not have the header.
+ msg = " %s is not a valid libtool archive, skipping\n" % fpath[len(destdir):]
+ qa_msg = "QA Notice: invalid .la file found: %s, %s" % (fpath[len(destdir):], e)
+ if has_lafile_header:
+ writemsg(msg, fd=out)
+ eqawarn(qa_msg, key=mysettings.mycpv, out=out)
+
+ if needs_update:
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+ writemsg(" %s\n" % fpath[len(destdir):], fd=out)
+ # write_atomic succeeds even in some cases in which
+ # a normal write might fail due to file permission
+ # settings on some operating systems such as HP-UX
+ write_atomic(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ new_contents, mode='wb')
+
+ mystat = os.lstat(fpath)
+ if stat.S_ISREG(mystat.st_mode) and \
+ mystat.st_ino not in counted_inodes:
+ counted_inodes.add(mystat.st_ino)
+ size += mystat.st_size
+ if mystat.st_uid != portage_uid and \
+ mystat.st_gid != portage_gid:
+ continue
+ myuid = -1
+ mygid = -1
+ if mystat.st_uid == portage_uid:
+ myuid = inst_uid
+ if mystat.st_gid == portage_gid:
+ mygid = inst_gid
+ apply_secpass_permissions(
+ _unicode_encode(fpath, encoding=_encodings['merge']),
+ uid=myuid, gid=mygid,
+ mode=mystat.st_mode, stat_cached=mystat,
+ follow_links=False)
+
+ if unicode_error:
+ break
+
+ if not unicode_error:
+ break
+
+ if desktopfile_errors:
+ for l in _merge_desktopfile_error(desktopfile_errors):
+ l = l.replace(mysettings["ED"], '/')
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
+ if unicode_errors:
+ for l in _merge_unicode_error(unicode_errors):
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
+ build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'],
+ 'build-info')
+
+ f = io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'SIZE'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict')
+ f.write('%d\n' % size)
+ f.close()
+
+ _reapply_bsdflags_to_image(mysettings)
+
+def _reapply_bsdflags_to_image(mysettings):
+ """
+ Reapply flags saved and removed by _preinst_bsdflags.
+ """
+ if bsd_chflags:
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_soname_symlinks(mysettings, out):
+ """
+ Check that libraries in $D have corresponding soname symlinks.
+ If symlinks are missing then create them and trigger a QA Notice.
+ This requires $PORTAGE_BUILDDIR/build-info/NEEDED.ELF.2 for
+ operation.
+ """
+
+ image_dir = mysettings["D"]
+ needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "NEEDED.ELF.2")
+
+ f = None
+ try:
+ f = io.open(_unicode_encode(needed_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ lines = f.readlines()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ return
+ finally:
+ if f is not None:
+ f.close()
+
+ qa_no_symlink = ""
+ f = None
+ try:
+ f = io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_SONAME_NO_SYMLINK"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ qa_no_symlink = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ finally:
+ if f is not None:
+ f.close()
+
+ qa_no_symlink = qa_no_symlink.split()
+ if qa_no_symlink:
+ if len(qa_no_symlink) > 1:
+ qa_no_symlink = "|".join("(%s)" % x for x in qa_no_symlink)
+ qa_no_symlink = "^(%s)$" % qa_no_symlink
+ else:
+ qa_no_symlink = "^%s$" % qa_no_symlink[0]
+ qa_no_symlink = re.compile(qa_no_symlink)
+
+ libpaths = set(portage.util.getlibpaths(
+ mysettings["ROOT"], env=mysettings))
+ libpath_inodes = set()
+ for libpath in libpaths:
+ libdir = os.path.join(mysettings["ROOT"], libpath.lstrip(os.sep))
+ try:
+ s = os.stat(libdir)
+ except OSError:
+ continue
+ else:
+ libpath_inodes.add((s.st_dev, s.st_ino))
+
+ is_libdir_cache = {}
+
+ def is_libdir(obj_parent):
+ try:
+ return is_libdir_cache[obj_parent]
+ except KeyError:
+ pass
+
+ rval = False
+ if obj_parent in libpaths:
+ rval = True
+ else:
+ parent_path = os.path.join(mysettings["ROOT"],
+ obj_parent.lstrip(os.sep))
+ try:
+ s = os.stat(parent_path)
+ except OSError:
+ pass
+ else:
+ if (s.st_dev, s.st_ino) in libpath_inodes:
+ rval = True
+
+ is_libdir_cache[obj_parent] = rval
+ return rval
+
+ missing_symlinks = []
+
+ # Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.
+ for l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ portage.util.writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (needed_filename, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ obj, soname = fields[1:3]
+ if not soname:
+ continue
+ if not is_libdir(os.path.dirname(obj)):
+ continue
+ if qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:
+ continue
+
+ obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+ sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+ try:
+ os.lstat(sym_file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ continue
+
+ missing_symlinks.append((obj, soname))
+
+ if not missing_symlinks:
+ return
+
+ qa_msg = ["QA Notice: Missing soname symlink(s):"]
+ qa_msg.append("")
+ qa_msg.extend("\t%s -> %s" % (os.path.join(
+ os.path.dirname(obj).lstrip(os.sep), soname),
+ os.path.basename(obj))
+ for obj, soname in missing_symlinks)
+ qa_msg.append("")
+ for line in qa_msg:
+ eqawarn(line, key=mysettings.mycpv, out=out)
+
+def _merge_desktopfile_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more .desktop files "
+ "that do not pass validation.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
+def _merge_unicode_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more file names "
+ "containing characters that are not encoded with the UTF-8 encoding.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
+def _prepare_self_update(settings):
+ """
+ Call this when portage is updating itself, in order to create
+ temporary copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH, since
+ the new versions may be incompatible. An atexit hook will
+ automatically clean up the temporary copies.
+ """
+
+	# sanity check: ensure that this routine only runs once
+ if portage._bin_path != portage.const.PORTAGE_BIN_PATH:
+ return
+
+ # Load lazily referenced portage submodules into memory,
+ # so imports won't fail during portage upgrade/downgrade.
+ _preload_elog_modules(settings)
+ portage.proxy.lazyimport._preload_portage_submodules()
+
+ # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+ # it's common for /tmp and /var/tmp to be mounted with the
+ # "noexec" option (see bug #346899).
+ build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+ portage.util.ensure_dirs(build_prefix)
+ base_path_tmp = tempfile.mkdtemp(
+ "", "._portage_reinstall_.", build_prefix)
+ portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+
+ orig_bin_path = portage._bin_path
+ portage._bin_path = os.path.join(base_path_tmp, "bin")
+ shutil.copytree(orig_bin_path, portage._bin_path, symlinks=True)
+
+ orig_pym_path = portage._pym_path
+ portage._pym_path = os.path.join(base_path_tmp, "pym")
+ os.mkdir(portage._pym_path)
+ for pmod in PORTAGE_PYM_PACKAGES:
+ shutil.copytree(os.path.join(orig_pym_path, pmod),
+ os.path.join(portage._pym_path, pmod),
+ symlinks=True)
+
+ for dir_path in (base_path_tmp, portage._bin_path, portage._pym_path):
+ os.chmod(dir_path, 0o755)
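+	# The resulting temporary layout looks roughly like this (the random
+	# suffix is hypothetical):
+	#   $PORTAGE_TMPDIR/portage/._portage_reinstall_.XXXXXX/bin
+	#   $PORTAGE_TMPDIR/portage/._portage_reinstall_.XXXXXX/pym/<packages>
+	# and the registered atexit hook removes it on exit.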
+
+def _handle_self_update(settings, vardb):
+ cpv = settings.mycpv
+ if settings["ROOT"] == "/" and \
+ portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [cpv]):
+ _prepare_self_update(settings)
+ return True
+ return False
diff --git a/usr/lib/portage/pym/portage/package/ebuild/fetch.py b/usr/lib/portage/pym/portage/package/ebuild/fetch.py
new file mode 100644
index 0000000..2a60188
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/fetch.py
@@ -0,0 +1,1171 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ['fetch']
+
+import errno
+import io
+import logging
+import random
+import re
+import stat
+import sys
+import tempfile
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance,config',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_doebuild_spawn',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+)
+
+from portage import OrderedDict, os, selinux, shutil, _encodings, \
+ _shell_quote, _unicode_encode
+from portage.checksum import (hashfunc_map, perform_md5, verify_all,
+ _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
+from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
+ GLOBAL_CONFIG_PATH
+from portage.const import rootgid
+from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
+from portage.exception import FileNotFound, OperationNotPermitted, \
+ PortageException, TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.output import colorize, EOutput
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
+ varexpand, writemsg, writemsg_level, writemsg_stdout
+from portage.process import spawn
+
+_userpriv_spawn_kwargs = (
+ ("uid", portage_uid),
+ ("gid", portage_gid),
+ ("groups", userpriv_groups),
+ ("umask", 0o02),
+)
+
+def _hide_url_passwd(url):
+ return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
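+# A small illustration (hypothetical URL):
+#   _hide_url_passwd("ftp://user:secret@mirror.example.org/foo.tar.gz")
+#   returns "ftp://user:*password*@mirror.example.org/foo.tar.gz"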
+
+def _spawn_fetch(settings, args, **kwargs):
+ """
+ Spawn a process with appropriate settings for fetching, including
+ userfetch and selinux support.
+ """
+
+ global _userpriv_spawn_kwargs
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+	# can send its own message to stderr).
+ if "fd_pipes" not in kwargs:
+
+ kwargs["fd_pipes"] = {
+ 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stdout__.fileno(),
+ }
+
+ if "userfetch" in settings.features and \
+ os.getuid() == 0 and portage_gid and portage_uid and \
+ hasattr(os, "setgroups"):
+ kwargs.update(_userpriv_spawn_kwargs)
+
+ spawn_func = spawn
+
+ if settings.selinux_enabled():
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ settings["PORTAGE_FETCH_T"])
+
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ # Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
+ # does not filter the calling environment (which may contain needed
+ # proxy variables, as in bug #315421).
+ phase_backup = settings.get('EBUILD_PHASE')
+ settings['EBUILD_PHASE'] = 'fetch'
+ try:
+ rval = spawn_func(args, env=settings.environ(), **kwargs)
+ finally:
+ if phase_backup is None:
+ settings.pop('EBUILD_PHASE', None)
+ else:
+ settings['EBUILD_PHASE'] = phase_backup
+
+ return rval
+
+_userpriv_test_write_file_cache = {}
+_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
+ "rm -f %(file_path)s ; exit $rval"
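+# For a hypothetical file_path of /var/tmp/test, the script expands to:
+#   >> /var/tmp/test 2>/dev/null ; rval=$? ; rm -f /var/tmp/test ; exit $rval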
+
+def _userpriv_test_write_file(settings, file_path):
+ """
+ Drop privileges and try to open a file for writing. The file may or
+ may not exist, and the parent directory is assumed to exist. The file
+ is removed before returning.
+
+ @param settings: A config instance which is passed to _spawn_fetch()
+ @param file_path: A file path to open and write.
+ @return: True if write succeeds, False otherwise.
+ """
+
+ global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
+ rval = _userpriv_test_write_file_cache.get(file_path)
+ if rval is not None:
+ return rval
+
+ args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
+ {"file_path" : _shell_quote(file_path)}]
+
+ returncode = _spawn_fetch(settings, args)
+
+ rval = returncode == os.EX_OK
+ _userpriv_test_write_file_cache[file_path] = rval
+ return rval
+
+def _checksum_failure_temp_file(distdir, basename):
+ """
+ First try to find a duplicate temp file with the same checksum and return
+ that filename if available. Otherwise, use mkstemp to create a new unique
+ filename._checksum_failure_.$RANDOM, rename the given file, and return the
+ new filename. In any case, filename will be renamed or removed before this
+ function returns a temp filename.
+ """
+
+ filename = os.path.join(distdir, basename)
+ size = os.stat(filename).st_size
+ checksum = None
+ tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
+ for temp_filename in os.listdir(distdir):
+ if not tempfile_re.match(temp_filename):
+ continue
+ temp_filename = os.path.join(distdir, temp_filename)
+ try:
+ if size != os.stat(temp_filename).st_size:
+ continue
+ except OSError:
+ continue
+ try:
+ temp_checksum = perform_md5(temp_filename)
+ except FileNotFound:
+ # Apparently the temp file disappeared. Let it go.
+ continue
+ if checksum is None:
+ checksum = perform_md5(filename)
+ if checksum == temp_checksum:
+ os.unlink(filename)
+ return temp_filename
+
+ fd, temp_filename = \
+ tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
+ os.close(fd)
+ os.rename(filename, temp_filename)
+ return temp_filename
+
+def _check_digests(filename, digests, show_errors=1):
+ """
+ Check digests and display a message if an error occurs.
+ @return True if all digests match, False otherwise.
+ """
+ verified_ok, reason = verify_all(filename, digests)
+ if not verified_ok:
+ if show_errors:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % filename, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ return False
+ return True
+
+def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
+ """
+ @return a tuple of (match, stat_obj) where match is True if filename
+ matches all given digests (if any) and stat_obj is a stat result, or
+ None if the file does not exist.
+ """
+ if digests is None:
+ digests = {}
+ size = digests.get("size")
+ if size is not None and len(digests) == 1:
+ digests = None
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return (False, None)
+ if size is not None and size != st.st_size:
+ return (False, st)
+ if not digests:
+ if size is not None:
+ eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
+ eout.eend(0)
+ elif st.st_size == 0:
+ # Zero-byte distfiles are always invalid.
+ return (False, st)
+ else:
+ digests = _filter_unaccelarated_hashes(digests)
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ if _check_digests(filename, digests, show_errors=show_errors):
+ eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
+ " ".join(sorted(digests))))
+ eout.eend(0)
+ else:
+ return (False, st)
+ return (True, st)
+
+_fetch_resume_size_re = re.compile(r'(^[\d]+)([KMGTPEZY]?$)')
+
+_size_suffix_map = {
+ '' : 0,
+ 'K' : 10,
+ 'M' : 20,
+ 'G' : 30,
+ 'T' : 40,
+ 'P' : 50,
+ 'E' : 60,
+ 'Z' : 70,
+ 'Y' : 80,
+}
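+# The suffixes map to powers of two, so a hypothetical
+# PORTAGE_FETCH_RESUME_MIN_SIZE of "350K" is parsed by
+# _fetch_resume_size_re into ("350", "K") and evaluates to
+# 350 * 2**10 = 358400 bytes.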
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0,
+ locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
+ allow_missing_digests=True):
+ "fetch files. Will use digest file if available."
+
+ if not myuris:
+ return 1
+
+ features = mysettings.features
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+
+ userfetch = secpass >= 2 and "userfetch" in features
+ userpriv = secpass >= 2 and "userpriv" in features
+
+	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
+ restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+ if restrict_mirror:
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
+ return 1
+
+ # Generally, downloading the same file repeatedly from
+ # every single available mirror is a waste of bandwidth
+ # and time, so there needs to be a cap.
+ checksum_failure_max_tries = 5
+ v = checksum_failure_max_tries
+ try:
+ v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
+ checksum_failure_max_tries))
+ except (ValueError, OverflowError):
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains non-integer value: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ if v < 1:
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains value less than 1: '%s'\n") % v, noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ checksum_failure_max_tries = v
+ del v
+
+ fetch_resume_size_default = "350K"
+ fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
+ if fetch_resume_size is not None:
+ fetch_resume_size = "".join(fetch_resume_size.split())
+ if not fetch_resume_size:
+ # If it's undefined or empty, silently use the default.
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ if match is None or \
+ (match.group(2).upper() not in _size_suffix_map):
+ writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
+ " contains an unrecognized format: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
+ "default value: %s\n") % fetch_resume_size_default,
+ noiselevel=-1)
+ fetch_resume_size = None
+ if fetch_resume_size is None:
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ fetch_resume_size = int(match.group(1)) * \
+ 2 ** _size_suffix_map[match.group(2).upper()]
+
+ # Behave like the package has RESTRICT="primaryuri" after a
+ # couple of checksum failures, to increase the probability
+ # of success before checksum_failure_max_tries is reached.
+ checksum_failure_primaryuri = 2
+ thirdpartymirrors = mysettings.thirdpartymirrors()
+
+ # In the background parallel-fetch process, it's safe to skip checksum
+ # verification of pre-existing files in $DISTDIR that have the correct
+ # file size. The parent process will verify their checksums prior to
+ # the unpack phase.
+
+ parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
+ if parallel_fetchonly:
+ fetchonly = 1
+
+ check_config_instance(mysettings)
+
+ custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
+ CUSTOM_MIRRORS_FILE), recursive=1)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+ writemsg(colorize("BAD",
+ _("!!! For fetching to a read-only filesystem, "
+ "locking should be turned off.\n")), noiselevel=-1)
+ writemsg(_("!!! This can be done by adding -distlocks to "
+ "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
+# use_locks = 0
+
+ # local mirrors are always added
+ if "local" in custommirrors:
+ mymirrors += custommirrors["local"]
+
+ if restrict_mirror:
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
+ if skip_manifest:
+ allow_missing_digests = True
+ pkgdir = mysettings.get("O")
+ if digests is None and not (pkgdir is None or skip_manifest):
+ mydigests = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
+ pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
+ elif digests is None or skip_manifest:
+ # no digests because fetch was not called for a specific package
+ mydigests = {}
+ else:
+ mydigests = digests
+
+ ro_distdirs = [x for x in \
+ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
+ if os.path.isdir(x)]
+
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ restrict_fetch = "fetch" in restrict
+ force_mirror = "force-mirror" in features and not restrict_mirror
+ custom_local_mirrors = custommirrors.get("local", [])
+ if restrict_fetch:
+ # With fetch restriction, a normal uri may only be fetched from
+ # custom local mirrors (if available). A mirror:// uri may also
+ # be fetched from specific mirrors (effectively overriding fetch
+ # restriction, but only for specific mirrors).
+ locations = custom_local_mirrors
+ else:
+ locations = mymirrors
+
+ file_uri_tuples = []
+ # Duck-type via the 'items' attribute so that any dict-like mapping (e.g. OrderedDict) is accepted.
+ if hasattr(myuris, 'items'):
+ for myfile, uri_set in myuris.items():
+ for myuri in uri_set:
+ file_uri_tuples.append((myfile, myuri))
+ if not uri_set:
+ file_uri_tuples.append((myfile, None))
+ else:
+ for myuri in myuris:
+ if urlparse(myuri).scheme:
+ file_uri_tuples.append((os.path.basename(myuri), myuri))
+ else:
+ file_uri_tuples.append((os.path.basename(myuri), None))
+
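+ # The branch above accepts myuris in either of two shapes (the
+ # values shown here are hypothetical):
+ #
+ #     myuris = {"foo-1.0.tar.gz": ["http://example.org/foo-1.0.tar.gz"]}
+ #     myuris = ["http://example.org/foo-1.0.tar.gz"]
+ #
+ # In the second form the distfile name is derived via os.path.basename().
+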
+ filedict = OrderedDict()
+ primaryuri_dict = {}
+ thirdpartymirror_uris = {}
+ for myfile, myuri in file_uri_tuples:
+ if myfile not in filedict:
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri is None:
+ continue
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
+ path = myuri[eidx+1:]
+
+ # Try user-defined mirrors first
+ if mirrorname in custommirrors:
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(
+ cmirr.rstrip("/") + "/" + path)
+
+ # now try the official mirrors
+ if mirrorname in thirdpartymirrors:
+ uris = [locmirr.rstrip("/") + "/" + path \
+ for locmirr in thirdpartymirrors[mirrorname]]
+ random.shuffle(uris)
+ filedict[myfile].extend(uris)
+ thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
+
+ if mirrorname not in custommirrors and \
+ mirrorname not in thirdpartymirrors:
+ writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
+ else:
+ writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
+ writemsg(" %s\n" % (myuri), noiselevel=-1)
+ else:
+ if restrict_fetch or force_mirror:
+ # Only fetching from specific mirrors is allowed.
+ continue
+ primaryuris = primaryuri_dict.get(myfile)
+ if primaryuris is None:
+ primaryuris = []
+ primaryuri_dict[myfile] = primaryuris
+ primaryuris.append(myuri)
+
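+ # For example (hypothetical mirror name), "mirror://gentoo/distfiles/a.tgz"
+ # splits into mirrorname "gentoo" and path "distfiles/a.tgz" above, and
+ # each candidate URL becomes "<mirror base>/distfiles/a.tgz".
+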
+ # Order primaryuri_dict values to match that in SRC_URI.
+ for uris in primaryuri_dict.values():
+ uris.reverse()
+
+ # Prefer thirdpartymirrors over normal mirrors in cases when
+ # the file does not yet exist on the normal mirrors.
+ for myfile, uris in thirdpartymirror_uris.items():
+ primaryuri_dict.setdefault(myfile, []).extend(uris)
+
+ # Now merge primaryuri values into filedict (includes mirrors
+ # explicitly referenced in SRC_URI).
+ if "primaryuri" in restrict:
+ for myfile, uris in filedict.items():
+ filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
+ else:
+ for myfile in filedict:
+ filedict[myfile] += primaryuri_dict.get(myfile, [])
+
+ can_fetch=True
+
+ if listonly:
+ can_fetch = False
+
+ if can_fetch and not fetch_to_ro:
+ global _userpriv_test_write_file_cache
+ dirmode = 0o070
+ filemode = 0o60
+ modemask = 0o2
+ dir_gid = portage_gid
+ if "FAKED_MODE" in mysettings:
+ # When inside fakeroot, directories with portage's gid appear
+ # to have root's gid. Therefore, use root's gid instead of
+ # portage's gid to avoid spurious permissions adjustments
+ # when inside fakeroot.
+ dir_gid = rootgid
+ distdir_dirs = [""]
+ try:
+
+ for x in distdir_dirs:
+ mydir = os.path.join(mysettings["DISTDIR"], x)
+ write_test_file = os.path.join(
+ mydir, ".__portage_test_write__")
+
+ try:
+ st = os.stat(mydir)
+ except OSError:
+ st = None
+
+ if st is not None and stat.S_ISDIR(st.st_mode):
+ if not (userfetch or userpriv):
+ continue
+ if _userpriv_test_write_file(mysettings, write_test_file):
+ continue
+
+ _userpriv_test_write_file_cache.pop(write_test_file, None)
+ if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
+ if st is None:
+ # The directory has just been created
+ # and therefore it must be empty.
+ continue
+ writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=dir_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+ except PortageException as e:
+ if not os.path.isdir(mysettings["DISTDIR"]):
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
+ writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
+
+ if can_fetch and \
+ not fetch_to_ro and \
+ not os.access(mysettings["DISTDIR"], os.W_OK):
+ writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
+ noiselevel=-1)
+ can_fetch = False
+
+ distdir_writable = can_fetch and not fetch_to_ro
+ failed_files = set()
+ restrict_fetch_msg = False
+
+ for myfile in filedict:
+ """
+ fetched status
+ 0 nonexistent
+ 1 partially downloaded
+ 2 completely downloaded
+ """
+ fetched = 0
+
+ orig_digests = mydigests.get(myfile, {})
+
+ if not (allow_missing_digests or listonly):
+ verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = " ".join(sorted(expected))
+ got = set(orig_digests)
+ got.discard("size")
+ got = " ".join(sorted(got))
+ reason = (_("Insufficient data for checksum verification"),
+ got, expected)
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+
+ if fetchonly:
+ failed_files.add(myfile)
+ continue
+ else:
+ return 0
+
+ size = orig_digests.get("size")
+ if size == 0:
+ # Zero-byte distfiles are always invalid, so discard their digests.
+ del mydigests[myfile]
+ orig_digests.clear()
+ size = None
+ pruned_digests = orig_digests
+ if parallel_fetchonly:
+ pruned_digests = {}
+ if size is not None:
+ pruned_digests["size"] = size
+
+ myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
+ has_space = True
+ has_space_superuser = True
+ file_lock = None
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ else:
+ # Check if there is enough space in DISTDIR to completely store myfile.
+ # Overestimate the filesize so we aren't bitten by FS overhead.
+ vfs_stat = None
+ if size is not None and hasattr(os, "statvfs"):
+ try:
+ vfs_stat = os.statvfs(mysettings["DISTDIR"])
+ except OSError as e:
+ writemsg_level("!!! statvfs('%s'): %s\n" %
+ (mysettings["DISTDIR"], e),
+ noiselevel=-1, level=logging.ERROR)
+ del e
+
+ if vfs_stat is not None:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bavail):
+
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bfree):
+ has_space_superuser = False
+
+ if not has_space_superuser:
+ has_space = False
+ elif secpass < 2:
+ has_space = False
+ elif userfetch:
+ has_space = False
+
+ if distdir_writable and use_locks:
+
+ lock_kwargs = {}
+ if fetchonly:
+ lock_kwargs["flags"] = os.O_NONBLOCK
+
+ try:
+ file_lock = lockfile(myfile_path,
+ wantnewlockfile=1, **lock_kwargs)
+ except TryAgain:
+ writemsg(_(">>> File '%s' is already locked by "
+ "another fetcher. Continuing...\n") % myfile,
+ noiselevel=-1)
+ continue
+ try:
+ if not listonly:
+
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
+ match, mystat = _check_distfile(
+ myfile_path, pruned_digests, eout, hash_filter=hash_filter)
+ if match:
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if distdir_writable and not os.path.islink(myfile_path):
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+ continue
+
+ if distdir_writable and mystat is None:
+ # Remove broken symlinks if necessary.
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+
+ if mystat is not None:
+ if stat.S_ISDIR(mystat.st_mode):
+ writemsg_level(
+ _("!!! Unable to fetch file since "
+ "a directory is in the way: \n"
+ "!!! %s\n") % myfile_path,
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+ elif distdir_writable:
+ if mystat.st_size < fetch_resume_size and \
+ mystat.st_size < size:
+ # If the file already exists and the size does not
+ # match the existing digests, it may be that the
+ # user is attempting to update the digest. In this
+ # case, the digestgen() function will advise the
+ # user to use `ebuild --force foo.ebuild manifest`
+ # in order to force the old digests to be replaced.
+ # Since the user may want to keep this file, rename
+ # it instead of deleting it.
+ writemsg(_(">>> Renaming distfile with size "
+ "%d (smaller than " "PORTAGE_FETCH_RESU"
+ "ME_MIN_SIZE)\n") % mystat.st_size)
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ elif mystat.st_size >= size:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+
+ if distdir_writable and ro_distdirs:
+ readonly_file = None
+ for x in ro_distdirs:
+ filename = os.path.join(x, myfile)
+ match, mystat = _check_distfile(
+ filename, pruned_digests, eout, hash_filter=hash_filter)
+ if match:
+ readonly_file = filename
+ break
+ if readonly_file is not None:
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ os.symlink(readonly_file, myfile_path)
+ continue
+
+ # this message is shown only after we know that
+ # the file is not already fetched
+ if not has_space:
+ writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+ (myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+ if has_space_superuser:
+ writemsg(_("!!! Insufficient privileges to use "
+ "remaining space.\n"), noiselevel=-1)
+ if userfetch:
+ writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+ " in /etc/portage/make.conf in order to fetch with\n"
+ "!!! superuser privileges.\n"), noiselevel=-1)
+
+ if fsmirrors and not os.path.exists(myfile_path) and has_space:
+ for mydir in fsmirrors:
+ mirror_file = os.path.join(mydir, myfile)
+ try:
+ shutil.copyfile(mirror_file, myfile_path)
+ writemsg(_("Local mirror has file: %s\n") % myfile)
+ break
+ except (IOError, OSError) as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ else:
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if not os.path.islink(myfile_path):
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % (e,), noiselevel=-1)
+
+ # If the file is empty then it's obviously invalid. Remove
+ # the empty file and try to download if possible.
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except EnvironmentError:
+ pass
+ elif myfile not in mydigests:
+ # We don't have a digest, but the file exists. We must
+ # assume that it is fully downloaded.
+ continue
+ else:
+ if mystat.st_size < mydigests[myfile]["size"] and \
+ not restrict_fetch:
+ fetched = 1 # Try to resume this download.
+ elif parallel_fetchonly and \
+ mystat.st_size == mydigests[myfile]["size"]:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET") == "1"
+ eout.ebegin(
+ "%s size ;-)" % (myfile, ))
+ eout.eend(0)
+ continue
+ else:
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
+ if not verified_ok:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % myfile, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ if distdir_writable:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ else:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET", None) == "1"
+ if digests:
+ digests = list(digests)
+ digests.sort()
+ eout.ebegin(
+ "%s %s ;-)" % (myfile, " ".join(digests)))
+ eout.eend(0)
+ continue # fetch any remaining files
+
+ # Create a reversed list since that is optimal for list.pop().
+ uri_list = filedict[myfile][:]
+ uri_list.reverse()
+ checksum_failure_count = 0
+ tried_locations = set()
+ while uri_list:
+ loc = uri_list.pop()
+ # Eliminate duplicates here in case we've switched to
+ # "primaryuri" mode on the fly due to a checksum failure.
+ if loc in tried_locations:
+ continue
+ tried_locations.add(loc)
+ if listonly:
+ writemsg_stdout(loc+" ", noiselevel=-1)
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ missing_file_param = False
+ fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ fetchcommand_var = "FETCHCOMMAND"
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (fetchcommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in fetchcommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % fetchcommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ resumecommand_var = "RESUMECOMMAND"
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (resumecommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in resumecommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % resumecommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ if missing_file_param:
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) man page for "
+ "information about how to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ if myfile != os.path.basename(loc):
+ return 0
+
+ if not can_fetch:
+ if fetched != 2:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+
+ if mysize == 0:
+ writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
+ noiselevel=-1)
+ elif size is None or size > mysize:
+ writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
+ noiselevel=-1)
+ else:
+ writemsg(_("!!! File %s is incorrect size, "
+ "but unable to retry.\n") % myfile, noiselevel=-1)
+ return 0
+ else:
+ continue
+
+ if fetched != 2 and has_space:
+ #we either need to resume or start the download
+ if fetched == 1:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+ if mystat.st_size < fetch_resume_size:
+ writemsg(_(">>> Deleting distfile with size "
+ "%d (smaller than " "PORTAGE_FETCH_RESU"
+ "ME_MIN_SIZE)\n") % mystat.st_size)
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in \
+ (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ if fetched == 1:
+ #resume mode:
+ writemsg(_(">>> Resuming download...\n"))
+ locfetch=resumecommand
+ command_var = resumecommand_var
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ command_var = fetchcommand_var
+ writemsg_stdout(_(">>> Downloading '%s'\n") % \
+ _hide_url_passwd(loc))
+ variables = {
+ "URI": loc,
+ "FILE": myfile
+ }
+
+ for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
+ try:
+ variables[k] = mysettings[k]
+ except KeyError:
+ pass
+
+ myfetch = shlex_split(locfetch)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ myret = -1
+ try:
+
+ myret = _spawn_fetch(mysettings, myfetch)
+
+ finally:
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2)
+ except FileNotFound:
+ pass
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+
+ # If the file is empty then it's obviously invalid. Don't
+ # trust the return value from the fetcher. Remove the
+ # empty file and try to download again.
+ try:
+ if os.stat(myfile_path).st_size == 0:
+ os.unlink(myfile_path)
+ fetched = 0
+ continue
+ except EnvironmentError:
+ pass
+
+ if mydigests is not None and myfile in mydigests:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+
+ if stat.S_ISDIR(mystat.st_mode):
+ # This can happen if FETCHCOMMAND erroneously
+ # contains wget's -P option where it should
+ # instead have -O.
+ writemsg_level(
+ _("!!! The command specified in the "
+ "%s variable appears to have\n!!! "
+ "created a directory instead of a "
+ "normal file.\n") % command_var,
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) "
+ "man page for information about how "
+ "to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ # No exception? Then the file exists. Let digestcheck()
+ # report appropriately for size or checksum errors.
+
+ # If the fetcher reported success and the file is
+ # too small, it's probably because the digest is
+ # bad (upstream changed the distfile). In this
+ # case we don't want to attempt to resume. Show a
+ # digest verification failure so that the user gets
+ # a clue about what just happened.
+ if myret != os.EX_OK and \
+ mystat.st_size < mydigests[myfile]["size"]:
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ with io.open(
+ _unicode_encode(myfile_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'
+ ) as f:
+ if html404.search(f.read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+ fetched = 0
+ continue
+ except (IOError, OSError):
+ pass
+ fetched = 1
+ continue
+ if True:
+ # File is the correct size--check the checksums for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
+ if not verified_ok:
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ fetched=0
+ checksum_failure_count += 1
+ if checksum_failure_count == \
+ checksum_failure_primaryuri:
+ # Switch to "primaryuri" mode in order
+ # to increase the probability
+ # of success.
+ primaryuris = \
+ primaryuri_dict.get(myfile)
+ if primaryuris:
+ uri_list.extend(
+ reversed(primaryuris))
+ if checksum_failure_count >= \
+ checksum_failure_max_tries:
+ break
+ else:
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ if digests:
+ eout.ebegin("%s %s ;-)" % \
+ (myfile, " ".join(sorted(digests))))
+ eout.eend(0)
+ fetched=2
+ break
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests is not None:
+ writemsg(_("No digest file available and download failed.\n\n"),
+ noiselevel=-1)
+ finally:
+ if use_locks and file_lock:
+ unlockfile(file_lock)
+ file_lock = None
+
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ if fetched != 2:
+ if restrict_fetch and not restrict_fetch_msg:
+ restrict_fetch_msg = True
+ msg = _("\n!!! %s/%s"
+ " has fetch restriction turned on.\n"
+ "!!! This probably means that this "
+ "ebuild's files must be downloaded\n"
+ "!!! manually. See the comments in"
+ " the ebuild for more information.\n\n") % \
+ (mysettings["CATEGORY"], mysettings["PF"])
+ writemsg_level(msg,
+ level=logging.ERROR, noiselevel=-1)
+ elif restrict_fetch:
+ pass
+ elif listonly:
+ pass
+ elif not filedict[myfile]:
+ writemsg(_("Warning: No mirrors available for file"
+ " '%s'\n") % (myfile), noiselevel=-1)
+ else:
+ writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
+ noiselevel=-1)
+
+ if listonly:
+ failed_files.add(myfile)
+ continue
+ elif fetchonly:
+ failed_files.add(myfile)
+ continue
+ return 0
+ if failed_files:
+ return 0
+ return 1
diff --git a/usr/lib/portage/pym/portage/package/ebuild/getmaskingreason.py b/usr/lib/portage/pym/portage/package/ebuild/getmaskingreason.py
new file mode 100644
index 0000000..1e4ed21
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/getmaskingreason.py
@@ -0,0 +1,126 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingreason']
+
+import portage
+from portage import os
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom, match_from_list
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.repository.config import _gen_valid_repo
+from portage.util import grablines, normalize_path
+from portage.versions import catpkgsplit, _pkg_str
+
+def getmaskingreason(mycpv, metadata=None, settings=None,
+ portdb=None, return_location=False, myrepo=None):
+ """
+ If specified, the myrepo argument is assumed to be valid. This
+ should be a safe assumption since portdbapi methods always
+ return valid repo names and valid "repository" metadata from
+ aux_get.
+ """
+ if settings is None:
+ settings = portage.settings
+ if portdb is None:
+ portdb = portage.portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys,
+ portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ else:
+ if myrepo is None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ elif myrepo is None:
+ myrepo = metadata.get("repository")
+ if myrepo is not None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ if metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"]):
+ # Return early since otherwise we might produce invalid
+ # results given that the EAPI is not supported. Also,
+ # metadata is mostly useless in this case since it doesn't
+ # contain essential things like SLOT.
+ if return_location:
+ return (None, None)
+ else:
+ return None
+
+ # Sometimes we can't access SLOT or repository due to corruption.
+ pkg = mycpv
+ try:
+ pkg.slot
+ except AttributeError:
+ pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)
+
+ cpv_slot_list = [pkg]
+
+ mycp = pkg.cp
+
+ locations = []
+ if pkg.repo in settings.repositories:
+ for repo in settings.repositories[pkg.repo].masters + (settings.repositories[pkg.repo],):
+ locations.append(os.path.join(repo.location, "profiles"))
+ locations.extend(settings.profiles)
+ locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH))
+ locations.reverse()
+ pmasklists = []
+ for profile in locations:
+ pmask_filename = os.path.join(profile, "package.mask")
+ node = None
+ for l, recursive_filename in grablines(pmask_filename,
+ recursive=1, remember_source_file=True):
+ if node is None or node[0] != recursive_filename:
+ node = (recursive_filename, [])
+ pmasklists.append(node)
+ node[1].append(l)
+
+ pmaskdict = settings._mask_manager._pmaskdict
+ if mycp in pmaskdict:
+ for x in pmaskdict[mycp]:
+ if match_from_list(x, cpv_slot_list):
+ x = x.without_repo
+ for pmask in pmasklists:
+ comment = ""
+ comment_valid = -1
+ pmask_filename = pmask[0]
+ for i in range(len(pmask[1])):
+ l = pmask[1][i].strip()
+ try:
+ l_atom = Atom(l, allow_repo=True,
+ allow_wildcard=True).without_repo
+ except InvalidAtom:
+ l_atom = None
+ if l == "":
+ comment = ""
+ comment_valid = -1
+ elif l[0] == "#":
+ comment += (l+"\n")
+ comment_valid = i + 1
+ elif l_atom == x:
+ if comment_valid != i:
+ comment = ""
+ if return_location:
+ return (comment, pmask_filename)
+ else:
+ return comment
+ elif comment_valid != -1:
+ # Apparently this comment applies to multiple masks, so
+ # it remains valid until a blank line is encountered.
+ comment_valid += 1
+ if return_location:
+ return (None, None)
+ else:
+ return None
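+
+# Sketch of the matching behavior, given a hypothetical package.mask entry:
+#
+#     # Masked for testing; see bug #000000.
+#     =app-misc/foo-1.0
+#
+# getmaskingreason("app-misc/foo-1.0") would return the comment block
+# directly above the matching atom, here
+# "# Masked for testing; see bug #000000.\n".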
diff --git a/usr/lib/portage/pym/portage/package/ebuild/getmaskingstatus.py b/usr/lib/portage/pym/portage/package/ebuild/getmaskingstatus.py
new file mode 100644
index 0000000..4b9e588
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/getmaskingstatus.py
@@ -0,0 +1,192 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['getmaskingstatus']
+
+import sys
+
+import portage
+from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.package.ebuild.config import config
+from portage.versions import catpkgsplit, _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class _UnmaskHint(object):
+
+ __slots__ = ('key', 'value')
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+
+class _MaskReason(object):
+
+ __slots__ = ('category', 'message', 'unmask_hint')
+
+ def __init__(self, category, message, unmask_hint=None):
+ self.category = category
+ self.message = message
+ self.unmask_hint = unmask_hint
+
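+# For instance, a package masked only by a missing unstable keyword is
+# reported roughly as (hypothetical keyword):
+#
+#     _MaskReason("KEYWORDS", "~amd64 keyword",
+#         unmask_hint=_UnmaskHint("unstable keyword", "~amd64"))
+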
+def getmaskingstatus(mycpv, settings=None, portdb=None, myrepo=None):
+ if settings is None:
+ settings = config(clone=portage.settings)
+ if portdb is None:
+ portdb = portage.portdb
+
+ return [mreason.message for \
+ mreason in _getmaskingstatus(mycpv, settings, portdb,myrepo)]
+
+def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
+
+ metadata = None
+ installed = False
+ if not isinstance(mycpv, basestring):
+ # emerge passed in a Package instance
+ pkg = mycpv
+ mycpv = pkg.cpv
+ metadata = pkg._metadata
+ installed = pkg.installed
+
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ return [_MaskReason("corruption", "corruption")]
+ if "?" in metadata["LICENSE"]:
+ settings.setcpv(mycpv, mydb=metadata)
+ metadata["USE"] = settings["PORTAGE_USE"]
+ else:
+ metadata["USE"] = ""
+
+ try:
+ mycpv.slot
+ except AttributeError:
+ try:
+ mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
+ except portage.exception.InvalidData:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+
+ rValue = []
+
+ # package.mask checking
+ if settings._getMaskAtom(mycpv, metadata):
+ rValue.append(_MaskReason("package.mask", "package.mask", _UnmaskHint("p_mask", None)))
+
+ # keywords checking
+ eapi = metadata["EAPI"]
+ mygroups = settings._getKeywords(mycpv, metadata)
+ licenses = metadata["LICENSE"]
+ properties = metadata["PROPERTIES"]
+ restrict = metadata["RESTRICT"]
+ if not eapi_is_supported(eapi):
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ elif _eapi_is_deprecated(eapi) and not installed:
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ egroups = settings.configdict["backupenv"].get(
+ "ACCEPT_KEYWORDS", "").split()
+ global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
+ pgroups = global_accept_keywords.split()
+ myarch = settings["ARCH"]
+ if pgroups and myarch not in pgroups:
+ """For operating systems other than Linux, ARCH is not necessarily a
+ valid keyword."""
+ myarch = pgroups[0].lstrip("~")
+
+ # NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
+ unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
+ metadata["SLOT"], metadata["repository"], global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+ if unmaskgroups or egroups:
+ pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ kmask = "missing"
+ kmask_hint = None
+
+ if '**' in pgroups:
+ kmask = None
+ else:
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask = None
+ break
+
+ if kmask:
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp == "~*":
+ for x in pgroups:
+ if x[:1] == "~":
+ kmask = None
+ break
+ if kmask is None:
+ break
+ elif gp=="-"+myarch and myarch in pgroups:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch and myarch in pgroups:
+ kmask="~"+myarch
+ kmask_hint = _UnmaskHint("unstable keyword", kmask)
+ break
+
+ if kmask == "missing":
+ kmask_hint = _UnmaskHint("unstable keyword", "**")
+
+ try:
+ missing_licenses = settings._getMissingLicenses(mycpv, metadata)
+ if missing_licenses:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_licenses)
+ license_split = licenses.split()
+ license_split = [x for x in license_split \
+ if x in allowed_tokens]
+ msg = license_split[:]
+ msg.append("license(s)")
+ rValue.append(_MaskReason("LICENSE", " ".join(msg), _UnmaskHint("license", set(missing_licenses))))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "LICENSE: "+str(e)))
+
+ try:
+ missing_properties = settings._getMissingProperties(mycpv, metadata)
+ if missing_properties:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_properties)
+ properties_split = properties.split()
+ properties_split = [x for x in properties_split \
+ if x in allowed_tokens]
+ msg = properties_split[:]
+ msg.append("properties")
+ rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+
+ try:
+ missing_restricts = settings._getMissingRestrict(mycpv, metadata)
+ if missing_restricts:
+ msg = list(missing_restricts)
+ msg.append("in RESTRICT")
+ rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
+ except InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))
+
+ # Only show KEYWORDS masks for installed packages
+ # if they're not masked for any other reason.
+ if kmask and (not installed or not rValue):
+ rValue.append(_MaskReason("KEYWORDS",
+ kmask + " keyword", unmask_hint=kmask_hint))
+
+ return rValue
diff --git a/usr/lib/portage/pym/portage/package/ebuild/prepare_build_dirs.py b/usr/lib/portage/pym/portage/package/ebuild/prepare_build_dirs.py
new file mode 100644
index 0000000..89d4166
--- /dev/null
+++ b/usr/lib/portage/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -0,0 +1,409 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['prepare_build_dirs']
+
+import errno
+import gzip
+import stat
+import time
+
+import portage
+from portage import os, shutil, _encodings, _unicode_encode, _unicode_decode
+from portage.data import portage_gid, portage_uid, secpass
+from portage.exception import DirectoryNotFound, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, normalize_path, writemsg
+from portage.const import EPREFIX
+
+def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
+ """
+ The myroot parameter is ignored.
+ """
+ myroot = None
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ clean_dirs = [mysettings["HOME"]]
+
+ # We enable cleanup when we want to make sure old cruft (such as the old
+ # environment) doesn't interfere with the current phase.
+ if cleanup and 'keeptemp' not in mysettings.features:
+ clean_dirs.append(mysettings["T"])
+
+ for clean_dir in clean_dirs:
+ try:
+ shutil.rmtree(clean_dir)
+ except OSError as oe:
+ if errno.ENOENT == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
+ clean_dir, noiselevel=-1)
+ return 1
+ else:
+ raise
+
+ def makedirs(dir_path):
+ try:
+ os.makedirs(dir_path)
+ except OSError as oe:
+ if errno.EEXIST == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
+ dir_path, noiselevel=-1)
+ return False
+ else:
+ raise
+ return True
+
+ mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
+
+ mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
+ mydirs.append(os.path.dirname(mydirs[-1]))
+
+ try:
+ for mydir in mydirs:
+ ensure_dirs(mydir)
+ try:
+ apply_secpass_permissions(mydir,
+ gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(mydir):
+ raise
+ for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+ """These directories don't necessarily need to be group writable.
+ However, the setup phase is commonly run as a privileged user prior
+ to the other phases being run by an unprivileged user. Currently,
+ we use the portage group to ensure that the unprivileged user still
+ has write access to these directories in any case."""
+ ensure_dirs(mysettings[dir_key], mode=0o775)
+ apply_secpass_permissions(mysettings[dir_key],
+ uid=portage_uid, gid=portage_gid)
+ except PermissionDenied as e:
+ writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except OperationNotPermitted as e:
+ writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except FileNotFound as e:
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ return 1
+
+ # Reset state for things like noauto and keepwork in FEATURES.
+ for x in ('.die_hooks',):
+ try:
+ os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
+ except OSError:
+ pass
+
+ _prepare_workdir(mysettings)
+ if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
+ # Avoid spurious permissions adjustments when fetching with
+ # a temporary PORTAGE_TMPDIR setting (for fetchonly).
+ _prepare_features_dirs(mysettings)
+
+def _adjust_perms_msg(settings, msg):
+
+ def write(msg):
+ writemsg(msg, noiselevel=-1)
+
+ background = settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = settings.get("PORTAGE_LOG_FILE")
+ log_file = None
+ log_file_real = None
+
+ if background and log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ log_file_real = log_file
+ except IOError:
+ def write(msg):
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file = gzip.GzipFile(filename='',
+ mode='ab', fileobj=log_file)
+ def write(msg):
+ log_file.write(_unicode_encode(msg))
+ log_file.flush()
+
+ try:
+ write(msg)
+ finally:
+ if log_file is not None:
+ log_file.close()
+ if log_file_real is not log_file:
+ log_file_real.close()
+
+def _prepare_features_dirs(mysettings):
+
+ # Use default ABI libdir in accordance with bug #355283.
+ libdir = None
+ default_abi = mysettings.get("DEFAULT_ABI")
+ if default_abi:
+ libdir = mysettings.get("LIBDIR_" + default_abi)
+ if not libdir:
+ libdir = "lib"
+
+ features_dirs = {
+ "ccache":{
+ "path_dir": EPREFIX+"/usr/%s/ccache/bin" % (libdir,),
+ "basedir_var":"CCACHE_DIR",
+ "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
+ "always_recurse":False},
+ "distcc":{
+ "path_dir": EPREFIX+"/usr/%s/distcc/bin" % (libdir,),
+ "basedir_var":"DISTCC_DIR",
+ "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
+ "subdirs":("lock", "state"),
+ "always_recurse":True}
+ }
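+ # For example, with FEATURES="ccache" and CCACHE_DIR unset, the
+ # directory defaults to ${PORTAGE_TMPDIR}/ccache, and the wrapper
+ # directory ${EPREFIX}/usr/<libdir>/ccache/bin must exist on disk
+ # or the feature is disabled below via DirectoryNotFound.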
+ dirmode = 0o2070
+ filemode = 0o60
+ modemask = 0o2
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+ droppriv = secpass >= 2 and \
+ "userpriv" in mysettings.features and \
+ "userpriv" not in restrict
+ for myfeature, kwargs in features_dirs.items():
+ if myfeature in mysettings.features:
+ failure = False
+ basedir = mysettings.get(kwargs["basedir_var"])
+ if basedir is None or not basedir.strip():
+ basedir = kwargs["default_dir"]
+ mysettings[kwargs["basedir_var"]] = basedir
+ try:
+ path_dir = kwargs["path_dir"]
+ if not os.path.isdir(path_dir):
+ raise DirectoryNotFound(path_dir)
+
+ mydirs = [mysettings[kwargs["basedir_var"]]]
+ if "subdirs" in kwargs:
+ for subdir in kwargs["subdirs"]:
+ mydirs.append(os.path.join(basedir, subdir))
+ for mydir in mydirs:
+ modified = ensure_dirs(mydir)
+ # Generally, we only want to apply permissions for
+ # initial creation. Otherwise, we don't know exactly what
+ # permissions the user wants, so should leave them as-is.
+ droppriv_fix = False
+ if droppriv:
+ st = os.stat(mydir)
+ if st.st_gid != portage_gid or \
+ not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
+ droppriv_fix = True
+ if not droppriv_fix:
+ # Check permissions of files in the directory.
+ for filename in os.listdir(mydir):
+ try:
+ subdir_st = os.lstat(
+ os.path.join(mydir, filename))
+ except OSError:
+ continue
+ if subdir_st.st_gid != portage_gid or \
+ ((stat.S_ISDIR(subdir_st.st_mode) and \
+ not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
+ droppriv_fix = True
+ break
+
+ if droppriv_fix:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=userpriv: '%s'\n") % mydir)
+ elif modified:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
+
+ if modified or kwargs["always_recurse"] or droppriv_fix:
+ def onerror(e):
+ raise # The feature is disabled if a single error
+ # occurs during permissions adjustment.
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+
+ except DirectoryNotFound as e:
+ failure = True
+ writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
+ (e,), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ except PortageException as e:
+ failure = True
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
+ (kwargs["basedir_var"], basedir), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ if failure:
+ mysettings.features.remove(myfeature)
+ time.sleep(5)
+
+def _prepare_workdir(mysettings):
+ workdir_mode = 0o700
+ try:
+ mode = mysettings["PORTAGE_WORKDIR_MODE"]
+ if mode.isdigit():
+ parsed_mode = int(mode, 8)
+ elif mode == "":
+ raise KeyError()
+ else:
+ raise ValueError()
+ if parsed_mode & 0o7777 != parsed_mode:
+ raise ValueError("Invalid file mode: %s" % mode)
+ else:
+ workdir_mode = parsed_mode
+ except KeyError as e:
+ writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
+ except ValueError as e:
+ if len(str(e)) > 0:
+ writemsg("%s\n" % e)
+ writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
+ (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
+ mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
+ try:
+ apply_secpass_permissions(mysettings["WORKDIR"],
+ uid=portage_uid, gid=portage_gid, mode=workdir_mode)
+ except FileNotFound:
+ pass # ebuild.sh will create it
+
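+ # For example (a sketch), PORTAGE_WORKDIR_MODE="0700" parses via
+ # int("0700", 8) == 0o700, while any value with bits outside 0o7777
+ # raises ValueError above, so the 0o700 default is kept instead.
+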
+ if mysettings.get("PORT_LOGDIR", "") == "":
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+ if "PORT_LOGDIR" in mysettings:
+ try:
+ modified = ensure_dirs(mysettings["PORT_LOGDIR"])
+ if modified:
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ apply_secpass_permissions(mysettings["PORT_LOGDIR"],
+ uid=portage_uid, gid=portage_gid, mode=0o2770)
+ except PortageException as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
+ mysettings["PORT_LOGDIR"], noiselevel=-1)
+ writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+
+ compress_log_ext = ''
+ if 'compress-build-logs' in mysettings.features:
+ compress_log_ext = '.gz'
+
+ logdir_subdir_ok = False
+ if "PORT_LOGDIR" in mysettings and \
+ os.access(mysettings["PORT_LOGDIR"], os.W_OK):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
+ if not os.path.exists(logid_path):
+ open(_unicode_encode(logid_path), 'w').close()
+ logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
+ time.gmtime(os.stat(logid_path).st_mtime)),
+ encoding=_encodings['content'], errors='replace')
+
+ if "split-log" in mysettings.features:
+ log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ log_subdir, "%s:%s.log%s" %
+ (mysettings["PF"], logid_time, compress_log_ext))
+ else:
+ log_subdir = logdir
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ logdir, "%s:%s:%s.log%s" % \
+ (mysettings["CATEGORY"], mysettings["PF"], logid_time,
+ compress_log_ext))
+
+ if log_subdir is logdir:
+ logdir_subdir_ok = True
+ else:
+ try:
+ _ensure_log_subdirs(logdir, log_subdir)
+ except PortageException as e:
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+
+ if os.access(log_subdir, os.W_OK):
+ logdir_subdir_ok = True
+ else:
+ writemsg("!!! %s: %s\n" %
+ (_("Permission Denied"), log_subdir), noiselevel=-1)
+
+ tmpdir_log_path = os.path.join(
+ mysettings["T"], "build.log%s" % compress_log_ext)
+ if not logdir_subdir_ok:
+ # NOTE: When sesandbox is enabled, the local SELinux security policies
+ # may not allow output to be piped out of the sesandbox domain. The
+ # current policy will allow it to work when a pty is available, but
+ # not through a normal pipe. See bug #162404.
+ mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
+ else:
+ # Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
+ # requested in bug #412865.
+ make_new_symlink = False
+ try:
+ target = os.readlink(tmpdir_log_path)
+ except OSError:
+ make_new_symlink = True
+ else:
+ if target != mysettings["PORTAGE_LOG_FILE"]:
+ make_new_symlink = True
+ if make_new_symlink:
+ try:
+ os.unlink(tmpdir_log_path)
+ except OSError:
+ pass
+ os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
+
+def _ensure_log_subdirs(logdir, subdir):
+ """
+ This assumes that logdir exists, and creates subdirectories down
+ to subdir as necessary. The gid of logdir is copied to all
+ subdirectories, along with 0o2070 mode bits if present. Both logdir
+ and subdir are assumed to be normalized absolute paths.
+ """
+ st = os.stat(logdir)
+ uid = -1
+ gid = st.st_gid
+ grp_mode = 0o2070 & st.st_mode
+
+ # If logdir is writable by the portage group but its uid
+ # is not portage_uid, then set the uid to portage_uid if
+ # we have privileges to do so, for compatibility with our
+ # default logrotate config (see bug 378451). With the
+ # "su portage portage" directive and logrotate-3.8.0,
+ # logrotate's chown call during the compression phase will
+ # only succeed if the log file's uid is portage_uid.
+ if grp_mode and gid == portage_gid and \
+ portage.data.secpass >= 2:
+ uid = portage_uid
+ if st.st_uid != portage_uid:
+ ensure_dirs(logdir, uid=uid)
+
+ logdir_split_len = len(logdir.split(os.sep))
+ subdir_split = subdir.split(os.sep)[logdir_split_len:]
+ subdir_split.reverse()
+ current = logdir
+ while subdir_split:
+ current = os.path.join(current, subdir_split.pop())
+ ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0)
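+
+# For example (hypothetical paths): with logdir "/var/log/portage" at
+# mode 0o2770, _ensure_log_subdirs("/var/log/portage",
+# "/var/log/portage/build/app-misc") creates "build" and then
+# "build/app-misc", copying logdir's gid and its 0o2070 group bits.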
diff --git a/usr/lib/portage/pym/portage/process.py b/usr/lib/portage/pym/portage/process.py
new file mode 100644
index 0000000..5bcf81b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/process.py
@@ -0,0 +1,665 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+import atexit
+import errno
+import fcntl
+import platform
+import signal
+import socket
+import struct
+import sys
+import traceback
+import os as _os
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:dump_traceback,writemsg',
+)
+
+from portage.const import BASH_BINARY, SANDBOX_BINARY, MACOSSANDBOX_BINARY, FAKEROOT_BINARY
+from portage.exception import CommandNotFound
+from portage.util._ctypes import find_library, LoadLibrary, ctypes
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Support PEP 446 for Python >=3.4
+try:
+ _set_inheritable = _os.set_inheritable
+except AttributeError:
+ _set_inheritable = None
+
+try:
+ _FD_CLOEXEC = fcntl.FD_CLOEXEC
+except AttributeError:
+ _FD_CLOEXEC = None
+
+# Prefer /proc/self/fd if available (/dev/fd
+# doesn't work on solaris, see bug #474536).
+for _fd_dir in ("/proc/self/fd", "/dev/fd"):
+ if os.path.isdir(_fd_dir):
+ break
+ else:
+ _fd_dir = None
+
+# /dev/fd does not work on FreeBSD, see bug #478446
+if platform.system() in ('FreeBSD',) and _fd_dir == '/dev/fd':
+ _fd_dir = None
+
+if _fd_dir is not None:
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
+
+ if platform.python_implementation() == 'PyPy':
+ # EAGAIN observed with PyPy 1.8.
+ _get_open_fds = get_open_fds
+ def get_open_fds():
+ try:
+ return _get_open_fds()
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ return range(max_fd_limit)
+
+elif os.path.isdir("/proc/%s/fd" % os.getpid()):
+ # In order for this function to work in forked subprocesses,
+ # os.getpid() must be called from inside the function.
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir("/proc/%s/fd" % os.getpid())
+ if fd.isdigit())
+
+else:
+ def get_open_fds():
+ return range(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+ os.access(SANDBOX_BINARY, os.X_OK))
+
+fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
+ os.access(FAKEROOT_BINARY, os.X_OK))
+
+macossandbox_capable = (os.path.isfile(MACOSSANDBOX_BINARY) and
+ os.access(MACOSSANDBOX_BINARY, os.X_OK))
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """
+ Spawns a bash shell running a specific command
+
+ @param mycommand: The command for bash to run
+ @type mycommand: String
+ @param debug: Turn bash debugging on (set -x)
+ @type debug: Boolean
+ @param opt_name: Name of the spawned process (defaults to binary name)
+ @type opt_name: String
+ @param keywords: Extra Dictionary arguments to pass to spawn
+ @type keywords: Dictionary
+ """
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
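+# A minimal usage sketch (the command shown is hypothetical):
+#
+#     retval = spawn_bash("echo hello > /tmp/out.txt")
+#
+# With debug disabled this execs BASH_BINARY with ["-c", mycommand] and
+# returns whatever spawn() returns for the resulting process.
+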
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args = [SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
+ args = [FAKEROOT_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if fakeroot_state:
+ open(fakeroot_state, "a").close()
+ args.append("-s")
+ args.append(fakeroot_state)
+ args.append("-i")
+ args.append(fakeroot_state)
+ args.append("--")
+ args.append(BASH_BINARY)
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_macossandbox(mycommand, profile=None, opt_name=None, **keywords):
+ if not macossandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args=[MACOSSANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append("-p")
+ args.append(profile)
+ args.append(BASH_BINARY)
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except: # No idea what they called, so we need this broad except here.
+ dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ if sys.hexversion >= 0x3000000:
+ raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
+ else:
+ exec("raise exc_info[0], exc_info[1], exc_info[2]")
+
+atexit.register(run_exitfuncs)
+
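+# Registration mirrors atexit.register (the handler here is hypothetical):
+#
+#     atexit_register(writemsg, "cleaning up\n")
+#
+# Callers that re-execute via os.execv must invoke run_exitfuncs()
+# themselves first, since registered handlers do not survive the exec.
+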
+# It used to be necessary for API consumers to remove pids from spawned_pids,
+# since otherwise it would accumulate pids endlessly. Now, spawned_pids is
+# just an empty dummy list, so for backward compatibility, ignore ValueError
+# for removal on non-existent items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy spawned_pids.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
+spawned_pids = _dummy_list()
+
+def cleanup():
+ pass
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ path_lookup=True, pre_exec=None, close_fds=True, unshare_net=False,
+ unshare_ipc=False, cgroup=None):
+ """
+ Spawns a given command.
+
+ @param mycommand: the command to execute
+ @type mycommand: String or List (Popen style list)
+ @param env: A dict of Key=Value pairs for env variables
+ @type env: Dictionary
+ @param opt_name: an optional name for the spawn'd process (defaults to the binary name)
+ @type opt_name: String
+ @param fd_pipes: A dict mapping file descriptors, e.g. { 0: stdin, 1: stdout }
+ (default is {0:stdin, 1:stdout, 2:stderr})
+ @type fd_pipes: Dictionary
+ @param returnpid: Return the Process IDs for a successful spawn.
+ NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them up.
+ @type returnpid: Boolean
+ @param uid: User ID to spawn as; useful for dropping privileges
+ @type uid: Integer
+ @param gid: Group ID to spawn as; useful for dropping privileges
+ @type gid: Integer
+ @param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+ @type groups: List
+ @param umask: An integer representing the umask for the process (see man chmod for umask details)
+ @type umask: Integer
+ @param logfile: name of a file to use for logging purposes
+ @type logfile: String
+ @param path_lookup: If the binary is not fully specified then look for it in PATH
+ @type path_lookup: Boolean
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+ @param close_fds: If True, then close all file descriptors except those
+ referenced by fd_pipes (default is True).
+ @type close_fds: Boolean
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+
+ logfile requires stdout and stderr to be assigned to this process (ie not pointed
+ somewhere else.)
+
+ """
+
+ # mycommand is either a str or a list
+ if isinstance(mycommand, basestring):
+ mycommand = mycommand.split()
+
+ if sys.hexversion < 0x3000000:
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ env_bytes = {}
+ for k, v in env.items():
+ env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
+ _unicode_encode(v, encoding=_encodings['content'])
+ env = env_bytes
+ del env_bytes
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
+ (not os.path.isabs(binary) or not os.path.isfile(binary)
+ or not os.access(binary, os.X_OK)):
+ binary = path_lookup and find_binary(binary) or None
+ if not binary:
+ raise CommandNotFound(mycommand[0])
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
+ }
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr,
+ 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if unshare_net or unshare_ipc:
+ find_library("c")
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid == 0:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask, pre_exec, close_fds,
+ unshare_net, unshare_ipc, cgroup)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ writemsg("%s:\n %s\n" % (e, " ".join(mycommand)),
+ noiselevel=-1)
+ traceback.print_exc()
+ sys.stderr.flush()
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ if not isinstance(pid, int):
+ raise AssertionError("fork returned non-integer: %s" % (repr(pid),))
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if os.waitpid(pid, os.WNOHANG)[0] == 0:
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+
+ # If it got a signal, return the signal that was sent.
+ if (retval & 0xff):
+ return ((retval & 0xff) << 8)
+
+ # Otherwise, return its exit code.
+ return (retval >> 8)
+
+ # Everything succeeded
+ return 0
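+
+# Usage sketch (editor's note; /tmp/build.log is a hypothetical path):
+#   rc = spawn(["date", "-u"], env={"TZ": "UTC"}, logfile="/tmp/build.log")
+# tees the child's stdout/stderr into the log file and returns its exit
+# status, while:
+#   pids = spawn("sleep 30", returnpid=True)
+# hands ownership of the child pids to the caller, who must waitpid() them.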
+
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
+ pre_exec, close_fds, unshare_net, unshare_ipc, cgroup):
+
+ """
+ Execute a given binary with options
+
+ @param binary: Name of program to execute
+ @type binary: String
+ @param mycommand: Full command with options (argv-style list; argv[0] is replaced by opt_name)
+ @type mycommand: List
+ @param opt_name: Name of process (defaults to binary)
+ @type opt_name: String
+ @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+ @type fd_pipes: Dictionary
+ @param env: Key,Value mapping for Environmental Variables
+ @type env: Dictionary
+ @param gid: Group ID to run the process under
+ @type gid: Integer
+ @param groups: Groups the Process should be in.
+ @type groups: List
+ @param uid: User ID to run the process under
+ @type uid: Integer
+ @param umask: an int representing a unix umask (see man chmod for umask details)
+ @type umask: Integer
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+ @rtype: None
+ @return: Never returns (calls os.execve)
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ if binary is portage._python_interpreter:
+ # NOTE: PyPy 1.7 will die due to "library path not found" if argv[0]
+ # does not contain the full path of the binary.
+ opt_name = binary
+ else:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ myargs = [_unicode_encode(x, encoding=_encodings['fs'],
+ errors='strict') for x in myargs]
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Quiet killing of subprocesses by SIGPIPE (see bug #309001).
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ # Avoid issues triggered by inheritance of SIGQUIT handler from
+ # the parent process (see bug #289486).
+ signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+ _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
+
+ # Add to cgroup
+ # it's better to do it from the child since we can guarantee
+ # it is done before we start forking children
+ if cgroup:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'a') as f:
+ f.write('%d\n' % os.getpid())
+
+ # Unshare (while still uid==0)
+ if unshare_net or unshare_ipc:
+ filename = find_library("c")
+ if filename is not None:
+ libc = LoadLibrary(filename)
+ if libc is not None:
+ CLONE_NEWIPC = 0x08000000
+ CLONE_NEWNET = 0x40000000
+
+ flags = 0
+ if unshare_net:
+ flags |= CLONE_NEWNET
+ if unshare_ipc:
+ flags |= CLONE_NEWIPC
+
+ try:
+ if libc.unshare(flags) != 0:
+ writemsg("Unable to unshare: %s\n" % (
+ errno.errorcode.get(ctypes.get_errno(), '?')),
+ noiselevel=-1)
+ else:
+ if unshare_net:
+ # 'up' the loopback
+ IFF_UP = 0x1
+ ifreq = struct.pack('16sh', b'lo', IFF_UP)
+ SIOCSIFFLAGS = 0x8914
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+ try:
+ fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
+ except IOError as e:
+ writemsg("Unable to enable loopback interface: %s\n" % (
+ errno.errorcode.get(e.errno, '?')),
+ noiselevel=-1)
+ sock.close()
+ except AttributeError:
+ # unshare() not supported by libc
+ pass
+
+ # Set requested process permissions.
+ if gid:
+ # Cast proxies to int, in case it matters.
+ os.setgid(int(gid))
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ # Cast proxies to int, in case it matters.
+ os.setuid(int(uid))
+ if umask:
+ os.umask(umask)
+ if pre_exec:
+ pre_exec()
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
+ """Setup pipes for a forked process.
+
+ Even when close_fds is False, file descriptors referenced as
+ values in fd_pipes are automatically closed if they do not also
+ occur as keys in fd_pipes. It is assumed that the caller will
+ explicitly add them to the fd_pipes keys if they are intended
+ to remain open. This allows for convenient elimination of
+ unnecessary duplicate file descriptors.
+
+ WARNING: When not followed by exec, the close_fds behavior
+ can trigger interference from destructors that close file
+ descriptors. This interference happens when the garbage
+ collector intermittently executes such destructors after their
+ corresponding file descriptors have been re-used, leading
+ to intermittent "[Errno 9] Bad file descriptor" exceptions in
+ forked processes. This problem has been observed with PyPy 1.8,
+ and also with CPython under some circumstances (as triggered
+ by xmpppy in bug #374335). In order to close a safe subset of
+ file descriptors, see portage.locks._close_fds().
+
+ NOTE: When not followed by exec, even when close_fds is False,
+ it's still possible for dup2() calls to cause interference in a
+ way that's similar to the way that close_fds interferes (since
+ dup2() has to close the target fd if it happens to be open).
+ It's possible to avoid such interference by using allocated
+ file descriptors as the keys in fd_pipes. For example:
+
+ pr, pw = os.pipe()
+ fd_pipes[pw] = pw
+
+ By using the allocated pw file descriptor as the key in fd_pipes,
+ it's not necessary for dup2() to close a file descriptor (it
+ actually does nothing in this case), which avoids possible
+ interference.
+ """
+
+ reverse_map = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we create a reverse map
+ # in order to know when it's necessary to create temporary
+ # backup copies with os.dup().
+ for newfd, oldfd in fd_pipes.items():
+ newfds = reverse_map.get(oldfd)
+ if newfds is None:
+ newfds = []
+ reverse_map[oldfd] = newfds
+ newfds.append(newfd)
+
+ # Assign newfds via dup2(), making temporary backups when
+ # necessary, and closing oldfd if the caller has not
+ # explicitly requested for it to remain open by adding
+ # it to the keys of fd_pipes.
+ while reverse_map:
+
+ oldfd, newfds = reverse_map.popitem()
+ old_fdflags = None
+
+ for newfd in newfds:
+ if newfd in reverse_map:
+ # Make a temporary backup before re-assignment, assuming
+ # that backup_fd won't collide with a key in reverse_map
+ # (since all of the keys correspond to open file
+ # descriptors, and os.dup() only allocates a previously
+ # unused file descriptor).
+ backup_fd = os.dup(newfd)
+ reverse_map[backup_fd] = reverse_map.pop(newfd)
+
+ if oldfd != newfd:
+ os.dup2(oldfd, newfd)
+ if _set_inheritable is not None:
+ # Don't do this unless _set_inheritable is available,
+ # since it's used below to ensure correct state, and
+ # otherwise /dev/null stdin fails to inherit (at least
+ # with Python versions from 3.1 to 3.3).
+ if old_fdflags is None:
+ old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
+ fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)
+
+ if _set_inheritable is not None:
+
+ inheritable_state = None
+ if not (old_fdflags is None or _FD_CLOEXEC is None):
+ inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)
+
+ if inheritable is not None:
+ if inheritable_state is not inheritable:
+ _set_inheritable(newfd, inheritable)
+
+ elif newfd in (0, 1, 2):
+ if inheritable_state is not True:
+ _set_inheritable(newfd, True)
+
+ if oldfd not in fd_pipes:
+ # If oldfd is not a key in fd_pipes, then it's safe
+ # to close now, since we've already made all of the
+ # requested duplicates. This also closes every
+ # backup_fd that may have been created on previous
+ # iterations of this loop.
+ os.close(oldfd)
+
+ if close_fds:
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ if fd not in fd_pipes:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
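+
+# Behavior sketch (editor's note): the reverse map above is what makes a
+# swap like this safe; without the os.dup() backup, assigning one fd would
+# clobber the other before it could be duplicated:
+#   _setup_pipes({1: 2, 2: 1}, close_fds=False)   # swap stdout and stderr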
+
+def find_binary(binary):
+ """
+ Given a binary name, find the binary in PATH
+
+ @param binary: Name of the binary to find
+ @type binary: String
+ @rtype: None or string
+ @return: full path to binary or None if the binary could not be located.
+ """
+ paths = os.environ.get("PATH", "")
+ if sys.hexversion >= 0x3000000 and isinstance(binary, bytes):
+ # return bytes when input is bytes
+ paths = paths.encode(sys.getfilesystemencoding(), 'surrogateescape')
+ paths = paths.split(b':')
+ else:
+ paths = paths.split(':')
+
+ for path in paths:
+ filename = _os.path.join(path, binary)
+ if _os.access(filename, os.X_OK) and _os.path.isfile(filename):
+ return filename
+ return None
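+
+# Usage sketch (editor's note):
+#   find_binary("sh")    # -> e.g. "/bin/sh", or None if not found in PATH
+#   find_binary(b"sh")   # bytes in, bytes path out on Python 3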
diff --git a/usr/lib/portage/pym/portage/proxy/__init__.py b/usr/lib/portage/pym/portage/proxy/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/usr/lib/portage/pym/portage/proxy/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/proxy/lazyimport.py b/usr/lib/portage/pym/portage/proxy/lazyimport.py
new file mode 100644
index 0000000..5aa7e50
--- /dev/null
+++ b/usr/lib/portage/pym/portage/proxy/lazyimport.py
@@ -0,0 +1,213 @@
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['lazyimport']
+
+import sys
+import types
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage.proxy.objectproxy import ObjectProxy
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_module_proxies = {}
+_module_proxies_lock = threading.RLock()
+
+def _preload_portage_submodules():
+ """
+ Load lazily referenced portage submodules into memory,
+ so imports won't fail during portage upgrade/downgrade.
+ Note that this recursively loads only the modules that
+ are lazily referenced by currently imported modules,
+ so some portage submodules may still remain unimported
+ after this function is called.
+ """
+ imported = set()
+ while True:
+ remaining = False
+ for name in list(_module_proxies):
+ if name.startswith('portage.') or name.startswith('_emerge.'):
+ if name in imported:
+ continue
+ imported.add(name)
+ remaining = True
+ __import__(name)
+ _unregister_module_proxy(name)
+ if not remaining:
+ break
+
+def _register_module_proxy(name, proxy):
+ _module_proxies_lock.acquire()
+ try:
+ proxy_list = _module_proxies.get(name)
+ if proxy_list is None:
+ proxy_list = []
+ _module_proxies[name] = proxy_list
+ proxy_list.append(proxy)
+ finally:
+ _module_proxies_lock.release()
+
+def _unregister_module_proxy(name):
+ """
+ Destroy all proxies that reference the given module name. Also, check
+ for other proxies referenced by modules that have been imported and
+ destroy those proxies too. This way, destruction of a single proxy
+ can trigger destruction of all the rest. If a target module appears
+ to be partially imported (indicated when an AttributeError is caught),
+ this function will leave in place proxies that reference it.
+ """
+ _module_proxies_lock.acquire()
+ try:
+ if name in _module_proxies:
+ modules = sys.modules
+ for name, proxy_list in list(_module_proxies.items()):
+ if name not in modules:
+ continue
+ # First delete this name from the dict so that
+ # if this same thread reenters below, it won't
+ # enter this path again.
+ del _module_proxies[name]
+ try:
+ while proxy_list:
+ proxy = proxy_list.pop()
+ object.__getattribute__(proxy, '_get_target')()
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so proxies that reference it cannot
+ # be destroyed yet.
+ proxy_list.append(proxy)
+ _module_proxies[name] = proxy_list
+ finally:
+ _module_proxies_lock.release()
+
+class _LazyImport(ObjectProxy):
+
+ __slots__ = ('_scope', '_alias', '_name', '_target')
+
+ def __init__(self, scope, alias, name):
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_scope', scope)
+ object.__setattr__(self, '_alias', alias)
+ object.__setattr__(self, '_name', name)
+ _register_module_proxy(name, self)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ __import__(name)
+ target = sys.modules[name]
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+class _LazyImportFrom(_LazyImport):
+
+ __slots__ = ('_attr_name',)
+
+ def __init__(self, scope, name, attr_name, alias):
+ object.__setattr__(self, '_attr_name', attr_name)
+ _LazyImport.__init__(self, scope, alias, name)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ attr_name = object.__getattribute__(self, '_attr_name')
+ __import__(name)
+ # If called by _unregister_module_proxy() and the target module is
+ # partially imported, then the following getattr call may raise an
+ # AttributeError for _unregister_module_proxy() to handle.
+ target = getattr(sys.modules[name], attr_name)
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+def lazyimport(scope, *args):
+ """
+ Create a proxy in the given scope in order to perform a lazy import.
+
+ Syntax Result
+ foo import foo
+ foo:bar,baz from foo import bar, baz
+ foo:bar@baz from foo import bar as baz
+
+ @param scope: the scope in which to place the import, typically globals()
+ @type scope: dict
+ @param args: module names to import
+ @type args: strings
+ """
+
+ modules = sys.modules
+
+ for s in args:
+ parts = s.split(':', 1)
+ if len(parts) == 1:
+ name = s
+
+ if not name or not isinstance(name, basestring):
+ raise ValueError(name)
+
+ components = name.split('.')
+ parent_scope = scope
+ for i in range(len(components)):
+ alias = components[i]
+ if i < len(components) - 1:
+ parent_name = ".".join(components[:i+1])
+ __import__(parent_name)
+ mod = modules.get(parent_name)
+ if not isinstance(mod, types.ModuleType):
+ # Let __import__ raise an informative ImportError.
+ __import__(name)
+ parent_scope[alias] = mod
+ parent_scope = mod.__dict__
+ continue
+
+ already_imported = modules.get(name)
+ if already_imported is not None:
+ parent_scope[alias] = already_imported
+ else:
+ parent_scope[alias] = \
+ _LazyImport(parent_scope, alias, name)
+
+ else:
+ name, fromlist = parts
+ already_imported = modules.get(name)
+ fromlist = fromlist.split(',')
+ for s in fromlist:
+ if not s:
+ # This happens if there's an extra comma in fromlist.
+ raise ValueError('Empty module attribute name')
+ alias = s.split('@', 1)
+ if len(alias) == 1:
+ alias = alias[0]
+ attr_name = alias
+ else:
+ attr_name, alias = alias
+ if already_imported is not None:
+ try:
+ scope[alias] = getattr(already_imported, attr_name)
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so create a proxy.
+ already_imported = None
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
+ else:
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
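+
+# Usage sketch (editor's note): lazyimport() is typically called at module
+# scope; 'split_cmd' is a hypothetical alias chosen for illustration:
+#   lazyimport(globals(),
+#       'textwrap',
+#       'portage.util:writemsg,shlex_split@split_cmd')
+# Afterwards 'textwrap', 'writemsg' and 'split_cmd' are proxies that
+# trigger the real import on first use, then rebind the names in globals().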
diff --git a/usr/lib/portage/pym/portage/proxy/objectproxy.py b/usr/lib/portage/pym/portage/proxy/objectproxy.py
new file mode 100644
index 0000000..a755774
--- /dev/null
+++ b/usr/lib/portage/pym/portage/proxy/objectproxy.py
@@ -0,0 +1,98 @@
+# Copyright 2008-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+__all__ = ['ObjectProxy']
+
+class ObjectProxy(object):
+
+ """
+ Object that acts as a proxy to another object, forwarding
+ attribute accesses and method calls. This can be useful
+ for implementing lazy initialization.
+ """
+
+ __slots__ = ()
+
+ def _get_target(self):
+ raise NotImplementedError(self)
+
+ def __getattribute__(self, attr):
+ result = object.__getattribute__(self, '_get_target')()
+ return getattr(result, attr)
+
+ def __setattr__(self, attr, value):
+ result = object.__getattribute__(self, '_get_target')()
+ setattr(result, attr, value)
+
+ def __call__(self, *args, **kwargs):
+ result = object.__getattribute__(self, '_get_target')()
+ return result(*args, **kwargs)
+
+ def __enter__(self):
+ return object.__getattribute__(self, '_get_target')().__enter__()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ return object.__getattribute__(self, '_get_target')().__exit__(
+ exc_type, exc_value, traceback)
+
+ def __setitem__(self, key, value):
+ object.__getattribute__(self, '_get_target')()[key] = value
+
+ def __getitem__(self, key):
+ return object.__getattribute__(self, '_get_target')()[key]
+
+ def __delitem__(self, key):
+ del object.__getattribute__(self, '_get_target')()[key]
+
+ def __contains__(self, key):
+ return key in object.__getattribute__(self, '_get_target')()
+
+ def __iter__(self):
+ return iter(object.__getattribute__(self, '_get_target')())
+
+ def __len__(self):
+ return len(object.__getattribute__(self, '_get_target')())
+
+ def __repr__(self):
+ return repr(object.__getattribute__(self, '_get_target')())
+
+ def __str__(self):
+ return str(object.__getattribute__(self, '_get_target')())
+
+ def __add__(self, other):
+ return self.__str__() + other
+
+ def __hash__(self):
+ return hash(object.__getattribute__(self, '_get_target')())
+
+ def __ge__(self, other):
+ return object.__getattribute__(self, '_get_target')() >= other
+
+ def __gt__(self, other):
+ return object.__getattribute__(self, '_get_target')() > other
+
+ def __le__(self, other):
+ return object.__getattribute__(self, '_get_target')() <= other
+
+ def __lt__(self, other):
+ return object.__getattribute__(self, '_get_target')() < other
+
+ def __eq__(self, other):
+ return object.__getattribute__(self, '_get_target')() == other
+
+ def __ne__(self, other):
+ return object.__getattribute__(self, '_get_target')() != other
+
+ def __bool__(self):
+ return bool(object.__getattribute__(self, '_get_target')())
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __unicode__(self):
+ return unicode(object.__getattribute__(self, '_get_target')())
+
+ def __int__(self):
+ return int(object.__getattribute__(self, '_get_target')())
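+
+# Usage sketch (editor's note): _LazyList is a hypothetical subclass; the
+# only method a subclass must provide is _get_target():
+#   class _LazyList(ObjectProxy):
+#       def _get_target(self):
+#           return [1, 2, 3]   # built lazily on first access in real use
+#   len(_LazyList())   # -> 3, forwarded through __len__ to the target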
diff --git a/usr/lib/portage/pym/portage/repository/__init__.py b/usr/lib/portage/pym/portage/repository/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/repository/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/repository/config.py b/usr/lib/portage/pym/portage/repository/config.py
new file mode 100644
index 0000000..5e0d055
--- /dev/null
+++ b/usr/lib/portage/pym/portage/repository/config.py
@@ -0,0 +1,1080 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import logging
+import warnings
+import sys
+import re
+
+try:
+ from configparser import Error as ConfigParserError
+ if sys.hexversion >= 0x3020000:
+ from configparser import ConfigParser as SafeConfigParser
+ else:
+ from configparser import SafeConfigParser
+except ImportError:
+ from ConfigParser import SafeConfigParser, Error as ConfigParserError
+import portage
+from portage import eclass_cache, os
+from portage.const import (MANIFEST2_HASH_FUNCTIONS, MANIFEST2_REQUIRED_HASH,
+ PORTAGE_BASE_PATH, REPO_NAME_LOC, USER_CONFIG_PATH)
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
+ stack_lists, writemsg, writemsg_level, _recursive_file_list)
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.localization import _
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import _encodings
+from portage import manifest
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Characters prohibited by repoman's file.name check.
+_invalid_path_char_re = re.compile(r'[^a-zA-Z0-9._\-+:/]')
+
+_valid_profile_formats = frozenset(
+ ['pms', 'portage-1', 'portage-2'])
+
+_portage1_profiles_allow_directories = frozenset(
+ ["portage-1-compat", "portage-1", 'portage-2'])
+
+_repo_name_sub_re = re.compile(r'[^\w-]')
+
+def _gen_valid_repo(name):
+ """
+ Substitute hyphen in place of characters that don't conform to PMS 3.1.5,
+ and strip hyphen from left side if necessary. This returns None if the
+ given name contains no valid characters.
+ """
+ name = _repo_name_sub_re.sub(' ', name.strip())
+ name = '-'.join(name.split())
+ name = name.lstrip('-')
+ if not name:
+ name = None
+ return name
+
+def _find_invalid_path_char(path, pos=0, endpos=None):
+ """
+ Returns the position of the first invalid character found in path,
+ or -1 if no invalid characters are found.
+ """
+ if endpos is None:
+ endpos = len(path)
+
+ m = _invalid_path_char_re.search(path, pos=pos, endpos=endpos)
+ if m is not None:
+ return m.start()
+
+ return -1
+
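+# Behavior sketch (editor's note):
+#   _gen_valid_repo("my overlay!")         # -> 'my-overlay'
+#   _find_invalid_path_char("a b/c.txt")   # -> 1 (the space is invalid)
+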
+class RepoConfig(object):
+ """Stores config of one repository"""
+
+ __slots__ = ('aliases', 'allow_missing_manifest', 'allow_provide_virtual',
+ 'cache_formats', 'create_manifest', 'disable_manifest', 'eapi',
+ 'eclass_db', 'eclass_locations', 'eclass_overrides',
+ 'find_invalid_path_char', 'force', 'format', 'local_config', 'location',
+ 'main_repo', 'manifest_hashes', 'masters', 'missing_repo_name',
+ 'name', 'portage1_profiles', 'portage1_profiles_compat', 'priority',
+ 'profile_formats', 'sign_commit', 'sign_manifest', 'sync_cvs_repo',
+ 'sync_type', 'sync_uri', 'thin_manifest', 'update_changelog',
+ 'user_location', '_eapis_banned', '_eapis_deprecated', '_masters_orig')
+
+ def __init__(self, name, repo_opts, local_config=True):
+ """Build a RepoConfig with options in repo_opts
+ Try to read repo_name in the repository location; if
+ it is not found, use the passed-in name as the repository name"""
+
+ force = repo_opts.get('force')
+ if force is not None:
+ force = tuple(force.split())
+ self.force = force
+ if force is None:
+ force = ()
+
+ self.local_config = local_config
+
+ if local_config or 'aliases' in force:
+ aliases = repo_opts.get('aliases')
+ if aliases is not None:
+ aliases = tuple(aliases.split())
+ else:
+ aliases = None
+
+ self.aliases = aliases
+
+ if local_config or 'eclass-overrides' in force:
+ eclass_overrides = repo_opts.get('eclass-overrides')
+ if eclass_overrides is not None:
+ eclass_overrides = tuple(eclass_overrides.split())
+ else:
+ eclass_overrides = None
+
+ self.eclass_overrides = eclass_overrides
+ # Eclass databases and locations are computed later.
+ self.eclass_db = None
+ self.eclass_locations = None
+
+ if local_config or 'masters' in force:
+ # Masters from repos.conf override layout.conf.
+ masters = repo_opts.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ else:
+ masters = None
+
+ self.masters = masters
+
+ # The main-repo key only makes sense for the 'DEFAULT' section.
+ self.main_repo = repo_opts.get('main-repo')
+
+ priority = repo_opts.get('priority')
+ if priority is not None:
+ try:
+ priority = int(priority)
+ except ValueError:
+ priority = None
+ self.priority = priority
+
+ sync_cvs_repo = repo_opts.get('sync-cvs-repo')
+ if sync_cvs_repo is not None:
+ sync_cvs_repo = sync_cvs_repo.strip()
+ self.sync_cvs_repo = sync_cvs_repo or None
+
+ sync_type = repo_opts.get('sync-type')
+ if sync_type is not None:
+ sync_type = sync_type.strip()
+ self.sync_type = sync_type or None
+
+ sync_uri = repo_opts.get('sync-uri')
+ if sync_uri is not None:
+ sync_uri = sync_uri.strip()
+ self.sync_uri = sync_uri or None
+
+ # Not implemented.
+ format = repo_opts.get('format')
+ if format is not None:
+ format = format.strip()
+ self.format = format
+
+ location = repo_opts.get('location')
+ self.user_location = location
+ if location is not None and location.strip():
+ if os.path.isdir(location) or portage._sync_mode:
+ location = os.path.realpath(location)
+ else:
+ location = None
+ self.location = location
+
+ eapi = None
+ missing = True
+ self.name = name
+ if self.location is not None:
+ eapi = read_corresponding_eapi_file(os.path.join(self.location, REPO_NAME_LOC))
+ self.name, missing = self._read_valid_repo_name(self.location)
+ if missing:
+ # The name from repos.conf has to be used here for
+ # things like emerge-webrsync to work when the repo
+ # is empty (bug #484950).
+ if name is not None:
+ self.name = name
+ if portage._sync_mode:
+ missing = False
+
+ elif name == "DEFAULT":
+ missing = False
+
+ self.eapi = eapi
+ self.missing_repo_name = missing
+ # sign_commit is disabled by default, since it requires Git >=1.7.9,
+ # and key_id configured by `git config user.signingkey key_id`
+ self.sign_commit = False
+ self.sign_manifest = True
+ self.thin_manifest = False
+ self.allow_missing_manifest = False
+ self.allow_provide_virtual = False
+ self.create_manifest = True
+ self.disable_manifest = False
+ self.manifest_hashes = None
+ self.update_changelog = False
+ self.cache_formats = None
+ self.portage1_profiles = True
+ self.portage1_profiles_compat = False
+ self.find_invalid_path_char = _find_invalid_path_char
+ self._masters_orig = None
+
+ # Parse layout.conf.
+ if self.location:
+ layout_data = parse_layout_conf(self.location, self.name)[0]
+ self._masters_orig = layout_data['masters']
+
+ # layout.conf masters may be overridden here if we have a masters
+ # setting from the user's repos.conf
+ if self.masters is None:
+ self.masters = layout_data['masters']
+
+ if (local_config or 'aliases' in force) and layout_data['aliases']:
+ aliases = self.aliases
+ if aliases is None:
+ aliases = ()
+ # repos.conf aliases come after layout.conf aliases, giving
+ # them the ability to do incremental overrides
+ self.aliases = layout_data['aliases'] + tuple(aliases)
+
+ if layout_data['repo-name']:
+ # allow layout.conf to override repository name
+ # useful when having two copies of the same repo enabled
+ # to avoid modifying profiles/repo_name in one of them
+ self.name = layout_data['repo-name']
+
+ for value in ('allow-missing-manifest',
+ 'allow-provide-virtual', 'cache-formats',
+ 'create-manifest', 'disable-manifest', 'manifest-hashes',
+ 'profile-formats',
+ 'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
+ setattr(self, value.lower().replace("-", "_"), layout_data[value])
+
+ self.portage1_profiles = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ self.portage1_profiles_compat = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+
+ self._eapis_banned = frozenset(layout_data['eapis-banned'])
+ self._eapis_deprecated = frozenset(layout_data['eapis-deprecated'])
+
+ def eapi_is_banned(self, eapi):
+ return eapi in self._eapis_banned
+
+ def eapi_is_deprecated(self, eapi):
+ return eapi in self._eapis_deprecated
+
+ def iter_pregenerated_caches(self, auxdbkeys, readonly=True, force=False):
+ """
+ Reads layout.conf cache-formats from left to right and yields cache
+ instances for each supported type that's found. If no cache-formats
+ are specified in layout.conf, the 'md5-dict' format is assumed
+ if force is True (otherwise no cache instances are yielded).
+ """
+ formats = self.cache_formats
+ if not formats:
+ if not force:
+ return
+ # The default egencache format was 'pms' prior to portage-2.1.11.32
+ # (portage versions prior to portage-2.1.11.14 will NOT
+ # recognize md5-dict format unless it is explicitly listed in
+ # layout.conf).
+ formats = ('md5-dict',)
+
+ for fmt in formats:
+ name = None
+ if fmt == 'pms':
+ from portage.cache.metadata import database
+ name = 'metadata/cache'
+ elif fmt == 'md5-dict':
+ from portage.cache.flat_hash import md5_database as database
+ name = 'metadata/md5-cache'
+
+ if name is not None:
+ yield database(self.location, name,
+ auxdbkeys, readonly=readonly)
+
+ def get_pregenerated_cache(self, auxdbkeys, readonly=True, force=False):
+ """
+ Returns the first cache instance yielded from
+ iter_pregenerated_caches(), or None if no cache is available or none
+ of the available formats are supported.
+ """
+ return next(self.iter_pregenerated_caches(
+ auxdbkeys, readonly=readonly, force=force), None)
+
+ def load_manifest(self, *args, **kwds):
+ kwds['thin'] = self.thin_manifest
+ kwds['allow_missing'] = self.allow_missing_manifest
+ kwds['allow_create'] = self.create_manifest
+ kwds['hashes'] = self.manifest_hashes
+ if self.disable_manifest:
+ kwds['from_scratch'] = True
+ kwds['find_invalid_path_char'] = self.find_invalid_path_char
+ return manifest.Manifest(*args, **portage._native_kwargs(kwds))
+
+ def update(self, new_repo):
+ """Update repository with options in another RepoConfig"""
+
+ keys = set(self.__slots__)
+ keys.discard("missing_repo_name")
+ for k in keys:
+ v = getattr(new_repo, k, None)
+ if v is not None:
+ setattr(self, k, v)
+
+ if new_repo.name is not None:
+ self.missing_repo_name = new_repo.missing_repo_name
+
+ @staticmethod
+ def _read_valid_repo_name(repo_path):
+ name, missing = RepoConfig._read_repo_name(repo_path)
+ # We must ensure that the name conforms to PMS 3.1.5
+ # in order to avoid InvalidAtom exceptions when we
+ # use it to generate atoms.
+ name = _gen_valid_repo(name)
+ if not name:
+ # name only contains invalid characters
+ name = "x-" + os.path.basename(repo_path)
+ name = _gen_valid_repo(name)
+ # If basename only contains whitespace then the
+ # end result is name = 'x-'.
+ return name, missing
+
+ @staticmethod
+ def _read_repo_name(repo_path):
+ """
+ Read repo_name from repo_path.
+ Returns repo_name, missing.
+ """
+ repo_name_path = os.path.join(repo_path, REPO_NAME_LOC)
+ f = None
+ try:
+ f = io.open(
+ _unicode_encode(repo_name_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ return f.readline().strip(), False
+ except EnvironmentError:
+ return "x-" + os.path.basename(repo_path), True
+ finally:
+ if f is not None:
+ f.close()
+
+ def info_string(self):
+ """
+ Returns a formatted string containing information about the repository.
+ Used by emerge --info.
+ """
+ indent = " " * 4
+ repo_msg = []
+ repo_msg.append(self.name)
+ if self.format:
+ repo_msg.append(indent + "format: " + self.format)
+ if self.user_location:
+ repo_msg.append(indent + "location: " + self.user_location)
+ if self.sync_cvs_repo:
+ repo_msg.append(indent + "sync-cvs-repo: " + self.sync_cvs_repo)
+ if self.sync_type:
+ repo_msg.append(indent + "sync-type: " + self.sync_type)
+ if self.sync_uri:
+ repo_msg.append(indent + "sync-uri: " + self.sync_uri)
+ if self.masters:
+ repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
+ if self.priority is not None:
+ repo_msg.append(indent + "priority: " + str(self.priority))
+ if self.aliases:
+ repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
+ if self.eclass_overrides:
+ repo_msg.append(indent + "eclass-overrides: " + \
+ " ".join(self.eclass_overrides))
+ repo_msg.append("")
+ return "\n".join(repo_msg)
+
+ def __repr__(self):
+ return "<portage.repository.config.RepoConfig(name=%r, location=%r)>" % (self.name, _unicode_decode(self.location))
+
+ def __str__(self):
+ d = {}
+ for k in self.__slots__:
+ d[k] = getattr(self, k, None)
+ return "%s" % (d,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__())
+
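+# Usage sketch (editor's note; /var/gentoo-local is a hypothetical path):
+#   repo = RepoConfig("local", {"location": "/var/gentoo-local",
+#       "masters": "gentoo", "priority": "50"})
+#   repo.priority   # -> 50 (parsed to int)
+#   repo.masters    # -> ('gentoo',) until RepoConfigLoader resolves the
+#                   #    names into RepoConfig objects
+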
+class RepoConfigLoader(object):
+ """Loads and store config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
+
+ @staticmethod
+ def _add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, ignored_location_map, local_config, default_portdir):
+ """Add overlays in PORTDIR_OVERLAY as repositories"""
+ overlays = []
+ portdir_orig = None
+ if portdir:
+ portdir = normalize_path(portdir)
+ portdir_orig = portdir
+ overlays.append(portdir)
+ try:
+ port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
+ except ValueError as e:
+ #File "/usr/lib/python3.2/shlex.py", line 168, in read_token
+ # raise ValueError("No closing quotation")
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
+ " %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
+ port_ov = []
+ overlays.extend(port_ov)
+ default_repo_opts = {}
+ if prepos['DEFAULT'].aliases is not None:
+ default_repo_opts['aliases'] = \
+ ' '.join(prepos['DEFAULT'].aliases)
+ if prepos['DEFAULT'].eclass_overrides is not None:
+ default_repo_opts['eclass-overrides'] = \
+ ' '.join(prepos['DEFAULT'].eclass_overrides)
+ if prepos['DEFAULT'].masters is not None:
+ default_repo_opts['masters'] = \
+ ' '.join(prepos['DEFAULT'].masters)
+
+ if overlays:
+ # We need a copy of the original repos.conf data, since we're
+ # going to modify the prepos dict and some of the RepoConfig
+ # objects that we put in prepos may have to be discarded if
+ # they get overridden by a repository with the same name but
+ # a different location. This is common with repoman, for example,
+ # when temporarily overriding an rsync repo with another copy
+ # of the same repo from CVS.
+ repos_conf = prepos.copy()
+ # Overlay priorities are assigned so that overlays are consulted before any other repo.
+ base_priority = 0
+ for ov in overlays:
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if isdir_raise_eaccess(ov) or \
+ (base_priority == 0 and ov is portdir):
+ repo_opts = default_repo_opts.copy()
+ repo_opts['location'] = ov
+ repo = RepoConfig(None, repo_opts, local_config=local_config)
+ # repos_conf_opts contains options from repos.conf
+ repos_conf_opts = repos_conf.get(repo.name)
+ if repos_conf_opts is not None:
+ # Selectively copy only the attributes which
+ # repos.conf is allowed to override.
+ for k in ('aliases', 'eclass_overrides', 'force', 'masters',
+ 'priority', 'sync_cvs_repo', 'sync_type', 'sync_uri'):
+ v = getattr(repos_conf_opts, k, None)
+ if v is not None:
+ setattr(repo, k, v)
+
+ if repo.name in prepos:
+ # Silently ignore when PORTDIR overrides the location
+ # setting from the default repos.conf (bug #478544).
+ old_location = prepos[repo.name].location
+ if old_location is not None and \
+ old_location != repo.location and \
+ not (base_priority == 0 and
+ old_location == default_portdir):
+ ignored_map.setdefault(repo.name, []).append(old_location)
+ ignored_location_map[old_location] = repo.name
+ if old_location == portdir:
+ portdir = repo.user_location
+
+ if repo.priority is None:
+ if base_priority == 0 and ov == portdir_orig:
+ # If it's the original PORTDIR setting and it's not
+ # in PORTDIR_OVERLAY, then it will be assigned a
+ # special priority setting later.
+ pass
+ else:
+ repo.priority = base_priority
+ base_priority += 1
+
+ prepos[repo.name] = repo
+ else:
+
+ if not portage._sync_mode:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ return portdir
+
+ @staticmethod
+ def _parse(paths, prepos, ignored_map, ignored_location_map, local_config, portdir):
+ """Parse files in paths to load config"""
+ parser = SafeConfigParser()
+
+ # use read_file/readfp in order to control decoding of unicode
+ try:
+ # Python >=3.2
+ read_file = parser.read_file
+ source_kwarg = 'source'
+ except AttributeError:
+ read_file = parser.readfp
+ source_kwarg = 'filename'
+
+ recursive_paths = []
+ for p in paths:
+ if isinstance(p, basestring):
+ recursive_paths.extend(_recursive_file_list(p))
+ else:
+ recursive_paths.append(p)
+
+ for p in recursive_paths:
+ if isinstance(p, basestring):
+ f = None
+ try:
+ f = io.open(_unicode_encode(p,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ # The 'source' keyword argument is needed since otherwise
+ # ConfigParser in Python <3.3.3 may throw a TypeError
+ # because it assumes that f.name is a native string rather
+ # than binary when constructing error messages.
+ kwargs = {source_kwarg: p}
+ read_file(f, **portage._native_kwargs(kwargs))
+ finally:
+ if f is not None:
+ f.close()
+ elif isinstance(p, io.StringIO):
+ kwargs = {source_kwarg: "<io.StringIO>"}
+ read_file(p, **portage._native_kwargs(kwargs))
+ else:
+ raise TypeError("Unsupported type %r of element %r of 'paths' argument" % (type(p), p))
+
+ prepos['DEFAULT'] = RepoConfig("DEFAULT",
+ parser.defaults(), local_config=local_config)
+
+ for sname in parser.sections():
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ repo = RepoConfig(sname, optdict, local_config=local_config)
+
+ if repo.sync_type is not None and repo.sync_uri is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute, but is missing sync-uri attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_uri is not None and repo.sync_type is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-uri attribute, but is missing sync-type attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type not in (None, "cvs", "git", "rsync"):
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute set to unsupported value: '%s'") %
+ (sname, repo.sync_type), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type == "cvs" and repo.sync_cvs_repo is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type=cvs, but is missing sync-cvs-repo attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ # For backward compatibility with locations set via PORTDIR and
+ # PORTDIR_OVERLAY, delay validation of the location and repo.name
+ # until after PORTDIR and PORTDIR_OVERLAY have been processed.
+ prepos[sname] = repo
+
+ def __init__(self, paths, settings):
+ """Load config from files in paths"""
+
+ prepos = {}
+ location_map = {}
+ treemap = {}
+ ignored_map = {}
+ ignored_location_map = {}
+
+ if "PORTAGE_REPOSITORIES" in settings:
+ portdir = ""
+ portdir_overlay = ""
+ portdir_sync = ""
+ else:
+ portdir = settings.get("PORTDIR", "")
+ portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
+ portdir_sync = settings.get("SYNC", "")
+
+ try:
+ self._parse(paths, prepos, ignored_map,
+ ignored_location_map, settings.local_config,
+ portdir)
+ except ConfigParserError as e:
+ writemsg(
+ _("!!! Error while reading repo config file: %s\n") % e,
+ noiselevel=-1)
+ # The configparser state is unreliable (prone to quirky
+ # exceptions) after it has thrown an error, so use empty
+ # config and try to fall back to PORTDIR{,_OVERLAY}.
+ prepos.clear()
+ prepos['DEFAULT'] = RepoConfig('DEFAULT',
+ {}, local_config=settings.local_config)
+ location_map.clear()
+ treemap.clear()
+ ignored_map.clear()
+ ignored_location_map.clear()
+
+ default_portdir = os.path.join(os.sep,
+ settings['EPREFIX'].lstrip(os.sep), 'usr', 'portage')
+
+ # If PORTDIR_OVERLAY contains a repo with the same repo_name as
+ # PORTDIR, then PORTDIR is overridden.
+ portdir = self._add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, ignored_location_map, settings.local_config,
+ default_portdir)
+ if portdir and portdir.strip():
+ portdir = os.path.realpath(portdir)
+
+ ignored_repos = tuple((repo_name, tuple(paths)) \
+ for repo_name, paths in ignored_map.items())
+
+ self.missing_repo_names = frozenset(repo.location
+ for repo in prepos.values()
+ if repo.location is not None and repo.missing_repo_name)
+
+ # Do this before expanding aliases, so that location_map and
+ # treemap consistently map unaliased names whenever available.
+ for repo_name, repo in list(prepos.items()):
+ if repo.location is None:
+ if repo_name != 'DEFAULT':
+ # Skip this warning for repoman (bug #474578).
+ if settings.local_config and paths:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf is missing location attribute") %
+ repo.name, level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+ else:
+ if not portage._sync_mode:
+ if not isdir_raise_eaccess(repo.location):
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has location attribute set "
+ "to nonexistent directory: '%s'") %
+ (repo_name, repo.location), level=logging.ERROR, noiselevel=-1)
+
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if repo.name != 'gentoo':
+ del prepos[repo_name]
+ continue
+
+ # After removing support for PORTDIR_OVERLAY, the following check can be:
+ # if repo.missing_repo_name:
+ if repo.missing_repo_name and repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf refers to repository "
+ "without repository name set in '%s'") %
+ (repo_name, os.path.join(repo.location, REPO_NAME_LOC)), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ if repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has name different "
+ "from repository name '%s' set inside repository") %
+ (repo_name, repo.name), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ location_map[repo.location] = repo_name
+ treemap[repo_name] = repo.location
+
+ # Add alias mappings, but never replace unaliased mappings.
+ for repo_name, repo in list(prepos.items()):
+ names = set()
+ names.add(repo_name)
+ if repo.aliases:
+ aliases = stack_lists([repo.aliases], incremental=True)
+ names.update(aliases)
+
+ for name in names:
+ if name in prepos and prepos[name].location is not None:
+ if name == repo_name:
+ # unaliased names already handled earlier
+ continue
+ writemsg_level(_("!!! Repository name or alias '%s', " + \
+ "defined for repository '%s', overrides " + \
+ "existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ continue
+ prepos[name] = repo
+ if repo.location is not None:
+ if repo.location not in location_map:
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ location_map[repo.location] = name
+ treemap[name] = repo.location
+
+ main_repo = prepos['DEFAULT'].main_repo
+ if main_repo is None or main_repo not in prepos:
+ # Set main_repo if it was not set in repos.conf.
+ main_repo = location_map.get(portdir)
+ if main_repo is not None:
+ prepos['DEFAULT'].main_repo = main_repo
+ else:
+ prepos['DEFAULT'].main_repo = None
+ if portdir and not portage._sync_mode:
+ writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"), noiselevel=-1)
+
+ if main_repo is not None and prepos[main_repo].priority is None:
+ # This happens if main-repo has been set in repos.conf.
+ prepos[main_repo].priority = -1000
+
+ # Backward compatible SYNC support for mirrorselect.
+ if portdir_sync and main_repo is not None:
+ if portdir_sync.startswith("rsync://"):
+ prepos[main_repo].sync_uri = portdir_sync
+ prepos[main_repo].sync_type = "rsync"
+
+ # Include repo.name in sort key, for predictable sorting
+ # even when priorities are equal.
+ prepos_order = sorted(prepos.items(),
+ key=lambda r:(r[1].priority or 0, r[1].name))
+
+ # filter duplicates from aliases, by only including
+ # items where repo.name == key
+ prepos_order = [repo.name for (key, repo) in prepos_order
+ if repo.name == key and key != 'DEFAULT' and
+ repo.location is not None]
+
+ self.prepos = prepos
+ self.prepos_order = prepos_order
+ self.ignored_repos = ignored_repos
+ self.location_map = location_map
+ self.treemap = treemap
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ #The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+ if repo.masters is None:
+ if self.mainRepo() and repo_name != self.mainRepo().name:
+ repo.masters = (self.mainRepo(),)
+ else:
+ repo.masters = ()
+ else:
+ if repo.masters and isinstance(repo.masters[0], RepoConfig):
+ # This one has already been processed
+ # because it has an alias.
+ continue
+ master_repos = []
+ for master_name in repo.masters:
+ if master_name not in prepos:
+ layout_filename = os.path.join(repo.user_location,
+ "metadata", "layout.conf")
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by masters entry in '%s'\n") % \
+ (master_name, layout_filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ master_repos.append(prepos[master_name])
+ repo.masters = tuple(master_repos)
+
+ #The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ eclass_locations = []
+ eclass_locations.extend(master_repo.location for master_repo in repo.masters)
+ # Only append the current repo to eclass_locations if it's not
+ # there already. This allows masters to have more control over
+ # eclass override order, which may be useful for scenarios in
+ # which there is a plan to migrate eclasses to a master repo.
+ if repo.location not in eclass_locations:
+ eclass_locations.append(repo.location)
+
+ if repo.eclass_overrides:
+ for other_repo_name in repo.eclass_overrides:
+ if other_repo_name in self.treemap:
+ eclass_locations.append(self.get_location_for_name(other_repo_name))
+ else:
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by eclass-overrides entry for " \
+ "'%s'\n") % (other_repo_name, repo_name), \
+ level=logging.ERROR, noiselevel=-1)
+ repo.eclass_locations = tuple(eclass_locations)
+
+ eclass_dbs = {}
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ eclass_db = None
+ for eclass_location in repo.eclass_locations:
+ tree_db = eclass_dbs.get(eclass_location)
+ if tree_db is None:
+ tree_db = eclass_cache.cache(eclass_location)
+ eclass_dbs[eclass_location] = tree_db
+ if eclass_db is None:
+ eclass_db = tree_db.copy()
+ else:
+ eclass_db.append(tree_db)
+ repo.eclass_db = eclass_db
+
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ if repo._masters_orig is None and self.mainRepo() and \
+ repo.name != self.mainRepo().name and not portage._sync_mode:
+ # TODO: Delete masters code in pym/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
+ writemsg_level("!!! %s\n" % _("Repository '%s' is missing masters attribute in '%s'") %
+ (repo.name, os.path.join(repo.location, "metadata", "layout.conf")) +
+ "!!! %s\n" % _("Set 'masters = %s' in this file for future compatibility") %
+ self.mainRepo().name, level=logging.WARNING, noiselevel=-1)
+
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ self._check_locations()
+
+ def repoLocationList(self):
+ """Get a list of repositories location. Replaces PORTDIR_OVERLAY"""
+ if self._prepos_changed:
+ _repo_location_list = []
+ for repo in self.prepos_order:
+ if self.prepos[repo].location is not None:
+ _repo_location_list.append(self.prepos[repo].location)
+ self._repo_location_list = tuple(_repo_location_list)
+
+ self._prepos_changed = False
+ return self._repo_location_list
+
+ def repoUserLocationList(self):
+ """Get a list of repositories location. Replaces PORTDIR_OVERLAY"""
+ user_location_list = []
+ for repo in self.prepos_order:
+ if self.prepos[repo].location is not None:
+ user_location_list.append(self.prepos[repo].user_location)
+ return tuple(user_location_list)
+
+ def mainRepoLocation(self):
+ """Returns the location of main repo"""
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is not None and main_repo in self.prepos:
+ return self.prepos[main_repo].location
+ else:
+ return ''
+
+ def mainRepo(self):
+ """Returns the main repo"""
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is None:
+ return None
+ return self.prepos[main_repo]
+
+ def _check_locations(self):
+ """Check if repositories location are correct and show a warning message if not"""
+ for (name, r) in self.prepos.items():
+ if name != 'DEFAULT':
+ if r.location is None:
+ writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
+ else:
+ if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
+ self.prepos_order.remove(name)
+ writemsg(_("!!! Invalid Repository Location"
+ " (not a dir): '%s'\n") % r.location, noiselevel=-1)
+
+ def repos_with_profiles(self):
+ for repo_name in self.prepos_order:
+ repo = self.prepos[repo_name]
+ if repo.format != "unavailable":
+ yield repo
+
+ def get_name_for_location(self, location):
+ return self.location_map[location]
+
+ def get_location_for_name(self, repo_name):
+ if repo_name is None:
+ # This simplifies code in places where
+ # we want to be able to pass in Atom.repo
+ # even if it is None.
+ return None
+ return self.treemap[repo_name]
+
+ def get_repo_for_location(self, location):
+ return self.prepos[self.get_name_for_location(location)]
+
+ def __setitem__(self, repo_name, repo):
+ # self.prepos[repo_name] = repo
+ raise NotImplementedError
+
+ def __getitem__(self, repo_name):
+ return self.prepos[repo_name]
+
+ def __delitem__(self, repo_name):
+ if repo_name == self.prepos['DEFAULT'].main_repo:
+ self.prepos['DEFAULT'].main_repo = None
+ location = self.prepos[repo_name].location
+ del self.prepos[repo_name]
+ if repo_name in self.prepos_order:
+ self.prepos_order.remove(repo_name)
+ for k, v in self.location_map.copy().items():
+ if v == repo_name:
+ del self.location_map[k]
+ if repo_name in self.treemap:
+ del self.treemap[repo_name]
+ self._repo_location_list = tuple(x for x in self._repo_location_list if x != location)
+
+ def __iter__(self):
+ for repo_name in self.prepos_order:
+ yield self.prepos[repo_name]
+
+ def __contains__(self, repo_name):
+ return repo_name in self.prepos
+
+ def config_string(self):
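+		"""Serialize this configuration as repos.conf-style text. Illustrative
+		output for a hypothetical setup:
+
+		[DEFAULT]
+		main-repo = gentoo
+
+		[gentoo]
+		location = /usr/portage
+		"""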
+ str_or_int_keys = ("format", "location", "main_repo", "priority", "sync_cvs_repo", "sync_type", "sync_uri")
+ str_tuple_keys = ("aliases", "eclass_overrides", "force")
+ repo_config_tuple_keys = ("masters",)
+ keys = str_or_int_keys + str_tuple_keys + repo_config_tuple_keys
+ config_string = ""
+ for repo_name, repo in sorted(self.prepos.items(), key=lambda x: (x[0] != "DEFAULT", x[0])):
+ config_string += "\n[%s]\n" % repo_name
+ for key in sorted(keys):
+ if key == "main_repo" and repo_name != "DEFAULT":
+ continue
+ if getattr(repo, key) is not None:
+ if key in str_or_int_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), getattr(repo, key))
+ elif key in str_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(getattr(repo, key)))
+ elif key in repo_config_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(x.name for x in getattr(repo, key)))
+ return config_string.lstrip("\n")
+
+def load_repository_config(settings, extra_files=None):
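+	"""Build a RepoConfigLoader for the given settings. When
+	PORTAGE_REPOSITORIES is set it overrides all config files; otherwise the
+	global repos.conf is read first, the user's repos.conf under
+	PORTAGE_CONFIGROOT is layered on top, and any extra_files come last."""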
+ repoconfigpaths = []
+ if "PORTAGE_REPOSITORIES" in settings:
+ repoconfigpaths.append(io.StringIO(settings["PORTAGE_REPOSITORIES"]))
+ else:
+ if portage._not_installed:
+ repoconfigpaths.append(os.path.join(PORTAGE_BASE_PATH, "cnf", "repos.conf"))
+ else:
+ repoconfigpaths.append(os.path.join(settings.global_config_path, "repos.conf"))
+ repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH, "repos.conf"))
+ if extra_files:
+ repoconfigpaths.extend(extra_files)
+ return RepoConfigLoader(repoconfigpaths, settings)
+
+def _get_repo_name(repo_location, cached=None):
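+	"""Return the cached name if given, else read the repo name from the
+	repository's profiles/repo_name file, returning None when that file is
+	missing (e.g. _get_repo_name("/usr/portage") -- illustrative path)."""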
+ if cached is not None:
+ return cached
+ name, missing = RepoConfig._read_repo_name(repo_location)
+ if missing:
+ return None
+ return name
+
+def parse_layout_conf(repo_location, repo_name=None):
+ eapi = read_corresponding_eapi_file(os.path.join(repo_location, REPO_NAME_LOC))
+
+ layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
+ layout_file = KeyValuePairFileLoader(layout_filename, None, None)
+ layout_data, layout_errors = layout_file.load()
+
+ data = {}
+
+	# None indicates absence of a masters setting, which later code uses
+ # to trigger a backward compatibility fallback that sets an implicit
+ # master. In order to avoid this fallback behavior, layout.conf can
+ # explicitly set masters to an empty value, which will result in an
+ # empty tuple here instead of None.
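+	# For example (illustrative):
+	#   masters = gentoo    ->  ('gentoo',)
+	#   masters =           ->  ()
+	#   (no masters line)   ->  None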
+ masters = layout_data.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ data['masters'] = masters
+ data['aliases'] = tuple(layout_data.get('aliases', '').split())
+
+ data['allow-provide-virtual'] = \
+ layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'
+
+ data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
+ data['eapis-deprecated'] = tuple(layout_data.get('eapis-deprecated', '').split())
+
+ data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
+ == 'true'
+
+ data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
+ == 'true'
+
+ data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
+ == 'true'
+
+ data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))
+
+ manifest_policy = layout_data.get('use-manifests', 'strict').lower()
+ data['allow-missing-manifest'] = manifest_policy != 'strict'
+ data['create-manifest'] = manifest_policy != 'false'
+ data['disable-manifest'] = manifest_policy == 'false'
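+	# Summary of the mapping above (illustrative): "strict" (the default)
+	# requires and creates manifests, "true" creates them but tolerates
+	# missing ones, and "false" disables them entirely; any other value
+	# behaves like "true".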
+
+	# For compatibility with PMS, fall back to the pms cache format; but
+	# first check which caches actually exist on disk.
+ cache_formats = layout_data.get('cache-formats', '').lower().split()
+ if not cache_formats:
+ # Auto-detect cache formats, and prefer md5-cache if available.
+ # This behavior was deployed in portage-2.1.11.14, so that the
+ # default egencache format could eventually be changed to md5-dict
+ # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
+ # will NOT recognize md5-dict format unless it is explicitly
+ # listed in layout.conf.
+ cache_formats = []
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
+ cache_formats.append('md5-dict')
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
+ cache_formats.append('pms')
+ data['cache-formats'] = tuple(cache_formats)
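+	# An explicit layout.conf setting takes precedence over this
+	# auto-detection, e.g.: cache-formats = md5-dict pms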
+
+ manifest_hashes = layout_data.get('manifest-hashes')
+ if manifest_hashes is not None:
+ manifest_hashes = frozenset(manifest_hashes.upper().split())
+ if MANIFEST2_REQUIRED_HASH not in manifest_hashes:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has a "
+ "'manifest-hashes' setting that does not contain "
+ "the '%(hash)s' hash which is required by this "
+ "portage version. You will have to upgrade portage "
+ "if you want to generate valid manifests for this "
+ "repository: %(layout_filename)s") %
+ {"repo_name": repo_name or 'unspecified',
+ "hash":MANIFEST2_REQUIRED_HASH,
+ "layout_filename":layout_filename}),
+ DeprecationWarning)
+ unsupported_hashes = manifest_hashes.difference(
+ MANIFEST2_HASH_FUNCTIONS)
+ if unsupported_hashes:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has a "
+ "'manifest-hashes' setting that contains one "
+ "or more hash types '%(hashes)s' which are not supported by "
+ "this portage version. You will have to upgrade "
+ "portage if you want to generate valid manifests for "
+ "this repository: %(layout_filename)s") %
+ {"repo_name": repo_name or 'unspecified',
+ "hashes":" ".join(sorted(unsupported_hashes)),
+ "layout_filename":layout_filename}),
+ DeprecationWarning)
+ data['manifest-hashes'] = manifest_hashes
+
+ data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
+ == 'true'
+
+ raw_formats = layout_data.get('profile-formats')
+ if raw_formats is None:
+ if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ raw_formats = ('portage-1',)
+ else:
+ raw_formats = ('portage-1-compat',)
+ else:
+ raw_formats = set(raw_formats.split())
+ unknown = raw_formats.difference(_valid_profile_formats)
+ if unknown:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has unsupported "
+ "profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
+				"'%(layout_filename)s'; please upgrade portage.") %
+ dict(repo_name=repo_name or 'unspecified',
+ layout_filename=layout_filename,
+ unknown_fmts=" ".join(unknown))),
+ DeprecationWarning)
+ raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
+ data['profile-formats'] = raw_formats
+
+ return data, layout_errors
diff --git a/usr/lib/portage/pym/portage/tests/__init__.py b/usr/lib/portage/pym/portage/tests/__init__.py
new file mode 100644
index 0000000..afa57e3
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/__init__.py
@@ -0,0 +1,355 @@
+# tests/__init__.py -- Portage Unit Test functionality
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+import unittest
+
+try:
+ from unittest.runner import _TextTestResult # new in python-2.7
+except ImportError:
+ from unittest import _TextTestResult
+
+try:
+ # They added the skip framework to python-2.7.
+ # Drop this once we drop python-2.6 support.
+ unittest_skip_shims = False
+	from unittest import SkipTest # new in python-2.7
+except ImportError:
+ unittest_skip_shims = True
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage.const import (EPREFIX, GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
+ PORTAGE_BIN_PATH)
+from portage.util._argparse import ArgumentParser
+
+
+if portage._not_installed:
+ cnf_path = os.path.join(PORTAGE_BASE_PATH, 'cnf')
+ cnf_etc_path = cnf_path
+ cnf_bindir = PORTAGE_BIN_PATH
+ cnf_sbindir = cnf_bindir
+else:
+ cnf_path = os.path.join(EPREFIX or '/', GLOBAL_CONFIG_PATH)
+ cnf_etc_path = os.path.join(EPREFIX or '/', 'etc')
+ cnf_eprefix = EPREFIX
+ cnf_bindir = os.path.join(EPREFIX or '/', 'usr', 'bin')
+ cnf_sbindir = os.path.join(EPREFIX or '/', 'usr', 'sbin')
+
+
+def main():
+ suite = unittest.TestSuite()
+ basedir = os.path.dirname(os.path.realpath(__file__))
+
+ usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument("-l", "--list", help="list all tests",
+ action="store_true", dest="list_tests")
+ options, args = parser.parse_known_args(args=sys.argv)
+
+ if (os.environ.get('NOCOLOR') in ('yes', 'true') or
+ os.environ.get('TERM') == 'dumb' or
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
+
+ if options.list_tests:
+ testdir = os.path.dirname(sys.argv[0])
+ for mydir in getTestDirs(basedir):
+ testsubdir = os.path.basename(mydir)
+ for name in getTestNames(mydir):
+ print("%s/%s/%s.py" % (testdir, testsubdir, name))
+ return os.EX_OK
+
+ if len(args) > 1:
+ suite.addTests(getTestFromCommandLine(args[1:], basedir))
+ else:
+ for mydir in getTestDirs(basedir):
+ suite.addTests(getTests(os.path.join(basedir, mydir), basedir))
+
+ result = TextTestRunner(verbosity=2).run(suite)
+ if not result.wasSuccessful():
+ return 1
+ return os.EX_OK
+
+def my_import(name):
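+	"""Import a dotted module path and return the leaf module, unlike a
+	bare __import__("a.b.c") which returns the top-level package "a".
+	For example (illustrative): my_import("portage.tests.dep")."""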
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getTestFromCommandLine(args, base_path):
+ result = []
+ for arg in args:
+ realpath = os.path.realpath(arg)
+ path = os.path.dirname(realpath)
+ f = realpath[len(path)+1:]
+
+ if not f.startswith("test") or not f.endswith(".py"):
+ raise Exception("Invalid argument: '%s'" % arg)
+
+ mymodule = f[:-3]
+ result.extend(getTestsFromFiles(path, base_path, [mymodule]))
+ return result
+
+def getTestDirs(base_path):
+ TEST_FILE = b'__test__.py'
+ testDirs = []
+
+	# The os.walk docs mention that relative paths can be quirky, and
+	# maintaining an explicit list of test dirs was tedious, so instead we
+	# add a __test__.py marker to each dir we want tested.
+ for root, dirs, files in os.walk(base_path):
+ try:
+ root = _unicode_decode(root,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+
+ if TEST_FILE in files:
+ testDirs.append(root)
+
+ testDirs.sort()
+ return testDirs
+
+def getTestNames(path):
+ files = os.listdir(path)
+ files = [f[:-3] for f in files if f.startswith("test") and f.endswith(".py")]
+ files.sort()
+ return files
+
+def getTestsFromFiles(path, base_path, files):
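+	"""Load TestCases from the given module basenames. For example
+	(illustrative), path=<base>/dep with files=["testAtom"] imports
+	portage.tests.dep.testAtom and loads its tests."""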
+ parent_path = path[len(base_path)+1:]
+ parent_module = ".".join(("portage", "tests", parent_path))
+ parent_module = parent_module.replace('/', '.')
+ result = []
+ for mymodule in files:
+ # Make the trailing / a . for module importing
+ modname = ".".join((parent_module, mymodule))
+ mod = my_import(modname)
+ result.append(unittest.TestLoader().loadTestsFromModule(mod))
+ return result
+
+def getTests(path, base_path):
+	"""
+	path is the path to a given subdir ('portage/' for example).
+	This does a simple filter on files in that dir to give us modules
+	to import.
+	"""
+ return getTestsFromFiles(path, base_path, getTestNames(path))
+
+class TextTestResult(_TextTestResult):
+ """
+ We need a subclass of unittest._TextTestResult to handle tests with TODO
+
+ This just adds an addTodo method that can be used to add tests
+ that are marked TODO; these can be displayed later
+ by the test runner.
+ """
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+ self.todoed = []
+ self.portage_skipped = []
+
+ def addTodo(self, test, info):
+ self.todoed.append((test, info))
+ if self.showAll:
+ self.stream.writeln("TODO")
+ elif self.dots:
+ self.stream.write(".")
+
+ def addPortageSkip(self, test, info):
+ self.portage_skipped.append((test, info))
+ if self.showAll:
+ self.stream.writeln("SKIP")
+ elif self.dots:
+ self.stream.write(".")
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+ self.printErrorList('TODO', self.todoed)
+ self.printErrorList('SKIP', self.portage_skipped)
+
+class TestCase(unittest.TestCase):
+ """
+	We need a way to mark a unit test as "ok to fail": someone can add a
+	broken test, mark it as an expected failure, and fix the code later.
+	This may not be a great approach (shipping broken code?!), but it does
+	happen at times.
+ """
+
+ def __init__(self, *pargs, **kwargs):
+ unittest.TestCase.__init__(self, *pargs, **kwargs)
+ self.todo = False
+ self.portage_skip = None
+ self.cnf_path = cnf_path
+ self.cnf_etc_path = cnf_etc_path
+ self.bindir = cnf_bindir
+ # sbin scripts are installed by setup.py to the bindir
+ # they are relocated to /usr/sbin dir by the ebuild later
+ self.sbindir = self.bindir
+
+	def defaultTestResult(self):
+		# TextTestResult requires stream/descriptions/verbosity arguments.
+		return TextTestResult(sys.stderr, True, 1)
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ result.startTest(self)
+ testMethod = getattr(self, self._testMethodName)
+ try:
+ try:
+ self.setUp()
+ except SystemExit:
+ raise
+ except KeyboardInterrupt:
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ return
+
+ ok = False
+ try:
+ testMethod()
+ ok = True
+ except SkipTest as e:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, str(e)))
+ except self.failureException:
+ if self.portage_skip is not None:
+ if self.portage_skip is True:
+ result.addPortageSkip(self, "%s: SKIP" % testMethod)
+ else:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, self.portage_skip))
+ elif self.todo:
+ result.addTodo(self, "%s: TODO" % testMethod)
+ else:
+ result.addFailure(self, sys.exc_info())
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+
+ try:
+ self.tearDown()
+ except SystemExit:
+ raise
+ except KeyboardInterrupt:
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ ok = False
+ if ok:
+ result.addSuccess(self)
+ finally:
+ result.stopTest(self)
+
+ def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+ """
+ try:
+ callableObj(*args, **kwargs)
+ except excClass:
+ return
+ else:
+ if hasattr(excClass, '__name__'): excName = excClass.__name__
+ else: excName = str(excClass)
+ raise self.failureException("%s not raised: %s" % (excName, msg))
+
+ def assertExists(self, path):
+ """Make sure |path| exists"""
+ if not os.path.exists(path):
+ msg = ['path is missing: %s' % (path,)]
+ while path != '/':
+ path = os.path.dirname(path)
+ if not path:
+ # If we're given something like "foo", abort once we get to "".
+ break
+ result = os.path.exists(path)
+ msg.append('\tos.path.exists(%s): %s' % (path, result))
+ if result:
+ msg.append('\tcontents: %r' % os.listdir(path))
+ break
+ raise self.failureException('\n'.join(msg))
+
+ def assertNotExists(self, path):
+ """Make sure |path| does not exist"""
+ if os.path.exists(path):
+ raise self.failureException('path exists when it should not: %s' % path)
+
+if unittest_skip_shims:
+ # Shim code for <python-2.7.
+ class SkipTest(Exception):
+ """unittest.SkipTest shim for <python-2.7"""
+
+ def skipTest(self, reason):
+ raise SkipTest(reason)
+ setattr(TestCase, 'skipTest', skipTest)
+
+ def assertIn(self, member, container, msg=None):
+ self.assertTrue(member in container, msg=msg)
+ setattr(TestCase, 'assertIn', assertIn)
+
+ def assertNotIn(self, member, container, msg=None):
+ self.assertFalse(member in container, msg=msg)
+ setattr(TestCase, 'assertNotIn', assertNotIn)
+
+class TextTestRunner(unittest.TextTestRunner):
+ """
+ We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
+ """
+
+ def _makeResult(self):
+ return TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ """
+ Run the given test case or test suite.
+ """
+ result = self._makeResult()
+ startTime = time.time()
+ test(result)
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+ if not result.wasSuccessful():
+ self.stream.write("FAILED (")
+ failed = len(result.failures)
+ errored = len(result.errors)
+ if failed:
+ self.stream.write("failures=%d" % failed)
+ if errored:
+ if failed: self.stream.write(", ")
+ self.stream.write("errors=%d" % errored)
+ self.stream.writeln(")")
+ else:
+ self.stream.writeln("OK")
+ return result
+
+test_cps = ['sys-apps/portage', 'virtual/portage']
+test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57']
+test_slots = [None, '1', 'gentoo-sources-2.6.17', 'spankywashere']
+test_usedeps = ['foo', '-bar', ('foo', 'bar'),
+ ('foo', '-bar'), ('foo?', '!bar?')]
diff --git a/usr/lib/portage/pym/portage/tests/bin/__init__.py b/usr/lib/portage/pym/portage/tests/bin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/bin/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/bin/__test__.py b/usr/lib/portage/pym/portage/tests/bin/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/bin/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/bin/setup_env.py b/usr/lib/portage/pym/portage/tests/bin/setup_env.py
new file mode 100644
index 0000000..9cc26df
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/bin/setup_env.py
@@ -0,0 +1,87 @@
+# setup_env.py -- Make sure bin subdir has sane env for testing
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage.process import spawn
+
+bindir = PORTAGE_BIN_PATH
+basedir = None
+env = None
+
+def binTestsCleanup():
+ global basedir
+ if basedir is None:
+ return
+ if os.access(basedir, os.W_OK):
+ shutil.rmtree(basedir)
+ basedir = None
+
+def binTestsInit():
+ binTestsCleanup()
+ global basedir, env
+ basedir = tempfile.mkdtemp()
+ env = {}
+ env['EAPI'] = '0'
+ env['D'] = os.path.join(basedir, 'image')
+ env['T'] = os.path.join(basedir, 'temp')
+ env['S'] = os.path.join(basedir, 'workdir')
+ env['PF'] = 'portage-tests-0.09-r1'
+ env['PATH'] = bindir + ':' + os.environ['PATH']
+ env['PORTAGE_BIN_PATH'] = bindir
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_INST_UID'] = str(os.getuid())
+ env['PORTAGE_INST_GID'] = str(os.getgid())
+ env['DESTTREE'] = '/usr'
+ os.mkdir(env['D'])
+ os.mkdir(env['T'])
+ os.mkdir(env['S'])
+
+class BinTestCase(TestCase):
+ def init(self):
+ binTestsInit()
+ def cleanup(self):
+ binTestsCleanup()
+
+def _exists_in_D(path):
+	# Note: do not use os.path.join() here; we assume D ends in /
+ return os.access(env['D'] + path, os.W_OK)
+def exists_in_D(path):
+ if not _exists_in_D(path):
+ raise TestCase.failureException
+def xexists_in_D(path):
+ if _exists_in_D(path):
+ raise TestCase.failureException
+
+def portage_func(func, args, exit_status=0):
+ # we don't care about the output of the programs,
+ # just their exit value and the state of $D
+ global env
+ f = open('/dev/null', 'wb')
+ fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
+ def pre_exec():
+ os.chdir(env['S'])
+ spawn([func] + args.split(), env=env,
+ fd_pipes=fd_pipes, pre_exec=pre_exec)
+ f.close()
+
+def create_portage_wrapper(bin):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, bin)
+ return portage_func(*newargs)
+ return derived_func
+
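+# Generate a module-level wrapper (dobin, dodir, newins, ...) for each
+# install helper so tests can call them directly, e.g. (illustrative):
+#   dobin("does-not-exist", 1)
+# which runs the helper inside env['S'] with its output discarded.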
+for bin in os.listdir(os.path.join(bindir, 'ebuild-helpers')):
+ if bin.startswith('do') or \
+ bin.startswith('new') or \
+ bin.startswith('prep') or \
+ bin in ('ecompress', 'ecompressdir', 'fowners', 'fperms'):
+ globals()[bin] = create_portage_wrapper(
+ os.path.join(bindir, 'ebuild-helpers', bin))
diff --git a/usr/lib/portage/pym/portage/tests/bin/test_dobin.py b/usr/lib/portage/pym/portage/tests/bin/test_dobin.py
new file mode 100644
index 0000000..6f50d7a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/bin/test_dobin.py
@@ -0,0 +1,16 @@
+# test_dobin.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dobin, xexists_in_D
+
+class DoBin(BinTestCase):
+ def testDoBin(self):
+ self.init()
+ try:
+ dobin("does-not-exist", 1)
+ xexists_in_D("does-not-exist")
+ xexists_in_D("/bin/does-not-exist")
+ xexists_in_D("/usr/bin/does-not-exist")
+ finally:
+ self.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/bin/test_dodir.py b/usr/lib/portage/pym/portage/tests/bin/test_dodir.py
new file mode 100644
index 0000000..5d40181
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/bin/test_dodir.py
@@ -0,0 +1,18 @@
+# test_dodir.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dodir, exists_in_D
+
+class DoDir(BinTestCase):
+ def testDoDir(self):
+ self.init()
+ try:
+ dodir("usr /usr")
+ exists_in_D("/usr")
+ dodir("boot")
+ exists_in_D("/boot")
+ dodir("/var/lib/moocow")
+ exists_in_D("/var/lib/moocow")
+ finally:
+ self.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/dbapi/__init__.py b/usr/lib/portage/pym/portage/tests/dbapi/__init__.py
new file mode 100644
index 0000000..532918b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dbapi/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/dbapi/__test__.py b/usr/lib/portage/pym/portage/tests/dbapi/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dbapi/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/dbapi/test_fakedbapi.py b/usr/lib/portage/pym/portage/tests/dbapi/test_fakedbapi.py
new file mode 100644
index 0000000..7713563
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dbapi/test_fakedbapi.py
@@ -0,0 +1,63 @@
+# Copyright 2011-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.dbapi.virtual import fakedbapi
+from portage.package.ebuild.config import config
+from portage.tests import TestCase
+
+class TestFakedbapi(TestCase):
+
+ def testFakedbapi(self):
+ packages = (
+ ("sys-apps/portage-2.1.10", {
+ "EAPI" : "2",
+ "IUSE" : "ipc doc",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ "USE" : "ipc missing-iuse",
+ }),
+ ("virtual/package-manager-0", {
+ "EAPI" : "0",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ }),
+ )
+
+ match_tests = (
+ ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[-ipc]", []),
+ ("sys-apps/portage:0[doc]", []),
+ ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[missing-iuse]", []),
+ ("sys-apps/portage:0[-missing-iuse]", []),
+ ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0::multilib[ipc]", []),
+ ("virtual/package-manager", ["virtual/package-manager-0"]),
+ )
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ test_repo = os.path.join(tempdir, "var", "repositories", "test_repo")
+ os.makedirs(os.path.join(test_repo, "profiles"))
+ with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f:
+ f.write("test_repo")
+ env = {
+ "PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo
+ }
+ fakedb = fakedbapi(settings=config(config_profile_path="",
+ env=env, eprefix=tempdir))
+ for cpv, metadata in packages:
+ fakedb.cpv_inject(cpv, metadata=metadata)
+
+ for atom, expected_result in match_tests:
+ result = fakedb.match(atom)
+ self.assertEqual(fakedb.match(atom), expected_result,
+ "fakedb.match('%s') = %s != %s" %
+ (atom, result, expected_result))
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/usr/lib/portage/pym/portage/tests/dbapi/test_portdb_cache.py b/usr/lib/portage/pym/portage/tests/dbapi/test_portdb_cache.py
new file mode 100644
index 0000000..f08d0f8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dbapi/test_portdb_cache.py
@@ -0,0 +1,182 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class PortdbCacheTestCase(TestCase):
+
+ def testPortdbCache(self):
+ debug = False
+
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "sys-apps/B-1": {},
+ "sys-apps/B-2": {},
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ test_repo_location = settings.repositories["test_repo"].location
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ metadata_dir = os.path.join(test_repo_location, "metadata")
+ md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
+ pms_cache_dir = os.path.join(metadata_dir, "cache")
+ layout_conf_path = os.path.join(metadata_dir, "layout.conf")
+
+ portage_python = portage._python_interpreter
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ python_cmd = (portage_python, "-b", "-Wd", "-c")
+
+ test_commands = (
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: not os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories.mainRepoLocation() in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+
+ egencache_cmd + ("--update",),
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories.mainRepoLocation() not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories.mainRepoLocation()], md5_database):
+ sys.exit(1)
+ """),),
+
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = md5-dict pms", layout_conf_path,)))),
+ egencache_cmd + ("--update",),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories.mainRepoLocation() not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories.mainRepoLocation()], md5_database):
+ sys.exit(1)
+ """),),
+
+ # Disable DeprecationWarnings, since the pms format triggers them
+ # in portdbapi._create_pregen_cache().
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = pms md5-dict", layout_conf_path,)))),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories.mainRepoLocation() not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.metadata import database as pms_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories.mainRepoLocation()], pms_database):
+ sys.exit(1)
+ """),),
+
+ # Test auto-detection and preference for md5-cache when both
+ # cache formats are available but layout.conf is absent.
+ (BASH_BINARY, "-c", "rm %s" % portage._shell_quote(layout_conf_path)),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories.mainRepoLocation() not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories.mainRepoLocation()], md5_database):
+ sys.exit(1)
+ """),),
+ )
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
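+		# Make sure PORTAGE_PYM_PATH comes first in PYTHONPATH so the spawned
+		# interpreters import this portage source tree rather than any
+		# installed copy.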
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PATH" : os.environ.get("PATH", ""),
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [user_config_dir]
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "command %d failed with args %s" % (i, args,))
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/dep/__init__.py b/usr/lib/portage/pym/portage/tests/dep/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.dep/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/dep/__test__.py b/usr/lib/portage/pym/portage/tests/dep/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/dep/testAtom.py b/usr/lib/portage/pym/portage/tests/dep/testAtom.py
new file mode 100644
index 0000000..da58be2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/testAtom.py
@@ -0,0 +1,341 @@
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+
+class TestAtom(TestCase):
+
+ def testAtom(self):
+
+ tests = (
+ ("=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False),
+ ("=sys-apps/portage-2.1-r1*:0[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("*/*",
+ (None, '*/*', None, None, None, None), True, False),
+ ("=*/*-*9999*",
+ ('=*', '*/*', '*9999*', None, None, None), True, False),
+ ("=*/*-*9999*:0::repo_name",
+ ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True),
+ ("=*/*-*_beta*",
+ ('=*', '*/*', '*_beta*', None, None, None), True, False),
+ ("=*/*-*_beta*:0::repo_name",
+ ('=*', '*/*', '*_beta*', '0', None, 'repo_name'), True, True),
+ ("sys-apps/*",
+ (None, 'sys-apps/*', None, None, None, None), True, False),
+ ("*/portage",
+ (None, '*/portage', None, None, None, None), True, False),
+ ("s*s-*/portage:1",
+ (None, 's*s-*/portage', None, '1', None, None), True, False),
+ ("*/po*ge:2",
+ (None, '*/po*ge', None, '2', None, None), True, False),
+ ("!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("dev-libs/A[foo(+)]",
+ (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+ ("dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True),
+ ("sys-apps/portage:0::repo_name[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True),
+
+ ("*/*::repo_name",
+ (None, '*/*', None, None, None, 'repo_name'), True, True),
+ ("sys-apps/*::repo_name",
+ (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True),
+ ("*/portage::repo_name",
+ (None, '*/portage', None, None, None, 'repo_name'), True, True),
+ ("s*s-*/portage:1::repo_name",
+ (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True),
+ )
+
+ tests_xfail = (
+ (Atom("sys-apps/portage"), False, False),
+ ("cat/pkg[a!]", False, False),
+ ("cat/pkg[!a]", False, False),
+ ("cat/pkg[!a!]", False, False),
+ ("cat/pkg[!a-]", False, False),
+ ("cat/pkg[-a=]", False, False),
+ ("cat/pkg[-a?]", False, False),
+ ("cat/pkg[-a!]", False, False),
+ ("cat/pkg[=a]", False, False),
+ ("cat/pkg[=a=]", False, False),
+ ("cat/pkg[=a?]", False, False),
+ ("cat/pkg[=a!]", False, False),
+ ("cat/pkg[=a-]", False, False),
+ ("cat/pkg[?a]", False, False),
+ ("cat/pkg[?a=]", False, False),
+ ("cat/pkg[?a?]", False, False),
+ ("cat/pkg[?a!]", False, False),
+ ("cat/pkg[?a-]", False, False),
+ ("sys-apps/portage[doc]:0", False, False),
+ ("*/*", False, False),
+ ("sys-apps/*", False, False),
+ ("*/portage", False, False),
+ ("*/**", True, False),
+ ("*/portage[use]", True, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[a(]", False, False),
+ ("cat/pkg[a)]", False, False),
+ ("cat/pkg[a(,b]", False, False),
+ ("cat/pkg[a),b]", False, False),
+ ("cat/pkg[a(*)]", False, False),
+ ("cat/pkg[a(*)]", True, False),
+ ("cat/pkg[a(+-)]", False, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[(+)a]", False, False),
+ ("cat/pkg[a=(+)]", False, False),
+ ("cat/pkg[!(+)a=]", False, False),
+ ("cat/pkg[!a=(+)]", False, False),
+ ("cat/pkg[a?(+)]", False, False),
+ ("cat/pkg[!a?(+)]", False, False),
+ ("cat/pkg[!(+)a?]", False, False),
+ ("cat/pkg[-(+)a]", False, False),
+ ("cat/pkg[a(+),-a]", False, False),
+ ("cat/pkg[a(-),-a]", False, False),
+ ("cat/pkg[-a,a(+)]", False, False),
+ ("cat/pkg[-a,a(-)]", False, False),
+ ("cat/pkg[-a(+),a(-)]", False, False),
+ ("cat/pkg[-a(-),a(+)]", False, False),
+ ("sys-apps/portage[doc]::repo_name", False, False),
+ ("sys-apps/portage:0[doc]::repo_name", False, False),
+ ("sys-apps/portage[doc]:0::repo_name", False, False),
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False),
+ ("sys-apps/portage:0::repo_name[doc]", False, False),
+ ("*/*::repo_name", True, False),
+ )
+
+ for atom, parts, allow_wildcard, allow_repo in tests:
+ a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ op, cp, ver, slot, use, repo = parts
+ self.assertEqual(op, a.operator,
+ msg="Atom('%s').operator = %s == '%s'" % (atom, a.operator, op))
+ self.assertEqual(cp, a.cp,
+ msg="Atom('%s').cp = %s == '%s'" % (atom, a.cp, cp))
+ if ver is not None:
+ cpv = "%s-%s" % (cp, ver)
+ else:
+ cpv = cp
+ self.assertEqual(cpv, a.cpv,
+ msg="Atom('%s').cpv = %s == '%s'" % (atom, a.cpv, cpv))
+ self.assertEqual(slot, a.slot,
+ msg="Atom('%s').slot = %s == '%s'" % (atom, a.slot, slot))
+ self.assertEqual(repo, a.repo,
+ msg="Atom('%s').repo == %s == '%s'" % (atom, a.repo, repo))
+
+ if a.use:
+ returned_use = str(a.use)
+ else:
+ returned_use = None
+ self.assertEqual(use, returned_use,
+ msg="Atom('%s').use = %s == '%s'" % (atom, returned_use, use))
+
+ for atom, allow_wildcard, allow_repo in tests_xfail:
+ self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom,
+ allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def testSlotAbiAtom(self):
+ tests = (
+ ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": None}),
+ ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": "="}),
+ ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "*"}),
+ ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": None}),
+ ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": None}),
+ )
+
+ for atom, eapi, parts in tests:
+ a = Atom(atom, eapi=eapi)
+ for k, v in parts.items():
+ self.assertEqual(v, getattr(a, k),
+ msg="Atom('%s').%s = %s == '%s'" %
+ (atom, k, getattr(a, k), v))
+
+ def test_intersects(self):
+ test_cases = (
+ ("dev-libs/A", "dev-libs/A", True),
+ ("dev-libs/A", "dev-libs/B", False),
+ ("dev-libs/A", "sci-libs/A", False),
+ ("dev-libs/A[foo]", "sci-libs/A[bar]", False),
+ ("dev-libs/A[foo(+)]", "sci-libs/A[foo(-)]", False),
+ ("=dev-libs/A-1", "=dev-libs/A-1-r1", False),
+ ("~dev-libs/A-1", "=dev-libs/A-1", False),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:2", False),
+ )
+
+ for atom, other, expected_result in test_cases:
+ self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result,
+ "%s and %s should intersect: %s" % (atom, other, expected_result))
+
+ def test_violated_conditionals(self):
+ test_cases = (
+ ("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
+
+ ("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
+
+ ("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+
+ ("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["e"], ["e"], "dev-libs/A"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+
+ #Some more test cases to trigger all remaining code paths
+ ("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
+ ("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
+ ("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
+
+ ("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
+ ("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
+ ("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
+
+ ("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
+ ("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
+ ("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
+
+ #Missing IUSE test cases
+ ("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
+ ("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
+ ("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
+ ("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
+ ("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
+ ("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
+ )
+
+ test_cases_xfail = (
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], None),
+ )
+
+ class use_flag_validator(object):
+ def __init__(self, iuse):
+ self.iuse = iuse
+
+ def is_valid_flag(self, flag):
+				return flag in self.iuse
+
+ for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ violated_atom = a.violated_conditionals(other_use, validator.is_valid_flag, parent_use)
+ if parent_use is None:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), "None", str(violated_atom), expected_violated_atom)
+ else:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use), str(violated_atom), expected_violated_atom)
+ self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)
+
+ for atom, other_use, iuse, parent_use in test_cases_xfail:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ self.assertRaisesMsg(atom, InvalidAtom,
+ a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
+
+ def test_evaluate_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo=]", ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", [], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], "dev-libs/A[a,b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], "dev-libs/A[a,-b,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], "dev-libs/A[a,-b,c,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], "dev-libs/A[a(-),-b(+),c(-),d(+),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["f"], "dev-libs/A[a(+),-b(-),c(+),-e(+),-f(-)]"),
+ )
+
+ for atom, use, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a.evaluate_conditionals(use)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
+
+ def test__eval_qa_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"], "dev-libs/A[a,b,-c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
+
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]",
+ ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ [], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
+ )
+
+ for atom, use_mask, use_force, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a._eval_qa_conditionals(use_mask, use_force)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
diff --git a/usr/lib/portage/pym/portage/tests/dep/testCheckRequiredUse.py b/usr/lib/portage/pym/portage/tests/dep/testCheckRequiredUse.py
new file mode 100644
index 0000000..63330b5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/testCheckRequiredUse.py
@@ -0,0 +1,233 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import check_required_use
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+ def testCheckRequiredUse(self):
+ test_cases = (
+ ("|| ( a b )", [], ["a", "b"], False),
+ ("|| ( a b )", ["a"], ["a", "b"], True),
+ ("|| ( a b )", ["b"], ["a", "b"], True),
+ ("|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+ ("^^ ( a b )", [], ["a", "b"], False),
+ ("^^ ( a b )", ["a"], ["a", "b"], True),
+ ("^^ ( a b )", ["b"], ["a", "b"], True),
+ ("^^ ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a"], ["a", "b"], True),
+ ("?? ( a b )", ["b"], ["a", "b"], True),
+ ("?? ( a b )", [], ["a", "b"], True),
+ ("?? ( )", [], [], True),
+
+ ("^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+ ("^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+ ("( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+ ("a || ( b c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+ ("|| ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+ ("^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+ ("a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+ ("|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+ ("^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+ )
+
+ test_cases_xfail = (
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+ ("^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ )
+
+ test_cases_xfail_eapi = (
+ ("?? ( a b )", [], ["a", "b"], "4"),
+ )
+
+ for required_use, use, iuse, expected in test_cases:
+ self.assertEqual(bool(check_required_use(required_use, use, iuse.__contains__)), \
+ expected, required_use + ", USE = " + " ".join(use))
+
+ for required_use, use, iuse in test_cases_xfail:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+
+ for required_use, use, iuse, eapi in test_cases_xfail_eapi:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use,
+ iuse.__contains__, eapi=eapi)
+
+ def testCheckRequiredUseFilterSatisfied(self):
+ """
+ Test filtering of satisfied parts of REQUIRED_USE,
+ in order to reduce noise for bug #353234.
+ """
+ test_cases = (
+ (
+ "bindist? ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) dvdnav? ( dvd )",
+ ("cdio", "cdparanoia"),
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "|| ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) ^^ ( foo bar )",
+ ["cdio", "cdparanoia", "foo"],
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "^^ ( || ( a b ) c )",
+ ("a", "b", "c"),
+ "^^ ( || ( a b ) c )"
+ ),
+ (
+ "^^ ( || ( ( a b ) ) ( c ) )",
+ ("a", "b", "c"),
+ "^^ ( ( a b ) c )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "b", "c", "e"),
+ "a? ( d )"
+ ),
+ (
+ "a? ( ( c e ) ( c e b c d e c ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ("a", "b"),
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "c"],
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["b", "c"],
+ ""
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c", "d"],
+ "^^ ( ( a b c ) ( b c d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c"],
+ "^^ ( ( a b c ) ( b c !d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c", "d"],
+ ""
+ ),
+ (
+ "( ( ( a ) ) ( ( ( b c ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( ( ( a ) ) ( ( ( b c ) ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a ( ( ) ( ) ) ( ( ) ) ( b ( ) c ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a b c ) ) || ( ( d e f ) )",
+ [""],
+ "a b c d e f"
+ ),
+ )
+ for required_use, use, expected in test_cases:
+ result = check_required_use(required_use, use, lambda k: True).tounicode()
+ self.assertEqual(result, expected,
+ "REQUIRED_USE = '%s', USE = '%s', '%s' != '%s'" % \
+ (required_use, " ".join(use), result, expected))
diff --git a/usr/lib/portage/pym/portage/tests/dep/testExtendedAtomDict.py b/usr/lib/portage/pym/portage/tests/dep/testExtendedAtomDict.py
new file mode 100644
index 0000000..69d092e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/testExtendedAtomDict.py
@@ -0,0 +1,18 @@
+# testExtendedAtomDict.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import ExtendedAtomDict
+
+class TestExtendedAtomDict(TestCase):
+
+ def testExtendedAtomDict(self):
+ d = ExtendedAtomDict(dict)
+ d["*/*"] = { "test1": "x" }
+ d["dev-libs/*"] = { "test2": "y" }
+ d.setdefault("sys-apps/portage", {})["test3"] = "z"
+ self.assertEqual(d.get("dev-libs/A"), { "test1": "x", "test2": "y" })
+ self.assertEqual(d.get("sys-apps/portage"), { "test1": "x", "test3": "z" })
+ self.assertEqual(d["dev-libs/*"], { "test2": "y" })
+ self.assertEqual(d["sys-apps/portage"], {'test1': 'x', 'test3': 'z'})
diff --git a/usr/lib/portage/pym/portage/tests/dep/testExtractAffectingUSE.py b/usr/lib/portage/pym/portage/tests/dep/testExtractAffectingUSE.py
new file mode 100644
index 0000000..026a552
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/testExtractAffectingUSE.py
@@ -0,0 +1,75 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import extract_affecting_use
+from portage.exception import InvalidDependString
+
+class TestExtractAffectingUSE(TestCase):
+
+ def testExtractAffectingUSE(self):
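+ # extract_affecting_use(dep, atom) returns the set of USE flags
+ # belonging to the conditionals that enclose the given atom, i.e.
+ # the flags whose state decides whether the atom is pulled in
+ # (behavior inferred from the cases below).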
+ test_cases = (
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "A", ("a",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "B", ("b",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "C", ("c",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "D", ("d",)),
+
+ ("a? ( b? ( AB ) )", "AB", ("a", "b")),
+ ("a? ( b? ( c? ( ABC ) ) )", "ABC", ("a", "b", "c")),
+
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "A", ("a",)),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "AB", ("a", "b")),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "ABC", ("a", "b", "c")),
+ ("a? ( A b? ( c? ( ABC ) AB ) ) X", "X", []),
+ ("X a? ( A b? ( c? ( ABC ) AB ) )", "X", []),
+
+ ("ab? ( || ( A B ) )", "A", ("ab",)),
+ ("!ab? ( || ( A B ) )", "B", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "A", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "B", ("ab", "b")),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "C", ("ab", "b")),
+
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "A", ("ab",)),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "B", ("ab", "b")),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "C", ("ab", "b")),
+
+ ("a? ( A )", "B", []),
+
+ ("a? ( || ( A B ) )", "B", ["a"]),
+
+ # test USE dep defaults for bug #363073
+ ("a? ( >=dev-lang/php-5.2[pcre(+)] )", ">=dev-lang/php-5.2[pcre(+)]", ["a"]),
+ )
+
+ test_cases_xfail = (
+ ("? ( A )", "A"),
+ ("!? ( A )", "A"),
+ ("( A", "A"),
+ ("A )", "A"),
+
+ ("||( A B )", "A"),
+ ("|| (A B )", "A"),
+ ("|| ( A B)", "A"),
+ ("|| ( A B", "A"),
+ ("|| A B )", "A"),
+ ("|| A B", "A"),
+ ("|| ( A B ) )", "A"),
+ ("|| || B C", "A"),
+ ("|| ( A B || )", "A"),
+ ("a? A", "A"),
+ ("( || ( || || ( A ) foo? ( B ) ) )", "A"),
+ ("( || ( || bar? ( A ) foo? ( B ) ) )", "A"),
+ )
+
+ for dep, atom, expected in test_cases:
+ expected = set(expected)
+ result = extract_affecting_use(dep, atom, eapi="0")
+ fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+ " ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+ self.assertEqual(result, expected, fail_msg)
+
+ for dep, atom in test_cases_xfail:
+ fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+ " ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+ self.assertRaisesMsg(fail_msg, \
+ InvalidDependString, extract_affecting_use, dep, atom, eapi="0")
diff --git a/usr/lib/portage/pym/portage/tests/dep/testStandalone.py b/usr/lib/portage/pym/portage/tests/dep/testStandalone.py
new file mode 100644
index 0000000..88e3f39
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/testStandalone.py
@@ -0,0 +1,37 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import cpvequal
+from portage.exception import PortageException
+
+class TestStandalone(TestCase):
+ """ Test some small functions portage.dep
+ """
+
+ def testCPVequal(self):
+
+ test_cases = (
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1", True),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.0", False),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1", False),
+ ("sys-apps/portage-2.1-r1", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3_p6", "sys-apps/portage-2.1_alpha3", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1", "sys-apps/X-2.1", False),
+ ("sys-apps/portage-2.1", "portage-2.1", False),
+ )
+
+ test_cases_xfail = (
+ ("sys-apps/portage", "sys-apps/portage"),
+ ("sys-apps/portage-2.1-6", "sys-apps/portage-2.1-6"),
+ )
+
+ for cpv1, cpv2, expected_result in test_cases:
+ self.assertEqual(cpvequal(cpv1, cpv2), expected_result,
+ "cpvequal('%s', '%s') != %s" % (cpv1, cpv2, expected_result))
+
+ for cpv1, cpv2 in test_cases_xfail:
+ self.assertRaisesMsg("cpvequal(%s, %s)" % (cpv1, cpv2),
+ PortageException, cpvequal, cpv1, cpv2)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_best_match_to_list.py b/usr/lib/portage/pym/portage/tests/dep/test_best_match_to_list.py
new file mode 100644
index 0000000..586c8bc
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_best_match_to_list.py
@@ -0,0 +1,63 @@
+# test_best_match_to_list.py -- Portage Unit Testing Functionality
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+
+from portage.tests import TestCase
+from portage.dep import Atom, best_match_to_list
+
+class Test_best_match_to_list(TestCase):
+
+ def best_match_to_list_wrapper(self, mypkg, mylist):
+ """
+ This function uses best_match_to_list to create sorted
+ list of matching atoms.
+ """
+ ret = []
+ mylist = list(mylist)
+ while mylist:
+ m = best_match_to_list(mypkg, mylist)
+ if m is not None:
+ ret.append(m)
+ mylist.remove(m)
+ else:
+ break
+
+ return ret
+
+ def testBest_match_to_list(self):
+ tests = [
+ ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")],
+ [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
+ ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")],
+ [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)],
+ [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta1-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),
+ Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"),
+ Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"),
+ Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"),
+ Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
+ ]
+
+ for pkg, atom_list, result, all_permutations in tests:
+ if all_permutations:
+ atom_lists = permutations(atom_list)
+ else:
+ atom_lists = [atom_list]
+ for atom_list in atom_lists:
+ self.assertEqual(
+ self.best_match_to_list_wrapper(pkg, atom_list),
+ result)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_dep_getcpv.py b/usr/lib/portage/pym/portage/tests/dep/test_dep_getcpv.py
new file mode 100644
index 0000000..79c1514
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_dep_getcpv.py
@@ -0,0 +1,37 @@
+# test_dep_getcpv.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getcpv
+
+class DepGetCPV(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetCPV(self):
+
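+ # dep_getcpv strips the version operator prefix and any slot or
+ # wildcard suffix, e.g. (illustrative):
+ #   dep_getcpv(">=sys-apps/portage-2.1:2") == "sys-apps/portage-2.1"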
+ prefix_ops = [
+ "<", ">", "=", "~", "<=",
+ ">=", "!=", "!<", "!>", "!~"
+ ]
+
+ bad_prefix_ops = [">~", "<~", "~>", "~<"]
+ postfix_ops = [("=", "*"),]
+
+ cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
+ "sys-apps/portage-2.1"]
+ slots = [None, ":foo", ":2"]
+ for cpv in cpvs:
+ for slot in slots:
+ for prefix in prefix_ops:
+ mycpv = prefix + cpv
+ if slot:
+ mycpv += slot
+ self.assertEqual(dep_getcpv(mycpv), cpv)
+
+ for prefix, postfix in postfix_ops:
+ mycpv = prefix + cpv + postfix
+ if slot:
+ mycpv += slot
+ self.assertEqual(dep_getcpv(mycpv), cpv)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_dep_getrepo.py b/usr/lib/portage/pym/portage/tests/dep/test_dep_getrepo.py
new file mode 100644
index 0000000..6c17d3c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_dep_getrepo.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getrepo
+
+class DepGetRepo(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetRepo(self):
+
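+ # dep_getrepo extracts the "::repo" component of an atom, returning
+ # None when no repository is attached, e.g. (illustrative):
+ #   dep_getrepo("=sys-apps/portage-2.1.1::repo-name[use]") == "repo-name"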
+ repo_char = "::"
+ repos = ("a", "repo-name", "repo_name", "repo123", None)
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1", "2.1-r1", None]
+ uses = ["[use]", None]
+ for cpv in cpvs:
+ for version in versions:
+ for use in uses:
+ for repo in repos:
+ pkg = cpv
+ if version:
+ pkg = '=' + pkg + '-' + version
+ if repo is not None:
+ pkg = pkg + repo_char + repo
+ if use:
+ pkg = pkg + use
+ self.assertEqual(dep_getrepo(pkg), repo)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_dep_getslot.py b/usr/lib/portage/pym/portage/tests/dep/test_dep_getslot.py
new file mode 100644
index 0000000..8482864
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_dep_getslot.py
@@ -0,0 +1,28 @@
+# test_dep_getslot.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getslot
+
+class DepGetSlot(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetSlot(self):
+
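+ # dep_getslot extracts the slot that follows ":", returning None
+ # when the atom carries no slot, e.g. (illustrative):
+ #   dep_getslot("=sys-apps/portage-2.1.1:2") == "2"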
+ slot_char = ":"
+ slots = ("a", "1.2", "1", "IloveVapier", None)
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1", "2.1-r1"]
+ for cpv in cpvs:
+ for version in versions:
+ for slot in slots:
+ mycpv = cpv
+ if version:
+ mycpv = '=' + mycpv + '-' + version
+ if slot is not None:
+ self.assertEqual(dep_getslot(
+ mycpv + slot_char + slot), slot)
+ else:
+ self.assertEqual(dep_getslot(mycpv), slot)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_dep_getusedeps.py b/usr/lib/portage/pym/portage/tests/dep/test_dep_getusedeps.py
new file mode 100644
index 0000000..cd58eab
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_dep_getusedeps.py
@@ -0,0 +1,35 @@
+# test_dep_getusedeps.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getusedeps
+
+from portage.tests import test_cps, test_slots, test_versions, test_usedeps
+
+class DepGetUseDeps(TestCase):
+ """ A simple testcase for dep_getusedeps
+ """
+
+ def testDepGetUseDeps(self):
+
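+ # dep_getusedeps returns the USE dependencies inside "[...]" as a
+ # tuple of strings, e.g. (illustrative):
+ #   dep_getusedeps("sys-apps/portage:2[doc,-build]") == ("doc", "-build")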
+ for mycpv in test_cps:
+ for version in test_versions:
+ for slot in test_slots:
+ for use in test_usedeps:
+ cpv = mycpv[:]
+ if version:
+ cpv += version
+ if slot:
+ cpv += ":" + slot
+ if isinstance(use, tuple):
+ cpv += "[%s]" % (",".join(use),)
+ self.assertEqual(dep_getusedeps(
+ cpv), use)
+ else:
+ if len(use):
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), (use,))
+ else:
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), ())
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_get_operator.py b/usr/lib/portage/pym/portage/tests/dep/test_get_operator.py
new file mode 100644
index 0000000..5076e21
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_get_operator.py
@@ -0,0 +1,37 @@
+# test_get_operator.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_operator
+
+class GetOperator(TestCase):
+
+ def testGetOperator(self):
+
+ # get_operator does not validate operators
+ tests = [
+ ("~", "~"),
+ ("=", "="),
+ (">", ">"),
+ (">=", ">="),
+ ("<=", "<="),
+ ]
+
+ test_cpvs = ["sys-apps/portage-2.1"]
+ slots = [None, "1", "linux-2.5.6"]
+ for cpv in test_cpvs:
+ for test in tests:
+ for slot in slots:
+ atom = cpv[:]
+ if slot:
+ atom += ":" + slot
+ result = get_operator(test[0] + atom)
+ self.assertEqual(result, test[1],
+ msg="get_operator(%s) != %s" % (test[0] + atom, test[1]))
+
+ result = get_operator("sys-apps/portage")
+ self.assertEqual(result, None)
+
+ result = get_operator("=sys-apps/portage-2.1*")
+ self.assertEqual(result , "=*")
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_get_required_use_flags.py b/usr/lib/portage/pym/portage/tests/dep/test_get_required_use_flags.py
new file mode 100644
index 0000000..90e096c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_get_required_use_flags.py
@@ -0,0 +1,44 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_required_use_flags
+from portage.exception import InvalidDependString
+
+class TestGetRequiredUseFlags(TestCase):
+
+ def testGetRequiredUseFlags(self):
+ test_cases = (
+ ("a b c", ["a", "b", "c"]),
+
+ ("|| ( a b c )", ["a", "b", "c"]),
+ ("^^ ( a b c )", ["a", "b", "c"]),
+ ("?? ( a b c )", ["a", "b", "c"]),
+ ("?? ( )", []),
+
+ ("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
+ ("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
+
+ ("( ^^ ( a ( b ) ( || ( ( d e ) ( f ) ) ) ) )", ["a", "b", "d", "e", "f"]),
+
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"]),
+ ("a? ( ^^ ( !b !d? ( c ) ) )", ["a", "b", "c", "d"]),
+ )
+
+ test_cases_xfail = (
+ ("^^ ( || ( a b ) ^^ ( b c )"),
+ ("^^( || ( a b ) ^^ ( b c ) )"),
+ ("^^ || ( a b ) ^^ ( b c )"),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )"),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )"),
+ )
+
+ for required_use, expected in test_cases:
+ result = get_required_use_flags(required_use)
+ expected = set(expected)
+ self.assertEqual(result, expected, \
+ "REQUIRED_USE: '%s', expected: '%s', got: '%s'" % (required_use, expected, result))
+
+ for required_use in test_cases_xfail:
+ self.assertRaisesMsg("REQUIRED_USE: '%s'" % (required_use,), \
+ InvalidDependString, get_required_use_flags, required_use)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_isjustname.py b/usr/lib/portage/pym/portage/tests/dep/test_isjustname.py
new file mode 100644
index 0000000..9b95bcd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_isjustname.py
@@ -0,0 +1,24 @@
+# test_isjustname.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isjustname
+
+class IsJustName(TestCase):
+
+ def testIsJustName(self):
+
+ cats = ("", "sys-apps/", "foo/", "virtual/")
+ pkgs = ("portage", "paludis", "pkgcore", "notARealPkg")
+ vers = ("", "-2.0-r3", "-1.0_pre2", "-3.1b")
+
+ for pkg in pkgs:
+ for cat in cats:
+ for ver in vers:
+ if len(ver):
+ self.assertFalse(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is True!" % (cat + pkg + ver))
+ else:
+ self.assertTrue(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is False!" % (cat + pkg + ver))
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_isvalidatom.py b/usr/lib/portage/pym/portage/tests/dep/test_isvalidatom.py
new file mode 100644
index 0000000..67ba603
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_isvalidatom.py
@@ -0,0 +1,158 @@
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isvalidatom
+
+class IsValidAtomTestCase(object):
+ def __init__(self, atom, expected, allow_wildcard=False, allow_repo=False):
+ self.atom = atom
+ self.expected = expected
+ self.allow_wildcard = allow_wildcard
+ self.allow_repo = allow_repo
+
+class IsValidAtom(TestCase):
+
+ def testIsValidAtom(self):
+
+ test_cases = (
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*", True),
+ IsValidAtomTestCase(">=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase(">sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("~sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("sys-apps/portage:foo", True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc!=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][doc,build]", False),
+ IsValidAtomTestCase(">~cate-gory/foo-1.0", False),
+ IsValidAtomTestCase(">~category/foo-1.0", False),
+ IsValidAtomTestCase("<~category/foo-1.0", False),
+ IsValidAtomTestCase("###cat/foo-1.0", False),
+ IsValidAtomTestCase("~sys-apps/portage", False),
+ IsValidAtomTestCase("portage", False),
+ IsValidAtomTestCase("=portage", False),
+ IsValidAtomTestCase(">=portage-2.1", False),
+ IsValidAtomTestCase("~portage-2.1", False),
+ IsValidAtomTestCase("=portage-2.1*", False),
+ IsValidAtomTestCase("null/portage", True),
+ IsValidAtomTestCase("null/portage*:0", False),
+ IsValidAtomTestCase(">=null/portage-2.1", True),
+ IsValidAtomTestCase(">=null/portage", False),
+ IsValidAtomTestCase(">null/portage", False),
+ IsValidAtomTestCase("=null/portage*", False),
+ IsValidAtomTestCase("=null/portage", False),
+ IsValidAtomTestCase("~null/portage", False),
+ IsValidAtomTestCase("<=null/portage", False),
+ IsValidAtomTestCase("<null/portage", False),
+ IsValidAtomTestCase("~null/portage-2.1", True),
+ IsValidAtomTestCase("=null/portage-2.1*", True),
+ IsValidAtomTestCase("null/portage-2.1*", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125-r2", False),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("foo/-z-1", False),
+
+ # These are invalid because the package name must not end in a
+ # hyphen followed by numbers (it would be parsed as a version)
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1", False),
+ IsValidAtomTestCase("=foo/bar-123-1*", False),
+ IsValidAtomTestCase("foo/bar-123", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1*", False),
+ IsValidAtomTestCase("foo/bar-123-r1", False),
+ IsValidAtomTestCase("foo/bar-1", False),
+
+ IsValidAtomTestCase("=foo/bar--baz-1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz--1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1", True),
+ IsValidAtomTestCase("=foo/bar-baz-1--r1", False),
+ IsValidAtomTestCase("games-strategy/ufo2000", True),
+ IsValidAtomTestCase("~games-strategy/ufo2000-0.1", True),
+ IsValidAtomTestCase("=media-libs/x264-20060810", True),
+ IsValidAtomTestCase("foo/b", True),
+ IsValidAtomTestCase("app-text/7plus", True),
+ IsValidAtomTestCase("foo/666", True),
+ IsValidAtomTestCase("=dev-libs/poppler-qt3-0.11*", True),
+
+ # Test atoms with repositories
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo[foo]", False, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[!doc]", False, allow_repo=True),
+ IsValidAtomTestCase("###cat/foo-1.0::repo", False, allow_repo=True),
+ IsValidAtomTestCase("~sys-apps/portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("null/portage::repo", True, allow_repo=True),
+ IsValidAtomTestCase("app-doc/php-docs-20071125::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1::repo", False, allow_repo=True),
+
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", False, allow_repo=False),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
+ IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
+
+ IsValidAtomTestCase("virtual/ffmpeg:0/53", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0/53=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0/53*", False),
+ IsValidAtomTestCase("virtual/ffmpeg:=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:*", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0*", False),
+ IsValidAtomTestCase("virtual/ffmpeg:0", True),
+
+ # Wildcard atoms
+ IsValidAtomTestCase("*/portage-2.1", False, allow_wildcard=True),
+ )
+
+ for test_case in test_cases:
+ if test_case.expected:
+ atom_type = "valid"
+ else:
+ atom_type = "invalid"
+ self.assertEqual(bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard,
+ allow_repo=test_case.allow_repo)), test_case.expected,
+ msg="isvalidatom(%s) != %s" % (test_case.atom, test_case.expected))
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_match_from_list.py b/usr/lib/portage/pym/portage/tests/dep/test_match_from_list.py
new file mode 100644
index 0000000..75ac8fd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_match_from_list.py
@@ -0,0 +1,137 @@
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.tests import TestCase
+from portage.dep import Atom, match_from_list, _repo_separator
+from portage.versions import catpkgsplit, _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class Package(object):
+ """
+ Provides a minimal subset of attributes of _emerge.Package.Package
+ """
+ def __init__(self, atom):
+ atom = Atom(atom, allow_repo=True)
+ self.cp = atom.cp
+ slot = atom.slot
+ if atom.sub_slot:
+ slot = "%s/%s" % (slot, atom.sub_slot)
+ if not slot:
+ slot = '0'
+ self.cpv = _pkg_str(atom.cpv, slot=slot, repo=atom.repo)
+ self.cpv_split = catpkgsplit(self.cpv)
+ self.slot = self.cpv.slot
+ self.sub_slot = self.cpv.sub_slot
+ self.repo = atom.repo
+ if atom.use:
+ self.use = self._use_class(atom.use.enabled)
+ self.iuse = self._iuse_class(atom.use.required)
+ else:
+ self.use = self._use_class([])
+ self.iuse = self._iuse_class([])
+
+ class _use_class(object):
+ def __init__(self, use):
+ self.enabled = frozenset(use)
+
+ class _iuse_class(object):
+ def __init__(self, iuse):
+ self.all = frozenset(iuse)
+
+ def is_valid_flag(self, flags):
+ if isinstance(flags, basestring):
+ flags = [flags]
+ for flag in flags:
+ if not flag in self.all:
+ return False
+ return True
+
+class Test_match_from_list(TestCase):
+
+ def testMatch_from_list(self):
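+ # match_from_list(atom, candidates) returns the candidates that the
+ # atom matches; "!" and "!!" blocker prefixes do not prevent a match
+ # (see the first few cases below).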
+ tests = (
+ ("=sys-apps/portage-45*", [], []),
+ ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
+ ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
+ (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-1-r1*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
+ ("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
+ ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
+ ("*/*:1", ["sys-fs/udev-456:0"], []),
+ ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
+ ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
+ ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
+ ("*/tar", ["sys-apps/portage-2.1.2"], []),
+ ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
+ ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
+
+ ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
+ ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+
+ ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"]),
+ ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], []),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], []),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+
+ ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
+ ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+
+ ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ )
+
+ for atom, cpv_list, expected_result in tests:
+ result = []
+ for pkg in match_from_list(atom, cpv_list):
+ if isinstance(pkg, Package):
+ if pkg.repo:
+ result.append(pkg.cpv + _repo_separator + pkg.repo)
+ else:
+ result.append(pkg.cpv)
+ else:
+ result.append(pkg)
+ self.assertEqual(result, expected_result)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_paren_reduce.py b/usr/lib/portage/pym/portage/tests/dep/test_paren_reduce.py
new file mode 100644
index 0000000..3244652
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_paren_reduce.py
@@ -0,0 +1,69 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import paren_reduce
+from portage.exception import InvalidDependString
+
+class TestParenReduce(TestCase):
+
+ def testParenReduce(self):
+
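+ # paren_reduce tokenizes a dependency string into nested lists,
+ # keeping "||" and "use?" operators as tokens followed by their
+ # group, e.g. paren_reduce("|| ( A B )") == ["||", ["A", "B"]].
+ # The tests pass _deprecation_warn=False to silence the deprecation
+ # warning that a plain call would emit.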
+ test_cases = (
+ ("A", ["A"]),
+ ("( A )", ["A"]),
+ ("|| ( A B )", ["||", ["A", "B"]]),
+ ("|| ( A || ( B C ) )", ["||", ["A", "||", ["B", "C"]]]),
+ ("|| ( A || ( B C D ) )", ["||", ["A", "||", ["B", "C", "D"]]]),
+ ("|| ( A || ( B || ( C D ) E ) )", ["||", ["A", "||", ["B", "||", ["C", "D"], "E"]]]),
+ ("a? ( A )", ["a?", ["A"]]),
+
+ ("( || ( ( ( A ) B ) ) )", ["A", "B"]),
+ ("( || ( || ( ( A ) B ) ) )", ["||", ["A", "B"]]),
+ ("|| ( A )", ["A"]),
+ ("( || ( || ( || ( A ) foo? ( B ) ) ) )", ["||", ["A", "foo?", ["B"]]]),
+ ("( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", ["||", ["bar?", ["A"], "foo?", ["B"]]]),
+ ("A || ( ) foo? ( ) B", ["A", "B"]),
+
+ ("|| ( A ) || ( B )", ["A", "B"]),
+ ("foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+ ("|| ( ( A B ) C )", ["||", [["A", "B"], "C"]]),
+ ("|| ( ( A B ) ( C ) )", ["||", [["A", "B"], "C"]]),
+ # test USE dep defaults for bug #354003
+ (">=dev-lang/php-5.2[pcre(+)]", [">=dev-lang/php-5.2[pcre(+)]"]),
+ )
+
+ test_cases_xfail = (
+ "( A",
+ "A )",
+
+ "||( A B )",
+ "|| (A B )",
+ "|| ( A B)",
+ "|| ( A B",
+ "|| A B )",
+
+ "|| A B",
+ "|| ( A B ) )",
+ "|| || B C",
+
+ "|| ( A B || )",
+
+ "a? A",
+
+ "( || ( || || ( A ) foo? ( B ) ) )",
+ "( || ( || bar? ( A ) foo? ( B ) ) )",
+ )
+
+ for dep_str, expected_result in test_cases:
+ # compute the result once, so a failing case is not parsed twice
+ result = paren_reduce(dep_str, _deprecation_warn=False)
+ self.assertEqual(result, expected_result,
+ "input: '%s' result: %s != %s" % (dep_str, result,
+ expected_result))
+
+ for dep_str in test_cases_xfail:
+ self.assertRaisesMsg(dep_str,
+ InvalidDependString, paren_reduce, dep_str,
+ _deprecation_warn=False)
diff --git a/usr/lib/portage/pym/portage/tests/dep/test_use_reduce.py b/usr/lib/portage/pym/portage/tests/dep/test_use_reduce.py
new file mode 100644
index 0000000..4f65567
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/dep/test_use_reduce.py
@@ -0,0 +1,626 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidDependString
+from portage.dep import Atom, use_reduce
+
+class UseReduceTestCase(object):
+ def __init__(self, deparray, uselist=[], masklist=[],
+ matchall=0, excludeall=[], is_src_uri=False,
+ eapi='0', opconvert=False, flat=False, expected_result=None,
+ is_valid_flag=None, token_class=None):
+ self.deparray = deparray
+ self.uselist = uselist
+ self.masklist = masklist
+ self.matchall = matchall
+ self.excludeall = excludeall
+ self.is_src_uri = is_src_uri
+ self.eapi = eapi
+ self.opconvert = opconvert
+ self.flat = flat
+ self.is_valid_flag = is_valid_flag
+ self.token_class = token_class
+ self.expected_result = expected_result
+
+ def run(self):
+ try:
+ return use_reduce(self.deparray, self.uselist, self.masklist,
+ self.matchall, self.excludeall, self.is_src_uri, self.eapi,
+ self.opconvert, self.flat, self.is_valid_flag, self.token_class)
+ except InvalidDependString as e:
+ raise InvalidDependString("%s: %s" % (e, self.deparray))
+
+class UseReduce(TestCase):
+
+ def always_true(self, unused_parameter):
+ return True
+
+ def always_false(self, unused_parameter):
+ return False
+
+ def testUseReduce(self):
+
+ EAPI_WITH_SRC_URI_ARROWS = "2"
+ EAPI_WITHOUT_SRC_URI_ARROWS = "0"
+
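+ # use_reduce evaluates the USE conditionals in a dependency string
+ # and returns the reduced token list, e.g. (illustrative):
+ #   use_reduce("a? ( A ) b? ( B )", uselist=["a"]) == ["A"]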
+ test_cases = (
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b", "c", "d"],
+ expected_result=["A", "B"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b", "c"],
+ expected_result=["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["b", "c"],
+ expected_result=["B", "D"]
+ ),
+
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ expected_result=["A", "B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ masklist=["a", "c"],
+ expected_result=["C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b"],
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ excludeall=["a", "c"],
+ expected_result=["D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["b"],
+ excludeall=["a", "c"],
+ expected_result=["B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ excludeall=["a", "c"],
+ expected_result=["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ excludeall=["a", "c"],
+ masklist=["b"],
+ expected_result=["A", "D"]
+ ),
+
+ UseReduceTestCase(
+ "a? ( b? ( AB ) )",
+ uselist=["a", "b"],
+ expected_result=["AB"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( AB ) C )",
+ uselist=["a"],
+ expected_result=["C"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( || ( AB CD ) ) )",
+ uselist=["a", "b"],
+ expected_result=["||", ["AB", "CD"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=["a", "b"],
+ expected_result=["||", ["A", "B"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=["a"],
+ expected_result=["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=[],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=[],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["a"],
+ expected_result=["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["b"],
+ expected_result=["B"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["c"],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["a", "c"],
+ expected_result=["||", ["A", "C"]]
+ ),
+
+ # paren_reduce tests
+ UseReduceTestCase(
+ "A",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ expected_result=["||", [["A", "B"], "C"]]),
+ UseReduceTestCase(
+ "|| ( ( A B ) ( C ) )",
+ expected_result=["||", [["A", "B"], "C"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ expected_result=["||", ["A", "B", "C"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ expected_result=["||", ["A", "B", "C", "D"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "|| ( A )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ expected_result=[]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ expected_result=['||', ['A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ expected_result=['A', '||', ['B', 'C']]),
+
+ # SRC_URI stuff
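+ # The "src -> rename" arrow is only accepted when is_src_uri is
+ # True and the EAPI supports arrows (EAPI 2 and later); the xfail
+ # cases further down exercise the rejected combinations.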
+ UseReduceTestCase(
+ "http://foo/bar -> blah.tbz2",
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+ UseReduceTestCase(
+ "http://foo.com/foo http://foo/bar -> blah.tbz2",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+
+ # opconvert tests
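+ # With opconvert=True the "||" operator moves inside its group, so
+ # "|| ( A B )" reduces to [['||', 'A', 'B']] instead of the default
+ # ['||', ['A', 'B']] (compare the paired cases below).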
+ UseReduceTestCase(
+ "A",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C', 'D']]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ opconvert=True,
+ expected_result=['A', 'B']),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C']]),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist=["foo", "bar"],
+ opconvert=False,
+ expected_result=['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+
+ UseReduceTestCase(
+ "|| ( A )",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ opconvert=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ opconvert=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( foo? ( || ( A B ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist=["foo"],
+ opconvert=False,
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D ) )",
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ expected_result=['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist=["foo"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist=["foo"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A ( || ( B ) ) ) )",
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist=["foo", "bar", "baz"],
+ expected_result=['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist=["foo", "bar", "baz"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ # flat test
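+ # With flat=True no nesting is produced; operators and tokens are
+ # emitted in one flat list, e.g. "|| ( A B )" -> ["||", "A", "B"].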
+ UseReduceTestCase(
+ "A",
+ flat=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ flat=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ flat=True,
+ expected_result=["||", "A", "B"]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C"]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C", "D"]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "||", "C", "D", "E"]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "|| ( A )",
+ flat=True,
+ expected_result=["||", "A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ flat=True,
+ expected_result=["||", "||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ flat=True,
+ expected_result=["||", "||", "||"]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ flat=True,
+ expected_result=["||", "||", "A", "||", "B"]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ flat=True,
+ expected_result=["A", "||", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ flat=True,
+ expected_result=["||", "A", "||", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ flat=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ flat=True,
+ expected_result=["A", "B"]),
+
+ # use flag validation
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ is_valid_flag=self.always_true,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag=self.always_true,
+ expected_result=[]),
+
+ # token_class
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ uselist=["foo"],
+ token_class=Atom,
+ expected_result=["dev-libs/A"]),
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ token_class=Atom,
+ expected_result=[]),
+ )
+
+ test_cases_xfail = (
+ UseReduceTestCase("? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("( A"),
+ UseReduceTestCase("A )"),
+ UseReduceTestCase("||( A B )"),
+ UseReduceTestCase("|| (A B )"),
+ UseReduceTestCase("|| ( A B)"),
+ UseReduceTestCase("|| ( A B"),
+ UseReduceTestCase("|| A B )"),
+ UseReduceTestCase("|| A B"),
+ UseReduceTestCase("|| ( A B ) )"),
+ UseReduceTestCase("|| || B C"),
+ UseReduceTestCase("|| ( A B || )"),
+ UseReduceTestCase("a? A"),
+ UseReduceTestCase("( || ( || || ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("( || ( || bar? ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("foo?"),
+ UseReduceTestCase("foo? || ( A )"),
+ UseReduceTestCase("|| ( )"),
+ UseReduceTestCase("foo? ( )"),
+
+ # SRC_URI stuff
+ UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri=True, eapi=EAPI_WITHOUT_SRC_URI_ARROWS),
+ UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=False, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase(
+ "A",
+ opconvert=True,
+ flat=True),
+
+ # use flag validation
+ UseReduceTestCase("1.0? ( A )"),
+ UseReduceTestCase("!1.0? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("!?? ( A )"),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag=self.always_false,
+ ),
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ is_valid_flag=self.always_false,
+ ),
+
+ # token_class
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ token_class=Atom),
+ UseReduceTestCase(
+ "A(B",
+ token_class=Atom),
+ )
+
+ for test_case in test_cases:
+ # If it fails then show the input, since lots of our
+ # test cases have the same output but different input,
+ # making it difficult to deduce which test has failed.
+ # Run each case once and reuse the result in the message.
+ result = test_case.run()
+ self.assertEqual(result, test_case.expected_result,
+ "input: '%s' result: %s != %s" % (test_case.deparray,
+ result, test_case.expected_result))
+
+ for test_case in test_cases_xfail:
+ self.assertRaisesMsg(test_case.deparray, (InvalidDependString, ValueError), test_case.run)
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/__init__.py b/usr/lib/portage/pym/portage/tests/ebuild/__init__.py
new file mode 100644
index 0000000..e2d487e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/__test__.py b/usr/lib/portage/pym/portage/tests/ebuild/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_array_fromfile_eof.py b/usr/lib/portage/pym/portage/tests/ebuild/test_array_fromfile_eof.py
new file mode 100644
index 0000000..efcd915
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_array_fromfile_eof.py
@@ -0,0 +1,47 @@
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import tempfile
+
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.tests import TestCase
+
+class ArrayFromfileEofTestCase(TestCase):
+
+ def testArrayFromfileEof(self):
+ # This tests if the following python issue is fixed
+ # in the currently running version of python:
+ # http://bugs.python.org/issue5334
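+ # The workaround below reads in chunks and treats both an EOFError
+ # and an empty array as end-of-file, keeping whatever bytes arrived
+ # before the exception.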
+
+ input_data = "an arbitrary string"
+ input_bytes = _unicode_encode(input_data,
+ encoding='utf_8', errors='strict')
+ f = tempfile.TemporaryFile()
+ f.write(input_bytes)
+
+ f.seek(0)
+ data = []
+ eof = False
+ while not eof:
+ a = array.array('B')
+ try:
+ a.fromfile(f, len(input_bytes) + 1)
+ except (EOFError, IOError):
+ # python-3.0 lost data here
+ eof = True
+
+ if not a:
+ eof = True
+ else:
+ try:
+ # Python >=3.2
+ data.append(a.tobytes())
+ except AttributeError:
+ data.append(a.tostring())
+
+ f.close()
+
+ self.assertEqual(input_data, _unicode_decode(b''.join(data),
+ encoding='utf_8', errors='strict'))
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_config.py b/usr/lib/portage/pym/portage/tests/ebuild/test_config.py
new file mode 100644
index 0000000..20aac51
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_config.py
@@ -0,0 +1,345 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import tempfile
+
+import portage
+from portage import os, shutil, _encodings
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom
+from portage.package.ebuild.config import config
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+from portage.util import normalize_path
+
+class ConfigTestCase(TestCase):
+
+ def testClone(self):
+ """
+ Test the clone via constructor.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ settings = config(clone=playground.settings)
+ result = playground.run(["=dev-libs/A-1"])
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/A-1"))
+ settings.setcpv(pkg)
+
+ # clone after setcpv tests deepcopy of LazyItemsDict
+ settings2 = config(clone=settings)
+ finally:
+ playground.cleanup()
+
+ def testFeaturesMutation(self):
+ """
+ Test whether mutation of config.features updates the FEATURES
+ variable and persists through config.regenerate() calls. Also
+ verify that features_set._prune_overrides() works correctly.
+ """
+ playground = ResolverPlayground()
+ try:
+ settings = config(clone=playground.settings)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ settings.features.discard('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ # before: ['noclean', '-noclean', 'noclean']
+ settings.features._prune_overrides()
+ # after: ['noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 1)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 0)
+
+ settings.features.remove('noclean')
+
+ # before: ['noclean', '-noclean']
+ settings.features._prune_overrides()
+ # after: ['-noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 0)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 1)
+ finally:
+ playground.cleanup()
+
+ def testLicenseManager(self):
+
+ user_config = {
+ "package.license":
+ (
+ "dev-libs/* TEST",
+ "dev-libs/A -TEST2",
+ "=dev-libs/A-2 TEST3 @TEST",
+ "*/* @EULA TEST2",
+ "=dev-libs/C-1 *",
+ "=dev-libs/C-2 -*",
+ ),
+ }
+
+ playground = ResolverPlayground(user_config=user_config)
+ try:
+ portage.util.noiselimit = -2
+
+ license_group_locations = (os.path.join(playground.settings.repositories["test_repo"].location, "profiles"),)
+ pkg_license = os.path.join(playground.eroot, "etc", "portage")
+
+ lic_man = LicenseManager(license_group_locations, pkg_license)
+
+ self.assertEqual(lic_man._accept_license_str, None)
+ self.assertEqual(lic_man._accept_license, None)
+ self.assertEqual(lic_man._license_groups, {"EULA": frozenset(["TEST"])})
+ self.assertEqual(lic_man._undef_lic_groups, set(["TEST"]))
+
+ self.assertEqual(lic_man.extract_global_changes(), "TEST TEST2")
+ self.assertEqual(lic_man.extract_global_changes(), "")
+
+ lic_man.set_accept_license_str("TEST TEST2")
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/B-1", "0", None), ["TEST", "TEST2", "TEST"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-1", "0", None), ["TEST", "TEST2", "TEST", "-TEST2"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-2", "0", None), ["TEST", "TEST2", "TEST", "-TEST2", "TEST3", "@TEST"])
+
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/B-1", [], "TEST", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-1", [], "-TEST2", "0", None), "")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-1", [], "TEST5", "0", None), "TEST5")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-2", [], "TEST2", "0", None), "")
+
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/B-1", [], "TEST", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-1", [], "-TEST2", "0", None), ["-TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-3", [], "|| ( TEST2 || ( TEST3 TEST4 ) )", "0", None), ["TEST2", "TEST3", "TEST4"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-1", [], "TEST5", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-2", [], "TEST2", "0", None), ["TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/D-1", [], "", "0", None), [])
+ finally:
+ portage.util.noiselimit = 0
+ playground.cleanup()
+
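+	# Minimal usage sketch for LicenseManager, mirroring the calls above
+	# (arguments follow this test, not a documented public contract):
+	#
+	#     lic_man = LicenseManager(license_group_locations, abs_user_config)
+	#     lic_man.set_accept_license_str("TEST TEST2")
+	#     missing = lic_man.getMissingLicenses(cpv, use, license_str, slot, repo)
+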
+ def testPackageMaskOrder(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/B-1": { },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { },
+ "dev-libs/E-1": { },
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "package.mask":
+ (
+ "dev-libs/A",
+ "dev-libs/C",
+ ),
+ }
+ }
+
+ profile = {
+ "package.mask":
+ (
+ "-dev-libs/A",
+ "dev-libs/B",
+ "-dev-libs/B",
+ "dev-libs/D",
+ ),
+ }
+
+ user_config = {
+ "package.mask":
+ (
+ "-dev-libs/C",
+ "-dev-libs/D",
+ "dev-libs/E",
+ ),
+ }
+
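+		# Stacking expectation encoded by the cases below: user-level
+		# entries reverse both repo masks (C) and profile masks (D), the
+		# profile's "-dev-libs/B" cancels its own earlier mask, but the
+		# profile's "-dev-libs/A" does not lift the repo-level mask, and
+		# the user-level "dev-libs/E" masks E outright.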
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ mergelist = ["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, \
+ profile=profile, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testManifest(self):
+
+ distfiles = {
+ 'B-2.tar.bz2': b'binary\0content',
+ 'C-2.zip': b'binary\0content',
+ 'C-2.tar.bz2': b'binary\0content',
+ }
+
+ ebuilds = {
+ "dev-libs/A-1::old_repo": { },
+ "dev-libs/A-2::new_repo": { },
+ "dev-libs/B-2::new_repo": {"SRC_URI" : "B-2.tar.bz2"},
+ "dev-libs/C-2::new_repo": {"SRC_URI" : "C-2.zip C-2.tar.bz2"},
+ }
+
+ repo_configs = {
+ "new_repo": {
+ "layout.conf":
+ (
+ "profile-formats = pms",
+ "thin-manifests = true",
+ "manifest-hashes = SHA256 SHA512 WHIRLPOOL",
+ "# use implicit masters"
+ ),
+ }
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ mergelist= ["dev-libs/A-1"],
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ mergelist= ["dev-libs/A-2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ repo_configs=repo_configs, distfiles=distfiles)
+ settings = playground.settings
+
+ new_repo_config = settings.repositories["new_repo"]
+ old_repo_config = settings.repositories["old_repo"]
+ self.assertTrue(len(new_repo_config.masters) > 0, "new_repo has no default master")
+ self.assertEqual(new_repo_config.masters[0].user_location, playground.settings.repositories["test_repo"].location,
+ "new_repo default master is not test_repo")
+ self.assertEqual(new_repo_config.thin_manifest, True,
+ "new_repo_config.thin_manifest != True")
+
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "A", "Manifest")
+ self.assertNotExists(new_manifest_file)
+
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "B", "Manifest")
+ f = open(new_manifest_file)
+ self.assertEqual(len(list(f)), 1)
+ f.close()
+
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "C", "Manifest")
+ f = open(new_manifest_file)
+ self.assertEqual(len(list(f)), 2)
+ f.close()
+
+ old_manifest_file = os.path.join(old_repo_config.location, "dev-libs", "A", "Manifest")
+ f = open(old_manifest_file)
+ self.assertEqual(len(list(f)), 1)
+ f.close()
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
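+	# With thin-manifests=true (layout.conf above), a Manifest holds only
+	# DIST entries: packages without SRC_URI get no Manifest at all, so
+	# A has none, B has one line and C has two.  A hand check might look
+	# like this sketch (illustrative):
+	#
+	#     with open(manifest_path) as f:
+	#         assert all(line.startswith("DIST ") for line in f)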
+
+ def testSetCpv(self):
+ """
+		Test config.setcpv(), including application of per-package
+		package.env settings (see bug #522362 below).
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": {"IUSE": "static-libs"},
+ "dev-libs/B-1": {"IUSE": "static-libs"},
+ }
+
+ env_files = {
+ "A" : ("USE=\"static-libs\"",)
+ }
+
+ package_env = (
+ "dev-libs/A A",
+ )
+
+ eprefix = normalize_path(tempfile.mkdtemp())
+ playground = None
+ try:
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ os.makedirs(user_config_dir)
+
+ with io.open(os.path.join(user_config_dir, "package.env"),
+ mode='w', encoding=_encodings['content']) as f:
+ for line in package_env:
+ f.write(line + "\n")
+
+ env_dir = os.path.join(user_config_dir, "env")
+ os.makedirs(env_dir)
+ for k, v in env_files.items():
+ with io.open(os.path.join(env_dir, k), mode='w',
+ encoding=_encodings['content']) as f:
+ for line in v:
+ f.write(line + "\n")
+
+ playground = ResolverPlayground(eprefix=eprefix, ebuilds=ebuilds)
+ settings = config(clone=playground.settings)
+
+ result = playground.run(["=dev-libs/A-1"])
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/A-1"))
+ settings.setcpv(pkg)
+ self.assertTrue("static-libs" in
+ settings["PORTAGE_USE"].split())
+
+ # Test bug #522362, where a USE=static-libs package.env
+ # setting leaked from one setcpv call to the next.
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/B-1"))
+ settings.setcpv(pkg)
+ self.assertTrue("static-libs" not in
+ settings["PORTAGE_USE"].split())
+
+ finally:
+ if playground is None:
+ shutil.rmtree(eprefix)
+ else:
+ playground.cleanup()
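+
+	# Sketch of the mechanism exercised above (paths per this fixture):
+	# etc/portage/package.env maps atoms to files under etc/portage/env/,
+	# and each setcpv() call must apply only the entries matching the
+	# current package -- bug #522362 was a leak across successive calls.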
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py b/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
new file mode 100644
index 0000000..61392dd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
@@ -0,0 +1,137 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.Package import Package
+from _emerge.PipeReader import PipeReader
+
+class DoebuildProcess(ForkProcess):
+
+ __slots__ = ('doebuild_kwargs', 'doebuild_pargs')
+
+ def _run(self):
+ return portage.doebuild(*self.doebuild_pargs, **self.doebuild_kwargs)
+
+class DoebuildFdPipesTestCase(TestCase):
+
+ def testDoebuild(self):
+ """
+ Invoke portage.doebuild() with the fd_pipes parameter, and
+ check that the expected output appears in the pipe. This
+ functionality is not used by portage internally, but it is
+ supported for API consumers (see bug #475812).
+ """
+
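+		# Hedged sketch of the call under test; kwargs mirror the
+		# DoebuildProcess invocation below, and fd_pipes maps child file
+		# descriptors onto parent-side descriptors:
+		#
+		#     pr, pw = os.pipe()
+		#     portage.doebuild(ebuild_path, phase, settings=settings,
+		#         mydbapi=portdb, tree="porttree", vartree=vartree,
+		#         fd_pipes={1: pw, 2: pw}, prev_mtimes={})
+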
+ ebuild_body = textwrap.dedent("""
+ S=${WORKDIR}
+ pkg_info() { echo info ; }
+ pkg_nofetch() { echo nofetch ; }
+ pkg_pretend() { echo pretend ; }
+ pkg_setup() { echo setup ; }
+ src_unpack() { echo unpack ; }
+ src_prepare() { echo prepare ; }
+ src_configure() { echo configure ; }
+ src_compile() { echo compile ; }
+ src_test() { echo test ; }
+ src_install() { echo install ; }
+ """)
+
+ ebuilds = {
+ 'app-misct/foo-1': {
+ 'EAPI' : '5',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ("find", "prepstrip", "sed", "scanelf")
+ true_binary = portage.process.find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ QueryCommand._db = playground.trees
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = portage.config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ settings.features.add("noauto")
+ settings.features.add("test")
+ settings['PORTAGE_PYTHON'] = portage._python_interpreter
+ settings['PORTAGE_QUIET'] = "1"
+
+ fake_bin = os.path.join(settings["EPREFIX"], "bin")
+ portage.util.ensure_dirs(fake_bin)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+
+ settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
+ settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")
+
+ cpv = 'app-misct/foo-1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ ebuildpath = portdb.findname(cpv)
+ self.assertNotEqual(ebuildpath, None)
+
+ for phase in ('info', 'nofetch',
+ 'pretend', 'setup', 'unpack', 'prepare', 'configure',
+ 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):
+
+ pr, pw = os.pipe()
+
+ producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
+ doebuild_kwargs={"settings" : settings,
+ "mydbapi": portdb, "tree": "porttree",
+ "vartree": root_config.trees["vartree"],
+ "fd_pipes": {1: pw, 2: pw},
+ "prev_mtimes": {}})
+
+ consumer = PipeReader(
+ input_files={"producer" : pr})
+
+ task_scheduler = TaskScheduler(iter([producer, consumer]),
+ max_jobs=2)
+
+ try:
+ task_scheduler.start()
+ finally:
+ # PipeReader closes pr
+ os.close(pw)
+
+ task_scheduler.wait()
+ output = portage._unicode_decode(
+ consumer.getvalue()).rstrip("\n")
+
+ if task_scheduler.returncode != os.EX_OK:
+ portage.writemsg(output, noiselevel=-1)
+
+ self.assertEqual(task_scheduler.returncode, os.EX_OK)
+
+ if phase not in ('clean', 'merge', 'qmerge'):
+ self.assertEqual(phase, output)
+
+ finally:
+ playground.cleanup()
+ QueryCommand._db = None
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_spawn.py b/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_spawn.py
new file mode 100644
index 0000000..ae9a5c5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_doebuild_spawn.py
@@ -0,0 +1,105 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+from portage import os
+from portage import _python_interpreter
+from portage import _shell_quote
+from portage.const import EBUILD_SH_BINARY
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+
+class DoebuildSpawnTestCase(TestCase):
+ """
+ Invoke portage.package.ebuild.doebuild.spawn() with a
+ minimal environment. This gives coverage to some of
+ the ebuild execution internals, like ebuild.sh,
+ AbstractEbuildProcess, and EbuildIpcDaemon.
+ """
+
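+	# In sketch form, the command spawned for each phase below is
+	# (assuming the standard ebuild.sh entry point, shell-quoted):
+	#
+	#     <PORTAGE_BIN_PATH>/ebuild.sh <phase>
+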
+ def testDoebuildSpawn(self):
+
+ ebuild_body = textwrap.dedent("""
+ pkg_nofetch() { : ; }
+ """)
+
+ ebuilds = {
+ 'sys-apps/portage-2.1': {
+ 'EAPI' : '2',
+ 'IUSE' : 'build doc epydoc python3 selinux',
+ 'KEYWORDS' : 'x86',
+ 'LICENSE' : 'GPL-2',
+ 'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
+ 'SLOT' : '0',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ cpv = 'sys-apps/portage-2.1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ settings['PORTAGE_PYTHON'] = _python_interpreter
+ settings['PORTAGE_BUILDDIR'] = os.path.join(
+ settings['PORTAGE_TMPDIR'], cpv)
+ settings['T'] = os.path.join(
+ settings['PORTAGE_BUILDDIR'], 'temp')
+ for x in ('PORTAGE_BUILDDIR', 'T'):
+ os.makedirs(settings[x])
+ # Create a fake environment, to pretend as if the ebuild
+ # has been sourced already.
+ open(os.path.join(settings['T'], 'environment'), 'wb').close()
+
+ scheduler = SchedulerInterface(global_event_loop())
+ for phase in ('_internal_test',):
+
+ # Test EbuildSpawnProcess by calling doebuild.spawn() with
+ # returnpid=False. This case is no longer used by portage
+ # internals since EbuildPhase is used instead and that passes
+ # returnpid=True to doebuild.spawn().
+ rval = doebuild_spawn("%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))), phase),
+ settings, free=1)
+ self.assertEqual(rval, os.EX_OK)
+
+ ebuild_phase = EbuildPhase(background=False,
+ phase=phase, scheduler=scheduler,
+ settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ ebuild_phase = MiscFunctionsProcess(background=False,
+ commands=['success_hooks'],
+ scheduler=scheduler, settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_ipc_daemon.py b/usr/lib/portage/pym/portage/tests/ebuild/test_ipc_daemon.py
new file mode 100644
index 0000000..a871076
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_ipc_daemon.py
@@ -0,0 +1,157 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+import time
+from portage import os
+from portage import shutil
+from portage import _python_interpreter
+from portage.tests import TestCase
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.const import BASH_BINARY
+from portage.locks import hardlock_cleanup
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.util import ensure_dirs
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+
+class SleepProcess(ForkProcess):
+ """
+ Emulate the sleep command, in order to ensure a consistent
+ return code when it is killed by SIGTERM (see bug #437180).
+ """
+ __slots__ = ('seconds',)
+ def _run(self):
+ time.sleep(self.seconds)
+
+class IpcDaemonTestCase(TestCase):
+
+ _SCHEDULE_TIMEOUT = 40000 # 40 seconds
+
+ def testIpcDaemon(self):
+ event_loop = global_event_loop()
+ tmpdir = tempfile.mkdtemp()
+ build_dir = None
+ try:
+ env = {}
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ env['PORTAGE_PYTHON'] = _python_interpreter
+ env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ build_dir = EbuildBuildDir(
+ scheduler=event_loop,
+ settings=env)
+ build_dir.lock()
+ ensure_dirs(env['PORTAGE_BUILDDIR'])
+
+ input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
+ os.mkfifo(input_fifo)
+ os.mkfifo(output_fifo)
+
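+			# Protocol sketch, as exercised here (not a stable interface):
+			# the ebuild-ipc client spawned below writes a command to the
+			# .ipc_in fifo and waits for the daemon's reply on .ipc_out;
+			# the 'exit' command simply echoes back the requested code.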
+ for exitcode in (0, 1, 2):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo)
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
+ env=env)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ task_scheduler.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ start_time = time.time()
+ self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)
+
+ hardlock_cleanup(env['PORTAGE_BUILDDIR'],
+ remove_all_locks=True)
+
+ self.assertEqual(self.received_command, True,
+ "command not received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(exit_command.exitcode, exitcode)
+
+ # Intentionally short timeout test for EventLoop/AsyncScheduler.
+ # Use a ridiculously long sleep_time_s in case the user's
+ # system is heavily loaded (see bug #436334).
+			sleep_time_s = 600 # 600.000 seconds
+ short_timeout_ms = 10 # 0.010 seconds
+
+ for i in range(3):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo)
+ proc = SleepProcess(seconds=sleep_time_s)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ task_scheduler.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ start_time = time.time()
+ self._run(event_loop, task_scheduler, short_timeout_ms)
+
+ hardlock_cleanup(env['PORTAGE_BUILDDIR'],
+ remove_all_locks=True)
+
+ self.assertEqual(self.received_command, False,
+ "command received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(proc.returncode == os.EX_OK, False)
+
+ finally:
+ if build_dir is not None:
+ build_dir.unlock()
+ shutil.rmtree(tmpdir)
+
+ def _timeout_callback(self):
+ self._timed_out = True
+
+ def _run(self, event_loop, task_scheduler, timeout):
+ self._timed_out = False
+ timeout_id = event_loop.timeout_add(timeout, self._timeout_callback)
+
+ try:
+ task_scheduler.start()
+ while not self._timed_out and task_scheduler.poll() is None:
+ event_loop.iteration()
+ if self._timed_out:
+ task_scheduler.cancel()
+ task_scheduler.wait()
+ finally:
+ event_loop.source_remove(timeout_id)
diff --git a/usr/lib/portage/pym/portage/tests/ebuild/test_spawn.py b/usr/lib/portage/pym/portage/tests/ebuild/test_spawn.py
new file mode 100644
index 0000000..a38e109
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/ebuild/test_spawn.py
@@ -0,0 +1,57 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+import tempfile
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.const import BASH_BINARY
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.SpawnProcess import SpawnProcess
+
+class SpawnTestCase(TestCase):
+
+ def testLogfile(self):
+ logfile = None
+ try:
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ null_fd = os.open('/dev/null', os.O_RDWR)
+ test_string = 2 * "blah blah blah\n"
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ "echo -n '%s'" % test_string],
+ env={},
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: null_fd,
+ 2: null_fd
+ },
+ scheduler=global_event_loop(),
+ logfile=logfile)
+ proc.start()
+ os.close(null_fd)
+ self.assertEqual(proc.wait(), os.EX_OK)
+ f = io.open(_unicode_encode(logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ log_content = f.read()
+ f.close()
+ # When logging passes through a pty, this comparison will fail
+ # unless the oflag terminal attributes have the termios.OPOST
+			# bit disabled. Otherwise, transformations such as \n -> \r\n
+ # may occur.
+ self.assertEqual(test_string, log_content)
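+
+			# Sketch of the termios tweak the comment above refers to
+			# (illustrative; this test avoids a pty instead):
+			#
+			#     import termios
+			#     attrs = termios.tcgetattr(fd)
+			#     attrs[1] &= ~termios.OPOST  # oflag: no output post-processing
+			#     termios.tcsetattr(fd, termios.TCSANOW, attrs)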
+ finally:
+ if logfile:
+ try:
+ os.unlink(logfile)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
diff --git a/usr/lib/portage/pym/portage/tests/emerge/__init__.py b/usr/lib/portage/pym/portage/tests/emerge/__init__.py
new file mode 100644
index 0000000..532918b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/emerge/__test__.py b/usr/lib/portage/pym/portage/tests/emerge/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/emerge/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/emerge/test_emerge_slot_abi.py b/usr/lib/portage/pym/portage/tests/emerge/test_emerge_slot_abi.py
new file mode 100644
index 0000000..d1f2d92
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/emerge/test_emerge_slot_abi.py
@@ -0,0 +1,178 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class SlotAbiEmergeTestCase(TestCase):
+
+ def testSlotAbiEmerge(self):
+
+ debug = False
+
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+
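+		# Sub-slot sketch: with EAPI 4-slot-abi, the ":2=" operator in
+		# dbus-glib's RDEPEND is recorded against the sub-slot actually
+		# built (e.g. dev-libs/glib:2/2.30=), so moving glib between
+		# sub-slots 2.30 and 2.32 must trigger a dbus-glib rebuild --
+		# which the command sequence below verifies in both directions.
+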
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ trees = playground.trees
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ package_mask_path = os.path.join(user_config_dir, "package.mask")
+
+ portage_python = portage._python_interpreter
+ ebuild_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "ebuild"))
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+
+ test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
+ self.assertFalse(test_ebuild is None)
+
+ test_commands = (
+ emerge_cmd + ("--oneshot", "dev-libs/glib",),
+ (lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
+ (BASH_BINARY, "-c", "echo %s >> %s" %
+ tuple(map(portage._shell_quote,
+ (">=dev-libs/glib-2.32", package_mask_path,)))),
+ emerge_cmd + ("--oneshot", "dev-libs/glib",),
+ (lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
+ )
+
+ distdir = playground.distdir
+ pkgdir = playground.pkgdir
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PATH" : path,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [distdir, fake_bin, portage_tmpdir,
+ user_config_dir, var_cache_edb]
+ true_symlinks = ["chown", "chgrp"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+ # non-empty system set keeps --depclean quiet
+ with open(os.path.join(profile_path, "packages"), 'w') as f:
+ f.write("*dev-libs/token-system-pkg")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/emerge/test_global_updates.py b/usr/lib/portage/pym/portage/tests/emerge/test_global_updates.py
new file mode 100644
index 0000000..eb54310
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/emerge/test_global_updates.py
@@ -0,0 +1,41 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.update import parse_updates
+from portage.dep import Atom
+
+class ParseUpdatesTestCase(TestCase):
+
+ def testParseUpdates(self):
+ test_cases = (
+ (
+ """
+slotmove invalid_atom 0 3
+slotmove !=invalid/blocker-3* 0 3
+slotmove =valid/atom-3* 0 3 invalid_extra_token
+slotmove =valid/atom-3* 0 3
+slotmove =valid/atom-3* 0 3/3.1
+slotmove =valid/atom-3* 0/0 3
+move valid/atom1 valid/atom2 invalid_extra_token
+move valid/atom1 invalid_atom2
+move invalid_atom1 valid/atom2
+move !invalid/blocker1 valid/atom2
+move valid/atom1 !invalid/blocker2
+move =invalid/operator-1* valid/atom2
+move valid/atom1 =invalid/operator-2*
+move valid/atom1 valid/atom2
+""",
+ [
+ ['slotmove', Atom('=valid/atom-3*'), '0', '3'],
+ ['move', Atom('valid/atom1'), Atom('valid/atom2')],
+ ],
+ 12,
+ ),
+
+ )
+
+ for input_content, expected_output, expected_error_count in test_cases:
+ output_data, errors = parse_updates(input_content)
+ self.assertEqual(output_data, expected_output)
+ self.assertEqual(len(errors), expected_error_count)
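+
+		# Call sketch, mirroring the expectations above (illustrative):
+		#
+		#     output_data, errors = parse_updates(
+		#         "move valid/atom1 valid/atom2\n")
+		#     # -> ([['move', Atom('valid/atom1'), Atom('valid/atom2')]], [])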
diff --git a/usr/lib/portage/pym/portage/tests/emerge/test_simple.py b/usr/lib/portage/pym/portage/tests/emerge/test_simple.py
new file mode 100644
index 0000000..6fc81ab
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/emerge/test_simple.py
@@ -0,0 +1,446 @@
+# Copyright 2011-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_BASE_PATH,
+ PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import (ensure_dirs, find_updated_config_files,
+ shlex_split)
+
+class SimpleEmergeTestCase(TestCase):
+
+ def _have_python_xml(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return False
+ return True
+
+ def testSimple(self):
+
+ debug = False
+
+ install_something = """
+S="${WORKDIR}"
+
+pkg_pretend() {
+ einfo "called pkg_pretend for $CATEGORY/$PF"
+}
+
+src_install() {
+ einfo "installing something..."
+ insinto /usr/lib/${P}
+ echo "blah blah blah" > "${T}"/regular-file
+ doins "${T}"/regular-file
+ dosym regular-file /usr/lib/${P}/symlink || die
+
+ # Test CONFIG_PROTECT
+ insinto /etc
+ newins "${T}"/regular-file ${PN}-${SLOT%/*}
+
+ # Test code for bug #381629, using a copyright symbol encoded with latin-1.
+ # We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
+ # works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
+ # some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
+ # running tests for Python 3.2 (even though it's bash that is ultimately
+ # responsible for performing the transformation).
+ local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
+ insinto "${latin_1_dir}"
+ echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
+ doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
+ dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
+}
+
+pkg_config() {
+ einfo "called pkg_config for $CATEGORY/$PF"
+}
+
+pkg_info() {
+ einfo "called pkg_info for $CATEGORY/$PF"
+}
+
+pkg_preinst() {
+ einfo "called pkg_preinst for $CATEGORY/$PF"
+
+ # Test that has_version and best_version work correctly with
+ # prefix (involves internal ROOT -> EROOT calculation in order
+ # to support ROOT override via the environment with EAPIs 3
+ # and later which support prefix).
+ if has_version $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
+ if has_version --host-root $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ fi
+}
+
+"""
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": install_something,
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
+ "dev-libs/B-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": install_something,
+ },
+ "virtual/foo-0": {
+ "EAPI" : "5",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ "USE": "flag",
+ },
+ "dev-libs/B-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "USE": "flag",
+ },
+ "dev-libs/depclean-me-1": {
+ "EAPI" : "5",
+ "IUSE" : "",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "USE": "",
+ },
+ "app-misc/depclean-me-1": {
+ "EAPI" : "5",
+ "IUSE" : "",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "dev-libs/depclean-me",
+ "USE": "",
+ },
+ }
+
+ metadata_xml_files = (
+ (
+ "dev-libs/A",
+ {
+ "herd" : "base-system",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ (
+ "dev-libs/B",
+ {
+ "herd" : "no-herd",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ trees = playground.trees
+ portdb = trees[eroot]["porttree"].dbapi
+ test_repo_location = settings.repositories["test_repo"].location
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ cachedir = os.path.join(var_cache_edb, "dep")
+ cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")
+
+ portage_python = portage._python_interpreter
+ dispatch_conf_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "dispatch-conf"))
+ ebuild_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "ebuild"))
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+ emaint_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "emaint"))
+ env_update_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "env-update"))
+ etc_update_cmd = (BASH_BINARY,
+ os.path.join(self.sbindir, "etc-update"))
+ fixpackages_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "fixpackages"))
+ portageq_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "portageq"))
+ quickpkg_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "quickpkg"))
+ regenworld_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "regenworld"))
+
+ rm_binary = find_binary("rm")
+ self.assertEqual(rm_binary is None, False,
+ "rm command not found")
+ rm_cmd = (rm_binary,)
+
+ egencache_extra_args = []
+ if self._have_python_xml():
+ egencache_extra_args.append("--update-use-local-desc")
+
+ test_ebuild = portdb.findname("dev-libs/A-1")
+ self.assertFalse(test_ebuild is None)
+
+ cross_prefix = os.path.join(eprefix, "cross_prefix")
+
+ test_commands = (
+ env_update_cmd,
+ portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
+ "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
+ etc_update_cmd,
+ dispatch_conf_cmd,
+ emerge_cmd + ("--version",),
+ emerge_cmd + ("--info",),
+ emerge_cmd + ("--info", "--verbose"),
+ emerge_cmd + ("--list-sets",),
+ emerge_cmd + ("--check-news",),
+ rm_cmd + ("-rf", cachedir),
+ rm_cmd + ("-rf", cachedir_pregen),
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ egencache_cmd + ("--update",) + tuple(egencache_extra_args),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--metadata",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--metadata",),
+ emerge_cmd + ("--metadata",),
+ rm_cmd + ("-rf", cachedir),
+ emerge_cmd + ("--oneshot", "virtual/foo"),
+ lambda: self.assertFalse(os.path.exists(
+ os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ ({"FEATURES" : "unmerge-backup"},) + \
+ emerge_cmd + ("--unmerge", "virtual/foo"),
+ lambda: self.assertTrue(os.path.exists(
+ os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ emerge_cmd + ("--pretend", "dev-libs/A"),
+ ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
+ emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
+ emerge_cmd + ("-p", "dev-libs/B"),
+ emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
+ emerge_cmd + ("-B", "dev-libs/B",),
+ emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),
+
+ # trigger clean prior to pkg_pretend as in bug #390711
+ ebuild_cmd + (test_ebuild, "unpack"),
+ emerge_cmd + ("--oneshot", "dev-libs/A",),
+
+ emerge_cmd + ("--noreplace", "dev-libs/A",),
+ emerge_cmd + ("--config", "dev-libs/A",),
+ emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
+ emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
+ emerge_cmd + ("--pretend", "--depclean",),
+ emerge_cmd + ("--depclean",),
+ quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
+ # Test bug #523684, where a file renamed or removed by the
+ # admin forces replacement files to be merged with config
+ # protection.
+ lambda: self.assertEqual(0,
+ len(list(find_updated_config_files(eroot,
+ shlex_split(settings["CONFIG_PROTECT"]))))),
+ lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ lambda: self.assertEqual(1,
+ len(list(find_updated_config_files(eroot,
+ shlex_split(settings["CONFIG_PROTECT"]))))),
+ emaint_cmd + ("--check", "all"),
+ emaint_cmd + ("--fix", "all"),
+ fixpackages_cmd,
+ regenworld_cmd,
+ portageq_cmd + ("match", eroot, "dev-libs/A"),
+ portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
+ portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
+ portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
+ portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
+ portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
+ portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
+ portageq_cmd + ("owners", eroot, eroot + "usr"),
+ emerge_cmd + ("-p", eroot + "usr"),
+ emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
+ emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+
+ # Test cross-prefix usage, including chpathtool for binpkgs.
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("dev-libs/A",),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+ )
+
+ distdir = playground.distdir
+ pkgdir = playground.pkgdir
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+ user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)
+
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "CLEAN_DELAY" : "0",
+ "DISTDIR" : distdir,
+ "EMERGE_WARNING_DELAY" : "0",
+ "INFODIR" : "",
+ "INFOPATH" : "",
+ "PATH" : path,
+ "PKGDIR" : pkgdir,
+ "PORTAGE_INST_GID" : str(portage.data.portage_gid),
+ "PORTAGE_INST_UID" : str(portage.data.portage_uid),
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PORTAGE_TMPDIR" : portage_tmpdir,
+ "PYTHONPATH" : pythonpath,
+ "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+ dirs = [cachedir, cachedir_pregen, distdir, fake_bin,
+ portage_tmpdir, updates_dir,
+ user_config_dir, var_cache_edb]
+ etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ for x in etc_symlinks:
+ os.symlink(os.path.join(self.cnf_etc_path, x),
+ os.path.join(eprefix, "etc", x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+ # non-empty system set keeps --depclean quiet
+ with open(os.path.join(profile_path, "packages"), 'w') as f:
+ f.write("*dev-libs/token-system-pkg")
+ for cp, xml_data in metadata_xml_files:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
+ f.write(playground.metadata_xml_template % xml_data)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write("""
+slotmove =app-doc/pms-3 2 3
+move dev-util/git dev-vcs/git
+""")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for args in test_commands:
+
+ if hasattr(args, '__call__'):
+ args()
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args,
+ env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/env/__init__.py b/usr/lib/portage/pym/portage/tests/env/__init__.py
new file mode 100644
index 0000000..cbeabe5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/usr/lib/portage/pym/portage/tests/env/__test__.py b/usr/lib/portage/pym/portage/tests/env/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/env/config/__init__.py b/usr/lib/portage/pym/portage/tests/env/config/__init__.py
new file mode 100644
index 0000000..ef5cc43
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/config/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/usr/lib/portage/pym/portage/tests/env/config/__test__.py b/usr/lib/portage/pym/portage/tests/env/config/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/usr/lib/portage/pym/portage/tests/env/config/test_PackageKeywordsFile.py
new file mode 100644
index 0000000..609c0fd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -0,0 +1,40 @@
+# test_PackageKeywordsFile.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageKeywordsFile
+from tempfile import mkstemp
+
+class PackageKeywordsFileTestCase(TestCase):
+
+ cpv = ['sys-apps/portage']
+ keywords = ['~x86', 'amd64', '-mips']
+
+ def testPackageKeywordsFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+
+ self.BuildFile()
+ try:
+ f = PackageKeywordsFile(self.fname)
+ f.load()
+ i = 0
+ for cpv, keyword in f.items():
+ self.assertEqual(cpv, self.cpv[i])
+				for k in keyword:
+					self.assertTrue(k in self.keywords)
+ i = i + 1
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for c in self.cpv:
+ f.write("%s %s\n" % (c, ' '.join(self.keywords)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/usr/lib/portage/pym/portage/tests/env/config/test_PackageMaskFile.py b/usr/lib/portage/pym/portage/tests/env/config/test_PackageMaskFile.py
new file mode 100644
index 0000000..0c5b30f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/test_PackageMaskFile.py
@@ -0,0 +1,29 @@
+# test_PackageMaskFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.env.config import PackageMaskFile
+from portage.tests import TestCase, test_cps
+from tempfile import mkstemp
+
+class PackageMaskFileTestCase(TestCase):
+
+ def testPackageMaskFile(self):
+ self.BuildFile()
+ try:
+ f = PackageMaskFile(self.fname)
+ f.load()
+ for atom in f:
+ self.assertTrue(atom in test_cps)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/usr/lib/portage/pym/portage/tests/env/config/test_PackageUseFile.py b/usr/lib/portage/pym/portage/tests/env/config/test_PackageUseFile.py
new file mode 100644
index 0000000..b1a6ccb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/test_PackageUseFile.py
@@ -0,0 +1,37 @@
+# test_PackageUseFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageUseFile
+from tempfile import mkstemp
+
+
+class PackageUseFileTestCase(TestCase):
+
+ cpv = 'sys-apps/portage'
+ useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
+
+ def testPackageUseFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+ self.BuildFile()
+ try:
+ f = PackageUseFile(self.fname)
+ f.load()
+ for cpv, use in f.items():
+ self.assertEqual(cpv, self.cpv)
+				for flag in use:
+					self.assertTrue(flag in self.useflags)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/usr/lib/portage/pym/portage/tests/env/config/test_PortageModulesFile.py b/usr/lib/portage/pym/portage/tests/env/config/test_PortageModulesFile.py
new file mode 100644
index 0000000..05584a5
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/env/config/test_PortageModulesFile.py
@@ -0,0 +1,38 @@
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PortageModulesFile
+from tempfile import mkstemp
+
+class PortageModulesFileTestCase(TestCase):
+
+ keys = ['foo.bar', 'baz', 'bob', 'extra_key']
+	invalid_keys = ['', ""] # two spellings of the same empty key
+ modules = ['spanky', 'zmedico', 'antarus', 'ricer', '5', '6']
+
+ def setUp(self):
+ self.items = {}
+ for k, v in zip(self.keys + self.invalid_keys, self.modules):
+ self.items[k] = v
+
+ def testPortageModulesFile(self):
+ self.BuildFile()
+ f = PortageModulesFile(self.fname)
+ f.load()
+ for k in self.keys:
+ self.assertEqual(f[k], self.items[k])
+ for ik in self.invalid_keys:
+ self.assertEqual(False, ik in f)
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for k, v in self.items.items():
+ f.write('%s=%s\n' % (k, v))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/usr/lib/portage/pym/portage/tests/glsa/__init__.py b/usr/lib/portage/pym/portage/tests/glsa/__init__.py
new file mode 100644
index 0000000..6cde932
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/glsa/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/glsa/__test__.py b/usr/lib/portage/pym/portage/tests/glsa/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/glsa/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/glsa/test_security_set.py b/usr/lib/portage/pym/portage/tests/glsa/test_security_set.py
new file mode 100644
index 0000000..bf1f82b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/glsa/test_security_set.py
@@ -0,0 +1,145 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os, _encodings
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SecuritySetTestCase(TestCase):
+
+ glsa_template = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet href="/xsl/glsa.xsl" type="text/xsl"?>
+<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl"?>
+<!DOCTYPE glsa SYSTEM "http://www.gentoo.org/dtd/glsa.dtd">
+<glsa id="%(glsa_id)s">
+ <title>%(pkgname)s: Multiple vulnerabilities</title>
+ <synopsis>Multiple vulnerabilities have been found in %(pkgname)s.
+ </synopsis>
+ <product type="ebuild">%(pkgname)s</product>
+ <announced>January 18, 2013</announced>
+ <revised>January 18, 2013: 1</revised>
+ <bug>55555</bug>
+ <access>remote</access>
+ <affected>
+ <package name="%(cp)s" auto="yes" arch="*">
+ <unaffected range="ge">%(unaffected_version)s</unaffected>
+ <vulnerable range="lt">%(unaffected_version)s</vulnerable>
+ </package>
+ </affected>
+ <background>
+    <p>%(pkgname)s is a software package.</p>
+ </background>
+ <description>
+ <p>Multiple vulnerabilities have been discovered in %(pkgname)s.
+ </p>
+ </description>
+ <impact type="normal">
+ <p>A remote attacker could exploit these vulnerabilities.</p>
+ </impact>
+ <workaround>
+ <p>There is no known workaround at this time.</p>
+ </workaround>
+ <resolution>
+ <p>All %(pkgname)s users should upgrade to the latest version:</p>
+ <code>
+ # emerge --sync
+ # emerge --ask --oneshot --verbose "&gt;=%(cp)s-%(unaffected_version)s"
+ </code>
+ </resolution>
+ <references>
+ </references>
+</glsa>
+"""
+
+ def _must_skip(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return "python is missing xml support"
+
+ def testSecuritySet(self):
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ ebuilds = {
+ "cat/A-vulnerable-2.2": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.5": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "cat/A-vulnerable-2.1": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.4": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ glsas = (
+ {
+ "glsa_id": "201301-01",
+ "pkgname": "A-vulnerable",
+ "cp": "cat/A-vulnerable",
+ "unaffected_version": "2.2"
+ },
+ {
+ "glsa_id": "201301-02",
+ "pkgname": "B-not-vulnerable",
+ "cp": "cat/B-not-vulnerable",
+ "unaffected_version": "4.4"
+ },
+ {
+ "glsa_id": "201301-03",
+ "pkgname": "NotInstalled",
+ "cp": "cat/NotInstalled",
+ "unaffected_version": "3.5"
+ },
+ )
+
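+		# From the fixtures above, @security should pull in only cat/A:
+		# installed A-2.1 falls below the unaffected range (>=2.2),
+		# installed B-4.4 already satisfies its range (>=4.4), and
+		# cat/NotInstalled has no installed instance to protect.
+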
+ world = ["cat/A"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@security"],
+ options = {},
+ success = True,
+ mergelist = ["cat/A-vulnerable-2.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ glsa_dir = os.path.join(
+ portdb.repositories.mainRepoLocation(), 'metadata', 'glsa')
+ portage.util.ensure_dirs(glsa_dir)
+ for glsa in glsas:
+ with io.open(os.path.join(glsa_dir,
+ 'glsa-' + glsa["glsa_id"] + '.xml'),
+ encoding=_encodings['repo.content'], mode='w') as f:
+ f.write(self.glsa_template % glsa)
+
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/lafilefixer/__init__.py b/usr/lib/portage/pym/portage/tests/lafilefixer/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lafilefixer/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/lafilefixer/__test__.py b/usr/lib/portage/pym/portage/tests/lafilefixer/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lafilefixer/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/lafilefixer/test_lafilefixer.py b/usr/lib/portage/pym/portage/tests/lafilefixer/test_lafilefixer.py
new file mode 100644
index 0000000..0bcffaa
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lafilefixer/test_lafilefixer.py
@@ -0,0 +1,145 @@
+# test_lafilefixer.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidData
+
+class test_lafilefixer(TestCase):
+
+ def get_test_cases_clean(self):
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"
+
+ def get_test_cases_update(self):
+ #.la -> -l*
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
+ #move stuff into inherited_linker_flags
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
+ b"inherited_linker_flags=''\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
+ b"inherited_linker_flags=' -pthread'\n"
+ #reorder
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
+ #remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
+ b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread -mt'\n"
+ #-L rewriting
+ yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/local/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
+ b"dependency_libs=' -L/usr'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr -L/usr/lib'\n"
+ #we once got a backtrace on this one
+ yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
+ b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
+ b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
+ b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
+ b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
+ b"/usr/lib64/libfpx.la -lstdc++'", \
+ b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
+ b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
+ b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"
+
+
+ def get_test_cases_broken(self):
+ yield b""
+ #no dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n"
+		#broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
+		#broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
+ #crap in dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #dependency_libs twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #inherited_linker_flags twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"inherited_linker_flags=''\n" +\
+ b"inherited_linker_flags=''\n"
+
+ def testlafilefixer(self):
+ from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile
+
+ for clean_contents in self.get_test_cases_clean():
+ self.assertEqual(rewrite_lafile(clean_contents), (False, None))
+
+ for original_contents, fixed_contents in self.get_test_cases_update():
+ self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))
+
+ for broken_contents in self.get_test_cases_broken():
+ self.assertRaises(InvalidData, rewrite_lafile, broken_contents)
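
Taken together, the three assertion loops pin down the rewrite_lafile() contract: (False, None) for content that is already clean, (True, new_contents) for content that was rewritten, and InvalidData for malformed input. A short usage sketch, assuming the portage tree is importable:

    from portage.util.lafilefixer import rewrite_lafile
    from portage.exception import InvalidData

    # Content referencing a .la file gets rewritten to plain -L/-l flags.
    changed, fixed = rewrite_lafile(b"dependency_libs=' /usr/lib64/liba.la -lc'\n")
    if changed:
        print(fixed)  # expected, per the cases above: b"dependency_libs=' -L/usr/lib64 -la -lc'\n"

    # Already-clean content is reported as unchanged.
    assert rewrite_lafile(b"dependency_libs=' -lm'\n") == (False, None)

    # Malformed content (e.g. an empty file) raises InvalidData.
    try:
        rewrite_lafile(b"")
    except InvalidData:
        pass
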
diff --git a/usr/lib/portage/pym/portage/tests/lazyimport/__init__.py b/usr/lib/portage/pym/portage/tests/lazyimport/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lazyimport/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/lazyimport/__test__.py b/usr/lib/portage/pym/portage/tests/lazyimport/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lazyimport/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/usr/lib/portage/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
new file mode 100644
index 0000000..080cf3f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class LazyImportPortageBaselineTestCase(TestCase):
+
+ _module_re = re.compile(r'^(portage|repoman|_emerge)\.')
+
+ _baseline_imports = frozenset([
+ 'portage.const', 'portage.localization',
+ 'portage.proxy', 'portage.proxy.lazyimport',
+ 'portage.proxy.objectproxy',
+ 'portage._selinux',
+ ])
+
+ _baseline_import_cmd = [portage._python_interpreter, '-c', '''
+import os
+import sys
+sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
+import portage
+sys.stdout.write(" ".join(k for k in sys.modules
+ if sys.modules[k] is not None))
+''']
+
+ def testLazyImportPortageBaseline(self):
+ """
+ Check what modules are imported by a baseline module import.
+ """
+
+ env = os.environ.copy()
+ pythonpath = env.get('PYTHONPATH')
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is None:
+ pythonpath = ''
+ else:
+ pythonpath = ':' + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+ env['PYTHONPATH'] = pythonpath
+
+ # If python is patched to insert the path of the
+ # currently installed portage module into sys.path,
+ # then the above PYTHONPATH override doesn't help.
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+
+ scheduler = global_event_loop()
+ master_fd, slave_fd = os.pipe()
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ slave_file = os.fdopen(slave_fd, 'wb')
+ producer = SpawnProcess(
+ args=self._baseline_import_cmd,
+ env=env, fd_pipes={1:slave_fd},
+ scheduler=scheduler)
+ producer.start()
+ slave_file.close()
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ scheduler=scheduler)
+
+ consumer.start()
+ consumer.wait()
+ self.assertEqual(producer.wait(), os.EX_OK)
+ self.assertEqual(consumer.wait(), os.EX_OK)
+
+ output = consumer.getvalue().decode('ascii', 'replace').split()
+
+ unexpected_modules = " ".join(sorted(x for x in output \
+ if self._module_re.match(x) is not None and \
+ x not in self._baseline_imports))
+
+ self.assertEqual("", unexpected_modules)
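
The baseline command itself is pure stdlib, so the check can be reproduced outside the test harness: spawn a fresh interpreter, import portage, and report which portage.* modules were pulled in. A stdlib-only sketch (PORTAGE_PYM_PATH below is a placeholder for a real checkout):

    import subprocess, sys

    PORTAGE_PYM_PATH = "/usr/lib/portage/pym"  # assumption: point this at your tree
    code = (
        "import sys; sys.path.insert(0, %r); import portage; "
        "print(' '.join(sorted(m for m in sys.modules if m.startswith('portage'))))"
        % PORTAGE_PYM_PATH)
    # A lazily-importing portage should report only a handful of portage.* modules.
    out = subprocess.check_output([sys.executable, "-c", code])
    print(out.decode("ascii", "replace"))
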
diff --git a/usr/lib/portage/pym/portage/tests/lazyimport/test_preload_portage_submodules.py b/usr/lib/portage/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
new file mode 100644
index 0000000..9d20eba
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+
+class PreloadPortageSubmodulesTestCase(TestCase):
+
+ def testPreloadPortageSubmodules(self):
+ """
+ Verify that _preload_portage_submodules() doesn't leave any
+ remaining proxies that refer to the portage.* namespace.
+ """
+ portage.proxy.lazyimport._preload_portage_submodules()
+ for name in portage.proxy.lazyimport._module_proxies:
+ self.assertEqual(name.startswith('portage.'), False)
diff --git a/usr/lib/portage/pym/portage/tests/lint/__init__.py b/usr/lib/portage/pym/portage/tests/lint/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lint/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/lint/__test__.py b/usr/lib/portage/pym/portage/tests/lint/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lint/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/lint/test_bash_syntax.py b/usr/lib/portage/pym/portage/tests/lint/test_bash_syntax.py
new file mode 100644
index 0000000..fdbb6fe
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lint/test_bash_syntax.py
@@ -0,0 +1,54 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+import stat
+import subprocess
+import sys
+
+from portage.const import BASH_BINARY, PORTAGE_BASE_PATH, PORTAGE_BIN_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+class BashSyntaxTestCase(TestCase):
+
+ def testBashSyntax(self):
+ locations = [PORTAGE_BIN_PATH]
+ misc_dir = os.path.join(PORTAGE_BASE_PATH, "misc")
+ if os.path.isdir(misc_dir):
+ locations.append(misc_dir)
+ for parent, dirs, files in \
+ chain.from_iterable(os.walk(x) for x in locations):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ ext = x.split('.')[-1]
+				if ext in ('py', 'pyc', 'pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+
+ # Check for bash shebang
+ f = open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ f.close()
+ if line[:2] == '#!' and \
+ 'bash' in line:
+ cmd = [BASH_BINARY, "-n", x]
+ cmd = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0],
+ encoding=_encodings['fs'])
+ status = proc.wait()
+ self.assertEqual(os.WIFEXITED(status) and \
+ os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)
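
The heart of the check is "bash -n", which parses a script without executing it and exits non-zero on syntax errors. A standalone sketch (the script path is hypothetical):

    import subprocess

    script = "/tmp/example.sh"  # hypothetical script to check
    with open(script, "w") as f:
        f.write("#!/bin/bash\necho ok\n")

    # "bash -n" is a parse-only run: nothing in the script is executed.
    proc = subprocess.Popen(["bash", "-n", script],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    assert proc.wait() == 0, output
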
diff --git a/usr/lib/portage/pym/portage/tests/lint/test_compile_modules.py b/usr/lib/portage/pym/portage/tests/lint/test_compile_modules.py
new file mode 100644
index 0000000..4826cad
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lint/test_compile_modules.py
@@ -0,0 +1,54 @@
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import itertools
+import stat
+
+from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PORTAGE_PYM_PACKAGES
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+class CompileModulesTestCase(TestCase):
+
+ def testCompileModules(self):
+ iters = [os.walk(os.path.join(PORTAGE_PYM_PATH, x))
+ for x in PORTAGE_PYM_PACKAGES]
+ iters.append(os.walk(PORTAGE_BIN_PATH))
+
+ for parent, _dirs, files in itertools.chain(*iters):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-4:] in ('.pyc', '.pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ do_compile = False
+ if x[-3:] == '.py':
+ do_compile = True
+ else:
+ # Check for python shebang.
+ try:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ except IOError as e:
+ # Some tests create files that are unreadable by the
+ # user (by design), so ignore EACCES issues.
+ if e.errno != errno.EACCES:
+ raise
+ continue
+ if line[:2] == '#!' and 'python' in line:
+ do_compile = True
+ if do_compile:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ compile(f.read(), x, 'exec')
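
The built-in compile() is what makes this a pure syntax check: it parses the source and raises SyntaxError without running any module-level code. For example:

    # Parsing succeeds silently; broken source raises SyntaxError.
    compile(b"x = 1\n", "<ok>", "exec")
    try:
        compile(b"def broken(:\n    pass\n", "<demo>", "exec")
    except SyntaxError as e:
        print("syntax error: %s" % e)
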
diff --git a/usr/lib/portage/pym/portage/tests/lint/test_import_modules.py b/usr/lib/portage/pym/portage/tests/lint/test_import_modules.py
new file mode 100644
index 0000000..fcdcb3b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/lint/test_import_modules.py
@@ -0,0 +1,44 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+
+from portage.const import PORTAGE_PYM_PATH, PORTAGE_PYM_PACKAGES
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+class ImportModulesTestCase(TestCase):
+
+ def testImportModules(self):
+ expected_failures = frozenset((
+ ))
+
+ iters = (self._iter_modules(os.path.join(PORTAGE_PYM_PATH, x))
+ for x in PORTAGE_PYM_PACKAGES)
+ for mod in chain(*iters):
+ try:
+ __import__(mod)
+ except ImportError as e:
+ if mod not in expected_failures:
+ self.assertTrue(False, "failed to import '%s': %s" % (mod, e))
+ del e
+
+ def _iter_modules(self, base_dir):
+ for parent, dirs, files in os.walk(base_dir):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ parent_mod = parent[len(PORTAGE_PYM_PATH)+1:]
+ parent_mod = parent_mod.replace("/", ".")
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-3:] != '.py':
+ continue
+ x = x[:-3]
+ if x[-8:] == '__init__':
+ x = parent_mod
+ else:
+ x = parent_mod + "." + x
+ yield x
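
_iter_modules() maps file paths under PORTAGE_PYM_PATH to dotted module names, with __init__.py collapsing to its package. A self-contained sketch of that mapping (the pym path is a placeholder):

    import os

    PORTAGE_PYM_PATH = "/usr/lib/portage/pym"  # assumption

    def module_name(py_file):
        rel = py_file[len(PORTAGE_PYM_PATH) + 1:]  # strip the pym prefix
        rel = rel[:-3].replace(os.sep, ".")        # drop ".py", dots for separators
        if rel.endswith(".__init__"):
            rel = rel[:-len(".__init__")]          # package module is the package itself
        return rel

    print(module_name(PORTAGE_PYM_PATH + "/portage/util/__init__.py"))  # portage.util
    print(module_name(PORTAGE_PYM_PATH + "/portage/news.py"))           # portage.news
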
diff --git a/usr/lib/portage/pym/portage/tests/locks/__init__.py b/usr/lib/portage/pym/portage/tests/locks/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/locks/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/locks/__test__.py b/usr/lib/portage/pym/portage/tests/locks/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/locks/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/locks/test_asynchronous_lock.py b/usr/lib/portage/pym/portage/tests/locks/test_asynchronous_lock.py
new file mode 100644
index 0000000..3a2ccfb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/locks/test_asynchronous_lock.py
@@ -0,0 +1,176 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.AsynchronousLock import AsynchronousLock
+
+class AsynchronousLockTestCase(TestCase):
+
+ def _testAsynchronousLock(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ for force_async in (True, False):
+ for force_dummy in (True, False):
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_thread=True,
+ _force_dummy=force_dummy)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ async_lock.unlock()
+
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_process=True)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ async_lock.unlock()
+
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLock(self):
+ self._testAsynchronousLock()
+
+ def testAsynchronousLockHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLock()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWait(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+
+ # lock2 requires _force_async=True since the portage.locks
+ # module is not designed to work as intended here if the
+ # same process tries to lock the same file more than
+ # one time concurrently.
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ lock1.unlock()
+ self.assertEqual(lock2.wait(), os.EX_OK)
+ self.assertEqual(lock2.returncode, os.EX_OK)
+ lock2.unlock()
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWait(self):
+ self._testAsynchronousLockWait()
+
+ def testAsynchronousLockWaitHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWait()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWaitCancel(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Cancel lock2 and then check wait() and returncode results.
+ lock2.cancel()
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ lock1.unlock()
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitCancel(self):
+ self._testAsynchronousLockWaitCancel()
+
+ def testAsynchronousLockWaitCancelHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWaitCancel()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWaitKill(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Kill lock2's process and then check wait() and
+ # returncode results. This is intended to simulate
+ # a SIGINT sent via the controlling tty.
+ self.assertEqual(lock2._imp is not None, True)
+ self.assertEqual(lock2._imp._proc is not None, True)
+ self.assertEqual(lock2._imp._proc.pid is not None, True)
+ lock2._imp._kill_test = True
+ os.kill(lock2._imp._proc.pid, signal.SIGTERM)
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ lock1.unlock()
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitKill(self):
+ self._testAsynchronousLockWaitKill()
+
+ def testAsynchronousLockWaitKillHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWaitKill()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
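
All four scenarios share the same start/wait/unlock lifecycle. A minimal sketch of that pattern, assuming a portage checkout is importable:

    import os, shutil, tempfile
    from portage.util._eventloop.global_event_loop import global_event_loop
    from _emerge.AsynchronousLock import AsynchronousLock

    tempdir = tempfile.mkdtemp()
    try:
        lock = AsynchronousLock(path=os.path.join(tempdir, "lock_me"),
            scheduler=global_event_loop())
        lock.start()
        assert lock.wait() == os.EX_OK  # blocks until the lock is acquired
        lock.unlock()
    finally:
        shutil.rmtree(tempdir)
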
diff --git a/usr/lib/portage/pym/portage/tests/locks/test_lock_nonblock.py b/usr/lib/portage/pym/portage/tests/locks/test_lock_nonblock.py
new file mode 100644
index 0000000..2ff7b35
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/locks/test_lock_nonblock.py
@@ -0,0 +1,62 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+import traceback
+
+import portage
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+
+class LockNonblockTestCase(TestCase):
+
+ def _testLockNonblock(self):
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = portage.locks.lockfile(path)
+ pid = os.fork()
+ if pid == 0:
+ portage.locks._close_fds()
+ # Disable close_fds since we don't exec
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes({0:0, 1:1, 2:2}, close_fds=False)
+ rval = 2
+ try:
+ try:
+ lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
+ except portage.exception.TryAgain:
+ rval = os.EX_OK
+ else:
+ rval = 1
+ portage.locks.unlockfile(lock2)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ os._exit(rval)
+
+ self.assertEqual(pid > 0, True)
+ pid, status = os.waitpid(pid, 0)
+ self.assertEqual(os.WIFEXITED(status), True)
+ self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
+
+ portage.locks.unlockfile(lock1)
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testLockNonblock(self):
+ self._testLockNonblock()
+
+ def testLockNonblockHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testLockNonblock()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
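
The fork is essential here: fcntl advisory locks are per-process, so a second lockfile() call in the same process would simply succeed rather than raise TryAgain. The contender has to be another process, as in this sketch (assumes portage is importable in the child as well):

    import os, subprocess, sys, tempfile
    import portage.locks

    path = os.path.join(tempfile.mkdtemp(), "lock_me")
    lock1 = portage.locks.lockfile(path)
    child = (
        "import os, portage.locks\n"
        "from portage.exception import TryAgain\n"
        "try:\n"
        "    portage.locks.lockfile(%r, flags=os.O_NONBLOCK)\n"
        "except TryAgain:\n"
        "    raise SystemExit(0)\n"  # expected: the lock is busy
        "raise SystemExit(1)\n" % path)
    assert subprocess.call([sys.executable, "-c", child]) == 0
    portage.locks.unlockfile(lock1)
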
diff --git a/usr/lib/portage/pym/portage/tests/news/__init__.py b/usr/lib/portage/pym/portage/tests/news/__init__.py
new file mode 100644
index 0000000..28a753f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/news/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.news/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/news/__test__.py b/usr/lib/portage/pym/portage/tests/news/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/news/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/news/test_NewsItem.py b/usr/lib/portage/pym/portage/tests/news/test_NewsItem.py
new file mode 100644
index 0000000..a4e76f3
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/news/test_NewsItem.py
@@ -0,0 +1,95 @@
+# test_NewsItem.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.news import NewsItem
+from portage.dbapi.virtual import testdbapi
+from tempfile import mkstemp
+# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
+
+class NewsItemTestCase(TestCase):
+ """These tests suck: they use your running config instead of making their own"""
+ fakeItem = """
+Title: YourSQL Upgrades from 4.0 to 4.1
+Author: Ciaran McCreesh <ciaranm@gentoo.org>
+Content-Type: text/plain
+Posted: 01-Nov-2005
+Revision: 1
+#Display-If-Installed:
+#Display-If-Profile:
+#Display-If-Keyword:
+
+YourSQL databases created using YourSQL version 4.0 are incompatible
+with YourSQL version 4.1 or later. There is no reliable way to
+automate the database format conversion, so action from the system
+administrator is required before an upgrade can take place.
+
+Please see the Gentoo YourSQL Upgrade Guide for instructions:
+
+ http://www.gentoo.org/doc/en/yoursql-upgrading.xml
+
+Also see the official YourSQL documentation:
+
+ http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
+
+After upgrading, you should also recompile any packages which link
+against YourSQL:
+
+ revdep-rebuild --library=libyoursqlclient.so.12
+
+The revdep-rebuild tool is provided by app-portage/gentoolkit.
+"""
+ def setUp(self):
+ self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
+ self.keywords = "x86"
+ # Use fake/test dbapi to avoid slow tests
+ self.vardb = testdbapi()
+ # self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
+ # Consumers only use ARCH, so avoid portage.settings by using a dict
+ self.settings = { 'ARCH' : 'x86' }
+
+ def testDisplayIfProfile(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
+ self.profile)
+
+ item = self._processItem(tmpItem)
+ try:
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfInstalled(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
+ "sys-apps/portage")
+
+ try:
+ item = self._processItem(tmpItem)
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfKeyword(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
+ self.keywords)
+
+ try:
+ item = self._processItem(tmpItem)
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def _processItem(self, item):
+ filename = None
+ fd, filename = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write(item)
+ f.close()
+ try:
+ return NewsItem(filename, 0)
+ except TypeError:
+ self.fail("Error while processing news item %s" % filename)
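
_processItem() above is the whole loading story: write the item text to a file and hand the path to NewsItem. A condensed sketch of that round trip, assuming portage is importable:

    import os
    from tempfile import mkstemp
    from portage.news import NewsItem
    from portage.dbapi.virtual import testdbapi

    fd, filename = mkstemp()
    with os.fdopen(fd, "w") as f:
        f.write("Title: Demo\nAuthor: A Dev <dev@example.org>\n"
            "Content-Type: text/plain\nPosted: 01-Nov-2005\nRevision: 1\n\nBody.\n")
    item = NewsItem(filename, 0)
    # With no Display-If-* restrictions the item should apply everywhere.
    print(item.isRelevant(testdbapi(), {"ARCH": "x86"}, "/some/profile"))
    os.unlink(filename)
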
diff --git a/usr/lib/portage/pym/portage/tests/process/__init__.py b/usr/lib/portage/pym/portage/tests/process/__init__.py
new file mode 100644
index 0000000..d19e353
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/process/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2008 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/process/__test__.py b/usr/lib/portage/pym/portage/tests/process/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/process/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/process/test_PopenProcess.py b/usr/lib/portage/pym/portage/tests/process/test_PopenProcess.py
new file mode 100644
index 0000000..88da0b3
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/process/test_PopenProcess.py
@@ -0,0 +1,85 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PopenPipeTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support, since it
+	uses subprocess.Popen instead of os.fork().
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def _testPipeLogger(self, test_string):
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=global_event_loop())
+
+ fd, log_file_path = tempfile.mkstemp()
+ try:
+
+ consumer = PipeLogger(background=True,
+ input_fd=producer.proc.stdout,
+ log_file_path=log_file_path)
+
+ producer.pipe_reader = consumer
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ with open(log_file_path, 'rb') as f:
+ content = f.read()
+
+ finally:
+ os.close(fd)
+ os.unlink(log_file_path)
+
+ return content.decode('ascii', 'replace')
+
+ def testPopenPipe(self):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+
+ output = self._testPipeLogger(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/usr/lib/portage/pym/portage/tests/process/test_PopenProcessBlockingIO.py b/usr/lib/portage/pym/portage/tests/process/test_PopenProcessBlockingIO.py
new file mode 100644
index 0000000..9ee291a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/process/test_PopenProcessBlockingIO.py
@@ -0,0 +1,63 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
+
+class PopenPipeBlockingIOTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support:
+ * use subprocess.Popen since Jython does not support os.fork()
+ * use blocking IO with threads, since Jython does not support
+	  fcntl non-blocking IO
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReaderBlockingIO(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def testPopenPipeBlockingIO(self):
+
+ if threading is None:
+ skip_reason = "threading disabled"
+			self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/usr/lib/portage/pym/portage/tests/process/test_poll.py b/usr/lib/portage/pym/portage/tests/process/test_poll.py
new file mode 100644
index 0000000..8c57c23
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/process/test_poll.py
@@ -0,0 +1,86 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._pty import _create_pty_or_pipe
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PipeReaderTestCase(TestCase):
+
+ _use_array = False
+ _use_pty = False
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ if self._use_pty:
+ got_pty, master_fd, slave_fd = _create_pty_or_pipe()
+ if not got_pty:
+ os.close(slave_fd)
+ os.close(master_fd)
+ skip_reason = "pty not acquired"
+ self.portage_skip = skip_reason
+ self.fail(skip_reason)
+ return
+ else:
+ master_fd, slave_fd = os.pipe()
+
+ # WARNING: It is very important to use unbuffered mode here,
+ # in order to avoid issue 5380 with python3.
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ scheduler = global_event_loop()
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ _use_array=self._use_array,
+ scheduler=scheduler)
+
+ producer = PopenProcess(
+ pipe_reader=consumer,
+ proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
+ stdout=slave_fd),
+ scheduler=scheduler)
+
+ producer.start()
+ os.close(slave_fd)
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def testPipeReader(self):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+
+class PipeReaderPtyTestCase(PipeReaderTestCase):
+ _use_pty = True
+
+class PipeReaderArrayTestCase(PipeReaderTestCase):
+
+ _use_array = True
+ # sleep allows reliable triggering of the failure mode on fast computers
+ _echo_cmd = "sleep 0.1 ; echo -n '%s'"
+
+ def __init__(self, *args, **kwargs):
+ super(PipeReaderArrayTestCase, self).__init__(*args, **kwargs)
+ # http://bugs.python.org/issue5380
+ # https://bugs.pypy.org/issue956
+ self.todo = True
+
+class PipeReaderPtyArrayTestCase(PipeReaderArrayTestCase):
+ _use_pty = True
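
_create_pty_or_pipe() is internal to portage, but the fallback it implements is easy to mirror with the stdlib: try to allocate a pseudo-terminal, fall back to a plain pipe. A stdlib-only analogue:

    import os, pty

    try:
        master_fd, slave_fd = pty.openpty()
        got_pty = True
    except OSError:
        master_fd, slave_fd = os.pipe()  # fall back when no pty is available
        got_pty = False
    print("using %s" % ("pty" if got_pty else "pipe"))
    os.close(master_fd)
    os.close(slave_fd)
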
diff --git a/usr/lib/portage/pym/portage/tests/repoman/__init__.py b/usr/lib/portage/pym/portage/tests/repoman/__init__.py
new file mode 100644
index 0000000..532918b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/repoman/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/repoman/__test__.py b/usr/lib/portage/pym/portage/tests/repoman/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/repoman/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/repoman/test_echangelog.py b/usr/lib/portage/pym/portage/tests/repoman/test_echangelog.py
new file mode 100644
index 0000000..1640be2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/repoman/test_echangelog.py
@@ -0,0 +1,106 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+import time
+
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+from repoman.utilities import UpdateChangeLog
+
+class RepomanEchangelogTestCase(TestCase):
+
+ def setUp(self):
+ super(RepomanEchangelogTestCase, self).setUp()
+
+ self.tmpdir = tempfile.mkdtemp(prefix='repoman.echangelog.')
+
+ self.skel_changelog = os.path.join(self.tmpdir, 'skel.ChangeLog')
+ skel = [
+ '# ChangeLog for <CATEGORY>/<PACKAGE_NAME>\n',
+ '# Copyright 1999-2000 Gentoo Foundation; Distributed under the GPL v2\n',
+ '# $Header: $\n'
+ ]
+ self._writelines(self.skel_changelog, skel)
+
+ self.cat = 'mycat'
+ self.pkg = 'mypkg'
+ self.pkgdir = os.path.join(self.tmpdir, self.cat, self.pkg)
+ os.makedirs(self.pkgdir)
+
+ self.header_pkg = '# ChangeLog for %s/%s\n' % (self.cat, self.pkg)
+ self.header_copyright = '# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2\n' % \
+ time.strftime('%Y', time.gmtime())
+ self.header_cvs = '# $Header: $\n'
+
+ self.changelog = os.path.join(self.pkgdir, 'ChangeLog')
+
+ self.user = 'Testing User <portage@gentoo.org>'
+
+ def tearDown(self):
+ super(RepomanEchangelogTestCase, self).tearDown()
+ shutil.rmtree(self.tmpdir)
+
+ def _readlines(self, file):
+ with open(file, 'r') as f:
+ return f.readlines()
+
+ def _writelines(self, file, data):
+ with open(file, 'w') as f:
+ f.writelines(data)
+
+ def testRejectRootUser(self):
+ self.assertEqual(UpdateChangeLog(self.pkgdir, 'me <root@gentoo.org>', '', '', '', '', quiet=True), None)
+
+ def testMissingSkelFile(self):
+ # Test missing ChangeLog, but with empty skel (i.e. do nothing).
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', '/does/not/exist', self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertTrue(len(actual_cl[0]) > 0)
+
+ def testEmptyChangeLog(self):
+ # Make sure we do the right thing with a 0-byte ChangeLog
+ open(self.changelog, 'w').close()
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertEqual(actual_cl[0], self.header_pkg)
+ self.assertEqual(actual_cl[1], self.header_copyright)
+ self.assertEqual(actual_cl[2], self.header_cvs)
+
+ def testCopyrightUpdate(self):
+ # Make sure updating the copyright line works
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertEqual(actual_cl[1], self.header_copyright)
+
+ def testSkelHeader(self):
+ # Test skel.ChangeLog -> ChangeLog
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertEqual(actual_cl[0], self.header_pkg)
+ self.assertNotEqual(actual_cl[-1], '\n')
+
+ def testExistingGoodHeader(self):
+ # Test existing ChangeLog (correct values)
+ self._writelines(self.changelog, [self.header_pkg])
+
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertEqual(actual_cl[0], self.header_pkg)
+
+ def testExistingBadHeader(self):
+ # Test existing ChangeLog (wrong values)
+ self._writelines(self.changelog, ['# ChangeLog for \n'])
+
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertEqual(actual_cl[0], self.header_pkg)
+
+ def testTrailingNewlines(self):
+ # Make sure trailing newlines get chomped.
+ self._writelines(self.changelog, ['#\n', 'foo\n', '\n', 'bar\n', '\n', '\n'])
+
+ UpdateChangeLog(self.pkgdir, self.user, 'test!', self.skel_changelog, self.cat, self.pkg, quiet=True)
+ actual_cl = self._readlines(self.changelog)
+ self.assertNotEqual(actual_cl[-1], '\n')
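
For orientation, the call shape these tests exercise, mirroring the arguments used above (assumes repoman is importable):

    import os, tempfile
    from repoman.utilities import UpdateChangeLog

    tmp = tempfile.mkdtemp()
    pkgdir = os.path.join(tmp, "mycat", "mypkg")
    os.makedirs(pkgdir)
    skel = os.path.join(tmp, "skel.ChangeLog")
    open(skel, "w").close()  # empty skel; header lines are synthesized from cat/pkg

    # (pkgdir, user, commit message, skel path, category, package)
    UpdateChangeLog(pkgdir, "Test User <tester@example.org>", "test!",
        skel, "mycat", "mypkg", quiet=True)
    print(open(os.path.join(pkgdir, "ChangeLog")).read())
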
diff --git a/usr/lib/portage/pym/portage/tests/repoman/test_simple.py b/usr/lib/portage/pym/portage/tests/repoman/test_simple.py
new file mode 100644
index 0000000..5dbb767
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/repoman/test_simple.py
@@ -0,0 +1,327 @@
+# Copyright 2011-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import time
+
+import portage
+from portage import os
+from portage import shutil
+from portage import _unicode_decode
+from portage.const import PORTAGE_BASE_PATH, PORTAGE_PYM_PATH
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+from repoman.utilities import _update_copyright_year
+
+class SimpleRepomanTestCase(TestCase):
+
+ def testCopyrightUpdate(self):
+ test_cases = (
+ (
+ '2011',
+ '# Copyright 1999-2008 Gentoo Foundation; Distributed under the GPL v2',
+ '# Copyright 1999-2011 Gentoo Foundation; Distributed under the GPL v2',
+ ),
+ (
+ '2011',
+ '# Copyright 1999 Gentoo Foundation; Distributed under the GPL v2',
+ '# Copyright 1999-2011 Gentoo Foundation; Distributed under the GPL v2',
+ ),
+ (
+ '1999',
+ '# Copyright 1999 Gentoo Foundation; Distributed under the GPL v2',
+ '# Copyright 1999 Gentoo Foundation; Distributed under the GPL v2',
+ ),
+ )
+
+ for year, before, after in test_cases:
+ self.assertEqual(_update_copyright_year(year, before), after)
+
+ def _must_skip(self):
+ xmllint = find_binary("xmllint")
+ if not xmllint:
+ return "xmllint not found"
+
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return "python is missing xml support"
+
+ def testSimple(self):
+ debug = False
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ copyright_header = """# Copyright 1999-%s Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+""" % time.gmtime().tm_year
+
+ repo_configs = {
+ "test_repo": {
+ "layout.conf":
+ (
+ "update-changelog = true",
+ ),
+ }
+ }
+
+ profiles = (
+ ("x86", "default/linux/x86/test_profile", "stable"),
+ ("x86", "default/linux/x86/test_dev", "dev"),
+ ("x86", "default/linux/x86/test_exp", "exp"),
+ )
+
+ profile = {
+ "eapi": ("5",),
+ "package.use.stable.mask": ("dev-libs/A flag",)
+ }
+
+ ebuilds = {
+ "dev-libs/A-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "5",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
+ "dev-libs/A-1": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "4",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ "KEYWORDS": "~x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
+ "dev-libs/B-1": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "4",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ "KEYWORDS": "~x86",
+ "LICENSE": "GPL-2",
+ },
+ "dev-libs/C-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "4",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ # must be unstable, since dev-libs/A[flag] is stable masked
+ "KEYWORDS": "~x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/A[flag] )",
+ },
+ }
+ licenses = ["GPL-2"]
+ arch_list = ["x86"]
+ metadata_dtd = os.path.join(PORTAGE_BASE_PATH, "cnf/metadata.dtd")
+ metadata_xml_files = (
+ (
+ "dev-libs/A",
+ {
+ "herd" : "base-system",
+ "flags" : "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ (
+ "dev-libs/B",
+ {
+ "herd" : "no-herd",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ (
+ "dev-libs/C",
+ {
+ "herd" : "no-herd",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ )
+
+ use_desc = (
+ ("flag", "Description of how USE='flag' affects packages"),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ profile=profile, repo_configs=repo_configs, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ homedir = os.path.join(eroot, "home")
+ distdir = os.path.join(eprefix, "distdir")
+ test_repo_location = settings.repositories["test_repo"].location
+ profiles_dir = os.path.join(test_repo_location, "profiles")
+ license_dir = os.path.join(test_repo_location, "licenses")
+
+ repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
+ os.path.join(self.bindir, "repoman"))
+
+ git_binary = find_binary("git")
+ git_cmd = (git_binary,)
+
+ cp_binary = find_binary("cp")
+ self.assertEqual(cp_binary is None, False,
+ "cp command not found")
+ cp_cmd = (cp_binary,)
+
+ test_ebuild = portdb.findname("dev-libs/A-1")
+ self.assertFalse(test_ebuild is None)
+
+ committer_name = "Gentoo Dev"
+ committer_email = "gentoo-dev@gentoo.org"
+
+ git_test = (
+ ("", repoman_cmd + ("manifest",)),
+ ("", git_cmd + ("config", "--global", "user.name", committer_name,)),
+ ("", git_cmd + ("config", "--global", "user.email", committer_email,)),
+ ("", git_cmd + ("init-db",)),
+ ("", git_cmd + ("add", ".")),
+ ("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
+ ("", repoman_cmd + ("full", "-d")),
+ ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
+ ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
+ ("", repoman_cmd + ("commit", "-m", "bump to version 2")),
+ ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")),
+ ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")),
+ ("dev-libs", repoman_cmd + ("commit", "-m", "bump to version 3")),
+ ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")),
+ ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")),
+ ("dev-libs/A", repoman_cmd + ("commit", "-m", "bump to version 4")),
+ )
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "DISTDIR" : distdir,
+ "GENTOO_COMMITTER_NAME" : committer_name,
+ "GENTOO_COMMITTER_EMAIL" : committer_email,
+ "HOME" : homedir,
+ "PATH" : os.environ["PATH"],
+ "PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
+ "PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ env["FEATURES"] = "-sandbox -usersandbox"
+
+ dirs = [homedir, license_dir, profiles_dir, distdir]
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f:
+ f.write(copyright_header)
+ with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
+ for x in profiles:
+ f.write("%s %s %s\n" % x)
+
+ # ResolverPlayground only created the first profile,
+ # so create the remaining ones.
+ for x in profiles[1:]:
+ sub_profile_dir = os.path.join(profiles_dir, x[1])
+ ensure_dirs(sub_profile_dir)
+ for config_file, lines in profile.items():
+ file_name = os.path.join(sub_profile_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ for x in licenses:
+ open(os.path.join(license_dir, x), 'wb').close()
+ with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
+ for x in arch_list:
+ f.write("%s\n" % x)
+ with open(os.path.join(profiles_dir, "use.desc"), 'w') as f:
+ for k, v in use_desc:
+ f.write("%s - %s\n" % (k, v))
+ for cp, xml_data in metadata_xml_files:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
+ f.write(playground.metadata_xml_template % xml_data)
+ # Use a symlink to test_repo, in order to trigger bugs
+ # involving canonical vs. non-canonical paths.
+ test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
+ os.symlink(test_repo_location, test_repo_symlink)
+ # repoman checks metadata.dtd for recent CTIME, so copy the file in
+ # order to ensure that the CTIME is current
+ # NOTE: if we don't have the file around, let repoman try to fetch it.
+ if os.path.exists(metadata_dtd):
+ shutil.copyfile(metadata_dtd, os.path.join(distdir, "metadata.dtd"))
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
+ proc = subprocess.Popen(repoman_cmd + ("full",),
+ cwd=abs_cwd, env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "repoman failed in %s" % (cwd,))
+
+ if git_binary is not None:
+ for cwd, cmd in git_test:
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
+ proc = subprocess.Popen(cmd,
+ cwd=abs_cwd, env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "%s failed in %s" % (cmd, cwd,))
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/ResolverPlayground.py b/usr/lib/portage/pym/portage/tests/resolver/ResolverPlayground.py
new file mode 100644
index 0000000..2d16251
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/ResolverPlayground.py
@@ -0,0 +1,813 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+import fnmatch
+import sys
+import tempfile
+import portage
+from portage import os
+from portage import shutil
+from portage.const import (GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
+ USER_CONFIG_PATH)
+from portage.dep import Atom, _repo_separator
+from portage.package.ebuild.config import config
+from portage.package.ebuild.digestgen import digestgen
+from portage._sets import load_default_config
+from portage._sets.base import InternalPackageSet
+from portage.tests import cnf_path
+from portage.util import ensure_dirs, normalize_path
+from portage.versions import catsplit
+
+import _emerge
+from _emerge.actions import calc_depclean
+from _emerge.Blocker import Blocker
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph
+from _emerge.RootConfig import RootConfig
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class ResolverPlayground(object):
+ """
+ This class helps to create the necessary files on disk and
+ the needed settings instances, etc. for the resolver to do
+ its work.
+ """
+
+ config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
+ "package.keywords", "package.license", "package.mask", "package.properties",
+ "package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
+		"unpack_dependencies", "use.aliases", "use.force", "use.mask"))
+
+ metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+<herd>%(herd)s</herd>
+<maintainer>
+<email>maintainer-needed@gentoo.org</email>
+<description>Description of the maintainership</description>
+</maintainer>
+<longdescription>Long description of the package</longdescription>
+<use>
+%(flags)s
+</use>
+</pkgmetadata>
+"""
+
+ def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
+ user_config={}, sets={}, world=[], world_sets=[], distfiles={},
+ eprefix=None, targetroot=False, debug=False):
+ """
+ ebuilds: cpv -> metadata mapping simulating available ebuilds.
+ installed: cpv -> metadata mapping simulating installed packages.
+ If a metadata key is missing, it gets a default value.
+ profile: settings defined by the profile.
+ """
+
+ self.debug = debug
+ if eprefix is None:
+ self.eprefix = normalize_path(tempfile.mkdtemp())
+ else:
+ self.eprefix = normalize_path(eprefix)
+ portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
+
+ self.eroot = self.eprefix + os.sep
+ if targetroot:
+ self.target_root = os.path.join(self.eroot, 'target_root')
+ else:
+ self.target_root = os.sep
+ self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
+ self.pkgdir = os.path.join(self.eprefix, "pkgdir")
+ self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
+ os.makedirs(self.vdbdir)
+
+ if not debug:
+ portage.util.noiselimit = -2
+
+ self._repositories = {}
+ #Make sure the main repo is always created
+ self._get_repo_dir("test_repo")
+
+ self._create_distfiles(distfiles)
+ self._create_ebuilds(ebuilds)
+ self._create_binpkgs(binpkgs)
+ self._create_installed(installed)
+ self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
+ self._create_world(world, world_sets)
+
+ self.settings, self.trees = self._load_config()
+
+ self._create_ebuild_manifests(ebuilds)
+
+ portage.util.noiselimit = 0
+
+ def _get_repo_dir(self, repo):
+ """
+ Create the repo directory if needed.
+ """
+ if repo not in self._repositories:
+ if repo == "test_repo":
+ self._repositories["DEFAULT"] = {"main-repo": repo}
+
+ repo_path = os.path.join(self.eroot, "var", "repositories", repo)
+ self._repositories[repo] = {"location": repo_path}
+ profile_path = os.path.join(repo_path, "profiles")
+
+ try:
+ os.makedirs(profile_path)
+ except os.error:
+ pass
+
+ repo_name_file = os.path.join(profile_path, "repo_name")
+ with open(repo_name_file, "w") as f:
+ f.write("%s\n" % repo)
+
+ return self._repositories[repo]["location"]
+
+ def _create_distfiles(self, distfiles):
+ os.makedirs(self.distdir)
+ for k, v in distfiles.items():
+ with open(os.path.join(self.distdir, k), 'wb') as f:
+ f.write(v)
+
+ def _create_ebuilds(self, ebuilds):
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ metadata = ebuilds[cpv].copy()
+ copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
+ eapi = metadata.pop("EAPI", "0")
+ misc_content = metadata.pop("MISC_CONTENT", None)
+ metadata.setdefault("DEPEND", "")
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("IUSE", "")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ if unknown_keys:
+ raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+ try:
+ os.makedirs(ebuild_dir)
+ except os.error:
+ pass
+
+ with open(ebuild_path, "w") as f:
+ if copyright_header is not None:
+ f.write(copyright_header)
+ f.write('EAPI="%s"\n' % eapi)
+ for k, v in metadata.items():
+ f.write('%s="%s"\n' % (k, v))
+ if misc_content is not None:
+ f.write(misc_content)
+
+ def _create_ebuild_manifests(self, ebuilds):
+ tmpsettings = config(clone=self.settings)
+ tmpsettings['PORTAGE_QUIET'] = '1'
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+
+ portdb = self.trees[self.eroot]["porttree"].dbapi
+ tmpsettings['O'] = ebuild_dir
+ if not digestgen(mysettings=tmpsettings, myportdb=portdb):
+ raise AssertionError('digest creation failed for %s' % ebuild_path)
+
+ def _create_binpkgs(self, binpkgs):
+ for cpv, metadata in binpkgs.items():
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ cat, pf = catsplit(a.cpv)
+ metadata = metadata.copy()
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata["repository"] = repo
+ metadata["CATEGORY"] = cat
+ metadata["PF"] = pf
+
+ repo_dir = self.pkgdir
+ category_dir = os.path.join(repo_dir, cat)
+ binpkg_path = os.path.join(category_dir, pf + ".tbz2")
+ ensure_dirs(category_dir)
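+ # Attach the metadata as an xpak segment to an (otherwise empty)
+ # tbz2 file; that is all the resolver needs from a simulated
+ # binary package.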
+ t = portage.xpak.tbz2(binpkg_path)
+ t.recompose_mem(portage.xpak.xpak_mem(metadata))
+
+ def _create_installed(self, installed):
+ for cpv in installed:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
+ try:
+ os.makedirs(vdb_pkg_dir)
+ except os.error:
+ pass
+
+ metadata = installed[cpv].copy()
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata.setdefault("COUNTER", "0")
+ metadata.setdefault("KEYWORDS", "~x86")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ unknown_keys.discard("BUILD_TIME")
+ unknown_keys.discard("COUNTER")
+ unknown_keys.discard("repository")
+ unknown_keys.discard("USE")
+ if unknown_keys:
+ raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ metadata["repository"] = repo
+ for k, v in metadata.items():
+ with open(os.path.join(vdb_pkg_dir, k), "w") as f:
+ f.write("%s\n" % v)
+
+ def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
+
+ user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
+
+ try:
+ os.makedirs(user_config_dir)
+ except os.error:
+ pass
+
+ for repo in self._repositories:
+ if repo == "DEFAULT":
+ continue
+
+ repo_dir = self._get_repo_dir(repo)
+ profile_dir = os.path.join(repo_dir, "profiles")
+ metadata_dir = os.path.join(repo_dir, "metadata")
+ os.makedirs(metadata_dir)
+
+ #Create $REPO/profiles/categories
+ categories = set()
+ for cpv in ebuilds:
+ ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
+ if ebuilds_repo is None:
+ ebuilds_repo = "test_repo"
+ if ebuilds_repo == repo:
+ categories.add(catsplit(cpv)[0])
+
+ categories_file = os.path.join(profile_dir, "categories")
+ with open(categories_file, "w") as f:
+ for cat in categories:
+ f.write(cat + "\n")
+
+ #Create $REPO/profiles/license_groups
+ license_file = os.path.join(profile_dir, "license_groups")
+ with open(license_file, "w") as f:
+ f.write("EULA TEST\n")
+
+ repo_config = repo_configs.get(repo)
+ if repo_config:
+ for config_file, lines in repo_config.items():
+ if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ if config_file in ("layout.conf",):
+ file_name = os.path.join(repo_dir, "metadata", config_file)
+ else:
+ file_name = os.path.join(profile_dir, config_file)
+ if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
+ os.makedirs(os.path.dirname(file_name))
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+ # Temporarily write empty value of masters until it becomes default.
+ # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
+ if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
+ f.write("masters =\n")
+
+ #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
+ os.makedirs(os.path.join(repo_dir, "eclass"))
+
+ # Temporarily write empty value of masters until it becomes default.
+ if not repo_config or "layout.conf" not in repo_config:
+ layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
+ with open(layout_conf_path, "w") as f:
+ f.write("masters =\n")
+
+ if repo == "test_repo":
+ #Create a minimal profile in the test repo
+ sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
+ os.makedirs(sub_profile_dir)
+
+ if not (profile and "eapi" in profile):
+ eapi_file = os.path.join(sub_profile_dir, "eapi")
+ with open(eapi_file, "w") as f:
+ f.write("0\n")
+
+ make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
+ with open(make_defaults_file, "w") as f:
+ f.write("ARCH=\"x86\"\n")
+ f.write("ACCEPT_KEYWORDS=\"x86\"\n")
+
+ use_force_file = os.path.join(sub_profile_dir, "use.force")
+ with open(use_force_file, "w") as f:
+ f.write("x86\n")
+
+ parent_file = os.path.join(sub_profile_dir, "parent")
+ with open(parent_file, "w") as f:
+ f.write("..\n")
+
+ if profile:
+ for config_file, lines in profile.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(sub_profile_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ #Create profile symlink
+ os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
+
+ #Create minimal herds.xml
+ herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
+<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
+<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
+<herds>
+<herd>
+ <name>base-system</name>
+ <email>base-system@gentoo.org</email>
+ <description>Core system utilities and libraries.</description>
+ <maintainer>
+ <email>base-system@gentoo.org</email>
+ <name>Base System</name>
+ <role>Base System Maintainer</role>
+ </maintainer>
+</herd>
+</herds>
+"""
+ with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
+ f.write(herds_xml)
+
+ make_conf = {
+ "ACCEPT_KEYWORDS": "x86",
+ "CLEAN_DELAY": "0",
+ "DISTDIR" : self.distdir,
+ "EMERGE_WARNING_DELAY": "0",
+ "PKGDIR": self.pkgdir,
+ "PORTAGE_INST_GID": str(portage.data.portage_gid),
+ "PORTAGE_INST_UID": str(portage.data.portage_uid),
+ "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
+ }
+
+ if os.environ.get("NOCOLOR"):
+ make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ make_conf_lines = []
+ for k_v in make_conf.items():
+ make_conf_lines.append('%s="%s"' % k_v)
+
+ if "make.conf" in user_config:
+ make_conf_lines.extend(user_config["make.conf"])
+
+ if not portage.process.sandbox_capable or \
+ os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
+
+ configs = user_config.copy()
+ configs["make.conf"] = make_conf_lines
+
+ for config_file, lines in configs.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(user_config_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ #Create /usr/share/portage/config/make.globals
+ make_globals_path = os.path.join(self.eroot,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
+ ensure_dirs(os.path.dirname(make_globals_path))
+ os.symlink(os.path.join(cnf_path, "make.globals"),
+ make_globals_path)
+
+ #Create /usr/share/portage/config/sets/portage.conf
+ default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
+
+ try:
+ os.makedirs(default_sets_conf_dir)
+ except os.error:
+ pass
+
+ provided_sets_portage_conf = (
+ os.path.join(cnf_path, "sets", "portage.conf"))
+ os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
+
+ set_config_dir = os.path.join(user_config_dir, "sets")
+
+ try:
+ os.makedirs(set_config_dir)
+ except os.error:
+ pass
+
+ for sets_file, lines in sets.items():
+ file_name = os.path.join(set_config_dir, sets_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ def _create_world(self, world, world_sets):
+ #Create /var/lib/portage/world
+ var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
+ os.makedirs(var_lib_portage)
+
+ world_file = os.path.join(var_lib_portage, "world")
+ world_set_file = os.path.join(var_lib_portage, "world_sets")
+
+ with open(world_file, "w") as f:
+ for atom in world:
+ f.write("%s\n" % atom)
+
+ with open(world_set_file, "w") as f:
+ for atom in world_sets:
+ f.write("%s\n" % atom)
+
+ def _load_config(self):
+
+ create_trees_kwargs = {}
+ if self.target_root != os.sep:
+ create_trees_kwargs["target_root"] = self.target_root
+
+ # Serialize the repository layout into repos.conf format and pass
+ # it to portage via the PORTAGE_REPOSITORIES environment variable.
+ env = {
+ "PORTAGE_REPOSITORIES": "\n".join(
+ "[%s]\n%s" % (repo_name,
+ "\n".join("%s = %s" % (k, v)
+ for k, v in repo_config.items()))
+ for repo_name, repo_config in self._repositories.items())
+ }
+
+ trees = portage.create_trees(env=env, eprefix=self.eprefix,
+ **create_trees_kwargs)
+
+ for root, root_trees in trees.items():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ return trees[trees._target_eroot]["vartree"].settings, trees
+
+ def run(self, atoms, options={}, action=None):
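+ """
+ Resolve the given atoms in --pretend mode and return a
+ ResolverPlaygroundResult, or a ResolverPlaygroundDepcleanResult
+ for the depclean/prune actions.
+ """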
+ options = options.copy()
+ options["--pretend"] = True
+ if self.debug:
+ options["--debug"] = True
+
+ if action is None:
+ if options.get("--depclean"):
+ action = "depclean"
+ elif options.get("--prune"):
+ action = "prune"
+
+ if "--usepkgonly" in options:
+ options["--usepkg"] = True
+
+ global_noiselimit = portage.util.noiselimit
+ global_emergelog_disable = _emerge.emergelog._disable
+ try:
+
+ if not self.debug:
+ portage.util.noiselimit = -2
+ _emerge.emergelog._disable = True
+
+ if action in ("depclean", "prune"):
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(self.settings, self.trees, None,
+ options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
+ result = ResolverPlaygroundDepcleanResult(
+ atoms, rval, cleanlist, ordered, req_pkg_count)
+ else:
+ params = create_depgraph_params(options, action)
+ success, depgraph, favorites = backtrack_depgraph(
+ self.settings, self.trees, options, params, action, atoms, None)
+ depgraph._show_merge_list()
+ depgraph.display_problems()
+ result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
+ finally:
+ portage.util.noiselimit = global_noiselimit
+ _emerge.emergelog._disable = global_emergelog_disable
+
+ return result
+
+ def run_TestCase(self, test_case):
+ if not isinstance(test_case, ResolverPlaygroundTestCase):
+ raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
+ for atoms in test_case.requests:
+ result = self.run(atoms, test_case.options, test_case.action)
+ if not test_case.compare_with_result(result):
+ return
+
+ def cleanup(self):
+ for eroot in self.trees:
+ portdb = self.trees[eroot]["porttree"].dbapi
+ portdb.close_caches()
+ if self.debug:
+ print("\nEROOT=%s" % self.eroot)
+ else:
+ shutil.rmtree(self.eroot)
+
+class ResolverPlaygroundTestCase(object):
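+ """
+ Describes a single resolver request together with the expected
+ outcome. Keyword arguments that are not recognized here (e.g.
+ mergelist, use_changes) are treated as checks to compare against
+ the attributes of the result.
+ """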
+
+ def __init__(self, request, **kwargs):
+ self.all_permutations = kwargs.pop("all_permutations", False)
+ self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
+ self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
+ self.ambiguous_slot_collision_solutions = kwargs.pop("ambiguous_slot_collision_solutions", False)
+ self.check_repo_names = kwargs.pop("check_repo_names", False)
+ self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
+
+ if self.all_permutations:
+ self.requests = list(permutations(request))
+ else:
+ self.requests = [request]
+
+ self.options = kwargs.pop("options", {})
+ self.action = kwargs.pop("action", None)
+ self.test_success = True
+ self.fail_msg = None
+ self._checks = kwargs.copy()
+
+ def compare_with_result(self, result):
+ checks = dict.fromkeys(result.checks)
+ for key, value in self._checks.items():
+ if key not in checks:
+ raise KeyError("Not an available check: '%s'" % key)
+ checks[key] = value
+
+ fail_msgs = []
+ for key, value in checks.items():
+ got = getattr(result, key)
+ expected = value
+
+ if key in result.optional_checks and expected is None:
+ continue
+
+ if key == "mergelist":
+ if not self.check_repo_names:
+ #Strip repo names if we don't check them
+ if got:
+ new_got = []
+ for cpv in got:
+ if cpv[:1] == "!":
+ new_got.append(cpv)
+ continue
+ new_got.append(cpv.split(_repo_separator)[0])
+ got = new_got
+ if expected:
+ new_expected = []
+ for obj in expected:
+ if isinstance(obj, basestring):
+ if obj[:1] == "!":
+ new_expected.append(obj)
+ continue
+ new_expected.append(
+ obj.split(_repo_separator)[0])
+ continue
+ new_expected.append(set())
+ for cpv in obj:
+ if cpv[:1] != "!":
+ cpv = cpv.split(_repo_separator)[0]
+ new_expected[-1].add(cpv)
+ expected = new_expected
+ if self.ignore_mergelist_order and got is not None:
+ got = set(got)
+ expected = set(expected)
+
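+ # With ambiguous_merge_order, the expected list may mix plain cpv
+ # strings with sets/tuples of cpvs whose relative order does not
+ # matter; consume the resolved list against that shape.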
+ if self.ambiguous_merge_order and got:
+ expected_stack = list(reversed(expected))
+ got_stack = list(reversed(got))
+ new_expected = []
+ match = True
+ while got_stack and expected_stack:
+ got_token = got_stack.pop()
+ expected_obj = expected_stack.pop()
+ if isinstance(expected_obj, basestring):
+ new_expected.append(expected_obj)
+ if got_token == expected_obj:
+ continue
+ # result doesn't match, so stop early
+ match = False
+ break
+ expected_obj = set(expected_obj)
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ # result doesn't match, so stop early
+ match = False
+ break
+ new_expected.append(got_token)
+ while got_stack and expected_obj:
+ got_token = got_stack.pop()
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ match = False
+ break
+ new_expected.append(got_token)
+ if not match:
+ # result doesn't match, so stop early
+ break
+ if expected_obj:
+ # result does not match, so stop early
+ match = False
+ new_expected.append(tuple(expected_obj))
+ break
+ if expected_stack:
+ # result does not match, add leftovers to new_expected
+ match = False
+ expected_stack.reverse()
+ new_expected.extend(expected_stack)
+ expected = new_expected
+
+ if match and self.merge_order_assertions:
+ for node1, node2 in self.merge_order_assertions:
+ if not (got.index(node1) < got.index(node2)):
+ fail_msgs.append("atoms: (" + \
+ ", ".join(result.atoms) + "), key: " + \
+ ("merge_order_assertions, expected: %s" % \
+ str((node1, node2))) + \
+ ", got: " + str(got))
+
+ elif key == "slot_collision_solutions" and \
+ self.ambiguous_slot_collision_solutions:
+ # Tests that use all_permutations can have multiple
+ # outcomes here.
+ for x in expected:
+ if x == got:
+ expected = x
+ break
+ elif key in ("unstable_keywords", "needed_p_mask_changes",
+ "unsatisfied_deps", "required_use_unsatisfied") and \
+ expected is not None:
+ expected = set(expected)
+
+ elif key == "forced_rebuilds" and expected is not None:
+ expected = dict((k, set(v)) for k, v in expected.items())
+
+ if got != expected:
+ fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
+ key + ", expected: " + str(expected) + ", got: " + str(got))
+ if fail_msgs:
+ self.test_success = False
+ self.fail_msg = "\n".join(fail_msgs)
+ return False
+ return True
+
+class ResolverPlaygroundResult(object):
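+ """
+ Flattens the state of a depgraph run into plain attributes
+ (mergelist, use_changes, unstable_keywords, ...) so that test
+ cases can compare them against expected values.
+ """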
+
+ checks = (
+ "success", "mergelist", "use_changes", "license_changes",
+ "unstable_keywords", "slot_collision_solutions",
+ "circular_dependency_solutions", "needed_p_mask_changes",
+ "unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied"
+ )
+ optional_checks = (
+ "forced_rebuilds",
+ "required_use_unsatisfied",
+ "unsatisfied_deps"
+ )
+
+ def __init__(self, atoms, success, mydepgraph, favorites):
+ self.atoms = atoms
+ self.success = success
+ self.depgraph = mydepgraph
+ self.favorites = favorites
+ self.mergelist = None
+ self.use_changes = None
+ self.license_changes = None
+ self.unstable_keywords = None
+ self.needed_p_mask_changes = None
+ self.slot_collision_solutions = None
+ self.circular_dependency_solutions = None
+ self.unsatisfied_deps = frozenset()
+ self.forced_rebuilds = None
+ self.required_use_unsatisfied = None
+
+ if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
+ self.mergelist = []
+ host_root = self.depgraph._frozen_config._running_root.root
+ for x in self.depgraph._dynamic_config._serialized_tasks_cache:
+ if isinstance(x, Blocker):
+ self.mergelist.append(x.atom)
+ else:
+ repo_str = ""
+ if x.repo != "test_repo":
+ repo_str = _repo_separator + x.repo
+ mergelist_str = x.cpv + repo_str
+ if x.built:
+ if x.operation == "merge":
+ desc = x.type_name
+ else:
+ desc = x.operation
+ mergelist_str = "[%s]%s" % (desc, mergelist_str)
+ if x.root != host_root:
+ mergelist_str += "{targetroot}"
+ self.mergelist.append(mergelist_str)
+
+ if self.depgraph._dynamic_config._needed_use_config_changes:
+ self.use_changes = {}
+ for pkg, needed_use_config_changes in \
+ self.depgraph._dynamic_config._needed_use_config_changes.items():
+ new_use, changes = needed_use_config_changes
+ self.use_changes[pkg.cpv] = changes
+
+ if self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords = set()
+ for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes = set()
+ for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_license_changes:
+ self.license_changes = {}
+ for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
+ self.license_changes[pkg.cpv] = missing_licenses
+
+ if self.depgraph._dynamic_config._slot_conflict_handler is not None:
+ self.slot_collision_solutions = []
+ handler = self.depgraph._dynamic_config._slot_conflict_handler
+
+ for change in handler.changes:
+ new_change = {}
+ for pkg in change:
+ new_change[pkg.cpv] = change[pkg]
+ self.slot_collision_solutions.append(new_change)
+
+ if self.depgraph._dynamic_config._circular_dependency_handler is not None:
+ handler = self.depgraph._dynamic_config._circular_dependency_handler
+ sol = handler.solutions
+ self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
+
+ if self.depgraph._dynamic_config._unsatisfied_deps_for_display:
+ self.unsatisfied_deps = set(dep_info[0][1]
+ for dep_info in self.depgraph._dynamic_config._unsatisfied_deps_for_display)
+
+ if self.depgraph._forced_rebuilds:
+ self.forced_rebuilds = dict(
+ (child.cpv, set(parent.cpv for parent in parents))
+ for child_dict in self.depgraph._forced_rebuilds.values()
+ for child, parents in child_dict.items())
+
+ required_use_unsatisfied = []
+ for pargs, kwargs in \
+ self.depgraph._dynamic_config._unsatisfied_deps_for_display:
+ if "show_req_use" in kwargs:
+ required_use_unsatisfied.append(pargs[1])
+ if required_use_unsatisfied:
+ self.required_use_unsatisfied = set(required_use_unsatisfied)
+
+class ResolverPlaygroundDepcleanResult(object):
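+ """
+ Like ResolverPlaygroundResult, but for the depclean/prune actions.
+ """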
+
+ checks = (
+ "success", "cleanlist", "ordered", "req_pkg_count",
+ )
+ optional_checks = (
+ "ordered", "req_pkg_count",
+ )
+
+ def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
+ self.atoms = atoms
+ self.success = rval == 0
+ self.cleanlist = cleanlist
+ self.ordered = ordered
+ self.req_pkg_count = req_pkg_count
diff --git a/usr/lib/portage/pym/portage/tests/resolver/__init__.py b/usr/lib/portage/pym/portage/tests/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/resolver/__test__.py b/usr/lib/portage/pym/portage/tests/resolver/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_autounmask.py b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask.py
new file mode 100644
index 0000000..75fb368
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask.py
@@ -0,0 +1,481 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class AutounmaskTestCase(TestCase):
+
+ def testAutounmask(self):
+
+ ebuilds = {
+ #ebuilds to test use changes
+ "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
+ "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
+ "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": {},
+
+ #ebuilds to test if we allow changing of masked or forced flags
+ "dev-libs/E-1": { "SLOT": 1, "DEPEND": "dev-libs/F[masked-flag]", "EAPI": 2},
+ "dev-libs/E-2": { "SLOT": 2, "DEPEND": "dev-libs/G[-forced-flag]", "EAPI": 2},
+ "dev-libs/F-1": { "IUSE": "masked-flag"},
+ "dev-libs/G-1": { "IUSE": "forced-flag"},
+
+ #ebuilds to test keyword changes
+ "app-misc/Z-1": { "KEYWORDS": "~x86", "DEPEND": "app-misc/Y" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/W-1": {},
+ "app-misc/W-2": { "KEYWORDS": "~x86" },
+ "app-misc/V-1": { "KEYWORDS": "~x86", "DEPEND": ">=app-misc/W-2"},
+
+ #ebuilds to test mask and keyword changes
+ "app-text/A-1": {},
+ "app-text/B-1": { "KEYWORDS": "~x86" },
+ "app-text/C-1": { "KEYWORDS": "" },
+ "app-text/D-1": { "KEYWORDS": "~x86" },
+ "app-text/D-2": { "KEYWORDS": "" },
+
+ #ebuilds for mixed test for || dep handling
+ "sci-libs/K-1": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/M sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-2": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/P sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-3": { "DEPEND": " || ( sci-libs/M || ( sci-libs/L[bar] sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-4": { "DEPEND": " || ( sci-libs/M || ( sci-libs/P sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-5": { "DEPEND": " || ( sci-libs/P || ( sci-libs/L[bar] sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-6": { "DEPEND": " || ( sci-libs/P || ( sci-libs/M sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-7": { "DEPEND": " || ( sci-libs/M sci-libs/L[bar] )", "EAPI": 2},
+ "sci-libs/K-8": { "DEPEND": " || ( sci-libs/L[bar] sci-libs/M )", "EAPI": 2},
+
+ "sci-libs/L-1": { "IUSE": "bar" },
+ "sci-libs/M-1": { "KEYWORDS": "~x86" },
+ "sci-libs/P-1": { },
+
+ #ebuilds to test these nice "required by cat/pkg[foo]" messages
+ "dev-util/Q-1": { "DEPEND": "foo? ( dev-util/R[bar] )", "IUSE": "+foo", "EAPI": 2 },
+ "dev-util/Q-2": { "RDEPEND": "!foo? ( dev-util/R[bar] )", "IUSE": "foo", "EAPI": 2 },
+ "dev-util/R-1": { "IUSE": "bar" },
+
+ #ebuilds to test interaction with REQUIRED_USE
+ "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
+ "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
+
+ "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ }
+
+ test_cases = (
+ #Test USE changes.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options={"--autounmask": "n"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
+
+ #Make sure we restart if needed.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/B"],
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+ ignore_mergelist_order=True,
+ use_changes={ "dev-libs/B-1": {"foo": True, "bar": True} }),
+
+ #Test keywording.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options={"--autounmask": "n"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-misc/Y-1", "app-misc/Z-1"],
+ unstable_keywords=["app-misc/Y-1", "app-misc/Z-1"]),
+
+ #Make sure that the backtracking for slot conflicts handles our mess.
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/V-1", "app-misc/W"],
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["app-misc/W-2", "app-misc/V-1"],
+ unstable_keywords=["app-misc/W-2", "app-misc/V-1"]),
+
+ #Mixed testing
+ #Make sure we don't change use for something in a || dep if there is another choice
+ #that needs no change.
+
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-1"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-1"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-2"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-2"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-3"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-3"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-4"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-4"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-5"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-5"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-6"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-6"]),
+
+ #Make sure we prefer use changes over keyword changes.
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-7"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-7"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-8"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-8"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
+
+ #Test these nice "required by cat/pkg[foo]" messages.
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-1"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-2"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-2"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
+
+ #Test interaction with REQUIRED_USE.
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-1"],
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-2"],
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["=app-portage/C-1"],
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
+
+ #Make sure we don't change masked/forced flags.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:2"],
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
+
+ #Test mask and keyword changes.
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/B"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/B-1"],
+ unstable_keywords=["app-text/B-1"],
+ needed_p_mask_changes=["app-text/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/C"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/C-1"],
+ unstable_keywords=["app-text/C-1"],
+ needed_p_mask_changes=["app-text/C-1"]),
+ #Make sure unstable keyword is preferred over missing keyword
+ ResolverPlaygroundTestCase(
+ ["app-text/D"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-1"],
+ unstable_keywords=["app-text/D-1"]),
+ #Test missing keyword
+ ResolverPlaygroundTestCase(
+ ["=app-text/D-2"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-2"],
+ unstable_keywords=["app-text/D-2"])
+ )
+
+ profile = {
+ "use.mask":
+ (
+ "masked-flag",
+ ),
+ "use.force":
+ (
+ "forced-flag",
+ ),
+ "package.mask":
+ (
+ "app-text/A",
+ "app-text/B",
+ "app-text/C",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAutounmaskForLicenses(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "LICENSE": "TEST" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
+
+ "dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
+ "dev-libs/E-1": { "LICENSE": "TEST" },
+ "dev-libs/E-2": { "LICENSE": "TEST" },
+ "dev-libs/F-1": { "DEPEND": "=dev-libs/E-1", "LICENSE": "TEST" },
+
+ "dev-java/sun-jdk-1.6.0.32": { "LICENSE": "TEST", "KEYWORDS": "~x86" },
+ "dev-java/sun-jdk-1.6.0.31": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": 'n'},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/A-1"],
+ license_changes={ "dev-libs/A-1": set(["TEST"]) }),
+
+ #Test license+keyword+use change at once.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/C-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/B-1", "dev-libs/C-1"],
+ license_changes={ "dev-libs/B-1": set(["TEST"]) },
+ unstable_keywords=["dev-libs/B-1"],
+ use_changes={ "dev-libs/B-1": { "foo": True } }),
+
+ #Test license with backtracking.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/D-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+ license_changes={ "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+
+ #Test license only for bug #420847
+ ResolverPlaygroundTestCase(
+ ["dev-java/sun-jdk"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-java/sun-jdk-1.6.0.31"],
+ license_changes={ "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmaskAndSets(self):
+
+ ebuilds = {
+ #ebuilds to test use changes
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ world_sets = ["@test-set"]
+ sets = {
+ "test-set": (
+ "dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D",
+ ),
+ }
+
+ test_cases = (
+ #Test USE changes.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@test-set"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+ )
+
+
+ playground = ResolverPlayground(ebuilds=ebuilds, world_sets=world_sets, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmaskKeepMasks(self):
+ """
+ Ensure that we try to use a masked version with keywords before trying
+ masked version with missing keywords (prefer masked regular version
+ over -9999 version).
+ """
+ ebuilds = {
+ "app-text/A-1": {},
+ }
+
+ test_cases = (
+ #Test mask and keyword changes.
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "y"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "n"},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
+ )
+
+ profile = {
+ "package.mask":
+ (
+ "app-text/A",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmask9999(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/A-9999": { "KEYWORDS": "" },
+ "dev-libs/B-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-3" },
+ }
+
+ profile = {
+ "package.mask":
+ (
+ ">=dev-libs/A-2",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success=False,
+ mergelist=["dev-libs/A-2", "dev-libs/B-1"],
+ needed_p_mask_changes=set(["dev-libs/A-2"])),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success=False,
+ mergelist=["dev-libs/A-9999", "dev-libs/C-1"],
+ unstable_keywords=set(["dev-libs/A-9999"]),
+ needed_p_mask_changes=set(["dev-libs/A-9999"])),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_multilib_use.py b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_multilib_use.py
new file mode 100644
index 0000000..e160c77
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_multilib_use.py
@@ -0,0 +1,85 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskMultilibUseTestCase(TestCase):
+
+ def testAutounmaskMultilibUse(self):
+
+ self.todo = True
+
+ ebuilds = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ installed = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ user_config = {
+ #"make.conf" : ("USE=\"abi_x86_32 abi_x86_64\"",)
+ "make.conf" : ("USE=\"abi_x86_64\"",)
+ }
+
+ world = ("games-util/steam-client-meta",)
+
+ test_cases = (
+
+ # Test autounmask solving of multilib use deps for bug #481628.
+ # We would like it to suggest some USE changes, but instead it
+ # currently fails with a SLOT conflict.
+
+ ResolverPlaygroundTestCase(
+ ["x11-proto/xextproto", "x11-libs/libXaw"],
+ options = {"--oneshot": True, "--autounmask": True,
+ "--backtrack": 30},
+ mergelist = ["x11-proto/xextproto-7.2.1-r1", "x11-libs/libXaw-1.0.11-r2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ user_config=user_config, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_use_breakage.py b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_use_breakage.py
new file mode 100644
index 0000000..3654aa6
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_autounmask_use_breakage.py
@@ -0,0 +1,63 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskUseBreakageTestCase(TestCase):
+
+ def testAutounmaskUseBreakage(self):
+
+ ebuilds = {
+
+ "app-misc/A-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/D[-foo]",
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/D[foo]"
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": ">=app-misc/D-1"
+ },
+
+ "app-misc/D-0" : {
+ "EAPI": "5",
+ "IUSE": "foo"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "IUSE": "bar"
+ },
+
+ }
+
+ test_cases = (
+
+ # Bug 510270
+ # _solve_non_slot_operator_slot_conflicts throws
+ # IndexError: tuple index out of range
+ # due to autounmask USE breakage.
+ ResolverPlaygroundTestCase(
+ ["app-misc/C", "app-misc/B", "app-misc/A"],
+ all_permutations = True,
+ success = False,
+ ambiguous_slot_collision_solutions = True,
+ slot_collision_solutions = [None, []]
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_backtracking.py b/usr/lib/portage/pym/portage/tests/resolver/test_backtracking.py
new file mode 100644
index 0000000..3b69eda
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_backtracking.py
@@ -0,0 +1,174 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class BacktrackingTestCase(TestCase):
+
+ def testBacktracking(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1", "dev-libs/B"],
+ all_permutations = True,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testBacktrackNotNeeded(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": {},
+ "dev-libs/B-2": {},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A dev-libs/B" },
+ "dev-libs/D-1": { "DEPEND": "=dev-libs/A-1 =dev-libs/B-1" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C", "dev-libs/D"],
+ all_permutations = True,
+ options = { "--backtrack": 1 },
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackWithoutUpdates(self):
+ """
+ If --update is not given we might have to mask the old installed version later.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/B-1": { "DEPEND": ">=dev-libs/Z-2" },
+ "dev-libs/Z-1": { },
+ "dev-libs/Z-2": { },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/A"],
+ all_permutations = True,
+ mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1",],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackMissedUpdates(self):
+ """
+ An update is missed due to a dependency on an older version.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/B-1": { "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "USE": "" },
+ "dev-libs/B-1": { "USE": "", "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ options = options,
+ all_permutations = True,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testBacktrackNoWrongRebuilds(self):
+ """
+ Ensure we remove backtrack masks if the reason for the mask gets masked itself.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D"},
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { "RDEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
+ }
+
+ installed = {
+ "dev-libs/A-1": { },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
+ }
+
+ world = ["dev-libs/B", "dev-libs/C"]
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = options,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_blocker.py b/usr/lib/portage/pym/portage/tests/resolver/test_blocker.py
new file mode 100644
index 0000000..94a88b8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_blocker.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictWithBlockerTestCase(TestCase):
+
+ def testBlocker(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/X" },
+ "dev-libs/B-1": { "DEPEND": "<dev-libs/X-2" },
+ "dev-libs/C-1": { "DEPEND": "<dev-libs/X-3" },
+
+ "dev-libs/X-1": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-1" },
+ "dev-libs/X-2": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-2" },
+ "dev-libs/X-3": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-3" },
+
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ installed = {
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C"],
+ options = { "--backtrack": 0 },
+ all_permutations = True,
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/X-1", "[uninstall]dev-libs/Y-1", "!=dev-libs/Y-1", \
+ ("dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_circular_choices.py b/usr/lib/portage/pym/portage/tests/resolver/test_circular_choices.py
new file mode 100644
index 0000000..33b7306
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_circular_choices.py
@@ -0,0 +1,61 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CircularChoicesTestCase(TestCase):
+
+ def testDirectCircularDependency(self):
+
+ ebuilds = {
+ "dev-lang/gwydion-dylan-2.4.0": {"DEPEND": "|| ( dev-lang/gwydion-dylan dev-lang/gwydion-dylan-bin )" },
+ "dev-lang/gwydion-dylan-bin-2.4.0": {},
+ }
+
+ test_cases = (
+ # Automatically pull in gwydion-dylan-bin to solve a circular dep
+ ResolverPlaygroundTestCase(
+ ["dev-lang/gwydion-dylan"],
+ mergelist = ['dev-lang/gwydion-dylan-bin-2.4.0', 'dev-lang/gwydion-dylan-2.4.0'],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class VirtualCircularChoicesTestCase(TestCase):
+ def testDirectVirtualCircularDependency(self):
+
+ # Bug #384107
+ self.todo = True
+
+ ebuilds = {
+ "dev-java/icedtea-6.1.10.3": { "SLOT" : "6", "DEPEND": "virtual/jdk" },
+ "dev-java/icedtea6-bin-1.10.3": {},
+ "virtual/jdk-1.6.0": { "SLOT" : "1.6", "RDEPEND": "|| ( dev-java/icedtea6-bin =dev-java/icedtea-6* )" },
+ }
+
+ test_cases = (
+ # Automatically pull in icedtea6-bin to solve a circular dep
+ ResolverPlaygroundTestCase(
+ ["dev-java/icedtea"],
+ mergelist = ["dev-java/icedtea6-bin-1.10.3", "virtual/jdk-1.6.0", "dev-java/icedtea-6.1.10.3"],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_circular_dependencies.py b/usr/lib/portage/pym/portage/tests/resolver/test_circular_dependencies.py
new file mode 100644
index 0000000..f8331ac
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_circular_dependencies.py
@@ -0,0 +1,84 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class CircularDependencyTestCase(TestCase):
+
+ #TODO:
+ # use config change by autounmask
+ # conflict on parent's parent
+ # difference in RDEPEND and DEPEND
+ # is there anything else than priority buildtime and runtime?
+ # play with use.{mask,force}
+ # play with REQUIRED_USE
+
+
+ def testCircularDependency(self):
+
+ ebuilds = {
+ "dev-libs/Z-1": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-2": { "DEPEND": "foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-3": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) ) foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Y-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/W-1": { "DEPEND": "dev-libs/Z[foo] dev-libs/Y", "EAPI": 2 },
+ "dev-libs/W-2": { "DEPEND": "dev-libs/Z[foo=] dev-libs/Y", "IUSE": "+foo", "EAPI": 2 },
+ "dev-libs/W-3": { "DEPEND": "dev-libs/Z[bar] dev-libs/Y", "EAPI": 2 },
+
+ "app-misc/A-1": { "DEPEND": "foo? ( =app-misc/B-1 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/A-2": { "DEPEND": "foo? ( =app-misc/B-2 ) bar? ( =app-misc/B-2 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1" },
+ "app-misc/B-2": { "DEPEND": "=app-misc/A-2" },
+ }
+
+ test_cases = (
+ #Simple tests
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-1"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)]), frozenset([("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict on parent
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-1"],
+ circular_dependency_solutions = {},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict with autounmask
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
+ use_changes = { "dev-libs/Z-3": {"bar": True}},
+ success = False),
+
+ #Conflict with REQUIRED_USE
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-1"],
+ circular_dependency_solutions = { "app-misc/B-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-2"],
+ circular_dependency_solutions = {},
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_complete_graph.py b/usr/lib/portage/pym/portage/tests/resolver/test_complete_graph.py
new file mode 100644
index 0000000..95b1f88
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_complete_graph.py
@@ -0,0 +1,130 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteGraphTestCase(TestCase):
+
+ def testCompleteGraphUseChange(self):
+ """
+ Prevent reverse dependency breakage triggered by USE changes.
+ """
+
+ ebuilds = {
+ "dev-libs/libxml2-2.8.0": {
+ "EAPI": "2",
+ "IUSE": "+icu",
+ "SLOT": "2",
+ },
+ "x11-libs/qt-webkit-4.8.2": {
+ "EAPI": "2",
+ "IUSE": "icu",
+ "RDEPEND" : "dev-libs/libxml2:2[!icu?]",
+ },
+ }
+
+ installed = {
+ "dev-libs/libxml2-2.8.0": {
+ "EAPI": "2",
+ "IUSE": "+icu",
+ "USE": "",
+ "SLOT": "2",
+ },
+ "x11-libs/qt-webkit-4.8.2": {
+ "EAPI": "2",
+ "IUSE": "icu",
+ "RDEPEND" : "dev-libs/libxml2:2[-icu]",
+ "USE": "",
+ }
+ }
+
+ world = ["x11-libs/qt-webkit"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/libxml2"],
+ options = {"--complete-graph-if-new-use" : "y" },
+ mergelist = ["dev-libs/libxml2-2.8.0"],
+ slot_collision_solutions = [{'dev-libs/libxml2-2.8.0': {'icu': False}}],
+ success = False,
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/libxml2"],
+ options = {"--complete-graph-if-new-use" : "n" },
+ mergelist = ["dev-libs/libxml2-2.8.0"],
+ success = True,
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testCompleteGraphVersionChange(self):
+ """
+ Prevent reverse dependency breakage triggered by version changes.
+ """
+
+ ebuilds = {
+ "sys-libs/x-0.1": {},
+ "sys-libs/x-1": {},
+ "sys-libs/x-2": {},
+ "sys-apps/a-1": {"RDEPEND" : ">=sys-libs/x-1 <sys-libs/x-2"},
+ }
+
+ installed = {
+ "sys-libs/x-1": {},
+ "sys-apps/a-1": {"RDEPEND" : ">=sys-libs/x-1 <sys-libs/x-2"},
+ }
+
+ world = ["sys-apps/a"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [">=sys-libs/x-2"],
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
+ mergelist = ["sys-libs/x-2"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ [">=sys-libs/x-2"],
+ options = {"--complete-graph-if-new-ver" : "y"},
+ mergelist = ["sys-libs/x-2"],
+ slot_collision_solutions = [],
+ success = False,
+ ),
+ ResolverPlaygroundTestCase(
+ ["<sys-libs/x-1"],
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
+ mergelist = ["sys-libs/x-0.1"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ ["<sys-libs/x-1"],
+ options = {"--complete-graph-if-new-ver" : "y"},
+ mergelist = ["sys-libs/x-0.1"],
+ slot_collision_solutions = [],
+ success = False,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py b/usr/lib/portage/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
new file mode 100644
index 0000000..fddbead
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
@@ -0,0 +1,74 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteIfNewSubSlotWithoutRevBumpTestCase(TestCase):
+
+	def testCompleteIfNewSubSlotWithoutRevBump(self):
+
+ ebuilds = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:=",
+ "RDEPEND": ">=media-libs/libpng-1.4:="
+ },
+ }
+
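+		# The gdk-pixbuf binary package recorded a dependency on libpng
+		# sub-slot 0/15 (:0/15=). Rebuilding libpng from the ebuild above
+		# yields a different sub-slot, so the stale binpkg cannot satisfy
+		# the dep even with --usepkg, and gdk-pixbuf is rebuilt from source.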
+ binpkgs = {
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ installed = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0/15"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ world = ["x11-libs/gdk-pixbuf"]
+
+ test_cases = (
+			# Test that --complete-graph-if-new-ver=y triggers a rebuild
+			# when the sub-slot changes without a revision bump.
+ ResolverPlaygroundTestCase(
+ ["media-libs/libpng"],
+ options = {
+ "--oneshot": True,
+ "--complete-graph-if-new-ver": "y",
+ "--rebuild-if-new-slot": "n",
+ "--usepkg": True
+ },
+ success = True,
+ mergelist = [
+ "media-libs/libpng-1.5.14",
+ "x11-libs/gdk-pixbuf-2.26.5"
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_depclean.py b/usr/lib/portage/pym/portage/tests/resolver/test_depclean.py
new file mode 100644
index 0000000..42350be
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_depclean.py
@@ -0,0 +1,285 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithDepsTestCase(TestCase):
+
+ def testDepcleanWithDeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
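+			# Only dev-libs/A is in the world set, so B and its entire
+			# runtime chain (D -> E -> F) are removed, while C stays as
+			# a dependency of A.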
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/D-1",
+ "dev-libs/E-1", "dev-libs/F-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+class DepcleanWithInstalledMaskedTestCase(TestCase):
+
+ def testDepcleanWithInstalledMasked(self):
+ """
+ Test case for bug 332719.
+		emerge --depclean ignores that B is masked by license and removes C.
+ The next emerge -uDN world doesn't take B and installs C again.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
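+				# With the fix for bug 332719, the license-masked B-1 is
+				# removed and C-1 is kept; the old, buggy expectation is
+				# preserved below for reference: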
+ #cleanlist=["dev-libs/C-1"]),
+ cleanlist=["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
+
+ def testDepcleanInstalledKeywordMaskedSlot(self):
+		"""
+		Verify that depclean removes the newer slot
+		when it is masked by KEYWORDS (see bug #350285).
+		"""
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( =dev-libs/B-2.7* =dev-libs/B-2.6* )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "EAPI" : "3", "RDEPEND": "|| ( dev-libs/B:2.7 dev-libs/B:2.6 )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-2.7"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeTestCase(TestCase):
+
+ def testDepcleanWithExclude(self):
+
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/A" },
+ }
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+
+ #With --exclude
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/A"]},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options={"--depclean": True, "--exclude": ["dev-libs/B"]},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeAndSlotsTestCase(TestCase):
+
+ def testDepcleanWithExcludeAndSlots(self):
+
+ installed = {
+ "dev-libs/Z-1": { "SLOT": 1},
+ "dev-libs/Z-2": { "SLOT": 2},
+ "dev-libs/Y-1": { "RDEPEND": "=dev-libs/Z-1", "SLOT": 1 },
+ "dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
+ }
+
+	world = ["dev-libs/Y"]
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/Y-1", "dev-libs/Z-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/Z"]},
+ success=True,
+ cleanlist=["dev-libs/Y-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/Y"]},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanAndWildcardsTestCase(TestCase):
+
+ def testDepcleanAndWildcards(self):
+
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/B" },
+ "dev-libs/B-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["*/*"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/*"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["*/A"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1"]),
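+			# B cannot be removed by itself, since installed A still
+			# depends on it.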
+ ResolverPlaygroundTestCase(
+ ["*/B"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_depclean_order.py b/usr/lib/portage/pym/portage/tests/resolver/test_depclean_order.py
new file mode 100644
index 0000000..9511d29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_depclean_order.py
@@ -0,0 +1,57 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:0/0=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = (
+ "dev-libs/C",
+ )
+
+ test_cases = (
+ # Remove dev-libs/A-1 first because of dev-libs/B:0/0= (built
+ # slot-operator dep).
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_depclean_slot_unavailable.py b/usr/lib/portage/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
new file mode 100644
index 0000000..689392b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
@@ -0,0 +1,78 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class DepcleanUnavailableSlotTestCase(TestCase):
+
+ def testDepcleanUnavailableSlot(self):
+ """
+ Test bug #445506, where we want to remove the slot
+ for which the ebuild is no longer available, even
+ though its version is higher.
+ """
+
+ ebuilds = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ world = ["sys-kernel/gentoo-sources"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.2.21"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+	# Now make the newer version available and verify that
+ # the lower version is depcleaned.
+ ebuilds.update({
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ })
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.0.53"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_depth.py b/usr/lib/portage/pym/portage/tests/resolver/test_depth.py
new file mode 100644
index 0000000..cb1e2dd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_depth.py
@@ -0,0 +1,252 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class ResolverDepthTestCase(TestCase):
+
+ def testResolverDepth(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+ "dev-libs/C-2": {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ "virtual/libusb-1" : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
+ "dev-libs/libusb-0.1.13" : {},
+ "dev-libs/libusb-1.0.5" : {"SLOT":"1"},
+ "dev-libs/libusb-compat-1" : {},
+ "sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
+
+ "sys-fs/udev-164" : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jre-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/gcj-jdk-4.5-r1" : {},
+ "dev-java/icedtea-6.1" : {},
+ "dev-java/icedtea-6.1-r1" : {},
+ "dev-java/sun-jdk-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jdk-1.6" : {"SLOT" : "1.6"},
+ "dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
+
+ "dev-java/ant-core-1.8" : {"DEPEND" : ">=virtual/jdk-1.4"},
+ "dev-db/hsqldb-1.8" : {"RDEPEND" : ">=virtual/jre-1.6"},
+ }
+
+ installed = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/icedtea-6.1" : {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ }
+
+ world = ["dev-libs/A"]
+
+ test_cases = (
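+			# --deep=N bounds dependency traversal: 0 considers only the
+			# argument atoms, 1 adds their direct dependencies, and so on;
+			# --deep=True means unlimited depth.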
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 0},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 1},
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--emptytree": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test bug #141118, where we avoid pulling in
+ # redundant deps, satisfying nested virtuals
+ # as efficiently as possible.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--selective" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+
+ # Test bug #150361, where depgraph._greedy_slots()
+ # is triggered by --update with AtomArg.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
+ ('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
+ mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.5"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.5.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.6"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test that we don't pull in any unnecessary updates
+ # when --update is not specified, even though we
+ # specified --deep.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-db/hsqldb"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-db/hsqldb-1.8"]),
+
+ # Don't traverse deps of an installed package with --deep=0,
+ # even if it's a virtual.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 0},
+ success = True,
+ mergelist = []),
+
+ # Satisfy unsatisfied dep of installed package with --deep=1.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13']),
+
+ # Pull in direct dep of virtual, even with --deep=0.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--deep" : 0},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),
+
+ # Test --nodeps with direct virtual deps.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --deep.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--deep" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --emptytree.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--emptytree" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test --emptytree with virtuals.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--emptytree" : True},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_eapi.py b/usr/lib/portage/pym/portage/tests/resolver/test_eapi.py
new file mode 100644
index 0000000..525b585
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_eapi.py
@@ -0,0 +1,115 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class EAPITestCase(TestCase):
+
+ def testEAPI(self):
+
+ ebuilds = {
+ #EAPI-1: IUSE-defaults
+ "dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" },
+ "dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" },
+ "dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" },
+ "dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" },
+ "dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" },
+
+ #EAPI-1: slot deps
+ "dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" },
+
+ #EAPI-2: use deps
+ "dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" },
+
+ #EAPI-2: strong blocks
+ "dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" },
+
+ #EAPI-4: slot operator deps
+ #~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" },
+
+ #EAPI-4: use dep defaults
+ "dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" },
+
+ #EAPI-4: REQUIRED_USE
+ "dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+
+ "dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"},
+ }
+
+ test_cases = (
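+			# Each version group fails below the EAPI that introduced the
+			# feature it uses, and succeeds from that EAPI onward.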
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
+ # not implemented: EAPI-4: slot operator deps
+ #~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_features_test_use.py b/usr/lib/portage/pym/portage/tests/resolver/test_features_test_use.py
new file mode 100644
index 0000000..bdd179d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_features_test_use.py
@@ -0,0 +1,68 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class FeaturesTestUse(TestCase):
+
+ def testFeaturesTestUse(self):
+ ebuilds = {
+ "dev-libs/A-1" : {
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "IUSE": "test foo"
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1" : {
+ "USE": "",
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "USE": "foo",
+ "IUSE": "test foo"
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test", "USE=\"-test -foo\"")
+ }
+
+ test_cases = (
+
+ # USE=test state should not trigger --newuse rebuilds, as
+ # specified in bug #373209, comment #3.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ # USE=-test -> USE=test, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # USE=foo -> USE=-foo, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_keywords.py b/usr/lib/portage/pym/portage/tests/resolver/test_keywords.py
new file mode 100644
index 0000000..d59ea58
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_keywords.py
@@ -0,0 +1,356 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class KeywordsTestCase(TestCase):
+
+ def testStableConfig(self):
+ # Only accept stable keywords for a particular ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
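+			# "-*" first discards the profile's default accepted keywords;
+			# "x86" then re-accepts only packages with a stable x86 keyword.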
+ '*/* -* x86',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = False,
+ unstable_keywords = ('app-misc/B-1',),
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = False,
+ unstable_keywords = ('app-misc/D-1',),
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = False,
+ unstable_keywords = ('app-misc/E-1',),
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAnyStableConfig(self):
+ # Accept stable keywords for any ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* *',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = False,
+ unstable_keywords = ('app-misc/B-1',),
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = False,
+ unstable_keywords = ('app-misc/D-1',),
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testUnstableConfig(self):
+ # Accept stable and unstable keywords for a particular ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* x86 ~x86',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = False,
+ unstable_keywords = ('app-misc/E-1',),
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAnyUnstableConfig(self):
+ # Accept unstable keywords for any ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* * ~*',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = True,
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testIgnoreKeywordsConfig(self):
+ # Ignore keywords entirely (accept **)
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* **',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = True,
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = True,
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_merge_order.py b/usr/lib/portage/pym/portage/tests/resolver/test_merge_order.py
new file mode 100644
index 0000000..5d000d1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_merge_order.py
@@ -0,0 +1,478 @@
+# Copyright 2011-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class MergeOrderTestCase(TestCase):
+
+ def testMergeOrder(self):
+ ebuilds = {
+ "app-misc/blocker-buildtime-a-1" : {},
+ "app-misc/blocker-buildtime-unbuilt-a-1" : {
+ "DEPEND" : "!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-buildtime-unbuilt-hard-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-update-order-a-1" : {},
+ "app-misc/blocker-update-order-hard-a-1" : {},
+ "app-misc/blocker-update-order-hard-unsolvable-a-1" : {},
+ "app-misc/blocker-runtime-a-1" : {},
+ "app-misc/blocker-runtime-b-1" : {},
+ "app-misc/blocker-runtime-hard-a-1" : {},
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-buildtime-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-b",
+ },
+ "app-misc/circ-buildtime-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-c",
+ },
+ "app-misc/circ-buildtime-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-a",
+ },
+ "app-misc/circ-buildtime-unsolvable-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-b",
+ },
+ "app-misc/circ-buildtime-unsolvable-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-c",
+ },
+ "app-misc/circ-buildtime-unsolvable-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-unsolvable-a",
+ },
+ "app-misc/circ-post-runtime-a-1": {
+ "PDEPEND": "app-misc/circ-post-runtime-b",
+ },
+ "app-misc/circ-post-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-c",
+ },
+ "app-misc/circ-post-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a",
+ },
+ "app-misc/circ-runtime-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-b",
+ },
+ "app-misc/circ-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-runtime-c",
+ },
+ "app-misc/circ-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-runtime-a",
+ },
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-a-1": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-b-1": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-satisfied-c-1": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-smallest-a-1": {
+ "RDEPEND": "app-misc/circ-smallest-b",
+ },
+ "app-misc/circ-smallest-b-1": {
+ "RDEPEND": "app-misc/circ-smallest-a",
+ },
+ "app-misc/circ-smallest-c-1": {
+ "RDEPEND": "app-misc/circ-smallest-d",
+ },
+ "app-misc/circ-smallest-d-1": {
+ "RDEPEND": "app-misc/circ-smallest-e",
+ },
+ "app-misc/circ-smallest-e-1": {
+ "RDEPEND": "app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-f-1": {
+ "RDEPEND": "app-misc/circ-smallest-g app-misc/circ-smallest-a app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-g-1": {
+ "RDEPEND": "app-misc/circ-smallest-f",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-2" : {
+ "DEPEND" : "app-misc/blocker-update-order-hard-unsolvable-a",
+ "RDEPEND" : "",
+ },
+ "app-misc/some-app-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-a app-misc/circ-runtime-b",
+ },
+ "app-misc/some-app-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a app-misc/circ-post-runtime-b",
+ },
+ "app-misc/some-app-c-1": {
+ "RDEPEND": "app-misc/circ-buildtime-a app-misc/circ-buildtime-b",
+ },
+ "app-admin/eselect-python-20100321" : {},
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "sys-apps/portage-2.1.9.49" : {
+ "DEPEND" : "dev-lang/python >=app-admin/eselect-python-20091230",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "dev-lang/python-3.2" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/gcc-4.5.2" : {},
+ "sys-devel/binutils-2.18" : {},
+ "sys-devel/binutils-2.20.1" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "sys-libs/glibc-2.13" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "sys-kernel/linux-headers-2.6.39": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "app-arch/xz-utils-5.0.2" : {},
+ "dev-util/pkgconfig-0.25-r2" : {},
+ "kde-base/kdelibs-3.5.7" : {
+ "PDEPEND" : "kde-misc/kdnssd-avahi",
+ },
+ "kde-misc/kdnssd-avahi-0.1.2" : {
+ "DEPEND" : "kde-base/kdelibs app-arch/xz-utils dev-util/pkgconfig",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kdnssd-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/libkdegames-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kmines-3.5.7" : {
+ "DEPEND" : "kde-base/libkdegames",
+ "RDEPEND" : "kde-base/libkdegames",
+ },
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "DEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ "RDEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ },
+ "media-video/libav-0.7_pre20110327" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "!media-video/ffmpeg",
+ },
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
+ }
+
+ installed = {
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/binutils-2.18" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "USE": "xorg",
+ "DEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ "RDEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ },
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
+ }
+
+ test_cases = (
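+			# In expected mergelists, a "[uninstall]" prefix marks an
+			# uninstall task, while "!atom" and "!!atom" entries are soft
+			# and hard blockers that remain in the list.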
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-a-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-c-1"), "app-misc/some-app-a-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-c-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-a-1"), "app-misc/some-app-a-1"]),
+ # Test unsolvable circular dep that is RDEPEND in one
+ # direction and DEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-buildtime-unsolvable-a"],
+ success = False,
+ circular_dependency_solutions = {}),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other.
+ # This requires an installed instance of the DEPEND
+ # package in order to be solvable.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-c", "app-misc/circ-buildtime-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-buildtime-b-1", "app-misc/circ-buildtime-c-1"), "app-misc/circ-buildtime-a-1", "app-misc/some-app-c-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and PDEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-b"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["app-misc/circ-post-runtime-a-1", ("app-misc/circ-post-runtime-b-1", "app-misc/circ-post-runtime-c-1"), "app-misc/some-app-b-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other,
+ # with all dependencies initially satisfied. Optimally,
+ # the DEPEND/buildtime dep should be updated before the
+ # package that depends on it, even though it's feasible
+ # to update it later since it is already satisfied.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-satisfied-a", "app-misc/circ-satisfied-b", "app-misc/circ-satisfied-c"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-c-1"),),
+ mergelist = [("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-b-1", "app-misc/circ-satisfied-c-1")]),
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-smallest-a", "app-misc/circ-smallest-c", "app-misc/circ-smallest-f"],
+ success = True,
+ ambiguous_merge_order = True,
+ all_permutations = True,
+ mergelist = [('app-misc/circ-smallest-a-1', 'app-misc/circ-smallest-b-1'),
+ ('app-misc/circ-smallest-c-1', 'app-misc/circ-smallest-d-1', 'app-misc/circ-smallest-e-1'),
+ ('app-misc/circ-smallest-f-1', 'app-misc/circ-smallest-g-1')]),
+			# An installed package has a buildtime-only blocker
+			# that should be ignored.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-a-1"]),
+ # We're installing a package that an old version of
+ # an installed package blocks. However, an update is
+ # available to the old package. The old package should
+ # be updated first, in order to solve the blocker without
+ # any need for blocking packages to temporarily overlap.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-a", "app-misc/installed-old-version-blocks-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-a-2", "app-misc/blocker-update-order-a-1"]),
+ # This is the same as above but with a hard blocker. The hard
+ # blocker is solved automatically since the update makes it
+ # irrelevant.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-a", "app-misc/installed-old-version-blocks-hard-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-hard-a-2", "app-misc/blocker-update-order-hard-a-1"]),
+ # This is similar to the above case except that it's unsolvable
+ # due to merge order, unless bug 250286 is implemented so that
+ # the installed blocker will be unmerged before installation
+ # of the package it blocks (rather than after like a soft blocker
+ # would be handled). The "unmerge before" behavior requested
+ # in bug 250286 must be optional since essential programs or
+ # libraries may be temporarily unavailable during a
+ # non-overlapping update like this.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-unsolvable-a", "app-misc/installed-old-version-blocks-hard-unsolvable-a"],
+ success = False,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2'),),
+ mergelist = [('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2', '!!app-misc/blocker-update-order-hard-unsolvable-a')]),
+ # The installed package has runtime blockers that
+ # should cause it to be uninstalled. The uninstall
+ # task is executed only after blocking packages have
+ # been merged.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-a", "app-misc/blocker-runtime-b"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/blocker-runtime-a-1", "app-misc/blocker-runtime-b-1"), "[uninstall]app-misc/installed-blocker-a-1", ("!app-misc/blocker-runtime-a", "!app-misc/blocker-runtime-b")]),
+ # We have a soft buildtime blocker against an installed
+ # package that should cause it to be uninstalled. Note that with
+ # soft blockers, the blocking packages are allowed to temporarily
+ # overlap. This allows any essential programs/libraries provided
+ # by both packages to be available at all times.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-unbuilt-a-1", "[uninstall]app-misc/installed-blocker-a-1", "!app-misc/installed-blocker-a"]),
+ # We have a hard buildtime blocker against an installed
+ # package that will not resolve automatically (unless
+ # the option requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-buildtime-unbuilt-hard-a-1', '!!app-misc/installed-blocker-a']),
+ # An installed package has a hard runtime blocker that
+ # will not resolve automatically (unless the option
+ # requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-runtime-hard-a-1', '!!app-misc/blocker-runtime-hard-a']),
+ # Test swapping of providers for a new-style virtual package,
+ # which relies on delayed evaluation of disjunctive (virtual
+ # and ||) deps as required to solve bug #264434. Note that
+ # this behavior is not supported for old-style PROVIDE virtuals,
+ # as reported in bug #339164.
+ ResolverPlaygroundTestCase(
+ ["media-video/libav"],
+ success=True,
+ mergelist = ['media-video/libav-0.7_pre20110327', '[uninstall]media-video/ffmpeg-0.7_rc1', '!media-video/ffmpeg']),
+ # Test that OS_HEADERS_PACKAGE_ATOM and LIBC_PACKAGE_ATOM
+ # are merged asap, in order to account for implicit
+ # dependencies. See bug #303567. Optimally, satisfied deps
+ # are always merged after the asap nodes that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-arch/xz-utils", "sys-kernel/linux-headers", "sys-devel/binutils", "sys-libs/glibc"],
+ options = {"--complete-graph" : True},
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = ['sys-kernel/linux-headers-2.6.39', 'sys-devel/gcc-4.5.2', 'sys-libs/glibc-2.13', ('app-arch/xz-utils-5.0.2', 'sys-devel/binutils-2.20.1')]),
+ # Test asap install of PDEPEND for bug #180045.
+ ResolverPlaygroundTestCase(
+ ["kde-base/kmines", "kde-base/kdnssd", "kde-base/kdelibs", "app-arch/xz-utils"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (
+ ('dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/libkdegames-3.5.7'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/kdnssd-3.5.7'),
+ ('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
+ ),
+ mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+ # Test satisfied circular DEPEND/RDEPEND with one := operator.
+ # Both deps are already satisfied by installed packages, but
+ # the := dep is given higher priority in merge order.
+ ResolverPlaygroundTestCase(
+ ["media-libs/mesa", "x11-base/xorg-server"],
+ success=True,
+ all_permutations = True,
+ mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py b/usr/lib/portage/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
new file mode 100644
index 0000000..a860e7b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
@@ -0,0 +1,37 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MissingIUSEandEvaluatedAtomsTestCase(TestCase):
+
+ def testMissingIUSEandEvaluatedAtoms(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo?]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo?,bar]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/B-1": { "IUSE": "bar" },
+ }
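+		# EAPI 2 USE dependencies as used above:
+		#   B[foo?]      -> B[foo] if the parent has foo enabled, else B
+		#   B[foo?,bar]  -> additionally requires bar enabled on B
+		# Both atoms fail because dev-libs/B lacks "foo" in IUSE: in
+		# EAPI 2 a USE dependency may only reference flags present in
+		# the target's IUSE (use-dep defaults only arrived in EAPI 4).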
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_multirepo.py b/usr/lib/portage/pym/portage/tests/resolver/test_multirepo.py
new file mode 100644
index 0000000..2b1a6d0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_multirepo.py
@@ -0,0 +1,407 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultirepoTestCase(TestCase):
+
+ def testMultirepo(self):
+ ebuilds = {
+ #Simple repo selection
+ "dev-libs/A-1": { },
+ "dev-libs/A-1::repo1": { },
+ "dev-libs/A-2::repo1": { },
+ "dev-libs/A-1::repo2": { },
+
+ #Packages in exactly one repo
+ "dev-libs/B-1": { },
+ "dev-libs/C-1::repo1": { },
+
+		#Package in repository 1 and 2, but 2 must be used
+ "dev-libs/D-1::repo1": { },
+ "dev-libs/D-1::repo2": { },
+
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/E-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/F-1::repo1": { "SLOT": "1" },
+ "dev-libs/F-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/G-1::repo1": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "" },
+ "dev-libs/G-1::repo2": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "^^ ( x y )" },
+
+ "dev-libs/H-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+
+ "dev-libs/K-1::repo2": { },
+ }
+
+ installed = {
+ "dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo1": { },
+ }
+
+ binpkgs = {
+ "dev-libs/C-1::repo2": { },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo2": { },
+ }
+
+ sets = {
+ "multirepotest":
+ ("dev-libs/A::test_repo",)
+ }
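+		# Atom syntax: "dev-libs/A::repo1" restricts matching to repo1,
+		# while a plain "dev-libs/A-1" in a mergelist refers to the
+		# default test_repo. When equal best versions are available from
+		# several repositories, the higher-priority repository (repo2
+		# here) wins, as the dev-libs/D case below demonstrates.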
+
+ test_cases = (
+ #Simple repo selection
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::test_repo"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["@multirepotest"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #Packages in exactly one repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #Package in repository 1 and 2, but 2 must be used
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1::repo2"]),
+
+ #--usepkg: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #--newrepo --usepkgonly: ebuild is ignored
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ #--newrepo --usepkgonly: if binpkg matches installed, do nothing
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #--newrepo --usepkgonly: reinstall if binpkg has new repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/K-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #Atoms with slots
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo1"]),
+
+			# Dependency on installed dev-libs/I-2, whose ebuild is no
+			# longer available from the same repo, should not unnecessarily
+			# reinstall the same version from a different repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ # Dependency on installed dev-libs/I-2 ebuild should trigger reinstall
+ # when --newrepo flag is used.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True, "--newrepo": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ # Check interaction between repo priority and unsatisfied
+ # REQUIRED_USE, for bug #350254.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/G-1"],
+ check_repo_names = True,
+ success = False),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testMultirepoUserConfig(self):
+ ebuilds = {
+ #package.use test
+ "dev-libs/A-1": { "IUSE": "foo" },
+ "dev-libs/A-2::repo1": { "IUSE": "foo" },
+ "dev-libs/A-3::repo2": { },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
+ "dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
+
+ #package.keywords test
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
+
+ #package.license
+ "dev-libs/D-1": { "LICENSE": "TEST" },
+ "dev-libs/D-1::repo1": { "LICENSE": "TEST" },
+
+ #package.mask
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/H-1": { },
+ "dev-libs/H-1::repo1": { },
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+ "dev-libs/J-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ #package.properties
+ "dev-libs/F-1": { "PROPERTIES": "bar"},
+ "dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
+
+ #package.unmask
+ "dev-libs/G-1": { },
+ "dev-libs/G-1::repo1": { },
+
+ #package.mask with wildcards
+ "dev-libs/Z-1::repo3": { },
+ }
+
+ installed = {
+ "dev-libs/J-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ }
+
+ user_config = {
+ "package.use":
+ (
+ "dev-libs/A::repo1 foo",
+ ),
+ "package.keywords":
+ (
+ "=dev-libs/C-1::test_repo",
+ ),
+ "package.license":
+ (
+ "=dev-libs/D-1::test_repo TEST",
+ ),
+ "package.mask":
+ (
+ "dev-libs/E::repo1",
+ "dev-libs/H",
+ "dev-libs/I::repo1",
+ #needed for package.unmask test
+ "dev-libs/G",
+ #wildcard test
+ "*/*::repo3",
+ ),
+ "package.properties":
+ (
+ "dev-libs/F::repo1 -bar",
+ ),
+ "package.unmask":
+ (
+ "dev-libs/G::test_repo",
+ ),
+ }
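+		# Repo-qualified atoms in user configuration apply to a single
+		# repository: "dev-libs/E::repo1" masks only repo1's copy, so
+		# dev-libs/E-1 can still be merged from the default test_repo,
+		# while the unqualified "dev-libs/H" entry masks every copy.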
+
+ test_cases = (
+ #package.use test
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-3"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ check_repo_names = True),
+
+ #package.keywords test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1"]),
+
+ #package.license test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1"]),
+
+ #package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1"]),
+
+			# Dependency on installed dev-libs/I-2, whose ebuild is
+			# masked in its own repo, should not unnecessarily pull
+			# in a different slot. It should just pull in the same slot
+			# from a different repo (bug #351828).
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/I-2"]),
+
+ #package.properties test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1"]),
+
+ #package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/G-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = { "--autounmask": 'n' },
+ success = False),
+
+ #package.mask with wildcards
+ ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_multislot.py b/usr/lib/portage/pym/portage/tests/resolver/test_multislot.py
new file mode 100644
index 0000000..cbb1bee
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_multislot.py
@@ -0,0 +1,59 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultiSlotTestCase(TestCase):
+
+ def testMultiSlotSelective(self):
+ """
+ Test that a package isn't reinstalled due to SLOT dependency
+ interaction with USE=multislot (bug #220341).
+ """
+
+ ebuilds = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "4.4" },
+ "dev-util/nvidia-cuda-toolkit-4.0" : {"EAPI": "1", "RDEPEND": "sys-devel/gcc:4.4"},
+ }
+
+ installed = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "i686-pc-linux-gnu-4.4.4" },
+ "dev-util/nvidia-cuda-toolkit-4.0" : {"EAPI": "1", "RDEPEND": "sys-devel/gcc:4.4"},
+ }
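+
+		# With USE=multislot, the installed gcc recorded an expanded
+		# CTARGET-style SLOT rather than "4.4", so the sys-devel/gcc:4.4
+		# atom no longer literally matches the installed slot. A selective
+		# update must nevertheless not reinstall gcc (bug #220341).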
+
+ world = (
+ "dev-util/nvidia-cuda-toolkit",
+ )
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["sys-devel/gcc:4.4"],
+ options = options,
+ mergelist = [],
+ success = True),
+
+ # depclean test for bug #382823
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_old_dep_chain_display.py b/usr/lib/portage/pym/portage/tests/resolver/test_old_dep_chain_display.py
new file mode 100644
index 0000000..8aedf59
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_old_dep_chain_display.py
@@ -0,0 +1,40 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OldDepChainDisplayTestCase(TestCase):
+
+ def testOldDepChainDisplay(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "foo? ( dev-libs/B[-bar] )", "IUSE": "+foo", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "foo? ( dev-libs/C )", "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-1": { "IUSE": "bar", "DEPEND": "!bar? ( dev-libs/D[-baz] )", "EAPI": "2" },
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/D-1": { "IUSE": "+baz", "EAPI": "1" },
+ }
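+
+		# =dev-libs/A-1 fails because its dep chain ends at dev-libs/D[-baz]
+		# while D enables baz by default; =dev-libs/A-2 fails because
+		# dev-libs/C is only keyworded ~x86. With autounmask disabled,
+		# neither chain is satisfiable, exercising the dep chain display.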
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_onlydeps.py b/usr/lib/portage/pym/portage/tests/resolver/test_onlydeps.py
new file mode 100644
index 0000000..986769a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_onlydeps.py
@@ -0,0 +1,39 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsTestCase(TestCase):
+
+ def testOnlydeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B" },
+ "dev-libs/B-1": { },
+ }
+ installed = {
+ "dev-libs/B-1": { },
+ }
+
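+		# With --onlydeps, only the dependencies of the listed packages
+		# are merged: dev-libs/A itself is skipped, while its dependency
+		# dev-libs/B-1 is still remerged even though a copy is already
+		# installed (nothing like --noreplace is in effect here).
+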
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True },
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_or_choices.py b/usr/lib/portage/pym/portage/tests/resolver/test_or_choices.py
new file mode 100644
index 0000000..d9d14f0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_or_choices.py
@@ -0,0 +1,212 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class OrChoicesTestCase(TestCase):
+
+ def testOrChoices(self):
+ ebuilds = {
+ "dev-lang/vala-0.20.0" : {
+ "EAPI": "5",
+ "SLOT": "0.20"
+ },
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ #"dev-libs/gobject-introspection-1.36.0" : {
+ # "EAPI": "5",
+ # "RDEPEND" : "!<dev-lang/vala-0.20.0",
+ #},
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ installed = {
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ world = ["dev-libs/gobject-introspection", "sys-apps/systemd-ui"]
+
+ test_cases = (
+ # Demonstrate that vala:0.20 update is pulled in, for bug #478188
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = ['dev-lang/vala-0.20.0']),
+ # Verify that vala:0.20 is not pulled in without --deep
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ # Verify that vala:0.20 is not pulled in without --update
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testOrChoicesLibpostproc(self):
+ ebuilds = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-video/ffmpeg-1.2.2" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ installed = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ world = ["media-plugins/gst-plugins-ffmpeg"]
+
+ test_cases = (
+ # Demonstrate that libpostproc is preferred
+ # over ffmpeg:0 for bug #480736.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testInitiallyUnsatisfied(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X <app-misc/A-2 )"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X <app-misc/A-2 )"
+ },
+
+ }
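+
+		# Note the installed form of the slot operator: app-misc/B was
+		# built against app-misc/A-1 (SLOT="0/1"), so its "app-misc/A:="
+		# dependency was recorded as "app-misc/A:0/1=", binding it to that
+		# sub-slot until app-misc/B is rebuilt.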
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #522652, where the unsatisfiable app-misc/X
+ # atom is selected, and the dependency is placed into
+ # _initially_unsatisfied_deps where it is ignored, causing
+ # upgrade to app-misc/A-2 (breaking a dependency of
+ # app-misc/C-0).
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {},
+ success = True,
+ mergelist = ['app-misc/A-1']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_output.py b/usr/lib/portage/pym/portage/tests/resolver/test_output.py
new file mode 100644
index 0000000..34efe9c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_output.py
@@ -0,0 +1,87 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MergelistOutputTestCase(TestCase):
+
+ def testMergelistOutput(self):
+ """
+		This test doesn't check whether the output is correct; it only
+		makes sure that the output code doesn't raise a traceback.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B dev-libs/C", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/D", "IUSE": "foo +bar", "EAPI": 1 },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/E", "IUSE": "foo bar" },
+ "dev-libs/D-1": { "IUSE": "" },
+ "dev-libs/E-1": {},
+
+ #reinstall for flags
+ "dev-libs/Z-1": { "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/Y-1": { "IUSE": "foo", "EAPI": 1 },
+ "dev-libs/X-1": {},
+ "dev-libs/W-1": { "IUSE": "+foo", "EAPI": 1 },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "", "IUSE": "foo" },
+ "dev-libs/Y-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/X-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/W-1": { },
+ }
+
+		option_combos = (
+ (),
+ ("verbose",),
+ ("tree",),
+ ("tree", "unordered-display",),
+ ("verbose", "tree",),
+ ("verbose", "tree", "unordered-display",),
+ )
+
+ test_cases = []
+		for options in option_combos:
+ testcase_opts = {}
+ for opt in options:
+ testcase_opts["--" + opt] = True
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = testcase_opts,
+ success = True,
+ ignore_mergelist_order=True,
+ mergelist = ["dev-libs/D-1", "dev-libs/E-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Z-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Y"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Y-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/X"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/X-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/W"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/W-1"]))
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_package_tracker.py b/usr/lib/portage/pym/portage/tests/resolver/test_package_tracker.py
new file mode 100644
index 0000000..8fa3513
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_package_tracker.py
@@ -0,0 +1,266 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage.dep import Atom
+from portage.tests import TestCase
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+
+class PackageTrackerTestCase(TestCase):
+
+ FakePackage = collections.namedtuple("FakePackage",
+ ["root", "cp", "cpv", "slot", "slot_atom", "version", "repo"])
+
+ FakeConflict = collections.namedtuple("FakeConflict",
+ ["description", "root", "pkgs"])
+
+ def make_pkg(self, root, atom, repo="test_repo"):
+ atom = Atom(atom)
+ slot_atom = Atom("%s:%s" % (atom.cp, atom.slot))
+ slot = atom.slot
+
+ return self.FakePackage(root=root, cp=atom.cp, cpv=atom.cpv,
+ slot=slot, slot_atom=slot_atom, version=atom.version, repo=repo)
+
+ def make_conflict(self, description, root, pkgs):
+ return self.FakeConflict(description=description, root=root, pkgs=pkgs)
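+
+	# For example, make_pkg("/", "=dev-libs/X-1:0") yields a FakePackage
+	# with cp="dev-libs/X", cpv="dev-libs/X-1", slot="0" and
+	# slot_atom=Atom("dev-libs/X:0"), which is what PackageTracker uses
+	# to group packages into slots.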
+
+ def test_add_remove_discard(self):
+ p = PackageTracker()
+
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ self.assertTrue(p.contains(x1, installed=True))
+ self.assertTrue(p.contains(x1, installed=False))
+ p.remove_pkg(x1)
+ self.assertTrue(x1 not in p)
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+
+ self.assertRaises(KeyError, p.remove_pkg, x2)
+
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+ p.remove_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.discard_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+
+ all_pkgs = list(p.all_pkgs("/"))
+ self.assertEqual(len(all_pkgs), 2)
+ self.assertTrue(all_pkgs[0] is x1 and all_pkgs[1] is x2)
+
+ self.assertEqual(len(list(p.all_pkgs("/"))), 2)
+ self.assertEqual(len(list(p.all_pkgs("/xxx"))), 0)
+
+ def test_match(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ p.add_pkg(x2)
+ p.add_pkg(x1)
+
+ matches = list(p.match("/", Atom("=dev-libs/X-1")))
+ self.assertTrue(x1 in matches)
+ self.assertEqual(len(matches), 1)
+
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ matches = list(p.match("/xxx", Atom("dev-libs/X")))
+ self.assertEqual(len(matches), 0)
+
+ matches = list(p.match("/", Atom("dev-libs/Y")))
+ self.assertEqual(len(matches), 0)
+
+ p.add_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1] and x3 is matches[2])
+ self.assertEqual(len(matches), 3)
+
+ p.remove_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ def test_dbapi_interface(self):
+ p = PackageTracker()
+ dbapi = PackageTrackerDbapiWrapper("/", p)
+ installed = self.make_pkg("/", "=dev-libs/X-0:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:6")
+ x5 = self.make_pkg("/xxx", "=dev-libs/X-5:6")
+
+ def check_dbapi(pkgs):
+ all_pkgs = set(dbapi)
+ self.assertEqual(len(all_pkgs), len(pkgs))
+
+ x_atom = "dev-libs/X"
+ y_atom = "dev-libs/Y"
+ matches = dbapi.cp_list(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.cp_list(y_atom))
+ matches = dbapi.match(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.match(y_atom))
+
+ check_dbapi([])
+
+ p.add_installed_pkg(installed)
+ check_dbapi([installed])
+
+ p.add_pkg(x1)
+ check_dbapi([x1])
+
+ p.remove_pkg(x1)
+ check_dbapi([installed])
+
+ dbapi.cpv_inject(x1)
+ check_dbapi([x1])
+
+ dbapi.cpv_inject(x2)
+ check_dbapi([x1, x2])
+
+ p.remove_pkg(x1)
+ check_dbapi([x2])
+
+ p.add_pkg(x5)
+ check_dbapi([x2])
+
+
+ def test_installed(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x1b = self.make_pkg("/", "=dev-libs/X-1.1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ def check_installed(x, should_contain, num_pkgs):
+ self.assertEqual(x in p, should_contain)
+ self.assertEqual(p.contains(x), should_contain)
+			self.assertEqual(p.contains(x, installed=True), should_contain)
+			self.assertEqual(p.contains(x, installed=False), False)
+ self.assertEqual(len(list(p.all_pkgs("/"))), num_pkgs)
+
+ def check_matches(atom, expected):
+ matches = list(p.match("/", Atom(atom)))
+ self.assertEqual(len(matches), len(expected))
+ for x, y in zip(matches, expected):
+ self.assertTrue(x is y)
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_pkg(x2)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1b)
+ check_installed(x1, False, 1)
+ check_installed(x1b, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.remove_pkg(x2)
+ check_installed(x1, True, 2)
+ check_installed(x1b, True, 2)
+ check_matches("dev-libs/X", [x1, x1b])
+
+ def test_conflicts(self):
+ p = PackageTracker()
+ installed1 = self.make_pkg("/", "=dev-libs/X-0:0")
+ installed2 = self.make_pkg("/", "=dev-libs/X-0.1:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:4")
+ x4b = self.make_pkg("/", "=dev-libs/X-4:4b::x-repo")
+
+ def check_conflicts(expected, slot_conflicts_only=False):
+ if slot_conflicts_only:
+ conflicts = list(p.slot_conflicts())
+ else:
+ conflicts = list(p.conflicts())
+ self.assertEqual(len(conflicts), len(expected))
+ for got, exp in zip(conflicts, expected):
+ self.assertEqual(got.description, exp.description)
+ self.assertEqual(got.root, exp.root)
+ self.assertEqual(len(got.pkgs), len(exp.pkgs))
+ self.assertEqual(len(got), len(exp.pkgs))
+ for x, y in zip(got.pkgs, exp.pkgs):
+ self.assertTrue(x is y)
+ for x, y in zip(got, exp.pkgs):
+ self.assertTrue(x is y)
+ for x in exp.pkgs:
+ self.assertTrue(x in got)
+
+ check_conflicts([])
+ check_conflicts([])
+
+ p.add_installed_pkg(installed1)
+ p.add_installed_pkg(installed2)
+ check_conflicts([])
+
+ p.add_pkg(x1)
+ check_conflicts([])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2, x3])])
+ p.remove_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.remove_pkg(x2)
+ check_conflicts([])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3])])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
+ p.add_pkg(x4)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
+ p.add_pkg(x4b)
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ self.make_conflict("cpv conflict", "/", [x4, x4b]),
+ ]
+ )
+
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ ],
+ slot_conflicts_only=True
+ )
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_rebuild.py b/usr/lib/portage/pym/portage/tests/resolver/test_rebuild.py
new file mode 100644
index 0000000..6f1a783
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_rebuild.py
@@ -0,0 +1,149 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RebuildTestCase(TestCase):
+
+ def testRebuild(self):
+ """
+ Rebuild packages when build-time dependencies are upgraded.
+ """
+
+ ebuilds = {
+ "sys-libs/x-1": { },
+ "sys-libs/x-1-r1": { },
+ "sys-libs/x-2": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/a-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/d-2": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/f-2": { "DEPEND" : "sys-apps/a", "RDEPEND" : ""},
+ "sys-apps/g-2": { "DEPEND" : "sys-apps/b sys-libs/x",
+ "RDEPEND" : ""},
+ }
+
+ installed = {
+ "sys-libs/x-1": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/f-1": { "DEPEND" : "sys-apps/a", "RDEPEND" : ""},
+ "sys-apps/g-1": { "DEPEND" : "sys-apps/b",
+ "RDEPEND" : ""},
+ }
+
+ world = ["sys-apps/a", "sys-apps/b", "sys-apps/c", "sys-apps/d",
+ "sys-apps/e", "sys-apps/f", "sys-apps/g"]
+
+
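+		# Rebuilds propagate from the requested atoms to packages with a
+		# build-time (DEPEND) dependency on them: a, b, c, e and g all
+		# DEPEND on sys-libs/x. sys-apps/d has only an RDEPEND on x, and
+		# sys-apps/f depends on sys-apps/a, which is never requested
+		# directly, so neither is rebuilt in the cases below.
+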
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-exclude" : ["sys-apps/c"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/c-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-libs/x"]},
+ mergelist = ['sys-libs/x-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-apps/b"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/c-2', 'sys-apps/e-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1", "sys-apps/b"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-1-r1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1", "=sys-apps/b-1"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1', 'sys-apps/a-2',
+ 'sys-apps/b-1', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py b/usr/lib/portage/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
new file mode 100644
index 0000000..415277b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
@@ -0,0 +1,59 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RegularSlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testRegularSlotChangeWithoutRevBumpTestCase(self):
+
+ ebuilds = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "0"
+ },
+ "app-office/libreoffice-4.0.0.2" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/boost-1.46:=",
+ "RDEPEND": ">=dev-libs/boost-1.46:=",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ installed = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ world = []
+
+ test_cases = (
+ # Test that @__auto_slot_operator_replace_installed__
+ # pulls in the available slot, even though it's
+ # different from the installed slot (0 instead of 1.52).
+ ResolverPlaygroundTestCase(
+ ["app-office/libreoffice"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = [
+ 'dev-libs/boost-1.52.0',
+ 'app-office/libreoffice-4.0.0.2'
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_required_use.py b/usr/lib/portage/pym/portage/tests/resolver/test_required_use.py
new file mode 100644
index 0000000..c8810fa
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_required_use.py
@@ -0,0 +1,123 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class RequiredUSETestCase(TestCase):
+
+ def testRequiredUSE(self):
+ """
+		Only simple REQUIRED_USE values are tested here. The parser itself is tested in dep/testCheckRequiredUse.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
+
+ "dev-libs/B-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
+
+ "dev-libs/C-1" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-2" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-3" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-4" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-5" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-6" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-7" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-8" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-9" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-10": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-11": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-12": {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-13": {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-14": {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+
+ "dev-libs/D-1" : {"EAPI": "4", "IUSE": "+w +x +y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-2" : {"EAPI": "4", "IUSE": "+w +x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-3" : {"EAPI": "4", "IUSE": "+w +x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-4" : {"EAPI": "4", "IUSE": "+w x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-5" : {"EAPI": "4", "IUSE": "w x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ }
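+
+		# REQUIRED_USE operators exercised above, evaluated against the
+		# effective USE flags (a leading "+" in IUSE enables a flag by
+		# default):
+		#   || ( foo bar )  -> at least one of foo/bar must be enabled
+		#   ^^ ( foo bar )  -> exactly one of foo/bar must be enabled
+		#   foo? ( bar )    -> bar must be enabled whenever foo is
+		# Hence A-1 (neither flag enabled) fails "|| ( foo bar )", while
+		# B-4 (both enabled) fails "^^ ( foo bar )".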
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["=dev-libs/A-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], success = True, mergelist=["dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3"], success = True, mergelist=["dev-libs/A-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4"], success = True, mergelist=["dev-libs/A-4"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5"], success = True, mergelist=["dev-libs/A-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/B-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-2"], success = True, mergelist=["dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-3"], success = True, mergelist=["dev-libs/B-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-5"], success = True, mergelist=["dev-libs/B-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/C-1"], success = True, mergelist=["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-3"], success = True, mergelist=["dev-libs/C-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-5"], success = True, mergelist=["dev-libs/C-5"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-6"], success = True, mergelist=["dev-libs/C-6"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-7"], success = True, mergelist=["dev-libs/C-7"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-8"], success = True, mergelist=["dev-libs/C-8"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-9"], success = True, mergelist=["dev-libs/C-9"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-10"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-11"], success = True, mergelist=["dev-libs/C-11"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-12"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-13"], success = True, mergelist=["dev-libs/C-13"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-14"], success = True, mergelist=["dev-libs/C-14"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/D-1"], success = True, mergelist=["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-2"], success = True, mergelist=["dev-libs/D-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-5"], success = True, mergelist=["dev-libs/D-5"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testRequiredUseOrDeps(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "+x +y", "REQUIRED_USE": "^^ ( x y )", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+x +y", "REQUIRED_USE": "", "EAPI": "4" },
+ "app-misc/p-1": { "RDEPEND": "|| ( =dev-libs/A-1 =dev-libs/B-1 )" },
+ }
+
+ test_cases = (
+			# This should fail and show a REQUIRED_USE error for
+			# dev-libs/A-1, since this choice is preferred.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/p-1"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_simple.py b/usr/lib/portage/pym/portage/tests/resolver/test_simple.py
new file mode 100644
index 0000000..324ffa2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_simple.py
@@ -0,0 +1,79 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleResolverTestCase(TestCase):
+
+ def testSimple(self):
+ ebuilds = {
+ "dev-libs/A-1": { "KEYWORDS": "x86" },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1.2": {},
+
+ "app-misc/Z-1": { "DEPEND": "|| ( app-misc/Y ( app-misc/X app-misc/W ) )", "RDEPEND": "" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/X-1": {},
+ "app-misc/W-1": {},
+ }
+ binpkgs = {
+ "dev-libs/B-1.2": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1.1": {},
+ }
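+
+		# In app-misc/Z's "|| ( app-misc/Y ( app-misc/X app-misc/W ) )"
+		# dependency, app-misc/Y is keyworded ~x86 only, so the resolver
+		# falls back to the all-of group and pulls in both X and W (see
+		# the final test case below).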
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["dev-libs/A"], success = True, mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], options = { "--autounmask": 'n' }, success = False),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True},
+ success = True,
+ mergelist = ["dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/W-1", "app-misc/X-1"), "app-misc/Z-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi.py
new file mode 100644
index 0000000..7263504
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi.py
@@ -0,0 +1,464 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotAbiTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotAbiTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:="
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
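+
+		# dev-libs/libxml2 was built against icu with SLOT="0/48", so its
+		# ":=" dependency was recorded as "dev-libs/icu:0/48=". Upgrading
+		# icu to the 0/49 sub-slot therefore forces a libxml2 rebuild,
+		# unless --ignore-built-slot-operator-deps is enabled.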
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlot(self):
+ ebuilds = {
+ "sys-libs/db-4.8" : {
+ "SLOT": "4.8"
+ },
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:=",
+ "RDEPEND": ">=sys-libs/db-4:="
+ },
+ }
+ binpkgs = {
+ "sys-libs/db-4.8" : {
+ "SLOT": "4.8"
+ },
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:4.7/4.7=",
+ "RDEPEND": ">=sys-libs/db-4:4.7/4.7="
+ },
+ }
+ installed = {
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:4.7/4.7=",
+ "RDEPEND": ">=sys-libs/db-4:4.7/4.7="
+ },
+ }
+
+ world = ["app-office/libreoffice"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--rebuild-if-new-slot": "n"},
+ success = True,
+ mergelist = ["sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--rebuild-if-new-slot": "n"},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testWholeSlotConditional(self):
+ ebuilds = {
+ "dev-libs/libnl-3.2.14" : {
+ "SLOT": "3"
+ },
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "DEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )",
+ "RDEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )"
+ },
+ }
+ installed = {
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "USE": "wimax",
+ "DEPEND": "dev-libs/libnl:1.1/1.1=",
+ "RDEPEND": "dev-libs/libnl:1.1/1.1="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("USE=\"wimax\"",)
+ }
+
+ world = ["net-misc/networkmanager"]
+
+ test_cases = (
+
+ # Demonstrate bug #460304, where _slot_operator_update_probe needs
+ # to account for USE conditional deps.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ user_config = {
+ "make.conf" : ("USE=\"-wimax\"",)
+ }
+
+ test_cases = (
+
+ # Demonstrate bug #460304 again, but with inverted USE
+ # settings this time.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-libs/libnl-3.2.14', 'net-misc/networkmanager-0.9.6.4-r1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlotSubSlotMix(self):
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ binpkgs = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
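+
+# An illustrative, standalone sketch of the syntax exercised above, assuming
+# portage.dep.Atom and use_reduce behave as in this release: a ":=" slot-
+# operator dep is rewritten in the vdb to the slot/sub-slot actually built
+# against (e.g. ">=sys-libs/db-4:=" becomes ">=sys-libs/db-4:4.7/4.7="), and
+# the USE-conditional deps from testWholeSlotConditional can be evaluated
+# with use_reduce().
+if __name__ == "__main__":
+	from portage.dep import Atom, use_reduce
+
+	built = Atom(">=sys-libs/db-4:4.7/4.7=", eapi="5")
+	# Expected (if Atom parses as assumed): slot "4.7", sub_slot "4.7",
+	# slot_operator "="
+	print("%s %s %s" % (built.slot, built.sub_slot, built.slot_operator))
+
+	dep = "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )"
+	# With USE="wimax" only the libnl:1.1= branch should remain.
+	print(use_reduce(dep, uselist=["wimax"], eapi="5"))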
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi_downgrade.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi_downgrade.py
new file mode 100644
index 0000000..08e9a9d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_abi_downgrade.py
@@ -0,0 +1,225 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotAbiDowngradeTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotAbiDowngradeTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:="
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/49=",
+ "RDEPEND": "dev-libs/icu:0/49="
+ },
+ }
+ installed = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/49=",
+ "RDEPEND": "dev-libs/icu:0/49="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlotSubSlotMix(self):
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ binpkgs = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.32=",
+ "RDEPEND": "dev-libs/glib:2/2.32="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.32=",
+ "RDEPEND": "dev-libs/glib:2/2.32="
+ },
+ }
+
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_change_without_revbump.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_change_without_revbump.py
new file mode 100644
index 0000000..d85ff7e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_change_without_revbump.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testSlotChangeWithoutRevBump(self):
+
+ ebuilds = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0/13"
+ },
+ "app-arch/libarchive-3.0.4-r1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:=",
+ "RDEPEND": "app-arch/libarchive:="
+ },
+ }
+
+ binpkgs = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ }
+
+ installed = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:0/0=",
+ "RDEPEND": "app-arch/libarchive:0/0="
+ },
+ }
+
+ world = ["kde-base/ark"]
+
+ test_cases = (
+
+ # Demonstrate bug #456208, where a sub-slot change
+ # without revbump needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["kde-base/ark"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ['app-arch/libarchive-3.1.1', "kde-base/ark-4.10.0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_collisions.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_collisions.py
new file mode 100644
index 0000000..9fcd529
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_collisions.py
@@ -0,0 +1,259 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotCollisionTestCase(TestCase):
+
+ def testSlotCollision(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "dev-libs/E-1": { },
+ "dev-libs/E-2": { "IUSE": "foo" },
+
+ "app-misc/Z-1": { },
+ "app-misc/Z-2": { },
+ "app-misc/Y-1": { "DEPEND": "=app-misc/Z-1" },
+ "app-misc/Y-2": { "DEPEND": ">app-misc/Z-1" },
+ "app-misc/X-1": { "DEPEND": "=app-misc/Z-2" },
+ "app-misc/X-2": { "DEPEND": "<app-misc/Z-2" },
+
+ "sci-libs/K-1": { "IUSE": "+foo", "EAPI": 1 },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]", "EAPI": 2 },
+ "sci-libs/M-1": { "DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2 },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+ "sci-libs/P-1": { "DEPEND": "sci-libs/Q:1[foo=]", "IUSE": "foo", "EAPI": 2 },
+
+ "sys-libs/A-1": { "RDEPEND": "foo? ( sys-libs/J[foo=] )", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/B-1": { "RDEPEND": "bar? ( sys-libs/J[bar=] )", "IUSE": "+bar", "EAPI": "4" },
+ "sys-libs/C-1": { "RDEPEND": "sys-libs/J[bar]", "EAPI": "4" },
+ "sys-libs/D-1": { "RDEPEND": "sys-libs/J[bar?]", "IUSE": "bar", "EAPI": "4" },
+ "sys-libs/E-1": { "RDEPEND": "sys-libs/J[foo(+)?]", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/F-1": { "RDEPEND": "sys-libs/J[foo(+)]", "EAPI": "4" },
+ "sys-libs/J-1": { "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/J-2": { "IUSE": "+bar", "EAPI": "4" },
+
+ "app-misc/A-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "app-misc/C-1": { "DEPEND": "=app-misc/A-1[foo]", "EAPI": 2 },
+ "app-misc/E-1": { "RDEPEND": "dev-libs/E[foo?]", "IUSE": "foo", "EAPI": "2" },
+ "app-misc/F-1": { "RDEPEND": "=dev-libs/E-1", "IUSE": "foo", "EAPI": "2" },
+
+ "dev-lang/perl-5.12": {"SLOT": "0/5.12", "EAPI": "4-slot-abi"},
+ "dev-lang/perl-5.16": {"SLOT": "0/5.16", "EAPI": "4-slot-abi"},
+ }
+ installed = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo", "USE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo", "USE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "USE": "foo", "EAPI": 2 },
+
+ "sci-libs/K-1": { "IUSE": "foo", "USE": "" },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]" },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+
+ "app-misc/A-1": { "IUSE": "+foo bar", "USE": "foo", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ }
+
+ test_cases = (
+ # A qt-*[qt3support]-like mess.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [ {"dev-libs/A-1": {"foo": True}, "dev-libs/D-1": {"foo": True}} ]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/A", "sys-libs/B", "sys-libs/C", "sys-libs/D", "sys-libs/E", "sys-libs/F"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [],
+ mergelist = ['sys-libs/J-2', 'sys-libs/J-1', 'sys-libs/A-1', 'sys-libs/B-1', 'sys-libs/C-1', 'sys-libs/D-1', 'sys-libs/E-1', 'sys-libs/F-1'],
+ ),
+
+ # Version-based conflicts, nothing we can do.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-1", "=app-misc/Y-1"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-1", "app-misc/Y-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-2", "=app-misc/Y-2"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-2", "app-misc/Y-2"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/E-1", "=app-misc/F-1"],
+ success = False,
+ mergelist = ["dev-libs/E-1", "dev-libs/E-2", "app-misc/E-1", "app-misc/F-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ # sub-slot
+ ResolverPlaygroundTestCase(
+ ["dev-lang/perl:0/5.12", "dev-lang/perl:0/5.16", "=dev-lang/perl-5.12*"],
+ success = False,
+ mergelist = ["dev-lang/perl-5.12", "dev-lang/perl-5.16"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ # Simple cases.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/L", "sci-libs/M"],
+ success = False,
+ mergelist = ["sci-libs/L-1", "sci-libs/M-1", "sci-libs/K-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [{"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}]
+ ),
+
+ # Avoid duplicates.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/P", "sci-libs/Q:2"],
+ success = False,
+ options = { "--update": True, "--complete-graph": True, "--autounmask": 'n' },
+ mergelist = ["sci-libs/P-1", "sci-libs/Q-1"],
+ ignore_mergelist_order = True,
+ all_permutations=True,
+ slot_collision_solutions = [{"sci-libs/Q-1": {"foo": True}, "sci-libs/P-1": {"foo": True}}]
+ ),
+
+ # Conflict with REQUIRED_USE.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/C-1", "=app-misc/B-1"],
+ all_permutations = True,
+ slot_collision_solutions = [],
+ mergelist = ["app-misc/A-1", "app-misc/C-1", "app-misc/B-1"],
+ ignore_mergelist_order = True,
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testConnectedCollision(self):
+ """
+ Ensure that we are able to solve connected slot conflicts
+ which cannot be solved individually.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/X-2": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", ("dev-libs/A-1", "dev-libs/B-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testDeeplyConnectedCollision(self):
+ """
+ Like testConnectedCollision, except that there is another
+ level of dependencies between the two conflicts.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "dev-libs/K" },
+ "dev-libs/X-2": { "RDEPEND": "dev-libs/L" },
+
+ "dev-libs/K-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/L-1": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", "dev-libs/K-1", \
+ "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSelfDEPENDRemovalCrash(self):
+ """
+ Make sure we don't try to remove a package twice. This happened
+ in the past when a package had a DEPEND on itself.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { },
+ "dev-libs/X-2": { "DEPEND": ">=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/X-1", "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
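+
+# An illustrative sketch (assuming portage.dep.Atom exposes parsed USE deps
+# as below) of the USE-dep syntax these conflicts revolve around: [foo]
+# requires foo enabled, [-foo] requires it disabled, and [foo=] / [foo?]
+# depend on the parent's own USE state.
+if __name__ == "__main__":
+	from portage.dep import Atom
+
+	atom = Atom("dev-libs/A[foo,-bar]", eapi="2")
+	print(atom.use.enabled)   # expected: frozenset(['foo'])
+	print(atom.use.disabled)  # expected: frozenset(['bar'])
+
+	cond = Atom("sci-libs/K[foo=]", eapi="2")
+	# evaluate_conditionals() resolves [foo=] against the parent's USE.
+	print(cond.evaluate_conditionals(["foo"]))  # expected: sci-libs/K[foo]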
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_force_rebuild.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_force_rebuild.py
new file mode 100644
index 0000000..4170bfd
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_force_rebuild.py
@@ -0,0 +1,84 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictForceRebuildTestCase(TestCase):
+
+ def testSlotConflictForceRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #521990, where forced_rebuilds omits ebuilds that
+ # have had their slot operator atoms removed, even though the
+ # corresponding installed instances really did force rebuilds
+ # because they were built with slot-operator deps.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ['app-misc/A-2', ('app-misc/B-0', 'app-misc/C-0')],
+ forced_rebuilds = {
+ 'app-misc/A-2': ['app-misc/B-0', 'app-misc/C-0']
+ }
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_mask_update.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
new file mode 100644
index 0000000..a90eeac
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictMaskUpdateTestCase(TestCase):
+
+ def testBacktrackingGoodVersionFirst(self):
+ """
+ When backtracking due to slot conflicts, we masked the version that was
+ pulled in first. This is not always a good idea; mask the highest version instead.
+ """
+
+ self.todo = True
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+ "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+ "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1",],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_rebuild.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
new file mode 100644
index 0000000..b39eaf0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
@@ -0,0 +1,449 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictRebuildTestCase(TestCase):
+
+ def testSlotConflictRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:=",
+ "RDEPEND": "app-misc/D:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:0/1=",
+ "RDEPEND": "app-misc/D:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C", "app-misc/E"]
+
+ test_cases = (
+
+ # Test bug #439688, where a slot conflict prevents an
+ # upgrade and we don't want to trigger unnecessary rebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/D-2", "app-misc/E-0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMassRebuild(self):
+ """
+ Bug 486580
+ Before this bug was fixed, emerge would backtrack for each package that
+ needed a rebuild. This could cause it to hit the backtrack limit and not
+ rebuild all needed packages.
+ """
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2/2"
+ },
+ }
+
+ installed = {
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ }
+
+ expected_mergelist = ['app-misc/A-1', 'app-misc/B-2']
+
+ for i in range(5):
+ ebuilds["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ }
+
+ installed["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ }
+ for x in ("DEPEND", "RDEPEND"):
+ ebuilds["app-misc/A-1"][x] += " app-misc/C%sC" % i
+
+ expected_mergelist.append("app-misc/C%sC-1" % i)
+
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ ignore_mergelist_order=True,
+ all_permutations=True,
+ options = {"--backtrack": 3, '--deep': True},
+ success = True,
+ mergelist = expected_mergelist),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictForgottenChild(self):
+ """
+ Similar to testSlotConflictMassRebuild above, except that this time the
+ rebuilds are scheduled while the package causing them (the child) is not
+ yet installed.
+ """
+ ebuilds = {
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:= app-misc/C",
+ "RDEPEND": "app-misc/B:= app-misc/C",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1= app-misc/C",
+ "RDEPEND": "app-misc/B:1/1= app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/C-1', 'app-misc/A-2']),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictDepChange(self):
+ """
+ Bug 490362
+ The dependency in the ebuild was changed from a slot operator to
+ no slot operator. The vdb still contained the slot operator, and
+ emerge would refuse to rebuild.
+ """
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:0/1=",
+ "RDEPEND": "app-misc/B:0/1="
+ },
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/B"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/A-1']),
+ )
+
+ world = ["app-misc/A"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMixedDependencies(self):
+ """
+ Bug 487198
+ For parents with mixed >= and < dependencies, we scheduled rebuilds for the
+ >= atom, but in the end didn't install the child update because of the < atom.
+ """
+ ebuilds = {
+ "cat/slotted-lib-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ "cat/slotted-lib-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/slotted-lib-4" : {
+ "EAPI": "5",
+ "SLOT": "4"
+ },
+ "cat/slotted-lib-5" : {
+ "EAPI": "5",
+ "SLOT": "5"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ "RDEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ },
+ }
+
+ installed = {
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ "RDEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["cat/user"],
+ options = {"--deep": True, "--update": True},
+ success = True,
+ mergelist = []),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMultiRepo(self):
+ """
+ Bug 497238
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child.
+ Downgrading the slot operator parent would result in a sub-slot change of
+ the installed package by changing the source repository.
+ Make sure we don't perform this undesirable rebuild.
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictMultiRepoUpdates(self):
+ """
+ Bug 508236 (similar to testSlotConflictMultiRepo)
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child, for both the installed version and an updated version.
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "net-firewall/iptables-1.4.21-r1::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "net-firewall/iptables-1.4.21-r1" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = ["net-firewall/iptables-1.4.21-r1::overlay"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
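+
+# An illustrative sketch of the ::repo syntax used by the multi-repo cases
+# above, assuming Atom(..., allow_repo=True) parses it as below. The resolver
+# treats net-firewall/iptables-1.4.21 and its ::overlay twin as distinct
+# candidates with different sub-slots.
+if __name__ == "__main__":
+	from portage.dep import Atom
+
+	atom = Atom("net-firewall/iptables::overlay", allow_repo=True)
+	print(atom.cp)    # expected: net-firewall/iptables
+	print(atom.repo)  # expected: overlay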
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py
new file mode 100644
index 0000000..13f7e67
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py
@@ -0,0 +1,115 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictUnsatisfiedDeepDepsTestCase(TestCase):
+
+ def testSlotConflictUnsatisfiedDeepDeps(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ installed = {
+ "dev-libs/broken-1": {
+ "RDEPEND": "dev-libs/A dev-libs/initially-unsatisfied"
+ },
+ }
+
+ world = (
+ "dev-libs/A",
+ "dev-libs/B",
+ "dev-libs/C",
+ "dev-libs/D",
+ "dev-libs/broken"
+ )
+
+ test_cases = (
+ # Test bug #520950, where unsatisfied deps of installed
+ # packages are supposed to be ignored when they are beyond
+ # the depth requested by the user.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ all_permutations=True,
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=[],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/broken"],
+ success=False),
+
+ # Test --selective with --deep = 0
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": 0
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=[],
+ success=False),
+
+ # Test --deep = 1
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": 1
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/initially-unsatisfied"],
+ success=False),
+
+ # Test --deep = True
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/initially-unsatisfied"],
+ success=False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_update.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_update.py
new file mode 100644
index 0000000..331e578
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_conflict_update.py
@@ -0,0 +1,98 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictUpdateTestCase(TestCase):
+
+ def testSlotConflictUpdate(self):
+
+ ebuilds = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:="
+ },
+
+ "dev-libs/boost-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.53",
+ "RDEPEND" : "=dev-util/boost-build-1.53.0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ },
+
+ "dev-util/boost-build-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ }
+
+ installed = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:0/1.52="
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ }
+
+ }
+
+ world = ["dev-cpp/libcmis", "dev-libs/boost", "app-text/podofo"]
+
+ test_cases = (
+
+ # In order to avoid a missed update, first mask lower
+ # versions that conflict with higher versions. Note that
+ # this behavior makes SlotConflictMaskUpdateTestCase
+ # fail.
+ ResolverPlaygroundTestCase(
+ world,
+ all_permutations = True,
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-util/boost-build-1.53.0', 'dev-libs/boost-1.53.0', 'dev-cpp/libcmis-0.3.1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_autounmask.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_autounmask.py
new file mode 100644
index 0000000..624271b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_autounmask.py
@@ -0,0 +1,120 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorAutoUnmaskTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorAutoUnmaskTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:=",
+ "KEYWORDS": "~x86"
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--autounmask": True, "--oneshot": True},
+ success = False,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = False,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_rebuild.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_rebuild.py
new file mode 100644
index 0000000..42512aa
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_rebuild.py
@@ -0,0 +1,80 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorRebuildTestCase(TestCase):
+
+ def testSlotOperatorRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X app-misc/A:= )"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X app-misc/A:0/1= )"
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #522652, where the unsatisfiable app-misc/X
+ # atom is selected, and the dependency is placed into
+ # _initially_unsatisfied_deps where it is ignored, causing
+ # the app-misc/C-0 rebuild to be missed.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--dynamic-deps": "n"},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ['app-misc/A-2', ('app-misc/B-0', 'app-misc/C-0')]
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
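+
+# An illustrative sketch of how the || ( ... ) dep above is tokenized,
+# assuming use_reduce(..., opconvert=True) nests any-of groups as below.
+# The bug was that selecting the unsatisfiable app-misc/X branch hid the
+# app-misc/A:= rebuild.
+if __name__ == "__main__":
+	from portage.dep import use_reduce
+
+	dep = "|| ( app-misc/X app-misc/A:= )"
+	# expected: [['||', 'app-misc/X', 'app-misc/A:=']]
+	print(use_reduce(dep, eapi="5", opconvert=True))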
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_required_use.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_required_use.py
new file mode 100644
index 0000000..9cc6dba
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_required_use.py
@@ -0,0 +1,72 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorRequiredUseTestCase(TestCase):
+
+ def testSlotOperatorRequiredUse(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:=",
+ "IUSE": "x y",
+ "REQUIRED_USE": "|| ( x y )"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1=",
+ "IUSE": "x y",
+ "USE": "x"
+ },
+
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # bug 523048
+ # Ensure that unsatisfied REQUIRED_USE is reported when
+ # it blocks necessary slot-operator rebuilds.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = False,
+ required_use_unsatisfied = ['app-misc/B:0']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
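+
+# An illustrative sketch of the REQUIRED_USE check involved above, assuming
+# portage.dep.check_required_use has the signature below; its result is
+# false when the constraint is unsatisfied, which is what blocks the
+# app-misc/B rebuild in this test.
+if __name__ == "__main__":
+	from portage.dep import check_required_use
+
+	def iuse_match(flag):
+		return flag in ("x", "y")
+
+	print(bool(check_required_use("|| ( x y )", ["x"], iuse_match)))  # True
+	print(bool(check_required_use("|| ( x y )", [], iuse_match)))     # False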
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
new file mode 100644
index 0000000..e3b53d1
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsatisfiedTestCase(TestCase):
+
+ def testSlotOperatorUnsatisfied(self):
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Demonstrate bug #439694, where a broken slot-operator
+ # sub-slot dependency needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/B-0"]),
+
+ # This doesn't trigger a rebuild, since there's no version
+ # change to trigger complete graph mode, and initially
+ # unsatisfied deps are ignored in complete graph mode anyway.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["app-misc/A-2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsolved.py b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsolved.py
new file mode 100644
index 0000000..c19783d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_slot_operator_unsolved.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsolvedTestCase(TestCase):
+ """
+ Demonstrate bug #456340, where an unsolved circular dependency
+ interacts with an unsatisfied built slot-operator dep.
+ """
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorUnsolvedTestCase, self).__init__(*args, **kwargs)
+
+ def testSlotOperatorUnsolved(self):
+ ebuilds = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:="
+ },
+ "dev-ruby/rdoc-3.12.1" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/hoe-2.7.0 )",
+ },
+ "dev-ruby/hoe-2.13.0" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ "RDEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ },
+ }
+
+ binpkgs = {
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ installed = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test",)
+ }
+
+ world = ["net-libs/webkit-gtk", "dev-ruby/hoe"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ circular_dependency_solutions = {
+ 'dev-ruby/hoe-2.13.0': frozenset([frozenset([('test', False)])]),
+ 'dev-ruby/rdoc-3.12.1': frozenset([frozenset([('test', False)])])
+ },
+ success = False
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, user_config=user_config,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
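+
+# A plain-Python sketch (no portage APIs) of how the
+# circular_dependency_solutions value above is structured: each package maps
+# to a set of alternative solutions, and each solution is a set of
+# (flag, value) changes that would break the cycle -- here USE="-test".
+if __name__ == "__main__":
+	solutions = frozenset([frozenset([("test", False)])])
+	for solution in solutions:
+		for flag, value in solution:
+			print("USE=%s%s" % ("" if value else "-", flag))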
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py b/usr/lib/portage/pym/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py
new file mode 100644
index 0000000..c6024f4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py
@@ -0,0 +1,75 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SolveNonSlotOperatorSlotConflictsTestCase(TestCase):
+
+ def testSolveNonSlotOperatorSlotConflicts(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ }
+
+ world = ["app-misc/A"]
+
+ test_cases = (
+
+ # bug 522084
+ # In this case, _solve_non_slot_operator_slot_conflicts
+ # removed both versions of app-misc/A from the graph, since
+ # they didn't have any non-conflict parents (except for
+ # @selected which matched both instances). The result was
+ # a missed update.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['app-misc/A-2', 'app-misc/B-0']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_targetroot.py b/usr/lib/portage/pym/portage/tests/resolver/test_targetroot.py
new file mode 100644
index 0000000..db6c60d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_targetroot.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class TargetRootTestCase(TestCase):
+
+ def testTargetRoot(self):
+ ebuilds = {
+ "dev-lang/python-3.2": {
+ "EAPI": "5-hdepend",
+ "IUSE": "targetroot",
+ "HDEPEND": "targetroot? ( ~dev-lang/python-3.2 )",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "4",
+ "DEPEND": "dev-libs/B",
+ "RDEPEND": "dev-libs/C",
+ },
+ "dev-libs/B-1": {},
+ "dev-libs/C-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": True},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": "rdeps"},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": True},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1{targetroot}", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": "rdeps"},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = ["dev-libs/C-1{targetroot}", "dev-libs/A-1{targetroot}"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=True,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=False,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_unpack_dependencies.py b/usr/lib/portage/pym/portage/tests/resolver/test_unpack_dependencies.py
new file mode 100644
index 0000000..cfceff4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_unpack_dependencies.py
@@ -0,0 +1,65 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UnpackDependenciesTestCase(TestCase):
+ def testUnpackDependencies(self):
+ distfiles = {
+ "A-1.tar.gz": b"binary\0content",
+ "B-1.TAR.XZ": b"binary\0content",
+ "B-docs-1.tar.bz2": b"binary\0content",
+ "C-1.TAR.XZ": b"binary\0content",
+ "C-docs-1.tar.bz2": b"binary\0content",
+ }
+
+ ebuilds = {
+ "dev-libs/A-1": {"SRC_URI": "A-1.tar.gz", "EAPI": "5-progress"},
+ "dev-libs/B-1": {"IUSE": "doc", "SRC_URI": "B-1.TAR.XZ doc? ( B-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "dev-libs/C-1": {"IUSE": "doc", "SRC_URI": "C-1.TAR.XZ doc? ( C-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "app-arch/bzip2-1": {},
+ "app-arch/gzip-1": {},
+ "app-arch/tar-1": {},
+ "app-arch/xz-utils-1": {},
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "unpack_dependencies/5-progress": (
+ "tar.bz2 app-arch/tar app-arch/bzip2",
+ "tar.gz app-arch/tar app-arch/gzip",
+ "tar.xz app-arch/tar app-arch/xz-utils",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/gzip-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "app-arch/bzip2-1", "dev-libs/C-1"]),
+ )
+
+ user_config = {
+ "package.use": ("dev-libs/C doc",)
+ }
+
+ playground = ResolverPlayground(distfiles=distfiles, ebuilds=ebuilds, repo_configs=repo_configs, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_use_aliases.py b/usr/lib/portage/pym/portage/tests/resolver/test_use_aliases.py
new file mode 100644
index 0000000..7c2debb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_use_aliases.py
@@ -0,0 +1,131 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseAliasesTestCase(TestCase):
+ def testUseAliases(self):
+ ebuilds = {
+ "dev-libs/A-1": {"DEPEND": "dev-libs/K[x]", "RDEPEND": "dev-libs/K[x]", "EAPI": "5"},
+ "dev-libs/B-1": {"DEPEND": "dev-libs/L[x]", "RDEPEND": "dev-libs/L[x]", "EAPI": "5"},
+ "dev-libs/C-1": {"DEPEND": "dev-libs/M[xx]", "RDEPEND": "dev-libs/M[xx]", "EAPI": "5"},
+ "dev-libs/D-1": {"DEPEND": "dev-libs/N[-x]", "RDEPEND": "dev-libs/N[-x]", "EAPI": "5"},
+ "dev-libs/E-1": {"DEPEND": "dev-libs/O[-xx]", "RDEPEND": "dev-libs/O[-xx]", "EAPI": "5"},
+ "dev-libs/F-1": {"DEPEND": "dev-libs/P[-xx]", "RDEPEND": "dev-libs/P[-xx]", "EAPI": "5"},
+ "dev-libs/G-1": {"DEPEND": "dev-libs/Q[x-y]", "RDEPEND": "dev-libs/Q[x-y]", "EAPI": "5"},
+ "dev-libs/H-1": {"DEPEND": "=dev-libs/R-1*[yy]", "RDEPEND": "=dev-libs/R-1*[yy]", "EAPI": "5"},
+ "dev-libs/H-2": {"DEPEND": "=dev-libs/R-2*[yy]", "RDEPEND": "=dev-libs/R-2*[yy]", "EAPI": "5"},
+ "dev-libs/I-1": {"DEPEND": "dev-libs/S[y-z]", "RDEPEND": "dev-libs/S[y-z]", "EAPI": "5"},
+ "dev-libs/I-2": {"DEPEND": "dev-libs/S[y_z]", "RDEPEND": "dev-libs/S[y_z]", "EAPI": "5"},
+ "dev-libs/J-1": {"DEPEND": "dev-libs/T[x]", "RDEPEND": "dev-libs/T[x]", "EAPI": "5"},
+ "dev-libs/K-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/K-2::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/L-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/M-1::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/N-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/N-2::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/P-1::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/Q-1::repo2": {"IUSE": "X.Y", "EAPI": "5-progress"},
+ "dev-libs/R-1::repo1": {"IUSE": "Y", "EAPI": "5-progress"},
+ "dev-libs/R-2::repo1": {"IUSE": "y", "EAPI": "5-progress"},
+ "dev-libs/S-1::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/S-2::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/T-1::repo1": {"IUSE": "+X", "EAPI": "5"},
+ }
+
+ installed = {
+ "dev-libs/L-2::repo1": {"IUSE": "+X", "USE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-2::repo1": {"IUSE": "X", "USE": "", "EAPI": "5-progress"},
+ }
+
+ repo_configs = {
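+ # use.aliases lines map a real flag to its aliases ("<flag> <alias>...");
+ # package.use.aliases lines add a leading package atom.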
+ "repo1": {
+ "use.aliases": ("X x xx",),
+ "package.use.aliases": (
+ "=dev-libs/R-1* Y yy",
+ "=dev-libs/R-2* y yy",
+ )
+ },
+ "repo2": {
+ "eapi": ("5-progress",),
+ "use.aliases": ("X.Y x-y",),
+ "package.use.aliases": (
+ "=dev-libs/S-1* Y.Z y-z",
+ "=dev-libs/S-2* Y.Z y_z",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ mergelist = ["dev-libs/K-2", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/M-1", "dev-libs/C-1"],
+ use_changes = {"dev-libs/M-1": {"X": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/N-2", "dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ mergelist = ["dev-libs/E-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/P-1", "dev-libs/F-1"],
+ use_changes = {"dev-libs/P-1": {"X": False}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/Q-1", "dev-libs/G-1"],
+ use_changes = {"dev-libs/Q-1": {"X.Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-1", "dev-libs/H-1"],
+ use_changes = {"dev-libs/R-1": {"Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-2", "dev-libs/H-2"],
+ use_changes = {"dev-libs/R-2": {"y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-1", "dev-libs/I-1"],
+ use_changes = {"dev-libs/S-1": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-2", "dev-libs/I-2"],
+ use_changes = {"dev-libs/S-2": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, repo_configs=repo_configs)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_use_dep_defaults.py b/usr/lib/portage/pym/portage/tests/resolver/test_use_dep_defaults.py
new file mode 100644
index 0000000..7d17106
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_use_dep_defaults.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseDepDefaultsTestCase(TestCase):
+
+ def testUseDepDefaults(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo]", "RDEPEND": "dev-libs/B[foo]", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo(+)]", "RDEPEND": "dev-libs/B[foo(+)]", "EAPI": "4" },
+ "dev-libs/A-3": { "DEPEND": "dev-libs/B[foo(-)]", "RDEPEND": "dev-libs/B[foo(-)]", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-2": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-3"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-3"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_useflags.py b/usr/lib/portage/pym/portage/tests/resolver/test_useflags.py
new file mode 100644
index 0000000..0a5f3b3
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_useflags.py
@@ -0,0 +1,78 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseFlagsTestCase(TestCase):
+
+ def testUseFlags(self):
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X Y", },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X", },
+ }
+
+ binpkgs = installed
+
+ user_config = {
+ "package.use": ( "dev-libs/A X", ),
+ "use.force": ( "Y", ),
+ }
+
+ test_cases = (
+ #default: don't reinstall on use flag change
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--selective": True, "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #default: respect use flags for binpkgs
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--binpkg-respect-use=n: use binpkgs with different use flags
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--binpkg-respect-use": "n", "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/A-1"]),
+
+ #--reinstall=changed-use: reinstall if use flag changed
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--reinstall=changed-use: don't reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #--newuse: reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_virtual_slot.py b/usr/lib/portage/pym/portage/tests/resolver/test_virtual_slot.py
new file mode 100644
index 0000000..1b19d77
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_virtual_slot.py
@@ -0,0 +1,142 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class VirtualSlotResolverTestCase(TestCase):
+
+ def testLicenseMaskedVirtualSlotUpdate(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ }
+
+ world = ("app-misc/java-app",)
+
+ test_cases = (
+ # Bug #382557 - Don't pull in the virtual/jdk-1.7.0 slot update
+ # since its dependencies can only be satisfied by a package that
+ # is masked by license.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testVirtualSlotUpdate(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ }
+
+ world = ("app-misc/java-app",)
+
+ test_cases = (
+ # Pull in the virtual/jdk-1.7.0 slot update since its dependencies
+ # can only be satisfied by an unmasked package.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update" : True, "--deep" : True},
+ success = True,
+ mergelist = ["dev-java/icedtea-7", "virtual/jdk-1.7.0"]),
+
+ # Bug #275945 - Don't pull in the virtual/jdk-1.7.0 slot update
+ # unless --update is enabled.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testVirtualSlotDepclean(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ world = ("virtual/jdk:1.6", "app-misc/java-app",)
+
+ test_cases = (
+ # Make sure that depclean doesn't remove a new slot even though
+ # it is redundant in the sense that the older slot will satisfy
+ # all dependencies.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean" : True},
+ success = True,
+ cleanlist = []),
+
+ # Prune redundant lower slots, even if they are in world.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--prune" : True},
+ success = True,
+ cleanlist = ['virtual/jdk-1.6.0', 'dev-java/icedtea-6.1.10.3']),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/resolver/test_virtual_transition.py b/usr/lib/portage/pym/portage/tests/resolver/test_virtual_transition.py
new file mode 100644
index 0000000..3f4171e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/resolver/test_virtual_transition.py
@@ -0,0 +1,51 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class VirtualTransitionTestCase(TestCase):
+
+ def testVirtualTransition(self):
+ ebuilds = {
+ "kde-base/kcron-4.7.1" : {"RDEPEND": "virtual/cron" },
+ "sys-process/vixie-cron-4.1-r11": {},
+ "virtual/cron-0" : {"RDEPEND": "sys-process/vixie-cron" },
+ }
+ installed = {
+ "kde-base/kcron-4.7.1" : {"RDEPEND": "virtual/cron" },
+ "sys-process/vixie-cron-4.1-r11" : {"PROVIDE" : "virtual/cron"},
+ }
+
+ world = ["kde-base/kcron", "sys-process/vixie-cron"]
+
+ test_cases = (
+
+ # Pull in a new-style virtual, even though there is an installed
+ # old-style virtual to satisfy the virtual/cron dep. This case
+ # is common, due to PROVIDE being removed (without revision bump)
+ # from lots of ebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["virtual/cron-0"]),
+
+ # Make sure that depclean is satisfied with the installed
+ # old-style virtual.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/runTests.py b/usr/lib/portage/pym/portage/tests/runTests.py
new file mode 100644
index 0000000..335e466
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/runTests.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python -bWd
+# runTests.py -- Portage Unit Test Functionality
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, sys
+import os.path as osp
+import grp
+import platform
+import pwd
+import signal
+
+def debug_signal(signum, frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
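+# Sending this signal to the running test process (e.g. kill -USR1 <pid>)
+# drops into pdb for interactive debugging.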
+
+# Pretend that the current user's uid/gid are the 'portage' uid/gid,
+# so things go smoothly regardless of the current user and global
+# user/group configuration.
+try:
+ os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
+except KeyError:
+ os.environ["PORTAGE_USERNAME"] = str(os.getuid())
+try:
+ os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
+except KeyError:
+ os.environ["PORTAGE_GRPNAME"] = str(os.getgid())
+
+# Insert our parent dir so we can do shiny import "tests"
+# This line courtesy of Marienz and Pkgcore ;)
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))))
+
+import portage
+portage._internal_caller = True
+
+# Ensure that we don't instantiate portage.settings, so that tests
+# work the same regardless of global configuration file state/existence.
+portage._disable_legacy_globals()
+
+if os.environ.get('NOCOLOR') in ('yes', 'true'):
+ portage.output.nocolor()
+
+import portage.tests as tests
+from portage.const import PORTAGE_BIN_PATH
+path = os.environ.get("PATH", "").split(":")
+path = [x for x in path if x]
+
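+# Make sure the helper scripts bundled with this source tree are found
+# before any system-installed copies.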
+insert_bin_path = True
+try:
+ insert_bin_path = not path or \
+ not os.path.samefile(path[0], PORTAGE_BIN_PATH)
+except OSError:
+ pass
+
+if insert_bin_path:
+ path.insert(0, PORTAGE_BIN_PATH)
+ os.environ["PATH"] = ":".join(path)
+
+if __name__ == "__main__":
+ sys.exit(tests.main())
diff --git a/usr/lib/portage/pym/portage/tests/sets/__init__.py b/usr/lib/portage/pym/portage/tests/sets/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/base/__init__.py b/usr/lib/portage/pym/portage/tests/sets/base/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/base/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/base/__test__.py b/usr/lib/portage/pym/portage/tests/sets/base/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/base/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/base/testInternalPackageSet.py b/usr/lib/portage/pym/portage/tests/sets/base/testInternalPackageSet.py
new file mode 100644
index 0000000..e0a3478
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/base/testInternalPackageSet.py
@@ -0,0 +1,61 @@
+# testInternalPackageSet.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.tests import TestCase
+from portage._sets.base import InternalPackageSet
+
+class InternalPackageSetTestCase(TestCase):
+ """Simple Test Case for InternalPackageSet"""
+
+ def testInternalPackageSet(self):
+ i1_atoms = set(("dev-libs/A", ">=dev-libs/A-1", "dev-libs/B"))
+ i2_atoms = set(("dev-libs/A", "dev-libs/*", "dev-libs/C"))
+
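+ # Wildcard atoms such as "dev-libs/*" are rejected unless
+ # allow_wildcard=True is passed.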
+ i1 = InternalPackageSet(initial_atoms=i1_atoms)
+ i2 = InternalPackageSet(initial_atoms=i2_atoms, allow_wildcard=True)
+ self.assertRaises(InvalidAtom, InternalPackageSet, initial_atoms=i2_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ new_atom = Atom("*/*", allow_wildcard=True)
+ self.assertRaises(InvalidAtom, i1.add, new_atom)
+ i2.add(new_atom)
+
+ i2_atoms.add(new_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ removed_atom = Atom("dev-libs/A")
+
+ i1.remove(removed_atom)
+ i2.remove(removed_atom)
+
+ i1_atoms.remove(removed_atom)
+ i2_atoms.remove(removed_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ update_atoms = [Atom("dev-libs/C"), Atom("dev-*/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.update, update_atoms)
+ i2.update(update_atoms)
+
+ i2_atoms.update(update_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ replace_atoms = [Atom("dev-libs/D"), Atom("*-libs/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.replace, replace_atoms)
+ i2.replace(replace_atoms)
+
+ i2_atoms = set(replace_atoms)
+
+ self.assertEqual(i2.getAtoms(), i2_atoms)
diff --git a/usr/lib/portage/pym/portage/tests/sets/files/__init__.py b/usr/lib/portage/pym/portage/tests/sets/files/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/files/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/files/__test__.py b/usr/lib/portage/pym/portage/tests/sets/files/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/files/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/files/testConfigFileSet.py b/usr/lib/portage/pym/portage/tests/sets/files/testConfigFileSet.py
new file mode 100644
index 0000000..3ec26a0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/files/testConfigFileSet.py
@@ -0,0 +1,32 @@
+# testConfigFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import ConfigFileSet
+
+class ConfigFileSetTestCase(TestCase):
+ """Simple Test Case for ConfigFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
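+ # Write every other atom with trailing tokens to verify that
+ # ConfigFileSet ignores anything after the atom itself.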
+ for i in range(0, len(test_cps)):
+ atom = test_cps[i]
+ if i % 2 == 0:
+ f.write(atom + ' abc def\n')
+ else:
+ f.write(atom + '\n')
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testConfigStaticFileSet(self):
+ s = ConfigFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/usr/lib/portage/pym/portage/tests/sets/files/testStaticFileSet.py b/usr/lib/portage/pym/portage/tests/sets/files/testStaticFileSet.py
new file mode 100644
index 0000000..d515a67
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/files/testStaticFileSet.py
@@ -0,0 +1,27 @@
+# testStaticFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import StaticFileSet
+
+class StaticFileSetTestCase(TestCase):
+ """Simple Test Case for StaticFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testSampleStaticFileSet(self):
+ s = StaticFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/usr/lib/portage/pym/portage/tests/sets/shell/__init__.py b/usr/lib/portage/pym/portage/tests/sets/shell/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/shell/__init__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/shell/__test__.py b/usr/lib/portage/pym/portage/tests/sets/shell/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/shell/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/sets/shell/testShell.py b/usr/lib/portage/pym/portage/tests/sets/shell/testShell.py
new file mode 100644
index 0000000..2cdd833
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/sets/shell/testShell.py
@@ -0,0 +1,28 @@
+# testShell.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.process import find_binary
+from portage.tests import TestCase, test_cps
+from portage._sets.shell import CommandOutputSet
+
+class CommandOutputSetTestCase(TestCase):
+ """Simple Test Case for CommandOutputSet"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testCommand(self):
+
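+ # Build a bash command that echoes each atom on its own line, then
+ # verify that CommandOutputSet parses the output back into the same set.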
+ expected = set(test_cps)
+ command = find_binary("bash")
+ command += " -c '"
+ for a in expected:
+ command += " echo -e \"%s\" ; " % a
+ command += "'"
+ s = CommandOutputSet(command)
+ atoms = s.getAtoms()
+ self.assertEqual(atoms, expected)
diff --git a/usr/lib/portage/pym/portage/tests/unicode/__init__.py b/usr/lib/portage/pym/portage/tests/unicode/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/unicode/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/unicode/__test__.py b/usr/lib/portage/pym/portage/tests/unicode/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/unicode/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/unicode/test_string_format.py b/usr/lib/portage/pym/portage/tests/unicode/test_string_format.py
new file mode 100644
index 0000000..9d4366a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/unicode/test_string_format.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+from portage.exception import PortageException
+from portage.tests import TestCase
+from _emerge.DependencyArg import DependencyArg
+from _emerge.UseFlagDisplay import UseFlagDisplay
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+STR_IS_UNICODE = sys.hexversion >= 0x3000000
+
+class StringFormatTestCase(TestCase):
+ """
+ Test that string formatting works correctly in the current interpreter,
+ which may be either python2 or python3.
+ """
+
+ # We need unicode_literals in order to get some unicode test strings
+ # in a way that works in both python2 and python3.
+
+ unicode_strings = (
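+ # U+2018 and U+2019 are the left and right single quotation marks.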
+ '\u2018',
+ '\u2019',
+ )
+
+ def testDependencyArg(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
+ dependency_arg = DependencyArg(arg=arg_unicode)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (dependency_arg,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testPortageException(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
+ e = PortageException(arg_unicode)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (e,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testUseFlagDisplay(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for enabled in (True, False):
+ for forced in (True, False):
+ for arg_unicode in self.unicode_strings:
+ e = UseFlagDisplay(arg_unicode, enabled, forced)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_str, basestring), True)
+
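+ # In the expected mergelists below, a "{targetroot}" suffix marks the
+ # instance of a package that is built for the target ROOT.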
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_str, str), True)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (e,)
+ self.assertEqual(isinstance(formatted_bytes, bytes), True)
diff --git a/usr/lib/portage/pym/portage/tests/update/__init__.py b/usr/lib/portage/pym/portage/tests/update/__init__.py
new file mode 100644
index 0000000..418ad86
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/update/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/update/__test__.py b/usr/lib/portage/pym/portage/tests/update/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/update/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/update/test_move_ent.py b/usr/lib/portage/pym/portage/tests/update/test_move_ent.py
new file mode 100644
index 0000000..d9647a9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/update/test_move_ent.py
@@ -0,0 +1,109 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+from portage._global_updates import _do_global_updates
+
+class MoveEntTestCase(TestCase):
+
+ def testMoveEnt(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
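+ # A package move directive in profiles/updates format:
+ # "move <old atom> <new atom>".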
+ updates = textwrap.dedent("""
+ move dev-libs/A dev-libs/A-moved
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # A -> A-moved
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/A-1", ["EAPI"])
+ vardb.aux_get("dev-libs/A-moved-1", ["EAPI"])
+ self.assertRaises(KeyError,
+ bindb.aux_get, "dev-libs/A-1", ["EAPI"])
+ bindb.aux_get("dev-libs/A-moved-1", ["EAPI"])
+
+ # dont_apply_updates
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/A-moved-2", ["EAPI"])
+ vardb.aux_get("dev-libs/A-2", ["EAPI"])
+ self.assertRaises(KeyError,
+ bindb.aux_get, "dev-libs/A-moved-2", ["EAPI"])
+ bindb.aux_get("dev-libs/A-2", ["EAPI"])
+
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/update/test_move_slot_ent.py b/usr/lib/portage/pym/portage/tests/update/test_move_slot_ent.py
new file mode 100644
index 0000000..3e49e11
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/update/test_move_slot_ent.py
@@ -0,0 +1,154 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+from portage._global_updates import _do_global_updates
+
+class MoveSlotEntTestCase(TestCase):
+
+ def testMoveSlotEnt(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-2.1::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.1",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/1",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/1",
+ },
+
+ "dev-libs/C-2.1::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.1",
+ },
+
+ }
+
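+ # Slot move directives in profiles/updates format:
+ # "slotmove <atom> <old slot> <new slot>".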
+ updates = textwrap.dedent("""
+ slotmove dev-libs/A 0 2
+ slotmove dev-libs/B 0 1
+ slotmove dev-libs/C 0 1
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # 0/2.30 -> 2/2.30
+ self.assertEqual("2/2.30",
+ vardb.aux_get("dev-libs/A-1", ["SLOT"])[0])
+ self.assertEqual("2/2.30",
+ bindb.aux_get("dev-libs/A-1", ["SLOT"])[0])
+
+ # 0 -> 1
+ self.assertEqual("1",
+ vardb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+ self.assertEqual("1",
+ bindb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+
+ # 0/1 -> 1 (equivalent to 1/1)
+ self.assertEqual("1",
+ vardb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+ self.assertEqual("1",
+ bindb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+
+ # dont_apply_updates
+ self.assertEqual("0/2.30",
+ bindb.aux_get("dev-libs/A-2", ["SLOT"])[0])
+ self.assertEqual("0",
+ bindb.aux_get("dev-libs/B-2", ["SLOT"])[0])
+ self.assertEqual("0/2.1",
+ bindb.aux_get("dev-libs/C-2.1", ["SLOT"])[0])
+
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/update/test_update_dbentry.py b/usr/lib/portage/pym/portage/tests/update/test_update_dbentry.py
new file mode 100644
index 0000000..8895114
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/update/test_update_dbentry.py
@@ -0,0 +1,277 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import textwrap
+
+import portage
+from portage import os
+from portage.dep import Atom
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.update import update_dbentry
+from portage.util import ensure_dirs
+from portage.versions import _pkg_str
+from portage._global_updates import _do_global_updates
+
+class UpdateDbentryTestCase(TestCase):
+
+ def testUpdateDbentryTestCase(self):
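+ # Each case is (update command, EAPI, input dependency string,
+ # expected output).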
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " dev-libs/A:0 ", " dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "2",
+ " dev-libs/A[foo] ", " dev-libs/B[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/B:0/1=[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/B:0/1[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/B:0/0[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/B:0=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " dev-libs/A:0 ", " dev-libs/A:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/A-1:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/A:1/1=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/A:1=[foo] "),
+ )
+ for update_cmd, eapi, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, eapi=eapi)
+ self.assertEqual(result, output_str)
+
+
+ def testUpdateDbentryBlockerTestCase(self):
+ """
+ Avoid creating self-blockers for bug #367215.
+ """
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/A "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/B "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/A:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/A-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/B-1 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/A-1 "),
+
+ )
+ for update_cmd, parent, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, parent=parent)
+ self.assertEqual(result, output_str)
+
+ def testUpdateDbentryDbapiTestCase(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4-python",
+ },
+
+ "dev-libs/M-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/N-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/N-2::test_repo" : {
+ "EAPI": "4-python",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4-python",
+ },
+
+ }
+
+ world = ["dev-libs/M", "dev-libs/N"]
+
+ updates = textwrap.dedent("""
+ move dev-libs/M dev-libs/M-moved
+ move dev-libs/N dev-libs/N.moved
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ setconfig = trees[eroot]["root_config"].setconfig
+ selected_set = setconfig.getSets()["selected"]
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # M -> M-moved
+ old_pattern = re.compile(r"\bdev-libs/M(\s|$)")
+ rdepend = vardb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+
+ # EAPI 4-python/*-progress N -> N.moved
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ old_pattern = re.compile(r"\bdev-libs/N(\s|$)")
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/N.moved" in rdepend)
+ rdepend = bindb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/N.moved" in rdepend)
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/N-2", ["EAPI"])
+ vardb.aux_get("dev-libs/N.moved-2", ["RDEPEND"])[0]
+
+ # EAPI 4 does not allow dots in package names for N -> N.moved
+ rdepend = vardb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/N" in rdepend)
+ self.assertTrue("dev-libs/N.moved" not in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/N" in rdepend)
+ self.assertTrue("dev-libs/N.moved" not in rdepend)
+ vardb.aux_get("dev-libs/N-1", ["RDEPEND"])[0]
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/N.moved-1", ["EAPI"])
+
+ # dont_apply_updates
+ rdepend = vardb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+
+ selected_set.load()
+ self.assertTrue("dev-libs/M" not in selected_set)
+ self.assertTrue("dev-libs/M-moved" in selected_set)
+ self.assertTrue("dev-libs/N" not in selected_set)
+ self.assertTrue("dev-libs/N.moved" in selected_set)
+
+ finally:
+ playground.cleanup()
diff --git a/usr/lib/portage/pym/portage/tests/util/__init__.py b/usr/lib/portage/pym/portage/tests/util/__init__.py
new file mode 100644
index 0000000..69ce189
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/__init__.py
@@ -0,0 +1,4 @@
+# tests/util/__init__.py -- Portage Unit Test Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/usr/lib/portage/pym/portage/tests/util/__test__.py b/usr/lib/portage/pym/portage/tests/util/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/util/test_digraph.py b/usr/lib/portage/pym/portage/tests/util/test_digraph.py
new file mode 100644
index 0000000..4e858cf
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_digraph.py
@@ -0,0 +1,237 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util.digraph import digraph
+import portage.util
+
+class DigraphTest(TestCase):
+
+ def _assertBFSEqual(self, result, expected):
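+ # Compare a BFS traversal against an expected edge list in which
+ # a nested list stands for a group of edges that may be visited
+ # in any order.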
+ result_stack = list(result)
+ result_stack.reverse()
+ expected_stack = list(reversed(expected))
+ result_compared = []
+ expected_compared = []
+ while result_stack:
+ if not expected_stack:
+ result_compared.append(result_stack.pop())
+ self.assertEqual(result_compared, expected_compared)
+ expected_set = expected_stack.pop()
+ if not isinstance(expected_set, list):
+ expected_set = [expected_set]
+ expected_set = set(expected_set)
+ while expected_set:
+ if not result_stack:
+ expected_compared.extend(expected_set)
+ self.assertEqual(result_compared, expected_compared)
+ obj = result_stack.pop()
+ try:
+ expected_set.remove(obj)
+ except KeyError:
+ expected_compared.extend(expected_set)
+ result_compared.append(obj)
+ self.assertEqual(result_compared, expected_compared)
+ else:
+ expected_compared.append(obj)
+ result_compared.append(obj)
+ if expected_stack:
+ expected_set = expected_stack.pop()
+ if not isinstance(expected_set, list):
+ expected_set = [expected_set]
+ expected_compared.extend(expected_set)
+ self.assertEqual(result_compared, expected_compared)
+
+ def testBackwardCompatibility(self):
+ g = digraph()
+ f = g.copy()
+ g.addnode("A", None)
+ self.assertEqual("A" in g, True)
+ self.assertEqual(bool(g), True)
+ self.assertEqual(g.allnodes(), ["A"])
+ self.assertEqual(g.allzeros(), ["A"])
+ self.assertEqual(g.hasnode("A"), True)
+
+ def testDigraphEmptyGraph(self):
+ g = digraph()
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), False)
+ self.assertEqual(x.contains("A"), False)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "A")
+ x.delnode("A")
+ self.assertEqual(list(x), [])
+ self.assertEqual(x.get("A"), None)
+ self.assertEqual(x.get("A", "default"), "default")
+ self.assertEqual(x.all_nodes(), [])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertRaises(KeyError, x.child_nodes, "A")
+ self.assertRaises(KeyError, x.parent_nodes, "A")
+ self.assertEqual(x.hasallzeros(), True)
+ self.assertRaises(KeyError, list, x.bfs("A"))
+ self.assertRaises(KeyError, x.shortest_path, "A", "B")
+ self.assertRaises(KeyError, x.remove_edge, "A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update("A")
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphCircle(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "C", 0)
+ g.add("C", "D", 1)
+ g.add("D", "A", 2)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C", "D"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C", "D"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(x.child_nodes("A"), ["D"])
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("A"), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-2), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), ("A", "D"), ("D", "C"), ("C", "B")])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), ["D", "C", "B", "A"])
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ self.assertEqual(x.shortest_path("D", "A", ignore_priority=-2), ["D", "C", "B", "A"])
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([("D", "C", "B", "A"), ("C", "B", "A", "D"), ("B", "A", "D", "C"), \
+ ("A", "D", "C", "B")]))
+ x.remove_edge("A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update(["D"])
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphTree(self):
+ g = digraph()
+ g.add("B", "A", -1)
+ g.add("C", "A", 0)
+ g.add("D", "C", 1)
+ g.add("E", "C", 2)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), "B")
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(set(x), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes()), set(["B", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes(ignore_priority=0)), set(["A", "B", "D", "E"]))
+ self.assertEqual(x.root_nodes(), ["A"])
+ self.assertEqual(set(x.root_nodes(ignore_priority=0)), set(["A", "B", "C"]))
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("B"), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-2), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), [("A", "C"), ("A", "B")], [("C", "E"), ("C", "D")]])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "C", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), None)
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set())
+ x.remove("D")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "E"]))
+ x.remove("C")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "E"]))
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+ self.assertRaises(KeyError, x.remove_edge, "A", "E")
+
+ def testDigraphCompleteGraph(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "A", 1)
+ g.add("A", "C", 1)
+ g.add("C", "A", -1)
+ g.add("C", "B", 1)
+ g.add("B", "C", 1)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=0), ["B"])
+ self.assertEqual(set(x.parent_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.parent_nodes("A", ignore_priority=0), ["C"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), [("A", "C"), ("A", "B")]])
+ self.assertEqual(x.shortest_path("A", "C"), ["A", "C"])
+ self.assertEqual(x.shortest_path("C", "A"), ["C", "A"])
+ self.assertEqual(x.shortest_path("A", "C", ignore_priority=0), ["A", "B", "C"])
+ self.assertEqual(x.shortest_path("C", "A", ignore_priority=0), ["C", "A"])
+ cycles = set(frozenset(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([frozenset(["A", "B"]), frozenset(["A", "C"]), frozenset(["B", "C"])]))
+ x.remove_edge("A", "B")
+ cycles = set(frozenset(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([frozenset(["A", "C"]), frozenset(["C", "B"])]))
+ x.difference_update(["C"])
+ self.assertEqual(x.all_nodes(), ["A", "B"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphIgnorePriority(self):
+
+ def always_true(dummy):
+ return True
+
+ def always_false(dummy):
+ return False
+
+ g = digraph()
+ g.add("A", "B")
+
+ self.assertEqual(g.parent_nodes("A"), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_false), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_true), [])
+
+ self.assertEqual(g.child_nodes("B"), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_false), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_true), [])
+
+ self.assertEqual(g.leaf_nodes(), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_false), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_true), ["A", "B"])
+
+ self.assertEqual(g.root_nodes(), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_false), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_true), ["A", "B"])
diff --git a/usr/lib/portage/pym/portage/tests/util/test_getconfig.py b/usr/lib/portage/pym/portage/tests/util/test_getconfig.py
new file mode 100644
index 0000000..b72bd6a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_getconfig.py
@@ -0,0 +1,76 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage import _unicode_encode
+from portage.const import PORTAGE_BASE_PATH
+from portage.tests import TestCase
+from portage.util import getconfig
+from portage.exception import ParseError
+
+class GetConfigTestCase(TestCase):
+ """
+	Test that getconfig() produces the same result as bash would when
+ sourcing the same input.
+ """
+
+ _cases = {
+ 'FETCHCOMMAND' : 'wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_RSYNC' : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
+ 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; eval \\"declare -a ssh_opts=(\\${3})\\" ; exec sftp -P \\${port} \\"\\${ssh_opts[@]}\\" \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port} \\${3}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
+ }
+
+ def testGetConfig(self):
+ make_globals_file = os.path.join(self.cnf_path, "make.globals")
+ d = getconfig(make_globals_file)
+ for k, v in self._cases.items():
+ self.assertEqual(d[k], v)
+
+ def testGetConfigSourceLex(self):
+ try:
+ tempdir = tempfile.mkdtemp()
+ make_conf_file = os.path.join(tempdir, 'make.conf')
+ with open(make_conf_file, 'w') as f:
+ f.write('source "${DIR}/sourced_file"\n')
+ sourced_file = os.path.join(tempdir, 'sourced_file')
+ with open(sourced_file, 'w') as f:
+ f.write('PASSES_SOURCING_TEST="True"\n')
+
+ d = getconfig(make_conf_file, allow_sourcing=True, expand={"DIR": tempdir})
+
+ # PASSES_SOURCING_TEST should exist in getconfig result.
+ self.assertTrue(d is not None)
+ self.assertEqual("True", d['PASSES_SOURCING_TEST'])
+
+			# With allow_sourcing=True and an empty expand map, "${DIR}"
+			# is not expanded, so the sourced file cannot be found and a
+			# ParseError is raised.
+ self.assertRaisesMsg("An empty expand map should throw an exception",
+ ParseError, getconfig, make_conf_file, allow_sourcing=True, expand={})
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testGetConfigProfileEnv(self):
+ # Test the mode which is used to parse /etc/env.d and /etc/profile.env.
+
+ cases = {
+ 'LESS_TERMCAP_mb': "$\E[01;31m", # bug #410625
+ }
+
+ with tempfile.NamedTemporaryFile(mode='wb') as f:
+ # Format like env_update formats /etc/profile.env.
+ for k, v in cases.items():
+ if v.startswith('$') and not v.startswith('${'):
+ line = "export %s=$'%s'\n" % (k, v[1:])
+ else:
+ line = "export %s='%s'\n" % (k, v)
+ f.write(_unicode_encode(line))
+ f.flush()
+
+ d = getconfig(f.name, expand=False)
+ for k, v in cases.items():
+ self.assertEqual(d.get(k), v)
diff --git a/usr/lib/portage/pym/portage/tests/util/test_grabdict.py b/usr/lib/portage/pym/portage/tests/util/test_grabdict.py
new file mode 100644
index 0000000..e62a75d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_grabdict.py
@@ -0,0 +1,11 @@
+# test_grabdict.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+#from portage.util import grabdict
+
+class GrabDictTestCase(TestCase):
+
+ def testGrabDictPass(self):
+ pass
diff --git a/usr/lib/portage/pym/portage/tests/util/test_normalizedPath.py b/usr/lib/portage/pym/portage/tests/util/test_normalizedPath.py
new file mode 100644
index 0000000..f993886
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_normalizedPath.py
@@ -0,0 +1,14 @@
+# test_normalizePath.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class NormalizePathTestCase(TestCase):
+
+ def testNormalizePath(self):
+
+ from portage.util import normalize_path
+ path = "///foo/bar/baz"
+ good = "/foo/bar/baz"
+ self.assertEqual(normalize_path(path), good)
diff --git a/usr/lib/portage/pym/portage/tests/util/test_stackDictList.py b/usr/lib/portage/pym/portage/tests/util/test_stackDictList.py
new file mode 100644
index 0000000..25a723c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_stackDictList.py
@@ -0,0 +1,19 @@
+# test_stackDictList.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class StackDictListTestCase(TestCase):
+
+ def testStackDictList(self):
+ from portage.util import stack_dictlist
+
+ tests = [
+ ({'a': 'b'}, {'x': 'y'}, False, {'a': ['b'], 'x': ['y']}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-*']}, True, {}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-x86']}, True, {'KEYWORDS': ['alpha']}),
+ ]
+ for test in tests:
+ self.assertEqual(
+ stack_dictlist([test[0], test[1]], incremental=test[2]), test[3])
diff --git a/usr/lib/portage/pym/portage/tests/util/test_stackDicts.py b/usr/lib/portage/pym/portage/tests/util/test_stackDicts.py
new file mode 100644
index 0000000..0c1dcdb
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_stackDicts.py
@@ -0,0 +1,33 @@
+# test_stackDicts.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_dicts
+
+
+class StackDictsTestCase(TestCase):
+
+ def testStackDictsPass(self):
+
+ tests = [
+ ([{'a': 'b'}, {'b': 'c'}], {'a': 'b', 'b': 'c'}, False, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, True, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, ['a'], False),
+ ([{'a': 'b'}, None], {'a': 'b'}, False, [], True),
+ ([None], {}, False, [], False),
+ ([None, {}], {}, False, [], True)
+ ]
+ for test in tests:
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertEqual(result, test[1])
+
+ def testStackDictsFail(self):
+
+ tests = [
+ ([None, {}], None, False, [], True),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, [], False)
+ ]
+ for test in tests:
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertNotEqual(result, test[1])
diff --git a/usr/lib/portage/pym/portage/tests/util/test_stackLists.py b/usr/lib/portage/pym/portage/tests/util/test_stackLists.py
new file mode 100644
index 0000000..3ba69ec
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_stackLists.py
@@ -0,0 +1,21 @@
+# test_stackLists.py -- Portage Unit Testing Functionality
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_lists
+
+class StackListsTestCase(TestCase):
+
+ def testStackLists(self):
+
+ tests = [
+ ([['a', 'b', 'c'], ['d', 'e', 'f']], ['a', 'c', 'b', 'e', 'd', 'f'], False),
+ ([['a', 'x'], ['b', 'x']], ['a', 'x', 'b'], False),
+ ([['a', 'b', 'c'], ['-*']], [], True),
+ ([['a'], ['-a']], [], True)
+ ]
+
+ for test in tests:
+ result = stack_lists(test[0], test[2])
+ self.assertEqual(set(result), set(test[1]))
diff --git a/usr/lib/portage/pym/portage/tests/util/test_uniqueArray.py b/usr/lib/portage/pym/portage/tests/util/test_uniqueArray.py
new file mode 100644
index 0000000..aae88cc
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_uniqueArray.py
@@ -0,0 +1,26 @@
+# test_uniqueArray.py -- Portage Unit Testing Functionality
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.util import unique_array
+
+class UniqueArrayTestCase(TestCase):
+
+ def testUniqueArrayPass(self):
+ """
+		test portage.util.unique_array()
+ """
+
+ tests = [
+ (['a', 'a', 'a', os, os, [], [], []], ['a', os, []]),
+ ([1, 1, 1, 2, 3, 4, 4], [1, 2, 3, 4])
+ ]
+
+ for test in tests:
+ result = unique_array(test[0])
+ for item in test[1]:
+ number = result.count(item)
+ self.assertFalse(number != 1, msg=("%s contains %s of %s, "
+ "should be only 1") % (result, number, item))
diff --git a/usr/lib/portage/pym/portage/tests/util/test_varExpand.py b/usr/lib/portage/pym/portage/tests/util/test_varExpand.py
new file mode 100644
index 0000000..498b50e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_varExpand.py
@@ -0,0 +1,92 @@
+# test_varExpand.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import varexpand
+
+class VarExpandTestCase(TestCase):
+
+ def testVarExpandPass(self):
+
+ varDict = {"a": "5", "b": "7", "c": "-5"}
+ for key in varDict:
+ result = varexpand("$%s" % key, varDict)
+
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "$%s" % key, varDict))
+ result = varexpand("${%s}" % key, varDict)
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "${%s}" % key, varDict))
+
+ def testVarExpandBackslashes(self):
+ """
+ We want to behave like bash does when expanding a variable
+ assignment in a sourced file, in which case it performs
+ backslash removal for \\ and \$ but nothing more. It also
+ removes escaped newline characters. Note that we don't
+ handle escaped quotes here, since getconfig() uses shlex
+ to handle that earlier.
+ """
+
+ varDict = {}
+ tests = [
+ ("\\", "\\"),
+ ("\\\\", "\\"),
+ ("\\\\\\", "\\\\"),
+ ("\\\\\\\\", "\\\\"),
+ ("\\$", "$"),
+ ("\\\\$", "\\$"),
+ ("\\a", "\\a"),
+ ("\\b", "\\b"),
+ ("\\n", "\\n"),
+ ("\\r", "\\r"),
+ ("\\t", "\\t"),
+ ("\\\n", ""),
+ ("\\\"", "\\\""),
+ ("\\'", "\\'"),
+ ]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandDoubleQuotes(self):
+
+ varDict = {"a": "5"}
+ tests = [("\"${a}\"", "\"5\"")]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandSingleQuotes(self):
+
+ varDict = {"a": "5"}
+ tests = [("\'${a}\'", "\'${a}\'")]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandFail(self):
+
+ varDict = {"a": "5", "b": "7", "c": "15"}
+
+ testVars = ["fail"]
+
+ for var in testVars:
+ result = varexpand("$%s" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "$%s" % var, varDict))
+
+ result = varexpand("${%s}" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "${%s}" % var, varDict))
diff --git a/usr/lib/portage/pym/portage/tests/util/test_whirlpool.py b/usr/lib/portage/pym/portage/tests/util/test_whirlpool.py
new file mode 100644
index 0000000..fbe7cae
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/util/test_whirlpool.py
@@ -0,0 +1,16 @@
+# Copyright 2011-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+
+class WhirlpoolTestCase(TestCase):
+ def testBundledWhirlpool(self):
+ # execute the tests bundled with the whirlpool module
+ retval = subprocess.call([portage._python_interpreter, "-b", "-Wd",
+ os.path.join(PORTAGE_PYM_PATH, "portage/util/whirlpool.py")])
+ self.assertEqual(retval, os.EX_OK)
diff --git a/usr/lib/portage/pym/portage/tests/versions/__init__.py b/usr/lib/portage/pym/portage/tests/versions/__init__.py
new file mode 100644
index 0000000..2b14180
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/versions/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.versions/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/versions/__test__.py b/usr/lib/portage/pym/portage/tests/versions/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/versions/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/versions/test_cpv_sort_key.py b/usr/lib/portage/pym/portage/tests/versions/test_cpv_sort_key.py
new file mode 100644
index 0000000..eeb0eae
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/versions/test_cpv_sort_key.py
@@ -0,0 +1,17 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import cpv_sort_key
+
+class CpvSortKeyTestCase(TestCase):
+
+ def testCpvSortKey(self):
+
+ tests = [
+ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+ ("a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+ ]
+
+ for test in tests:
+ self.assertEqual(tuple(sorted(test[0], key=cpv_sort_key())), test[1])
diff --git a/usr/lib/portage/pym/portage/tests/versions/test_vercmp.py b/usr/lib/portage/pym/portage/tests/versions/test_vercmp.py
new file mode 100644
index 0000000..78fe7ed
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/versions/test_vercmp.py
@@ -0,0 +1,84 @@
+# test_vercmp.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import vercmp
+
+class VerCmpTestCase(TestCase):
+ """ A simple testCase for portage.versions.vercmp()
+ """
+
+ def testVerCmpGreater(self):
+
+ tests = [
+ ("6.0", "5.0"), ("5.0", "5"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0-r1", "1.0"),
+ ("cvs.9999", "9999"),
+ ("999999999999999999999999999999", "999999999999999999999999999998"),
+ ("1.0.0", "1.0"),
+ ("1.0.0", "1.0b"),
+ ("1b", "1"),
+ ("1b_p1", "1_p1"),
+ ("1.1b", "1.1"),
+ ("12.2.5", "12.2b"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) <= 0, msg="%s < %s? Wrong!" % (test[0], test[1]))
+
+ def testVerCmpLess(self):
+ """
+	pre < alpha < beta < rc < p -> test each adjacent pair; the full ordering should follow by transitivity.
+ """
+ tests = [
+ ("4.0", "5.0"), ("5", "5.0"), ("1.0_pre2", "1.0_p2"),
+ ("1.0_alpha2", "1.0_p2"), ("1.0_alpha1", "1.0_beta1"), ("1.0_beta3", "1.0_rc3"),
+ ("1.001000000000000000001", "1.001000000000000000002"),
+ ("1.00100000000", "1.0010000000000000001"),
+ ("9999", "cvs.9999"),
+ ("999999999999999999999999999998", "999999999999999999999999999999"),
+ ("1.01", "1.1"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0", "1.0-r1"),
+ ("1.0", "1.0.0"),
+ ("1.0b", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1", "1b"),
+ ("1.1", "1.1b"),
+ ("12.2b", "12.2.5"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0], test[1]))
+
+ def testVerCmpEqual(self):
+
+ tests = [
+ ("4.0", "4.0"),
+ ("1.0", "1.0"),
+ ("1.0-r0", "1.0"),
+ ("1.0", "1.0-r0"),
+ ("1.0-r0", "1.0-r0"),
+ ("1.0-r1", "1.0-r1")
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0], test[1]))
+
+ def testVerNotEqual(self):
+
+ tests = [
+ ("1", "2"), ("1.0_alpha", "1.0_pre"), ("1.0_beta", "1.0_alpha"),
+ ("0", "0.0"),
+ ("cvs.9999", "9999"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0", "1.0-r1"),
+ ("1.0-r1", "1.0"),
+ ("1.0", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1b", "1"),
+ ("1.1b", "1.1"),
+ ("12.2b", "12.2"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0], test[1]))
diff --git a/usr/lib/portage/pym/portage/tests/xpak/__init__.py b/usr/lib/portage/pym/portage/tests/xpak/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/xpak/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.xpak/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/tests/xpak/__test__.py b/usr/lib/portage/pym/portage/tests/xpak/__test__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/xpak/__test__.py
diff --git a/usr/lib/portage/pym/portage/tests/xpak/test_decodeint.py b/usr/lib/portage/pym/portage/tests/xpak/test_decodeint.py
new file mode 100644
index 0000000..2da5735
--- /dev/null
+++ b/usr/lib/portage/pym/portage/tests/xpak/test_decodeint.py
@@ -0,0 +1,16 @@
+# xpak/test_decodeint.py
+# Copyright Gentoo Foundation 2006
+# Portage Unit Testing Functionality
+
+from portage.tests import TestCase
+from portage.xpak import decodeint, encodeint
+
+class testDecodeIntTestCase(TestCase):
+
+ def testDecodeInt(self):
+
+ for n in range(1000):
+ self.assertEqual(decodeint(encodeint(n)), n)
+
+ for n in (2 ** 32 - 1,):
+ self.assertEqual(decodeint(encodeint(n)), n)
diff --git a/usr/lib/portage/pym/portage/update.py b/usr/lib/portage/pym/portage/update.py
new file mode 100644
index 0000000..7a71092
--- /dev/null
+++ b/usr/lib/portage/pym/portage/update.py
@@ -0,0 +1,427 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import re
+import stat
+import sys
+import warnings
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,dep_getkey,isvalidatom,match_from_list',
+ 'portage.util:ConfigProtect,new_protect_filename,' + \
+ 'normalize_path,write_atomic,writemsg',
+ 'portage.versions:_get_slot_re',
+)
+
+from portage.const import USER_CONFIG_PATH, VCS_DIRS
+from portage.eapi import _get_eapi_attrs
+from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent, eapi=None, parent=None):
+
+ if parent is not None:
+ eapi = parent.eapi
+
+ if update_cmd[0] == "move":
+ old_value = _unicode(update_cmd[1])
+ new_value = _unicode(update_cmd[2])
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if old_value in mycontent and isvalidatom(new_value, eapi=eapi):
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if old_value not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != old_value:
+ continue
+
+ new_atom = Atom(token.replace(old_value, new_value, 1),
+ eapi=eapi)
+
+ # Avoid creating self-blockers for bug #367215.
+ if new_atom.blocker and parent is not None and \
+ parent.cp == new_atom.cp and \
+ match_from_list(new_atom, [parent]):
+ continue
+
+ split_content[i] = _unicode(new_atom)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
+ elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
+ orig_atom, origslot, newslot = update_cmd[1:]
+ orig_cp = orig_atom.cp
+
+ # We don't support versioned slotmove atoms here, since it can be
+ # difficult to determine if the version constraints really match
+ # the atoms that we're trying to update.
+ if orig_atom.version is None and orig_cp in mycontent:
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if orig_cp not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != orig_cp:
+ continue
+ if atom.slot is None or atom.slot != origslot:
+ continue
+
+ slot_part = newslot
+ if atom.sub_slot is not None:
+ if atom.sub_slot == origslot:
+ sub_slot = newslot
+ else:
+ sub_slot = atom.sub_slot
+ slot_part += "/" + sub_slot
+ if atom.slot_operator is not None:
+ slot_part += atom.slot_operator
+
+ split_content[i] = atom.with_slot(slot_part)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
+ return mycontent
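+
+# For example, a "move" command such as ["move", Atom("a/b"), Atom("a/c")]
+# applied to the content ">=a/b-1 x/y" yields ">=a/c-1 x/y"
+# (the atoms here are illustrative).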
+
+def update_dbentries(update_iter, mydata, eapi=None, parent=None):
+ """Performs update commands and returns a
+ dict containing only the updated items."""
+ updated_items = {}
+ for k, mycontent in mydata.items():
+ k_unicode = _unicode_decode(k,
+ encoding=_encodings['repo.content'], errors='replace')
+ if k_unicode not in ignored_dbentries:
+ orig_content = mycontent
+ mycontent = _unicode_decode(mycontent,
+ encoding=_encodings['repo.content'], errors='replace')
+ is_encoded = mycontent is not orig_content
+ orig_content = mycontent
+ for update_cmd in update_iter:
+ mycontent = update_dbentry(update_cmd, mycontent,
+ eapi=eapi, parent=parent)
+ if mycontent != orig_content:
+ if is_encoded:
+ mycontent = _unicode_encode(mycontent,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ updated_items[k] = mycontent
+ return updated_items
+
+def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
+ """Performs update commands which result in search and replace operations
+ for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+	Returns True if any files were actually modified and False otherwise."""
+
+ warnings.warn("portage.update.fixdbentries() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ mydata = {}
+ for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+ file_path = os.path.join(dbdir, myfile)
+ with io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ mydata[myfile] = f.read()
+ updated_items = update_dbentries(update_iter, mydata,
+ eapi=eapi, parent=parent)
+ for myfile, mycontent in updated_items.items():
+ file_path = os.path.join(dbdir, myfile)
+ write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
+ return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+ """Returns all the updates from the given directory as a sorted list of
+ tuples, each containing (file_path, statobj, content). If prev_mtimes is
+ given then updates are only returned if one or more files have different
+ mtimes. When a change is detected for a given file, updates will be
+ returned for that file and any files that come after it in the entire
+ sequence. This ensures that all relevant updates are returned for cases
+ in which the destination package of an earlier move corresponds to
+ the source package of a move that comes somewhere later in the entire
+ sequence of files.
+ """
+ try:
+ mylist = os.listdir(updpath)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ raise DirectoryNotFound(updpath)
+ raise
+ if prev_mtimes is None:
+ prev_mtimes = {}
+ # validate the file name (filter out CVS directory, etc...)
+ mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+ if len(mylist) == 0:
+ return []
+
+ # update names are mangled to make them sort properly
+ mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
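+	# e.g. "1Q-2005" is first mangled to "2005-1Q" so the year sorts first,
+	# and then mangled back to the original "1Q-2005" after sorting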
+
+ update_data = []
+ for myfile in mylist:
+ file_path = os.path.join(updpath, myfile)
+ mystat = os.stat(file_path)
+ if update_data or \
+ file_path not in prev_mtimes or \
+ long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
+ f = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ content = f.read()
+ f.close()
+ update_data.append((file_path, mystat, content))
+ return update_data
+
+def parse_updates(mycontent):
+ """Valid updates are returned as a list of split update commands."""
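+	# For instance, the line "move sys-apps/foo sys-apps/bar" is returned
+	# as ["move", Atom("sys-apps/foo"), Atom("sys-apps/bar")], while for
+	# "slotmove" entries only the atom is parsed and the two slot names
+	# remain plain strings (package names here are illustrative).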
+ eapi_attrs = _get_eapi_attrs(None)
+ slot_re = _get_slot_re(eapi_attrs)
+ myupd = []
+ errors = []
+ mylines = mycontent.splitlines()
+ for myline in mylines:
+ mysplit = myline.split()
+ if len(mysplit) == 0:
+ continue
+ if mysplit[0] not in ("move", "slotmove"):
+ errors.append(_("ERROR: Update type not recognized '%s'") % myline)
+ continue
+ if mysplit[0] == "move":
+ if len(mysplit) != 3:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ valid = True
+ for i in (1, 2):
+ try:
+ atom = Atom(mysplit[i])
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker or atom != atom.cp:
+ atom = None
+ if atom is not None:
+ mysplit[i] = atom
+ else:
+ errors.append(
+ _("ERROR: Malformed update entry '%s'") % myline)
+ valid = False
+ break
+ if not valid:
+ continue
+
+ if mysplit[0] == "slotmove":
+			if len(mysplit) != 4:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+ try:
+ atom = Atom(pkg)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is not None:
+ mysplit[1] = atom
+ else:
+ errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+ continue
+
+ invalid_slot = False
+ for slot in (origslot, newslot):
+ m = slot_re.match(slot)
+ if m is None:
+ invalid_slot = True
+ break
+ if "/" in slot:
+ # EAPI 4-slot-abi style SLOT is currently not supported.
+ invalid_slot = True
+ break
+
+ if invalid_slot:
+ errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+ continue
+
+ # The list of valid updates is filtered by continue statements above.
+ myupd.append(mysplit)
+ return myupd, errors
+
+def update_config_files(config_root, protect, protect_mask, update_iter,
+ match_callback = None, case_insensitive = False):
+ """Perform global updates on /etc/portage/package.*, /etc/portage/profile/package.*,
+ /etc/portage/profile/packages and /etc/portage/sets.
+ config_root - location of files to update
+ protect - list of paths from CONFIG_PROTECT
+ protect_mask - list of paths from CONFIG_PROTECT_MASK
+ update_iter - list of update commands as returned from parse_updates(),
+ or dict of {repo_name: list}
+	match_callback - a callback which will be called with three arguments:
+	     match_callback(repo_name, old_atom, new_atom)
+	and should return a boolean value determining whether to perform the update"""
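+	# Hedged usage sketch (variable names are illustrative):
+	#
+	#	updates, errors = parse_updates(content)
+	#	update_config_files(config_root, protect, protect_mask, updates)
+	#
+	# Matching atoms are rewritten in place, with the applied update
+	# command kept as a comment; CONFIG_PROTECT is honored below via
+	# ConfigProtect.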
+
+ repo_dict = None
+ if isinstance(update_iter, dict):
+ repo_dict = update_iter
+ if match_callback is None:
+ def match_callback(repo_name, atoma, atomb):
+ return True
+ config_root = normalize_path(config_root)
+ update_files = {}
+ file_contents = {}
+ myxfiles = [
+ "package.accept_keywords", "package.env",
+ "package.keywords", "package.license",
+ "package.mask", "package.properties",
+ "package.unmask", "package.use", "sets"
+ ]
+ myxfiles += [os.path.join("profile", x) for x in (
+ "packages", "package.accept_keywords",
+ "package.keywords", "package.mask",
+ "package.unmask", "package.use",
+ "package.use.force", "package.use.mask",
+ "package.use.stable.force", "package.use.stable.mask"
+ )]
+ abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
+ recursivefiles = []
+ for x in myxfiles:
+ config_file = os.path.join(abs_user_config, x)
+ if os.path.isdir(config_file):
+ for parent, dirs, files in os.walk(config_file):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for y_enc in list(dirs):
+ try:
+ y = _unicode_decode(y_enc,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ dirs.remove(y_enc)
+ continue
+ if y.startswith(".") or y in VCS_DIRS:
+ dirs.remove(y_enc)
+ for y in files:
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if y.startswith("."):
+ continue
+ recursivefiles.append(
+ os.path.join(parent, y)[len(abs_user_config) + 1:])
+ else:
+ recursivefiles.append(x)
+ myxfiles = recursivefiles
+ for x in myxfiles:
+ f = None
+ try:
+ f = io.open(
+ _unicode_encode(os.path.join(abs_user_config, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace')
+ file_contents[x] = f.readlines()
+ except IOError:
+ continue
+ finally:
+ if f is not None:
+ f.close()
+
+ ignore_line_re = re.compile(r'^#|^\s*$')
+ if repo_dict is None:
+ update_items = [(None, update_iter)]
+ else:
+ update_items = [x for x in repo_dict.items() if x[0] != 'DEFAULT']
+ for repo_name, update_iter in update_items:
+ for update_cmd in update_iter:
+ for x, contents in file_contents.items():
+ skip_next = False
+ for pos, line in enumerate(contents):
+ if skip_next:
+ skip_next = False
+ continue
+ if ignore_line_re.match(line):
+ continue
+ atom = line.split()[0]
+ if atom[:1] == "-":
+ # package.mask supports incrementals
+ atom = atom[1:]
+ if atom[:1] == "*":
+						# the packages file supports "*"-prefixed atoms as an indication of system packages.
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ continue
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if match_callback(repo_name, atom, new_atom):
+ # add a comment with the update command, so
+ # the user can clearly see what happened
+ contents[pos] = "# %s\n" % \
+ " ".join("%s" % (x,) for x in update_cmd)
+ contents.insert(pos + 1,
+ line.replace("%s" % (atom,),
+ "%s" % (new_atom,), 1))
+ # we've inserted an additional line, so we need to
+ # skip it when it's reached in the next iteration
+ skip_next = True
+ update_files[x] = 1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ protect_obj = ConfigProtect(
+ config_root, protect, protect_mask,
+ case_insensitive = case_insensitive)
+ for x in update_files:
+ updating_file = os.path.join(abs_user_config, x)
+ if protect_obj.isprotected(updating_file):
+ updating_file = new_protect_filename(updating_file)
+ try:
+ write_atomic(updating_file, "".join(file_contents[x]))
+ except PortageException as e:
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! An error occurred while updating a config file:") + \
+ " '%s'\n" % updating_file, noiselevel=-1)
+ continue
+
+def dep_transform(mydep, oldkey, newkey):
+ if dep_getkey(mydep) == oldkey:
+ return mydep.replace(oldkey, newkey, 1)
+ return mydep
diff --git a/usr/lib/portage/pym/portage/util/ExtractKernelVersion.py b/usr/lib/portage/pym/portage/util/ExtractKernelVersion.py
new file mode 100644
index 0000000..af4a4fe
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/ExtractKernelVersion.py
@@ -0,0 +1,78 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ExtractKernelVersion']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.util import getconfig, grabfile
+
+def ExtractKernelVersion(base_dir):
+ """
+ Try to figure out what kernel version we are running
+ @param base_dir: Path to sources (usually /usr/src/linux)
+ @type base_dir: string
+	@rtype: tuple(version[string], error[string])
+	@return: tuple(version, error) -- exactly one of the two is
+		populated; the other is None
+
+ """
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = io.open(_unicode_encode(pathname,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+	except (IOError, OSError) as details:
+		return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+	except (IOError, OSError) as details:
+		return (None, str(details))
+ finally:
+ f.close()
+
+ lines = [l.strip() for l in lines]
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = line.split("=")
+ items = [i.strip() for i in items]
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions) - 1, -1, -1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
+ version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
+
+ return (version, None)
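+
+# For a source tree whose Makefile begins with VERSION = 3, PATCHLEVEL = 14,
+# SUBLEVEL = 1 and EXTRAVERSION = -gentoo, this returns
+# ("3.14.1-gentoo", None), plus any localversion* or CONFIG_LOCALVERSION
+# suffixes that happen to be present.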
diff --git a/usr/lib/portage/pym/portage/util/SlotObject.py b/usr/lib/portage/pym/portage/util/SlotObject.py
new file mode 100644
index 0000000..4bb6822
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/SlotObject.py
@@ -0,0 +1,50 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class SlotObject(object):
+ __slots__ = ("__weakref__",)
+
+ def __init__(self, **kwargs):
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ myvalue = kwargs.pop(myattr, None)
+ if myvalue is None and getattr(self, myattr, None) is not None:
+ raise AssertionError(
+ "class '%s' duplicates '%s' value in __slots__ of base class '%s'" %
+ (self.__class__.__name__, myattr, c.__name__))
+ setattr(self, myattr, myvalue)
+
+ if kwargs:
+ raise TypeError(
+ "'%s' is an invalid keyword argument for this constructor" %
+ (next(iter(kwargs)),))
+
+ def copy(self):
+ """
+ Create a new instance and copy all attributes
+ defined from __slots__ (including those from
+ inherited classes).
+ """
+ obj = self.__class__()
+
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ setattr(obj, myattr, getattr(self, myattr))
+
+ return obj
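+
+# Minimal usage sketch (hypothetical subclass; not part of this module):
+#
+#	class Task(SlotObject):
+#		__slots__ = ("name", "priority")
+#
+#	task = Task(name="sync", priority=0)
+#	clone = task.copy()  # copies every attribute declared in __slots__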
diff --git a/usr/lib/portage/pym/portage/util/_ShelveUnicodeWrapper.py b/usr/lib/portage/pym/portage/util/_ShelveUnicodeWrapper.py
new file mode 100644
index 0000000..adbd519
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_ShelveUnicodeWrapper.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ShelveUnicodeWrapper(object):
+ """
+ Convert unicode to str and back again, since python-2.x shelve
+ module doesn't support unicode.
+ """
+ def __init__(self, shelve_instance):
+ self._shelve = shelve_instance
+
+ def _encode(self, s):
+ if isinstance(s, unicode):
+ s = s.encode('utf_8')
+ return s
+
+ def __len__(self):
+ return len(self._shelve)
+
+ def __contains__(self, k):
+ return self._encode(k) in self._shelve
+
+ def __iter__(self):
+ return self._shelve.__iter__()
+
+ def items(self):
+ return self._shelve.iteritems()
+
+ def __setitem__(self, k, v):
+ self._shelve[self._encode(k)] = self._encode(v)
+
+ def __getitem__(self, k):
+ return self._shelve[self._encode(k)]
+
+ def __delitem__(self, k):
+ del self._shelve[self._encode(k)]
+
+ def get(self, k, *args):
+ return self._shelve.get(self._encode(k), *args)
+
+ def close(self):
+ self._shelve.close()
+
+ def clear(self):
+ self._shelve.clear()
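+
+# Usage sketch (illustrative; only meaningful on Python 2, where the shelve
+# module requires str keys):
+#
+#	import shelve
+#	db = ShelveUnicodeWrapper(shelve.open("/tmp/example.shelve"))
+#	db[u"key"] = u"value"  # key and value are encoded to utf-8 str
+#	print(db[u"key"])
+#	db.close()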
diff --git a/usr/lib/portage/pym/portage/util/__init__.py b/usr/lib/portage/pym/portage/util/__init__.py
new file mode 100644
index 0000000..fd7995c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/__init__.py
@@ -0,0 +1,1800 @@
+# Copyright 2004-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['apply_permissions', 'apply_recursive_permissions',
+ 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
+ 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
+ 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
+ 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
+ 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
+ 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
+ 'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
+ 'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
+
+from copy import deepcopy
+import errno
+import io
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+import logging
+import re
+import shlex
+import stat
+import string
+import sys
+import traceback
+import glob
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'pickle',
+ 'portage.dep:Atom',
+ 'subprocess',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import VCS_DIRS
+from portage.exception import InvalidAtom, PortageException, FileNotFound, \
+ IsADirectory, OperationNotPermitted, ParseError, PermissionDenied, \
+ ReadOnlyFileSystem
+from portage.localization import _
+from portage.proxy.objectproxy import ObjectProxy
+from portage.cache.mappings import UserDict
+from portage.const import EPREFIX
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+noiselimit = 0
+
+def initialize_logger(level=logging.WARN):
+	"""Sets up basic logging of portage activities
+	Args:
+		level: the logging level to emit messages at
+			(e.g. logging.INFO, logging.DEBUG, logging.WARNING)
+	Returns:
+		None
+	"""
+	logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
+
+def writemsg(mystr, noiselevel=0, fd=None):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if fd is None:
+ fd = sys.stderr
+ if noiselevel <= noiselimit:
+ # avoid potential UnicodeEncodeError
+ if isinstance(fd, io.StringIO):
+ mystr = _unicode_decode(mystr,
+ encoding=_encodings['content'], errors='replace')
+ else:
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
+ fd = fd.buffer
+ fd.write(mystr)
+ fd.flush()
+
+def writemsg_stdout(mystr, noiselevel=0):
+	"""Prints messages to stdout based on the noiselimit setting"""
+ writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def writemsg_level(msg, level=0, noiselevel=0):
+ """
+ Show a message for the given level as defined by the logging module
+ (default is 0). When level >= logging.WARNING then the message is
+ sent to stderr, otherwise it is sent to stdout. The noiselevel is
+ passed directly to writemsg().
+
+ @type msg: str
+ @param msg: a message string, including newline if appropriate
+ @type level: int
+ @param level: a numeric logging level (see the logging module)
+ @type noiselevel: int
+ @param noiselevel: passed directly to writemsg
+ """
+ if level >= logging.WARNING:
+ fd = sys.stderr
+ else:
+ fd = sys.stdout
+ writemsg(msg, noiselevel=noiselevel, fd=fd)
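+
+# Illustration: writemsg_level("!!! error\n", level=logging.ERROR) is sent
+# to stderr, while writemsg_level("done\n") (default level 0) goes to stdout.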
+
+def normalize_path(mypath):
+ """
+ os.path.normpath("//foo") returns "//foo" instead of "/foo"
+ We dislike this behavior so we create our own normpath func
+ to fix it.
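+
+	Illustrative doctest (consistent with the unit tests earlier in
+	this diff):
+	>>> normalize_path("///foo/bar/baz")
+	'/foo/bar/baz'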
+ """
+ if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
+ path_sep = os.path.sep.encode()
+ else:
+ path_sep = os.path.sep
+
+ if mypath.startswith(path_sep):
+ # posixpath.normpath collapses 3 or more leading slashes to just 1.
+ return os.path.normpath(2*path_sep + mypath)
+ else:
+ return os.path.normpath(mypath)
+
+def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ mylines = grablines(myfilename, recursive, remember_source_file=True)
+ newlines = []
+
+ for x, source_file in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline = x.split()
+ if x and x[0] != "#":
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+
+ myline = " ".join(myline)
+ if not myline:
+ continue
+ if myline[0] == "#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==", 1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ if remember_source_file:
+ newlines.append((myline, source_file))
+ else:
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func, myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict:
+ new_dl[key] = []
+ new_dl[key] = [func(x) for x in myDict[key]]
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+ """
+ Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+	Returns a single dict. Entries from dicts at higher indices take precedence.
+
+ Example usage:
+ >>> from portage.util import stack_dictlist
+ >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
+ >>> {'a':'b','x':'y'}
+ >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
+ >>> {'a':['b','c'] }
+ >>> a = {'KEYWORDS':['x86','alpha']}
+ >>> b = {'KEYWORDS':['-x86']}
+ >>> print stack_dictlist( [a,b] )
+ >>> { 'KEYWORDS':['x86','alpha','-x86']}
+ >>> print stack_dictlist( [a,b], incremental=True)
+ >>> { 'KEYWORDS':['alpha'] }
+ >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
+ >>> { 'KEYWORDS':['alpha'] }
+
+	@param original_dicts: a list of (dictionary objects or None)
+	@type original_dicts: list
+	@param incremental: True or False, depending on whether new keys should overwrite
+	   keys which already exist.
+	@type incremental: boolean
+	@param incrementals: a list of items that should be incremental (-foo removes foo from
+	   the returned dict).
+	@type incrementals: list
+	@param ignore_none: appears to be ignored; probably used long ago.
+	@type ignore_none: boolean
+
+ """
+ final_dict = {}
+ for mydict in original_dicts:
+ if mydict is None:
+ continue
+ for y in mydict:
+ if not y in final_dict:
+ final_dict[y] = []
+
+ for thing in mydict[y]:
+ if thing:
+ if incremental or y in incrementals:
+ if thing == "-*":
+ final_dict[y] = []
+ continue
+ elif thing[:1] == '-':
+ try:
+ final_dict[y].remove(thing[1:])
+ except ValueError:
+ pass
+ continue
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing)
+ if y in final_dict and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+ """Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
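+	# e.g. stack_dicts([{'a': 'b'}, {'a': 'c'}]) -> {'a': 'c'}, while
+	# stack_dicts([{'a': 'b'}, {'a': 'c'}], 1) -> {'a': 'b c'}
+	# (matching the unit tests earlier in this diff)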
+ final_dict = {}
+ for mydict in dicts:
+ if not mydict:
+ continue
+ for k, v in mydict.items():
+ if k in final_dict and (incremental or (k in incrementals)):
+ final_dict[k] += " " + v
+ else:
+ final_dict[k] = v
+ return final_dict
+
+def append_repo(atom_list, repo_name, remember_source_file=False):
+ """
+ Takes a list of valid atoms without repo spec and appends ::repo_name.
+ If an atom already has a repo part, then it is preserved (see bug #461948).
+ """
+ if remember_source_file:
+ return [(atom.repo is not None and atom or atom.with_repo(repo_name), source) \
+ for atom, source in atom_list]
+ else:
+ return [atom.repo is not None and atom or atom.with_repo(repo_name) \
+ for atom in atom_list]
+
+def stack_lists(lists, incremental=1, remember_source_file=False,
+ warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
+ """Stacks an array of list-types into one array. Optionally removing
+	distinct values using '-value' notation. Entries at higher indices take precedence.
+
+	All elements must be hashable."""
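+	# Behaviour as exercised by the unit tests earlier in this diff:
+	# stack_lists([['a', 'x'], ['b', 'x']]) yields ['a', 'x', 'b'], and
+	# stack_lists([['a'], ['-a']]) yields []. The tests compare results
+	# as sets, since ordering is not guaranteed.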
+ matched_removals = set()
+ unmatched_removals = {}
+ new_list = {}
+ for sub_list in lists:
+ for token in sub_list:
+ token_key = token
+ if remember_source_file:
+ token, source_file = token
+ else:
+ source_file = False
+
+ if token is None:
+ continue
+
+ if incremental:
+ if token == "-*":
+ new_list.clear()
+ elif token[:1] == '-':
+ matched = False
+ if ignore_repo and not "::" in token:
+ #Let -cat/pkg remove cat/pkg::repo.
+ to_be_removed = []
+ token_slice = token[1:]
+ for atom in new_list:
+ atom_without_repo = atom
+ if atom.repo is not None:
+ # Atom.without_repo instantiates a new Atom,
+ # which is unnecessary here, so use string
+ # replacement instead.
+ atom_without_repo = \
+ atom.replace("::" + atom.repo, "", 1)
+ if atom_without_repo == token_slice:
+ to_be_removed.append(atom)
+ if to_be_removed:
+ matched = True
+ for atom in to_be_removed:
+ new_list.pop(atom)
+ else:
+ try:
+ new_list.pop(token[1:])
+ matched = True
+ except KeyError:
+ pass
+
+ if not matched:
+ if source_file and \
+ (strict_warn_for_unmatched_removal or \
+ token_key not in matched_removals):
+ unmatched_removals.setdefault(source_file, set()).add(token)
+ else:
+ matched_removals.add(token_key)
+ else:
+ new_list[token] = source_file
+ else:
+ new_list[token] = source_file
+
+ if warn_for_unmatched_removal:
+ for source_file, tokens in unmatched_removals.items():
+ if len(tokens) > 3:
+ selected = [tokens.pop(), tokens.pop(), tokens.pop()]
+ writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
+ (source_file, ", ".join(selected), len(tokens)),
+ noiselevel=-1)
+ else:
+ writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
+ noiselevel=-1)
+
+ if remember_source_file:
+ return list(new_list.items())
+ else:
+ return list(new_list)
+
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
+ """
+ This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
+
+ @param myfilename: file to process
+ @type myfilename: string (path)
+ @param juststrings: only return strings
+ @type juststrings: Boolean (integer)
+	@param empty: Allow keys with no values (key-only lines)
+ @type empty: Boolean (integer)
+ @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
+ @type recursive: Boolean (integer)
+ @param incremental: Append to the return list, don't overwrite
+ @type incremental: Boolean (integer)
+ @rtype: Dictionary
+ @return:
+ 1. Returns the lines in a file in a dictionary, for example:
+ 'sys-apps/portage x86 amd64 ppc'
+ would return
+ {"sys-apps/portage" : ['x86', 'amd64', 'ppc']}
+ """
+ newdict = {}
+ for x in grablines(myfilename, recursive):
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+		myline = x.split()
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+ if len(myline) < 2 and empty == 0:
+ continue
+ if len(myline) < 1 and empty == 1:
+ continue
+ if incremental:
+ newdict.setdefault(myline[0], []).extend(myline[1:])
+ else:
+ newdict[myline[0]] = myline[1:]
+ if juststrings:
+ for k, v in newdict.items():
+ newdict[k] = " ".join(v)
+ return newdict
+
+_eapi_cache = {}
+
+def read_corresponding_eapi_file(filename, default="0"):
+ """
+ Read the 'eapi' file from the directory 'filename' is in.
+	Returns the default value ("0") if the file is not present or invalid.
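+
+	For example, for "/repo/profiles/package.mask" this reads
+	"/repo/profiles/eapi" (illustrative path). Results are cached
+	per eapi file path.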
+ """
+ eapi_file = os.path.join(os.path.dirname(filename), "eapi")
+ try:
+ eapi = _eapi_cache[eapi_file]
+ except KeyError:
+ pass
+ else:
+ if eapi is None:
+ return default
+ return eapi
+
+ eapi = None
+ try:
+ with io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ lines = f.readlines()
+ if len(lines) == 1:
+ eapi = lines[0].rstrip("\n")
+ else:
+ writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
+ noiselevel=-1)
+ except IOError:
+ pass
+
+ _eapi_cache[eapi_file] = eapi
+ if eapi is None:
+ return default
+ return eapi
+
+def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
+ verify_eapi=False, eapi=None):
+ """ Does the same thing as grabdict except it validates keys
+ with isvalidatom()"""
+
+ if recursive:
+ file_list = _recursive_file_list(myfilename)
+ else:
+ file_list = [myfilename]
+
+ atoms = {}
+ for filename in file_list:
+ d = grabdict(filename, juststrings=False,
+ empty=True, recursive=False, incremental=True)
+ if not d:
+ continue
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+
+ for k, v in d.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
+ noiselevel=-1)
+ else:
+ atoms.setdefault(k, []).extend(v)
+
+ if juststrings:
+ for k, v in atoms.items():
+ atoms[k] = " ".join(v)
+
+ return atoms
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
+ remember_source_file=False, verify_eapi=False, eapi=None):
+
+	pkgs = grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
+ if not pkgs:
+ return pkgs
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+ mybasename = os.path.basename(myfilename)
+ atoms = []
+ for pkg, source_file in pkgs:
+ pkg_orig = pkg
+ # for packages and package.mask files
+ if pkg[:1] == "-":
+ pkg = pkg[1:]
+ if pkg[:1] == '*' and mybasename == 'packages':
+ pkg = pkg[1:]
+ try:
+ pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
+ noiselevel=-1)
+ else:
+ if pkg_orig == _unicode(pkg):
+ # normal atom, so return as Atom instance
+ if remember_source_file:
+ atoms.append((pkg, source_file))
+ else:
+ atoms.append(pkg)
+ else:
+ # atom has special prefix, so return as string
+ if remember_source_file:
+ atoms.append((pkg_orig, source_file))
+ else:
+ atoms.append(pkg_orig)
+ return atoms
+
+def _recursive_basename_filter(f):
+ return not f.startswith(".") and not f.endswith("~")
+
+def _recursive_file_list(path):
+ # path may be a regular file or a directory
+
+ def onerror(e):
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(path)
+
+ stack = [os.path.split(path)]
+
+ while stack:
+ parent, fname = stack.pop()
+ fullpath = os.path.join(parent, fname)
+
+ try:
+ st = os.stat(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ if stat.S_ISDIR(st.st_mode):
+ if fname in VCS_DIRS or not _recursive_basename_filter(fname):
+ continue
+ try:
+ children = os.listdir(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ # Sort in reverse, since we pop from the end of the stack.
+ # Include regular files in the stack, so files are sorted
+ # together with directories.
+ children.sort(reverse=True)
+ stack.extend((fullpath, x) for x in children)
+
+ elif stat.S_ISREG(st.st_mode):
+ if _recursive_basename_filter(fname):
+ yield fullpath
+
+def grablines(myfilename, recursive=0, remember_source_file=False):
+ mylines = []
+ if recursive:
+ for f in _recursive_file_list(myfilename):
+ mylines.extend(grablines(f, recursive=False,
+ remember_source_file=remember_source_file))
+
+ else:
+ try:
+ with io.open(_unicode_encode(myfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as myfile:
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(myfilename)
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ pass
+ else:
+ raise
+ return mylines
+
+def writedict(mydict, myfilename, writekey=True):
+	"""Writes out a dict to a file; in writekey=False mode it doesn't
+	write out the key and assumes all values are strings, not lists."""
+ lines = []
+ if not writekey:
+ for v in mydict.values():
+ lines.append(v + "\n")
+ else:
+ for k, v in mydict.items():
+ lines.append("%s %s\n" % (k, " ".join(v)))
+ write_atomic(myfilename, "".join(lines))
+
+def shlex_split(s):
+ """
+ This is equivalent to shlex.split, but if the current interpreter is
+ python2, it temporarily encodes unicode strings to bytes since python2's
+ shlex.split() doesn't handle unicode strings.
+ """
+ convert_to_bytes = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
+ if convert_to_bytes:
+ s = _unicode_encode(s)
+ rval = shlex.split(s)
+ if convert_to_bytes:
+ rval = [_unicode_decode(x) for x in rval]
+ return rval
+
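+# Behavior sketch (the input string is illustrative):
+#     shlex_split('CFLAGS="-O2 -pipe" x')  ->  ['CFLAGS=-O2 -pipe', 'x']
+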
+class _getconfig_shlex(shlex.shlex):
+
+ def __init__(self, portage_tolerant=False, **kwargs):
+ shlex.shlex.__init__(self, **kwargs)
+ self.__portage_tolerant = portage_tolerant
+
+ def allow_sourcing(self, var_expand_map):
+ self.source = portage._native_string("source")
+ self.var_expand_map = var_expand_map
+
+ def sourcehook(self, newfile):
+ try:
+ newfile = varexpand(newfile, self.var_expand_map)
+ return shlex.shlex.sourcehook(self, newfile)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(newfile)
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
+ raise
+
+ msg = self.error_leader()
+ if e.errno == errno.ENOTDIR:
+ msg += _("%s: Not a directory") % newfile
+ else:
+ msg += _("%s: No such file or directory") % newfile
+
+ if self.__portage_tolerant:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ else:
+ raise ParseError(msg)
+ return (newfile, io.StringIO())
+
+_invalid_var_name_re = re.compile(r'^\d|\W')
+
+def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
+ recursive=False):
+
+ if isinstance(expand, dict):
+ # Some existing variable definitions have been
+ # passed in, for use in substitutions.
+ expand_map = expand
+ expand = True
+ else:
+ expand_map = {}
+ mykeys = {}
+
+ if recursive:
+ # Emulate source commands so that syntax error messages
+ # can display real file names and line numbers.
+ if not expand:
+ expand_map = False
+ fname = None
+ for fname in _recursive_file_list(mycfg):
+ mykeys.update(getconfig(fname, tolerant=tolerant,
+ allow_sourcing=allow_sourcing, expand=expand_map,
+ recursive=False) or {})
+ if fname is None:
+ return None
+ return mykeys
+
+ f = None
+ try:
+ # NOTE: shlex doesn't support unicode objects with Python 2
+ # (produces spurious \0 characters).
+ if sys.hexversion < 0x3000000:
+ f = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ else:
+ f = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+ content = f.read()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mycfg)
+ if e.errno != errno.ENOENT:
+ writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
+ if e.errno not in (errno.EISDIR,):
+ raise
+ return None
+ finally:
+ if f is not None:
+ f.close()
+
+ # Since this file has unicode_literals enabled, and Python 2's
+ # shlex implementation does not support unicode, the following code
+ # uses _native_string() to encode unicode literals when necessary.
+
+ # Workaround for avoiding a silent error in shlex that is
+ # triggered by a source statement at the end of the file
+ # without a trailing newline after the source statement.
+ if content and content[-1] != portage._native_string('\n'):
+ content += portage._native_string('\n')
+
+ # Warn about dos-style line endings since that prevents
+ # people from being able to source them with bash.
+ if portage._native_string('\r') in content:
+ writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
+ "in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
+
+ lex = None
+ try:
+ # The default shlex.sourcehook() implementation
+ # only joins relative paths when the infile
+ # attribute is properly set.
+ lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
+ portage_tolerant=tolerant)
+ lex.wordchars = portage._native_string(string.digits +
+ string.ascii_letters + "~!@#$%*_\:;?,./-+{}")
+ lex.quotes = portage._native_string("\"'")
+ if allow_sourcing:
+ lex.allow_sourcing(expand_map)
+
+ while True:
+ key = _unicode_decode(lex.get_token())
+ if key == "export":
+ key = _unicode_decode(lex.get_token())
+ if key is None:
+ #normal end of file
+ break
+
+ equ = _unicode_decode(lex.get_token())
+ if not equ:
+ msg = lex.error_leader() + _("Unexpected EOF")
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ elif equ != "=":
+ msg = lex.error_leader() + \
+ _("Invalid token '%s' (not '=')") % (equ,)
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ val = _unicode_decode(lex.get_token())
+ if val is None:
+ msg = lex.error_leader() + \
+ _("Unexpected end of config file: variable '%s'") % (key,)
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ if _invalid_var_name_re.search(key) is not None:
+ msg = lex.error_leader() + \
+ _("Invalid variable name '%s'") % (key,)
+ if not tolerant:
+ raise ParseError(msg)
+ writemsg("%s\n" % msg, noiselevel=-1)
+ continue
+
+ if expand:
+ mykeys[key] = varexpand(val, mydict=expand_map,
+ error_leader=lex.error_leader)
+ expand_map[key] = mykeys[key]
+ else:
+ mykeys[key] = val
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ if isinstance(e, ParseError) or lex is None:
+ raise
+ msg = "%s%s" % (lex.error_leader(), e)
+ writemsg("%s\n" % msg, noiselevel=-1)
+ raise
+
+ return mykeys
+
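+# Usage sketch (the file name and contents are hypothetical): given a
+# file /tmp/example.conf containing the two lines
+#     FOO="bar"
+#     BAZ="${FOO} qux"
+# getconfig('/tmp/example.conf') returns
+#     {'FOO': 'bar', 'BAZ': 'bar qux'}
+# since expand=True (the default) substitutes earlier assignments into
+# later values.
+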
+_varexpand_word_chars = frozenset(string.ascii_letters + string.digits + "_")
+_varexpand_unexpected_eof_msg = "unexpected EOF while looking for matching `}'"
+
+def varexpand(mystring, mydict=None, error_leader=None):
+	"""
+	New variable expansion code. Preserves quotes, handles \n, etc.
+	This code is used by the configfile code, as well as others (parser).
+	It would be a good candidate for porting to C.
+	"""
+	if mydict is None:
+		mydict = {}
+ numvars = 0
+ # in single, double quotes
+ insing = 0
+ indoub = 0
+ pos = 0
+ length = len(mystring)
+ newstring = []
+ while pos < length:
+ current = mystring[pos]
+		if current == "'":
+			# Quote removal is handled by shlex, so quotes are
+			# preserved here and only the quoting state is tracked.
+			newstring.append("'")
+			if not indoub:
+				insing = not insing
+			pos += 1
+			continue
+		elif current == '"':
+			newstring.append('"')
+			if not insing:
+				indoub = not indoub
+			pos += 1
+			continue
+ if not insing:
+ #expansion time
+ if current == "\n":
+ #convert newlines to spaces
+ newstring.append(" ")
+ pos += 1
+ elif current == "\\":
+ # For backslash expansion, this function used to behave like
+ # echo -e, but that's not needed for our purposes. We want to
+ # behave like bash does when expanding a variable assignment
+ # in a sourced file, in which case it performs backslash
+ # removal for \\ and \$ but nothing more. It also removes
+ # escaped newline characters. Note that we don't handle
+ # escaped quotes here, since getconfig() uses shlex
+ # to handle that earlier.
+ if pos + 1 >= len(mystring):
+ newstring.append(current)
+ break
+ else:
+ current = mystring[pos + 1]
+ pos += 2
+ if current == "$":
+ newstring.append(current)
+ elif current == "\\":
+ newstring.append(current)
+ # BUG: This spot appears buggy, but it's intended to
+ # be bug-for-bug compatible with existing behavior.
+ if pos < length and \
+ mystring[pos] in ("'", '"', "$"):
+ newstring.append(mystring[pos])
+ pos += 1
+ elif current == "\n":
+ pass
+ else:
+ newstring.append(mystring[pos - 2:pos])
+ continue
+ elif current == "$":
+ pos += 1
+ if mystring[pos] == "{":
+ pos += 1
+ braced = True
+ else:
+ braced = False
+ myvstart = pos
+ while mystring[pos] in _varexpand_word_chars:
+ if pos + 1 >= len(mystring):
+ if braced:
+ msg = _varexpand_unexpected_eof_msg
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ else:
+ pos += 1
+ break
+ pos += 1
+ myvarname = mystring[myvstart:pos]
+ if braced:
+ if mystring[pos] != "}":
+ msg = _varexpand_unexpected_eof_msg
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ else:
+ pos += 1
+ if len(myvarname) == 0:
+ msg = "$"
+ if braced:
+ msg += "{}"
+ msg += ": bad substitution"
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ numvars += 1
+ if myvarname in mydict:
+ newstring.append(mydict[myvarname])
+ else:
+ newstring.append(current)
+ pos += 1
+ else:
+ newstring.append(current)
+ pos += 1
+
+ return "".join(newstring)
+
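+# Behavior sketch (inputs are illustrative):
+#     varexpand('a ${X} "$Y"', {'X': '1', 'Y': '2'})  ->  'a 1 "2"'
+#     varexpand("'$X'", {'X': '1'})                   ->  "'$X'"
+# Double quotes are preserved but do not block expansion, while single
+# quotes suppress expansion entirely.
+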
+# broken and removed, but can still be imported
+pickle_write = None
+
+def pickle_read(filename, default=None, debug=0):
+ if not os.access(filename, os.R_OK):
+ writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
+ return default
+ data = None
+ try:
+ myf = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(myf)
+ data = mypickle.load()
+ myf.close()
+ del mypickle, myf
+ writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
+ data = default
+ return data
+
+def dump_traceback(msg, noiselevel=1):
+ info = sys.exc_info()
+ if not info[2]:
+ stack = traceback.extract_stack()[:-1]
+ error = None
+ else:
+ stack = traceback.extract_tb(info[2])
+ error = str(info[1])
+ writemsg("\n====================================\n", noiselevel=noiselevel)
+ writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+ for line in traceback.format_list(stack):
+ writemsg(line, noiselevel=noiselevel)
+ if error:
+ writemsg(error+"\n", noiselevel=noiselevel)
+ writemsg("====================================\n\n", noiselevel=noiselevel)
+
+class cmp_sort_key(object):
+ """
+ In python-3.0 the list.sort() method no longer has a "cmp" keyword
+ argument. This class acts as an adapter which converts a cmp function
+ into one that's suitable for use as the "key" keyword argument to
+ list.sort(), making it easier to port code for python-3.0 compatibility.
+ It works by generating key objects which use the given cmp function to
+ implement their __lt__ method.
+
+ Beginning with Python 2.7 and 3.2, equivalent functionality is provided
+ by functools.cmp_to_key().
+ """
+ __slots__ = ("_cmp_func",)
+
+ def __init__(self, cmp_func):
+ """
+ @type cmp_func: callable which takes 2 positional arguments
+ @param cmp_func: A cmp function.
+ """
+ self._cmp_func = cmp_func
+
+ def __call__(self, lhs):
+ return self._cmp_key(self._cmp_func, lhs)
+
+ class _cmp_key(object):
+ __slots__ = ("_cmp_func", "_obj")
+
+ def __init__(self, cmp_func, obj):
+ self._cmp_func = cmp_func
+ self._obj = obj
+
+ def __lt__(self, other):
+ if other.__class__ is not self.__class__:
+ raise TypeError("Expected type %s, got %s" % \
+ (self.__class__, other.__class__))
+ return self._cmp_func(self._obj, other._obj) < 0
+
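+# Usage sketch (the data and cmp function are illustrative):
+#     legacy_cmp = lambda a, b: (a > b) - (a < b)
+#     sorted(['c', 'a', 'b'], key=cmp_sort_key(legacy_cmp))
+# returns ['a', 'b', 'c'], matching sorted(..., cmp=legacy_cmp) on
+# python2; sorted() only needs the generated keys' __lt__ method.
+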
+def unique_array(s):
+ """lifted from python cookbook, credit: Tim Peters
+ Return a list of the elements in s in arbitrary order, sans duplicates"""
+ n = len(s)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(s))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = list(s)
+ t.sort()
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in s:
+ if x not in u:
+ u.append(x)
+ return u
+
+def unique_everseen(iterable, key=None):
+ """
+ List unique elements, preserving order. Remember all elements ever seen.
+ Taken from itertools documentation.
+ """
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """Apply user, group, and mode bits to a file if the existing bits do not
+ already match. The default behavior is to force an exact match of mode
+ bits. When mask=0 is specified, mode bits on the target file are allowed
+ to be a superset of the mode argument (via logical OR). When mask>0, the
+ mode bits that the target file is allowed to have are restricted via
+ logical XOR.
+ Returns True if the permissions were modified and False otherwise."""
+
+ modified = False
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ uid = int(uid)
+ gid = int(gid)
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError as oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ if (uid != -1 and uid != stat_cached.st_uid) or \
+ (gid != -1 and gid != stat_cached.st_gid):
+ try:
+ if follow_links:
+ os.chown(filename, uid, gid)
+ else:
+ portage.data.lchown(filename, uid, gid)
+ modified = True
+ except OSError as oe:
+ func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ new_mode = -1
+ st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
+ if mask >= 0:
+ if mode == -1:
+ mode = 0 # Don't add any mode bits when mode is unspecified.
+ else:
+ mode = mode & 0o7777
+ if (mode & st_mode != mode) or \
+ ((mask ^ st_mode) & st_mode != st_mode):
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ elif mode != -1:
+ mode = mode & 0o7777 # protect from unwanted bits
+ if mode != st_mode:
+ new_mode = mode
+
+ # The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ if modified and new_mode == -1 and \
+ (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+ if mode == -1:
+ new_mode = st_mode
+ else:
+ mode = mode & 0o7777
+ if mask >= 0:
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ else:
+ new_mode = mode
+ if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+ new_mode = -1
+
+ if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+ # Mode doesn't matter for symlinks.
+ new_mode = -1
+
+ if new_mode != -1:
+ try:
+ os.chmod(filename, new_mode)
+ modified = True
+ except OSError as oe:
+ func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ raise
+ return modified
+
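+# Sketch of the mask semantics (path and modes are illustrative):
+#     apply_permissions('/tmp/f', mode=0o644, mask=0)
+# leaves an existing 0o755 file unchanged, since mask=0 only requires
+# the 0o644 bits to be present, while the default mask=-1 would chmod
+# the file to exactly 0o644.
+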
+def apply_stat_permissions(filename, newstat, **kwargs):
+ """A wrapper around apply_secpass_permissions that gets
+ uid, gid, and mode from a stat object"""
+ return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+ mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+ dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+ """A wrapper around apply_secpass_permissions that applies permissions
+ recursively. If optional argument onerror is specified, it should be a
+ function; it will be called with one argument, a PortageException instance.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ # Avoid issues with circular symbolic links, as in bug #339670.
+ follow_links = False
+
+ if onerror is None:
+ # Default behavior is to dump errors to stderr so they won't
+ # go unnoticed. Callers can pass in a quiet instance.
+ def onerror(e):
+ if isinstance(e, OperationNotPermitted):
+ writemsg(_("Operation Not Permitted: %s\n") % str(e),
+ noiselevel=-1)
+ elif isinstance(e, FileNotFound):
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ else:
+ raise
+
+ all_applied = True
+ for dirpath, dirnames, filenames in os.walk(top):
+ try:
+ applied = apply_secpass_permissions(dirpath,
+ uid=uid, gid=gid, mode=dirmode, mask=dirmask,
+ follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ all_applied = False
+ onerror(e)
+
+ for name in filenames:
+ try:
+ applied = apply_secpass_permissions(os.path.join(dirpath, name),
+ uid=uid, gid=gid, mode=filemode, mask=filemask,
+ follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ # Ignore InvalidLocation exceptions such as FileNotFound
+ # and DirectoryNotFound since sometimes things disappear,
+ # like when adjusting permissions on DISTCC_DIR.
+ if not isinstance(e, portage.exception.InvalidLocation):
+ all_applied = False
+ onerror(e)
+ return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """A wrapper around apply_permissions that uses secpass and simple
+ logic to apply as much of the permissions as possible without
+ generating an obviously avoidable permission exception. Despite
+ attempts to avoid an exception, it's possible that one will be raised
+ anyway, so be prepared.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError as oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ all_applied = True
+
+ if portage.data.secpass < 2:
+
+ if uid != -1 and \
+ uid != stat_cached.st_uid:
+ all_applied = False
+ uid = -1
+
+ if gid != -1 and \
+ gid != stat_cached.st_gid and \
+ gid not in os.getgroups():
+ all_applied = False
+ gid = -1
+
+ apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ return all_applied
+
+class atomic_ofstream(ObjectProxy):
+ """Write a file atomically via os.rename(). Atomic replacement prevents
+ interprocess interference and prevents corruption of the target
+ file when the write is interrupted (for example, when an 'out of space'
+ error occurs)."""
+
+ def __init__(self, filename, mode='w', follow_links=True, **kargs):
+ """Opens a temporary filename.pid in the same directory as filename."""
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_aborted', False)
+ if 'b' in mode:
+ open_func = open
+ else:
+ open_func = io.open
+ kargs.setdefault('encoding', _encodings['content'])
+ kargs.setdefault('errors', 'backslashreplace')
+
+ if follow_links:
+ canonical_path = os.path.realpath(filename)
+ object.__setattr__(self, '_real_name', canonical_path)
+ tmp_name = "%s.%i" % (canonical_path, os.getpid())
+ try:
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **portage._native_kwargs(kargs)))
+ return
+ except IOError as e:
+ if canonical_path == filename:
+ raise
+ # Ignore this error, since it's irrelevant
+ # and the below open call will produce a
+ # new error if necessary.
+
+ object.__setattr__(self, '_real_name', filename)
+ tmp_name = "%s.%i" % (filename, os.getpid())
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **kargs))
+
+ def _get_target(self):
+ return object.__getattribute__(self, '_file')
+
+ if sys.hexversion >= 0x3000000:
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ else:
+
+ # For TextIOWrapper, automatically coerce write calls to
+ # unicode, in order to avoid TypeError when writing raw
+ # bytes with python2.
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', 'write', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ def write(self, s):
+ f = object.__getattribute__(self, '_file')
+ if isinstance(f, io.TextIOWrapper):
+ s = _unicode_decode(s)
+ return f.write(s)
+
+ def close(self):
+ """Closes the temporary file, copies permissions (if possible),
+ and performs the atomic replacement via os.rename(). If the abort()
+ method has been called, then the temp file is closed and removed."""
+ f = object.__getattribute__(self, '_file')
+ real_name = object.__getattribute__(self, '_real_name')
+ if not f.closed:
+ try:
+ f.close()
+ if not object.__getattribute__(self, '_aborted'):
+ try:
+ apply_stat_permissions(f.name, os.stat(real_name))
+ except OperationNotPermitted:
+ pass
+ except FileNotFound:
+ pass
+ except OSError as oe: # from the above os.stat call
+ if oe.errno in (errno.ENOENT, errno.EPERM):
+ pass
+ else:
+ raise
+ os.rename(f.name, real_name)
+ finally:
+ # Make sure we cleanup the temp file
+ # even if an exception is raised.
+ try:
+ os.unlink(f.name)
+ except OSError as oe:
+ pass
+
+ def abort(self):
+ """If an error occurs while writing the file, the user should
+ call this method in order to leave the target file unchanged.
+ This will call close() automatically."""
+ if not object.__getattribute__(self, '_aborted'):
+ object.__setattr__(self, '_aborted', True)
+ self.close()
+
+ def __del__(self):
+ """If the user does not explicitly call close(), it is
+ assumed that an error has occurred, so we abort()."""
+ try:
+ f = object.__getattribute__(self, '_file')
+ except AttributeError:
+ pass
+ else:
+ if not f.closed:
+ self.abort()
+ # ensure destructor from the base class is called
+ base_destructor = getattr(ObjectProxy, '__del__', None)
+ if base_destructor is not None:
+ base_destructor(self)
+
+def write_atomic(file_path, content, **kwargs):
+ f = None
+ try:
+ f = atomic_ofstream(file_path, **kwargs)
+ f.write(content)
+ f.close()
+ except (IOError, OSError) as e:
+ if f:
+ f.abort()
+ func_call = "write_atomic('%s')" % file_path
+ if e.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
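+# Typical call (path and content are illustrative):
+#     write_atomic('/tmp/example.conf', 'KEY="value"\n')
+# writes to a /tmp/example.conf.<pid> temp file and then rename()s it
+# over the target, so readers never observe a partially written file.
+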
+def ensure_dirs(dir_path, **kwargs):
+ """Create a directory and call apply_permissions.
+ Returns True if a directory is created or the permissions needed to be
+ modified, and False otherwise.
+
+ This function's handling of EEXIST errors makes it useful for atomic
+ directory creation, in which multiple processes may be competing to
+ create the same directory.
+ """
+
+ created_dir = False
+
+ try:
+ os.makedirs(dir_path)
+ created_dir = True
+ except OSError as oe:
+ func_call = "makedirs('%s')" % dir_path
+ if oe.errno in (errno.EEXIST,):
+ pass
+ else:
+ if os.path.isdir(dir_path):
+ # NOTE: DragonFly raises EPERM for makedir('/')
+ # and that is supposed to be ignored here.
+ # Also, sometimes mkdir raises EISDIR on FreeBSD
+ # and we want to ignore that too (bug #187518).
+ pass
+ elif oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ if kwargs:
+ perms_modified = apply_permissions(dir_path, **kwargs)
+ else:
+ perms_modified = False
+ return created_dir or perms_modified
+
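+# Usage sketch (the path is hypothetical):
+#     ensure_dirs('/var/tmp/example', mode=0o755)
+# returns True if the directory was created or its permissions were
+# modified; a concurrent mkdir by another process is tolerated since
+# EEXIST is ignored.
+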
+class LazyItemsDict(UserDict):
+ """A mapping object that behaves like a standard dict except that it allows
+ for lazy initialization of values via callable objects. Lazy items can be
+ overwritten and deleted just as normal items."""
+
+ __slots__ = ('lazy_items',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.lazy_items = {}
+ UserDict.__init__(self, *args, **kwargs)
+
+ def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+ """Add a lazy item for the given key. When the item is requested,
+ value_callable will be called with *pargs and **kwargs arguments."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, False)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
+
+ def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+ """This is like addLazyItem except value_callable will only be called
+ a maximum of 1 time and the result will be cached for future requests."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, True)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ if args:
+ map_obj = args[0]
+ else:
+ map_obj = None
+ if map_obj is None:
+ pass
+ elif isinstance(map_obj, LazyItemsDict):
+ for k in map_obj:
+ if k in map_obj.lazy_items:
+ UserDict.__setitem__(self, k, None)
+ else:
+ UserDict.__setitem__(self, k, map_obj[k])
+ self.lazy_items.update(map_obj.lazy_items)
+ else:
+ UserDict.update(self, map_obj)
+ if kwargs:
+ UserDict.update(self, kwargs)
+
+ def __getitem__(self, item_key):
+ if item_key in self.lazy_items:
+ lazy_item = self.lazy_items[item_key]
+ pargs = lazy_item.pargs
+ if pargs is None:
+ pargs = ()
+ kwargs = lazy_item.kwargs
+ if kwargs is None:
+ kwargs = {}
+ result = lazy_item.func(*pargs, **kwargs)
+ if lazy_item.singleton:
+ self[item_key] = result
+ return result
+
+ else:
+ return UserDict.__getitem__(self, item_key)
+
+ def __setitem__(self, item_key, value):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__setitem__(self, item_key, value)
+
+ def __delitem__(self, item_key):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__delitem__(self, item_key)
+
+ def clear(self):
+ self.lazy_items.clear()
+ UserDict.clear(self)
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ return self.__class__(self)
+
+ def __deepcopy__(self, memo=None):
+ """
+ This forces evaluation of each contained lazy item, and deepcopy of
+ the result. A TypeError is raised if any contained lazy item is not
+ a singleton, since it is not necessarily possible for the behavior
+ of this type of item to be safely preserved.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__class__()
+ memo[id(self)] = result
+ for k in self:
+ k_copy = deepcopy(k, memo)
+ lazy_item = self.lazy_items.get(k)
+ if lazy_item is not None:
+ if not lazy_item.singleton:
+ raise TypeError("LazyItemsDict " + \
+ "deepcopy is unsafe with lazy items that are " + \
+ "not singletons: key=%s value=%s" % (k, lazy_item,))
+ UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
+ return result
+
+ class _LazyItem(object):
+
+ __slots__ = ('func', 'pargs', 'kwargs', 'singleton')
+
+ def __init__(self, func, pargs, kwargs, singleton):
+
+ if not pargs:
+ pargs = None
+ if not kwargs:
+ kwargs = None
+
+ self.func = func
+ self.pargs = pargs
+ self.kwargs = kwargs
+ self.singleton = singleton
+
+ def __copy__(self):
+ return self.__class__(self.func, self.pargs,
+ self.kwargs, self.singleton)
+
+ def __deepcopy__(self, memo=None):
+ """
+ Override this since the default implementation can fail silently,
+ leaving some attributes unset.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__copy__()
+ memo[id(self)] = result
+ result.func = deepcopy(self.func, memo)
+ result.pargs = deepcopy(self.pargs, memo)
+ result.kwargs = deepcopy(self.kwargs, memo)
+ result.singleton = deepcopy(self.singleton, memo)
+ return result
+
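+# Usage sketch (key and callable are illustrative):
+#     d = LazyItemsDict()
+#     d.addLazySingleton('answer', lambda: 42)
+#     d['answer']  # the callable runs on first access; 42 is cached
+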
+class ConfigProtect(object):
+ def __init__(self, myroot, protect_list, mask_list,
+ case_insensitive = False):
+ self.myroot = myroot
+ self.protect_list = protect_list
+ self.mask_list = mask_list
+ self.case_insensitive = case_insensitive
+ self.updateprotect()
+
+ def updateprotect(self):
+ """Update internal state for isprotected() calls. Nonexistent paths
+ are ignored."""
+
+ os = _os_merge
+
+ self.protect = []
+ self._dirs = set()
+ for x in self.protect_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ if self.case_insensitive:
+ ppath = ppath.lower()
+ try:
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protect.append(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to protect it.
+ pass
+
+ self.protectmask = []
+ for x in self.mask_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ if self.case_insensitive:
+ ppath = ppath.lower()
+ try:
+				# Use lstat so that anything, even a broken symlink,
+				# can be protected.
+				if stat.S_ISDIR(os.lstat(ppath).st_mode):
+					self._dirs.add(ppath)
+				self.protectmask.append(ppath)
+				# Now use stat in case this is a symlink to a directory.
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to mask it.
+ pass
+
+ def isprotected(self, obj):
+ """Returns True if obj is protected, False otherwise. The caller must
+ ensure that obj is normalized with a single leading slash. A trailing
+ slash is optional for directories."""
+ masked = 0
+ protected = 0
+ sep = os.path.sep
+ if self.case_insensitive:
+ obj = obj.lower()
+ for ppath in self.protect:
+ if len(ppath) > masked and obj.startswith(ppath):
+ if ppath in self._dirs:
+ if obj != ppath and not obj.startswith(ppath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != ppath:
+ # force exact match when CONFIG_PROTECT lists a
+ # non-directory
+ continue
+ protected = len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if len(pmpath) >= protected and obj.startswith(pmpath):
+ if pmpath in self._dirs:
+ if obj != pmpath and \
+ not obj.startswith(pmpath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != pmpath:
+ # force exact match when CONFIG_PROTECT_MASK lists
+ # a non-directory
+ continue
+ #skip, it's in the mask
+ masked = len(pmpath)
+ return protected > masked
+
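+# Semantics sketch (paths are illustrative and assumed to exist):
+#     cp = ConfigProtect('/', ['/etc'], ['/etc/env.d'])
+#     cp.isprotected('/etc/fstab')       # True
+#     cp.isprotected('/etc/env.d/gcc')   # False, masked by the longer
+#                                        # CONFIG_PROTECT_MASK match
+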
+def new_protect_filename(mydest, newmd5=None, force=False):
+	"""Resolves a config-protect filename for merging, optionally
+	reusing the most recent existing update file when its md5 matches
+	newmd5. If force is True, then a new filename will be generated
+	even if mydest does not exist yet. Returns the path of the target
+	filename as a string.
+	"""
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+
+ os = _os_merge
+
+ prot_num = -1
+ last_pfile = ""
+
+ if not force and \
+ not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in os.listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except ValueError:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = normalize_path(os.path.join(real_dirname,
+ "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+ old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+ if last_pfile and newmd5:
+ try:
+ last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
+ except FileNotFound:
+ # The file suddenly disappeared or it's a broken symlink.
+ pass
+ else:
+ if last_pfile_md5 == newmd5:
+ return old_pfile
+ return new_pfile
+
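+# Naming sketch (the path is hypothetical): the first update for
+# /etc/foo is placed at /etc/._cfg0000_foo, the next at
+# /etc/._cfg0001_foo, and when newmd5 matches the most recent existing
+# ._cfg file, that file's path is returned instead of a new one.
+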
+def find_updated_config_files(target_root, config_protect):
+	"""
+	Yield tuples of configuration files that need to be updated. Each
+	tuple is organized like this:
+	(protected_dir, file_list)
+	If the protected config isn't a protected_dir but a protected_file,
+	the tuple is:
+	(protected_file, None)
+	If no configuration files need to be updated, nothing is yielded.
+	"""
+
+ encoding = _encodings['fs']
+
+ if config_protect:
+ # directories with some protect files in them
+ for x in config_protect:
+ files = []
+
+ x = os.path.join(target_root, x.lstrip(os.path.sep))
+ if not os.access(x, os.W_OK):
+ continue
+ try:
+ mymode = os.lstat(x).st_mode
+ except OSError:
+ continue
+
+ if stat.S_ISLNK(mymode):
+ # We want to treat it like a directory if it
+ # is a symlink to an existing directory.
+ try:
+ real_mode = os.stat(x).st_mode
+ if stat.S_ISDIR(real_mode):
+ mymode = real_mode
+ except OSError:
+ pass
+
+ if stat.S_ISDIR(mymode):
+ mycommand = \
+ "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
+ else:
+ mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
+ os.path.split(x.rstrip(os.path.sep))
+ mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
+ cmd = shlex_split(mycommand)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
+ for arg in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ files = output.split('\0')
+ # split always produces an empty string as the last element
+ if files and not files[-1]:
+ del files[-1]
+ if files:
+ if stat.S_ISDIR(mymode):
+ yield (x, files)
+ else:
+ yield (x, None)
+
+_ld_so_include_re = re.compile(r'^include\s+(\S.*)')
+
+def getlibpaths(root, env=None):
+	"""Return a list of paths that are used for library lookups."""
+ def read_ld_so_conf(path):
+ for l in grabfile(path):
+ include_match = _ld_so_include_re.match(l)
+ if include_match is not None:
+ subpath = os.path.join(os.path.dirname(path),
+ include_match.group(1))
+ for p in glob.glob(subpath):
+ for r in read_ld_so_conf(p):
+ yield r
+ else:
+ yield l
+
+
+ if env is None:
+ env = os.environ
+
+	# PREFIX HACK: LD_LIBRARY_PATH isn't portable and is considered
+	# harmful, so it's better not to use it. We don't need any host OS
+	# lib paths either, so handle the Prefix case separately.
+ if EPREFIX != '':
+ rval = []
+ rval.append(EPREFIX + "/usr/lib")
+ rval.append(EPREFIX + "/lib")
+		# We don't know the CHOST here, so it's a bit hard to guess
+		# where GCC's and ld's libs are. GCC's libs should be in lib
+		# and usr/lib, and binutils' libs are rarely used.
+ else:
+ # the following is based on the information from ld.so(8)
+ rval = env.get("LD_LIBRARY_PATH", "").split(":")
+ rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
+ rval.append("/usr/lib")
+ rval.append("/lib")
+
+ return [normalize_path(x) for x in rval if x]
diff --git a/usr/lib/portage/pym/portage/util/_argparse.py b/usr/lib/portage/pym/portage/util/_argparse.py
new file mode 100644
index 0000000..6ca7852
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_argparse.py
@@ -0,0 +1,42 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ArgumentParser']
+
+try:
+ from argparse import ArgumentParser
+except ImportError:
+ # Compatibility with Python 2.6 and 3.1
+ from optparse import OptionGroup, OptionParser
+
+ from portage.localization import _
+
+ class ArgumentParser(object):
+ def __init__(self, **kwargs):
+ add_help = kwargs.pop("add_help", None)
+ if add_help is not None:
+ kwargs["add_help_option"] = add_help
+ parser = OptionParser(**kwargs)
+ self._parser = parser
+ self.add_argument = parser.add_option
+ self.print_help = parser.print_help
+ self.error = parser.error
+
+ def add_argument_group(self, title=None, **kwargs):
+ optiongroup = OptionGroup(self._parser, title, **kwargs)
+ self._parser.add_option_group(optiongroup)
+ return _ArgumentGroup(optiongroup)
+
+ def parse_known_args(self, args=None, namespace=None):
+ return self._parser.parse_args(args, namespace)
+
+ def parse_args(self, args=None, namespace=None):
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ msg = _('unrecognized arguments: %s')
+ self.error(msg % ' '.join(argv))
+ return args
+
+ class _ArgumentGroup(object):
+ def __init__(self, optiongroup):
+ self.add_argument = optiongroup.add_option
diff --git a/usr/lib/portage/pym/portage/util/_async/AsyncScheduler.py b/usr/lib/portage/pym/portage/util/_async/AsyncScheduler.py
new file mode 100644
index 0000000..9b96c6f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/AsyncScheduler.py
@@ -0,0 +1,102 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollScheduler import PollScheduler
+
+class AsyncScheduler(AsynchronousTask, PollScheduler):
+
+ def __init__(self, max_jobs=None, max_load=None, **kwargs):
+ AsynchronousTask.__init__(self)
+ PollScheduler.__init__(self, **kwargs)
+
+ if max_jobs is None:
+ max_jobs = 1
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+ self._error_count = 0
+ self._running_tasks = set()
+ self._remaining_tasks = True
+ self._term_check_id = None
+ self._loadavg_check_id = None
+
+ def _poll(self):
+ if not (self._is_work_scheduled() or self._keep_scheduling()):
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ self._terminated.set()
+ self._termination_check()
+
+ def _terminate_tasks(self):
+ for task in list(self._running_tasks):
+ task.cancel()
+
+ def _next_task(self):
+ raise NotImplementedError(self)
+
+ def _keep_scheduling(self):
+ return self._remaining_tasks and not self._terminated.is_set()
+
+ def _running_job_count(self):
+ return len(self._running_tasks)
+
+ def _schedule_tasks(self):
+ while self._keep_scheduling() and self._can_add_job():
+ try:
+ task = self._next_task()
+ except StopIteration:
+ self._remaining_tasks = False
+ else:
+ self._running_tasks.add(task)
+ task.scheduler = self._sched_iface
+ task.addExitListener(self._task_exit)
+ task.start()
+
+ # Triggers cleanup and exit listeners if there's nothing left to do.
+ self.poll()
+
+ def _task_exit(self, task):
+ self._running_tasks.discard(task)
+ if task.returncode != os.EX_OK:
+ self._error_count += 1
+ self._schedule()
+
+ def _start(self):
+ self._term_check_id = self._event_loop.idle_add(self._termination_check)
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+ self._schedule()
+
+ def _wait(self):
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+
+ if self._term_check_id is not None:
+ self._event_loop.source_remove(self._term_check_id)
+ self._term_check_id = None
+
+ if self._loadavg_check_id is not None:
+ self._event_loop.source_remove(self._loadavg_check_id)
+ self._loadavg_check_id = None
+
+ if self._error_count > 0:
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ return self.returncode
diff --git a/usr/lib/portage/pym/portage/util/_async/FileCopier.py b/usr/lib/portage/pym/portage/util/_async/FileCopier.py
new file mode 100644
index 0000000..27e5ab4
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/FileCopier.py
@@ -0,0 +1,17 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import shutil
+from portage.util._async.ForkProcess import ForkProcess
+
+class FileCopier(ForkProcess):
+ """
+ Asynchronously copy a file.
+ """
+
+ __slots__ = ('src_path', 'dest_path')
+
+ def _run(self):
+ shutil.copy(self.src_path, self.dest_path)
+ return os.EX_OK
diff --git a/usr/lib/portage/pym/portage/util/_async/FileDigester.py b/usr/lib/portage/pym/portage/util/_async/FileDigester.py
new file mode 100644
index 0000000..881c692
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/FileDigester.py
@@ -0,0 +1,73 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import perform_multiple_checksums
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class FileDigester(ForkProcess):
+ """
+ Asynchronously generate file digests. Pass in file_path and
+ hash_names, and after successful execution, the digests
+ attribute will be a dict containing all of the requested
+ digests.
+ """
+
+ __slots__ = ('file_path', 'digests', 'hash_names',
+ '_digest_pipe_reader', '_digest_pw')
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self.fd_pipes = {}
+ self.fd_pipes[pw] = pw
+ self._digest_pw = pw
+ self._digest_pipe_reader = PipeReader(
+ input_files={"input":pr},
+ scheduler=self.scheduler)
+ self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
+ self._digest_pipe_reader.start()
+ ForkProcess._start(self)
+ os.close(pw)
+
+ def _run(self):
+ digests = perform_multiple_checksums(self.file_path,
+ hashes=self.hash_names)
+
+ buf = "".join("%s=%s\n" % item
+ for item in digests.items()).encode('utf_8')
+
+ while buf:
+ buf = buf[os.write(self._digest_pw, buf):]
+
+ return os.EX_OK
+
+ def _parse_digests(self, data):
+
+ digests = {}
+ for line in data.decode('utf_8').splitlines():
+ parts = line.split('=', 1)
+ if len(parts) == 2:
+ digests[parts[0]] = parts[1]
+
+ self.digests = digests
+
+ def _pipe_logger_exit(self, pipe_logger):
+ # Ignore this event, since we want to ensure that we
+ # exit only after _digest_pipe_reader has reached EOF.
+ self._pipe_logger = None
+
+ def _digest_pipe_reader_exit(self, pipe_reader):
+ self._parse_digests(pipe_reader.getvalue())
+ self._digest_pipe_reader = None
+ self._unregister()
+ self.wait()
+
+ def _unregister(self):
+ ForkProcess._unregister(self)
+
+ pipe_reader = self._digest_pipe_reader
+ if pipe_reader is not None:
+ self._digest_pipe_reader = None
+ pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
+ pipe_reader.cancel()
diff --git a/usr/lib/portage/pym/portage/util/_async/ForkProcess.py b/usr/lib/portage/pym/portage/util/_async/ForkProcess.py
new file mode 100644
index 0000000..25f72d3
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/ForkProcess.py
@@ -0,0 +1,65 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+import traceback
+
+import portage
+from portage import os
+from _emerge.SpawnProcess import SpawnProcess
+
+class ForkProcess(SpawnProcess):
+
+ __slots__ = ()
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+		Fork a subprocess, apply local settings, and call _run().
+ """
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+ return [pid]
+
+ rval = 1
+ try:
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ rval = self._run()
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _run(self):
+ raise NotImplementedError(self)
diff --git a/usr/lib/portage/pym/portage/util/_async/PipeLogger.py b/usr/lib/portage/pym/portage/util/_async/PipeLogger.py
new file mode 100644
index 0000000..aa605d9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/PipeLogger.py
@@ -0,0 +1,163 @@
+# Copyright 2008-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import errno
+import gzip
+import sys
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeLogger(AbstractPollTask):
+
+ """
+ This can be used for logging output of a child process,
+	optionally outputting to log_file_path and/or stdout_fd. It can
+ also monitor for EOF on input_fd, which may be used to detect
+ termination of a child process. If log_file_path ends with
+ '.gz' then the log file is written with compression.
+ """
+
+ __slots__ = ("input_fd", "log_file_path", "stdout_fd") + \
+ ("_log_file", "_log_file_real", "_reg_id")
+
+ def _start(self):
+
+ log_file_path = self.log_file_path
+ if log_file_path is not None:
+
+ self._log_file = open(_unicode_encode(log_file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if log_file_path.endswith('.gz'):
+ self._log_file_real = self._log_file
+ self._log_file = gzip.GzipFile(filename='', mode='ab',
+ fileobj=self._log_file)
+
+ portage.util.apply_secpass_permissions(log_file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if isinstance(self.input_fd, int):
+ fd = self.input_fd
+ else:
+ fd = self.input_fd.fileno()
+
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(fd,
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def _output_handler(self, fd, event):
+
+ background = self.background
+ stdout_fd = self.stdout_fd
+ log_file = self._log_file
+
+ while True:
+ buf = self._read_buf(fd, event)
+
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
+
+ if not buf:
+ # EOF
+ self._unregister()
+ self.wait()
+ break
+
+ else:
+ if not background and stdout_fd is not None:
+ failures = 0
+ stdout_buf = buf
+ while stdout_buf:
+ try:
+ stdout_buf = \
+ stdout_buf[os.write(stdout_fd, stdout_buf):]
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ if log_file is not None:
+ log_file.write(buf)
+ log_file.flush()
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _unregister(self):
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self.input_fd is not None:
+ if isinstance(self.input_fd, int):
+ os.close(self.input_fd)
+ else:
+ self.input_fd.close()
+ self.input_fd = None
+
+ if self.stdout_fd is not None:
+ os.close(self.stdout_fd)
+ self.stdout_fd = None
+
+ if self._log_file is not None:
+ self._log_file.close()
+ self._log_file = None
+
+ if self._log_file_real is not None:
+ # Avoid "ResourceWarning: unclosed file" since python 3.2.
+ self._log_file_real.close()
+ self._log_file_real = None
+
+ self._registered = False
diff --git a/usr/lib/portage/pym/portage/util/_async/PipeReaderBlockingIO.py b/usr/lib/portage/pym/portage/util/_async/PipeReaderBlockingIO.py
new file mode 100644
index 0000000..b06adf6
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/PipeReaderBlockingIO.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReaderBlockingIO(AbstractPollTask):
+ """
+ Reads output from one or more files and saves it in memory, for
+ retrieval via the getvalue() method. This is driven by a thread
+ for each input file, in order to support blocking IO. This may
+ be useful for using threads to handle blocking IO with Jython,
+ since Jython lacks the fcntl module which is needed for
+ non-blocking IO (see http://bugs.jython.org/issue1074).
+ """
+
+ __slots__ = ("input_files", "_read_data", "_terminate",
+ "_threads", "_thread_rlock")
+
+ def _start(self):
+ self._terminate = threading.Event()
+ self._threads = {}
+ self._read_data = []
+
+ self._registered = True
+ self._thread_rlock = threading.RLock()
+ with self._thread_rlock:
+ for f in self.input_files.values():
+ t = threading.Thread(target=self._reader_thread, args=(f,))
+ t.daemon = True
+ t.start()
+ self._threads[f] = t
+
+ def _reader_thread(self, f):
+ try:
+ terminated = self._terminate.is_set
+ except AttributeError:
+ # Jython 2.7.0a2
+ terminated = self._terminate.isSet
+ bufsize = self._bufsize
+ while not terminated():
+ buf = f.read(bufsize)
+ with self._thread_rlock:
+ if terminated():
+ break
+ elif buf:
+ self._read_data.append(buf)
+ else:
+ del self._threads[f]
+ if not self._threads:
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._eof)
+ break
+ f.close()
+
+ def _eof(self):
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
+
+ def _cancel(self):
+ self._terminate.set()
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ self.wait()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ with self._thread_rlock:
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ with self._thread_rlock:
+ self._read_data = None
diff --git a/usr/lib/portage/pym/portage/util/_async/PopenProcess.py b/usr/lib/portage/pym/portage/util/_async/PopenProcess.py
new file mode 100644
index 0000000..2fc56d2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/PopenProcess.py
@@ -0,0 +1,33 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+
+class PopenProcess(SubProcess):
+
+ __slots__ = ("pipe_reader", "proc",)
+
+ def _start(self):
+
+ self.pid = self.proc.pid
+ self._registered = True
+
+ if self.pipe_reader is None:
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+ else:
+ try:
+ self.pipe_reader.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ self.pipe_reader.addExitListener(self._pipe_reader_exit)
+ self.pipe_reader.start()
+
+ def _pipe_reader_exit(self, pipe_reader):
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+
+ def _child_watch_cb(self, pid, condition, user_data=None):
+ self._reg_id = None
+ self._waitpid_cb(pid, condition)
+ self.wait()
diff --git a/usr/lib/portage/pym/portage/util/_async/SchedulerInterface.py b/usr/lib/portage/pym/portage/util/_async/SchedulerInterface.py
new file mode 100644
index 0000000..2ab668e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/SchedulerInterface.py
@@ -0,0 +1,79 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+
+class SchedulerInterface(SlotObject):
+
+ _event_loop_attrs = ("IO_ERR", "IO_HUP", "IO_IN",
+ "IO_NVAL", "IO_OUT", "IO_PRI",
+ "child_watch_add", "idle_add", "io_add_watch",
+ "iteration", "source_remove", "timeout_add")
+
+ __slots__ = _event_loop_attrs + ("_event_loop", "_is_background")
+
+ def __init__(self, event_loop, is_background=None, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._event_loop = event_loop
+ if is_background is None:
+ is_background = self._return_false
+ self._is_background = is_background
+ for k in self._event_loop_attrs:
+ setattr(self, k, getattr(event_loop, k))
+
+ @staticmethod
+ def _return_false():
+ return False
+
+ def output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._is_background(). If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path corresponds
+ to a supported compression type).
+ """
+
+ global_background = self._is_background()
+ if background is None or global_background:
+ # Use the global value if the task does not have a local
+ # background value. For example, parallel-fetch tasks run
+ # in the background while other tasks concurrently run in
+ # the foreground.
+ background = global_background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+ if f_real is not f:
+ f_real.close()
diff --git a/usr/lib/portage/pym/portage/util/_async/TaskScheduler.py b/usr/lib/portage/pym/portage/util/_async/TaskScheduler.py
new file mode 100644
index 0000000..35b3875
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/TaskScheduler.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from .AsyncScheduler import AsyncScheduler
+
+class TaskScheduler(AsyncScheduler):
+
+ """
+ A simple way to handle scheduling of AbstractPollTask instances. Simply
+ pass a task iterator into the constructor and call start(). Use the
+ poll, wait, or addExitListener methods to be notified when all of the
+ tasks have completed.
+ """
+
+ def __init__(self, task_iter, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._task_iter = task_iter
+
+ def _next_task(self):
+ return next(self._task_iter)
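
TaskScheduler only supplies the task source; concurrency limits and the event loop come from AsyncScheduler. A hypothetical usage sketch in comments (the task objects and event loop are assumed to exist elsewhere in portage):

# tasks is any iterable of AbstractPollTask-compatible objects.
# scheduler = TaskScheduler(iter(tasks), max_jobs=2, event_loop=loop)
# scheduler.start()
# scheduler.wait()  # returns once _next_task() raises StopIteration
#                   # and every started task has exited
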
diff --git a/usr/lib/portage/pym/portage/util/_async/__init__.py b/usr/lib/portage/pym/portage/util/_async/__init__.py
new file mode 100644
index 0000000..418ad86
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/util/_async/run_main_scheduler.py b/usr/lib/portage/pym/portage/util/_async/run_main_scheduler.py
new file mode 100644
index 0000000..10fed34
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_async/run_main_scheduler.py
@@ -0,0 +1,41 @@
+
+import signal
+
+def run_main_scheduler(scheduler):
+ """
+ Start and run an AsyncScheduler (or compatible object), and handle
+ SIGINT or SIGTERM by calling its terminate() method and waiting
+ for it to clean up after itself. If SIGINT or SIGTERM is received,
+ return signum, else return None. Any previous SIGINT or SIGTERM
+ signal handlers are automatically saved and restored before
+ returning.
+ """
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ received_signal.append(signum)
+ scheduler.terminate()
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ scheduler.start()
+ scheduler.wait()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ return received_signal[0]
+ return None
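
run_main_scheduler() relies on signal.signal() returning the handler it replaces, which makes the save-and-restore symmetrical. A self-contained sketch of that pattern:

import signal

def handler(signum, frame):
    print("terminating on signal", signum)

# signal.signal() installs the new handler and returns the previous one.
previous = signal.signal(signal.SIGTERM, handler)
try:
    pass  # run the scheduler here
finally:
    # Restore the saved handler, falling back to the default disposition.
    signal.signal(signal.SIGTERM,
        previous if previous is not None else signal.SIG_DFL)
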
diff --git a/usr/lib/portage/pym/portage/util/_ctypes.py b/usr/lib/portage/pym/portage/util/_ctypes.py
new file mode 100644
index 0000000..aeceebc
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_ctypes.py
@@ -0,0 +1,47 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import ctypes
+ import ctypes.util
+except ImportError:
+ ctypes = None
+else:
+ try:
+ ctypes.cdll
+ except AttributeError:
+ ctypes = None
+
+_library_names = {}
+
+def find_library(name):
+ """
+	Calls ctypes.util.find_library() if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ filename = _library_names.get(name)
+ if filename is None:
+ if ctypes is not None:
+ filename = ctypes.util.find_library(name)
+ if filename is None:
+ filename = False
+ _library_names[name] = filename
+
+ if filename is False:
+ return None
+ return filename
+
+_library_handles = {}
+
+def LoadLibrary(name):
+ """
+	Calls ctypes.CDLL(name, use_errno=True) if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ handle = _library_handles.get(name)
+
+ if handle is None and ctypes is not None:
+ handle = ctypes.CDLL(name, use_errno=True)
+ _library_handles[name] = handle
+
+ return handle
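
Both helpers memoize: find_library() caches misses as False so a failed lookup is never repeated, and LoadLibrary() hands back the same handle on every call. Assuming this module is importable as portage.util._ctypes and a C library is present, usage looks like:

from portage.util._ctypes import LoadLibrary, find_library

filename = find_library("c")      # e.g. 'libc.so.6' on glibc; None if absent
if filename is not None:
    libc = LoadLibrary(filename)
    # The second call is served from the _library_handles cache:
    assert LoadLibrary(filename) is libc
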
diff --git a/usr/lib/portage/pym/portage/util/_desktop_entry.py b/usr/lib/portage/pym/portage/util/_desktop_entry.py
new file mode 100644
index 0000000..0b49547
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_desktop_entry.py
@@ -0,0 +1,104 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import re
+import subprocess
+import sys
+
+try:
+ from configparser import Error as ConfigParserError, RawConfigParser
+except ImportError:
+ from ConfigParser import Error as ConfigParserError, RawConfigParser
+
+import portage
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.util import writemsg
+
+def parse_desktop_entry(path):
+ """
+ Parse the given file with RawConfigParser and return the
+ result. This may raise an IOError from io.open(), or a
+ ParsingError from RawConfigParser.
+ """
+ parser = RawConfigParser()
+
+ # use read_file/readfp in order to control decoding of unicode
+ try:
+ # Python >=3.2
+ read_file = parser.read_file
+ except AttributeError:
+ read_file = parser.readfp
+
+ with io.open(_unicode_encode(path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ content = f.read()
+
+ # In Python 3.2, read_file does not support bytes in file names
+ # (see bug #429544), so use StringIO to hide the file name.
+ read_file(io.StringIO(content))
+
+ return parser
+
+_trivial_warnings = re.compile(r' looks redundant with value ')
+
+_ignored_errors = (
+ # Ignore error for emacs.desktop:
+ # https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
+ 'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
+ 'warning: key "Encoding" in group "Desktop Entry" is deprecated'
+)
+
+_ShowIn_exemptions = (
+ # See bug #480586.
+ 'contains an unregistered value "Pantheon"',
+)
+
+def validate_desktop_entry(path):
+ args = ["desktop-file-validate", path]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
+ proc.wait()
+
+ if output_lines:
+ filtered_output = []
+ for line in output_lines:
+ msg = line[len(path)+2:]
+ # "hint:" output is new in desktop-file-utils-0.21
+ if msg.startswith('hint: ') or msg in _ignored_errors:
+ continue
+ if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
+ 'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
+ exempt = False
+ for s in _ShowIn_exemptions:
+ if s in msg:
+ exempt = True
+ break
+ if exempt:
+ continue
+ filtered_output.append(line)
+ output_lines = filtered_output
+
+ if output_lines:
+ output_lines = [line for line in output_lines
+ if _trivial_warnings.search(line) is None]
+
+ return output_lines
+
+if __name__ == "__main__":
+ for arg in sys.argv[1:]:
+ for line in validate_desktop_entry(arg):
+ writemsg(line + "\n", noiselevel=-1)
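
parse_desktop_entry() decodes the file itself and feeds the resulting text to RawConfigParser through StringIO, so the parser never sees a (possibly bytes) file name. The decode-then-parse approach in isolation, stdlib only, on Python 3:

import io
from configparser import RawConfigParser

content = u"[Desktop Entry]\nName=Example\nType=Application\n"
parser = RawConfigParser()
# read_file() receives already-decoded text, never a file name.
parser.read_file(io.StringIO(content))
print(parser.get("Desktop Entry", "Name"))  # -> Example
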
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapELF.py b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapELF.py
new file mode 100644
index 0000000..e4f8ee8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -0,0 +1,820 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+from portage.const import EPREFIX
+
+class LinkageMapELF(object):
+
+ """Models dynamic linker dependencies."""
+
+ _needed_aux_key = "NEEDED.ELF.2"
+ _soname_map_class = slot_dict_class(
+ ("consumers", "providers"), prefix="")
+
+ class _obj_properties_class(object):
+
+ __slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
+ "owner",)
+
+ def __init__(self, arch, needed, runpaths, soname, alt_paths, owner):
+ self.arch = arch
+ self.needed = needed
+ self.runpaths = runpaths
+ self.soname = soname
+ self.alt_paths = alt_paths
+ self.owner = owner
+
+ def __init__(self, vardbapi):
+ self._dbapi = vardbapi
+ self._root = self._dbapi.settings['ROOT']
+ self._libs = {}
+ self._obj_properties = {}
+ self._obj_key_cache = {}
+ self._defpath = set()
+ self._path_key_cache = {}
+
+ def _clear_cache(self):
+ self._libs.clear()
+ self._obj_properties.clear()
+ self._obj_key_cache.clear()
+ self._defpath.clear()
+ self._path_key_cache.clear()
+
+ def _path_key(self, path):
+ key = self._path_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._path_key_cache[path] = key
+ return key
+
+ def _obj_key(self, path):
+ key = self._obj_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._obj_key_cache[path] = key
+ return key
+
+ class _ObjectKey(object):
+
+ """Helper class used as _obj_properties keys for objects."""
+
+ __slots__ = ("_key",)
+
+ def __init__(self, obj, root):
+ """
+ This takes a path to an object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+
+ """
+ self._key = self._generate_object_key(obj, root)
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __eq__(self, other):
+ return self._key == other._key
+
+ def _generate_object_key(self, obj, root):
+ """
+ Generate object key for a given object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+ @rtype: 2-tuple of types (long, int) if object exists. string if
+ object does not exist.
+ @return:
+			1. 2-tuple of object's device and inode from a stat call, if object
+ exists.
+ 2. realpath of object if object does not exist.
+
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+				# Use the realpath as the key if the file does not exist on the
+ # filesystem.
+ return os.path.realpath(abs_path)
+ # Return a tuple of the device and inode.
+ return (object_stat.st_dev, object_stat.st_ino)
+
+ def file_exists(self):
+ """
+ Determine if the file for this key exists on the filesystem.
+
+ @rtype: Boolean
+ @return:
+ 1. True if the file exists.
+ 2. False if the file does not exist or is a broken symlink.
+
+ """
+ return isinstance(self._key, tuple)
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+ Raises CommandNotFound if there are preserved libs
+ and the scanelf binary is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+ # have to call scanelf for preserved libs here as they aren't
+ # registered in NEEDED.ELF.2 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = [EPREFIX + "/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from scanelf: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l[3:].rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "returned from scanelf: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields[1] = fields[1][root_len:]
+ owner = plibs.pop(fields[1], None)
+ lines.append((owner, "scanelf", ";".join(fields)))
+ proc.wait()
+ proc.stdout.close()
+
+ if plibs:
+ # Preserved libraries that did not appear in the scanelf output.
+ # This is known to happen with statically linked libraries.
+ # Generate dummy lines for these, so we can assume that every
+ # preserved library has an entry in self._obj_properties. This
+ # is important in order to prevent findConsumers from raising
+ # an unwanted KeyError.
+ for x, cpv in plibs.items():
+ lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ if '\0' in l:
+ # os.stat() will raise "TypeError: must be encoded string
+ # without NULL bytes, not str" in this case.
+ writemsg_level(_("\nLine contains null byte(s) " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ arch = fields[0]
+ obj = fields[1]
+ soname = fields[2]
+ path = frozenset(normalize_path(x) \
+ for x in filter(None, fields[3].replace(
+ "${ORIGIN}", os.path.dirname(obj)).replace(
+ "$ORIGIN", os.path.dirname(obj)).split(":")))
+ path = frozensets.setdefault(path, path)
+ needed = frozenset(x for x in fields[4].split(",") if x)
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+ myprops = self._obj_properties_class(
+ arch, needed, path, soname, [], owner)
+ obj_properties[obj_key] = myprops
+ # All object paths are added into the obj_properties tuple.
+ myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if soname:
+ soname_map = arch_map.get(soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[soname] = soname_map
+ soname_map.providers.append(obj_key)
+ for needed_soname in needed:
+ soname_map = arch_map.get(needed_soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_soname] = soname_map
+ soname_map.consumers.append(obj_key)
+
+ for arch, sonames in libs.items():
+ for soname_node in sonames.values():
+ soname_node.providers = tuple(set(soname_node.providers))
+ soname_node.consumers = tuple(set(soname_node.consumers))
+
+ def listBrokenBinaries(self, debug=False):
+ """
+ Find binaries and their needed sonames, which have no providers.
+
+ @param debug: Boolean to enable debug output
+ @type debug: Boolean
+ @rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
+ @return: The return value is an object -> set-of-sonames mapping, where
+ object is a broken binary and the set consists of sonames needed by
+ object that have no corresponding libraries to fulfill the dependency.
+
+ """
+
+ os = _os_merge
+
+ class _LibraryCache(object):
+
+ """
+ Caches properties associated with paths.
+
+ The purpose of this class is to prevent multiple instances of
+ _ObjectKey for the same paths.
+
+ """
+
+ def __init__(cache_self):
+ cache_self.cache = {}
+
+ def get(cache_self, obj):
+ """
+ Caches and returns properties associated with an object.
+
+ @param obj: absolute path (can be symlink)
+ @type obj: string (example: '/usr/lib/libfoo.so')
+ @rtype: 4-tuple with types
+ (string or None, string or None, 2-tuple, Boolean)
+ @return: 4-tuple with the following components:
+ 1. arch as a string or None if it does not exist,
+ 2. soname as a string or None if it does not exist,
+ 3. obj_key as 2-tuple,
+ 4. Boolean representing whether the object exists.
+					(example: ('x86_64', 'libfoo.so.1', (123L, 456L), True))
+
+ """
+ if obj in cache_self.cache:
+ return cache_self.cache[obj]
+ else:
+ obj_key = self._obj_key(obj)
+ # Check that the library exists on the filesystem.
+ if obj_key.file_exists():
+ # Get the arch and soname from LinkageMap._obj_properties if
+ # it exists. Otherwise, None.
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ arch = None
+ soname = None
+ else:
+ arch = obj_props.arch
+ soname = obj_props.soname
+ return cache_self.cache.setdefault(obj, \
+ (arch, soname, obj_key, True))
+ else:
+ return cache_self.cache.setdefault(obj, \
+ (None, None, obj_key, False))
+
+ rValue = {}
+ cache = _LibraryCache()
+ providers = self.listProviders()
+
+ # Iterate over all obj_keys and their providers.
+ for obj_key, sonames in providers.items():
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ path = obj_props.runpaths
+ objs = obj_props.alt_paths
+ path = path.union(self._defpath)
+ # Iterate over each needed soname and the set of library paths that
+ # fulfill the soname to determine if the dependency is broken.
+ for soname, libraries in sonames.items():
+ # validLibraries is used to store libraries, which satisfy soname,
+ # so if no valid libraries are found, the soname is not satisfied
+ # for obj_key. If unsatisfied, objects associated with obj_key
+ # must be emerged.
+ validLibraries = set()
+ # It could be the case that the library to satisfy the soname is
+ # not in the obj's runpath, but a symlink to the library is (eg
+ # libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
+ # does not catalog symlinks, broken or missing symlinks may go
+ # unnoticed. As a result of these cases, check that a file with
+ # the same name as the soname exists in obj's runpath.
+ # XXX If we catalog symlinks in LinkageMap, this could be improved.
+ for directory in path:
+ cachedArch, cachedSoname, cachedKey, cachedExists = \
+ cache.get(os.path.join(directory, soname))
+ # Check that this library provides the needed soname. Doing
+ # this, however, will cause consumers of libraries missing
+ # sonames to be unnecessarily emerged. (eg libmix.so)
+ if cachedSoname == soname and cachedArch == arch:
+ validLibraries.add(cachedKey)
+ if debug and cachedKey not in \
+ set(map(self._obj_key_cache.get, libraries)):
+ # XXX This is most often due to soname symlinks not in
+ # a library's directory. We could catalog symlinks in
+ # LinkageMap to avoid checking for this edge case here.
+ writemsg_level(
+ _("Found provider outside of findProviders:") + \
+ (" %s -> %s %s\n" % (os.path.join(directory, soname),
+ self._obj_properties[cachedKey].alt_paths, libraries)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # A valid library has been found, so there is no need to
+ # continue.
+ break
+ if debug and cachedArch == arch and \
+ cachedKey in self._obj_properties:
+ writemsg_level((_("Broken symlink or missing/bad soname: " + \
+ "%(dir_soname)s -> %(cachedKey)s " + \
+ "with soname %(cachedSoname)s but expecting %(soname)s") % \
+ {"dir_soname":os.path.join(directory, soname),
+ "cachedKey": self._obj_properties[cachedKey],
+ "cachedSoname": cachedSoname, "soname":soname}) + "\n",
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # This conditional checks if there are no libraries to satisfy the
+ # soname (empty set).
+ if not validLibraries:
+ for obj in objs:
+ rValue.setdefault(obj, set()).add(soname)
+ # If no valid libraries have been found by this point, then
+ # there are no files named with the soname within obj's runpath,
+ # but if there are libraries (from the providers mapping), it is
+ # likely that soname symlinks or the actual libraries are
+ # missing or broken. Thus those libraries are added to rValue
+ # in order to emerge corrupt library packages.
+ for lib in libraries:
+ rValue.setdefault(lib, set()).add(soname)
+ if debug:
+ if not os.path.isfile(lib):
+ writemsg_level(_("Missing library:") + " %s\n" % (lib,),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ else:
+ writemsg_level(_("Possibly missing symlink:") + \
+ "%s\n" % (os.path.join(os.path.dirname(lib), soname)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ return rValue
+
+ def listProviders(self):
+ """
+ Find the providers for all object keys in LinkageMap.
+
+ @rtype: dict (example:
+ {(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
+ @return: The return value is an object key -> providers mapping, where
+ providers is a mapping of soname -> set-of-library-paths returned
+ from the findProviders method.
+
+ """
+ rValue = {}
+ if not self._libs:
+ self.rebuild()
+ # Iterate over all object keys within LinkageMap.
+ for obj_key in self._obj_properties:
+ rValue.setdefault(obj_key, self.findProviders(obj_key))
+ return rValue
+
+ def isMasterLink(self, obj):
+ """
+ Determine whether an object is a "master" symlink, which means
+ that its basename is the same as the beginning part of the
+ soname and it lacks the soname's version component.
+
+ Examples:
+
+ soname | master symlink name
+ --------------------------------------------
+ libarchive.so.2.8.4 | libarchive.so
+ libproc-3.2.8.so | libproc.so
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/foo')
+ @rtype: Boolean
+ @return:
+ 1. True if obj is a master link
+ 2. False if obj is not a master link
+
+ """
+ os = _os_merge
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+ basename = os.path.basename(obj)
+ soname = self._obj_properties[obj_key].soname
+ return len(basename) < len(soname) and \
+ basename.endswith(".so") and \
+ soname.startswith(basename[:-3])
+
+ def listLibraryObjects(self):
+ """
+ Return a list of library objects.
+
+ Known limitation: library objects lacking an soname are not included.
+
+ @rtype: list of strings
+ @return: list of paths to all providers
+
+ """
+ rValue = []
+ if not self._libs:
+ self.rebuild()
+ for arch_map in self._libs.values():
+ for soname_map in arch_map.values():
+ for obj_key in soname_map.providers:
+ rValue.extend(self._obj_properties[obj_key].alt_paths)
+ return rValue
+
+ def getOwners(self, obj):
+ """
+ Return the package(s) associated with an object. Raises KeyError
+ if the object is unknown. Returns an empty tuple if the owner(s)
+ are unknown.
+
+ NOTE: For preserved libraries, the owner(s) may have been
+ previously uninstalled, but these uninstalled owners can be
+ returned by this method since they are registered in the
+ PreservedLibsRegistry.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: tuple
+ @return: a tuple of cpv
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ else:
+ obj_key = self._obj_key_cache.get(obj)
+ if obj_key is None:
+ raise KeyError("%s not in object list" % obj)
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ raise KeyError("%s not in object list" % obj_key)
+ if obj_props.owner is None:
+ return ()
+ return (obj_props.owner,)
+
+ def getSoname(self, obj):
+ """
+ Return the soname associated with an object.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: string
+ @return: soname as a string
+
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ return self._obj_properties[obj_key].soname
+ if obj not in self._obj_key_cache:
+ raise KeyError("%s not in object list" % obj)
+ return self._obj_properties[self._obj_key_cache[obj]].soname
+
+ def findProviders(self, obj):
+ """
+ Find providers for an object or object key.
+
+ This method may be called with a key from _obj_properties.
+
+ In some cases, not all valid libraries are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. We should consider cataloging symlinks within
+ LinkageMap as this would avoid those cases and would be a better model of
+ library dependencies (since the dynamic linker actually searches for
+ files named with the soname in the runpaths).
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
+ @return: The return value is a soname -> set-of-library-paths, where
+ set-of-library-paths satisfy soname.
+
+ """
+
+ os = _os_merge
+
+ rValue = {}
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key from the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ else:
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ needed = obj_props.needed
+ path = obj_props.runpaths
+ path_keys = set(self._path_key(x) for x in path.union(self._defpath))
+ for soname in needed:
+ rValue[soname] = set()
+ if arch not in self._libs or soname not in self._libs[arch]:
+ continue
+ # For each potential provider of the soname, add it to rValue if it
+ # resides in the obj's runpath.
+ for provider_key in self._libs[arch][soname].providers:
+ providers = self._obj_properties[provider_key].alt_paths
+ for provider in providers:
+ if self._path_key(os.path.dirname(provider)) in path_keys:
+ rValue[soname].add(provider)
+ return rValue
+
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
+ """
+ Find consumers of an object or object key.
+
+ This method may be called with a key from _obj_properties. If this
+		method is going to be called with an object key, do not pass new
+		_ObjectKey instances to this method, since shadowed libraries would
+		then go undetected. Instead pass the obj as a string.
+
+ In some cases, not all consumers are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. For example, this problem is noticeable for
+		binutils since its libraries are added to the path via symlinks that
+		are generated in the /usr/$CHOST/lib/ directory by binutils-config.
+ Failure to recognize consumers of these symlinks makes preserve-libs
+ fail to preserve binutils libs that are needed by these unrecognized
+ consumers.
+
+ Note that library consumption via dlopen (common for kde plugins) is
+ currently undetected. However, it is possible to use the
+ corresponding libtool archive (*.la) files to detect such consumers
+ (revdep-rebuild is able to detect them).
+
+ The exclude_providers argument is useful for determining whether
+ removal of one or more packages will create unsatisfied consumers. When
+ this option is given, consumers are excluded from the results if there
+ is an alternative provider (which is not excluded) of the required
+ soname such that the consumers will remain satisfied if the files
+ owned by exclude_providers are removed.
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @param exclude_providers: A collection of callables that each take a
+ single argument referring to the path of a library (example:
+ '/usr/lib/libssl.so.0.9.8'), and return True if the library is
+ owned by a provider which is planned for removal.
+ @type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
+ @rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
+		@return: The return value is a set of paths of consumers that link
+			against the given object or object key.
+
+ """
+
+ os = _os_merge
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key and the set of objects matching the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ objs = self._obj_properties[obj_key].alt_paths
+ else:
+ objs = set([obj])
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ # If there is another version of this lib with the
+ # same soname and the soname symlink points to that
+ # other version, this lib will be shadowed and won't
+ # have any consumers.
+ if not isinstance(obj, self._ObjectKey):
+ soname = self._obj_properties[obj_key].soname
+ soname_link = os.path.join(self._root,
+ os.path.dirname(obj).lstrip(os.path.sep), soname)
+ obj_path = os.path.join(self._root, obj.lstrip(os.sep))
+ try:
+ soname_st = os.stat(soname_link)
+ obj_st = os.stat(obj_path)
+ except OSError:
+ pass
+ else:
+ if (obj_st.st_dev, obj_st.st_ino) != \
+ (soname_st.st_dev, soname_st.st_ino):
+ return set()
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ soname = obj_props.soname
+
+ soname_node = None
+ arch_map = self._libs.get(arch)
+ if arch_map is not None:
+ soname_node = arch_map.get(soname)
+
+ defpath_keys = set(self._path_key(x) for x in self._defpath)
+ satisfied_consumer_keys = set()
+ if soname_node is not None:
+ if exclude_providers is not None or not greedy:
+ relevant_dir_keys = set()
+ for provider_key in soname_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
+ provider_objs = self._obj_properties[provider_key].alt_paths
+ for p in provider_objs:
+ provider_excluded = False
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
+ if not provider_excluded:
+ # This provider is not excluded. It will
+ # satisfy a consumer of this soname if it
+ # is in the default ld.so path or the
+ # consumer's runpath.
+ relevant_dir_keys.add(
+ self._path_key(os.path.dirname(p)))
+
+ if relevant_dir_keys:
+ for consumer_key in soname_node.consumers:
+ path = self._obj_properties[consumer_key].runpaths
+ path_keys = defpath_keys.copy()
+ path_keys.update(self._path_key(x) for x in path)
+ if relevant_dir_keys.intersection(path_keys):
+ satisfied_consumer_keys.add(consumer_key)
+
+ rValue = set()
+ if soname_node is not None:
+ # For each potential consumer, add it to rValue if an object from the
+ # arguments resides in the consumer's runpath.
+ objs_dir_keys = set(self._path_key(os.path.dirname(x))
+ for x in objs)
+ for consumer_key in soname_node.consumers:
+ if consumer_key in satisfied_consumer_keys:
+ continue
+ consumer_props = self._obj_properties[consumer_key]
+ path = consumer_props.runpaths
+ consumer_objs = consumer_props.alt_paths
+ path_keys = defpath_keys.union(self._path_key(x) for x in path)
+ if objs_dir_keys.intersection(path_keys):
+ rValue.update(consumer_objs)
+ return rValue
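
The whole map is keyed by _ObjectKey, which reduces a path to its (st_dev, st_ino) pair when the file exists; hardlinks and alternative paths to one file therefore collapse to a single node, with alt_paths recording the spellings. The identity test in isolation (any existing file works; /bin/sh is assumed present):

import os

st = os.stat("/bin/sh")
key = (st.st_dev, st.st_ino)

# A second stat of the same path (or of any hardlink to it) yields
# the same key, which is what lets alt_paths accumulate spellings.
st2 = os.stat("/bin/sh")
print(key == (st2.st_dev, st2.st_ino))  # True
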
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapMachO.py b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapMachO.py
new file mode 100644
index 0000000..7cfb18e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapMachO.py
@@ -0,0 +1,770 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+from portage.const import EPREFIX
+
+class LinkageMapMachO(object):
+
+ """Models dynamic linker dependencies."""
+
+ _needed_aux_key = "NEEDED.MACHO.3"
+ _installname_map_class = slot_dict_class(
+ ("consumers", "providers"), prefix="")
+
+ class _obj_properties_class(object):
+
+ __slots__ = ("arch", "needed", "install_name", "alt_paths",
+ "owner",)
+
+ def __init__(self, arch, needed, install_name, alt_paths, owner):
+ self.arch = arch
+ self.needed = needed
+ self.install_name = install_name
+ self.alt_paths = alt_paths
+ self.owner = owner
+
+ def __init__(self, vardbapi):
+ self._dbapi = vardbapi
+ self._root = self._dbapi.settings['ROOT']
+ self._libs = {}
+ self._obj_properties = {}
+ self._obj_key_cache = {}
+ self._path_key_cache = {}
+
+ def _clear_cache(self):
+ self._libs.clear()
+ self._obj_properties.clear()
+ self._obj_key_cache.clear()
+ self._path_key_cache.clear()
+
+ def _path_key(self, path):
+ key = self._path_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._path_key_cache[path] = key
+ return key
+
+ def _obj_key(self, path):
+ key = self._obj_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._obj_key_cache[path] = key
+ return key
+
+ class _ObjectKey(object):
+
+ """Helper class used as _obj_properties keys for objects."""
+
+ __slots__ = ("_key",)
+
+ def __init__(self, obj, root):
+ """
+ This takes a path to an object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+
+ """
+ self._key = self._generate_object_key(obj, root)
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __eq__(self, other):
+ return self._key == other._key
+
+ def _generate_object_key(self, obj, root):
+ """
+ Generate object key for a given object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+ @rtype: 2-tuple of types (long, int) if object exists. string if
+ object does not exist.
+ @return:
+			1. 2-tuple of object's device and inode from a stat call, if object
+ exists.
+ 2. realpath of object if object does not exist.
+
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+				# Use the realpath as the key if the file does not exist on the
+ # filesystem.
+ return os.path.realpath(abs_path)
+ # Return a tuple of the device and inode.
+ return (object_stat.st_dev, object_stat.st_ino)
+
+ def file_exists(self):
+ """
+ Determine if the file for this key exists on the filesystem.
+
+ @rtype: Boolean
+ @return:
+ 1. True if the file exists.
+ 2. False if the file does not exist or is a broken symlink.
+
+ """
+ return isinstance(self._key, tuple)
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+ Raises CommandNotFound if there are preserved libs
+ and the scanmacho binary is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+ # have to call scanmacho for preserved libs here as they aren't
+ # registered in NEEDED.MACHO.3 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = [EPREFIX + "/usr/bin/scanmacho", "-qF", "%a;%F;%S;%n"]
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from scanmacho: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 4:
+ writemsg_level("\nWrong number of fields " + \
+ "returned from scanmacho: %s\n\n" % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields[1] = fields[1][root_len:]
+ owner = plibs.pop(fields[1], None)
+ lines.append((owner, "scanmacho", ";".join(fields)))
+ proc.wait()
+ proc.stdout.close()
+
+ if plibs:
+ # Preserved libraries that did not appear in the scanmacho output.
+ # This is known to happen with statically linked libraries.
+ # Generate dummy lines for these, so we can assume that every
+ # preserved library has an entry in self._obj_properties. This
+ # is important in order to prevent findConsumers from raising
+ # an unwanted KeyError.
+ for x, cpv in plibs.items():
+ lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 4:
+ writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ arch = fields[0]
+ obj = fields[1]
+ install_name = os.path.normpath(fields[2])
+ needed = frozenset(x for x in fields[3].split(",") if x)
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+ myprops = self._obj_properties_class(
+ arch, needed, install_name, [], owner)
+ obj_properties[obj_key] = myprops
+ # All object paths are added into the obj_properties tuple.
+ myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if install_name:
+ installname_map = arch_map.get(install_name)
+ if installname_map is None:
+ installname_map = self._installname_map_class(
+ providers=[], consumers=[])
+ arch_map[install_name] = installname_map
+ installname_map.providers.append(obj_key)
+ for needed_installname in needed:
+ installname_map = arch_map.get(needed_installname)
+ if installname_map is None:
+ installname_map = self._installname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_installname] = installname_map
+ installname_map.consumers.append(obj_key)
+
+ for arch, install_names in libs.items():
+ for install_name_node in install_names.values():
+ install_name_node.providers = tuple(set(install_name_node.providers))
+ install_name_node.consumers = tuple(set(install_name_node.consumers))
+
+ def listBrokenBinaries(self, debug=False):
+ """
+ Find binaries and their needed install_names, which have no providers.
+
+ @param debug: Boolean to enable debug output
+ @type debug: Boolean
+ @rtype: dict (example: {'/usr/bin/foo': set(['/usr/lib/libbar.dylib'])})
+ @return: The return value is an object -> set-of-install_names mapping, where
+ object is a broken binary and the set consists of install_names needed by
+ object that have no corresponding libraries to fulfill the dependency.
+
+ """
+
+ os = _os_merge
+
+ class _LibraryCache(object):
+
+ """
+ Caches properties associated with paths.
+
+ The purpose of this class is to prevent multiple instances of
+ _ObjectKey for the same paths.
+
+ """
+
+ def __init__(cache_self):
+ cache_self.cache = {}
+
+ def get(cache_self, obj):
+ """
+ Caches and returns properties associated with an object.
+
+ @param obj: absolute path (can be symlink)
+ @type obj: string (example: '/usr/lib/libfoo.dylib')
+ @rtype: 4-tuple with types
+ (string or None, string or None, 2-tuple, Boolean)
+ @return: 4-tuple with the following components:
+ 1. arch as a string or None if it does not exist,
+					2. install_name as a string or None if it does not exist,
+					3. obj_key as 2-tuple,
+					4. Boolean representing whether the object exists.
+					(example: ('x86_64', 'libfoo.1.dylib', (123L, 456L), True))
+
+ """
+ if obj in cache_self.cache:
+ return cache_self.cache[obj]
+ else:
+ obj_key = self._obj_key(obj)
+ # Check that the library exists on the filesystem.
+ if obj_key.file_exists():
+ # Get the install_name from LinkageMapMachO._obj_properties if
+ # it exists. Otherwise, None.
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ arch = None
+ install_name = None
+ else:
+ arch = obj_props.arch
+ install_name = obj_props.install_name
+ return cache_self.cache.setdefault(obj, \
+ (arch, install_name, obj_key, True))
+ else:
+ return cache_self.cache.setdefault(obj, \
+ (None, None, obj_key, False))
+
+ rValue = {}
+ cache = _LibraryCache()
+ providers = self.listProviders()
+
+ # Iterate over all obj_keys and their providers.
+ for obj_key, install_names in providers.items():
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ objs = obj_props.alt_paths
+ # Iterate over each needed install_name and the set of
+ # library paths that fulfill the install_name to determine
+ # if the dependency is broken.
+ for install_name, libraries in install_names.items():
+ # validLibraries is used to store libraries, which
+ # satisfy install_name, so if no valid libraries are
+ # found, the install_name is not satisfied for obj_key.
+ # If unsatisfied, objects associated with obj_key must
+ # be emerged.
+ validLibrary = set() # for compat with LinkageMap
+ cachedArch, cachedInstallname, cachedKey, cachedExists = \
+ cache.get(install_name)
+				# Check that this library provides the needed install_name.
+				# Doing this, however, will cause consumers of libraries
+				# missing install_names to be unnecessarily emerged. (eg libmix.so)
+ if cachedInstallname == install_name and cachedArch == arch:
+ validLibrary.add(cachedKey)
+ if debug and cachedKey not in \
+ set(map(self._obj_key_cache.get, libraries)):
+ # XXX This is most often due to soname symlinks not in
+ # a library's directory. We could catalog symlinks in
+ # LinkageMap to avoid checking for this edge case here.
+						print(_("Found provider outside of findProviders:"), \
+							install_name, "->", self._obj_properties[cachedKey].alt_paths)
+ if debug and cachedArch == arch and \
+ cachedKey in self._obj_properties:
+					print(_("Broken symlink or missing/bad install_name:"), \
+						install_name, '->', self._obj_properties[cachedKey].alt_paths, \
+						"with install_name", cachedInstallname, "but expecting", install_name)
+ # This conditional checks if there are no libraries to
+ # satisfy the install_name (empty set).
+ if not validLibrary:
+ for obj in objs:
+ rValue.setdefault(obj, set()).add(install_name)
+ # If no valid libraries have been found by this
+ # point, then the install_name does not exist in the
+ # filesystem, but if there are libraries (from the
+ # providers mapping), it is likely that soname
+ # symlinks or the actual libraries are missing or
+ # broken. Thus those libraries are added to rValue
+ # in order to emerge corrupt library packages.
+ for lib in libraries:
+ rValue.setdefault(lib, set()).add(install_name)
+ if debug:
+ if not os.path.isfile(lib):
+ writemsg_level(_("Missing library:") + " %s\n" % (lib,),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ else:
+ writemsg_level(_("Possibly missing symlink:") + \
+								"%s\n" % (os.path.join(os.path.dirname(lib), os.path.basename(install_name))),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ return rValue
+
+ def listProviders(self):
+ """
+ Find the providers for all object keys in LinkageMap.
+
+ @rtype: dict (example:
+ {(123L, 456L): {'libbar.dylib': set(['/lib/libbar.1.5.dylib'])}})
+	@return: The return value is an object key -> providers mapping, where
+ providers is a mapping of install_name -> set-of-library-paths returned
+ from the findProviders method.
+
+ """
+ rValue = {}
+ if not self._libs:
+ self.rebuild()
+ # Iterate over all object keys within LinkageMap.
+ for obj_key in self._obj_properties:
+ rValue.setdefault(obj_key, self.findProviders(obj_key))
+ return rValue
+
+ def isMasterLink(self, obj):
+ """
+ Determine whether an object is a "master" symlink, which means
+ that its basename is the same as the beginning part of the
+		install_name and it lacks the install_name's version component.
+
+ Examples:
+
+ install_name | master symlink name
+ -----------------------------------------------
+ libarchive.2.8.4.dylib | libarchive.dylib
+ (typically the install_name is libarchive.2.dylib)
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/foo')
+ @rtype: Boolean
+ @return:
+ 1. True if obj is a master link
+ 2. False if obj is not a master link
+
+ """
+ os = _os_merge
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+ basename = os.path.basename(obj)
+ install_name = self._obj_properties[obj_key].install_name
+ return (len(basename) < len(os.path.basename(install_name)) and \
+ basename.endswith(".dylib") and \
+ os.path.basename(install_name).startswith(basename[:-6]))
+
+ def listLibraryObjects(self):
+ """
+ Return a list of library objects.
+
+		Known limitation: library objects lacking an install_name are not included.
+
+ @rtype: list of strings
+ @return: list of paths to all providers
+
+ """
+ rValue = []
+ if not self._libs:
+ self.rebuild()
+ for arch_map in self._libs.values():
+ for soname_map in arch_map.values():
+ for obj_key in soname_map.providers:
+ rValue.extend(self._obj_properties[obj_key].alt_paths)
+ return rValue
+
+ def getOwners(self, obj):
+ """
+ Return the package(s) associated with an object. Raises KeyError
+ if the object is unknown. Returns an empty tuple if the owner(s)
+ are unknown.
+
+ NOTE: For preserved libraries, the owner(s) may have been
+ previously uninstalled, but these uninstalled owners can be
+ returned by this method since they are registered in the
+ PreservedLibsRegistry.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: tuple
+ @return: a tuple of cpv
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ else:
+ obj_key = self._obj_key_cache.get(obj)
+ if obj_key is None:
+ raise KeyError("%s not in object list" % obj)
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ raise KeyError("%s not in object list" % obj_key)
+ if obj_props.owner is None:
+ return ()
+ return (obj_props.owner,)
+
+ def getSoname(self, obj):
+ """
+		Return the install_name associated with an object.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: string
+		@return: install_name as a string
+
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ return self._obj_properties[obj_key].install_name
+ if obj not in self._obj_key_cache:
+ raise KeyError("%s not in object list" % obj)
+ return self._obj_properties[self._obj_key_cache[obj]].install_name
+
+ def findProviders(self, obj):
+ """
+ Find providers for an object or object key.
+
+ This method may be called with a key from _obj_properties.
+
+ In some cases, not all valid libraries are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. We should consider cataloging symlinks within
+ LinkageMap as this would avoid those cases and would be a better model of
+ library dependencies (since the dynamic linker actually searches for
+ files named with the soname in the runpaths).
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @rtype: dict (example: {'libbar.dylib': set(['/lib/libbar.1.5.dylib'])})
+	@return: The return value is an install_name -> set-of-library-paths, where
+ set-of-library-paths satisfy install_name.
+
+ """
+
+ os = _os_merge
+
+ rValue = {}
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key from the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ else:
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ needed = obj_props.needed
+ install_name = obj_props.install_name
+ for install_name in needed:
+ rValue[install_name] = set()
+ if arch not in self._libs or install_name not in self._libs[arch]:
+ continue
+ # For each potential provider of the install_name, add it to
+ # rValue if it exists. (Should be one)
+ for provider_key in self._libs[arch][install_name].providers:
+ providers = self._obj_properties[provider_key].alt_paths
+ for provider in providers:
+ if os.path.exists(provider):
+ rValue[install_name].add(provider)
+ return rValue
+
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
+ """
+ Find consumers of an object or object key.
+
+ This method may be called with a key from _obj_properties. If this
+		method is going to be called with an object key, do not pass new
+		_ObjectKey instances to this method, since shadowed libraries would
+		then go undetected. Instead pass the obj as a string.
+
+ In some cases, not all consumers are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. For example, this problem is noticeable for
+		binutils since its libraries are added to the path via symlinks that
+		are generated in the /usr/$CHOST/lib/ directory by binutils-config.
+ Failure to recognize consumers of these symlinks makes preserve-libs
+ fail to preserve binutils libs that are needed by these unrecognized
+ consumers.
+
+ Note that library consumption via dlopen (common for kde plugins) is
+ currently undetected. However, it is possible to use the
+ corresponding libtool archive (*.la) files to detect such consumers
+ (revdep-rebuild is able to detect them).
+
+ The exclude_providers argument is useful for determining whether
+ removal of one or more packages will create unsatisfied consumers. When
+ this option is given, consumers are excluded from the results if there
+ is an alternative provider (which is not excluded) of the required
+ soname such that the consumers will remain satisfied if the files
+ owned by exclude_providers are removed.
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @param exclude_providers: A collection of callables that each take a
+ single argument referring to the path of a library (example:
+ '/usr/lib/libssl.0.9.8.dylib'), and return True if the library is
+ owned by a provider which is planned for removal.
+ @type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
+ @rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
+		@return: the set of paths of consumers that reference the given
+			object's install_name.
+
+ """
+
+ os = _os_merge
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key and the set of objects matching the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ objs = self._obj_properties[obj_key].alt_paths
+ else:
+ objs = set([obj])
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ # If there is another version of this lib with the
+ # same install_name and the install_name symlink points to that
+ # other version, this lib will be shadowed and won't
+ # have any consumers.
+ if not isinstance(obj, self._ObjectKey):
+ install_name = self._obj_properties[obj_key].install_name
+ master_link = os.path.join(self._root,
+ install_name.lstrip(os.path.sep))
+ obj_path = os.path.join(self._root, obj.lstrip(os.sep))
+ try:
+ master_st = os.stat(master_link)
+ obj_st = os.stat(obj_path)
+ except OSError:
+ pass
+ else:
+ if (obj_st.st_dev, obj_st.st_ino) != \
+ (master_st.st_dev, master_st.st_ino):
+ return set()
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ install_name = obj_props.install_name
+
+ install_name_node = None
+ arch_map = self._libs.get(arch)
+ if arch_map is not None:
+ install_name_node = arch_map.get(install_name)
+
+ satisfied_consumer_keys = set()
+ if install_name_node is not None:
+			if exclude_providers is not None or not greedy:
+ relevant_dir_keys = set()
+ for provider_key in install_name_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
+ provider_objs = self._obj_properties[provider_key].alt_paths
+ for p in provider_objs:
+ provider_excluded = False
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
+ if not provider_excluded:
+ # This provider is not excluded. It will
+ # satisfy a consumer of this install_name.
+ relevant_dir_keys.add(self._path_key(p))
+
+ if relevant_dir_keys:
+ for consumer_key in install_name_node.consumers:
+ satisfied_consumer_keys.add(consumer_key)
+
+ rValue = set()
+ if install_name_node is not None:
+ # For each potential consumer, add it to rValue.
+ for consumer_key in install_name_node.consumers:
+ if consumer_key in satisfied_consumer_keys:
+ continue
+ consumer_props = self._obj_properties[consumer_key]
+ consumer_objs = consumer_props.alt_paths
+ rValue.update(consumer_objs)
+ return rValue
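A hedged sketch of the exclude_providers protocol described above, for checking whether unmerging a package would leave consumers unsatisfied; doomed_contents and owned_by_doomed_pkg are illustrative names, not part of this module:

    doomed_contents = set(['/usr/lib/libssl.0.9.8.dylib'])  # illustrative

    def owned_by_doomed_pkg(lib_path):
        # True if lib_path belongs to a package slated for removal
        return lib_path in doomed_contents

    consumers = linkmap.findConsumers(
        '/usr/lib/libssl.0.9.8.dylib',
        exclude_providers=[owned_by_doomed_pkg], greedy=False)
    # any paths remaining in `consumers` would be broken by the removal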
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapPeCoff.py b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapPeCoff.py
new file mode 100644
index 0000000..fd0ab6e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapPeCoff.py
@@ -0,0 +1,286 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+from portage.const import EPREFIX
+from portage.util._dyn_libs.LinkageMapELF import LinkageMapELF
+
+class LinkageMapPeCoff(LinkageMapELF):
+
+ """Models dynamic linker dependencies."""
+
+	# NEEDED.PECOFF.1 has effectively the _same_ format as NEEDED.ELF.2,
+	# but we keep the relation "scanelf" -> "NEEDED.ELF", "readpecoff" ->
+	# "NEEDED.PECOFF", "scanmacho" -> "NEEDED.MACHO", etc.; others will follow.
+ _needed_aux_key = "NEEDED.PECOFF.1"
+
+ class _ObjectKey(LinkageMapELF._ObjectKey):
+
+ """Helper class used as _obj_properties keys for objects."""
+
+ def _generate_object_key(self, obj, root):
+ """
+			Generate an object key for the given object. This differs from the
+			Linux implementation, since some systems (e.g. Interix) don't have
+			inodes; there the inode field is always zero, or a random value,
+			which makes it unsuitable for identifying a file.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+			@rtype: 2-tuple of types (bool, string)
+			@return:
+				2-tuple of a boolean indicating existence, and the absolute
+				real path of the object
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+ return (False, os.path.realpath(abs_path))
+ # On Interix, the inode field may always be zero, since the
+ # filesystem (NTFS) has no inodes ...
+ return (True, os.path.realpath(abs_path))
+
+ def file_exists(self):
+ """
+ Determine if the file for this key exists on the filesystem.
+
+ @rtype: Boolean
+ @return:
+ 1. True if the file exists.
+ 2. False if the file does not exist or is a broken symlink.
+
+ """
+ return self._key[0]
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+ Raises CommandNotFound if there are preserved libs
+ and the readpecoff binary is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+ # have to call readpecoff for preserved libs here as they aren't
+ # registered in NEEDED.PECOFF.1 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = ["readpecoff", self._dbapi.settings.get('CHOST')]
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from readpecoff: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l[3:].rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "returned from readpecoff: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields[1] = fields[1][root_len:]
+ owner = plibs.pop(fields[1], None)
+ lines.append((owner, "readpecoff", ";".join(fields)))
+ proc.wait()
+
+ if plibs:
+			# Preserved libraries that did not appear in the readpecoff output.
+ # This is known to happen with statically linked libraries.
+ # Generate dummy lines for these, so we can assume that every
+ # preserved library has an entry in self._obj_properties. This
+ # is important in order to prevent findConsumers from raising
+ # an unwanted KeyError.
+ for x, cpv in plibs.items():
+ lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ arch = fields[0]
+ obj = fields[1]
+ soname = fields[2]
+ path = frozenset(normalize_path(x) \
+ for x in filter(None, fields[3].replace(
+ "${ORIGIN}", os.path.dirname(obj)).replace(
+ "$ORIGIN", os.path.dirname(obj)).split(":")))
+ path = frozensets.setdefault(path, path)
+ needed = frozenset(x for x in fields[4].split(",") if x)
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+ myprops = self._obj_properties_class(
+ arch, needed, path, soname, [], owner)
+ obj_properties[obj_key] = myprops
+ # All object paths are added into the obj_properties tuple.
+ myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if soname:
+ soname_map = arch_map.get(soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[soname] = soname_map
+ soname_map.providers.append(obj_key)
+ for needed_soname in needed:
+ soname_map = arch_map.get(needed_soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_soname] = soname_map
+ soname_map.consumers.append(obj_key)
+
+ for arch, sonames in libs.items():
+ for soname_node in sonames.values():
+ soname_node.providers = tuple(set(soname_node.providers))
+ soname_node.consumers = tuple(set(soname_node.consumers))
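For reference, each NEEDED.PECOFF.1 line parsed by rebuild() above carries five semicolon-separated fields: arch, object path, soname, colon-separated runpath, and comma-separated needed sonames. A hand-traced sketch with invented values:

    line = "x86;usr/bin/foo.exe;foo.dll;usr/lib:usr/local/lib;kernel32.dll,msvcrt.dll"
    arch, obj, soname, runpath, needed = line.split(";")[:5]
    runpath_dirs = [p for p in runpath.split(":") if p]   # ['usr/lib', 'usr/local/lib']
    needed_sonames = [n for n in needed.split(",") if n]  # ['kernel32.dll', 'msvcrt.dll']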
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapXCoff.py b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapXCoff.py
new file mode 100644
index 0000000..6c4c994
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/LinkageMapXCoff.py
@@ -0,0 +1,312 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+from portage.const import EPREFIX, BASH_BINARY
+from portage.util._dyn_libs.LinkageMapELF import LinkageMapELF
+
+class LinkageMapXCoff(LinkageMapELF):
+
+ """Models dynamic linker dependencies."""
+
+ _needed_aux_key = "NEEDED.XCOFF.1"
+
+ class _ObjectKey(LinkageMapELF._ObjectKey):
+
+ def __init__(self, obj, root):
+ LinkageMapELF._ObjectKey.__init__(self, obj, root)
+
+ def _generate_object_key(self, obj, root):
+ """
+ Generate object key for a given object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+			@rtype: 3-tuple of types (long, int, string) if object exists.
+				string if object does not exist.
+			@return:
+				1. 3-tuple of the object's device, inode and basename from a
+					stat call, if object exists.
+				2. realpath of object if object does not exist.
+
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+				# Use the realpath as the key if the file does not exist on the
+ # filesystem.
+ return os.path.realpath(abs_path)
+			# Return a tuple of the device and inode, as well as the basename,
+			# because with hardlinks (notably the .libNAME[shr.o] helpers)
+			# the device and inode alone might be identical for distinct files.
+ return (object_stat.st_dev, object_stat.st_ino, os.path.basename(abs_path.rstrip(os.sep)))
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+		Raises CommandNotFound if there are preserved libs
+		and the aixdll-query helper (invoked via bash) is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+		# have to call aixdll-query for preserved libs here as they aren't
+ # registered in NEEDED.XCOFF.1 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = [BASH_BINARY , "-c" , ':'
+ + '; for member in "$@"'
+ + '; do archive=${member}'
+ + '; if [[ ${member##*/} == .*"["*"]" ]]'
+ + '; then member=${member%/.*}/${member##*/.}'
+ + '; archive=${member%[*}'
+ + '; fi'
+ + '; member=${member#${archive}}'
+ + '; [[ -r ${archive} ]] || chmod a+r "${archive}"'
+ + '; eval $(aixdll-query "${archive}${member}" FILE MEMBER FLAGS FORMAT RUNPATH DEPLIBS)'
+ + '; [[ -n ${member} ]] && needed=${FILE##*/} || needed='
+ + '; for deplib in ${DEPLIBS}'
+ + '; do eval deplib=${deplib}'
+ + '; if [[ ${deplib} != "." && ${deplib} != ".." ]]'
+ + '; then needed="${needed}${needed:+,}${deplib}"'
+ + '; fi'
+ + '; done'
+ + '; [[ -n ${MEMBER} ]] && MEMBER="[${MEMBER}]"'
+ + '; [[ " ${FLAGS} " == *" SHROBJ "* ]] && soname=${FILE##*/}${MEMBER} || soname='
+ + '; case ${member:+y}:${MEMBER:+y}'
+ # member requested, member found: show shared archive member
+ + ' in y:y) echo "${FORMAT##* }${FORMAT%%-*};${FILE#${ROOT%/}}${MEMBER};${soname};${RUNPATH};${needed}"'
+ # no member requested, member found: show archive
+ + ' ;; :y) echo "${FORMAT##* }${FORMAT%%-*};${FILE#${ROOT%/}};${FILE##*/};;"'
+ # no member requested, no member found: show standalone shared object
+ + ' ;; : ) echo "${FORMAT##* }${FORMAT%%-*};${FILE#${ROOT%/}};${FILE##*/};${RUNPATH};${needed}"'
+ # member requested, no member found: ignore archive replaced by standalone shared object
+ + ' ;; y: )'
+ + ' ;; esac'
+ + '; done'
+ , 'aixdll-query'
+ ]
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from aixdll-query: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "returned from aixdll-query: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields[1] = fields[1][root_len:]
+ owner = plibs.pop(fields[1], None)
+ lines.append((owner, "aixdll-query", ";".join(fields)))
+ proc.wait()
+ proc.stdout.close()
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ arch = fields[0]
+
+ def as_contentmember(obj):
+ if obj.endswith("]"):
+ if obj.find("/") >= 0:
+ if obj[obj.rfind("/")+1] == ".":
+ return obj
+ return obj[:obj.rfind("/")] + "/." + obj[obj.rfind("/")+1:]
+ if obj[0] == ".":
+ return obj
+ return "." + obj
+ return obj
+
+ obj = as_contentmember(fields[1])
+ soname = as_contentmember(fields[2])
+ path = frozenset(normalize_path(x) \
+ for x in filter(None, fields[3].replace(
+ "${ORIGIN}", os.path.dirname(obj)).replace(
+ "$ORIGIN", os.path.dirname(obj)).split(":")))
+ path = frozensets.setdefault(path, path)
+ needed = frozenset(as_contentmember(x) for x in fields[4].split(",") if x)
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+ myprops = self._obj_properties_class(
+ arch, needed, path, soname, [], owner)
+ obj_properties[obj_key] = myprops
+ # All object paths are added into the obj_properties tuple.
+ myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if soname:
+ soname_map = arch_map.get(soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[soname] = soname_map
+ soname_map.providers.append(obj_key)
+ for needed_soname in needed:
+ soname_map = arch_map.get(needed_soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_soname] = soname_map
+ soname_map.consumers.append(obj_key)
+
+ for arch, sonames in libs.items():
+ for soname_node in sonames.values():
+ soname_node.providers = tuple(set(soname_node.providers))
+ soname_node.consumers = tuple(set(soname_node.consumers))
+
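The local as_contentmember() helper above encodes the AIX convention that shared objects usually live as archive members, recorded with a dot-prefixed member file. Its mapping, traced by hand on invented paths:

    as_contentmember("usr/lib/libfoo.a[shr.o]")   # -> "usr/lib/.libfoo.a[shr.o]"
    as_contentmember("usr/lib/.libfoo.a[shr.o]")  # -> unchanged (already dotted)
    as_contentmember("usr/lib/libbar.so")         # -> unchanged (not an archive member)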
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/usr/lib/portage/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
new file mode 100644
index 0000000..a422ffe
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -0,0 +1,254 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import json
+import logging
+import stat
+import sys
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import PermissionDenied
+from portage.localization import _
+from portage.util import atomic_ofstream
+from portage.util import writemsg_level
+from portage.versions import cpv_getkey
+from portage.locks import lockfile, unlockfile
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class PreservedLibsRegistry(object):
+ """ This class handles the tracking of preserved library objects """
+
+ # JSON read support has been available since portage-2.2.0_alpha89.
+ _json_write = True
+
+ _json_write_opts = {
+ "ensure_ascii": False,
+ "indent": "\t",
+ "sort_keys": True
+ }
+ if sys.hexversion < 0x30200F0:
+ # indent only supports int number of spaces
+ _json_write_opts["indent"] = 4
+
+ def __init__(self, root, filename):
+ """
+ @param root: root used to check existence of paths in pruneNonExisting
+ @type root: String
+ @param filename: absolute path for saving the preserved libs records
+ @type filename: String
+ """
+ self._root = root
+ self._filename = filename
+ self._data = None
+ self._lock = None
+
+ def lock(self):
+ """Grab an exclusive lock on the preserved libs registry."""
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._lock = lockfile(self._filename)
+
+ def unlock(self):
+ """Release our exclusive lock on the preserved libs registry."""
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def load(self):
+ """ Reload the registry data from file """
+ self._data = None
+ f = None
+ content = None
+ try:
+ f = open(_unicode_encode(self._filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ content = f.read()
+ except EnvironmentError as e:
+ if not hasattr(e, 'errno'):
+ raise
+ elif e.errno == errno.ENOENT:
+ pass
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(self._filename)
+ else:
+ raise
+ finally:
+ if f is not None:
+ f.close()
+
+ # content is empty if it's an empty lock file
+ if content:
+ try:
+ self._data = json.loads(_unicode_decode(content,
+ encoding=_encodings['repo.content'], errors='strict'))
+ except SystemExit:
+ raise
+ except Exception as e:
+ try:
+ self._data = pickle.loads(content)
+ except SystemExit:
+ raise
+ except Exception:
+ writemsg_level(_("!!! Error loading '%s': %s\n") %
+ (self._filename, e), level=logging.ERROR,
+ noiselevel=-1)
+
+ if self._data is None:
+ self._data = {}
+ else:
+ for k, v in self._data.items():
+ if isinstance(v, (list, tuple)) and len(v) == 3 and \
+ isinstance(v[2], set):
+ # convert set to list, for write with JSONEncoder
+ self._data[k] = (v[0], v[1], list(v[2]))
+
+ self._data_orig = self._data.copy()
+ self.pruneNonExisting()
+
+ def store(self):
+ """
+ Store the registry data to the file. The existing inode will be
+ replaced atomically, so if that inode is currently being used
+ for a lock then that lock will be rendered useless. Therefore,
+ it is important not to call this method until the current lock
+ is ready to be immediately released.
+ """
+ if os.environ.get("SANDBOX_ON") == "1" or \
+ self._data == self._data_orig:
+ return
+ try:
+ f = atomic_ofstream(self._filename, 'wb')
+ if self._json_write:
+ f.write(_unicode_encode(
+ json.dumps(self._data, **self._json_write_opts),
+ encoding=_encodings['repo.content'], errors='strict'))
+ else:
+ pickle.dump(self._data, f, protocol=2)
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != PermissionDenied.errno:
+ writemsg_level("!!! %s %s\n" % (e, self._filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ self._data_orig = self._data.copy()
+
+ def _normalize_counter(self, counter):
+ """
+ For simplicity, normalize as a unicode string
+ and strip whitespace. This avoids the need for
+ int conversion and a possible ValueError resulting
+ from vardb corruption.
+ """
+ if not isinstance(counter, basestring):
+ counter = str(counter)
+ return _unicode_decode(counter).strip()
+
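A hand-traced illustration of the normalization, assuming a registry instance reg (paths invented):

    reg = PreservedLibsRegistry('/', '/tmp/plib_registry')  # illustrative
    reg._normalize_counter(10)        # -> '10'
    reg._normalize_counter(' 10\n')   # -> '10'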
+ def register(self, cpv, slot, counter, paths):
+ """ Register new objects in the registry. If there is a record with the
+ same packagename (internally derived from cpv) and slot it is
+ overwritten with the new data.
+ @param cpv: package instance that owns the objects
+ @type cpv: CPV (as String)
+ @param slot: the value of SLOT of the given package instance
+ @type slot: String
+ @param counter: vdb counter value for the package instance
+ @type counter: String
+ @param paths: absolute paths of objects that got preserved during an update
+ @type paths: List
+ """
+ cp = cpv_getkey(cpv)
+ cps = cp+":"+slot
+ counter = self._normalize_counter(counter)
+ if len(paths) == 0 and cps in self._data \
+ and self._data[cps][0] == cpv and \
+ self._normalize_counter(self._data[cps][1]) == counter:
+ del self._data[cps]
+ elif len(paths) > 0:
+ if isinstance(paths, set):
+ # convert set to list, for write with JSONEncoder
+ paths = list(paths)
+ self._data[cps] = (cpv, counter, paths)
+
+ def unregister(self, cpv, slot, counter):
+ """ Remove a previous registration of preserved objects for the given package.
+ @param cpv: package instance whose records should be removed
+ @type cpv: CPV (as String)
+		@param slot: the value of SLOT of the given package instance
+		@type slot: String
+		@param counter: vdb counter value for the package instance
+		@type counter: String
+		"""
+ self.register(cpv, slot, counter, [])
+
+ def pruneNonExisting(self):
+ """ Remove all records for objects that no longer exist on the filesystem. """
+
+ os = _os_merge
+
+ for cps in list(self._data):
+ cpv, counter, _paths = self._data[cps]
+
+ paths = []
+ hardlinks = set()
+ symlinks = {}
+ for f in _paths:
+ f_abs = os.path.join(self._root, f.lstrip(os.sep))
+ try:
+ lst = os.lstat(f_abs)
+ except OSError:
+ continue
+ if stat.S_ISLNK(lst.st_mode):
+ try:
+ symlinks[f] = os.readlink(f_abs)
+ except OSError:
+ continue
+ elif stat.S_ISREG(lst.st_mode):
+ hardlinks.add(f)
+ paths.append(f)
+
+			# Only count symlinks as preserved if they still point to a hardlink
+ # in the same directory, in order to handle cases where a tool such
+ # as eselect-opengl has updated the symlink to point to a hardlink
+ # in a different directory (see bug #406837). The unused hardlink
+ # is automatically found by _find_unused_preserved_libs, since the
+ # soname symlink no longer points to it. After the hardlink is
+ # removed by _remove_preserved_libs, it calls pruneNonExisting
+ # which eliminates the irrelevant symlink from the registry here.
+ for f, target in symlinks.items():
+ if os.path.join(os.path.dirname(f), target) in hardlinks:
+ paths.append(f)
+
+ if len(paths) > 0:
+ self._data[cps] = (cpv, counter, paths)
+ else:
+ del self._data[cps]
+
+ def hasEntries(self):
+ """ Check if this registry contains any records. """
+ if self._data is None:
+ self.load()
+ return len(self._data) > 0
+
+ def getPreservedLibs(self):
+ """ Return a mapping of packages->preserved objects.
+ @return mapping of package instances to preserved objects
+ @rtype Dict cpv->list-of-paths
+ """
+ if self._data is None:
+ self.load()
+ rValue = {}
+ for cps in self._data:
+ rValue[self._data[cps][0]] = self._data[cps][2]
+ return rValue
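A minimal lifecycle sketch for the registry, assuming a writable registry file; the cpv, slot, counter and path values are invented:

    reg = PreservedLibsRegistry('/', '/var/lib/portage/preserved_libs_registry')
    reg.lock()
    try:
        reg.load()
        reg.register('dev-libs/foo-1.0', '0', '12345',
            ['/usr/lib/libfoo.so.0'])
        reg.store()
    finally:
        reg.unlock()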
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/__init__.py b/usr/lib/portage/pym/portage/util/_dyn_libs/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/util/_dyn_libs/display_preserved_libs.py b/usr/lib/portage/pym/portage/util/_dyn_libs/display_preserved_libs.py
new file mode 100644
index 0000000..b16478d
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_dyn_libs/display_preserved_libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage.output import colorize
+
+def display_preserved_libs(vardb):
+
+ MAX_DISPLAY = 3
+
+ plibdata = vardb._plib_registry.getPreservedLibs()
+ linkmap = vardb._linkmap
+ consumer_map = {}
+ owners = {}
+
+ try:
+ linkmap.rebuild()
+ except portage.exception.CommandNotFound as e:
+ portage.util.writemsg_level("!!! Command Not Found: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ search_for_owners = set()
+ for cpv in plibdata:
+ internal_plib_keys = set(linkmap._obj_key(f) \
+ for f in plibdata[cpv])
+ for f in plibdata[cpv]:
+ if f in consumer_map:
+ continue
+ consumers = []
+ for c in linkmap.findConsumers(f, greedy=False):
+ # Filter out any consumers that are also preserved libs
+ # belonging to the same package as the provider.
+ if linkmap._obj_key(c) not in internal_plib_keys:
+ consumers.append(c)
+ consumers.sort()
+ consumer_map[f] = consumers
+ search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+ owners = {}
+ for f in search_for_owners:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ owners[f] = owner_set
+
+ all_preserved = set()
+ all_preserved.update(*plibdata.values())
+
+ for cpv in plibdata:
+ print(colorize("WARN", ">>>") + " package: %s" % cpv)
+ samefile_map = {}
+ for f in plibdata[cpv]:
+ obj_key = linkmap._obj_key(f)
+ alt_paths = samefile_map.get(obj_key)
+ if alt_paths is None:
+ alt_paths = set()
+ samefile_map[obj_key] = alt_paths
+ alt_paths.add(f)
+
+ for alt_paths in samefile_map.values():
+ alt_paths = sorted(alt_paths)
+ for p in alt_paths:
+ print(colorize("WARN", " * ") + " - %s" % (p,))
+ f = alt_paths[0]
+ consumers = consumer_map.get(f, [])
+ consumers_non_preserved = [c for c in consumers
+ if c not in all_preserved]
+ if consumers_non_preserved:
+ # Filter the consumers that are preserved libraries, since
+ # they don't need to be rebuilt (see bug #461908).
+ consumers = consumers_non_preserved
+
+ if len(consumers) == MAX_DISPLAY + 1:
+ # Display 1 extra consumer, instead of displaying
+ # "used by 1 other files".
+ max_display = MAX_DISPLAY + 1
+ else:
+ max_display = MAX_DISPLAY
+ for c in consumers[:max_display]:
+ if c in all_preserved:
+ # The owner is displayed elsewhere due to having
+ # its libs preserved, so distinguish this special
+ # case (see bug #461908).
+ owners_desc = "preserved"
+ else:
+ owners_desc = ", ".join(x.mycpv for x in owners.get(c, []))
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (c, owners_desc))
+ if len(consumers) > max_display:
+ print(colorize("WARN", " * ") + " used by %d other files" %
+ (len(consumers) - max_display))
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/EventLoop.py b/usr/lib/portage/pym/portage/util/_eventloop/EventLoop.py
new file mode 100644
index 0000000..8095400
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/EventLoop.py
@@ -0,0 +1,664 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import errno
+import logging
+import os
+import select
+import signal
+import sys
+import time
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+from .PollConstants import PollConstants
+from .PollSelectAdapter import PollSelectAdapter
+
+class EventLoop(object):
+ """
+ An event loop, intended to be compatible with the GLib event loop.
+ Call the iteration method in order to execute one iteration of the
+ loop. The idle_add and timeout_add methods serve as thread-safe
+ means to interact with the loop's thread.
+ """
+
+ supports_multiprocessing = True
+
+ # TODO: Find out why SIGCHLD signals aren't delivered during poll
+ # calls, forcing us to wakeup in order to receive them.
+ _sigchld_interval = 250
+
+ class _child_callback_class(SlotObject):
+ __slots__ = ("callback", "data", "pid", "source_id")
+
+ class _idle_callback_class(SlotObject):
+ __slots__ = ("args", "callback", "calling", "source_id")
+
+ class _io_handler_class(SlotObject):
+ __slots__ = ("args", "callback", "f", "source_id")
+
+ class _timeout_handler_class(SlotObject):
+ __slots__ = ("args", "function", "calling", "interval", "source_id",
+ "timestamp")
+
+ def __init__(self, main=True):
+ """
+ @param main: If True then this is a singleton instance for use
+		@param main: If True then this is a singleton instance for use
+			in the main thread, otherwise it is a local instance which
+			can safely be used in a non-main thread (default is True, so
+ @type main: bool
+ """
+ self._use_signal = main and fcntl is not None
+ self._thread_rlock = threading.RLock()
+ self._thread_condition = threading.Condition(self._thread_rlock)
+ self._poll_event_queue = []
+ self._poll_event_handlers = {}
+ self._poll_event_handler_ids = {}
+ # Increment id for each new handler.
+ self._event_handler_id = 0
+ self._idle_callbacks = {}
+ self._timeout_handlers = {}
+ self._timeout_interval = None
+
+ self._poll_obj = None
+ try:
+ select.epoll
+ except AttributeError:
+ pass
+ else:
+ try:
+ epoll_obj = select.epoll()
+ except IOError:
+ # This happens with Linux 2.4 kernels:
+ # IOError: [Errno 38] Function not implemented
+ pass
+ else:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
+ fcntl.fcntl(epoll_obj.fileno(),
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._poll_obj = _epoll_adapter(epoll_obj)
+ self.IO_ERR = select.EPOLLERR
+ self.IO_HUP = select.EPOLLHUP
+ self.IO_IN = select.EPOLLIN
+ self.IO_NVAL = 0
+ self.IO_OUT = select.EPOLLOUT
+ self.IO_PRI = select.EPOLLPRI
+
+ if self._poll_obj is None:
+ self._poll_obj = create_poll_instance()
+ self.IO_ERR = PollConstants.POLLERR
+ self.IO_HUP = PollConstants.POLLHUP
+ self.IO_IN = PollConstants.POLLIN
+ self.IO_NVAL = PollConstants.POLLNVAL
+ self.IO_OUT = PollConstants.POLLOUT
+ self.IO_PRI = PollConstants.POLLPRI
+
+ self._child_handlers = {}
+ self._sigchld_read = None
+ self._sigchld_write = None
+ self._sigchld_src_id = None
+ self._pid = os.getpid()
+
+ def _new_source_id(self):
+ """
+ Generate a new source id. This method is thread-safe.
+ """
+ with self._thread_rlock:
+ self._event_handler_id += 1
+ return self._event_handler_id
+
+ def _poll(self, timeout=None):
+ """
+ All poll() calls pass through here. The poll events
+ are added directly to self._poll_event_queue.
+ In order to avoid endless blocking, this raises
+ StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+
+ if timeout is None and \
+ not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+
+ while True:
+ try:
+ self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+ break
+ except (IOError, select.error) as e:
+ # Silently handle EINTR, which is normal when we have
+ # received a signal such as SIGINT (epoll objects may
+ # raise IOError rather than select.error, at least in
+ # Python 3.2).
+ if not (e.args and e.args[0] == errno.EINTR):
+ writemsg_level("\n!!! select error: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+
+ # This typically means that we've received a SIGINT, so
+ # raise StopIteration in order to break out of our current
+ # iteration and respond appropriately to the signal as soon
+ # as possible.
+ raise StopIteration("interrupted")
+
+ def iteration(self, *args):
+ """
+ Like glib.MainContext.iteration(), runs a single iteration. In order
+ to avoid blocking forever when may_block is True (the default),
+ callers must be careful to ensure that at least one of the following
+ conditions is met:
+			1) An event source or timeout is registered which is guaranteed
+				to trigger at least one event (a call to an idle function
+ only counts as an event if it returns a False value which
+ causes it to stop being called)
+ 2) Another thread is guaranteed to call one of the thread-safe
+ methods which notify iteration to stop waiting (such as
+ idle_add or timeout_add).
+ These rules ensure that iteration is able to block until an event
+ arrives, without doing any busy waiting that would waste CPU time.
+ @type may_block: bool
+ @param may_block: if True the call may block waiting for an event
+ (default is True).
+ @rtype: bool
+ @return: True if events were dispatched.
+ """
+
+ may_block = True
+
+ if args:
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 argument (%s given)" % len(args))
+ may_block = args[0]
+
+ event_queue = self._poll_event_queue
+ event_handlers = self._poll_event_handlers
+ events_handled = 0
+ timeouts_checked = False
+
+ if not event_handlers:
+ with self._thread_condition:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if not event_handlers and not events_handled and may_block:
+ # Block so that we don't waste cpu time by looping too
+ # quickly. This makes EventLoop useful for code that needs
+ # to wait for timeout callbacks regardless of whether or
+ # not any IO handlers are currently registered.
+ timeout = self._get_poll_timeout()
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = timeout / 1000
+ # NOTE: In order to avoid a possible infinite wait when
+ # wait_timeout is None, the previous _run_timeouts()
+ # call must have returned False *with* _thread_condition
+ # acquired. Otherwise, we would risk going to sleep after
+ # our only notify event has already passed.
+ self._thread_condition.wait(wait_timeout)
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+
+ # If any timeouts have executed, then return immediately,
+ # in order to minimize latency in termination of iteration
+ # loops that they may control.
+ if events_handled or not event_handlers:
+ return bool(events_handled)
+
+ if not event_queue:
+
+ if may_block:
+ timeout = self._get_poll_timeout()
+
+ # Avoid blocking for IO if there are any timeout
+ # or idle callbacks available to process.
+ if timeout != 0 and not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if events_handled:
+ # Minimize latency for loops controlled
+ # by timeout or idle callback events.
+ timeout = 0
+ else:
+ timeout = 0
+
+ try:
+ self._poll(timeout=timeout)
+ except StopIteration:
+ # This can be triggered by EINTR which is caused by signals.
+ pass
+
+ # NOTE: IO event handlers may be re-entrant, in case something
+ # like AbstractPollTask._wait_loop() needs to be called inside
+ # a handler for some reason.
+ while event_queue:
+ events_handled += 1
+ f, event = event_queue.pop()
+ try:
+ x = event_handlers[f]
+ except KeyError:
+ # This is known to be triggered by the epoll
+ # implementation in qemu-user-1.2.2, and appears
+ # to be harmless (see bug #451326).
+ continue
+ if not x.callback(f, event, *x.args):
+ self.source_remove(x.source_id)
+
+ if not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+
+ return bool(events_handled)
+
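A minimal sketch of driving the loop with a timeout callback; all names below are local to the example, and main=False avoids installing signal handlers:

    import time

    loop = EventLoop(main=False)
    ticks = []

    def on_tick():
        ticks.append(time.time())
        return len(ticks) < 3   # returning False removes the source

    loop.timeout_add(100, on_tick)  # call every 100 ms
    while len(ticks) < 3:
        loop.iteration()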
+ def _get_poll_timeout(self):
+
+ with self._thread_rlock:
+ if self._child_handlers:
+ if self._timeout_interval is None:
+ timeout = self._sigchld_interval
+ else:
+ timeout = min(self._sigchld_interval,
+ self._timeout_interval)
+ else:
+ timeout = self._timeout_interval
+
+ return timeout
+
+ def child_watch_add(self, pid, callback, data=None):
+ """
+ Like glib.child_watch_add(), sets callback to be called with the
+ user data specified by data when the child indicated by pid exits.
+ The signature for the callback is:
+
+ def callback(pid, condition, user_data)
+
+		where pid is the child process id, condition is the status
+ information about the child process and user_data is data.
+
+		@type pid: int
+ @param pid: process id of a child process to watch
+ @type callback: callable
+ @param callback: a function to call
+ @type data: object
+ @param data: the optional data to pass to function
+ @rtype: int
+ @return: an integer ID
+ """
+ source_id = self._new_source_id()
+ self._child_handlers[source_id] = self._child_callback_class(
+ callback=callback, data=data, pid=pid, source_id=source_id)
+
+ if self._use_signal:
+ if self._sigchld_read is None:
+ self._sigchld_read, self._sigchld_write = os.pipe()
+
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ # The IO watch is dynamically registered and unregistered as
+ # needed, since we don't want to consider it as a valid source
+ # of events when there are no child listeners. It's important
+ # to distinguish when there are no valid sources of IO events,
+ # in order to avoid an endless poll call if there's no timeout.
+ if self._sigchld_src_id is None:
+ self._sigchld_src_id = self.io_add_watch(
+ self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
+ signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)
+
+ # poll now, in case the SIGCHLD has already arrived
+ self._poll_child_processes()
+ return source_id
+
+ def _sigchld_sig_cb(self, signum, frame):
+ # If this signal handler was not installed by the
+ # current process then the signal doesn't belong to
+ # this EventLoop instance.
+ if os.getpid() == self._pid:
+ os.write(self._sigchld_write, b'\0')
+
+ def _sigchld_io_cb(self, fd, events):
+ try:
+ while True:
+ os.read(self._sigchld_read, 4096)
+ except OSError:
+ # read until EAGAIN
+ pass
+ self._poll_child_processes()
+ return True
+
+ def _poll_child_processes(self):
+ if not self._child_handlers:
+ return False
+
+ calls = 0
+
+ for x in list(self._child_handlers.values()):
+ if x.source_id not in self._child_handlers:
+ # it's already been called via re-entrance
+ continue
+ try:
+ wait_retval = os.waitpid(x.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self.source_remove(x.source_id)
+ else:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if wait_retval[0] != 0:
+ calls += 1
+ self.source_remove(x.source_id)
+ x.callback(x.pid, wait_retval[1], x.data)
+
+ return bool(calls)
+
+ def idle_add(self, callback, *args):
+ """
+ Like glib.idle_add(), if callback returns False it is
+ automatically removed from the list of event sources and will
+ not be called again. This method is thread-safe.
+
+ @type callback: callable
+ @param callback: a function to call
+ @rtype: int
+ @return: an integer ID
+ """
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._idle_callbacks[source_id] = self._idle_callback_class(
+ args=args, callback=callback, source_id=source_id)
+ self._thread_condition.notify()
+ return source_id
+
+ def _run_idle_callbacks(self):
+ # assumes caller has acquired self._thread_rlock
+ if not self._idle_callbacks:
+ return False
+ state_change = 0
+		# Iterate over a local copy, since self._idle_callbacks can be
+		# modified during the execution of these callbacks.
+ for x in list(self._idle_callbacks.values()):
+ if x.source_id not in self._idle_callbacks:
+ # it got cancelled while executing another callback
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ x.calling = True
+ try:
+ if not x.callback(*x.args):
+ state_change += 1
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
+
+ return bool(state_change)
+
+ def timeout_add(self, interval, function, *args):
+ """
+ Like glib.timeout_add(), interval argument is the number of
+ milliseconds between calls to your function, and your function
+ should return False to stop being called, or True to continue
+ being called. Any additional positional arguments given here
+ are passed to your function when it's called. This method is
+ thread-safe.
+ """
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._timeout_handlers[source_id] = \
+ self._timeout_handler_class(
+ interval=interval, function=function, args=args,
+ source_id=source_id, timestamp=time.time())
+ if self._timeout_interval is None or \
+ self._timeout_interval > interval:
+ self._timeout_interval = interval
+ self._thread_condition.notify()
+ return source_id
+
+ def _run_timeouts(self):
+
+ calls = 0
+ if not self._use_signal:
+ if self._poll_child_processes():
+ calls += 1
+
+ with self._thread_rlock:
+
+ if self._run_idle_callbacks():
+ calls += 1
+
+ if not self._timeout_handlers:
+ return bool(calls)
+
+ ready_timeouts = []
+ current_time = time.time()
+ for x in self._timeout_handlers.values():
+ elapsed_seconds = current_time - x.timestamp
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds < 0 or \
+ (x.interval - 1000 * elapsed_seconds) <= 0:
+ ready_timeouts.append(x)
+
+			# Iterate over a local copy, since self._timeout_handlers can be
+			# modified during the execution of these callbacks.
+ for x in ready_timeouts:
+ if x.source_id not in self._timeout_handlers:
+ # it got cancelled while executing another timeout
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ calls += 1
+ x.calling = True
+ try:
+ x.timestamp = time.time()
+ if not x.function(*x.args):
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
+
+ return bool(calls)
+
+ def io_add_watch(self, f, condition, callback, *args):
+ """
+ Like glib.io_add_watch(), your function should return False to
+ stop being called, or True to continue being called. Any
+ additional positional arguments given here are passed to your
+ function when it's called.
+
+ @type f: int or object with fileno() method
+ @param f: a file descriptor to monitor
+ @type condition: int
+ @param condition: a condition mask
+ @type callback: callable
+ @param callback: a function to call
+ @rtype: int
+ @return: an integer ID of the event source
+ """
+ if f in self._poll_event_handlers:
+ raise AssertionError("fd %d is already registered" % f)
+ source_id = self._new_source_id()
+ self._poll_event_handler_ids[source_id] = f
+ self._poll_event_handlers[f] = self._io_handler_class(
+ args=args, callback=callback, f=f, source_id=source_id)
+ self._poll_obj.register(f, condition)
+ return source_id
+
+ def source_remove(self, reg_id):
+ """
+ Like glib.source_remove(), this returns True if the given reg_id
+ is found and removed, and False if the reg_id is invalid or has
+ already been removed.
+ """
+ x = self._child_handlers.pop(reg_id, None)
+ if x is not None:
+ if not self._child_handlers and self._use_signal:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ self.source_remove(self._sigchld_src_id)
+ self._sigchld_src_id = None
+ return True
+
+ with self._thread_rlock:
+ idle_callback = self._idle_callbacks.pop(reg_id, None)
+ if idle_callback is not None:
+ return True
+ timeout_handler = self._timeout_handlers.pop(reg_id, None)
+ if timeout_handler is not None:
+ if timeout_handler.interval == self._timeout_interval:
+ if self._timeout_handlers:
+ self._timeout_interval = min(x.interval
+ for x in self._timeout_handlers.values())
+ else:
+ self._timeout_interval = None
+ return True
+
+ f = self._poll_event_handler_ids.pop(reg_id, None)
+ if f is None:
+ return False
+ self._poll_obj.unregister(f)
+ if self._poll_event_queue:
+ # Discard any unhandled events that belong to this file,
+ # in order to prevent these events from being erroneously
+ # delivered to a future handler that is using a reallocated
+ # file descriptor of the same numeric value (causing
+ # extremely confusing bugs).
+ remaining_events = []
+ discarded_events = False
+ for event in self._poll_event_queue:
+ if event[0] == f:
+ discarded_events = True
+ else:
+ remaining_events.append(event)
+
+ if discarded_events:
+ self._poll_event_queue[:] = remaining_events
+
+ del self._poll_event_handlers[f]
+ return True
+
+_can_poll_device = None
+
+def can_poll_device():
+ """
+ Test if it's possible to use poll() on a device such as a pty. This
+ is known to fail on Darwin.
+ @rtype: bool
+ @return: True if poll() on a device succeeds, False otherwise.
+ """
+
+ global _can_poll_device
+ if _can_poll_device is not None:
+ return _can_poll_device
+
+ if not hasattr(select, "poll"):
+ _can_poll_device = False
+ return _can_poll_device
+
+ try:
+ dev_null = open('/dev/null', 'rb')
+ except IOError:
+ _can_poll_device = False
+ return _can_poll_device
+
+ p = select.poll()
+ try:
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+ except TypeError:
+ # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
+ _can_poll_device = False
+ return _can_poll_device
+
+ invalid_request = False
+ for f, event in p.poll():
+ if event & PollConstants.POLLNVAL:
+ invalid_request = True
+ break
+ dev_null.close()
+
+ _can_poll_device = not invalid_request
+ return _can_poll_device
+
+def create_poll_instance():
+ """
+	Create an instance of select.poll, or an instance of
+	PollSelectAdapter if there is no poll() implementation or
+	it is broken somehow.
+ """
+ if can_poll_device():
+ return select.poll()
+ return PollSelectAdapter()
+
+class _epoll_adapter(object):
+ """
+ Wraps a select.epoll instance in order to make it compatible
+ with select.poll instances. This is necessary since epoll instances
+ interpret timeout arguments differently. Note that the file descriptor
+ that is associated with an epoll instance will close automatically when
+ it is garbage collected, so it's not necessary to close it explicitly.
+ """
+ __slots__ = ('_epoll_obj',)
+
+ def __init__(self, epoll_obj):
+ self._epoll_obj = epoll_obj
+
+ def register(self, fd, *args):
+ self._epoll_obj.register(fd, *args)
+
+ def unregister(self, fd):
+ self._epoll_obj.unregister(fd)
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ timeout = -1
+ if args:
+ timeout = args[0]
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif timeout != 0:
+ timeout = timeout / 1000
+
+ return self._epoll_obj.poll(timeout)
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/GlibEventLoop.py b/usr/lib/portage/pym/portage/util/_eventloop/GlibEventLoop.py
new file mode 100644
index 0000000..f2f5c5e
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/GlibEventLoop.py
@@ -0,0 +1,23 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class GlibEventLoop(object):
+
+ # TODO: Support multiprocessing by using a separate glib.MainContext
+ # instance for each process.
+ supports_multiprocessing = False
+
+ def __init__(self):
+ import gi.repository.GLib as glib
+ self.IO_ERR = glib.IO_ERR
+ self.IO_HUP = glib.IO_HUP
+ self.IO_IN = glib.IO_IN
+ self.IO_NVAL = glib.IO_NVAL
+ self.IO_OUT = glib.IO_OUT
+ self.IO_PRI = glib.IO_PRI
+ self.iteration = glib.main_context_default().iteration
+ self.child_watch_add = glib.child_watch_add
+ self.idle_add = glib.idle_add
+ self.io_add_watch = glib.io_add_watch
+ self.timeout_add = glib.timeout_add
+ self.source_remove = glib.source_remove
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/PollConstants.py b/usr/lib/portage/pym/portage/util/_eventloop/PollConstants.py
new file mode 100644
index 0000000..d0270a9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/PollConstants.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import select
+class PollConstants(object):
+
+ """
+ Provides POLL* constants that are equivalent to those from the
+ select module, for use by PollSelectAdapter.
+ """
+
+ names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
+ v = 1
+ for k in names:
+ locals()[k] = getattr(select, k, v)
+ v *= 2
+ del k, v
+
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/PollSelectAdapter.py b/usr/lib/portage/pym/portage/util/_eventloop/PollSelectAdapter.py
new file mode 100644
index 0000000..32b404b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/PollSelectAdapter.py
@@ -0,0 +1,76 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+from .PollConstants import PollConstants
+import select
+
+class PollSelectAdapter(object):
+
+ """
+ Use select to emulate a poll object, for
+ systems that don't support poll().
+ """
+
+ def __init__(self):
+ self._registered = {}
+ self._select_args = [[], [], []]
+
+ def register(self, fd, *args):
+ """
+ Only POLLIN is currently supported!
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "register expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ eventmask = PollConstants.POLLIN | \
+ PollConstants.POLLPRI | PollConstants.POLLOUT
+ if args:
+ eventmask = args[0]
+
+ self._registered[fd] = eventmask
+ self._select_args = None
+
+ def unregister(self, fd):
+ self._select_args = None
+ del self._registered[fd]
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ timeout = None
+ if args:
+ timeout = args[0]
+
+ select_args = self._select_args
+ if select_args is None:
+ select_args = [list(self._registered), [], []]
+
+ if timeout is not None:
+ select_args = select_args[:]
+ # Translate poll() timeout args to select() timeout args:
+ #
+ # | units | value(s) for indefinite block
+ # ---------|--------------|------------------------------
+ # poll | milliseconds | omitted, negative, or None
+ # ---------|--------------|------------------------------
+ # select | seconds | omitted
+ # ---------|--------------|------------------------------
+
+ if timeout is not None and timeout < 0:
+ timeout = None
+ if timeout is not None:
+ select_args.append(timeout / 1000)
+
+ select_events = select.select(*select_args)
+ poll_events = []
+ for fd in select_events[0]:
+ poll_events.append((fd, PollConstants.POLLIN))
+ return poll_events
+
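
A minimal sketch of the adapter in use. Note that regardless of the
eventmask passed to register(), only POLLIN events are ever reported:

    import os

    from portage.util._eventloop.PollConstants import PollConstants
    from portage.util._eventloop.PollSelectAdapter import PollSelectAdapter

    r, w = os.pipe()
    p = PollSelectAdapter()
    p.register(r, PollConstants.POLLIN)
    os.write(w, b"x")
    print(p.poll(1000))   # [(r, PollConstants.POLLIN)], well within 1000 ms
    p.unregister(r)
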
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/__init__.py b/usr/lib/portage/pym/portage/util/_eventloop/__init__.py
new file mode 100644
index 0000000..418ad86
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/util/_eventloop/global_event_loop.py b/usr/lib/portage/pym/portage/util/_eventloop/global_event_loop.py
new file mode 100644
index 0000000..502dab8
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_eventloop/global_event_loop.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from .EventLoop import EventLoop
+
+_default_constructor = EventLoop
+#from .GlibEventLoop import GlibEventLoop as _default_constructor
+
+# If _default_constructor doesn't support multiprocessing,
+# then _multiprocessing_constructor is used in subprocesses.
+_multiprocessing_constructor = EventLoop
+
+_MAIN_PID = os.getpid()
+_instances = {}
+
+def global_event_loop():
+ """
+ Get a global EventLoop (or compatible object) instance which
+ belongs exclusively to the current process.
+ """
+
+ pid = os.getpid()
+ instance = _instances.get(pid)
+ if instance is not None:
+ return instance
+
+ constructor = _default_constructor
+ if not constructor.supports_multiprocessing and pid != _MAIN_PID:
+ constructor = _multiprocessing_constructor
+
+ instance = constructor()
+ _instances[pid] = instance
+ return instance
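
Usage note: the cache is keyed on os.getpid(), so repeated calls within a
process return the same object, while a forked child builds its own loop
(using _multiprocessing_constructor when the default loop does not support
multiprocessing):

    from portage.util._eventloop.global_event_loop import global_event_loop

    loop = global_event_loop()
    assert loop is global_event_loop()   # cached for this pid
    # After os.fork(), the child's first call constructs a fresh
    # instance, since its new pid is not yet in _instances.
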
diff --git a/usr/lib/portage/pym/portage/util/_get_vm_info.py b/usr/lib/portage/pym/portage/util/_get_vm_info.py
new file mode 100644
index 0000000..e8ad938
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_get_vm_info.py
@@ -0,0 +1,80 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import platform
+import subprocess
+
+from portage import _unicode_decode
+
+def get_vm_info():
+
+ vm_info = {}
+
+ if platform.system() == 'Linux':
+ try:
+ proc = subprocess.Popen(["free"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split()
+ if len(line) < 2:
+ continue
+ if line[0] == "Mem:":
+ try:
+ vm_info["ram.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["ram.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+ elif line[0] == "Swap:":
+ try:
+ vm_info["swap.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["swap.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+
+ else:
+
+ try:
+ proc = subprocess.Popen(["sysctl", "-a"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split(":", 1)
+ if len(line) != 2:
+ continue
+ line[1] = line[1].strip()
+ if line[0] == "hw.physmem":
+ try:
+ vm_info["ram.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "vm.swap_total":
+ try:
+ vm_info["swap.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "Free Memory Pages":
+ if line[1][-1] == "K":
+ try:
+ vm_info["ram.free"] = int(line[1][:-1]) * 1024
+ except ValueError:
+ pass
+
+ return vm_info
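
get_vm_info() parses `free` output on Linux and `sysctl -a` output
elsewhere, and it silently omits any key it cannot determine, so callers
should use dict.get():

    from portage.util._get_vm_info import get_vm_info

    info = get_vm_info()
    total = info.get("ram.total")   # bytes, when available
    if total is not None:
        print("RAM: %d MiB" % (total // (1024 * 1024)))
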
diff --git a/usr/lib/portage/pym/portage/util/_info_files.py b/usr/lib/portage/pym/portage/util/_info_files.py
new file mode 100644
index 0000000..de44b0f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_info_files.py
@@ -0,0 +1,139 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import re
+import stat
+import subprocess
+
+import portage
+from portage import os
+from portage.const import EPREFIX
+
+def chk_updated_info_files(root, infodirs, prev_mtimes):
+
+ if os.path.exists(EPREFIX + "/usr/bin/install-info"):
+ out = portage.output.EOutput()
+ regen_infodirs = []
+ for z in infodirs:
+ if z == '':
+ continue
+ inforoot = portage.util.normalize_path(root + EPREFIX + z)
+ if os.path.isdir(inforoot) and \
+ not [x for x in os.listdir(inforoot) \
+ if x.startswith('.keepinfodir')]:
+ infomtime = os.stat(inforoot)[stat.ST_MTIME]
+ if inforoot not in prev_mtimes or \
+ prev_mtimes[inforoot] != infomtime:
+ regen_infodirs.append(inforoot)
+
+ if not regen_infodirs:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("GNU info directory index is up-to-date.")
+ else:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("Regenerating GNU info directory index...")
+
+ dir_extensions = ("", ".gz", ".bz2")
+ icount = 0
+ badcount = 0
+ errmsg = ""
+ for inforoot in regen_infodirs:
+ if inforoot == '':
+ continue
+
+ if not os.path.isdir(inforoot) or \
+ not os.access(inforoot, os.W_OK):
+ continue
+
+ file_list = os.listdir(inforoot)
+ file_list.sort()
+ dir_file = os.path.join(inforoot, "dir")
+ moved_old_dir = False
+ processed_count = 0
+ for x in file_list:
+ if x.startswith(".") or \
+ os.path.isdir(os.path.join(inforoot, x)):
+ continue
+ if x.startswith("dir"):
+ skip = False
+ for ext in dir_extensions:
+ if x == "dir" + ext or \
+ x == "dir" + ext + ".old":
+ skip = True
+ break
+ if skip:
+ continue
+ if processed_count == 0:
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext, dir_file + ext + ".old")
+ moved_old_dir = True
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ processed_count += 1
+ try:
+ proc = subprocess.Popen(
+ ['%s/usr/bin/install-info' % EPREFIX,
+ '--dir-file=%s' % os.path.join(inforoot, "dir"),
+ os.path.join(inforoot, x)],
+ env=dict(os.environ, LANG="C", LANGUAGE="C"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myso = None
+ else:
+ myso = portage._unicode_decode(
+ proc.communicate()[0]).rstrip("\n")
+ proc.wait()
+ existsstr = "already exists, for file `"
+ if myso:
+ if re.search(existsstr, myso):
+ # Already exists... Don't increment the count for this.
+ pass
+ elif myso[:44] == "install-info: warning: no info dir entry in ":
+ # This info file doesn't contain a DIR-header: install-info produces this
+ # (harmless) warning (the --quiet switch doesn't seem to work).
+ # Don't increment the count for this.
+ pass
+ else:
+ badcount += 1
+ errmsg += myso + "\n"
+ icount += 1
+
+ if moved_old_dir and not os.path.exists(dir_file):
+ # We didn't generate a new dir file, so put the old file
+ # back where it was originally found.
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext + ".old", dir_file + ext)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Clean up dir.old cruft so that it doesn't prevent
+ # unmerge of otherwise empty directories.
+ for ext in dir_extensions:
+ try:
+ os.unlink(dir_file + ext + ".old")
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Update the mtime so we can potentially avoid regenerating.
+ prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+ if badcount:
+ out.eerror("Processed %d info files; %d errors." % \
+ (icount, badcount))
+ portage.util.writemsg_level(errmsg,
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ if icount > 0 and portage.util.noiselimit >= 0:
+ out.einfo("Processed %d info files." % (icount,))
diff --git a/usr/lib/portage/pym/portage/util/_path.py b/usr/lib/portage/pym/portage/util/_path.py
new file mode 100644
index 0000000..6fbcb43
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_path.py
@@ -0,0 +1,27 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage import os
+from portage.exception import PermissionDenied
+
+def exists_raise_eaccess(path):
+ try:
+ os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return True
+
+def isdir_raise_eaccess(path):
+ try:
+ st = os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return stat.S_ISDIR(st.st_mode)
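
Unlike os.path.exists() and os.path.isdir(), which report False on
EACCES, these helpers surface the permission problem to the caller. A
sketch (the path is illustrative):

    from portage.exception import PermissionDenied
    from portage.util._path import isdir_raise_eaccess

    try:
        if isdir_raise_eaccess("/root/private"):
            print("is a directory")
    except PermissionDenied as e:
        print("cannot stat: %s" % (e,))
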
diff --git a/usr/lib/portage/pym/portage/util/_pty.py b/usr/lib/portage/pym/portage/util/_pty.py
new file mode 100644
index 0000000..13e468b
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_pty.py
@@ -0,0 +1,78 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import pty
+import termios
+
+from portage import os
+from portage.output import get_term_size, set_term_size
+from portage.util import writemsg
+
+# Disable the use of openpty on Solaris, as it seems Python's openpty
+# implementation doesn't play nice with Portage's behaviour there,
+# causing hangs/deadlocks.
+# Additional note for the future: on Interix, pipes do NOT work, so
+# _disable_openpty on Interix must *never* be True
+_disable_openpty = platform.system() in ("SunOS","FreeMiNT",)
+
+_fbsd_test_pty = platform.system() == 'FreeBSD'
+
+def _create_pty_or_pipe(copy_term_size=None):
+ """
+ Try to create a pty, and if that fails, create a normal
+ pipe instead.
+
+ @param copy_term_size: If a tty file descriptor is given
+ then the term size will be copied to the pty.
+ @type copy_term_size: int
+ @rtype: tuple
+ @return: A tuple of (is_pty, master_fd, slave_fd) where
+ is_pty is True if a pty was successfully allocated, and
+ False if a normal pipe was allocated.
+ """
+
+ got_pty = False
+
+ global _disable_openpty, _fbsd_test_pty
+
+ if _fbsd_test_pty and not _disable_openpty:
+ # Test for python openpty breakage after freebsd7 to freebsd8
+ # upgrade, which results in a 'Function not implemented' error
+ # and the process being killed.
+ pid = os.fork()
+ if pid == 0:
+ pty.openpty()
+ os._exit(os.EX_OK)
+ pid, status = os.waitpid(pid, 0)
+ if (status & 0xff) == 140:
+ _disable_openpty = True
+ _fbsd_test_pty = False
+
+ if _disable_openpty:
+ master_fd, slave_fd = os.pipe()
+ else:
+ try:
+ master_fd, slave_fd = pty.openpty()
+ got_pty = True
+ except EnvironmentError as e:
+ _disable_openpty = True
+ writemsg("openpty failed: '%s'\n" % str(e),
+ noiselevel=-1)
+ del e
+ master_fd, slave_fd = os.pipe()
+
+ if got_pty:
+ # Disable post-processing of output since otherwise weird
+ # things like \n -> \r\n transformations may occur.
+ mode = termios.tcgetattr(slave_fd)
+ mode[1] &= ~termios.OPOST
+ termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+ if got_pty and \
+ copy_term_size is not None and \
+ os.isatty(copy_term_size):
+ rows, columns = get_term_size()
+ set_term_size(rows, columns, slave_fd)
+
+ return (got_pty, master_fd, slave_fd)
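
A minimal sketch of the helper. The same read/write pattern works whether
a pty or a pipe was allocated, since in the pipe case master_fd is the
read end and slave_fd the write end:

    import os
    import sys

    from portage.util._pty import _create_pty_or_pipe

    got_pty, master_fd, slave_fd = _create_pty_or_pipe(
        copy_term_size=sys.stdout.fileno())
    os.write(slave_fd, b"hello\n")
    # With a pty, OPOST was disabled above, so no \n -> \r\n rewriting:
    print(got_pty, os.read(master_fd, 64))
    os.close(master_fd)
    os.close(slave_fd)
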
diff --git a/usr/lib/portage/pym/portage/util/_urlopen.py b/usr/lib/portage/pym/portage/util/_urlopen.py
new file mode 100644
index 0000000..4cfe183
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/_urlopen.py
@@ -0,0 +1,92 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import sys
+from datetime import datetime
+from time import mktime
+from email.utils import formatdate, parsedate
+
+try:
+ from urllib.request import urlopen as _urlopen
+ import urllib.parse as urllib_parse
+ import urllib.request as urllib_request
+ from urllib.parse import splituser as urllib_parse_splituser
+except ImportError:
+ from urllib import urlopen as _urlopen
+ import urlparse as urllib_parse
+ import urllib2 as urllib_request
+ from urllib import splituser as urllib_parse_splituser
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# to account for the difference between the TIMESTAMP of the index's
+# contents and the file's mtime
+TIMESTAMP_TOLERANCE = 5
+
+def urlopen(url, if_modified_since=None):
+ parse_result = urllib_parse.urlparse(url)
+ if parse_result.scheme not in ("http", "https"):
+ return _urlopen(url)
+ else:
+ netloc = urllib_parse_splituser(parse_result.netloc)[1]
+ url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+ password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+ request = urllib_request.Request(url)
+ request.add_header('User-Agent', 'Gentoo Portage')
+ if if_modified_since:
+ request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
+ if parse_result.username is not None:
+ password_manager.add_password(None, url, parse_result.username, parse_result.password)
+ auth_handler = CompressedResponseProcessor(password_manager)
+ opener = urllib_request.build_opener(auth_handler)
+ hdl = opener.open(request)
+ if hdl.headers.get('last-modified', ''):
+ try:
+ add_header = hdl.headers.add_header
+ except AttributeError:
+ # Python 2
+ add_header = hdl.headers.addheader
+ add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
+ return hdl
+
+def _timestamp_to_http(timestamp):
+ dt = datetime.fromtimestamp(float(long(timestamp)+TIMESTAMP_TOLERANCE))
+ stamp = mktime(dt.timetuple())
+ return formatdate(timeval=stamp, localtime=False, usegmt=True)
+
+def _http_to_timestamp(http_datetime_string):
+ # avoid shadowing the builtin 'tuple'
+ time_tuple = parsedate(http_datetime_string)
+ timestamp = mktime(time_tuple)
+ return str(long(timestamp))
+
+class CompressedResponseProcessor(urllib_request.HTTPBasicAuthHandler):
+ # Handler for compressed responses.
+
+ def http_request(self, req):
+ req.add_header('Accept-Encoding', 'bzip2,gzip,deflate')
+ return req
+ https_request = http_request
+
+ def http_response(self, req, response):
+ decompressed = None
+ if response.headers.get('content-encoding') == 'bzip2':
+ import bz2
+ decompressed = io.BytesIO(bz2.decompress(response.read()))
+ elif response.headers.get('content-encoding') == 'gzip':
+ from gzip import GzipFile
+ decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
+ elif response.headers.get('content-encoding') == 'deflate':
+ import zlib
+ try:
+ decompressed = io.BytesIO(zlib.decompress(response.read()))
+ except zlib.error: # they ignored RFC1950
+ decompressed = io.BytesIO(zlib.decompress(response.read(), -zlib.MAX_WBITS))
+ if decompressed:
+ old_response = response
+ response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
+ response.msg = old_response.msg
+ return response
+ https_response = http_response
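
A hedged usage sketch (the URL is illustrative and needs network access;
a 304 reply to the conditional request surfaces as an HTTPError from
urllib). if_modified_since is a unix timestamp; when the server sends
Last-Modified, a unix-time copy of it is exposed under the synthetic
'timestamp' header:

    from portage.util._urlopen import urlopen

    hdl = urlopen("https://example.org/Packages",
        if_modified_since="1400000000")
    data = hdl.read()                      # transparently decompressed
    stamp = hdl.headers.get("timestamp")   # e.g. "1417305600"
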
diff --git a/usr/lib/portage/pym/portage/util/digraph.py b/usr/lib/portage/pym/portage/util/digraph.py
new file mode 100644
index 0000000..4a9cb43
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/digraph.py
@@ -0,0 +1,359 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['digraph']
+
+from collections import deque
+import sys
+
+from portage.util import writemsg
+
+class digraph(object):
+ """
+ A directed graph object.
+ """
+
+ def __init__(self):
+ """Create an empty digraph"""
+
+ # { node : ( { child : priority } , { parent : priority } ) }
+ self.nodes = {}
+ self.order = []
+
+ def add(self, node, parent, priority=0):
+ """Adds the specified node with the specified parent.
+
+ If the dep is a soft-dep and the node already has a hard
+ relationship to the parent, the relationship is left as hard."""
+
+ if node not in self.nodes:
+ self.nodes[node] = ({}, {}, node)
+ self.order.append(node)
+
+ if not parent:
+ return
+
+ if parent not in self.nodes:
+ self.nodes[parent] = ({}, {}, parent)
+ self.order.append(parent)
+
+ priorities = self.nodes[node][1].get(parent)
+ if priorities is None:
+ priorities = []
+ self.nodes[node][1][parent] = priorities
+ self.nodes[parent][0][node] = priorities
+ priorities.append(priority)
+ priorities.sort()
+
+ def discard(self, node):
+ """
+ Like remove(), except it doesn't raise KeyError if the
+ node doesn't exist.
+ """
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def remove(self, node):
+ """Removes the specified node from the digraph, also removing
+ any ties to other nodes in the digraph. Raises KeyError if the
+ node doesn't exist."""
+
+ if node not in self.nodes:
+ raise KeyError(node)
+
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+
+ del self.nodes[node]
+ self.order.remove(node)
+
+ def difference_update(self, t):
+ """
+ Remove all given nodes from the graph. This is more efficient
+ than multiple calls to the remove() method.
+ """
+ if isinstance(t, (list, tuple)) or \
+ not hasattr(t, "__contains__"):
+ t = frozenset(t)
+ order = []
+ for node in self.order:
+ if node not in t:
+ order.append(node)
+ continue
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+ del self.nodes[node]
+ self.order = order
+
+ def remove_edge(self, child, parent):
+ """
+ Remove edge in the direction from child to parent. Note that it is
+ possible for a remaining edge to exist in the opposite direction.
+ Any endpoint vertices that become isolated will remain in the graph.
+ """
+
+ # Nothing should be modified when a KeyError is raised.
+ for k in parent, child:
+ if k not in self.nodes:
+ raise KeyError(k)
+
+ # Make sure the edge exists.
+ if child not in self.nodes[parent][0]:
+ raise KeyError(child)
+ if parent not in self.nodes[child][1]:
+ raise KeyError(parent)
+
+ # Remove the edge.
+ del self.nodes[child][1][parent]
+ del self.nodes[parent][0][child]
+
+ def __iter__(self):
+ return iter(self.order)
+
+ def contains(self, node):
+ """Checks if the digraph contains mynode"""
+ return node in self.nodes
+
+ def get(self, key, default=None):
+ node_data = self.nodes.get(key, self)
+ if node_data is self:
+ return default
+ return node_data[2]
+
+ def all_nodes(self):
+ """Return a list of all nodes in the graph"""
+ return self.order[:]
+
+ def child_nodes(self, node, ignore_priority=None):
+ """Return all children of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][0])
+ children = []
+ if hasattr(ignore_priority, '__call__'):
+ for child, priorities in self.nodes[node][0].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ children.append(child)
+ break
+ else:
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ children.append(child)
+ return children
+
+ def parent_nodes(self, node, ignore_priority=None):
+ """Return all parents of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][1])
+ parents = []
+ if hasattr(ignore_priority, '__call__'):
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ parents.append(parent)
+ break
+ else:
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ parents.append(parent)
+ return parents
+
+ def leaf_nodes(self, ignore_priority=None):
+ """Return all nodes that have no children
+
+ Edges that are masked by ignore_priority are not counted
+ as children in calculations."""
+
+ leaf_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][0]:
+ leaf_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ is_leaf_node = False
+ break
+ if not is_leaf_node:
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ else:
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ is_leaf_node = False
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ return leaf_nodes
+
+ def root_nodes(self, ignore_priority=None):
+ """Return all nodes that have no parents.
+
+ Edges that are masked by ignore_priority are not counted
+ as parents in calculations."""
+
+ root_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][1]:
+ root_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ is_root_node = False
+ break
+ if not is_root_node:
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ else:
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ is_root_node = False
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ return root_nodes
+
+ def __bool__(self):
+ return bool(self.nodes)
+
+ def is_empty(self):
+ """Checks if the digraph is empty"""
+ return len(self.nodes) == 0
+
+ def clone(self):
+ clone = digraph()
+ clone.nodes = {}
+ memo = {}
+ for children, parents, node in self.nodes.values():
+ children_clone = {}
+ for child, priorities in children.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ children_clone[child] = priorities_clone
+ parents_clone = {}
+ for parent, priorities in parents.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ parents_clone[parent] = priorities_clone
+ clone.nodes[node] = (children_clone, parents_clone, node)
+ clone.order = self.order[:]
+ return clone
+
+ def delnode(self, node):
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def firstzero(self):
+ leaf_nodes = self.leaf_nodes()
+ if leaf_nodes:
+ return leaf_nodes[0]
+ return None
+
+ def hasallzeros(self, ignore_priority=None):
+ return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
+ len(self.order)
+
+ def debug_print(self):
+ def output(s):
+ writemsg(s, noiselevel=-1)
+ # Use unicode_literals to force unicode format
+ # strings for python-2.x safety, ensuring that
+ # node.__unicode__() is used when necessary.
+ for node in self.nodes:
+ output("%s " % (node,))
+ if self.nodes[node][0]:
+ output("depends on\n")
+ else:
+ output("(no children)\n")
+ for child, priorities in self.nodes[node][0].items():
+ output(" %s (%s)\n" % (child, priorities[-1],))
+
+ def bfs(self, start, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+
+ queue, enqueued = deque([(None, start)]), set([start])
+ while queue:
+ parent, n = queue.popleft()
+ yield parent, n
+ new = set(self.child_nodes(n, ignore_priority)) - enqueued
+ enqueued |= new
+ queue.extend([(n, child) for child in new])
+
+ def shortest_path(self, start, end, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+ elif end not in self:
+ raise KeyError(end)
+
+ paths = {None: []}
+ for parent, child in self.bfs(start, ignore_priority):
+ paths[child] = paths[parent] + [child]
+ if child == end:
+ return paths[child]
+ return None
+
+ def get_cycles(self, ignore_priority=None, max_length=None):
+ """
+ Returns all cycles with length at most 'max_length'.
+ If 'max_length' is 'None', all cycles are returned.
+ """
+ all_cycles = []
+ for node in self.nodes:
+ # If we have multiple paths of the same length, we have to
+ # return them all, so that we always get the same results
+ # even with PYTHONHASHSEED="random" enabled.
+ shortest_path = None
+ candidates = []
+ for child in self.child_nodes(node, ignore_priority):
+ path = self.shortest_path(child, node, ignore_priority)
+ if path is None:
+ continue
+ if not shortest_path or len(shortest_path) >= len(path):
+ shortest_path = path
+ candidates.append(path)
+ if shortest_path and \
+ (not max_length or len(shortest_path) <= max_length):
+ for path in candidates:
+ if len(path) == len(shortest_path):
+ all_cycles.append(path)
+ return all_cycles
+
+ # Backward compatibility
+ addnode = add
+ allnodes = all_nodes
+ allzeros = leaf_nodes
+ hasnode = contains
+ __contains__ = contains
+ empty = is_empty
+ copy = clone
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
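
A small worked example of the graph API. add(node, parent) records an
edge from parent to node, so leaf nodes are those without children and
root nodes those without parents:

    from portage.util.digraph import digraph

    g = digraph()
    g.add("b", "a")                   # edge a -> b
    g.add("c", "b", priority=1)       # edge b -> c
    print(g.leaf_nodes())             # ['c']
    print(g.root_nodes())             # ['a']
    print(g.shortest_path("a", "c"))  # ['a', 'b', 'c']
    g.add("a", "c")                   # close the cycle: c -> a
    print(len(g.get_cycles()))        # 3: one rotation per member node
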
diff --git a/usr/lib/portage/pym/portage/util/env_update.py b/usr/lib/portage/pym/portage/util/env_update.py
new file mode 100644
index 0000000..ace492c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/env_update.py
@@ -0,0 +1,359 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['env_update']
+
+import errno
+import glob
+import io
+import stat
+import sys
+import time
+
+import portage
+from portage import os, _encodings, _unicode_decode, _unicode_encode
+from portage.checksum import prelink_capable
+from portage.data import ostype
+from portage.exception import ParseError
+from portage.localization import _
+from portage.process import find_binary
+from portage.util import atomic_ofstream, ensure_dirs, getconfig, \
+ normalize_path, writemsg
+from portage.util.listdir import listdir
+from portage.dbapi.vartree import vartree
+from portage.package.ebuild.config import config
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
+ env=None, writemsg_level=None, vardbapi=None):
+ """
+ Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
+ ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
+ called, its -X option will be used in order to avoid potential
+ interference with installed soname symlinks that are required for
+ correct operation of FEATURES=preserve-libs for downgrade operations.
+ It's not necessary for ldconfig to create soname symlinks, since
+ portage will use NEEDED.ELF.2 data to automatically create them
+ after src_install if they happen to be missing.
+ @param makelinks: True if ldconfig should be called, False otherwise
+ @param target_root: root that is passed to the ldconfig -r option,
+ defaults to portage.settings["ROOT"].
+ @type target_root: String (Path)
+ """
+ if vardbapi is None:
+ if isinstance(env, config):
+ vardbapi = vartree(settings=env).dbapi
+ else:
+ if target_root is None:
+ eprefix = portage.settings["EPREFIX"]
+ target_root = portage.settings["ROOT"]
+ target_eroot = portage.settings['EROOT']
+ else:
+ eprefix = portage.const.EPREFIX
+ target_eroot = os.path.join(target_root,
+ eprefix.lstrip(os.sep))
+ target_eroot = target_eroot.rstrip(os.sep) + os.sep
+ if hasattr(portage, "db") and target_eroot in portage.db:
+ vardbapi = portage.db[target_eroot]["vartree"].dbapi
+ else:
+ settings = config(config_root=target_root,
+ target_root=target_root, eprefix=eprefix)
+ target_root = settings["ROOT"]
+ if env is None:
+ env = settings
+ vardbapi = vartree(settings=settings).dbapi
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ vardbapi._fs_lock()
+ try:
+ return _env_update(makelinks, target_root, prev_mtimes, contents,
+ env, writemsg_level)
+ finally:
+ vardbapi._fs_unlock()
+
+def _env_update(makelinks, target_root, prev_mtimes, contents, env,
+ writemsg_level):
+ if writemsg_level is None:
+ writemsg_level = portage.util.writemsg_level
+ if target_root is None:
+ target_root = portage.settings["ROOT"]
+ if prev_mtimes is None:
+ prev_mtimes = portage.mtimedb["ldpath"]
+ if env is None:
+ settings = portage.settings
+ else:
+ settings = env
+
+ eprefix = settings.get("EPREFIX", portage.const.EPREFIX)
+ eprefix_lstrip = eprefix.lstrip(os.sep)
+ eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
+ envd_dir = os.path.join(eroot, "etc", "env.d")
+ ensure_dirs(envd_dir, mode=0o755)
+ fns = listdir(envd_dir, EmptyOnError=1)
+ fns.sort()
+ templist = []
+ for x in fns:
+ if len(x) < 3:
+ continue
+ if not x[0].isdigit() or not x[1].isdigit():
+ continue
+ if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
+ continue
+ templist.append(x)
+ fns = templist
+ del templist
+
+ space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
+ colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
+ "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PYTHONPATH", "ROOTPATH"])
+
+ config_list = []
+
+ for x in fns:
+ file_path = os.path.join(envd_dir, x)
+ try:
+ myconfig = getconfig(file_path, expand=False)
+ except ParseError as e:
+ writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ if myconfig is None:
+ # broken symlink or file removed by a concurrent process
+ writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
+ continue
+
+ config_list.append(myconfig)
+ if "SPACE_SEPARATED" in myconfig:
+ space_separated.update(myconfig["SPACE_SEPARATED"].split())
+ del myconfig["SPACE_SEPARATED"]
+ if "COLON_SEPARATED" in myconfig:
+ colon_separated.update(myconfig["COLON_SEPARATED"].split())
+ del myconfig["COLON_SEPARATED"]
+
+ env = {}
+ specials = {}
+ for var in space_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split():
+ if item and item not in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = " ".join(mylist)
+ specials[var] = mylist
+
+ for var in colon_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split(":"):
+ if item and item not in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = ":".join(mylist)
+ specials[var] = mylist
+
+ for myconfig in config_list:
+ """Cumulative variables have already been deleted from myconfig so that
+ they won't be overwritten by this dict.update call."""
+ env.update(myconfig)
+
+ ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
+ try:
+ myld = io.open(_unicode_encode(ldsoconf_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ myldlines = myld.readlines()
+ myld.close()
+ oldld = []
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[:1] == "#":
+ continue
+ oldld.append(x[:-1])
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise
+ oldld = None
+
+ newld = specials["LDPATH"]
+ if (oldld != newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd = atomic_ofstream(ldsoconf_path)
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x + "\n")
+ myfd.close()
+
+ potential_lib_dirs = set()
+ for lib_dir_glob in ('usr/lib*', 'lib*'):
+ x = os.path.join(eroot, lib_dir_glob)
+ for y in glob.glob(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict')):
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if os.path.basename(y) != 'libexec':
+ potential_lib_dirs.add(y[len(eroot):])
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
+ ensure_dirs(prelink_d)
+ newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
+ newprelink.write('-l /%s\n' % (x,))
+ prelink_paths = set()
+ prelink_paths |= set(specials.get('LDPATH', []))
+ prelink_paths |= set(specials.get('PATH', []))
+ prelink_paths |= set(specials.get('PRELINK_PATH', []))
+ prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
+ for x in prelink_paths:
+ if not x:
+ continue
+ if x[-1:] != '/':
+ x += "/"
+ plmasked = 0
+ for y in prelink_path_mask:
+ if not y:
+ continue
+ if y[-1] != '/':
+ y += "/"
+ if y == x[0:len(y)]:
+ plmasked = 1
+ break
+ if not plmasked:
+ newprelink.write("-h %s\n" % (x,))
+ for x in prelink_path_mask:
+ newprelink.write("-b %s\n" % (x,))
+ newprelink.close()
+
+ # Migration code path. If /etc/prelink.conf was generated by us, then
+ # point it to the new stuff until the prelink package re-installs.
+ prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
+ try:
+ with open(_unicode_encode(prelink_conf,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
+ f = atomic_ofstream(prelink_conf)
+ f.write('-c /etc/prelink.conf.d/*.conf\n')
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ current_time = long(time.time())
+ mtime_changed = False
+
+ lib_dirs = set()
+ for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
+ x = os.path.join(eroot, lib_dir.lstrip(os.sep))
+ try:
+ newldpathtime = os.stat(x)[stat.ST_MTIME]
+ lib_dirs.add(normalize_path(x))
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ try:
+ del prev_mtimes[x]
+ except KeyError:
+ pass
+ # ignore this path because it doesn't exist
+ continue
+ raise
+ if newldpathtime == current_time:
+ # Reset mtime to avoid the potential ambiguity of times that
+ # differ by less than 1 second.
+ newldpathtime -= 1
+ os.utime(x, (newldpathtime, newldpathtime))
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ elif x in prev_mtimes:
+ if prev_mtimes[x] == newldpathtime:
+ pass
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+
+ if makelinks and \
+ not mtime_changed and \
+ contents is not None:
+ libdir_contents_changed = False
+ for mypath, mydata in contents.items():
+ if mydata[0] not in ("obj", "sym"):
+ continue
+ head, tail = os.path.split(mypath)
+ if head in lib_dirs:
+ libdir_contents_changed = True
+ break
+ if not libdir_contents_changed:
+ makelinks = False
+
+ ldconfig = "/sbin/ldconfig"
+ if "CHOST" in settings and "CBUILD" in settings and \
+ settings["CHOST"] != settings["CBUILD"]:
+ ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
+
+ # Only run ldconfig as needed
+ if makelinks and ldconfig and not eprefix:
+ # ldconfig has very different behaviour between FreeBSD and Linux
+ if ostype == "Linux" or ostype.lower().endswith("gnu"):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
+ (target_root,))
+ os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
+ elif ostype in ("FreeBSD", "DragonFly"):
+ writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
+ target_root)
+ os.system(("cd / ; %s -elf -i " + \
+ "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
+ (ldconfig, target_root, target_root))
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO " + eprefix + "/etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO " + eprefix + "/etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
+ outfile.write(penvnotice)
+
+ env_keys = [x for x in env if x != "LDPATH"]
+ env_keys.sort()
+ for k in env_keys:
+ v = env[k]
+ if v.startswith('$') and not v.startswith('${'):
+ outfile.write("export %s=$'%s'\n" % (k, v[1:]))
+ else:
+ outfile.write("export %s='%s'\n" % (k, v))
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
+ outfile.write(cenvnotice)
+ for x in env_keys:
+ outfile.write("setenv %s '%s'\n" % (x, env[x]))
+ outfile.close()
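
A hypothetical top-level invocation sketch (normally this runs inside
emerge or the env-update tool, with enough privileges to write under
/etc and run ldconfig):

    import portage
    from portage.util.env_update import env_update

    env_update(makelinks=1,
        target_root=portage.settings["ROOT"],
        prev_mtimes=portage.mtimedb["ldpath"])
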
diff --git a/usr/lib/portage/pym/portage/util/lafilefixer.py b/usr/lib/portage/pym/portage/util/lafilefixer.py
new file mode 100644
index 0000000..2562d9a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/lafilefixer.py
@@ -0,0 +1,185 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os as _os
+import re
+
+from portage import _unicode_decode
+from portage.exception import InvalidData
+
+#########################################################
+# This is a re-implementation of dev-util/lafilefixer-0.5.
+# rewrite_lafile() takes the contents of an .la file as a string.
+# It then parses the dependency_libs and inherited_linker_flags
+# entries.
+# We insist on dependency_libs being present. inherited_linker_flags
+# is optional.
+# There are strict rules about the syntax imposed by libtool's libltdl.
+# See 'parse_dotla_file' and 'trim' functions in libltdl/ltdl.c.
+# Note that duplicated entries of dependency_libs and inherited_linker_flags
+# are ignored by libtool (last one wins), but we treat it as an error (like
+# lafilefixer does).
+# What it does:
+# * Replaces all .la files with absolute paths in dependency_libs with
+# corresponding -l* and -L* entries
+# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
+# * Moves various flags (see flag_re below) to inherited_linker_flags,
+# if such an entry was present.
+# * Reorders dependency_libs such that all -R* entries precede -L* entries
+# and these precede all other entries.
+# * Removes duplicated entries from dependency_libs
+# * Takes care that no entry to inherited_linker_flags is added that is
+# already there.
+#########################################################
+
+#These regexes are used to parse the interesting entries in the la file
+dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
+inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
+
+#regexes for replacing stuff in -L entries.
+#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what this is about.
+X11_local_sub = re.compile(b"X11R6/lib|local/lib")
+#get rid of the '..'
+pkgconfig_sub1 = re.compile(b"usr/lib[^/]*/pkgconfig/\.\./\.\.")
+pkgconfig_sub2 = re.compile(b"(?P<usrlib>usr/lib[^/]*)/pkgconfig/\.\.")
+
+#detect flags that should go into inherited_linker_flags instead of dependency_libs
+flag_re = re.compile(b"-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads")
+
+def _parse_lafile_contents(contents):
+ """
+ Parses 'dependency_libs' and 'inherited_linker_flags' lines.
+ """
+
+ dep_libs = None
+ inh_link_flags = None
+
+ for line in contents.split(b"\n"):
+ m = dep_libs_re.match(line)
+ if m:
+ if dep_libs is not None:
+ raise InvalidData("duplicated dependency_libs entry")
+ dep_libs = m.group("value")
+ continue
+
+ m = inh_link_flags_re.match(line)
+ if m:
+ if inh_link_flags is not None:
+ raise InvalidData("duplicated inherited_linker_flags entry")
+ inh_link_flags = m.group("value")
+ continue
+
+ return dep_libs, inh_link_flags
+
+def rewrite_lafile(contents):
+ """
+ Given the contents of an .la file, parse and fix it.
+ This operates with strings of raw bytes (assumed to contain some ascii
+ characters), in order to avoid any potential character encoding issues.
+ Raises 'InvalidData' if the .la file is invalid.
+ @param contents: the contents of a libtool archive file
+ @type contents: bytes
+ @rtype: tuple
+ @return: (True, fixed_contents) if something needed to be
+ fixed, (False, None) otherwise.
+ """
+ #Parse the 'dependency_libs' and 'inherited_linker_flags' lines.
+ dep_libs, inh_link_flags = \
+ _parse_lafile_contents(contents)
+
+ if dep_libs is None:
+ raise InvalidData("missing or invalid dependency_libs")
+
+ new_dep_libs = []
+ new_inh_link_flags = []
+ librpath = []
+ libladir = []
+
+ if inh_link_flags is not None:
+ new_inh_link_flags = inh_link_flags.split()
+
+ #Check entries in 'dependency_libs'.
+ for dep_libs_entry in dep_libs.split():
+ if dep_libs_entry.startswith(b"-l"):
+ #-lfoo, keep it
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ elif dep_libs_entry.endswith(b".la"):
+ #Two cases:
+ #1) /usr/lib64/libfoo.la, turn it into -lfoo and append -L/usr/lib64 to libladir
+ #2) libfoo.la, keep it
+ dir, file = _os.path.split(dep_libs_entry)
+
+ if not dir or not file.startswith(b"lib"):
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+ else:
+ #/usr/lib64/libfoo.la -> -lfoo
+ lib = b"-l" + file[3:-3]
+ if lib not in new_dep_libs:
+ new_dep_libs.append(lib)
+ #/usr/lib64/libfoo.la -> -L/usr/lib64
+ ladir = b"-L" + dir
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-L"):
+ #Do some replacement magic and store them in 'libladir'.
+ #This allows us to place all -L entries at the beginning
+ #of 'dependency_libs'.
+ ladir = dep_libs_entry
+
+ ladir = X11_local_sub.sub(b"lib", ladir)
+ ladir = pkgconfig_sub1.sub(b"usr", ladir)
+ ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
+
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-R"):
+ if dep_libs_entry not in librpath:
+ librpath.append(dep_libs_entry)
+
+ elif flag_re.match(dep_libs_entry):
+ #All this stuff goes into inh_link_flags, if the la file has such an entry.
+ #If it doesn't, they stay in 'dependency_libs'.
+ if inh_link_flags is not None:
+ if dep_libs_entry not in new_inh_link_flags:
+ new_inh_link_flags.append(dep_libs_entry)
+ else:
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ else:
+ raise InvalidData("Error: Unexpected entry '%s' in 'dependency_libs'" \
+ % _unicode_decode(dep_libs_entry))
+
+ #What should 'dependency_libs' and 'inherited_linker_flags' look like?
+ expected_dep_libs = b""
+ for x in (librpath, libladir, new_dep_libs):
+ if x:
+ expected_dep_libs += b" " + b" ".join(x)
+
+ expected_inh_link_flags = b""
+ if new_inh_link_flags:
+ expected_inh_link_flags += b" " + b" ".join(new_inh_link_flags)
+
+ #Don't touch the file if we don't need to, otherwise put the expected values into
+ #'contents' and write it into the la file.
+
+ changed = False
+ if dep_libs != expected_dep_libs:
+ contents = contents.replace(b"dependency_libs='" + dep_libs + b"'", \
+ b"dependency_libs='" + expected_dep_libs + b"'")
+ changed = True
+
+ if inh_link_flags is not None and expected_inh_link_flags != inh_link_flags:
+ contents = contents.replace(b"inherited_linker_flags='" + inh_link_flags + b"'", \
+ b"inherited_linker_flags='" + expected_inh_link_flags + b"'")
+ changed = True
+
+ if changed:
+ return True, contents
+ else:
+ return False, None
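
A worked example of rewrite_lafile() on raw bytes: the absolute .la
reference is expanded into -L/-l form, -L entries are hoisted ahead of
the plain -l entries, and duplicates are dropped:

    from portage.util.lafilefixer import rewrite_lafile

    contents = (
        b"dependency_libs=' /usr/lib64/libbar.la -L/usr/lib64 -lfoo'\n"
        b"inherited_linker_flags=''\n")
    changed, fixed = rewrite_lafile(contents)
    print(changed)   # True
    print(fixed)     # dependency_libs=' -L/usr/lib64 -lbar -lfoo' ...
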
diff --git a/usr/lib/portage/pym/portage/util/listdir.py b/usr/lib/portage/pym/portage/util/listdir.py
new file mode 100644
index 0000000..2012e14
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/listdir.py
@@ -0,0 +1,139 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['cacheddir', 'listdir']
+
+import errno
+import stat
+import sys
+
+if sys.hexversion < 0x3000000:
+ from itertools import izip as zip
+
+from portage import os
+from portage.const import VCS_DIRS
+from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
+from portage.util import normalize_path
+
+# The global dircache is no longer supported, since it could
+# be a memory leak for API consumers. Any cacheddir callers
+# should use higher-level caches instead, when necessary.
+# TODO: Remove dircache variable after stable portage does
+# not use it (keep it for now, in case API consumers clear
+# it manually).
+dircache = {}
+
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ mypath = normalize_path(my_original_path)
+ try:
+ pathstat = os.stat(mypath)
+ if not stat.S_ISDIR(pathstat.st_mode):
+ raise DirectoryNotFound(mypath)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mypath)
+ del e
+ return [], []
+ except PortageException:
+ return [], []
+ else:
+ try:
+ fpaths = os.listdir(mypath)
+ except EnvironmentError as e:
+ if e.errno != errno.EACCES:
+ raise
+ del e
+ raise PermissionDenied(mypath)
+ ftype = []
+ for x in fpaths:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except (IOError, OSError):
+ ftype.append(3)
+
+ if ignorelist or ignorecvs:
+ ret_list = []
+ ret_ftype = []
+ for file_path, file_type in zip(fpaths, ftype):
+ if file_path in ignorelist:
+ pass
+ elif ignorecvs:
+ if file_path[:2] != ".#" and \
+ not (file_type == 1 and file_path in VCS_DIRS):
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list = fpaths
+ ret_ftype = ftype
+
+ return ret_list, ret_ftype
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False, dirsonly=False):
+ """
+ Portage-specific implementation of os.listdir
+
+ @param mypath: Path whose contents you wish to list
+ @type mypath: String
+ @param recursive: Recursively scan directories contained within mypath
+ @type recursive: Boolean
+ @param filesonly: Only return files, not directories
+ @type filesonly: Boolean
+ @param ignorecvs: Ignore VCS directories
+ @type ignorecvs: Boolean
+ @param ignorelist: List of filenames/directories to exclude
+ @type ignorelist: List
+ @param followSymlinks: Follow Symlink'd files and directories
+ @type followSymlinks: Boolean
+ @param EmptyOnError: Return [] if an error occurs (deprecated, always True)
+ @type EmptyOnError: Boolean
+ @param dirsonly: Only return directories.
+ @type dirsonly: Boolean
+ @rtype: List
+ @return: A list of files and directories (or just files or just directories) or an empty list.
+ """
+
+ fpaths, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if fpaths is None:
+ fpaths = []
+ if ftype is None:
+ ftype = []
+
+ if not (filesonly or dirsonly or recursive):
+ return fpaths
+
+ if recursive:
+ stack = list(zip(fpaths, ftype))
+ fpaths = []
+ ftype = []
+ while stack:
+ file_path, file_type = stack.pop()
+ fpaths.append(file_path)
+ ftype.append(file_type)
+ if file_type == 1:
+ subdir_list, subdir_types = cacheddir(
+ os.path.join(mypath, file_path), ignorecvs,
+ ignorelist, EmptyOnError, followSymlinks)
+ stack.extend((os.path.join(file_path, x), x_type)
+ for x, x_type in zip(subdir_list, subdir_types))
+
+ if filesonly:
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 0]
+
+ elif dirsonly:
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 1]
+
+ return fpaths
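
A sketch of the wrapper in action (the path is illustrative): list
regular files recursively while skipping VCS directories and an explicit
ignore list:

    from portage.util.listdir import listdir

    for name in listdir("/etc/portage", recursive=True, filesonly=True,
            ignorecvs=True, ignorelist=[".keep"]):
        print(name)
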
diff --git a/usr/lib/portage/pym/portage/util/movefile.py b/usr/lib/portage/pym/portage/util/movefile.py
new file mode 100644
index 0000000..452e77f
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/movefile.py
@@ -0,0 +1,422 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['movefile']
+
+import errno
+import fnmatch
+import os as _os
+import shutil as _shutil
+import stat
+import sys
+import subprocess
+import textwrap
+
+import portage
+from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
+ _unicode_decode, _unicode_encode, _unicode_func_wrapper, \
+ _unicode_module_wrapper
+from portage.const import MOVE_BINARY
+from portage.exception import OperationNotSupported
+from portage.localization import _
+from portage.process import spawn
+from portage.util import writemsg
+
+def _apply_stat(src_stat, dest):
+ _os.chown(dest, src_stat.st_uid, src_stat.st_gid)
+ _os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+
+_xattr_excluder_cache = {}
+
+def _get_xattr_excluder(pattern):
+
+ try:
+ value = _xattr_excluder_cache[pattern]
+ except KeyError:
+ value = _xattr_excluder(pattern)
+ _xattr_excluder_cache[pattern] = value
+
+ return value
+
+class _xattr_excluder(object):
+
+ __slots__ = ('_pattern_split',)
+
+ def __init__(self, pattern):
+
+ if pattern is None:
+ self._pattern_split = None
+ else:
+ pattern = pattern.split()
+ if not pattern:
+ self._pattern_split = None
+ else:
+ pattern.sort()
+ self._pattern_split = tuple(pattern)
+
+ def __call__(self, attr):
+
+ if self._pattern_split is None:
+ return False
+
+ match = fnmatch.fnmatch
+ for x in self._pattern_split:
+ if match(attr, x):
+ return True
+
+ return False
+
+if hasattr(_os, "getxattr"):
+ # Python >=3.3 and GNU/Linux
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = _os.listxattr(src)
+ except OSError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
+ try:
+ _os.setxattr(dest, attr, _os.getxattr(src, attr))
+ raise_exception = False
+ except OSError:
+ raise_exception = True
+ if raise_exception:
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
+else:
+ try:
+ import xattr
+ except ImportError:
+ xattr = None
+ if xattr is not None:
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = xattr.list(src)
+ except IOError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
+ try:
+ xattr.set(dest, attr, xattr.get(src, attr))
+ raise_exception = False
+ except IOError:
+ raise_exception = True
+ if raise_exception:
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
+ else:
+ try:
+ with open(_os.devnull, 'wb') as f:
+ subprocess.call(["getfattr", "--version"], stdout=f)
+ subprocess.call(["setfattr", "--version"], stdout=f)
+ except OSError:
+ # getfattr/setfattr are not installed, so xattrs cannot be copied.
+ def _copyxattr(src, dest, exclude=None):
+ pass
+ else:
+ def _copyxattr(src, dest, exclude=None):
+ # TODO: implement exclude
+ getfattr_process = subprocess.Popen(["getfattr", "-d", "--absolute-names", src], stdout=subprocess.PIPE)
+ getfattr_process.wait()
+ extended_attributes = getfattr_process.stdout.readlines()
+ getfattr_process.stdout.close()
+ if extended_attributes:
+ extended_attributes[0] = b"# file: " + _unicode_encode(dest) + b"\n"
+ setfattr_process = subprocess.Popen(["setfattr", "--restore=-"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ setfattr_process.communicate(input=b"".join(extended_attributes))
+ if setfattr_process.returncode != 0:
+ raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+
+def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
+ hardlink_candidates=None, encoding=_encodings['fs']):
+ """moves a file from src to dest, preserving all permissions and attributes; mtime will
+ be preserved even when moving across filesystems. Returns mtime as integer on success
+ and None on failure. mtime is expressed in seconds in Python <3.3 and nanoseconds in
+ Python >=3.3. Move is atomic."""
+
+ if mysettings is None:
+ mysettings = portage.settings
+
+ src_bytes = _unicode_encode(src, encoding=encoding, errors='strict')
+ dest_bytes = _unicode_encode(dest, encoding=encoding, errors='strict')
+ xattr_enabled = "xattr" in mysettings.features
+ selinux_enabled = mysettings.selinux_enabled()
+ if selinux_enabled:
+ selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
+ _copyfile = selinux.copyfile
+ _rename = selinux.rename
+ else:
+ _copyfile = _shutil.copyfile
+ _rename = _os.rename
+
+ lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
+ os = _unicode_module_wrapper(_os,
+ encoding=encoding, overrides=_os_overrides)
+
+ try:
+ if not sstat:
+ sstat = os.lstat(src)
+
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
+ noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+
+ destexists = 1
+ try:
+ dstat = os.lstat(dest)
+ except (OSError, IOError):
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
+
+ if bsd_chflags:
+ if destexists and dstat.st_flags != 0:
+ bsd_chflags.lchflags(dest, 0)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(os.path.dirname(dest)).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(os.path.dirname(dest), 0)
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists = 0
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target = os.readlink(src)
+ if mysettings and "D" in mysettings and \
+ target.startswith(mysettings["D"]):
+ target = target[len(mysettings["D"])-1:]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ try:
+ if selinux_enabled:
+ selinux.symlink(target, dest, src)
+ else:
+ os.symlink(target, dest)
+ except OSError as e:
+ # Some programs will create symlinks automatically, so we have
+ # to tolerate these links being recreated during the merge
+ # process. In any case, if the link is pointing at the right
+ # place, we're in good shape.
+ if e.errno not in (errno.ENOENT, errno.EEXIST) or \
+ target != os.readlink(dest):
+ raise
+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+
+ if sys.hexversion >= 0x3030000:
+ try:
+ os.utime(dest, ns=(sstat.st_mtime_ns, sstat.st_mtime_ns), follow_symlinks=False)
+ except NotImplementedError:
+ # utimensat() and lutimes() missing in libc.
+ return os.stat(dest, follow_symlinks=False).st_mtime_ns
+ else:
+ return sstat.st_mtime_ns
+ else:
+ # utime() in Python <3.3 only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _("failed to properly create symlink:"),
+ noiselevel=-1)
+ writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+
+ hardlinked = False
+ # Identical files might be merged to multiple filesystems, so
+ # os.link() calls can fail for some paths; try every candidate.
+ # For atomic replacement, first create the link as a temp file
+ # and then use os.rename() to replace the destination.
+ if hardlink_candidates:
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
+ (tail, os.getpid()))
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
+ (hardlink_tmp,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ del e
+ for hardlink_src in hardlink_candidates:
+ try:
+ os.link(hardlink_src, hardlink_tmp)
+ except OSError:
+ continue
+ else:
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ writemsg(_("!!! Failed to rename %s to %s\n") % \
+ (hardlink_tmp, dest), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ hardlinked = True
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+ break
+
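+ # Try an atomic os.rename() first; if it fails with EXDEV
+ # (cross-device), fall back to the copy-and-rename path below.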
+ renamefailed = True
+ if hardlinked:
+ renamefailed = False
+ if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
+ try:
+ if selinux_enabled:
+ selinux.rename(src, dest)
+ else:
+ os.rename(src, dest)
+ renamefailed = False
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ # Some random error.
+ writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
+ {"src": src, "dest": dest}, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ # EXDEV: the destination is on a different device, or on a 'bind'
+ # mount that the kernel reports as cross-device.
+ if renamefailed:
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ dest_tmp = dest + "#new"
+ dest_tmp_bytes = _unicode_encode(dest_tmp, encoding=encoding,
+ errors='strict')
+ try: # For safety copy then move it over.
+ _copyfile(src_bytes, dest_tmp_bytes)
+ if xattr_enabled:
+ try:
+ _copyxattr(src_bytes, dest_tmp_bytes,
+ exclude=mysettings.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl"))
+ except SystemExit:
+ raise
+ except:
+ msg = _("Failed to copy extended attributes. "
+ "In order to avoid this error, set "
+ "FEATURES=\"-xattr\" in make.conf.")
+ msg = textwrap.wrap(msg, 65)
+ for line in msg:
+ writemsg("!!! %s\n" % (line,), noiselevel=-1)
+ raise
+ _apply_stat(sstat, dest_tmp_bytes)
+ _rename(dest_tmp_bytes, dest_bytes)
+ _os.unlink(src_bytes)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
+ {"src": src, "dest": dest}, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ else:
+ # We don't yet handle special files (devices, FIFOs, etc.), so
+ # fall back to the external mv binary.
+ a = spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
+ if a != os.EX_OK:
+ writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
+ writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
+ {"src": _unicode_decode(src, encoding=encoding),
+ "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
+ writemsg("!!! %s\n" % a, noiselevel=-1)
+ return None # failure
+
+ # In Python <3.3 always use stat_obj[stat.ST_MTIME] for the integral timestamp
+ # which is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # if the nanosecond part of the timestamp is 999999881 ns or greater.
+ try:
+ if hardlinked:
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ else:
+ # Note: It is not possible to preserve nanosecond precision
+ # (supported in POSIX.1-2008 via utimensat) with the IEEE 754
+ # double precision float which only has a 53 bit significand.
+ if newmtime is not None:
+ if sys.hexversion >= 0x3030000:
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ os.utime(dest, (newmtime, newmtime))
+ else:
+ if sys.hexversion >= 0x3030000:
+ newmtime = sstat.st_mtime_ns
+ else:
+ newmtime = sstat[stat.ST_MTIME]
+ if renamefailed:
+ if sys.hexversion >= 0x3030000:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, manually
+ # update timestamps with nanosecond precision.
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
+ except OSError:
+ # The utime can fail here with EPERM even though the move succeeded.
+ # Instead of failing, use stat to return the mtime if possible.
+ try:
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ except OSError as e:
+ writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % dest, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ return None
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if pflags:
+ bsd_chflags.chflags(os.path.dirname(dest), pflags)
+
+ return newmtime
diff --git a/usr/lib/portage/pym/portage/util/mtimedb.py b/usr/lib/portage/pym/portage/util/mtimedb.py
new file mode 100644
index 0000000..30922a9
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/mtimedb.py
@@ -0,0 +1,128 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['MtimeDB']
+
+import copy
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import errno
+import io
+import json
+import sys
+
+import portage
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, uid
+from portage.localization import _
+from portage.util import apply_secpass_permissions, atomic_ofstream, writemsg
+
+class MtimeDB(dict):
+
+ # JSON read support has been available since portage-2.1.10.49, so
+ # it is now safe to write the database in JSON format.
+ _json_write = True
+
+ _json_write_opts = {
+ "ensure_ascii": False,
+ "indent": "\t",
+ "sort_keys": True
+ }
+ if sys.hexversion < 0x30200F0:
+ # indent only supports int number of spaces
+ _json_write_opts["indent"] = 4
+
+ def __init__(self, filename):
+ dict.__init__(self)
+ self.filename = filename
+ self._load(filename)
+
+ def _load(self, filename):
+ f = None
+ content = None
+ try:
+ f = open(_unicode_encode(filename), 'rb')
+ content = f.read()
+ except EnvironmentError as e:
+ if getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (filename, e), noiselevel=-1)
+ finally:
+ if f is not None:
+ f.close()
+
+ d = None
+ if content:
+ try:
+ d = json.loads(_unicode_decode(content,
+ encoding=_encodings['repo.content'], errors='strict'))
+ except SystemExit:
+ raise
+ except Exception as e:
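+ # Fall back to the legacy pickle format for databases written
+ # by portage versions that predate the JSON format.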
+ try:
+ mypickle = pickle.Unpickler(io.BytesIO(content))
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # Python >=3
+ pass
+ d = mypickle.load()
+ except SystemExit:
+ raise
+ except Exception:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (filename, e), noiselevel=-1)
+
+ if d is None:
+ d = {}
+
+ if "old" in d:
+ d["updates"] = d["old"]
+ del d["old"]
+ if "cur" in d:
+ del d["cur"]
+
+ d.setdefault("starttime", 0)
+ d.setdefault("version", "")
+ for k in ("info", "ldpath", "updates"):
+ d.setdefault(k, {})
+
+ mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
+ "starttime", "updates", "version"))
+
+ for k in list(d):
+ if k not in mtimedbkeys:
+ writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
+ del d[k]
+ self.update(d)
+ self._clean_data = copy.deepcopy(d)
+
+ def commit(self):
+ if not self.filename:
+ return
+ d = {}
+ d.update(self)
+ # Only commit if the internal state has changed.
+ if d != self._clean_data:
+ d["version"] = str(portage.VERSION)
+ try:
+ f = atomic_ofstream(self.filename, mode='wb')
+ except EnvironmentError:
+ pass
+ else:
+ if self._json_write:
+ f.write(_unicode_encode(
+ json.dumps(d, **self._json_write_opts),
+ encoding=_encodings['repo.content'], errors='strict'))
+ else:
+ pickle.dump(d, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(self.filename,
+ uid=uid, gid=portage_gid, mode=0o644)
+ self._clean_data = copy.deepcopy(d)
diff --git a/usr/lib/portage/pym/portage/util/whirlpool.py b/usr/lib/portage/pym/portage/util/whirlpool.py
new file mode 100644
index 0000000..170ae73
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/whirlpool.py
@@ -0,0 +1,796 @@
+## whirlpool.py - pure Python implementation of the Whirlpool algorithm.
+## Bjorn Edstrom <be@bjrn.se> 16 December 2007.
+##
+## Copyrights
+## ==========
+##
+## This code is based on the reference implementation by
+## Paulo S.L.M. Barreto and Vincent Rijmen. The reference implementation
+## is placed in the public domain but has the following headers:
+##
+## * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+## * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+## * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+## * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+## * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+## * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+## * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+## * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+## * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+## * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+## *
+## */
+## /* The code contained in this file (Whirlpool.c) is in the public domain. */
+##
+## This Python implementation is therefore also placed in the public domain.
+
+import sys
+if sys.hexversion >= 0x3000000:
+ xrange = range
+
+#block_size = 64
+digest_size = 64
+digestsize = 64
+
+class Whirlpool:
+ """Return a new Whirlpool object. An optional string argument
+ may be provided; if present, this string will be automatically
+ hashed."""
+ def __init__(self, arg=None):
+ self.ctx = WhirlpoolStruct()
+ if arg:
+ self.update(arg)
+ self.digest_status = 0
+
+ def update(self, arg):
+ """update(arg)"""
+ WhirlpoolAdd(arg, len(arg)*8, self.ctx)
+ self.digest_status = 0
+
+ def digest(self):
+ """digest()"""
+ if self.digest_status == 0:
+ self.dig = WhirlpoolFinalize(self.ctx)
+ self.digest_status = 1
+ return self.dig
+
+ def hexdigest(self):
+ """hexdigest()"""
+ return ''.join('%02x' % ord(d) for d in self.digest())
+
+ def copy(self):
+ """copy()"""
+ import copy
+ return copy.deepcopy(self)
+
+
+def new(init=None):
+ """Return a new Whirlpool object. An optional string argument
+ may be provided; if present, this string will be automatically
+ hashed."""
+ return Whirlpool(init)
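+
+# Example usage; streaming the data in two update() calls here matches the
+# first test vector at the bottom of this file:
+#
+#     h = new(b'The quick brown fox ')
+#     h.update(b'jumps over the lazy dog')
+#     print(h.hexdigest())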
+
+#
+# Private.
+#
+
+R = 10
+
+C0 = [
+0x18186018c07830d8, 0x23238c2305af4626, 0xc6c63fc67ef991b8, 0xe8e887e8136fcdfb,
+0x878726874ca113cb, 0xb8b8dab8a9626d11, 0x0101040108050209, 0x4f4f214f426e9e0d,
+0x3636d836adee6c9b, 0xa6a6a2a6590451ff, 0xd2d26fd2debdb90c, 0xf5f5f3f5fb06f70e,
+0x7979f979ef80f296, 0x6f6fa16f5fcede30, 0x91917e91fcef3f6d, 0x52525552aa07a4f8,
+0x60609d6027fdc047, 0xbcbccabc89766535, 0x9b9b569baccd2b37, 0x8e8e028e048c018a,
+0xa3a3b6a371155bd2, 0x0c0c300c603c186c, 0x7b7bf17bff8af684, 0x3535d435b5e16a80,
+0x1d1d741de8693af5, 0xe0e0a7e05347ddb3, 0xd7d77bd7f6acb321, 0xc2c22fc25eed999c,
+0x2e2eb82e6d965c43, 0x4b4b314b627a9629, 0xfefedffea321e15d, 0x575741578216aed5,
+0x15155415a8412abd, 0x7777c1779fb6eee8, 0x3737dc37a5eb6e92, 0xe5e5b3e57b56d79e,
+0x9f9f469f8cd92313, 0xf0f0e7f0d317fd23, 0x4a4a354a6a7f9420, 0xdada4fda9e95a944,
+0x58587d58fa25b0a2, 0xc9c903c906ca8fcf, 0x2929a429558d527c, 0x0a0a280a5022145a,
+0xb1b1feb1e14f7f50, 0xa0a0baa0691a5dc9, 0x6b6bb16b7fdad614, 0x85852e855cab17d9,
+0xbdbdcebd8173673c, 0x5d5d695dd234ba8f, 0x1010401080502090, 0xf4f4f7f4f303f507,
+0xcbcb0bcb16c08bdd, 0x3e3ef83eedc67cd3, 0x0505140528110a2d, 0x676781671fe6ce78,
+0xe4e4b7e47353d597, 0x27279c2725bb4e02, 0x4141194132588273, 0x8b8b168b2c9d0ba7,
+0xa7a7a6a7510153f6, 0x7d7de97dcf94fab2, 0x95956e95dcfb3749, 0xd8d847d88e9fad56,
+0xfbfbcbfb8b30eb70, 0xeeee9fee2371c1cd, 0x7c7ced7cc791f8bb, 0x6666856617e3cc71,
+0xdddd53dda68ea77b, 0x17175c17b84b2eaf, 0x4747014702468e45, 0x9e9e429e84dc211a,
+0xcaca0fca1ec589d4, 0x2d2db42d75995a58, 0xbfbfc6bf9179632e, 0x07071c07381b0e3f,
+0xadad8ead012347ac, 0x5a5a755aea2fb4b0, 0x838336836cb51bef, 0x3333cc3385ff66b6,
+0x636391633ff2c65c, 0x02020802100a0412, 0xaaaa92aa39384993, 0x7171d971afa8e2de,
+0xc8c807c80ecf8dc6, 0x19196419c87d32d1, 0x494939497270923b, 0xd9d943d9869aaf5f,
+0xf2f2eff2c31df931, 0xe3e3abe34b48dba8, 0x5b5b715be22ab6b9, 0x88881a8834920dbc,
+0x9a9a529aa4c8293e, 0x262698262dbe4c0b, 0x3232c8328dfa64bf, 0xb0b0fab0e94a7d59,
+0xe9e983e91b6acff2, 0x0f0f3c0f78331e77, 0xd5d573d5e6a6b733, 0x80803a8074ba1df4,
+0xbebec2be997c6127, 0xcdcd13cd26de87eb, 0x3434d034bde46889, 0x48483d487a759032,
+0xffffdbffab24e354, 0x7a7af57af78ff48d, 0x90907a90f4ea3d64, 0x5f5f615fc23ebe9d,
+0x202080201da0403d, 0x6868bd6867d5d00f, 0x1a1a681ad07234ca, 0xaeae82ae192c41b7,
+0xb4b4eab4c95e757d, 0x54544d549a19a8ce, 0x93937693ece53b7f, 0x222288220daa442f,
+0x64648d6407e9c863, 0xf1f1e3f1db12ff2a, 0x7373d173bfa2e6cc, 0x12124812905a2482,
+0x40401d403a5d807a, 0x0808200840281048, 0xc3c32bc356e89b95, 0xecec97ec337bc5df,
+0xdbdb4bdb9690ab4d, 0xa1a1bea1611f5fc0, 0x8d8d0e8d1c830791, 0x3d3df43df5c97ac8,
+0x97976697ccf1335b, 0x0000000000000000, 0xcfcf1bcf36d483f9, 0x2b2bac2b4587566e,
+0x7676c57697b3ece1, 0x8282328264b019e6, 0xd6d67fd6fea9b128, 0x1b1b6c1bd87736c3,
+0xb5b5eeb5c15b7774, 0xafaf86af112943be, 0x6a6ab56a77dfd41d, 0x50505d50ba0da0ea,
+0x45450945124c8a57, 0xf3f3ebf3cb18fb38, 0x3030c0309df060ad, 0xefef9bef2b74c3c4,
+0x3f3ffc3fe5c37eda, 0x55554955921caac7, 0xa2a2b2a2791059db, 0xeaea8fea0365c9e9,
+0x656589650fecca6a, 0xbabad2bab9686903, 0x2f2fbc2f65935e4a, 0xc0c027c04ee79d8e,
+0xdede5fdebe81a160, 0x1c1c701ce06c38fc, 0xfdfdd3fdbb2ee746, 0x4d4d294d52649a1f,
+0x92927292e4e03976, 0x7575c9758fbceafa, 0x06061806301e0c36, 0x8a8a128a249809ae,
+0xb2b2f2b2f940794b, 0xe6e6bfe66359d185, 0x0e0e380e70361c7e, 0x1f1f7c1ff8633ee7,
+0x6262956237f7c455, 0xd4d477d4eea3b53a, 0xa8a89aa829324d81, 0x96966296c4f43152,
+0xf9f9c3f99b3aef62, 0xc5c533c566f697a3, 0x2525942535b14a10, 0x59597959f220b2ab,
+0x84842a8454ae15d0, 0x7272d572b7a7e4c5, 0x3939e439d5dd72ec, 0x4c4c2d4c5a619816,
+0x5e5e655eca3bbc94, 0x7878fd78e785f09f, 0x3838e038ddd870e5, 0x8c8c0a8c14860598,
+0xd1d163d1c6b2bf17, 0xa5a5aea5410b57e4, 0xe2e2afe2434dd9a1, 0x616199612ff8c24e,
+0xb3b3f6b3f1457b42, 0x2121842115a54234, 0x9c9c4a9c94d62508, 0x1e1e781ef0663cee,
+0x4343114322528661, 0xc7c73bc776fc93b1, 0xfcfcd7fcb32be54f, 0x0404100420140824,
+0x51515951b208a2e3, 0x99995e99bcc72f25, 0x6d6da96d4fc4da22, 0x0d0d340d68391a65,
+0xfafacffa8335e979, 0xdfdf5bdfb684a369, 0x7e7ee57ed79bfca9, 0x242490243db44819,
+0x3b3bec3bc5d776fe, 0xabab96ab313d4b9a, 0xcece1fce3ed181f0, 0x1111441188552299,
+0x8f8f068f0c890383, 0x4e4e254e4a6b9c04, 0xb7b7e6b7d1517366, 0xebeb8beb0b60cbe0,
+0x3c3cf03cfdcc78c1, 0x81813e817cbf1ffd, 0x94946a94d4fe3540, 0xf7f7fbf7eb0cf31c,
+0xb9b9deb9a1676f18, 0x13134c13985f268b, 0x2c2cb02c7d9c5851, 0xd3d36bd3d6b8bb05,
+0xe7e7bbe76b5cd38c, 0x6e6ea56e57cbdc39, 0xc4c437c46ef395aa, 0x03030c03180f061b,
+0x565645568a13acdc, 0x44440d441a49885e, 0x7f7fe17fdf9efea0, 0xa9a99ea921374f88,
+0x2a2aa82a4d825467, 0xbbbbd6bbb16d6b0a, 0xc1c123c146e29f87, 0x53535153a202a6f1,
+0xdcdc57dcae8ba572, 0x0b0b2c0b58271653, 0x9d9d4e9d9cd32701, 0x6c6cad6c47c1d82b,
+0x3131c43195f562a4, 0x7474cd7487b9e8f3, 0xf6f6fff6e309f115, 0x464605460a438c4c,
+0xacac8aac092645a5, 0x89891e893c970fb5, 0x14145014a04428b4, 0xe1e1a3e15b42dfba,
+0x16165816b04e2ca6, 0x3a3ae83acdd274f7, 0x6969b9696fd0d206, 0x09092409482d1241,
+0x7070dd70a7ade0d7, 0xb6b6e2b6d954716f, 0xd0d067d0ceb7bd1e, 0xeded93ed3b7ec7d6,
+0xcccc17cc2edb85e2, 0x424215422a578468, 0x98985a98b4c22d2c, 0xa4a4aaa4490e55ed,
+0x2828a0285d885075, 0x5c5c6d5cda31b886, 0xf8f8c7f8933fed6b, 0x8686228644a411c2,
+]
+C1 = [
+0xd818186018c07830, 0x2623238c2305af46, 0xb8c6c63fc67ef991, 0xfbe8e887e8136fcd,
+0xcb878726874ca113, 0x11b8b8dab8a9626d, 0x0901010401080502, 0x0d4f4f214f426e9e,
+0x9b3636d836adee6c, 0xffa6a6a2a6590451, 0x0cd2d26fd2debdb9, 0x0ef5f5f3f5fb06f7,
+0x967979f979ef80f2, 0x306f6fa16f5fcede, 0x6d91917e91fcef3f, 0xf852525552aa07a4,
+0x4760609d6027fdc0, 0x35bcbccabc897665, 0x379b9b569baccd2b, 0x8a8e8e028e048c01,
+0xd2a3a3b6a371155b, 0x6c0c0c300c603c18, 0x847b7bf17bff8af6, 0x803535d435b5e16a,
+0xf51d1d741de8693a, 0xb3e0e0a7e05347dd, 0x21d7d77bd7f6acb3, 0x9cc2c22fc25eed99,
+0x432e2eb82e6d965c, 0x294b4b314b627a96, 0x5dfefedffea321e1, 0xd5575741578216ae,
+0xbd15155415a8412a, 0xe87777c1779fb6ee, 0x923737dc37a5eb6e, 0x9ee5e5b3e57b56d7,
+0x139f9f469f8cd923, 0x23f0f0e7f0d317fd, 0x204a4a354a6a7f94, 0x44dada4fda9e95a9,
+0xa258587d58fa25b0, 0xcfc9c903c906ca8f, 0x7c2929a429558d52, 0x5a0a0a280a502214,
+0x50b1b1feb1e14f7f, 0xc9a0a0baa0691a5d, 0x146b6bb16b7fdad6, 0xd985852e855cab17,
+0x3cbdbdcebd817367, 0x8f5d5d695dd234ba, 0x9010104010805020, 0x07f4f4f7f4f303f5,
+0xddcbcb0bcb16c08b, 0xd33e3ef83eedc67c, 0x2d0505140528110a, 0x78676781671fe6ce,
+0x97e4e4b7e47353d5, 0x0227279c2725bb4e, 0x7341411941325882, 0xa78b8b168b2c9d0b,
+0xf6a7a7a6a7510153, 0xb27d7de97dcf94fa, 0x4995956e95dcfb37, 0x56d8d847d88e9fad,
+0x70fbfbcbfb8b30eb, 0xcdeeee9fee2371c1, 0xbb7c7ced7cc791f8, 0x716666856617e3cc,
+0x7bdddd53dda68ea7, 0xaf17175c17b84b2e, 0x454747014702468e, 0x1a9e9e429e84dc21,
+0xd4caca0fca1ec589, 0x582d2db42d75995a, 0x2ebfbfc6bf917963, 0x3f07071c07381b0e,
+0xacadad8ead012347, 0xb05a5a755aea2fb4, 0xef838336836cb51b, 0xb63333cc3385ff66,
+0x5c636391633ff2c6, 0x1202020802100a04, 0x93aaaa92aa393849, 0xde7171d971afa8e2,
+0xc6c8c807c80ecf8d, 0xd119196419c87d32, 0x3b49493949727092, 0x5fd9d943d9869aaf,
+0x31f2f2eff2c31df9, 0xa8e3e3abe34b48db, 0xb95b5b715be22ab6, 0xbc88881a8834920d,
+0x3e9a9a529aa4c829, 0x0b262698262dbe4c, 0xbf3232c8328dfa64, 0x59b0b0fab0e94a7d,
+0xf2e9e983e91b6acf, 0x770f0f3c0f78331e, 0x33d5d573d5e6a6b7, 0xf480803a8074ba1d,
+0x27bebec2be997c61, 0xebcdcd13cd26de87, 0x893434d034bde468, 0x3248483d487a7590,
+0x54ffffdbffab24e3, 0x8d7a7af57af78ff4, 0x6490907a90f4ea3d, 0x9d5f5f615fc23ebe,
+0x3d202080201da040, 0x0f6868bd6867d5d0, 0xca1a1a681ad07234, 0xb7aeae82ae192c41,
+0x7db4b4eab4c95e75, 0xce54544d549a19a8, 0x7f93937693ece53b, 0x2f222288220daa44,
+0x6364648d6407e9c8, 0x2af1f1e3f1db12ff, 0xcc7373d173bfa2e6, 0x8212124812905a24,
+0x7a40401d403a5d80, 0x4808082008402810, 0x95c3c32bc356e89b, 0xdfecec97ec337bc5,
+0x4ddbdb4bdb9690ab, 0xc0a1a1bea1611f5f, 0x918d8d0e8d1c8307, 0xc83d3df43df5c97a,
+0x5b97976697ccf133, 0x0000000000000000, 0xf9cfcf1bcf36d483, 0x6e2b2bac2b458756,
+0xe17676c57697b3ec, 0xe68282328264b019, 0x28d6d67fd6fea9b1, 0xc31b1b6c1bd87736,
+0x74b5b5eeb5c15b77, 0xbeafaf86af112943, 0x1d6a6ab56a77dfd4, 0xea50505d50ba0da0,
+0x5745450945124c8a, 0x38f3f3ebf3cb18fb, 0xad3030c0309df060, 0xc4efef9bef2b74c3,
+0xda3f3ffc3fe5c37e, 0xc755554955921caa, 0xdba2a2b2a2791059, 0xe9eaea8fea0365c9,
+0x6a656589650fecca, 0x03babad2bab96869, 0x4a2f2fbc2f65935e, 0x8ec0c027c04ee79d,
+0x60dede5fdebe81a1, 0xfc1c1c701ce06c38, 0x46fdfdd3fdbb2ee7, 0x1f4d4d294d52649a,
+0x7692927292e4e039, 0xfa7575c9758fbcea, 0x3606061806301e0c, 0xae8a8a128a249809,
+0x4bb2b2f2b2f94079, 0x85e6e6bfe66359d1, 0x7e0e0e380e70361c, 0xe71f1f7c1ff8633e,
+0x556262956237f7c4, 0x3ad4d477d4eea3b5, 0x81a8a89aa829324d, 0x5296966296c4f431,
+0x62f9f9c3f99b3aef, 0xa3c5c533c566f697, 0x102525942535b14a, 0xab59597959f220b2,
+0xd084842a8454ae15, 0xc57272d572b7a7e4, 0xec3939e439d5dd72, 0x164c4c2d4c5a6198,
+0x945e5e655eca3bbc, 0x9f7878fd78e785f0, 0xe53838e038ddd870, 0x988c8c0a8c148605,
+0x17d1d163d1c6b2bf, 0xe4a5a5aea5410b57, 0xa1e2e2afe2434dd9, 0x4e616199612ff8c2,
+0x42b3b3f6b3f1457b, 0x342121842115a542, 0x089c9c4a9c94d625, 0xee1e1e781ef0663c,
+0x6143431143225286, 0xb1c7c73bc776fc93, 0x4ffcfcd7fcb32be5, 0x2404041004201408,
+0xe351515951b208a2, 0x2599995e99bcc72f, 0x226d6da96d4fc4da, 0x650d0d340d68391a,
+0x79fafacffa8335e9, 0x69dfdf5bdfb684a3, 0xa97e7ee57ed79bfc, 0x19242490243db448,
+0xfe3b3bec3bc5d776, 0x9aabab96ab313d4b, 0xf0cece1fce3ed181, 0x9911114411885522,
+0x838f8f068f0c8903, 0x044e4e254e4a6b9c, 0x66b7b7e6b7d15173, 0xe0ebeb8beb0b60cb,
+0xc13c3cf03cfdcc78, 0xfd81813e817cbf1f, 0x4094946a94d4fe35, 0x1cf7f7fbf7eb0cf3,
+0x18b9b9deb9a1676f, 0x8b13134c13985f26, 0x512c2cb02c7d9c58, 0x05d3d36bd3d6b8bb,
+0x8ce7e7bbe76b5cd3, 0x396e6ea56e57cbdc, 0xaac4c437c46ef395, 0x1b03030c03180f06,
+0xdc565645568a13ac, 0x5e44440d441a4988, 0xa07f7fe17fdf9efe, 0x88a9a99ea921374f,
+0x672a2aa82a4d8254, 0x0abbbbd6bbb16d6b, 0x87c1c123c146e29f, 0xf153535153a202a6,
+0x72dcdc57dcae8ba5, 0x530b0b2c0b582716, 0x019d9d4e9d9cd327, 0x2b6c6cad6c47c1d8,
+0xa43131c43195f562, 0xf37474cd7487b9e8, 0x15f6f6fff6e309f1, 0x4c464605460a438c,
+0xa5acac8aac092645, 0xb589891e893c970f, 0xb414145014a04428, 0xbae1e1a3e15b42df,
+0xa616165816b04e2c, 0xf73a3ae83acdd274, 0x066969b9696fd0d2, 0x4109092409482d12,
+0xd77070dd70a7ade0, 0x6fb6b6e2b6d95471, 0x1ed0d067d0ceb7bd, 0xd6eded93ed3b7ec7,
+0xe2cccc17cc2edb85, 0x68424215422a5784, 0x2c98985a98b4c22d, 0xeda4a4aaa4490e55,
+0x752828a0285d8850, 0x865c5c6d5cda31b8, 0x6bf8f8c7f8933fed, 0xc28686228644a411,
+]
+C2 = [
+0x30d818186018c078, 0x462623238c2305af, 0x91b8c6c63fc67ef9, 0xcdfbe8e887e8136f,
+0x13cb878726874ca1, 0x6d11b8b8dab8a962, 0x0209010104010805, 0x9e0d4f4f214f426e,
+0x6c9b3636d836adee, 0x51ffa6a6a2a65904, 0xb90cd2d26fd2debd, 0xf70ef5f5f3f5fb06,
+0xf2967979f979ef80, 0xde306f6fa16f5fce, 0x3f6d91917e91fcef, 0xa4f852525552aa07,
+0xc04760609d6027fd, 0x6535bcbccabc8976, 0x2b379b9b569baccd, 0x018a8e8e028e048c,
+0x5bd2a3a3b6a37115, 0x186c0c0c300c603c, 0xf6847b7bf17bff8a, 0x6a803535d435b5e1,
+0x3af51d1d741de869, 0xddb3e0e0a7e05347, 0xb321d7d77bd7f6ac, 0x999cc2c22fc25eed,
+0x5c432e2eb82e6d96, 0x96294b4b314b627a, 0xe15dfefedffea321, 0xaed5575741578216,
+0x2abd15155415a841, 0xeee87777c1779fb6, 0x6e923737dc37a5eb, 0xd79ee5e5b3e57b56,
+0x23139f9f469f8cd9, 0xfd23f0f0e7f0d317, 0x94204a4a354a6a7f, 0xa944dada4fda9e95,
+0xb0a258587d58fa25, 0x8fcfc9c903c906ca, 0x527c2929a429558d, 0x145a0a0a280a5022,
+0x7f50b1b1feb1e14f, 0x5dc9a0a0baa0691a, 0xd6146b6bb16b7fda, 0x17d985852e855cab,
+0x673cbdbdcebd8173, 0xba8f5d5d695dd234, 0x2090101040108050, 0xf507f4f4f7f4f303,
+0x8bddcbcb0bcb16c0, 0x7cd33e3ef83eedc6, 0x0a2d050514052811, 0xce78676781671fe6,
+0xd597e4e4b7e47353, 0x4e0227279c2725bb, 0x8273414119413258, 0x0ba78b8b168b2c9d,
+0x53f6a7a7a6a75101, 0xfab27d7de97dcf94, 0x374995956e95dcfb, 0xad56d8d847d88e9f,
+0xeb70fbfbcbfb8b30, 0xc1cdeeee9fee2371, 0xf8bb7c7ced7cc791, 0xcc716666856617e3,
+0xa77bdddd53dda68e, 0x2eaf17175c17b84b, 0x8e45474701470246, 0x211a9e9e429e84dc,
+0x89d4caca0fca1ec5, 0x5a582d2db42d7599, 0x632ebfbfc6bf9179, 0x0e3f07071c07381b,
+0x47acadad8ead0123, 0xb4b05a5a755aea2f, 0x1bef838336836cb5, 0x66b63333cc3385ff,
+0xc65c636391633ff2, 0x041202020802100a, 0x4993aaaa92aa3938, 0xe2de7171d971afa8,
+0x8dc6c8c807c80ecf, 0x32d119196419c87d, 0x923b494939497270, 0xaf5fd9d943d9869a,
+0xf931f2f2eff2c31d, 0xdba8e3e3abe34b48, 0xb6b95b5b715be22a, 0x0dbc88881a883492,
+0x293e9a9a529aa4c8, 0x4c0b262698262dbe, 0x64bf3232c8328dfa, 0x7d59b0b0fab0e94a,
+0xcff2e9e983e91b6a, 0x1e770f0f3c0f7833, 0xb733d5d573d5e6a6, 0x1df480803a8074ba,
+0x6127bebec2be997c, 0x87ebcdcd13cd26de, 0x68893434d034bde4, 0x903248483d487a75,
+0xe354ffffdbffab24, 0xf48d7a7af57af78f, 0x3d6490907a90f4ea, 0xbe9d5f5f615fc23e,
+0x403d202080201da0, 0xd00f6868bd6867d5, 0x34ca1a1a681ad072, 0x41b7aeae82ae192c,
+0x757db4b4eab4c95e, 0xa8ce54544d549a19, 0x3b7f93937693ece5, 0x442f222288220daa,
+0xc86364648d6407e9, 0xff2af1f1e3f1db12, 0xe6cc7373d173bfa2, 0x248212124812905a,
+0x807a40401d403a5d, 0x1048080820084028, 0x9b95c3c32bc356e8, 0xc5dfecec97ec337b,
+0xab4ddbdb4bdb9690, 0x5fc0a1a1bea1611f, 0x07918d8d0e8d1c83, 0x7ac83d3df43df5c9,
+0x335b97976697ccf1, 0x0000000000000000, 0x83f9cfcf1bcf36d4, 0x566e2b2bac2b4587,
+0xece17676c57697b3, 0x19e68282328264b0, 0xb128d6d67fd6fea9, 0x36c31b1b6c1bd877,
+0x7774b5b5eeb5c15b, 0x43beafaf86af1129, 0xd41d6a6ab56a77df, 0xa0ea50505d50ba0d,
+0x8a5745450945124c, 0xfb38f3f3ebf3cb18, 0x60ad3030c0309df0, 0xc3c4efef9bef2b74,
+0x7eda3f3ffc3fe5c3, 0xaac755554955921c, 0x59dba2a2b2a27910, 0xc9e9eaea8fea0365,
+0xca6a656589650fec, 0x6903babad2bab968, 0x5e4a2f2fbc2f6593, 0x9d8ec0c027c04ee7,
+0xa160dede5fdebe81, 0x38fc1c1c701ce06c, 0xe746fdfdd3fdbb2e, 0x9a1f4d4d294d5264,
+0x397692927292e4e0, 0xeafa7575c9758fbc, 0x0c3606061806301e, 0x09ae8a8a128a2498,
+0x794bb2b2f2b2f940, 0xd185e6e6bfe66359, 0x1c7e0e0e380e7036, 0x3ee71f1f7c1ff863,
+0xc4556262956237f7, 0xb53ad4d477d4eea3, 0x4d81a8a89aa82932, 0x315296966296c4f4,
+0xef62f9f9c3f99b3a, 0x97a3c5c533c566f6, 0x4a102525942535b1, 0xb2ab59597959f220,
+0x15d084842a8454ae, 0xe4c57272d572b7a7, 0x72ec3939e439d5dd, 0x98164c4c2d4c5a61,
+0xbc945e5e655eca3b, 0xf09f7878fd78e785, 0x70e53838e038ddd8, 0x05988c8c0a8c1486,
+0xbf17d1d163d1c6b2, 0x57e4a5a5aea5410b, 0xd9a1e2e2afe2434d, 0xc24e616199612ff8,
+0x7b42b3b3f6b3f145, 0x42342121842115a5, 0x25089c9c4a9c94d6, 0x3cee1e1e781ef066,
+0x8661434311432252, 0x93b1c7c73bc776fc, 0xe54ffcfcd7fcb32b, 0x0824040410042014,
+0xa2e351515951b208, 0x2f2599995e99bcc7, 0xda226d6da96d4fc4, 0x1a650d0d340d6839,
+0xe979fafacffa8335, 0xa369dfdf5bdfb684, 0xfca97e7ee57ed79b, 0x4819242490243db4,
+0x76fe3b3bec3bc5d7, 0x4b9aabab96ab313d, 0x81f0cece1fce3ed1, 0x2299111144118855,
+0x03838f8f068f0c89, 0x9c044e4e254e4a6b, 0x7366b7b7e6b7d151, 0xcbe0ebeb8beb0b60,
+0x78c13c3cf03cfdcc, 0x1ffd81813e817cbf, 0x354094946a94d4fe, 0xf31cf7f7fbf7eb0c,
+0x6f18b9b9deb9a167, 0x268b13134c13985f, 0x58512c2cb02c7d9c, 0xbb05d3d36bd3d6b8,
+0xd38ce7e7bbe76b5c, 0xdc396e6ea56e57cb, 0x95aac4c437c46ef3, 0x061b03030c03180f,
+0xacdc565645568a13, 0x885e44440d441a49, 0xfea07f7fe17fdf9e, 0x4f88a9a99ea92137,
+0x54672a2aa82a4d82, 0x6b0abbbbd6bbb16d, 0x9f87c1c123c146e2, 0xa6f153535153a202,
+0xa572dcdc57dcae8b, 0x16530b0b2c0b5827, 0x27019d9d4e9d9cd3, 0xd82b6c6cad6c47c1,
+0x62a43131c43195f5, 0xe8f37474cd7487b9, 0xf115f6f6fff6e309, 0x8c4c464605460a43,
+0x45a5acac8aac0926, 0x0fb589891e893c97, 0x28b414145014a044, 0xdfbae1e1a3e15b42,
+0x2ca616165816b04e, 0x74f73a3ae83acdd2, 0xd2066969b9696fd0, 0x124109092409482d,
+0xe0d77070dd70a7ad, 0x716fb6b6e2b6d954, 0xbd1ed0d067d0ceb7, 0xc7d6eded93ed3b7e,
+0x85e2cccc17cc2edb, 0x8468424215422a57, 0x2d2c98985a98b4c2, 0x55eda4a4aaa4490e,
+0x50752828a0285d88, 0xb8865c5c6d5cda31, 0xed6bf8f8c7f8933f, 0x11c28686228644a4,
+]
+C3 = [
+0x7830d818186018c0, 0xaf462623238c2305, 0xf991b8c6c63fc67e, 0x6fcdfbe8e887e813,
+0xa113cb878726874c, 0x626d11b8b8dab8a9, 0x0502090101040108, 0x6e9e0d4f4f214f42,
+0xee6c9b3636d836ad, 0x0451ffa6a6a2a659, 0xbdb90cd2d26fd2de, 0x06f70ef5f5f3f5fb,
+0x80f2967979f979ef, 0xcede306f6fa16f5f, 0xef3f6d91917e91fc, 0x07a4f852525552aa,
+0xfdc04760609d6027, 0x766535bcbccabc89, 0xcd2b379b9b569bac, 0x8c018a8e8e028e04,
+0x155bd2a3a3b6a371, 0x3c186c0c0c300c60, 0x8af6847b7bf17bff, 0xe16a803535d435b5,
+0x693af51d1d741de8, 0x47ddb3e0e0a7e053, 0xacb321d7d77bd7f6, 0xed999cc2c22fc25e,
+0x965c432e2eb82e6d, 0x7a96294b4b314b62, 0x21e15dfefedffea3, 0x16aed55757415782,
+0x412abd15155415a8, 0xb6eee87777c1779f, 0xeb6e923737dc37a5, 0x56d79ee5e5b3e57b,
+0xd923139f9f469f8c, 0x17fd23f0f0e7f0d3, 0x7f94204a4a354a6a, 0x95a944dada4fda9e,
+0x25b0a258587d58fa, 0xca8fcfc9c903c906, 0x8d527c2929a42955, 0x22145a0a0a280a50,
+0x4f7f50b1b1feb1e1, 0x1a5dc9a0a0baa069, 0xdad6146b6bb16b7f, 0xab17d985852e855c,
+0x73673cbdbdcebd81, 0x34ba8f5d5d695dd2, 0x5020901010401080, 0x03f507f4f4f7f4f3,
+0xc08bddcbcb0bcb16, 0xc67cd33e3ef83eed, 0x110a2d0505140528, 0xe6ce78676781671f,
+0x53d597e4e4b7e473, 0xbb4e0227279c2725, 0x5882734141194132, 0x9d0ba78b8b168b2c,
+0x0153f6a7a7a6a751, 0x94fab27d7de97dcf, 0xfb374995956e95dc, 0x9fad56d8d847d88e,
+0x30eb70fbfbcbfb8b, 0x71c1cdeeee9fee23, 0x91f8bb7c7ced7cc7, 0xe3cc716666856617,
+0x8ea77bdddd53dda6, 0x4b2eaf17175c17b8, 0x468e454747014702, 0xdc211a9e9e429e84,
+0xc589d4caca0fca1e, 0x995a582d2db42d75, 0x79632ebfbfc6bf91, 0x1b0e3f07071c0738,
+0x2347acadad8ead01, 0x2fb4b05a5a755aea, 0xb51bef838336836c, 0xff66b63333cc3385,
+0xf2c65c636391633f, 0x0a04120202080210, 0x384993aaaa92aa39, 0xa8e2de7171d971af,
+0xcf8dc6c8c807c80e, 0x7d32d119196419c8, 0x70923b4949394972, 0x9aaf5fd9d943d986,
+0x1df931f2f2eff2c3, 0x48dba8e3e3abe34b, 0x2ab6b95b5b715be2, 0x920dbc88881a8834,
+0xc8293e9a9a529aa4, 0xbe4c0b262698262d, 0xfa64bf3232c8328d, 0x4a7d59b0b0fab0e9,
+0x6acff2e9e983e91b, 0x331e770f0f3c0f78, 0xa6b733d5d573d5e6, 0xba1df480803a8074,
+0x7c6127bebec2be99, 0xde87ebcdcd13cd26, 0xe468893434d034bd, 0x75903248483d487a,
+0x24e354ffffdbffab, 0x8ff48d7a7af57af7, 0xea3d6490907a90f4, 0x3ebe9d5f5f615fc2,
+0xa0403d202080201d, 0xd5d00f6868bd6867, 0x7234ca1a1a681ad0, 0x2c41b7aeae82ae19,
+0x5e757db4b4eab4c9, 0x19a8ce54544d549a, 0xe53b7f93937693ec, 0xaa442f222288220d,
+0xe9c86364648d6407, 0x12ff2af1f1e3f1db, 0xa2e6cc7373d173bf, 0x5a24821212481290,
+0x5d807a40401d403a, 0x2810480808200840, 0xe89b95c3c32bc356, 0x7bc5dfecec97ec33,
+0x90ab4ddbdb4bdb96, 0x1f5fc0a1a1bea161, 0x8307918d8d0e8d1c, 0xc97ac83d3df43df5,
+0xf1335b97976697cc, 0x0000000000000000, 0xd483f9cfcf1bcf36, 0x87566e2b2bac2b45,
+0xb3ece17676c57697, 0xb019e68282328264, 0xa9b128d6d67fd6fe, 0x7736c31b1b6c1bd8,
+0x5b7774b5b5eeb5c1, 0x2943beafaf86af11, 0xdfd41d6a6ab56a77, 0x0da0ea50505d50ba,
+0x4c8a574545094512, 0x18fb38f3f3ebf3cb, 0xf060ad3030c0309d, 0x74c3c4efef9bef2b,
+0xc37eda3f3ffc3fe5, 0x1caac75555495592, 0x1059dba2a2b2a279, 0x65c9e9eaea8fea03,
+0xecca6a656589650f, 0x686903babad2bab9, 0x935e4a2f2fbc2f65, 0xe79d8ec0c027c04e,
+0x81a160dede5fdebe, 0x6c38fc1c1c701ce0, 0x2ee746fdfdd3fdbb, 0x649a1f4d4d294d52,
+0xe0397692927292e4, 0xbceafa7575c9758f, 0x1e0c360606180630, 0x9809ae8a8a128a24,
+0x40794bb2b2f2b2f9, 0x59d185e6e6bfe663, 0x361c7e0e0e380e70, 0x633ee71f1f7c1ff8,
+0xf7c4556262956237, 0xa3b53ad4d477d4ee, 0x324d81a8a89aa829, 0xf4315296966296c4,
+0x3aef62f9f9c3f99b, 0xf697a3c5c533c566, 0xb14a102525942535, 0x20b2ab59597959f2,
+0xae15d084842a8454, 0xa7e4c57272d572b7, 0xdd72ec3939e439d5, 0x6198164c4c2d4c5a,
+0x3bbc945e5e655eca, 0x85f09f7878fd78e7, 0xd870e53838e038dd, 0x8605988c8c0a8c14,
+0xb2bf17d1d163d1c6, 0x0b57e4a5a5aea541, 0x4dd9a1e2e2afe243, 0xf8c24e616199612f,
+0x457b42b3b3f6b3f1, 0xa542342121842115, 0xd625089c9c4a9c94, 0x663cee1e1e781ef0,
+0x5286614343114322, 0xfc93b1c7c73bc776, 0x2be54ffcfcd7fcb3, 0x1408240404100420,
+0x08a2e351515951b2, 0xc72f2599995e99bc, 0xc4da226d6da96d4f, 0x391a650d0d340d68,
+0x35e979fafacffa83, 0x84a369dfdf5bdfb6, 0x9bfca97e7ee57ed7, 0xb44819242490243d,
+0xd776fe3b3bec3bc5, 0x3d4b9aabab96ab31, 0xd181f0cece1fce3e, 0x5522991111441188,
+0x8903838f8f068f0c, 0x6b9c044e4e254e4a, 0x517366b7b7e6b7d1, 0x60cbe0ebeb8beb0b,
+0xcc78c13c3cf03cfd, 0xbf1ffd81813e817c, 0xfe354094946a94d4, 0x0cf31cf7f7fbf7eb,
+0x676f18b9b9deb9a1, 0x5f268b13134c1398, 0x9c58512c2cb02c7d, 0xb8bb05d3d36bd3d6,
+0x5cd38ce7e7bbe76b, 0xcbdc396e6ea56e57, 0xf395aac4c437c46e, 0x0f061b03030c0318,
+0x13acdc565645568a, 0x49885e44440d441a, 0x9efea07f7fe17fdf, 0x374f88a9a99ea921,
+0x8254672a2aa82a4d, 0x6d6b0abbbbd6bbb1, 0xe29f87c1c123c146, 0x02a6f153535153a2,
+0x8ba572dcdc57dcae, 0x2716530b0b2c0b58, 0xd327019d9d4e9d9c, 0xc1d82b6c6cad6c47,
+0xf562a43131c43195, 0xb9e8f37474cd7487, 0x09f115f6f6fff6e3, 0x438c4c464605460a,
+0x2645a5acac8aac09, 0x970fb589891e893c, 0x4428b414145014a0, 0x42dfbae1e1a3e15b,
+0x4e2ca616165816b0, 0xd274f73a3ae83acd, 0xd0d2066969b9696f, 0x2d12410909240948,
+0xade0d77070dd70a7, 0x54716fb6b6e2b6d9, 0xb7bd1ed0d067d0ce, 0x7ec7d6eded93ed3b,
+0xdb85e2cccc17cc2e, 0x578468424215422a, 0xc22d2c98985a98b4, 0x0e55eda4a4aaa449,
+0x8850752828a0285d, 0x31b8865c5c6d5cda, 0x3fed6bf8f8c7f893, 0xa411c28686228644,
+]
+C4 = [
+0xc07830d818186018, 0x05af462623238c23, 0x7ef991b8c6c63fc6, 0x136fcdfbe8e887e8,
+0x4ca113cb87872687, 0xa9626d11b8b8dab8, 0x0805020901010401, 0x426e9e0d4f4f214f,
+0xadee6c9b3636d836, 0x590451ffa6a6a2a6, 0xdebdb90cd2d26fd2, 0xfb06f70ef5f5f3f5,
+0xef80f2967979f979, 0x5fcede306f6fa16f, 0xfcef3f6d91917e91, 0xaa07a4f852525552,
+0x27fdc04760609d60, 0x89766535bcbccabc, 0xaccd2b379b9b569b, 0x048c018a8e8e028e,
+0x71155bd2a3a3b6a3, 0x603c186c0c0c300c, 0xff8af6847b7bf17b, 0xb5e16a803535d435,
+0xe8693af51d1d741d, 0x5347ddb3e0e0a7e0, 0xf6acb321d7d77bd7, 0x5eed999cc2c22fc2,
+0x6d965c432e2eb82e, 0x627a96294b4b314b, 0xa321e15dfefedffe, 0x8216aed557574157,
+0xa8412abd15155415, 0x9fb6eee87777c177, 0xa5eb6e923737dc37, 0x7b56d79ee5e5b3e5,
+0x8cd923139f9f469f, 0xd317fd23f0f0e7f0, 0x6a7f94204a4a354a, 0x9e95a944dada4fda,
+0xfa25b0a258587d58, 0x06ca8fcfc9c903c9, 0x558d527c2929a429, 0x5022145a0a0a280a,
+0xe14f7f50b1b1feb1, 0x691a5dc9a0a0baa0, 0x7fdad6146b6bb16b, 0x5cab17d985852e85,
+0x8173673cbdbdcebd, 0xd234ba8f5d5d695d, 0x8050209010104010, 0xf303f507f4f4f7f4,
+0x16c08bddcbcb0bcb, 0xedc67cd33e3ef83e, 0x28110a2d05051405, 0x1fe6ce7867678167,
+0x7353d597e4e4b7e4, 0x25bb4e0227279c27, 0x3258827341411941, 0x2c9d0ba78b8b168b,
+0x510153f6a7a7a6a7, 0xcf94fab27d7de97d, 0xdcfb374995956e95, 0x8e9fad56d8d847d8,
+0x8b30eb70fbfbcbfb, 0x2371c1cdeeee9fee, 0xc791f8bb7c7ced7c, 0x17e3cc7166668566,
+0xa68ea77bdddd53dd, 0xb84b2eaf17175c17, 0x02468e4547470147, 0x84dc211a9e9e429e,
+0x1ec589d4caca0fca, 0x75995a582d2db42d, 0x9179632ebfbfc6bf, 0x381b0e3f07071c07,
+0x012347acadad8ead, 0xea2fb4b05a5a755a, 0x6cb51bef83833683, 0x85ff66b63333cc33,
+0x3ff2c65c63639163, 0x100a041202020802, 0x39384993aaaa92aa, 0xafa8e2de7171d971,
+0x0ecf8dc6c8c807c8, 0xc87d32d119196419, 0x7270923b49493949, 0x869aaf5fd9d943d9,
+0xc31df931f2f2eff2, 0x4b48dba8e3e3abe3, 0xe22ab6b95b5b715b, 0x34920dbc88881a88,
+0xa4c8293e9a9a529a, 0x2dbe4c0b26269826, 0x8dfa64bf3232c832, 0xe94a7d59b0b0fab0,
+0x1b6acff2e9e983e9, 0x78331e770f0f3c0f, 0xe6a6b733d5d573d5, 0x74ba1df480803a80,
+0x997c6127bebec2be, 0x26de87ebcdcd13cd, 0xbde468893434d034, 0x7a75903248483d48,
+0xab24e354ffffdbff, 0xf78ff48d7a7af57a, 0xf4ea3d6490907a90, 0xc23ebe9d5f5f615f,
+0x1da0403d20208020, 0x67d5d00f6868bd68, 0xd07234ca1a1a681a, 0x192c41b7aeae82ae,
+0xc95e757db4b4eab4, 0x9a19a8ce54544d54, 0xece53b7f93937693, 0x0daa442f22228822,
+0x07e9c86364648d64, 0xdb12ff2af1f1e3f1, 0xbfa2e6cc7373d173, 0x905a248212124812,
+0x3a5d807a40401d40, 0x4028104808082008, 0x56e89b95c3c32bc3, 0x337bc5dfecec97ec,
+0x9690ab4ddbdb4bdb, 0x611f5fc0a1a1bea1, 0x1c8307918d8d0e8d, 0xf5c97ac83d3df43d,
+0xccf1335b97976697, 0x0000000000000000, 0x36d483f9cfcf1bcf, 0x4587566e2b2bac2b,
+0x97b3ece17676c576, 0x64b019e682823282, 0xfea9b128d6d67fd6, 0xd87736c31b1b6c1b,
+0xc15b7774b5b5eeb5, 0x112943beafaf86af, 0x77dfd41d6a6ab56a, 0xba0da0ea50505d50,
+0x124c8a5745450945, 0xcb18fb38f3f3ebf3, 0x9df060ad3030c030, 0x2b74c3c4efef9bef,
+0xe5c37eda3f3ffc3f, 0x921caac755554955, 0x791059dba2a2b2a2, 0x0365c9e9eaea8fea,
+0x0fecca6a65658965, 0xb9686903babad2ba, 0x65935e4a2f2fbc2f, 0x4ee79d8ec0c027c0,
+0xbe81a160dede5fde, 0xe06c38fc1c1c701c, 0xbb2ee746fdfdd3fd, 0x52649a1f4d4d294d,
+0xe4e0397692927292, 0x8fbceafa7575c975, 0x301e0c3606061806, 0x249809ae8a8a128a,
+0xf940794bb2b2f2b2, 0x6359d185e6e6bfe6, 0x70361c7e0e0e380e, 0xf8633ee71f1f7c1f,
+0x37f7c45562629562, 0xeea3b53ad4d477d4, 0x29324d81a8a89aa8, 0xc4f4315296966296,
+0x9b3aef62f9f9c3f9, 0x66f697a3c5c533c5, 0x35b14a1025259425, 0xf220b2ab59597959,
+0x54ae15d084842a84, 0xb7a7e4c57272d572, 0xd5dd72ec3939e439, 0x5a6198164c4c2d4c,
+0xca3bbc945e5e655e, 0xe785f09f7878fd78, 0xddd870e53838e038, 0x148605988c8c0a8c,
+0xc6b2bf17d1d163d1, 0x410b57e4a5a5aea5, 0x434dd9a1e2e2afe2, 0x2ff8c24e61619961,
+0xf1457b42b3b3f6b3, 0x15a5423421218421, 0x94d625089c9c4a9c, 0xf0663cee1e1e781e,
+0x2252866143431143, 0x76fc93b1c7c73bc7, 0xb32be54ffcfcd7fc, 0x2014082404041004,
+0xb208a2e351515951, 0xbcc72f2599995e99, 0x4fc4da226d6da96d, 0x68391a650d0d340d,
+0x8335e979fafacffa, 0xb684a369dfdf5bdf, 0xd79bfca97e7ee57e, 0x3db4481924249024,
+0xc5d776fe3b3bec3b, 0x313d4b9aabab96ab, 0x3ed181f0cece1fce, 0x8855229911114411,
+0x0c8903838f8f068f, 0x4a6b9c044e4e254e, 0xd1517366b7b7e6b7, 0x0b60cbe0ebeb8beb,
+0xfdcc78c13c3cf03c, 0x7cbf1ffd81813e81, 0xd4fe354094946a94, 0xeb0cf31cf7f7fbf7,
+0xa1676f18b9b9deb9, 0x985f268b13134c13, 0x7d9c58512c2cb02c, 0xd6b8bb05d3d36bd3,
+0x6b5cd38ce7e7bbe7, 0x57cbdc396e6ea56e, 0x6ef395aac4c437c4, 0x180f061b03030c03,
+0x8a13acdc56564556, 0x1a49885e44440d44, 0xdf9efea07f7fe17f, 0x21374f88a9a99ea9,
+0x4d8254672a2aa82a, 0xb16d6b0abbbbd6bb, 0x46e29f87c1c123c1, 0xa202a6f153535153,
+0xae8ba572dcdc57dc, 0x582716530b0b2c0b, 0x9cd327019d9d4e9d, 0x47c1d82b6c6cad6c,
+0x95f562a43131c431, 0x87b9e8f37474cd74, 0xe309f115f6f6fff6, 0x0a438c4c46460546,
+0x092645a5acac8aac, 0x3c970fb589891e89, 0xa04428b414145014, 0x5b42dfbae1e1a3e1,
+0xb04e2ca616165816, 0xcdd274f73a3ae83a, 0x6fd0d2066969b969, 0x482d124109092409,
+0xa7ade0d77070dd70, 0xd954716fb6b6e2b6, 0xceb7bd1ed0d067d0, 0x3b7ec7d6eded93ed,
+0x2edb85e2cccc17cc, 0x2a57846842421542, 0xb4c22d2c98985a98, 0x490e55eda4a4aaa4,
+0x5d8850752828a028, 0xda31b8865c5c6d5c, 0x933fed6bf8f8c7f8, 0x44a411c286862286,
+]
+C5 = [
+0x18c07830d8181860, 0x2305af462623238c, 0xc67ef991b8c6c63f, 0xe8136fcdfbe8e887,
+0x874ca113cb878726, 0xb8a9626d11b8b8da, 0x0108050209010104, 0x4f426e9e0d4f4f21,
+0x36adee6c9b3636d8, 0xa6590451ffa6a6a2, 0xd2debdb90cd2d26f, 0xf5fb06f70ef5f5f3,
+0x79ef80f2967979f9, 0x6f5fcede306f6fa1, 0x91fcef3f6d91917e, 0x52aa07a4f8525255,
+0x6027fdc04760609d, 0xbc89766535bcbcca, 0x9baccd2b379b9b56, 0x8e048c018a8e8e02,
+0xa371155bd2a3a3b6, 0x0c603c186c0c0c30, 0x7bff8af6847b7bf1, 0x35b5e16a803535d4,
+0x1de8693af51d1d74, 0xe05347ddb3e0e0a7, 0xd7f6acb321d7d77b, 0xc25eed999cc2c22f,
+0x2e6d965c432e2eb8, 0x4b627a96294b4b31, 0xfea321e15dfefedf, 0x578216aed5575741,
+0x15a8412abd151554, 0x779fb6eee87777c1, 0x37a5eb6e923737dc, 0xe57b56d79ee5e5b3,
+0x9f8cd923139f9f46, 0xf0d317fd23f0f0e7, 0x4a6a7f94204a4a35, 0xda9e95a944dada4f,
+0x58fa25b0a258587d, 0xc906ca8fcfc9c903, 0x29558d527c2929a4, 0x0a5022145a0a0a28,
+0xb1e14f7f50b1b1fe, 0xa0691a5dc9a0a0ba, 0x6b7fdad6146b6bb1, 0x855cab17d985852e,
+0xbd8173673cbdbdce, 0x5dd234ba8f5d5d69, 0x1080502090101040, 0xf4f303f507f4f4f7,
+0xcb16c08bddcbcb0b, 0x3eedc67cd33e3ef8, 0x0528110a2d050514, 0x671fe6ce78676781,
+0xe47353d597e4e4b7, 0x2725bb4e0227279c, 0x4132588273414119, 0x8b2c9d0ba78b8b16,
+0xa7510153f6a7a7a6, 0x7dcf94fab27d7de9, 0x95dcfb374995956e, 0xd88e9fad56d8d847,
+0xfb8b30eb70fbfbcb, 0xee2371c1cdeeee9f, 0x7cc791f8bb7c7ced, 0x6617e3cc71666685,
+0xdda68ea77bdddd53, 0x17b84b2eaf17175c, 0x4702468e45474701, 0x9e84dc211a9e9e42,
+0xca1ec589d4caca0f, 0x2d75995a582d2db4, 0xbf9179632ebfbfc6, 0x07381b0e3f07071c,
+0xad012347acadad8e, 0x5aea2fb4b05a5a75, 0x836cb51bef838336, 0x3385ff66b63333cc,
+0x633ff2c65c636391, 0x02100a0412020208, 0xaa39384993aaaa92, 0x71afa8e2de7171d9,
+0xc80ecf8dc6c8c807, 0x19c87d32d1191964, 0x497270923b494939, 0xd9869aaf5fd9d943,
+0xf2c31df931f2f2ef, 0xe34b48dba8e3e3ab, 0x5be22ab6b95b5b71, 0x8834920dbc88881a,
+0x9aa4c8293e9a9a52, 0x262dbe4c0b262698, 0x328dfa64bf3232c8, 0xb0e94a7d59b0b0fa,
+0xe91b6acff2e9e983, 0x0f78331e770f0f3c, 0xd5e6a6b733d5d573, 0x8074ba1df480803a,
+0xbe997c6127bebec2, 0xcd26de87ebcdcd13, 0x34bde468893434d0, 0x487a75903248483d,
+0xffab24e354ffffdb, 0x7af78ff48d7a7af5, 0x90f4ea3d6490907a, 0x5fc23ebe9d5f5f61,
+0x201da0403d202080, 0x6867d5d00f6868bd, 0x1ad07234ca1a1a68, 0xae192c41b7aeae82,
+0xb4c95e757db4b4ea, 0x549a19a8ce54544d, 0x93ece53b7f939376, 0x220daa442f222288,
+0x6407e9c86364648d, 0xf1db12ff2af1f1e3, 0x73bfa2e6cc7373d1, 0x12905a2482121248,
+0x403a5d807a40401d, 0x0840281048080820, 0xc356e89b95c3c32b, 0xec337bc5dfecec97,
+0xdb9690ab4ddbdb4b, 0xa1611f5fc0a1a1be, 0x8d1c8307918d8d0e, 0x3df5c97ac83d3df4,
+0x97ccf1335b979766, 0x0000000000000000, 0xcf36d483f9cfcf1b, 0x2b4587566e2b2bac,
+0x7697b3ece17676c5, 0x8264b019e6828232, 0xd6fea9b128d6d67f, 0x1bd87736c31b1b6c,
+0xb5c15b7774b5b5ee, 0xaf112943beafaf86, 0x6a77dfd41d6a6ab5, 0x50ba0da0ea50505d,
+0x45124c8a57454509, 0xf3cb18fb38f3f3eb, 0x309df060ad3030c0, 0xef2b74c3c4efef9b,
+0x3fe5c37eda3f3ffc, 0x55921caac7555549, 0xa2791059dba2a2b2, 0xea0365c9e9eaea8f,
+0x650fecca6a656589, 0xbab9686903babad2, 0x2f65935e4a2f2fbc, 0xc04ee79d8ec0c027,
+0xdebe81a160dede5f, 0x1ce06c38fc1c1c70, 0xfdbb2ee746fdfdd3, 0x4d52649a1f4d4d29,
+0x92e4e03976929272, 0x758fbceafa7575c9, 0x06301e0c36060618, 0x8a249809ae8a8a12,
+0xb2f940794bb2b2f2, 0xe66359d185e6e6bf, 0x0e70361c7e0e0e38, 0x1ff8633ee71f1f7c,
+0x6237f7c455626295, 0xd4eea3b53ad4d477, 0xa829324d81a8a89a, 0x96c4f43152969662,
+0xf99b3aef62f9f9c3, 0xc566f697a3c5c533, 0x2535b14a10252594, 0x59f220b2ab595979,
+0x8454ae15d084842a, 0x72b7a7e4c57272d5, 0x39d5dd72ec3939e4, 0x4c5a6198164c4c2d,
+0x5eca3bbc945e5e65, 0x78e785f09f7878fd, 0x38ddd870e53838e0, 0x8c148605988c8c0a,
+0xd1c6b2bf17d1d163, 0xa5410b57e4a5a5ae, 0xe2434dd9a1e2e2af, 0x612ff8c24e616199,
+0xb3f1457b42b3b3f6, 0x2115a54234212184, 0x9c94d625089c9c4a, 0x1ef0663cee1e1e78,
+0x4322528661434311, 0xc776fc93b1c7c73b, 0xfcb32be54ffcfcd7, 0x0420140824040410,
+0x51b208a2e3515159, 0x99bcc72f2599995e, 0x6d4fc4da226d6da9, 0x0d68391a650d0d34,
+0xfa8335e979fafacf, 0xdfb684a369dfdf5b, 0x7ed79bfca97e7ee5, 0x243db44819242490,
+0x3bc5d776fe3b3bec, 0xab313d4b9aabab96, 0xce3ed181f0cece1f, 0x1188552299111144,
+0x8f0c8903838f8f06, 0x4e4a6b9c044e4e25, 0xb7d1517366b7b7e6, 0xeb0b60cbe0ebeb8b,
+0x3cfdcc78c13c3cf0, 0x817cbf1ffd81813e, 0x94d4fe354094946a, 0xf7eb0cf31cf7f7fb,
+0xb9a1676f18b9b9de, 0x13985f268b13134c, 0x2c7d9c58512c2cb0, 0xd3d6b8bb05d3d36b,
+0xe76b5cd38ce7e7bb, 0x6e57cbdc396e6ea5, 0xc46ef395aac4c437, 0x03180f061b03030c,
+0x568a13acdc565645, 0x441a49885e44440d, 0x7fdf9efea07f7fe1, 0xa921374f88a9a99e,
+0x2a4d8254672a2aa8, 0xbbb16d6b0abbbbd6, 0xc146e29f87c1c123, 0x53a202a6f1535351,
+0xdcae8ba572dcdc57, 0x0b582716530b0b2c, 0x9d9cd327019d9d4e, 0x6c47c1d82b6c6cad,
+0x3195f562a43131c4, 0x7487b9e8f37474cd, 0xf6e309f115f6f6ff, 0x460a438c4c464605,
+0xac092645a5acac8a, 0x893c970fb589891e, 0x14a04428b4141450, 0xe15b42dfbae1e1a3,
+0x16b04e2ca6161658, 0x3acdd274f73a3ae8, 0x696fd0d2066969b9, 0x09482d1241090924,
+0x70a7ade0d77070dd, 0xb6d954716fb6b6e2, 0xd0ceb7bd1ed0d067, 0xed3b7ec7d6eded93,
+0xcc2edb85e2cccc17, 0x422a578468424215, 0x98b4c22d2c98985a, 0xa4490e55eda4a4aa,
+0x285d8850752828a0, 0x5cda31b8865c5c6d, 0xf8933fed6bf8f8c7, 0x8644a411c2868622,
+]
+C6 = [
+0x6018c07830d81818, 0x8c2305af46262323, 0x3fc67ef991b8c6c6, 0x87e8136fcdfbe8e8,
+0x26874ca113cb8787, 0xdab8a9626d11b8b8, 0x0401080502090101, 0x214f426e9e0d4f4f,
+0xd836adee6c9b3636, 0xa2a6590451ffa6a6, 0x6fd2debdb90cd2d2, 0xf3f5fb06f70ef5f5,
+0xf979ef80f2967979, 0xa16f5fcede306f6f, 0x7e91fcef3f6d9191, 0x5552aa07a4f85252,
+0x9d6027fdc0476060, 0xcabc89766535bcbc, 0x569baccd2b379b9b, 0x028e048c018a8e8e,
+0xb6a371155bd2a3a3, 0x300c603c186c0c0c, 0xf17bff8af6847b7b, 0xd435b5e16a803535,
+0x741de8693af51d1d, 0xa7e05347ddb3e0e0, 0x7bd7f6acb321d7d7, 0x2fc25eed999cc2c2,
+0xb82e6d965c432e2e, 0x314b627a96294b4b, 0xdffea321e15dfefe, 0x41578216aed55757,
+0x5415a8412abd1515, 0xc1779fb6eee87777, 0xdc37a5eb6e923737, 0xb3e57b56d79ee5e5,
+0x469f8cd923139f9f, 0xe7f0d317fd23f0f0, 0x354a6a7f94204a4a, 0x4fda9e95a944dada,
+0x7d58fa25b0a25858, 0x03c906ca8fcfc9c9, 0xa429558d527c2929, 0x280a5022145a0a0a,
+0xfeb1e14f7f50b1b1, 0xbaa0691a5dc9a0a0, 0xb16b7fdad6146b6b, 0x2e855cab17d98585,
+0xcebd8173673cbdbd, 0x695dd234ba8f5d5d, 0x4010805020901010, 0xf7f4f303f507f4f4,
+0x0bcb16c08bddcbcb, 0xf83eedc67cd33e3e, 0x140528110a2d0505, 0x81671fe6ce786767,
+0xb7e47353d597e4e4, 0x9c2725bb4e022727, 0x1941325882734141, 0x168b2c9d0ba78b8b,
+0xa6a7510153f6a7a7, 0xe97dcf94fab27d7d, 0x6e95dcfb37499595, 0x47d88e9fad56d8d8,
+0xcbfb8b30eb70fbfb, 0x9fee2371c1cdeeee, 0xed7cc791f8bb7c7c, 0x856617e3cc716666,
+0x53dda68ea77bdddd, 0x5c17b84b2eaf1717, 0x014702468e454747, 0x429e84dc211a9e9e,
+0x0fca1ec589d4caca, 0xb42d75995a582d2d, 0xc6bf9179632ebfbf, 0x1c07381b0e3f0707,
+0x8ead012347acadad, 0x755aea2fb4b05a5a, 0x36836cb51bef8383, 0xcc3385ff66b63333,
+0x91633ff2c65c6363, 0x0802100a04120202, 0x92aa39384993aaaa, 0xd971afa8e2de7171,
+0x07c80ecf8dc6c8c8, 0x6419c87d32d11919, 0x39497270923b4949, 0x43d9869aaf5fd9d9,
+0xeff2c31df931f2f2, 0xabe34b48dba8e3e3, 0x715be22ab6b95b5b, 0x1a8834920dbc8888,
+0x529aa4c8293e9a9a, 0x98262dbe4c0b2626, 0xc8328dfa64bf3232, 0xfab0e94a7d59b0b0,
+0x83e91b6acff2e9e9, 0x3c0f78331e770f0f, 0x73d5e6a6b733d5d5, 0x3a8074ba1df48080,
+0xc2be997c6127bebe, 0x13cd26de87ebcdcd, 0xd034bde468893434, 0x3d487a7590324848,
+0xdbffab24e354ffff, 0xf57af78ff48d7a7a, 0x7a90f4ea3d649090, 0x615fc23ebe9d5f5f,
+0x80201da0403d2020, 0xbd6867d5d00f6868, 0x681ad07234ca1a1a, 0x82ae192c41b7aeae,
+0xeab4c95e757db4b4, 0x4d549a19a8ce5454, 0x7693ece53b7f9393, 0x88220daa442f2222,
+0x8d6407e9c8636464, 0xe3f1db12ff2af1f1, 0xd173bfa2e6cc7373, 0x4812905a24821212,
+0x1d403a5d807a4040, 0x2008402810480808, 0x2bc356e89b95c3c3, 0x97ec337bc5dfecec,
+0x4bdb9690ab4ddbdb, 0xbea1611f5fc0a1a1, 0x0e8d1c8307918d8d, 0xf43df5c97ac83d3d,
+0x6697ccf1335b9797, 0x0000000000000000, 0x1bcf36d483f9cfcf, 0xac2b4587566e2b2b,
+0xc57697b3ece17676, 0x328264b019e68282, 0x7fd6fea9b128d6d6, 0x6c1bd87736c31b1b,
+0xeeb5c15b7774b5b5, 0x86af112943beafaf, 0xb56a77dfd41d6a6a, 0x5d50ba0da0ea5050,
+0x0945124c8a574545, 0xebf3cb18fb38f3f3, 0xc0309df060ad3030, 0x9bef2b74c3c4efef,
+0xfc3fe5c37eda3f3f, 0x4955921caac75555, 0xb2a2791059dba2a2, 0x8fea0365c9e9eaea,
+0x89650fecca6a6565, 0xd2bab9686903baba, 0xbc2f65935e4a2f2f, 0x27c04ee79d8ec0c0,
+0x5fdebe81a160dede, 0x701ce06c38fc1c1c, 0xd3fdbb2ee746fdfd, 0x294d52649a1f4d4d,
+0x7292e4e039769292, 0xc9758fbceafa7575, 0x1806301e0c360606, 0x128a249809ae8a8a,
+0xf2b2f940794bb2b2, 0xbfe66359d185e6e6, 0x380e70361c7e0e0e, 0x7c1ff8633ee71f1f,
+0x956237f7c4556262, 0x77d4eea3b53ad4d4, 0x9aa829324d81a8a8, 0x6296c4f431529696,
+0xc3f99b3aef62f9f9, 0x33c566f697a3c5c5, 0x942535b14a102525, 0x7959f220b2ab5959,
+0x2a8454ae15d08484, 0xd572b7a7e4c57272, 0xe439d5dd72ec3939, 0x2d4c5a6198164c4c,
+0x655eca3bbc945e5e, 0xfd78e785f09f7878, 0xe038ddd870e53838, 0x0a8c148605988c8c,
+0x63d1c6b2bf17d1d1, 0xaea5410b57e4a5a5, 0xafe2434dd9a1e2e2, 0x99612ff8c24e6161,
+0xf6b3f1457b42b3b3, 0x842115a542342121, 0x4a9c94d625089c9c, 0x781ef0663cee1e1e,
+0x1143225286614343, 0x3bc776fc93b1c7c7, 0xd7fcb32be54ffcfc, 0x1004201408240404,
+0x5951b208a2e35151, 0x5e99bcc72f259999, 0xa96d4fc4da226d6d, 0x340d68391a650d0d,
+0xcffa8335e979fafa, 0x5bdfb684a369dfdf, 0xe57ed79bfca97e7e, 0x90243db448192424,
+0xec3bc5d776fe3b3b, 0x96ab313d4b9aabab, 0x1fce3ed181f0cece, 0x4411885522991111,
+0x068f0c8903838f8f, 0x254e4a6b9c044e4e, 0xe6b7d1517366b7b7, 0x8beb0b60cbe0ebeb,
+0xf03cfdcc78c13c3c, 0x3e817cbf1ffd8181, 0x6a94d4fe35409494, 0xfbf7eb0cf31cf7f7,
+0xdeb9a1676f18b9b9, 0x4c13985f268b1313, 0xb02c7d9c58512c2c, 0x6bd3d6b8bb05d3d3,
+0xbbe76b5cd38ce7e7, 0xa56e57cbdc396e6e, 0x37c46ef395aac4c4, 0x0c03180f061b0303,
+0x45568a13acdc5656, 0x0d441a49885e4444, 0xe17fdf9efea07f7f, 0x9ea921374f88a9a9,
+0xa82a4d8254672a2a, 0xd6bbb16d6b0abbbb, 0x23c146e29f87c1c1, 0x5153a202a6f15353,
+0x57dcae8ba572dcdc, 0x2c0b582716530b0b, 0x4e9d9cd327019d9d, 0xad6c47c1d82b6c6c,
+0xc43195f562a43131, 0xcd7487b9e8f37474, 0xfff6e309f115f6f6, 0x05460a438c4c4646,
+0x8aac092645a5acac, 0x1e893c970fb58989, 0x5014a04428b41414, 0xa3e15b42dfbae1e1,
+0x5816b04e2ca61616, 0xe83acdd274f73a3a, 0xb9696fd0d2066969, 0x2409482d12410909,
+0xdd70a7ade0d77070, 0xe2b6d954716fb6b6, 0x67d0ceb7bd1ed0d0, 0x93ed3b7ec7d6eded,
+0x17cc2edb85e2cccc, 0x15422a5784684242, 0x5a98b4c22d2c9898, 0xaaa4490e55eda4a4,
+0xa0285d8850752828, 0x6d5cda31b8865c5c, 0xc7f8933fed6bf8f8, 0x228644a411c28686,
+]
+C7 = [
+0x186018c07830d818, 0x238c2305af462623, 0xc63fc67ef991b8c6, 0xe887e8136fcdfbe8,
+0x8726874ca113cb87, 0xb8dab8a9626d11b8, 0x0104010805020901, 0x4f214f426e9e0d4f,
+0x36d836adee6c9b36, 0xa6a2a6590451ffa6, 0xd26fd2debdb90cd2, 0xf5f3f5fb06f70ef5,
+0x79f979ef80f29679, 0x6fa16f5fcede306f, 0x917e91fcef3f6d91, 0x525552aa07a4f852,
+0x609d6027fdc04760, 0xbccabc89766535bc, 0x9b569baccd2b379b, 0x8e028e048c018a8e,
+0xa3b6a371155bd2a3, 0x0c300c603c186c0c, 0x7bf17bff8af6847b, 0x35d435b5e16a8035,
+0x1d741de8693af51d, 0xe0a7e05347ddb3e0, 0xd77bd7f6acb321d7, 0xc22fc25eed999cc2,
+0x2eb82e6d965c432e, 0x4b314b627a96294b, 0xfedffea321e15dfe, 0x5741578216aed557,
+0x155415a8412abd15, 0x77c1779fb6eee877, 0x37dc37a5eb6e9237, 0xe5b3e57b56d79ee5,
+0x9f469f8cd923139f, 0xf0e7f0d317fd23f0, 0x4a354a6a7f94204a, 0xda4fda9e95a944da,
+0x587d58fa25b0a258, 0xc903c906ca8fcfc9, 0x29a429558d527c29, 0x0a280a5022145a0a,
+0xb1feb1e14f7f50b1, 0xa0baa0691a5dc9a0, 0x6bb16b7fdad6146b, 0x852e855cab17d985,
+0xbdcebd8173673cbd, 0x5d695dd234ba8f5d, 0x1040108050209010, 0xf4f7f4f303f507f4,
+0xcb0bcb16c08bddcb, 0x3ef83eedc67cd33e, 0x05140528110a2d05, 0x6781671fe6ce7867,
+0xe4b7e47353d597e4, 0x279c2725bb4e0227, 0x4119413258827341, 0x8b168b2c9d0ba78b,
+0xa7a6a7510153f6a7, 0x7de97dcf94fab27d, 0x956e95dcfb374995, 0xd847d88e9fad56d8,
+0xfbcbfb8b30eb70fb, 0xee9fee2371c1cdee, 0x7ced7cc791f8bb7c, 0x66856617e3cc7166,
+0xdd53dda68ea77bdd, 0x175c17b84b2eaf17, 0x47014702468e4547, 0x9e429e84dc211a9e,
+0xca0fca1ec589d4ca, 0x2db42d75995a582d, 0xbfc6bf9179632ebf, 0x071c07381b0e3f07,
+0xad8ead012347acad, 0x5a755aea2fb4b05a, 0x8336836cb51bef83, 0x33cc3385ff66b633,
+0x6391633ff2c65c63, 0x020802100a041202, 0xaa92aa39384993aa, 0x71d971afa8e2de71,
+0xc807c80ecf8dc6c8, 0x196419c87d32d119, 0x4939497270923b49, 0xd943d9869aaf5fd9,
+0xf2eff2c31df931f2, 0xe3abe34b48dba8e3, 0x5b715be22ab6b95b, 0x881a8834920dbc88,
+0x9a529aa4c8293e9a, 0x2698262dbe4c0b26, 0x32c8328dfa64bf32, 0xb0fab0e94a7d59b0,
+0xe983e91b6acff2e9, 0x0f3c0f78331e770f, 0xd573d5e6a6b733d5, 0x803a8074ba1df480,
+0xbec2be997c6127be, 0xcd13cd26de87ebcd, 0x34d034bde4688934, 0x483d487a75903248,
+0xffdbffab24e354ff, 0x7af57af78ff48d7a, 0x907a90f4ea3d6490, 0x5f615fc23ebe9d5f,
+0x2080201da0403d20, 0x68bd6867d5d00f68, 0x1a681ad07234ca1a, 0xae82ae192c41b7ae,
+0xb4eab4c95e757db4, 0x544d549a19a8ce54, 0x937693ece53b7f93, 0x2288220daa442f22,
+0x648d6407e9c86364, 0xf1e3f1db12ff2af1, 0x73d173bfa2e6cc73, 0x124812905a248212,
+0x401d403a5d807a40, 0x0820084028104808, 0xc32bc356e89b95c3, 0xec97ec337bc5dfec,
+0xdb4bdb9690ab4ddb, 0xa1bea1611f5fc0a1, 0x8d0e8d1c8307918d, 0x3df43df5c97ac83d,
+0x976697ccf1335b97, 0x0000000000000000, 0xcf1bcf36d483f9cf, 0x2bac2b4587566e2b,
+0x76c57697b3ece176, 0x82328264b019e682, 0xd67fd6fea9b128d6, 0x1b6c1bd87736c31b,
+0xb5eeb5c15b7774b5, 0xaf86af112943beaf, 0x6ab56a77dfd41d6a, 0x505d50ba0da0ea50,
+0x450945124c8a5745, 0xf3ebf3cb18fb38f3, 0x30c0309df060ad30, 0xef9bef2b74c3c4ef,
+0x3ffc3fe5c37eda3f, 0x554955921caac755, 0xa2b2a2791059dba2, 0xea8fea0365c9e9ea,
+0x6589650fecca6a65, 0xbad2bab9686903ba, 0x2fbc2f65935e4a2f, 0xc027c04ee79d8ec0,
+0xde5fdebe81a160de, 0x1c701ce06c38fc1c, 0xfdd3fdbb2ee746fd, 0x4d294d52649a1f4d,
+0x927292e4e0397692, 0x75c9758fbceafa75, 0x061806301e0c3606, 0x8a128a249809ae8a,
+0xb2f2b2f940794bb2, 0xe6bfe66359d185e6, 0x0e380e70361c7e0e, 0x1f7c1ff8633ee71f,
+0x62956237f7c45562, 0xd477d4eea3b53ad4, 0xa89aa829324d81a8, 0x966296c4f4315296,
+0xf9c3f99b3aef62f9, 0xc533c566f697a3c5, 0x25942535b14a1025, 0x597959f220b2ab59,
+0x842a8454ae15d084, 0x72d572b7a7e4c572, 0x39e439d5dd72ec39, 0x4c2d4c5a6198164c,
+0x5e655eca3bbc945e, 0x78fd78e785f09f78, 0x38e038ddd870e538, 0x8c0a8c148605988c,
+0xd163d1c6b2bf17d1, 0xa5aea5410b57e4a5, 0xe2afe2434dd9a1e2, 0x6199612ff8c24e61,
+0xb3f6b3f1457b42b3, 0x21842115a5423421, 0x9c4a9c94d625089c, 0x1e781ef0663cee1e,
+0x4311432252866143, 0xc73bc776fc93b1c7, 0xfcd7fcb32be54ffc, 0x0410042014082404,
+0x515951b208a2e351, 0x995e99bcc72f2599, 0x6da96d4fc4da226d, 0x0d340d68391a650d,
+0xfacffa8335e979fa, 0xdf5bdfb684a369df, 0x7ee57ed79bfca97e, 0x2490243db4481924,
+0x3bec3bc5d776fe3b, 0xab96ab313d4b9aab, 0xce1fce3ed181f0ce, 0x1144118855229911,
+0x8f068f0c8903838f, 0x4e254e4a6b9c044e, 0xb7e6b7d1517366b7, 0xeb8beb0b60cbe0eb,
+0x3cf03cfdcc78c13c, 0x813e817cbf1ffd81, 0x946a94d4fe354094, 0xf7fbf7eb0cf31cf7,
+0xb9deb9a1676f18b9, 0x134c13985f268b13, 0x2cb02c7d9c58512c, 0xd36bd3d6b8bb05d3,
+0xe7bbe76b5cd38ce7, 0x6ea56e57cbdc396e, 0xc437c46ef395aac4, 0x030c03180f061b03,
+0x5645568a13acdc56, 0x440d441a49885e44, 0x7fe17fdf9efea07f, 0xa99ea921374f88a9,
+0x2aa82a4d8254672a, 0xbbd6bbb16d6b0abb, 0xc123c146e29f87c1, 0x535153a202a6f153,
+0xdc57dcae8ba572dc, 0x0b2c0b582716530b, 0x9d4e9d9cd327019d, 0x6cad6c47c1d82b6c,
+0x31c43195f562a431, 0x74cd7487b9e8f374, 0xf6fff6e309f115f6, 0x4605460a438c4c46,
+0xac8aac092645a5ac, 0x891e893c970fb589, 0x145014a04428b414, 0xe1a3e15b42dfbae1,
+0x165816b04e2ca616, 0x3ae83acdd274f73a, 0x69b9696fd0d20669, 0x092409482d124109,
+0x70dd70a7ade0d770, 0xb6e2b6d954716fb6, 0xd067d0ceb7bd1ed0, 0xed93ed3b7ec7d6ed,
+0xcc17cc2edb85e2cc, 0x4215422a57846842, 0x985a98b4c22d2c98, 0xa4aaa4490e55eda4,
+0x28a0285d88507528, 0x5c6d5cda31b8865c, 0xf8c7f8933fed6bf8, 0x86228644a411c286,
+]
+
+rc = [
+0x0000000000000000,
+0x1823c6e887b8014f,
+0x36a6d2f5796f9152,
+0x60bc9b8ea30c7b35,
+0x1de0d7c22e4bfe57,
+0x157737e59ff04ada,
+0x58c9290ab1a06b85,
+0xbd5d10f4cb3e0567,
+0xe427418ba77d95d8,
+0xfbee7c66dd17479e,
+0xca2dbf07ad5a8333
+]
+
+DIGESTBYTES = 64
+class WhirlpoolStruct:
+ def __init__(self):
+ self.bitLength = [0]*32
+ self.buffer = [0]*64
+ self.bufferBits = 0
+ self.bufferPos = 0
+ self.hash = [0]*8
+
+def WhirlpoolInit(ctx):
+ # Reset the context in place; rebinding the local name would not
+ # affect the caller's object.
+ ctx.__init__()
+ return
+
+def WhirlpoolAdd(source, sourceBits, ctx):
+ if not isinstance(source, bytes):
+ raise TypeError("Expected %s, got %s" % (bytes, type(source)))
+ if sys.hexversion < 0x3000000:
+ source = [ord(s)&0xff for s in source]
+
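+ # Add sourceBits to the 256-bit message length counter, which is
+ # stored big-endian in ctx.bitLength (index 31 holds the least
+ # significant byte), propagating carries as we go.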
+ carry = 0
+ value = sourceBits
+ i = 31
+ while i >= 0 and (carry != 0 or value != 0):
+ carry += ctx.bitLength[i] + ((value % 0x100000000) & 0xff)
+ ctx.bitLength[i] = carry % 0x100
+ carry >>= 8
+ value >>= 8
+ i -= 1
+
+ bufferBits = ctx.bufferBits
+ bufferPos = ctx.bufferPos
+ sourcePos = 0
+ sourceGap = (8 - (sourceBits & 7)) & 7
+ bufferRem = ctx.bufferBits & 7
+ buffr = ctx.buffer
+
+ while sourceBits > 8:
+ b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap))
+ buffr[bufferPos] |= (b >> bufferRem) % 0x100
+ bufferPos += 1
+ bufferBits += 8 - bufferRem
+ if bufferBits == 512:
+ processBuffer(ctx)
+ bufferBits = 0
+ bufferPos = 0
+
+ buffr[bufferPos] = b << (8 - bufferRem)
+ bufferBits += bufferRem
+
+ sourceBits -= 8
+ sourcePos += 1
+
+ b = (source[sourcePos] << sourceGap) & 0xff
+ buffr[bufferPos] |= b >> bufferRem
+ if bufferRem + sourceBits < 8:
+ bufferBits += sourceBits
+ else:
+ bufferPos += 1
+ bufferBits += 8 - bufferRem
+ sourceBits -= 8 - bufferRem
+ if bufferBits == 512:
+ processBuffer(ctx)
+ bufferBits = 0
+ bufferPos = 0
+ buffr[bufferPos] = b << (8 - bufferRem)
+ bufferBits += sourceBits
+ ctx.bufferBits = bufferBits
+ ctx.bufferPos = bufferPos
+
+def WhirlpoolFinalize(ctx):
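+ # Pad the final block: set a single 1 bit after the data, zero-fill
+ # up to the 256-bit length field (flushing an extra block if there is
+ # not enough room), append the message length, then serialize the
+ # 512-bit hash state big-endian.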
+ bufferPos = ctx.bufferPos
+ ctx.buffer[bufferPos] |= 0x80 >> (ctx.bufferBits & 7)
+ bufferPos += 1
+ if bufferPos > 32:
+ if bufferPos < 64:
+ for i in xrange(64 - bufferPos):
+ ctx.buffer[bufferPos+i] = 0
+ processBuffer(ctx)
+ bufferPos = 0
+ if bufferPos < 32:
+ for i in xrange(32 - bufferPos):
+ ctx.buffer[bufferPos+i] = 0
+ bufferPos = 32
+ for i in xrange(32):
+ ctx.buffer[32+i] = ctx.bitLength[i]
+ processBuffer(ctx)
+ digest = ''
+ for i in xrange(8):
+ digest += chr((ctx.hash[i] >> 56) % 0x100)
+ digest += chr((ctx.hash[i] >> 48) % 0x100)
+ digest += chr((ctx.hash[i] >> 40) % 0x100)
+ digest += chr((ctx.hash[i] >> 32) % 0x100)
+ digest += chr((ctx.hash[i] >> 24) % 0x100)
+ digest += chr((ctx.hash[i] >> 16) % 0x100)
+ digest += chr((ctx.hash[i] >> 8) % 0x100)
+ digest += chr((ctx.hash[i]) % 0x100)
+ ctx.bufferPos = bufferPos
+ return digest
+
+def CDo(buf, a0, a1, a2, a3, a4, a5, a6, a7):
+ return C0[((buf[a0] >> 56) % 0x100000000) & 0xff] ^ \
+ C1[((buf[a1] >> 48) % 0x100000000) & 0xff] ^ \
+ C2[((buf[a2] >> 40) % 0x100000000) & 0xff] ^ \
+ C3[((buf[a3] >> 32) % 0x100000000) & 0xff] ^ \
+ C4[((buf[a4] >> 24) % 0x100000000) & 0xff] ^ \
+ C5[((buf[a5] >> 16) % 0x100000000) & 0xff] ^ \
+ C6[((buf[a6] >> 8) % 0x100000000) & 0xff] ^ \
+ C7[((buf[a7] >> 0) % 0x100000000) & 0xff]
+
+def processBuffer(ctx):
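+ # One application of the compression function: split the 512-bit
+ # buffer into eight 64-bit words, run R rounds of the W block cipher
+ # keyed by the current hash state, then fold the result back into
+ # the hash (Miyaguchi-Preneel, see below).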
+ i, r = 0, 0
+ K = [0]*8
+ block = [0]*8
+ state = [0]*8
+ L = [0]*8
+ buffr = ctx.buffer
+
+ buf_cnt = 0
+ for i in xrange(8):
+ block[i] = ((buffr[buf_cnt+0] & 0xff) << 56) ^ \
+ ((buffr[buf_cnt+1] & 0xff) << 48) ^ \
+ ((buffr[buf_cnt+2] & 0xff) << 40) ^ \
+ ((buffr[buf_cnt+3] & 0xff) << 32) ^ \
+ ((buffr[buf_cnt+4] & 0xff) << 24) ^ \
+ ((buffr[buf_cnt+5] & 0xff) << 16) ^ \
+ ((buffr[buf_cnt+6] & 0xff) << 8) ^ \
+ ((buffr[buf_cnt+7] & 0xff) << 0)
+ buf_cnt += 8
+ for i in xrange(8):
+ K[i] = ctx.hash[i]
+ state[i] = block[i] ^ K[i]
+
+ for r in xrange(1, R+1):
+ L[0] = CDo(K, 0, 7, 6, 5, 4, 3, 2, 1) ^ rc[r]
+ L[1] = CDo(K, 1, 0, 7, 6, 5, 4, 3, 2)
+ L[2] = CDo(K, 2, 1, 0, 7, 6, 5, 4, 3)
+ L[3] = CDo(K, 3, 2, 1, 0, 7, 6, 5, 4)
+ L[4] = CDo(K, 4, 3, 2, 1, 0, 7, 6, 5)
+ L[5] = CDo(K, 5, 4, 3, 2, 1, 0, 7, 6)
+ L[6] = CDo(K, 6, 5, 4, 3, 2, 1, 0, 7)
+ L[7] = CDo(K, 7, 6, 5, 4, 3, 2, 1, 0)
+ for i in xrange(8):
+ K[i] = L[i]
+ L[0] = CDo(state, 0, 7, 6, 5, 4, 3, 2, 1) ^ K[0]
+ L[1] = CDo(state, 1, 0, 7, 6, 5, 4, 3, 2) ^ K[1]
+ L[2] = CDo(state, 2, 1, 0, 7, 6, 5, 4, 3) ^ K[2]
+ L[3] = CDo(state, 3, 2, 1, 0, 7, 6, 5, 4) ^ K[3]
+ L[4] = CDo(state, 4, 3, 2, 1, 0, 7, 6, 5) ^ K[4]
+ L[5] = CDo(state, 5, 4, 3, 2, 1, 0, 7, 6) ^ K[5]
+ L[6] = CDo(state, 6, 5, 4, 3, 2, 1, 0, 7) ^ K[6]
+ L[7] = CDo(state, 7, 6, 5, 4, 3, 2, 1, 0) ^ K[7]
+ for i in xrange(8):
+ state[i] = L[i]
+ # apply the Miyaguchi-Preneel compression function
+ for i in xrange(8):
+ ctx.hash[i] ^= state[i] ^ block[i]
+ return
+
+#
+# Tests.
+#
+
+if __name__ == '__main__':
+ assert Whirlpool(b'The quick brown fox jumps over the lazy dog').hexdigest() == \
+ 'b97de512e91e3828b40d2b0fdce9ceb3c4a71f9bea8d88e75c4fa854df36725fd2b52eb6544edcacd6f8beddfea403cb55ae31f03ad62a5ef54e42ee82c3fb35'
+ assert Whirlpool(b'The quick brown fox jumps over the lazy eog').hexdigest() == \
+ 'c27ba124205f72e6847f3e19834f925cc666d0974167af915bb462420ed40cc50900d85a1f923219d832357750492d5c143011a76988344c2635e69d06f2d38c'
+ assert Whirlpool(b'').hexdigest() == \
+ '19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3'
diff --git a/usr/lib/portage/pym/portage/util/writeable_check.py b/usr/lib/portage/pym/portage/util/writeable_check.py
new file mode 100644
index 0000000..429691c
--- /dev/null
+++ b/usr/lib/portage/pym/portage/util/writeable_check.py
@@ -0,0 +1,86 @@
+#-*- coding:utf-8 -*-
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+"""
+Methods to check whether Portage is going to write to read-only filesystems.
+Since the methods are not portable across different OSes, each OS needs its
+own method. To expand RO checking for different OSes, add a method which
+accepts a list of directories and returns a list of mounts which need to be
+remounted RW, then register it for your OS in the _CHECKERS map consulted by
+get_ro_checker().
+"""
+from __future__ import unicode_literals
+
+import io
+import logging
+
+from portage import _encodings
+from portage.util import writemsg_level
+from portage.localization import _
+from portage.data import ostype
+
+
+def get_ro_checker():
+ """
+ Uses the system type to find an appropriate method for testing whether Portage
+ is going to write to any read-only filesystems.
+
+ @return:
+ 1. A method for testing for RO filesystems appropriate to the current system.
+ """
+ return _CHECKERS.get(ostype, empty_ro_checker)
+
+
+def linux_ro_checker(dir_list):
+ """
+ Use /proc/self/mountinfo to check whether any of the directories the
+ ebuild installs to reside on a read-only filesystem.
+
+ @param dir_list: A list of directories installed by the ebuild.
+ @type dir_list: List
+ @return:
+ 1. A set of filesystems which are both set to be written to and are mounted
+ read-only; may be empty.
+ """
+ ro_filesystems = set()
+
+ try:
+ with io.open("/proc/self/mountinfo", mode='r',
+ encoding=_encodings['content'], errors='replace') as f:
+ for line in f:
+ # we're interested in dir and both attr fields, which always
+ # start with either 'ro' or 'rw'
+ # example line:
+ # 14 1 8:3 / / rw,noatime - ext3 /dev/root rw,errors=continue,commit=5,barrier=1,data=writeback
+ # _dir ^ ^ attr1 ^ attr2
+ # there can be a variable number of fields
+ # to the left of the ' - ', after the attr's, so split it there
+ mount = line.split(' - ')
+ _dir, attr1 = mount[0].split()[4:6]
+ attr2 = mount[1].split()[2]
+ if attr1.startswith('ro') or attr2.startswith('ro'):
+ ro_filesystems.add(_dir)
+
+ # If /proc/self/mountinfo can't be read, assume that there are no RO
+ # filesystems and return.
+ except EnvironmentError:
+ writemsg_level(_("!!! /proc/self/mountinfo cannot be read"),
+ level=logging.WARNING, noiselevel=-1)
+ return []
+
+ return ro_filesystems.intersection(dir_list)
+
+
+def empty_ro_checker(dir_list):
+ """
+ Always returns [], this is the fallback function if the system does not have
+ an ro_checker method defined.
+ """
+ return []
+
+
+# _CHECKERS is a map from ostype output to the appropriate function to return
+# in get_ro_checker.
+_CHECKERS = {
+ "Linux": linux_ro_checker,
+}
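+
+# Example usage (an illustrative sketch; the directory list is made up):
+#
+#     rocheck = get_ro_checker()
+#     ro_mounts = rocheck(['/usr', '/tmp'])
+#
+# On Linux, ro_mounts is the (possibly empty) set of entries from the
+# given list that are mounted read-only.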
diff --git a/usr/lib/portage/pym/portage/versions.py b/usr/lib/portage/pym/portage/versions.py
new file mode 100644
index 0000000..58e29d2
--- /dev/null
+++ b/usr/lib/portage/pym/portage/versions.py
@@ -0,0 +1,588 @@
+# versions.py -- core Portage functionality
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'best', 'catpkgsplit', 'catsplit',
+ 'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
+ 'ververify', 'vercmp'
+]
+
+import re
+import sys
+import warnings
+
+if sys.hexversion < 0x3000000:
+ _unicode = unicode
+else:
+ _unicode = str
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.repository.config:_gen_valid_repo',
+ 'portage.util:cmp_sort_key',
+)
+from portage import _unicode_decode
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from portage.localization import _
+
+_unknown_repo = "__unknown__"
+
+# \w is [a-zA-Z0-9_]
+
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot = r'([\w+][\w+.-]*)'
+
+# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_cat = r'[\w+][\w+.-]*'
+
+# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
+# It must not begin with a hyphen,
+# and must not end in a hyphen followed by one or more digits.
+_pkg = {
+ "dots_disallowed_in_PN": r'[\w+][\w+-]*?',
+ "dots_allowed_in_PN": r'[\w+][\w+.-]*?',
+}
+
+_v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
+# PREFIX hack: -r(\d+) -> -r(\d+|0\d+\.\d+) (see below)
+_rev = r'(\d+|0\d+\.\d+)'
+_vr = _v + '(-r(' + _rev + '))?'
+
+_cp = {
+ "dots_disallowed_in_PN": '(' + _cat + '/' + _pkg['dots_disallowed_in_PN'] + '(-' + _vr + ')?)',
+ "dots_allowed_in_PN": '(' + _cat + '/' + _pkg['dots_allowed_in_PN'] + '(-' + _vr + ')?)',
+}
+_cpv = {
+ "dots_disallowed_in_PN": '(' + _cp['dots_disallowed_in_PN'] + '-' + _vr + ')',
+ "dots_allowed_in_PN": '(' + _cp['dots_allowed_in_PN'] + '-' + _vr + ')',
+}
+_pv = {
+ "dots_disallowed_in_PN": '(?P<pn>' + _pkg['dots_disallowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+ "dots_allowed_in_PN": '(?P<pn>' + _pkg['dots_allowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+}
+
+ver_regexp = re.compile("^" + _vr + "$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
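+
+# The values above give the suffix ordering
+# _alpha < _beta < _pre < _rc < (no suffix) < _p, e.g. (illustrative):
+#   vercmp('1.0_alpha1', '1.0_beta1') < 0
+#   vercmp('1.0_rc1', '1.0') < 0
+#   vercmp('1.0', '1.0_p1') < 0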
+
+_slot_re_cache = {}
+
+def _get_slot_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'(/' + _slot + r')?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_re_cache[cache_key] = slot_re
+ return slot_re
+
+_pv_re_cache = {}
+
+def _get_pv_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ pv_re = _pv_re_cache.get(cache_key)
+ if pv_re is not None:
+ return pv_re
+
+ if eapi_attrs.dots_in_PN:
+ pv_re = _pv['dots_allowed_in_PN']
+ else:
+ pv_re = _pv['dots_disallowed_in_PN']
+
+ pv_re = re.compile(r'^' + pv_re + r'$', re.VERBOSE | re.UNICODE)
+
+ _pv_re_cache[cache_key] = pv_re
+ return pv_re
+
+def ververify(myver, silent=1):
+ if ver_regexp.match(myver):
+ return True
+ else:
+ if not silent:
+ print(_("!!! syntax error in version: %s") % myver)
+ return False
+
+def vercmp(ver1, ver2, silent=1):
+ """
+ Compare two versions
+ Example usage:
+ >>> from portage.versions import vercmp
+ >>> vercmp('1.0-r1','1.2-r3')
+ negative number
+ >>> vercmp('1.3','1.2-r3')
+ positive number
+ >>> vercmp('1.0_p3','1.0_p3')
+ 0
+
+ @param ver1: version to compare with (see ver_regexp in portage.versions.py)
+ @type ver1: string (example: "2.1.2-r3")
+ @param ver2: version to compare against (see ver_regexp in portage.versions.py)
+ @type ver2: string (example: "2.1.2_rc5")
+ @rtype: None or integer
+ @return:
+ 1. positive if ver1 is greater than ver2
+ 2. negative if ver1 is less than ver2
+ 3. 0 if ver1 equals ver2
+ 4. None if ver1 or ver2 are invalid (see ver_regexp in portage.versions.py)
+ """
+
+ if ver1 == ver2:
+ return 0
+
+ match1 = ver_regexp.match(ver1)
+ match2 = ver_regexp.match(ver2)
+
+ # checking that the versions are valid
+ if not match1 or not match1.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver1)
+ return None
+ if not match2 or not match2.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver2)
+ return None
+
+ # shortcut for cvs ebuilds (new style)
+ if match1.group(1) and not match2.group(1):
+ return 1
+ elif match2.group(1) and not match1.group(1):
+ return -1
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(2))]
+ list2 = [int(match2.group(2))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if match1.group(3) or match2.group(3):
+ vlist1 = match1.group(3)[1:].split(".")
+ vlist2 = match2.group(3)[1:].split(".")
+
+ for i in range(0, max(len(vlist1), len(vlist2))):
+ # Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+ # would be ambiguous if two versions that aren't literally equal
+ # are given the same value (in sorting, for example).
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(-1)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(-1)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ # list1.append(float("0."+vlist1[i]))
+ # list2.append(float("0."+vlist2[i]))
+ # Since python floats have limited range, we multiply both
+ # floating point representations by a constant so that they are
+ # transformed into whole numbers. This allows the practically
+ # infinite range of a python int to be exploited. The
+ # multiplication is done by padding both literal strings with
+ # zeros as necessary to ensure equal length.
+ max_len = max(len(vlist1[i]), len(vlist2[i]))
+ list1.append(int(vlist1[i].ljust(max_len, "0")))
+ list2.append(int(vlist2[i].ljust(max_len, "0")))
+
+ # and now the final letter
+ # NOTE: Behavior changed in r2309 (between portage-2.0.x and portage-2.1).
+ # The new behavior is 12.2.5 > 12.2b which, depending on how you look at it,
+ # may seem counter-intuitive. However, if you really think about it, it
+ # seems like it's probably safe to assume that this is the behavior that
+ # is intended by anyone who would use versions such as these.
+ if len(match1.group(5)):
+ list1.append(ord(match1.group(5)))
+ if len(match2.group(5)):
+ list2.append(ord(match2.group(5)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ return -1
+ elif len(list2) <= i:
+ return 1
+ elif list1[i] != list2[i]:
+ a = list1[i]
+ b = list2[i]
+ rval = (a > b) - (a < b)
+ return rval
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(6).split("_")[1:]
+ list2 = match2.group(6).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ # Implicit _p0 is given a value of -1, so that 1 < 1_p0
+ if len(list1) <= i:
+ s1 = ("p","-1")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","-1")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ a = suffix_value[s1[0]]
+ b = suffix_value[s2[0]]
+ rval = (a > b) - (a < b)
+ return rval
+ if s1[1] != s2[1]:
+ # it's possible that the s(1|2)[1] == ''
+ # in such a case, fudge it.
+ try:
+ r1 = int(s1[1])
+ except ValueError:
+ r1 = 0
+ try:
+ r2 = int(s2[1])
+ except ValueError:
+ r2 = 0
+ rval = (r1 > r2) - (r1 < r2)
+ if rval:
+ return rval
+
+ # The suffix part is equal too, so finally check the revision
+ # PREFIX hack: a revision starting with 0 is an 'inter-revision',
+ # which means that it is possible to create revisions on revisions.
+ # An example is -r01.1 which is the first revision of -r1. Note
+ # that a period (.) is used to separate the real revision and the
+ # secondary revision number. This trick is in use to allow revision
+ # bumps in ebuilds synced from the main tree for Prefix changes,
+ # while still staying in the main tree versioning scheme.
+ if match1.group(10):
+ if match1.group(10)[0] == '0' and '.' in match1.group(10):
+ t = match1.group(10)[1:].split(".")
+ r1 = int(t[0])
+ r3 = int(t[1])
+ else:
+ r1 = int(match1.group(10))
+ r3 = 0
+ else:
+ r1 = 0
+ r3 = 0
+ if match2.group(10):
+ if match2.group(10)[0] == '0' and '.' in match2.group(10):
+ t = match2.group(10)[1:].split(".")
+ r2 = int(t[0])
+ r4 = int(t[1])
+ else:
+ r2 = int(match2.group(10))
+ r4 = 0
+ else:
+ r2 = 0
+ r4 = 0
+ if r1 == r2 and (r3 != 0 or r4 != 0):
+ r1 = r3
+ r2 = r4
+ rval = (r1 > r2) - (r1 < r2)
+ return rval
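+
+# A few illustrative results of the rules above:
+#   vercmp('1.02', '1.1') < 0          # zero-padding path: '02' -> 2 vs '1' -> '10' -> 10
+#   vercmp('12.2.5', '12.2b') > 0      # see the NOTE about r2309 above
+#   vercmp('1.0-r01.1', '1.0-r1') > 0  # Prefix inter-revision of -r1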
+
+def pkgcmp(pkg1, pkg2):
+ """
+ Compare 2 package versions created in pkgsplit format.
+
+ Example usage:
+ >>> from portage.versions import *
+ >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+ -1
+ >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+ 1
+
+ @param pkg1: package to compare with
+ @type pkg1: list (example: ['test', '1.0', 'r1'])
+ @param pkg2: package to compare against
+ @type pkg2: list (example: ['test', '1.0', 'r1'])
+ @rtype: None or integer
+ @return:
+ 1. None if package names are not the same
+ 2. 1 if pkg1 is greater than pkg2
+ 3. -1 if pkg1 is less than pkg2
+ 4. 0 if pkg1 equals pkg2
+ """
+ if pkg1[0] != pkg2[0]:
+ return None
+ return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
+
+def _pkgsplit(mypkg, eapi=None):
+ """
+ @param mypkg: pv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ """
+ m = _get_pv_re(_get_eapi_attrs(eapi)).match(mypkg)
+ if m is None:
+ return None
+
+ if m.group('pn_inval') is not None:
+ # package name appears to have a version-like suffix
+ return None
+
+ rev = m.group('rev')
+ if rev is None:
+ rev = '0'
+ rev = 'r' + rev
+
+ return (m.group('pn'), m.group('ver'), rev)
+
+_cat_re = re.compile('^%s$' % _cat, re.UNICODE)
+_missing_cat = 'null'
+
+def catpkgsplit(mydata, silent=1, eapi=None):
+ """
+ Takes a Category/Package-Version-Rev and returns a list of each.
+
+ @param mydata: Data to split
+ @type mydata: string
+ @param silent: suppress error messages
+ @type silent: Boolean (integer)
+ @rtype: list
+ @return:
+ 1. If each exists, it returns [cat, pkgname, version, rev]
+ 2. If cat is not specified in mydata, cat will be "null"
+ 3. If rev does not exist it will be 'r0'
+ """
+ try:
+ return mydata.cpv_split
+ except AttributeError:
+ pass
+ mysplit = mydata.split('/', 1)
+ p_split = None
+ if len(mysplit) == 1:
+ cat = _missing_cat
+ p_split = _pkgsplit(mydata, eapi=eapi)
+ elif len(mysplit) == 2:
+ cat = mysplit[0]
+ if _cat_re.match(cat) is not None:
+ p_split = _pkgsplit(mysplit[1], eapi=eapi)
+ if not p_split:
+ return None
+ retval = (cat, p_split[0], p_split[1], p_split[2])
+ return retval
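+
+# For example (illustrative):
+#   catpkgsplit('sys-apps/portage-2.2.14') == ('sys-apps', 'portage', '2.2.14', 'r0')
+#   catpkgsplit('portage-2.2.14-r1') == ('null', 'portage', '2.2.14', 'r1')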
+
+class _pkg_str(_unicode):
+ """
+ This class represents a cpv. It inherits from str (unicode in python2) and
+ has attributes that cache results for use by functions like catpkgsplit and
+ cpv_getkey which are called frequently (especially in match_from_list).
+ Instances are typically created in dbapi.cp_list() or the Atom constructor,
+ and propagate from there. Generally, code that pickles these objects will
+ manually convert them to a plain unicode object first.
+ """
+
+ def __new__(cls, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
+ return _unicode.__new__(cls, cpv)
+
+ def __init__(self, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
+ if not isinstance(cpv, _unicode):
+ # Avoid TypeError from _unicode.__init__ with PyPy.
+ cpv = _unicode_decode(cpv)
+ _unicode.__init__(cpv)
+ if metadata is not None:
+ self.__dict__['_metadata'] = metadata
+ slot = metadata.get('SLOT', slot)
+ repo = metadata.get('repository', repo)
+ eapi = metadata.get('EAPI', eapi)
+ if settings is not None:
+ self.__dict__['_settings'] = settings
+ if eapi is not None:
+ self.__dict__['eapi'] = eapi
+ self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
+ if self.cpv_split is None:
+ raise InvalidData(cpv)
+ self.__dict__['cp'] = self.cpv_split[0] + '/' + self.cpv_split[1]
+ if self.cpv_split[-1] == "r0" and cpv[-3:] != "-r0":
+ self.__dict__['version'] = "-".join(self.cpv_split[2:-1])
+ else:
+ self.__dict__['version'] = "-".join(self.cpv_split[2:])
+ # for match_from_list introspection
+ self.__dict__['cpv'] = self
+ if slot is not None:
+ eapi_attrs = _get_eapi_attrs(eapi)
+ slot_match = _get_slot_re(eapi_attrs).match(slot)
+ if slot_match is None:
+ # Avoid an InvalidAtom exception when creating SLOT atoms
+ self.__dict__['slot'] = '0'
+ self.__dict__['sub_slot'] = '0'
+ self.__dict__['slot_invalid'] = slot
+ else:
+ if eapi_attrs.slot_operator:
+ slot_split = slot.split("/")
+ self.__dict__['slot'] = slot_split[0]
+ if len(slot_split) > 1:
+ self.__dict__['sub_slot'] = slot_split[1]
+ else:
+ self.__dict__['sub_slot'] = slot_split[0]
+ else:
+ self.__dict__['slot'] = slot
+ self.__dict__['sub_slot'] = slot
+
+ if repo is not None:
+ repo = _gen_valid_repo(repo)
+ if not repo:
+ repo = _unknown_repo
+ self.__dict__['repo'] = repo
+
+ def __setattr__(self, name, value):
+ raise AttributeError("_pkg_str instances are immutable",
+ self.__class__, name, value)
+
+ @property
+ def stable(self):
+ try:
+ return self._stable
+ except AttributeError:
+ try:
+ metadata = self._metadata
+ settings = self._settings
+ except AttributeError:
+ raise AttributeError('stable')
+ if not settings.local_config:
+ # Since repoman uses different config instances for
+ # different profiles, our local instance does not
+ # refer to the correct profile.
+ raise AssertionError('invalid context')
+ stable = settings._isStable(self)
+ self.__dict__['_stable'] = stable
+ return stable
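+
+ # Illustrative attribute access (a sketch; assumes a well-formed cpv):
+ #   pkg = _pkg_str('sys-apps/portage-2.2.14')
+ #   pkg.cp        -> 'sys-apps/portage'
+ #   pkg.version   -> '2.2.14'
+ #   pkg.cpv_split -> ('sys-apps', 'portage', '2.2.14', 'r0')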
+
+def pkgsplit(mypkg, silent=1, eapi=None):
+ """
+ @param mypkg: either a pv or cpv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ 3. (cp, ver, rev) if input is a cpv
+ """
+ catpsplit = catpkgsplit(mypkg, eapi=eapi)
+ if catpsplit is None:
+ return None
+ cat, pn, ver, rev = catpsplit
+ if cat is _missing_cat and '/' not in mypkg:
+ return (pn, ver, rev)
+ else:
+ return (cat + '/' + pn, ver, rev)
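+
+# For example (illustrative):
+#   pkgsplit('foo-1.0-r1') == ('foo', '1.0', 'r1')
+#   pkgsplit('sys-apps/foo-1.0') == ('sys-apps/foo', '1.0', 'r0')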
+
+def cpv_getkey(mycpv, eapi=None):
+ """Calls catpkgsplit on a cpv and returns only the cp."""
+ try:
+ return mycpv.cp
+ except AttributeError:
+ pass
+ mysplit = catpkgsplit(mycpv, eapi=eapi)
+ if mysplit is not None:
+ return mysplit[0] + '/' + mysplit[1]
+
+ warnings.warn("portage.versions.cpv_getkey() " + \
+ "called with invalid cpv: '%s'" % (mycpv,),
+ DeprecationWarning, stacklevel=2)
+
+ myslash = mycpv.split("/", 1)
+ mysplit = _pkgsplit(myslash[-1], eapi=eapi)
+ if mysplit is None:
+ return None
+ mylen = len(myslash)
+ if mylen == 2:
+ return myslash[0] + "/" + mysplit[0]
+ else:
+ return mysplit[0]
+
+def cpv_getversion(mycpv, eapi=None):
+ """Returns the v (including revision) from an cpv."""
+ try:
+ return mycpv.version
+ except AttributeError:
+ pass
+ cp = cpv_getkey(mycpv, eapi=eapi)
+ if cp is None:
+ return None
+ return mycpv[len(cp+"-"):]
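+
+# For example (illustrative):
+#   cpv_getkey('sys-apps/portage-2.2.14-r1') == 'sys-apps/portage'
+#   cpv_getversion('sys-apps/portage-2.2.14-r1') == '2.2.14-r1'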
+
+def cpv_sort_key(eapi=None):
+ """
+ Create an object for sorting cpvs, to be used as the 'key' parameter
+ in places like list.sort() or sorted(). This calls catpkgsplit() once for
+ each cpv and caches the result. If a given cpv is invalid or two cpvs
+ have different category/package names, then plain string (> and <)
+ comparison is used.
+
+ @rtype: key object for sorting
+ @return: object for use as the 'key' parameter in places like
+ list.sort() or sorted()
+ """
+
+ split_cache = {}
+
+ def cmp_cpv(cpv1, cpv2):
+
+ split1 = split_cache.get(cpv1, False)
+ if split1 is False:
+ split1 = None
+ try:
+ split1 = cpv1.cpv
+ except AttributeError:
+ try:
+ split1 = _pkg_str(cpv1, eapi=eapi)
+ except InvalidData:
+ pass
+ split_cache[cpv1] = split1
+
+ split2 = split_cache.get(cpv2, False)
+ if split2 is False:
+ split2 = None
+ try:
+ split2 = cpv2.cpv
+ except AttributeError:
+ try:
+ split2 = _pkg_str(cpv2, eapi=eapi)
+ except InvalidData:
+ pass
+ split_cache[cpv2] = split2
+
+ if split1 is None or split2 is None or split1.cp != split2.cp:
+ return (cpv1 > cpv2) - (cpv1 < cpv2)
+
+ return vercmp(split1.version, split2.version)
+
+ return cmp_sort_key(cmp_cpv)
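+
+# Illustrative sort: plain string comparison would place 'foo-1.10' before
+# 'foo-1.2', whereas cpv_sort_key() orders by version:
+#   sorted(['app-misc/foo-1.10', 'app-misc/foo-1.2'], key=cpv_sort_key())
+#   -> ['app-misc/foo-1.2', 'app-misc/foo-1.10']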
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def best(mymatches, eapi=None):
+ """Accepts None arguments; assumes matches are valid."""
+ if not mymatches:
+ return ""
+ if len(mymatches) == 1:
+ return mymatches[0]
+ bestmatch = mymatches[0]
+ try:
+ v2 = bestmatch.version
+ except AttributeError:
+ v2 = _pkg_str(bestmatch, eapi=eapi).version
+ for x in mymatches[1:]:
+ try:
+ v1 = x.version
+ except AttributeError:
+ v1 = _pkg_str(x, eapi=eapi).version
+ if vercmp(v1, v2) > 0:
+ bestmatch = x
+ v2 = v1
+ return bestmatch
diff --git a/usr/lib/portage/pym/portage/xml/__init__.py b/usr/lib/portage/pym/portage/xml/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/usr/lib/portage/pym/portage/xml/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/usr/lib/portage/pym/portage/xml/metadata.py b/usr/lib/portage/pym/portage/xml/metadata.py
new file mode 100644
index 0000000..fcd9dc0
--- /dev/null
+++ b/usr/lib/portage/pym/portage/xml/metadata.py
@@ -0,0 +1,423 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
+
+ Example usage:
+ >>> from portage.xml.metadata import MetaDataXML
+ >>> pkg_md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml')
+ >>> pkg_md
+ <MetaDataXML '/usr/portage/app-misc/gourmet/metadata.xml'>
+ >>> pkg_md.herds()
+ ['no-herd']
+ >>> for maint in pkg_md.maintainers():
+ ... print "{0} ({1})".format(maint.email, maint.name)
+ ...
+ nixphoeni@gentoo.org (Joe Sapp)
+ >>> for flag in pkg_md.use():
+ ... print flag.name, "->", flag.description
+ ...
+ rtf -> Enable export to RTF
+ gnome-print -> Enable printing support using gnome-print
+ >>> upstream = pkg_md.upstream()
+ >>> upstream
+ [<_Upstream {'docs': [], 'remoteid': [], 'maintainer':
+ [<_Maintainer 'Thomas_Hinkle@alumni.brown.edu'>], 'bugtracker': [],
+ 'changelog': []}>]
+ >>> upstream[0].maintainer[0].name
+ 'Thomas Mills Hinkle'
+"""
+
+from __future__ import unicode_literals
+
+__all__ = ('MetaDataXML',)
+
+import sys
+
+if sys.hexversion < 0x2070000 or \
+ (sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000):
+ # Our _MetadataTreeBuilder usage is incompatible with
+ # cElementTree in Python 2.6, 3.0, and 3.1:
+ # File "/usr/lib/python2.6/xml/etree/ElementTree.py", line 644, in findall
+ # assert self._root is not None
+ import xml.etree.ElementTree as etree
+else:
+ try:
+ import xml.etree.cElementTree as etree
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # http://bugs.python.org/issue14988
+ import xml.etree.ElementTree as etree
+
+try:
+ from xml.parsers.expat import ExpatError
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ ExpatError = SyntaxError
+
+import re
+import xml.etree.ElementTree
+from portage import _encodings, _unicode_encode
+from portage.util import unique_everseen
+
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings with
+ Python >=2.7.
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+class _Maintainer(object):
+ """An object for representing one maintainer.
+
+ @type email: str or None
+ @ivar email: Maintainer's email address. Used for both Gentoo and upstream.
+ @type name: str or None
+ @ivar name: Maintainer's name. Used for both Gentoo and upstream.
+ @type description: str or None
+ @ivar description: Description of what a maintainer does. Gentoo only.
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means only maintains versions
+ of Portage 2.2 or later. Should be DEPEND string with < and >
+ converted to &lt; and &gt; respectively.
+ @type status: str or None
+ @ivar status: If set, either 'active' or 'inactive'. Upstream only.
+ """
+
+ def __init__(self, node):
+ self.email = None
+ self.name = None
+ self.description = None
+ self.restrict = node.get('restrict')
+ self.status = node.get('status')
+ for attr in node:
+ setattr(self, attr.tag, attr.text)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.email)
+
+
+class _Useflag(object):
+ """An object for representing one USE flag.
+
+ @todo: Is there any way to have a keyword option to leave in
+ <pkg> and <cat> for later processing?
+ @type name: str or None
+ @ivar name: USE flag
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means flag is only available in
+ versions 2.2 or later
+ @type description: str
+ @ivar description: description of the USE flag
+ """
+
+ def __init__(self, node):
+ self.name = node.get('name')
+ self.restrict = node.get('restrict')
+ _desc = ''
+ if node.text:
+ _desc = node.text
+ for child in node.getchildren():
+ _desc += child.text if child.text else ''
+ _desc += child.tail if child.tail else ''
+ # This takes care of tabs and newlines left from the file
+ self.description = re.sub(r'\s+', ' ', _desc)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.name)
+
+
+class _Upstream(object):
+ """An object for representing one package's upstream.
+
+ @type maintainers: list
+ @ivar maintainers: L{_Maintainer} objects for each upstream maintainer
+ @type changelogs: list
+ @ivar changelogs: URLs to upstream's ChangeLog file in str format
+ @type docs: list
+ @ivar docs: Sequence of tuples containing URLs to upstream documentation
+ in the first slot and 'lang' attribute in the second, e.g.,
+ [('http.../docs/en/tut.html', None), ('http.../doc/fr/tut.html', 'fr')]
+ @type bugtrackers: list
+ @ivar bugtrackers: URLs to upstream's bugtracker. May also contain an email
+ address if prepended with 'mailto:'
+ @type remoteids: list
+ @ivar remoteids: Sequence of tuples containing the project's hosting site
+ name in the first slot and the project's ID name or number for that
+ site in the second, e.g., [('sourceforge', 'systemrescuecd')]
+ """
+
+ def __init__(self, node):
+ self.node = node
+ self.maintainers = self.upstream_maintainers()
+ self.changelogs = self.upstream_changelogs()
+ self.docs = self.upstream_documentation()
+ self.bugtrackers = self.upstream_bugtrackers()
+ self.remoteids = self.upstream_remoteids()
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.__dict__)
+
+ def upstream_bugtrackers(self):
+ """Retrieve upstream bugtracker location from xml node."""
+ return [e.text for e in self.node.findall('bugs-to')]
+
+ def upstream_changelogs(self):
+ """Retrieve upstream changelog location from xml node."""
+ return [e.text for e in self.node.findall('changelog')]
+
+ def upstream_documentation(self):
+ """Retrieve upstream documentation location from xml node."""
+ result = []
+ for elem in self.node.findall('doc'):
+ lang = elem.get('lang')
+ result.append((elem.text, lang))
+ return result
+
+ def upstream_maintainers(self):
+ """Retrieve upstream maintainer information from xml node."""
+ return [_Maintainer(m) for m in self.node.findall('maintainer')]
+
+ def upstream_remoteids(self):
+ """Retrieve upstream remote ID from xml node."""
+ return [(e.text, e.get('type')) for e in self.node.findall('remote-id')]
+
+
+class MetaDataXML(object):
+ """Access metadata.xml"""
+
+ def __init__(self, metadata_xml_path, herds):
+ """Parse a valid metadata.xml file.
+
+ @type metadata_xml_path: str
+ @param metadata_xml_path: path to a valid metadata.xml file
+ @type herds: str or ElementTree
+ @param herds: path to a herds.xml, or a pre-parsed ElementTree
+ @raise IOError: if C{metadata_xml_path} can not be read
+ """
+
+ self.metadata_xml_path = metadata_xml_path
+ self._xml_tree = None
+
+ try:
+ self._xml_tree = etree.parse(_unicode_encode(metadata_xml_path,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=etree.XMLParser(target=_MetadataTreeBuilder()))
+ except ImportError:
+ pass
+ except ExpatError as e:
+ raise SyntaxError("%s" % (e,))
+
+ if isinstance(herds, etree.ElementTree):
+ herds_etree = herds
+ herds_path = None
+ else:
+ herds_etree = None
+ herds_path = herds
+
+ # Used for caching
+ self._herdstree = herds_etree
+ self._herds_path = herds_path
+ self._descriptions = None
+ self._maintainers = None
+ self._herds = None
+ self._useflags = None
+ self._upstream = None
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.metadata_xml_path)
+
+ def _get_herd_email(self, herd):
+ """Get a herd's email address.
+
+ @type herd: str
+ @param herd: herd whose email you want
+ @rtype: str or None
+ @return: email address or None if herd is not in herds.xml
+ @raise IOError: if $PORTDIR/metadata/herds.xml can not be read
+ """
+
+ if self._herdstree is None:
+ try:
+ self._herdstree = etree.parse(_unicode_encode(self._herds_path,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=etree.XMLParser(target=_MetadataTreeBuilder()))
+ except (ImportError, IOError, SyntaxError):
+ return None
+
+ # Some special herds are not listed in herds.xml
+ if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
+ return None
+
+ try:
+ # Python 2.7 or >=3.2
+ iterate = self._herdstree.iter
+ except AttributeError:
+ iterate = self._herdstree.getiterator
+
+ for node in iterate('herd'):
+ if node.findtext('name') == herd:
+ return node.findtext('email')
+
+ def herds(self, include_email=False):
+ """Return a list of text nodes for <herd>.
+
+ @type include_email: bool
+ @keyword include_email: if True, also look up the herd's email
+ @rtype: tuple
+ @return: if include_email is False, return a tuple of strings;
+ if include_email is True, return a tuple of tuples containing:
+ [('herd1', 'herd1@gentoo.org'), ('no-herd', None)]
+ """
+ if self._herds is None:
+ if self._xml_tree is None:
+ self._herds = tuple()
+ else:
+ herds = []
+ for elem in self._xml_tree.findall('herd'):
+ text = elem.text
+ if text is None:
+ text = ''
+ if include_email:
+ herd_mail = self._get_herd_email(text)
+ herds.append((text, herd_mail))
+ else:
+ herds.append(text)
+ self._herds = tuple(herds)
+
+ return self._herds
+
+ def descriptions(self):
+ """Return a list of text nodes for <longdescription>.
+
+ @rtype: list
+ @return: package description in string format
+ @todo: Support the C{lang} attribute
+ """
+ if self._descriptions is None:
+ if self._xml_tree is None:
+ self._descriptions = tuple()
+ else:
+ self._descriptions = tuple(e.text \
+ for e in self._xml_tree.findall("longdescription"))
+
+ return self._descriptions
+
+ def maintainers(self):
+ """Get maintainers' name, email and description.
+
+ @rtype: list
+ @return: a sequence of L{_Maintainer} objects in document order.
+ """
+
+ if self._maintainers is None:
+ if self._xml_tree is None:
+ self._maintainers = tuple()
+ else:
+ self._maintainers = tuple(_Maintainer(node) \
+ for node in self._xml_tree.findall('maintainer'))
+
+ return self._maintainers
+
+ def use(self):
+ """Get names and descriptions for USE flags defined in metadata.
+
+ @rtype: list
+ @return: a sequence of L{_Useflag} objects in document order.
+ """
+
+ if self._useflags is None:
+ if self._xml_tree is None:
+ self._useflags = tuple()
+ else:
+ try:
+ # Python 2.7 or >=3.2
+ iterate = self._xml_tree.iter
+ except AttributeError:
+ iterate = self._xml_tree.getiterator
+ self._useflags = tuple(_Useflag(node) \
+ for node in iterate('flag'))
+
+ return self._useflags
+
+ def upstream(self):
+ """Get upstream contact information.
+
+ @rtype: list
+ @return: a sequence of L{_Upstream} objects in document order.
+ """
+
+ if self._upstream is None:
+ if self._xml_tree is None:
+ self._upstream = tuple()
+ else:
+ self._upstream = tuple(_Upstream(node) \
+ for node in self._xml_tree.findall('upstream'))
+
+ return self._upstream
+
+ def format_maintainer_string(self):
+ """Format string containing maintainers and herds (emails if possible).
+ Used by emerge to display maintainer information.
+ Entries are sorted according to the rules stated on the bug wranglers page.
+
+ @rtype: String
+ @return: a string containing maintainers and herds
+ """
+ maintainers = []
+ for maintainer in self.maintainers():
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for herd, email in self.herds(include_email=True):
+ if herd == "no-herd":
+ continue
+ if email is None or not email.strip():
+ if herd and herd.strip():
+ maintainers.append(herd)
+ else:
+ maintainers.append(email)
+
+ maintainers = list(unique_everseen(maintainers))
+
+ maint_str = ""
+ if maintainers:
+ maint_str = maintainers[0]
+ maintainers = maintainers[1:]
+ if maintainers:
+ maint_str += " " + ",".join(maintainers)
+
+ return maint_str
+
+ def format_upstream_string(self):
+ """Format string containing upstream maintainers and bugtrackers.
+ Used by emerge to display upstream information.
+
+ @rtype: String
+ @return: a string containing upstream maintainers and bugtrackers
+ """
+ maintainers = []
+ for upstream in self.upstream():
+ for maintainer in upstream.maintainers:
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for bugtracker in upstream.bugtrackers:
+ if bugtracker.startswith("mailto:"):
+ bugtracker = bugtracker[7:]
+ maintainers.append(bugtracker)
+
+ maintainers = list(unique_everseen(maintainers))
+ maint_str = " ".join(maintainers)
+ return maint_str
diff --git a/usr/lib/portage/pym/portage/xpak.py b/usr/lib/portage/pym/portage/xpak.py
new file mode 100644
index 0000000..b4567be
--- /dev/null
+++ b/usr/lib/portage/pym/portage/xpak.py
@@ -0,0 +1,499 @@
+# Copyright 2001-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
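+#
+# For example (illustrative): encodeint(8) yields the four bytes
+# b'\x00\x00\x00\x08', and decodeint() is its inverse, so
+# decodeint(encodeint(1024)) == 1024.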
+
+__all__ = [
+ 'addtolist', 'decodeint', 'encodeint', 'getboth',
+ 'getindex', 'getindex_mem', 'getitem', 'listindex',
+ 'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
+ 'xsplit', 'xsplit_mem',
+]
+
+import array
+import errno
+import sys
+
+import portage
+from portage import os
+from portage import shutil
+from portage import normalize_path
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+def addtolist(mylist, curdir):
+ """(list, dir) --- Takes an array(list) and appends all files from dir down
+ the directory tree. Returns nothing. list is modified."""
+ curdir = normalize_path(_unicode_decode(curdir,
+ encoding=_encodings['fs'], errors='strict'))
+ for parent, dirs, files in os.walk(curdir):
+
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ if parent != curdir:
+ mylist.append(parent[len(curdir) + 1:] + os.sep)
+
+ # Iterate over a copy, since removing an entry from the list being
+ # iterated over would skip the following entry; dirs itself must be
+ # modified in place so that os.walk() prunes the removed directories.
+ for x in list(dirs):
+ try:
+ _unicode_decode(x, encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ dirs.remove(x)
+
+ for x in files:
+ try:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ a = array.array('B')
+ a.append((myint >> 24) & 0xff)
+ a.append((myint >> 16) & 0xff)
+ a.append((myint >> 8) & 0xff)
+ a.append(myint & 0xff)
+ try:
+ # Python >= 3.2
+ return a.tobytes()
+ except AttributeError:
+ return a.tostring()
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ if sys.hexversion < 0x3000000:
+ mystring = [ord(x) for x in mystring]
+ myint = 0
+ myint += mystring[3]
+ myint += mystring[2] << 8
+ myint += mystring[1] << 16
+ myint += mystring[0] << 24
+ return myint
+
+def xpak(rootdir, outfile=None):
+ """(rootdir, outfile) -- creates an xpak segment of the directory 'rootdir'
+ and under the name 'outfile' if it is specified. Otherwise it returns the
+ xpak segment."""
+
+ mylist = []
+
+ addtolist(mylist, rootdir)
+ mylist.sort()
+ mydata = {}
+ for x in mylist:
+ if x == 'CONTENTS':
+ # CONTENTS is generated during the merge process.
+ continue
+ x = _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ with open(os.path.join(rootdir, x), 'rb') as f:
+ mydata[x] = f.read()
+
+ xpak_segment = xpak_mem(mydata)
+ if outfile:
+ outf = open(_unicode_encode(outfile,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ outf.write(xpak_segment)
+ outf.close()
+ else:
+ return xpak_segment
+
+def xpak_mem(mydata):
+ """Create an xpack segment from a map object."""
+
+ mydata_encoded = {}
+ for k, v in mydata.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata_encoded[k] = v
+ mydata = mydata_encoded
+ del mydata_encoded
+
+ indexglob = b''
+ indexpos = 0
+ dataglob = b''
+ datapos = 0
+ for x, newglob in mydata.items():
+ mydatasize = len(newglob)
+ indexglob = indexglob + encodeint(len(x)) + x + encodeint(datapos) + encodeint(mydatasize)
+ indexpos = indexpos + 4 + len(x) + 4 + 4
+ dataglob = dataglob + newglob
+ datapos = datapos + mydatasize
+ return b'XPAKPACK' \
+ + encodeint(len(indexglob)) \
+ + encodeint(len(dataglob)) \
+ + indexglob \
+ + dataglob \
+ + b'XPAKSTOP'
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+ 'infile.dat' contails the data segment."""
+ infile = _unicode_decode(infile,
+ encoding=_encodings['fs'], errors='strict')
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydat = myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return False
+
+ myfile = open(_unicode_encode(infile + '.index',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[0])
+ myfile.close()
+ myfile = open(_unicode_encode(infile + '.dat',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[1])
+ myfile.close()
+ return True
+
+def xsplit_mem(mydat):
+ if mydat[0:8] != b'XPAKPACK':
+ return None
+ if mydat[-8:] != b'XPAKSTOP':
+ return None
+ indexsize = decodeint(mydat[8:12])
+ return (mydat[16:indexsize + 16], mydat[indexsize + 16:-8])
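+
+# Round-trip sketch (illustrative): xpak_mem() builds a segment that
+# xsplit_mem() splits back apart:
+#   index, data = xsplit_mem(xpak_mem({'CATEGORY': 'app-misc\n'}))
+#   searchindex(index, 'CATEGORY') == (0, 9)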
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader = myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize = decodeint(myheader[8:12])
+ myindex = myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment, dataSegment]"""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader = myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize = decodeint(myheader[8:12])
+ datasize = decodeint(myheader[12:16])
+ myindex = myfile.read(indexsize)
+ mydata = myfile.read(datasize)
+ myfile.close()
+ return myindex, mydata
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print(x)
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen = len(myindex)
+ startpos = 0
+ myret = []
+ while ((startpos + 8) < myindexlen):
+ mytestlen = decodeint(myindex[startpos:startpos + 4])
+ myret = myret + [myindex[startpos + 4:startpos + 4 + mytestlen]]
+ startpos = startpos + mytestlen + 12
+ return myret
+
+def searchindex(myindex, myitem):
+ """(index, item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ myitem = _unicode_encode(myitem,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mylen = len(myitem)
+ myindexlen = len(myindex)
+ startpos = 0
+ while ((startpos + 8) < myindexlen):
+ mytestlen = decodeint(myindex[startpos:startpos + 4])
+ if mytestlen == mylen:
+ if myitem == myindex[startpos + 4:startpos + 4 + mytestlen]:
+ #found
+ datapos = decodeint(myindex[startpos + 4 + mytestlen:startpos + 8 + mytestlen])
+ datalen = decodeint(myindex[startpos + 8 + mytestlen:startpos + 12 + mytestlen])
+ return datapos, datalen
+ startpos = startpos + mytestlen + 12
+
+def getitem(myid, myitem):
+ myindex = myid[0]
+ mydata = myid[1]
+ myloc = searchindex(myindex, myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0] + myloc[1]]
+
+def xpand(myid, mydest):
+ mydest = normalize_path(mydest) + os.sep
+ myindex = myid[0]
+ mydata = myid[1]
+ myindexlen = len(myindex)
+ startpos = 0
+ while ((startpos + 8) < myindexlen):
+ namelen = decodeint(myindex[startpos:startpos + 4])
+ datapos = decodeint(myindex[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(myindex[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = myindex[startpos + 4:startpos + 4 + namelen]
+ myname = _unicode_decode(myname,
+ encoding=_encodings['repo.content'], errors='replace')
+ filename = os.path.join(mydest, myname.lstrip(os.sep))
+ filename = normalize_path(filename)
+ if not filename.startswith(mydest):
+ # myname contains invalid ../ component(s)
+ continue
+ dirname = os.path.dirname(filename)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ mydat.write(mydata[datapos:datapos + datalen])
+ mydat.close()
+ startpos = startpos + namelen + 12
+
+class tbz2(object):
+ def __init__(self, myfile):
+ self.file = myfile
+ self.filestat = None
+ self.index = b''
+ self.infosize = 0
+ self.xpaksize = 0
+ self.indexsize = None
+ self.datasize = None
+ self.indexpos = None
+ self.datapos = None
+
+ def decompose(self, datadir, cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+ Returns result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup:
+ self.cleanup(datadir)
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+ return self.unpackinfo(datadir)
+
+ def compose(self, datadir, cleanup=0):
+ """Alias for recompose()."""
+ return self.recompose(datadir, cleanup)
+
+ def recompose(self, datadir, cleanup=0, break_hardlinks=True):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ xpdata = xpak(datadir)
+ self.recompose_mem(xpdata, break_hardlinks=break_hardlinks)
+ if cleanup:
+ self.cleanup(datadir)
+
+ def recompose_mem(self, xpdata, break_hardlinks=True):
+ """
+ Update the xpak segment.
+ @param xpdata: A new xpak segment to be written, like that returned
+ from the xpak_mem() function.
+ @param break_hardlinks: If hardlinks exist, create a copy in order
+ to break them. This makes it safe to use hardlinks to create
+ cheap snapshots of the repository, which is useful for solving
+ race conditions on binhosts as described here:
+ http://code.google.com/p/chromium-os/issues/detail?id=3225.
+ Default is True.
+ """
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+
+ if break_hardlinks and self.filestat and self.filestat.st_nlink > 1:
+ tmp_fname = "%s.%d" % (self.file, os.getpid())
+ shutil.copyfile(self.file, tmp_fname)
+ try:
+ portage.util.apply_stat_permissions(self.file, self.filestat)
+ except portage.exception.OperationNotPermitted:
+ pass
+ os.rename(tmp_fname, self.file)
+
+ myfile = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'ab+')
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize, 2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ myfile.write(xpdata + encodeint(len(xpdata)) + b'STOP')
+ myfile.flush()
+ myfile.close()
+ return 1
+
+ def cleanup(self, datadir):
+ datadir_split = os.path.split(datadir)
+ if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+ # This is potentially dangerous,
+ # thus the above sanity check.
+ try:
+ shutil.rmtree(datadir)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ pass
+ else:
+ raise oe
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ a = None
+ try:
+ mystat = os.stat(self.file)
+ if self.filestat:
+ changed = False
+ if mystat.st_size != self.filestat.st_size \
+ or mystat.st_mtime != self.filestat.st_mtime \
+ or mystat.st_ctime != self.filestat.st_ctime:
+ changed = True
+ if not changed:
+ return 1
+ self.filestat = mystat
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(-16, 2)
+ trailer = a.read()
+ self.infosize = 0
+ self.xpaksize = 0
+ if trailer[-4:] != b'STOP':
+ return 0
+ if trailer[0:8] != b'XPAKSTOP':
+ return 0
+ self.infosize = decodeint(trailer[8:12])
+ self.xpaksize = self.infosize + 8
+ a.seek(-(self.xpaksize), 2)
+ header = a.read(16)
+ if header[0:8] != b'XPAKPACK':
+ return 0
+ self.indexsize = decodeint(header[8:12])
+ self.datasize = decodeint(header[12:16])
+ self.indexpos = a.tell()
+ self.index = a.read(self.indexsize)
+ self.datapos = a.tell()
+ return 2
+ except SystemExit:
+ raise
+ except:
+ return 0
+ finally:
+ if a is not None:
+ a.close()
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self, myfile, mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult = searchindex(self.index, myfile)
+ if not myresult:
+ return mydefault
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos + myresult[0], 0)
+ myreturn = a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self, myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat = self.getfile(myfile)
+ if not mydat:
+ return []
+ return mydat.split()
+
+ def unpackinfo(self, mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ mydest = normalize_path(mydest) + os.sep
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ startpos = 0
+ while ((startpos + 8) < self.indexsize):
+ namelen = decodeint(self.index[startpos:startpos + 4])
+ datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = self.index[startpos + 4:startpos + 4 + namelen]
+ myname = _unicode_decode(myname,
+ encoding=_encodings['repo.content'], errors='replace')
+ filename = os.path.join(mydest, myname.lstrip(os.sep))
+ filename = normalize_path(filename)
+ if not filename.startswith(mydest):
+ # myname contains invalid ../ component(s)
+ continue
+ dirname = os.path.dirname(filename)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ a.seek(self.datapos + datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos = startpos + namelen + 12
+ a.close()
+ return 1
+
+ def get_data(self):
+ """Returns all the files from the dataSegment as a map object."""
+ if not self.scan():
+ return {}
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydata = {}
+ startpos = 0
+ while ((startpos + 8) < self.indexsize):
+ namelen = decodeint(self.index[startpos:startpos + 4])
+ datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = self.index[startpos + 4:startpos + 4 + namelen]
+ a.seek(self.datapos + datapos)
+ mydata[myname] = a.read(datalen)
+ startpos = startpos + namelen + 12
+ a.close()
+ return mydata
+
+ def getboth(self):
+ """Returns an array [indexSegment, dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos)
+ mydata = a.read(self.datasize)
+ a.close()
+
+ return self.index, mydata
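+
+# Illustrative usage (a sketch; the path is made up and must name a binary
+# package with an xpak segment appended):
+#   t = tbz2('/path/to/pkg.tbz2')
+#   t.filelist()           # names in the index, or None if scan() fails
+#   t.getfile('CATEGORY')  # raw bytes for one entry, or None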
diff --git a/usr/lib/portage/pym/repoman/__init__.py b/usr/lib/portage/pym/repoman/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/usr/lib/portage/pym/repoman/__init__.py
diff --git a/usr/lib/portage/pym/repoman/checks.py b/usr/lib/portage/pym/repoman/checks.py
new file mode 100644
index 0000000..5f37648
--- /dev/null
+++ b/usr/lib/portage/pym/repoman/checks.py
@@ -0,0 +1,920 @@
+# repoman: Checks
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""This module contains functions used in Repoman to ascertain the quality
+and correctness of an ebuild."""
+
+from __future__ import unicode_literals
+
+import codecs
+from itertools import chain
+import re
+import time
+import repoman.errors as errors
+import portage
+from portage.eapi import eapi_supports_prefix, eapi_has_implicit_rdepend, \
+ eapi_has_src_prepare_and_src_configure, eapi_has_dosed_dohard, \
+ eapi_exports_AA, eapi_has_pkg_pretend
+
+class LineCheck(object):
+ """Run a check on a line of an ebuild."""
+ """A regular expression to determine whether to ignore the line"""
+ ignore_line = False
+ """True if lines containing nothing more than comments with optional
+ leading whitespace should be ignored"""
+ ignore_comment = True
+
+ def new(self, pkg):
+ pass
+
+ def check_eapi(self, eapi):
+ """ returns if the check should be run in the given EAPI (default is True) """
+ return True
+
+ def check(self, num, line):
+ """Run the check on line and return error if there is one"""
+ if self.re.match(line):
+ return self.error
+
+ def end(self):
+ pass
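+
+# A minimal subclass (hypothetical sketch) only needs a compiled regular
+# expression and an error value; the base class check() above returns
+# self.error whenever self.re matches the line:
+#
+#   class FixmeCheck(LineCheck):
+#       repoman_check_name = 'ebuild.example'
+#       re = re.compile(r'.*\bFIXME\b')
+#       error = 'FIXME marker on line: %d'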
+
+class PhaseCheck(LineCheck):
+ """ basic class for function detection """
+
+ func_end_re = re.compile(r'^\}$')
+ phases_re = re.compile('(%s)' % '|'.join((
+ 'pkg_pretend', 'pkg_setup', 'src_unpack', 'src_prepare',
+ 'src_configure', 'src_compile', 'src_test', 'src_install',
+ 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm',
+ 'pkg_config')))
+ in_phase = ''
+
+ def check(self, num, line):
+ m = self.phases_re.match(line)
+ if m is not None:
+ self.in_phase = m.group(1)
+ if self.in_phase != '' and \
+ self.func_end_re.match(line) is not None:
+ self.in_phase = ''
+
+ return self.phase_check(num, line)
+
+ def phase_check(self, num, line):
+ """ override this function for your checks """
+ pass
+
+class EbuildHeader(LineCheck):
+ """Ensure ebuilds have proper headers
+ Copyright header errors
+ CVS header errors
+ License header errors
+
+ Args:
+ modification_year - Year the ebuild was last modified
+ """
+
+ repoman_check_name = 'ebuild.badheader'
+
+ gentoo_copyright = r'^# Copyright ((1999|2\d\d\d)-)?%s Gentoo Foundation$'
+ # No need for a regex here; a plain string match is used instead:
+ # gentoo_license = re.compile(r'^# Distributed under the terms of the GNU General Public License v2$')
+ gentoo_license = '# Distributed under the terms of the GNU General Public License v2'
+ cvs_header = re.compile(r'^# \$Header: .*\$$')
+ ignore_comment = False
+
+ def new(self, pkg):
+ if pkg.mtime is None:
+ self.modification_year = r'2\d\d\d'
+ else:
+ self.modification_year = str(time.gmtime(pkg.mtime)[0])
+ self.gentoo_copyright_re = re.compile(
+ self.gentoo_copyright % self.modification_year)
+
+ def check(self, num, line):
+ if num > 2:
+ return
+ elif num == 0:
+ if not self.gentoo_copyright_re.match(line):
+ return errors.COPYRIGHT_ERROR
+ elif num == 1 and line.rstrip('\n') != self.gentoo_license:
+ return errors.LICENSE_ERROR
+ elif num == 2:
+ if not self.cvs_header.match(line):
+ return errors.CVS_HEADER_ERROR
+
+
+class EbuildWhitespace(LineCheck):
+ """Ensure ebuilds have proper whitespacing"""
+
+ repoman_check_name = 'ebuild.minorsyn'
+
+ ignore_line = re.compile(r'(^$)|(^(\t)*#)')
+ ignore_comment = False
+ leading_spaces = re.compile(r'^[\S\t]')
+ trailing_whitespace = re.compile(r'.*([\S]$)')
+
+ def check(self, num, line):
+ if self.leading_spaces.match(line) is None:
+ return errors.LEADING_SPACES_ERROR
+ if self.trailing_whitespace.match(line) is None:
+ return errors.TRAILING_WHITESPACE_ERROR
+
+class EbuildBlankLine(LineCheck):
+ repoman_check_name = 'ebuild.minorsyn'
+ ignore_comment = False
+ blank_line = re.compile(r'^$')
+
+ def new(self, pkg):
+ self.line_is_blank = False
+
+ def check(self, num, line):
+ if self.line_is_blank and self.blank_line.match(line):
+ return 'Useless blank line on line: %d'
+ if self.blank_line.match(line):
+ self.line_is_blank = True
+ else:
+ self.line_is_blank = False
+
+ def end(self):
+ if self.line_is_blank:
+ yield 'Useless blank line on last line'
+
+class EbuildQuote(LineCheck):
+ """Ensure ebuilds have valid quoting around things like D,FILESDIR, etc..."""
+
+ repoman_check_name = 'ebuild.minorsyn'
+ _message_commands = ["die", "echo", "eerror",
+ "einfo", "elog", "eqawarn", "ewarn"]
+ _message_re = re.compile(r'\s(' + "|".join(_message_commands) + \
+ r')\s+"[^"]*"\s*$')
+ _ignored_commands = ["local", "export"] + _message_commands
+ ignore_line = re.compile(r'(^$)|(^\s*#.*)|(^\s*\w+=.*)' + \
+ r'|(^\s*(' + "|".join(_ignored_commands) + r')\s+)')
+ ignore_comment = False
+ var_names = ["D", "DISTDIR", "FILESDIR", "S", "T", "ROOT", "WORKDIR"]
+
+ # EAPI=3/Prefix vars
+ var_names += ["ED", "EPREFIX", "EROOT"]
+
+ # variables for games.eclass
+ var_names += ["Ddir", "GAMES_PREFIX_OPT", "GAMES_DATADIR",
+ "GAMES_DATADIR_BASE", "GAMES_SYSCONFDIR", "GAMES_STATEDIR",
+ "GAMES_LOGDIR", "GAMES_BINDIR"]
+
+ # variables for multibuild.eclass
+ var_names += ["BUILD_DIR"]
+
+ var_names = "(%s)" % "|".join(var_names)
+ var_reference = re.compile(r'\$(\{'+var_names+'\}|' + \
+ var_names + '\W)')
+ missing_quotes = re.compile(r'(\s|^)[^"\'\s]*\$\{?' + var_names + \
+ r'\}?[^"\'\s]*(\s|$)')
+ cond_begin = re.compile(r'(^|\s+)\[\[($|\\$|\s+)')
+ cond_end = re.compile(r'(^|\s+)\]\]($|\\$|\s+)')
+
+ def check(self, num, line):
+ if self.var_reference.search(line) is None:
+ return
+ # There can be multiple matches / violations on a single line. We
+ # have to make sure none of the matches are violators. Once we've
+ # found one violator, any remaining matches on the same line can
+ # be ignored.
+ pos = 0
+ while pos <= len(line) - 1:
+ missing_quotes = self.missing_quotes.search(line, pos)
+ if not missing_quotes:
+ break
+ # If the last character of the previous match is a whitespace
+ # character, that character may be needed for the next
+ # missing_quotes match, so search overlaps by 1 character.
+ group = missing_quotes.group()
+ pos = missing_quotes.end() - 1
+
+ # Filter out some false positives that can
+ # get through the missing_quotes regex.
+ if self.var_reference.search(group) is None:
+ continue
+
+ # Filter matches that appear to be an
+ # argument to a message command.
+ # For example: false || ewarn "foo $WORKDIR/bar baz"
+ message_match = self._message_re.search(line)
+ if message_match is not None and \
+ message_match.start() < pos and \
+ message_match.end() > pos:
+ break
+
+ # This is an attempt to avoid false positives without getting
+ # too complex, while possibly allowing some (hopefully
+ # unlikely) violations to slip through. We just assume
+ # everything is correct if there is a ' [[ ' or a ' ]] '
+ # anywhere in the whole line (which may have been merged from
+ # several continuation lines).
+ if self.cond_begin.search(line) is not None:
+ continue
+ if self.cond_end.search(line) is not None:
+ continue
+
+ # Any remaining matches on the same line can be ignored.
+ return errors.MISSING_QUOTES_ERROR
+
+
+class EbuildAssignment(LineCheck):
+ """Ensure ebuilds don't assign to readonly variables."""
+
+ repoman_check_name = 'variable.readonly'
+ readonly_assignment = re.compile(r'^\s*(export\s+)?(A|CATEGORY|P|PV|PN|PR|PVR|PF|D|WORKDIR|FILESDIR|FEATURES|USE)=')
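+ # e.g. both 'D=/tmp' and 'export WORKDIR=/tmp' are flagged here.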
+
+ def check(self, num, line):
+ match = self.readonly_assignment.match(line)
+ e = None
+ if match is not None:
+ e = errors.READONLY_ASSIGNMENT_ERROR
+ return e
+
+class Eapi3EbuildAssignment(EbuildAssignment):
+ """Ensure ebuilds don't assign to readonly EAPI 3-introduced variables."""
+
+ readonly_assignment = re.compile(r'\s*(export\s+)?(ED|EPREFIX|EROOT)=')
+
+ def check_eapi(self, eapi):
+ return eapi_supports_prefix(eapi)
+
+class EbuildNestedDie(LineCheck):
+ """Check ebuild for nested die statements (die statements in subshells)"""
+
+ repoman_check_name = 'ebuild.nesteddie'
+ nesteddie_re = re.compile(r'^[^#]*\s\(\s[^)]*\bdie\b')
+
+ def check(self, num, line):
+ if self.nesteddie_re.match(line):
+ return errors.NESTED_DIE_ERROR
+
+
+class EbuildUselessDodoc(LineCheck):
+ """Check ebuild for useless files in dodoc arguments."""
+ repoman_check_name = 'ebuild.minorsyn'
+ uselessdodoc_re = re.compile(
+ r'^\s*dodoc(\s+|\s+.*\s+)(ABOUT-NLS|COPYING|LICENCE|LICENSE)($|\s)')
+
+ def check(self, num, line):
+ match = self.uselessdodoc_re.match(line)
+ if match:
+ return "Useless dodoc '%s'" % (match.group(2), ) + " on line: %d"
+
+
+class EbuildUselessCdS(LineCheck):
+ """Check for redundant cd ${S} statements"""
+ repoman_check_name = 'ebuild.minorsyn'
+ method_re = re.compile(r'^\s*src_(prepare|configure|compile|install|test)\s*\(\)')
+ cds_re = re.compile(r'^\s*cd\s+("\$(\{S\}|S)"|\$(\{S\}|S))\s')
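+ # e.g. a 'cd "${S}"' on the first line of src_compile() is redundant,
+ # since portage already runs these phases with ${S} as the cwd.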
+
+ def __init__(self):
+ self.check_next_line = False
+
+ def check(self, num, line):
+ if self.check_next_line:
+ self.check_next_line = False
+ if self.cds_re.match(line):
+ return errors.REDUNDANT_CD_S_ERROR
+ elif self.method_re.match(line):
+ self.check_next_line = True
+
+class EapiDefinition(LineCheck):
+ """
+ Check that EAPI assignment conforms to PMS section 7.3.1
+ (first non-comment, non-blank line).
+ """
+ repoman_check_name = 'EAPI.definition'
+ ignore_comment = True
+ _eapi_re = portage._pms_eapi_re
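+ # e.g. an ebuild whose first non-blank, non-comment line is EAPI=5
+ # (or EAPI="5") satisfies this check.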
+
+ def new(self, pkg):
+ self._cached_eapi = pkg.eapi
+ self._parsed_eapi = None
+ self._eapi_line_num = None
+
+ def check(self, num, line):
+ if self._eapi_line_num is None and line.strip():
+ self._eapi_line_num = num + 1
+ m = self._eapi_re.match(line)
+ if m is not None:
+ self._parsed_eapi = m.group(2)
+
+ def end(self):
+ if self._parsed_eapi is None:
+ if self._cached_eapi != "0":
+ yield "valid EAPI assignment must occur on or before line: %s" % \
+ self._eapi_line_num
+ elif self._parsed_eapi != self._cached_eapi:
+ yield ("bash returned EAPI '%s' which does not match "
+ "assignment on line: %s") % \
+ (self._cached_eapi, self._eapi_line_num)
+
+class EbuildPatches(LineCheck):
+ """Ensure ebuilds use bash arrays for PATCHES to ensure white space safety"""
+ repoman_check_name = 'ebuild.patches'
+ re = re.compile(r'^\s*PATCHES=[^\(]')
+ error = errors.PATCHES_ERROR
+
+class EbuildQuotedA(LineCheck):
+ """Ensure ebuilds have no quoting around ${A}"""
+
+ repoman_check_name = 'ebuild.minorsyn'
+ a_quoted = re.compile(r'.*\"\$(\{A\}|A)\"')
+
+ def check(self, num, line):
+ match = self.a_quoted.match(line)
+ if match:
+ return "Quoted \"${A}\" on line: %d"
+
+class NoOffsetWithHelpers(LineCheck):
+ """ Check that the image location, the alternate root offset, and the
+ offset prefix (D, ROOT, ED, EROOT and EPREFIX) are not used with
+ helpers """
+
+ repoman_check_name = 'variable.usedwithhelpers'
+ # Ignore matches in quoted strings like this:
+ # elog "installed into ${ROOT}usr/share/php5/apc/."
+ re = re.compile(r'^[^#"\']*\b(docinto|docompress|dodir|dohard|exeinto|fowners|fperms|insinto|into)\s+"?\$\{?(D|ROOT|ED|EROOT|EPREFIX)\b.*')
+ error = errors.NO_OFFSET_WITH_HELPERS
+
+class ImplicitRuntimeDeps(LineCheck):
+ """
+ Detect the case where DEPEND is set and RDEPEND is unset in the ebuild,
+ since this triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4).
+ """
+
+ repoman_check_name = 'RDEPEND.implicit'
+ _assignment_re = re.compile(r'^\s*(R?DEPEND)\+?=')
+
+ def new(self, pkg):
+ self._rdepend = False
+ self._depend = False
+
+ def check_eapi(self, eapi):
+ # Beginning with EAPI 4, there is no
+ # implicit RDEPEND=$DEPEND assignment
+ # to be concerned with.
+ return eapi_has_implicit_rdepend(eapi)
+
+ def check(self, num, line):
+ if not self._rdepend:
+ m = self._assignment_re.match(line)
+ if m is None:
+ pass
+ elif m.group(1) == "RDEPEND":
+ self._rdepend = True
+ elif m.group(1) == "DEPEND":
+ self._depend = True
+
+ def end(self):
+ if self._depend and not self._rdepend:
+ yield 'RDEPEND is not explicitly assigned'
+
+class InheritDeprecated(LineCheck):
+ """Check if ebuild directly or indirectly inherits a deprecated eclass."""
+
+ repoman_check_name = 'inherit.deprecated'
+
+ # deprecated eclass : new eclass (False if no new eclass)
+ deprecated_classes = {
+ "bash-completion": "bash-completion-r1",
+ "boost-utils": False,
+ "distutils": "distutils-r1",
+ "gems": "ruby-fakegem",
+ "mono": "mono-env",
+ "python": "python-r1 / python-single-r1 / python-any-r1",
+ "ruby": "ruby-ng",
+ "x-modular": "xorg-2",
+ }
+
+ _inherit_re = re.compile(r'^\s*inherit\s(.*)$')
+
+ def new(self, pkg):
+ self._errors = []
+
+ def check(self, num, line):
+ direct_inherits = None
+ m = self._inherit_re.match(line)
+ if m is not None:
+ direct_inherits = m.group(1)
+ if direct_inherits:
+ direct_inherits = direct_inherits.split()
+
+ if not direct_inherits:
+ return
+
+ for eclass in direct_inherits:
+ replacement = self.deprecated_classes.get(eclass)
+ if replacement is None:
+ pass
+ elif replacement is False:
+ self._errors.append("please migrate from " + \
+ "'%s' (no replacement) on line: %d" % (eclass, num + 1))
+ else:
+ self._errors.append("please migrate from " + \
+ "'%s' to '%s' on line: %d" % \
+ (eclass, replacement, num + 1))
+
+ def end(self):
+ for error in self._errors:
+ yield error
+ del self._errors
+
+class InheritEclass(LineCheck):
+ """
+ Base class for checking for missing inherits, as well as excess inherits.
+
+ Args:
+ eclass: Set to the name of your eclass.
+ funcs: A tuple of functions that this eclass provides.
+ comprehensive: Is the list of functions complete?
+ exempt_eclasses: If these eclasses are inherited, disable the missing
+ inherit check.
+ """
+
+ def __init__(self, eclass, funcs=None, comprehensive=False,
+ exempt_eclasses=None, ignore_missing=False, **kwargs):
+ self._eclass = eclass
+ self._comprehensive = comprehensive
+ self._exempt_eclasses = exempt_eclasses
+ self._ignore_missing = ignore_missing
+ inherit_re = eclass
+ self._inherit_re = re.compile(r'^(\s*|.*[|&]\s*)\binherit\s(.*\s)?%s(\s|$)' % inherit_re)
+ # Match when the function is preceded only by leading whitespace, a
+ # shell operator such as (, {, |, ||, or &&, or optional variable
+ # setting(s). This prevents false positives in things like elog
+ # messages, as reported in bug #413285.
+ self._func_re = re.compile(r'(^|[|&{(])\s*(\w+=.*)?\b(' + '|'.join(funcs) + r')\b')
+
+ def new(self, pkg):
+ self.repoman_check_name = 'inherit.missing'
+ # We can't use pkg.inherited because that tells us all the eclasses that
+ # have been inherited and not just the ones we inherit directly.
+ self._inherit = False
+ self._func_call = False
+ if self._exempt_eclasses is not None:
+ inherited = pkg.inherited
+ self._disabled = any(x in inherited for x in self._exempt_eclasses)
+ else:
+ self._disabled = False
+ self._eapi = pkg.eapi
+
+ def check(self, num, line):
+ if not self._inherit:
+ self._inherit = self._inherit_re.match(line)
+ if not self._inherit:
+ if self._disabled or self._ignore_missing:
+ return
+ s = self._func_re.search(line)
+ if s is not None:
+ func_name = s.group(3)
+ eapi_func = _eclass_eapi_functions.get(func_name)
+ if eapi_func is None or not eapi_func(self._eapi):
+ self._func_call = True
+ return ('%s.eclass is not inherited, '
+ 'but "%s" found at line: %s') % \
+ (self._eclass, func_name, '%d')
+ elif not self._func_call:
+ self._func_call = self._func_re.search(line)
+
+ def end(self):
+ if not self._disabled and self._comprehensive and self._inherit and not self._func_call:
+ self.repoman_check_name = 'inherit.unused'
+ yield 'no function called from %s.eclass; please drop' % self._eclass
+
+_eclass_eapi_functions = {
+ "usex" : lambda eapi: eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+}
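+# usex became a package-manager-provided command in EAPI 5; in older
+# EAPIs it has to be inherited from an eclass such as eutils.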
+
+# eclasses that export ${ECLASS}_src_(compile|configure|install)
+_eclass_export_functions = (
+ 'ant-tasks', 'apache-2', 'apache-module', 'aspell-dict',
+ 'autotools-utils', 'base', 'bsdmk', 'cannadic',
+ 'clutter', 'cmake-utils', 'db', 'distutils', 'elisp',
+ 'embassy', 'emboss', 'emul-linux-x86', 'enlightenment',
+ 'font-ebdftopcf', 'font', 'fox', 'freebsd', 'freedict',
+ 'games', 'games-ggz', 'games-mods', 'gdesklets',
+ 'gems', 'gkrellm-plugin', 'gnatbuild', 'gnat', 'gnome2',
+ 'gnome-python-common', 'gnustep-base', 'go-mono', 'gpe',
+ 'gst-plugins-bad', 'gst-plugins-base', 'gst-plugins-good',
+ 'gst-plugins-ugly', 'gtk-sharp-module', 'haskell-cabal',
+ 'horde', 'java-ant-2', 'java-pkg-2', 'java-pkg-simple',
+ 'java-virtuals-2', 'kde4-base', 'kde4-meta', 'kernel-2',
+ 'latex-package', 'linux-mod', 'mozlinguas', 'myspell',
+ 'myspell-r2', 'mysql', 'mysql-v2', 'mythtv-plugins',
+ 'oasis', 'obs-service', 'office-ext', 'perl-app',
+ 'perl-module', 'php-ext-base-r1', 'php-ext-pecl-r2',
+ 'php-ext-source-r2', 'php-lib-r1', 'php-pear-lib-r1',
+ 'php-pear-r1', 'python-distutils-ng', 'python',
+ 'qt4-build', 'qt4-r2', 'rox-0install', 'rox', 'ruby',
+ 'ruby-ng', 'scsh', 'selinux-policy-2', 'sgml-catalog',
+ 'stardict', 'sword-module', 'tetex-3', 'tetex',
+ 'texlive-module', 'toolchain-binutils', 'toolchain',
+ 'twisted', 'vdr-plugin-2', 'vdr-plugin', 'vim',
+ 'vim-plugin', 'vim-spell', 'virtuoso', 'vmware',
+ 'vmware-mod', 'waf-utils', 'webapp', 'xemacs-elisp',
+ 'xemacs-packages', 'xfconf', 'x-modular', 'xorg-2',
+ 'zproduct'
+)
+
+_eclass_info = {
+ 'autotools': {
+ 'funcs': (
+ 'eaclocal', 'eautoconf', 'eautoheader',
+ 'eautomake', 'eautoreconf', '_elibtoolize',
+ 'eautopoint'
+ ),
+ 'comprehensive': True,
+
+ # Exempt eclasses:
+ # git - An EGIT_BOOTSTRAP variable may be used to call one of
+ # the autotools functions.
+ # subversion - An ESVN_BOOTSTRAP variable may be used to call one of
+ # the autotools functions.
+ 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
+ },
+
+ 'eutils': {
+ 'funcs': (
+ 'estack_push', 'estack_pop', 'eshopts_push', 'eshopts_pop',
+ 'eumask_push', 'eumask_pop', 'epatch', 'epatch_user',
+ 'emktemp', 'edos2unix', 'in_iuse', 'use_if_iuse', 'usex'
+ ),
+ 'comprehensive': False,
+
+ # These are "eclasses are the whole ebuild" type thing.
+ 'exempt_eclasses': _eclass_export_functions,
+ },
+
+ 'flag-o-matic': {
+ 'funcs': (
+ 'filter-(ld)?flags', 'strip-flags', 'strip-unsupported-flags',
+ 'append-((ld|c(pp|xx)?))?flags', 'append-libs',
+ ),
+ 'comprehensive': False
+ },
+
+ 'libtool': {
+ 'funcs': (
+ 'elibtoolize',
+ ),
+ 'comprehensive': True,
+ 'exempt_eclasses': ('autotools',)
+ },
+
+ 'multilib': {
+ 'funcs': (
+ 'get_libdir',
+ ),
+
+ # These are "eclasses are the whole ebuild" type thing.
+ 'exempt_eclasses': _eclass_export_functions + ('autotools', 'libtool',
+ 'multilib-minimal'),
+
+ 'comprehensive': False
+ },
+
+ 'multiprocessing': {
+ 'funcs': (
+ 'makeopts_jobs',
+ ),
+ 'comprehensive': False
+ },
+
+ 'prefix': {
+ 'funcs': (
+ 'eprefixify',
+ ),
+ 'comprehensive': True
+ },
+
+ 'toolchain-funcs': {
+ 'funcs': (
+ 'gen_usr_ldscript',
+ ),
+ 'comprehensive': False
+ },
+
+ 'user': {
+ 'funcs': (
+ 'enewuser', 'enewgroup',
+ 'egetent', 'egethome', 'egetshell', 'esethome'
+ ),
+ 'comprehensive': True
+ }
+}
+
+class EMakeParallelDisabled(PhaseCheck):
+ """Check for emake -j1 calls which disable parallelization."""
+ repoman_check_name = 'upstream.workaround'
+ re = re.compile(r'^\s*emake\s+.*-j\s*1\b')
+ error = errors.EMAKE_PARALLEL_DISABLED
+
+ def phase_check(self, num, line):
+ if self.in_phase == 'src_compile' or self.in_phase == 'src_install':
+ if self.re.match(line):
+ return self.error
+
+class EMakeParallelDisabledViaMAKEOPTS(LineCheck):
+ """Check for MAKEOPTS=-j1 that disables parallelization."""
+ repoman_check_name = 'upstream.workaround'
+ re = re.compile(r'^\s*MAKEOPTS=(\'|")?.*-j\s*1\b')
+ error = errors.EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS
+
+class NoAsNeeded(LineCheck):
+ """Check for calls to the no-as-needed function."""
+ repoman_check_name = 'upstream.workaround'
+ re = re.compile(r'.*\$\(no-as-needed\)')
+ error = errors.NO_AS_NEEDED
+
+class PreserveOldLib(LineCheck):
+ """Check for calls to the deprecated preserve_old_lib function."""
+ repoman_check_name = 'ebuild.minorsyn'
+ re = re.compile(r'.*preserve_old_lib')
+ error = errors.PRESERVE_OLD_LIB
+
+class SandboxAddpredict(LineCheck):
+ """Check for calls to the addpredict function."""
+ repoman_check_name = 'upstream.workaround'
+ re = re.compile(r'(^|\s)addpredict\b')
+ error = errors.SANDBOX_ADDPREDICT
+
+class DeprecatedBindnowFlags(LineCheck):
+ """Check for calls to the deprecated bindnow-flags function."""
+ repoman_check_name = 'ebuild.minorsyn'
+ re = re.compile(r'.*\$\(bindnow-flags\)')
+ error = errors.DEPRECATED_BINDNOW_FLAGS
+
+class WantAutoDefaultValue(LineCheck):
+ """Check setting WANT_AUTO* to latest (default value)."""
+ repoman_check_name = 'ebuild.minorsyn'
+ _re = re.compile(r'^WANT_AUTO(CONF|MAKE)=(\'|")?latest')
+
+ def check(self, num, line):
+ m = self._re.match(line)
+ if m is not None:
+ return 'WANT_AUTO' + m.group(1) + \
+ ' redundantly set to default value "latest" on line: %d'
+
+class SrcCompileEconf(PhaseCheck):
+ repoman_check_name = 'ebuild.minorsyn'
+ configure_re = re.compile(r'\s(econf|./configure)')
+
+ def check_eapi(self, eapi):
+ return eapi_has_src_prepare_and_src_configure(eapi)
+
+ def phase_check(self, num, line):
+ if self.in_phase == 'src_compile':
+ m = self.configure_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " call should be moved to src_configure from line: %d"
+
+class SrcUnpackPatches(PhaseCheck):
+ repoman_check_name = 'ebuild.minorsyn'
+ src_prepare_tools_re = re.compile(r'\s(e?patch|sed)\s')
+
+ def check_eapi(self, eapi):
+ return eapi_has_src_prepare_and_src_configure(eapi)
+
+ def phase_check(self, num, line):
+ if self.in_phase == 'src_unpack':
+ m = self.src_prepare_tools_re.search(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " call should be moved to src_prepare from line: %d"
+
+class BuiltWithUse(LineCheck):
+ repoman_check_name = 'ebuild.minorsyn'
+ re = re.compile(r'(^|.*\b)built_with_use\b')
+ error = errors.BUILT_WITH_USE
+
+class DeprecatedUseq(LineCheck):
+ """Checks for use of the deprecated useq function"""
+ repoman_check_name = 'ebuild.minorsyn'
+ re = re.compile(r'(^|.*\b)useq\b')
+ error = errors.USEQ_ERROR
+
+class DeprecatedHasq(LineCheck):
+ """Checks for use of the deprecated hasq function"""
+ repoman_check_name = 'ebuild.minorsyn'
+ re = re.compile(r'(^|.*\b)hasq\b')
+ error = errors.HASQ_ERROR
+
+# EAPI <2 checks
+class UndefinedSrcPrepareSrcConfigurePhases(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ src_configprepare_re = re.compile(r'\s*(src_configure|src_prepare)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_src_prepare_and_src_configure(eapi)
+
+ def check(self, num, line):
+ m = self.src_configprepare_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 2 on line: %d"
+
+
+# EAPI-3 checks
+class Eapi3DeprecatedFuncs(LineCheck):
+ repoman_check_name = 'EAPI.deprecated'
+ deprecated_commands_re = re.compile(r'^\s*(check_license)\b')
+
+ def check_eapi(self, eapi):
+ return eapi not in ('0', '1', '2')
+
+ def check(self, num, line):
+ m = self.deprecated_commands_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " has been deprecated in EAPI=3 on line: %d"
+
+# EAPI <4 checks
+class UndefinedPkgPretendPhase(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ pkg_pretend_re = re.compile(r'\s*(pkg_pretend)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_pkg_pretend(eapi)
+
+ def check(self, num, line):
+ m = self.pkg_pretend_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 4 on line: %d"
+
+# EAPI-4 checks
+class Eapi4IncompatibleFuncs(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ banned_commands_re = re.compile(r'^\s*(dosed|dohard)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_dosed_dohard(eapi)
+
+ def check(self, num, line):
+ m = self.banned_commands_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " has been banned in EAPI=4 on line: %d"
+
+class Eapi4GoneVars(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ undefined_vars_re = re.compile(r'.*\$(\{(AA|KV|EMERGE_FROM)\}|(AA|KV|EMERGE_FROM))')
+
+ def check_eapi(self, eapi):
+ # AA, KV, and EMERGE_FROM should not be referenced in EAPI 4 or later.
+ return not eapi_exports_AA(eapi)
+
+ def check(self, num, line):
+ m = self.undefined_vars_re.match(line)
+ if m is not None:
+ return ("variable '$%s'" % m.group(1)) + \
+ " is gone in EAPI=4 on line: %d"
+
+class PortageInternal(LineCheck):
+ repoman_check_name = 'portage.internal'
+ ignore_comment = True
+ # Match when the command is preceded only by leading whitespace or a shell
+ # operator such as (, {, |, ||, or &&. This prevents false positives in
+ # things like elog messages, as reported in bug #413285.
+ re = re.compile(r'^(\s*|.*[|&{(]+\s*)\b(ecompress|ecompressdir|env-update|prepall|prepalldocs|preplib)\b')
+
+ def check(self, num, line):
+ """Run the check on line and return error if there is one"""
+ m = self.re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(2)) + " called on line: %d"
+
+class PortageInternalVariableAssignment(LineCheck):
+ repoman_check_name = 'portage.internal'
+ internal_assignment = re.compile(r'\s*(export\s+)?(EXTRA_ECONF|EXTRA_EMAKE)\+?=')
+
+ def check(self, num, line):
+ match = self.internal_assignment.match(line)
+ e = None
+ if match is not None:
+ e = 'Assignment to variable %s' % match.group(2)
+ e += ' on line: %d'
+ return e
+
+_base_check_classes = (InheritEclass, LineCheck, PhaseCheck)
+_constant_checks = None
+
+def _init(experimental_inherit=False):
+
+ global _constant_checks, _eclass_info
+
+ if not experimental_inherit:
+ # Emulate the old eprefixify.defined and inherit.autotools checks.
+ _eclass_info = {
+ 'autotools': {
+ 'funcs': (
+ 'eaclocal', 'eautoconf', 'eautoheader',
+ 'eautomake', 'eautoreconf', '_elibtoolize',
+ 'eautopoint'
+ ),
+ 'comprehensive': True,
+ 'ignore_missing': True,
+ 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
+ },
+
+ 'prefix': {
+ 'funcs': (
+ 'eprefixify',
+ ),
+ 'comprehensive': False
+ }
+ }
+
+ _constant_checks = tuple(chain((v() for k, v in globals().items()
+ if isinstance(v, type) and issubclass(v, LineCheck) and
+ v not in _base_check_classes),
+ (InheritEclass(k, **portage._native_kwargs(kwargs))
+ for k, kwargs in _eclass_info.items())))
+
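+# Matches the start of a here-document, e.g. 'cat <<-EOF', capturing the
+# delimiter so the body can be skipped rather than checked as code.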
+_here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
+_ignore_comment_re = re.compile(r'^\s*#')
+
+def run_checks(contents, pkg):
+ unicode_escape_codec = codecs.lookup('unicode_escape')
+ unicode_escape = lambda x: unicode_escape_codec.decode(x)[0]
+ if _constant_checks is None:
+ _init()
+ checks = _constant_checks
+ here_doc_delim = None
+ multiline = None
+
+ for lc in checks:
+ lc.new(pkg)
+ for num, line in enumerate(contents):
+
+ # Check if we're inside a here-document.
+ if here_doc_delim is not None:
+ if here_doc_delim.match(line):
+ here_doc_delim = None
+ if here_doc_delim is None:
+ here_doc = _here_doc_re.match(line)
+ if here_doc is not None:
+ here_doc_delim = re.compile(r'^\s*%s$' % here_doc.group(1))
+ if here_doc_delim is not None:
+ continue
+
+ # Unroll multiline escaped strings so that we can check things:
+ # inherit foo bar \
+ # moo \
+ # cow
+ # This will merge these lines like so:
+ # inherit foo bar moo cow
+ try:
+ # A continued line will end in the two bytes: <\> <\n>. Decoding
+ # that would result in python thinking the <\n> is being escaped
+ # and eating the single <\>, which makes it hard for us to detect.
+ # Instead, strip the newline (which we know all lines have), and
+ # append a <0>. Then when python escapes it, if the line ended
+ # in a <\>, we'll end up with a <\0> marker to key off of. This
+ # shouldn't be a problem with any valid ebuild ...
+ line_escaped = unicode_escape(line.rstrip('\n') + '0')
+ except SystemExit:
+ raise
+ except:
+ # Who knows what kind of crazy crap an ebuild will have
+ # in it -- don't allow it to kill us.
+ line_escaped = line
+ if multiline:
+ # Chop off the \ and \n bytes from the previous line.
+ multiline = multiline[:-2] + line
+ if not line_escaped.endswith('\0'):
+ line = multiline
+ num = multinum
+ multiline = None
+ else:
+ continue
+ else:
+ if line_escaped.endswith('\0'):
+ multinum = num
+ multiline = line
+ continue
+
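+ # Lines ending with "#nowarn" are explicitly exempt from all checks.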
+ if not line.endswith("#nowarn\n"):
+ # Finally we have a full line to parse.
+ is_comment = _ignore_comment_re.match(line) is not None
+ for lc in checks:
+ if is_comment and lc.ignore_comment:
+ continue
+ if lc.check_eapi(pkg.eapi):
+ ignore = lc.ignore_line
+ if not ignore or not ignore.match(line):
+ e = lc.check(num, line)
+ if e:
+ yield lc.repoman_check_name, e % (num + 1)
+
+ for lc in checks:
+ i = lc.end()
+ if i is not None:
+ for e in i:
+ yield lc.repoman_check_name, e
diff --git a/usr/lib/portage/pym/repoman/errors.py b/usr/lib/portage/pym/repoman/errors.py
new file mode 100644
index 0000000..3833be6
--- /dev/null
+++ b/usr/lib/portage/pym/repoman/errors.py
@@ -0,0 +1,27 @@
+# repoman: Error Messages
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+COPYRIGHT_ERROR = 'Invalid Gentoo Copyright on line: %d'
+LICENSE_ERROR = 'Invalid Gentoo/GPL License on line: %d'
+CVS_HEADER_ERROR = 'Malformed CVS Header on line: %d'
+LEADING_SPACES_ERROR = 'Ebuild contains leading spaces on line: %d'
+TRAILING_WHITESPACE_ERROR = 'Trailing whitespace error on line: %d'
+READONLY_ASSIGNMENT_ERROR = 'Ebuild contains assignment to read-only variable on line: %d'
+MISSING_QUOTES_ERROR = 'Unquoted Variable on line: %d'
+NESTED_DIE_ERROR = 'Ebuild calls die in a subshell on line: %d'
+PATCHES_ERROR = 'PATCHES is not a bash array on line: %d'
+REDUNDANT_CD_S_ERROR = 'Ebuild has redundant cd ${S} statement on line: %d'
+EMAKE_PARALLEL_DISABLED = 'Upstream parallel compilation bug (ebuild calls emake -j1 on line: %d)'
+EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS = 'Upstream parallel compilation bug (MAKEOPTS=-j1 on line: %d)'
+DEPRECATED_BINDNOW_FLAGS = 'Deprecated bindnow-flags call on line: %d'
+EAPI_DEFINED_AFTER_INHERIT = 'EAPI defined after inherit on line: %d'
+NO_AS_NEEDED = 'Upstream asneeded linking bug (no-as-needed on line: %d)'
+PRESERVE_OLD_LIB = 'Ebuild calls deprecated preserve_old_lib on line: %d'
+BUILT_WITH_USE = 'built_with_use on line: %d'
+NO_OFFSET_WITH_HELPERS = "Helper function is used with D, ROOT, ED, EROOT or EPREFIX on line: %d"
+SANDBOX_ADDPREDICT = 'Ebuild calls addpredict on line: %d'
+USEQ_ERROR = 'Ebuild calls deprecated useq function on line: %d'
+HASQ_ERROR = 'Ebuild calls deprecated hasq function on line: %d'
diff --git a/usr/lib/portage/pym/repoman/herdbase.py b/usr/lib/portage/pym/repoman/herdbase.py
new file mode 100644
index 0000000..c5b88ff
--- /dev/null
+++ b/usr/lib/portage/pym/repoman/herdbase.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# repoman: Herd database analysis
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2 or later
+
+from __future__ import unicode_literals
+
+import errno
+import xml.etree.ElementTree
+try:
+ from xml.parsers.expat import ExpatError
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # http://bugs.python.org/issue14988
+ # This means that python is built without xml support.
+ # We tolerate global scope import failures for optional
+ # modules, so that ImportModulesTestCase can succeed (or
+ # possibly alert us about unexpected import failures).
+ pass
+
+from portage import _encodings, _unicode_encode
+from portage.exception import FileNotFound, ParseError, PermissionDenied
+
+__all__ = [
+ "make_herd_base"
+]
+
+def _make_email(nick_name):
+ if not nick_name.endswith('@gentoo.org'):
+ nick_name = nick_name + '@gentoo.org'
+ return nick_name
+
+
+class HerdBase(object):
+ def __init__(self, herd_to_emails, all_emails):
+ self.herd_to_emails = herd_to_emails
+ self.all_emails = all_emails
+
+ def known_herd(self, herd_name):
+ return herd_name in self.herd_to_emails
+
+ def known_maintainer(self, nick_name):
+ return _make_email(nick_name) in self.all_emails
+
+ def maintainer_in_herd(self, nick_name, herd_name):
+ return _make_email(nick_name) in self.herd_to_emails[herd_name]
+
+class _HerdsTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings with
+ >=python-2.7.
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+def make_herd_base(filename):
+ herd_to_emails = dict()
+ all_emails = set()
+
+ try:
+ xml_tree = xml.etree.ElementTree.parse(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=xml.etree.ElementTree.XMLParser(
+ target=_HerdsTreeBuilder()))
+ except ExpatError as e:
+ raise ParseError("metadata.xml: %s" % (e,))
+ except EnvironmentError as e:
+ func_call = "open('%s')" % filename
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ raise
+
+ herds = xml_tree.findall('herd')
+ for h in herds:
+ _herd_name = h.find('name')
+ if _herd_name is None:
+ continue
+ herd_name = _herd_name.text.strip()
+ del _herd_name
+
+ maintainers = h.findall('maintainer')
+ herd_emails = set()
+ for m in maintainers:
+ _m_email = m.find('email')
+ if _m_email is None:
+ continue
+ m_email = _m_email.text.strip()
+
+ herd_emails.add(m_email)
+ all_emails.add(m_email)
+
+ herd_to_emails[herd_name] = herd_emails
+
+ return HerdBase(herd_to_emails, all_emails)
+
+
+if __name__ == '__main__':
+ h = make_herd_base('/usr/portage/metadata/herds.xml')
+
+ assert(h.known_herd('sound'))
+ assert(not h.known_herd('media-sound'))
+
+ assert(h.known_maintainer('sping'))
+ assert(h.known_maintainer('sping@gentoo.org'))
+ assert(not h.known_maintainer('portage'))
+
+ assert(h.maintainer_in_herd('zmedico@gentoo.org', 'tools-portage'))
+ assert(not h.maintainer_in_herd('pva@gentoo.org', 'tools-portage'))
+
+ import pprint
+ pprint.pprint(h.herd_to_emails)
diff --git a/usr/lib/portage/pym/repoman/utilities.py b/usr/lib/portage/pym/repoman/utilities.py
new file mode 100644
index 0000000..415825e
--- /dev/null
+++ b/usr/lib/portage/pym/repoman/utilities.py
@@ -0,0 +1,967 @@
+# repoman: Utilities
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""This module contains utility functions to help repoman find ebuilds to
+scan"""
+
+from __future__ import print_function, unicode_literals
+
+__all__ = [
+ "detect_vcs_conflicts",
+ "editor_is_executable",
+ "FindPackagesToScan",
+ "FindPortdir",
+ "FindVCS",
+ "format_qa_output",
+ "get_commit_message_with_editor",
+ "get_commit_message_with_stdin",
+ "get_committer_name",
+ "have_ebuild_dir",
+ "have_profile_dir",
+ "parse_metadata_use",
+ "UnknownHerdsError",
+ "check_metadata",
+ "UpdateChangeLog"
+]
+
+import collections
+import errno
+import io
+from itertools import chain
+import logging
+import pwd
+import re
+import stat
+import sys
+import subprocess
+import time
+import textwrap
+import difflib
+from tempfile import mkstemp
+
+import portage
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import output
+from portage.const import BASH_BINARY
+from portage.localization import _
+from portage.output import red, green
+from portage.process import find_binary
+from portage import exception
+from portage import util
+normalize_path = util.normalize_path
+util.initialize_logger()
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+def detect_vcs_conflicts(options, vcs):
+ """Determine if the checkout has problems like cvs conflicts.
+
+ If you want more vcs support here just keep adding if blocks...
+ This could be better.
+
+ TODO(antarus): Also this should probably not call sys.exit() as
+ repoman is run on more than one package and one failure should not
+ cause subsequent packages to fail.
+
+ Args:
+ options - parsed command-line options (options.pretend is used)
+ vcs - A string identifying the version control system in use
+ Returns:
+ None (calls sys.exit on fatal problems)
+ """
+
+ cmd = None
+ if vcs == 'cvs':
+ logging.info("Performing a " + output.green("cvs -n up") + \
+ " with a little magic grep to check for updates.")
+ cmd = "cvs -n up 2>/dev/null | " + \
+ "egrep '^[^\?] .*' | " + \
+ "egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'"
+ if vcs == 'svn':
+ logging.info("Performing a " + output.green("svn status -u") + \
+ " with a little magic grep to check for updates.")
+ cmd = "svn status -u 2>&1 | " + \
+ "egrep -v '^. +.*/digest-[^/]+' | " + \
+ "head -n-1"
+
+ if cmd is not None:
+ # Use Popen instead of getstatusoutput(), in order to avoid
+ # unicode handling problems (see bug #310789).
+ args = [BASH_BINARY, "-c", cmd]
+ args = [_unicode_encode(x) for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = _unicode_decode(proc.communicate()[0])
+ proc.wait()
+ mylines = out.splitlines()
+ myupdates = []
+ for line in mylines:
+ if not line:
+ continue
+ if line[0] not in " UPMARD": # unmodified(svn),Updates,Patches,Modified,Added,Removed/Replaced(svn),Deleted(svn)
+ # Stray Manifest is fine, we will re-add it anyway.
+ if line[0] == '?' and line[1:].lstrip() == 'Manifest':
+ continue
+ logging.error(red("!!! Please fix the following issues reported " + \
+ "from cvs: ")+green("(U,P,M,A,R,D are ok)"))
+ logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
+ logging.error(out)
+ sys.exit(1)
+ elif vcs == 'cvs' and line[0] in "UP":
+ myupdates.append(line[2:])
+ elif vcs == 'svn' and line[8] == '*':
+ myupdates.append(line[9:].lstrip(" 1234567890"))
+
+ if myupdates:
+ logging.info(green("Fetching trivial updates..."))
+ if options.pretend:
+ logging.info("(" + vcs + " update " + " ".join(myupdates) + ")")
+ retval = os.EX_OK
+ else:
+ retval = os.system(vcs + " update " + " ".join(myupdates))
+ if retval != os.EX_OK:
+ logging.fatal("!!! " + vcs + " exited with an error. Terminating.")
+ sys.exit(retval)
+
+
+def have_profile_dir(path, maxdepth=3, filename="profiles.desc"):
+ """
+ Try to figure out if 'path' has a profiles/
+ dir in it by checking for the given filename.
+ """
+ while path != "/" and maxdepth:
+ if os.path.exists(os.path.join(path, "profiles", filename)):
+ return normalize_path(path)
+ path = normalize_path(path + "/..")
+ maxdepth -= 1
+
+def have_ebuild_dir(path, maxdepth=3):
+ """
+ Try to figure out if 'path' or a subdirectory contains one or more
+ ebuild files named appropriately for their parent directory.
+ """
+ stack = [(normalize_path(path), 1)]
+ while stack:
+ path, depth = stack.pop()
+ basename = os.path.basename(path)
+ try:
+ listdir = os.listdir(path)
+ except OSError:
+ continue
+ for filename in listdir:
+ abs_filename = os.path.join(path, filename)
+ try:
+ st = os.stat(abs_filename)
+ except OSError:
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ if depth < maxdepth:
+ stack.append((abs_filename, depth + 1))
+ elif stat.S_ISREG(st.st_mode):
+ if filename.endswith(".ebuild") and \
+ filename.startswith(basename + "-"):
+ return os.path.dirname(os.path.dirname(path))
+
+def parse_metadata_use(xml_tree):
+ """
+ Records are wrapped in XML as per GLEP 56.
+ Returns a dict whose keys are USE flag names and whose values
+ contain their respective descriptions.
+ """
+ uselist = {}
+
+ usetags = xml_tree.findall("use")
+ if not usetags:
+ return uselist
+
+ # It's possible to have multiple 'use' elements.
+ for usetag in usetags:
+ flags = usetag.findall("flag")
+ if not flags:
+ # DTD allows use elements containing no flag elements.
+ continue
+
+ for flag in flags:
+ pkg_flag = flag.get("name")
+ if pkg_flag is None:
+ raise exception.ParseError("missing 'name' attribute for 'flag' tag")
+ flag_restrict = flag.get("restrict")
+
+ # emulate the Element.itertext() method from python-2.7
+ inner_text = []
+ stack = []
+ stack.append(flag)
+ while stack:
+ obj = stack.pop()
+ if isinstance(obj, basestring):
+ inner_text.append(obj)
+ continue
+ if isinstance(obj.text, basestring):
+ inner_text.append(obj.text)
+ if isinstance(obj.tail, basestring):
+ stack.append(obj.tail)
+ stack.extend(reversed(obj))
+
+ if pkg_flag not in uselist:
+ uselist[pkg_flag] = {}
+
+ # (flag_restrict can be None)
+ uselist[pkg_flag][flag_restrict] = " ".join("".join(inner_text).split())
+
+ return uselist
+
+class UnknownHerdsError(ValueError):
+ def __init__(self, herd_names):
+ _plural = len(herd_names) != 1
+ super(UnknownHerdsError, self).__init__(
+ 'Unknown %s %s' % (_plural and 'herds' or 'herd',
+ ','.join('"%s"' % e for e in herd_names)))
+
+
+def check_metadata_herds(xml_tree, herd_base):
+ herd_nodes = xml_tree.findall('herd')
+ unknown_herds = [name for name in
+ (e.text.strip() for e in herd_nodes if e.text is not None)
+ if not herd_base.known_herd(name)]
+
+ if unknown_herds:
+ raise UnknownHerdsError(unknown_herds)
+
+def check_metadata(xml_tree, herd_base):
+ if herd_base is not None:
+ check_metadata_herds(xml_tree, herd_base)
+
+def FindPackagesToScan(settings, startdir, reposplit):
+ """ Try to find packages that need to be scanned
+
+ Args:
+ settings - portage.config instance, preferably repoman_settings
+ startdir - directory that repoman was run in
+ reposplit - root of the repository
+ Returns:
+ A list of directories to scan
+ """
+
+
+ def AddPackagesInDir(path):
+ """ Given a list of dirs, add any packages in it """
+ ret = []
+ pkgdirs = os.listdir(path)
+ for d in pkgdirs:
+ if d == 'CVS' or d.startswith('.'):
+ continue
+ p = os.path.join(path, d)
+
+ if os.path.isdir(p):
+ cat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])
+ logging.debug('adding %s to scanlist' % cat_pkg_dir)
+ ret.append(cat_pkg_dir)
+ return ret
+
+ scanlist = []
+ repolevel = len(reposplit)
+ if repolevel == 1: # root of the tree, startdir = repodir
+ for cat in settings.categories:
+ path = os.path.join(startdir, cat)
+ if not os.path.isdir(path):
+ continue
+ scanlist.extend(AddPackagesInDir(path))
+ elif repolevel == 2: # category level, startdir = catdir
+ # we only want one segment of the directory, which is why we use catdir instead of startdir
+ catdir = reposplit[-2]
+ if catdir not in settings.categories:
+ logging.warn('%s is not a valid category according to profiles/categories, ' \
+ 'skipping checks in %s' % (catdir, catdir))
+ else:
+ scanlist = AddPackagesInDir(catdir)
+ elif repolevel == 3: # pkgdir level, startdir = pkgdir
+ catdir = reposplit[-2]
+ pkgdir = reposplit[-1]
+ if catdir not in settings.categories:
+ logging.warn('%s is not a valid category according to profiles/categories, ' \
+ 'skipping checks in %s' % (catdir, catdir))
+ else:
+ path = os.path.join(catdir, pkgdir)
+ logging.debug('adding %s to scanlist' % path)
+ scanlist.append(path)
+ return scanlist
+
+
+def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarnings):
+ """Helper function that formats output properly
+
+ Args:
+ formatter - a subclass of Formatter
+ stats - a dict of qa status items
+ fails - a dict of qa status failures
+ dofull - boolean to print full results or a summary
+ dofail - boolean to decide if failure was hard or soft
+
+ Returns:
+ None (modifies formatter)
+ """
+ full = options.mode == 'full'
+ # we only want key value pairs where value > 0
+ for category, number in \
+ filter(lambda myitem: myitem[1] > 0, sorted(stats.items())):
+ formatter.add_literal_data(" " + category.ljust(30))
+ if category in qawarnings:
+ formatter.push_style("WARN")
+ else:
+ formatter.push_style("BAD")
+ formatter.add_literal_data("%s" % number)
+ formatter.pop_style()
+ formatter.add_line_break()
+ if not dofull:
+ if not full and dofail and category in qawarnings:
+ # warnings are considered noise when there are failures
+ continue
+ fails_list = fails[category]
+ if not full and len(fails_list) > 12:
+ fails_list = fails_list[:12]
+ for failure in fails_list:
+ formatter.add_literal_data(" " + failure)
+ formatter.add_line_break()
+
+
+def format_qa_output_column(formatter, stats, fails, dofull, dofail, options, qawarnings):
+ """Helper function that formats output in a machine-parseable column format
+
+ @param formatter: an instance of Formatter
+ @type formatter: Formatter
+ @param stats: dict of qa status items
+ @type stats: dict
+ @param fails: dict of qa status failures
+ @type fails: dict
+ @param dofull: Whether to print full results or a summary
+ @type dofull: boolean
+ @param dofail: Whether failure was hard or soft
+ @type dofail: boolean
+ @param options: The command-line options provided to repoman
+ @type options: Namespace
+ @param qawarnings: the set of warning types
+ @type qawarnings: set
+ @return: None (modifies formatter)
+ """
+ full = options.mode == 'full'
+ for category, number in stats.items():
+ # we only want key value pairs where value > 0
+ if number < 1:
+ continue
+
+ formatter.add_literal_data("NumberOf " + category + " ")
+ if category in qawarnings:
+ formatter.push_style("WARN")
+ else:
+ formatter.push_style("BAD")
+ formatter.add_literal_data("%s" % number)
+ formatter.pop_style()
+ formatter.add_line_break()
+ if not dofull:
+ if not full and dofail and category in qawarnings:
+ # warnings are considered noise when there are failures
+ continue
+ fails_list = fails[category]
+ if not full and len(fails_list) > 12:
+ fails_list = fails_list[:12]
+ for failure in fails_list:
+ formatter.add_literal_data(category + " " + failure)
+ formatter.add_line_break()
+
+def editor_is_executable(editor):
+ """
+ Given an EDITOR string, validate that it refers to
+ an executable. This uses shlex_split() to split the
+ first component and do a PATH lookup if necessary.
+
+ @param editor: An EDITOR value from the environment.
+ @type: string
+ @rtype: bool
+ @return: True if an executable is found, False otherwise.
+ """
+ editor_split = util.shlex_split(editor)
+ if not editor_split:
+ return False
+ filename = editor_split[0]
+ if not os.path.isabs(filename):
+ return find_binary(filename) is not None
+ return os.access(filename, os.X_OK) and os.path.isfile(filename)
+
+
+def get_commit_message_with_editor(editor, message=None):
+ """
+ Execute editor with a temporary file as its argument
+ and return the file content afterwards.
+
+ @param editor: An EDITOR value from the environment
+ @type: string
+ @param message: An iterable of lines to show in the editor.
+ @type: iterable
+ @rtype: string or None
+ @return: A string on success or None if an error occurs.
+ """
+ fd, filename = mkstemp()
+ try:
+ os.write(fd, _unicode_encode(_(
+ "\n# Please enter the commit message " + \
+ "for your changes.\n# (Comment lines starting " + \
+ "with '#' will not be included)\n"),
+ encoding=_encodings['content'], errors='backslashreplace'))
+ if message:
+ os.write(fd, b"#\n")
+ for line in message:
+ os.write(fd, _unicode_encode("#" + line,
+ encoding=_encodings['content'], errors='backslashreplace'))
+ os.close(fd)
+ retval = os.system(editor + " '%s'" % filename)
+ if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
+ return None
+ try:
+ with io.open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'
+ ) as f:
+ mylines = f.readlines()
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return None
+ return "".join(line for line in mylines if not line.startswith("#"))
+ finally:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+
+
+def get_commit_message_with_stdin():
+ """
+ Read a commit message from the user and return it.
+
+ @rtype: string or None
+ @return: A string on success or None if an error occurs.
+ """
+ print("Please enter a commit message. Use Ctrl-d to finish or Ctrl-c to abort.")
+ commitmessage = []
+ while True:
+ commitmessage.append(sys.stdin.readline())
+ if not commitmessage[-1]:
+ break
+ commitmessage = "".join(commitmessage)
+ return commitmessage
+
+
+def FindPortdir(settings):
+ """ Try to figure out what repo we are in and whether we are in a regular
+ tree or an overlay.
+
+ Basic logic is:
+
+ 1. Determine what directory we are in (supports symlinks).
+ 2. Build a list of directories from / to our current location
+ 3. Iterate over PORTDIR_OVERLAY, if we find a match, search for a profiles directory
+ in the overlay. If it has one, make it portdir, otherwise make it portdir_overlay.
+ 4. If we didn't find an overlay in PORTDIR_OVERLAY, see if we are in PORTDIR; if so, set
+ portdir_overlay to PORTDIR. If we aren't in PORTDIR, see if PWD has a profiles dir, if
+ so, set portdir_overlay and portdir to PWD, else make them False.
+ 5. If we haven't found portdir_overlay yet, it means the user is doing something odd, report
+ an error.
+ 6. If we haven't found a portdir yet, set portdir to PORTDIR.
+
+ Args:
+ settings - portage.config instance, preferably repoman_settings
+ Returns:
+ list(portdir, portdir_overlay, location)
+ """
+
+ portdir = None
+ portdir_overlay = None
+ location = os.getcwd()
+ pwd = _unicode_decode(os.environ.get('PWD', ''), encoding=_encodings['fs'])
+ if pwd and pwd != location and os.path.realpath(pwd) == location:
+ # getcwd() returns the canonical path but that makes it hard for repoman to
+ # orient itself if the user has symlinks in their portage tree structure.
+ # We use os.environ["PWD"], if available, to get the non-canonical path of
+ # the current working directory (from the shell).
+ location = pwd
+
+ location = normalize_path(location)
+
+ path_ids = {}
+ p = location
+ s = None
+ while True:
+ s = os.stat(p)
+ path_ids[(s.st_dev, s.st_ino)] = p
+ if p == "/":
+ break
+ p = os.path.dirname(p)
+ if location[-1] != "/":
+ location += "/"
+
+ for overlay in portage.util.shlex_split(settings["PORTDIR_OVERLAY"]):
+ overlay = os.path.realpath(overlay)
+ try:
+ s = os.stat(overlay)
+ except OSError:
+ continue
+ overlay = path_ids.get((s.st_dev, s.st_ino))
+ if overlay is None:
+ continue
+ if overlay[-1] != "/":
+ overlay += "/"
+ portdir_overlay = overlay
+ subdir = location[len(overlay):]
+ if subdir and subdir[-1] != "/":
+ subdir += "/"
+ if have_profile_dir(location, subdir.count("/")):
+ portdir = portdir_overlay
+ break
+
+ # Couldn't match location with anything from PORTDIR_OVERLAY,
+ # so fall back to have_profile_dir() checks alone. Assume that
+ # an overlay will contain at least a "repo_name" file while a
+ # master repo (portdir) will contain at least a "profiles.desc"
+ # file.
+ if not portdir_overlay:
+ portdir_overlay = have_profile_dir(location, filename="repo_name")
+ if not portdir_overlay:
+ portdir_overlay = have_ebuild_dir(location)
+ if portdir_overlay:
+ subdir = location[len(portdir_overlay):]
+ if subdir and subdir[-1] != os.sep:
+ subdir += os.sep
+ if have_profile_dir(location, subdir.count(os.sep)):
+ portdir = portdir_overlay
+
+ if not portdir_overlay:
+ if (settings["PORTDIR"] + os.path.sep).startswith(location):
+ portdir_overlay = settings["PORTDIR"]
+ else:
+ portdir_overlay = have_profile_dir(location)
+ portdir = portdir_overlay
+
+ if not portdir_overlay:
+ msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
+ ' from the current working directory'
+ logging.critical(msg)
+ return (None, None, None)
+
+ if not portdir:
+ portdir = settings["PORTDIR"]
+
+ if not portdir_overlay.endswith('/'):
+ portdir_overlay += '/'
+
+ if not portdir.endswith('/'):
+ portdir += '/'
+
+ return [normalize_path(x) for x in (portdir, portdir_overlay, location)]
+
+_vcs_type = collections.namedtuple('_vcs_type',
+ 'name dir_name')
+
+_FindVCS_data = (
+ _vcs_type(
+ name = 'git',
+ dir_name = '.git'
+ ),
+ _vcs_type(
+ name = 'bzr',
+ dir_name = '.bzr'
+ ),
+ _vcs_type(
+ name = 'hg',
+ dir_name = '.hg'
+ ),
+ _vcs_type(
+ name = 'svn',
+ dir_name = '.svn'
+ )
+)
+
+def FindVCS():
+ """ Try to figure out in what VCS' working tree we are. """
+
+ outvcs = []
+
+ def seek(depth = None):
+ """ Seek for VCSes that have a top-level data directory only. """
+ retvcs = []
+ pathprep = ''
+
+ while depth is None or depth > 0:
+ for vcs_type in _FindVCS_data:
+ vcs_dir = os.path.join(pathprep, vcs_type.dir_name)
+ if os.path.isdir(vcs_dir):
+ logging.debug('FindVCS: found %(name)s dir: %(vcs_dir)s' %
+ {'name': vcs_type.name,
+ 'vcs_dir': os.path.abspath(vcs_dir)})
+ retvcs.append(vcs_type.name)
+
+ if retvcs:
+ break
+ pathprep = os.path.join(pathprep, '..')
+ if os.path.realpath(pathprep).strip('/') == '':
+ break
+ if depth is not None:
+ depth = depth - 1
+
+ return retvcs
+
+ # Level zero VCS-es.
+ if os.path.isdir('CVS'):
+ outvcs.append('cvs')
+ if os.path.isdir('.svn'): # <1.7
+ outvcs.append('svn')
+
+ # If we already found one of 'level zeros', just take a quick look
+ # at the current directory. Otherwise, seek parents till we get
+ # something or reach root.
+ if outvcs:
+ outvcs.extend(seek(1))
+ else:
+ outvcs = seek()
+
+ if len(outvcs) > 1:
+ # eliminate duplicates, like for svn in bug #391199
+ outvcs = list(set(outvcs))
+
+ return outvcs
+
+_copyright_re1 = re.compile(br'^(# Copyright \d\d\d\d)-\d\d\d\d ')
+_copyright_re2 = re.compile(br'^(# Copyright )(\d\d\d\d) ')
+
+
+class _copyright_repl(object):
+ __slots__ = ('year',)
+ def __init__(self, year):
+ self.year = year
+ def __call__(self, matchobj):
+ if matchobj.group(2) == self.year:
+ return matchobj.group(0)
+ else:
+ return matchobj.group(1) + matchobj.group(2) + \
+ b'-' + self.year + b' '
+
+def _update_copyright_year(year, line):
+ """
+ These two regexes are taken from echangelog
+ update_copyright(), except that we don't hardcode
+ 1999 here (in order to be more generic).
+ """
+ is_bytes = isinstance(line, bytes)
+ if is_bytes:
+ if not line.startswith(b'# Copyright '):
+ return line
+ else:
+ if not line.startswith('# Copyright '):
+ return line
+
+ year = _unicode_encode(year)
+ line = _unicode_encode(line)
+
+ line = _copyright_re1.sub(br'\1-' + year + b' ', line)
+ line = _copyright_re2.sub(_copyright_repl(year), line)
+ if not is_bytes:
+ line = _unicode_decode(line)
+ return line
+
+def update_copyright(fn_path, year, pretend=False):
+ """
+ Check file for a Copyright statement, and update its year. The
+ patterns used for replacing copyrights are taken from echangelog.
+ Only the first lines of each file that start with a hash ('#') are
+ considered, until a line is found that doesn't start with a hash.
+ Files are read and written in binary mode, so that this function
+ will work correctly with files encoded in any character set, as
+ long as the copyright statements consist of plain ASCII.
+ """
+
+ try:
+ fn_hdl = io.open(_unicode_encode(fn_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb')
+ except EnvironmentError:
+ return
+
+ orig_header = []
+ new_header = []
+
+ for line in fn_hdl:
+ line_strip = line.strip()
+ orig_header.append(line)
+ if not line_strip or line_strip[:1] != b'#':
+ new_header.append(line)
+ break
+
+ line = _update_copyright_year(year, line)
+ new_header.append(line)
+
+ difflines = 0
+ for line in difflib.unified_diff(
+ [_unicode_decode(line) for line in orig_header],
+ [_unicode_decode(line) for line in new_header],
+ fromfile=fn_path, tofile=fn_path, n=0):
+ util.writemsg_stdout(line, noiselevel=-1)
+ difflines += 1
+ util.writemsg_stdout("\n", noiselevel=-1)
+
+ # unified diff has three lines to start with
+ if difflines > 3 and not pretend:
+ # write new file with changed header
+ f, fnnew_path = mkstemp()
+ f = io.open(f, mode='wb')
+ for line in new_header:
+ f.write(line)
+ for line in fn_hdl:
+ f.write(line)
+ f.close()
+ try:
+ fn_stat = os.stat(fn_path)
+ except OSError:
+ fn_stat = None
+
+ shutil.move(fnnew_path, fn_path)
+
+ if fn_stat is None:
+ util.apply_permissions(fn_path, mode=0o644)
+ else:
+ util.apply_stat_permissions(fn_path, fn_stat)
+ fn_hdl.close()
+
+def get_committer_name(env=None):
+ """Generate a committer string like echangelog does."""
+ if env is None:
+ env = os.environ
+ if 'GENTOO_COMMITTER_NAME' in env and \
+ 'GENTOO_COMMITTER_EMAIL' in env:
+ user = '%s <%s>' % (env['GENTOO_COMMITTER_NAME'],
+ env['GENTOO_COMMITTER_EMAIL'])
+ elif 'GENTOO_AUTHOR_NAME' in env and \
+ 'GENTOO_AUTHOR_EMAIL' in env:
+ user = '%s <%s>' % (env['GENTOO_AUTHOR_NAME'],
+ env['GENTOO_AUTHOR_EMAIL'])
+ elif 'ECHANGELOG_USER' in env:
+ user = env['ECHANGELOG_USER']
+ else:
+ pwd_struct = pwd.getpwuid(os.getuid())
+ gecos = pwd_struct.pw_gecos.split(',')[0] # bug #80011
+ user = '%s <%s@gentoo.org>' % (gecos, pwd_struct.pw_name)
+ return user
+
+def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
+ new=(), removed=(), changed=(), pretend=False, quiet=False):
+ """
+ Write an entry to an existing ChangeLog, or create a new one.
+ Updates copyright year on changed files, and updates the header of
+ ChangeLog with the contents of skel.ChangeLog.
+ """
+
+ if '<root@' in user:
+ if not quiet:
+ logging.critical('Please set ECHANGELOG_USER or run as non-root')
+ return None
+
+ # ChangeLog times are in UTC
+ gmtime = time.gmtime()
+ year = time.strftime('%Y', gmtime)
+ date = time.strftime('%d %b %Y', gmtime)
+
+ # check modified files and the ChangeLog for copyright updates
+ # patches and diffs (identified by .patch and .diff) are excluded
+ for fn in chain(new, changed):
+ if fn.endswith('.diff') or fn.endswith('.patch'):
+ continue
+ update_copyright(os.path.join(pkgdir, fn), year, pretend=pretend)
+
+ cl_path = os.path.join(pkgdir, 'ChangeLog')
+ clold_lines = []
+ clnew_lines = []
+ old_header_lines = []
+ header_lines = []
+
+ clold_file = None
+ try:
+ clold_file = io.open(_unicode_encode(cl_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ except EnvironmentError:
+ pass
+
+ f, clnew_path = mkstemp()
+
+ # construct correct header first
+ try:
+ if clold_file is not None:
+ # retain header from old ChangeLog
+ first_line = True
+ for line in clold_file:
+ line_strip = line.strip()
+ if line_strip and line[:1] != "#":
+ clold_lines.append(line)
+ break
+ # always make sure cat/pkg is up-to-date in case we are
+ # moving packages around, or copied from another pkg, or ...
+ if first_line:
+ if line.startswith('# ChangeLog for'):
+ line = '# ChangeLog for %s/%s\n' % (category, package)
+ first_line = False
+ old_header_lines.append(line)
+ header_lines.append(_update_copyright_year(year, line))
+ if not line_strip:
+ break
+
+ clskel_file = None
+ if not header_lines:
+ # delay opening this until we find we need a header
+ try:
+ clskel_file = io.open(_unicode_encode(skel_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+
+ if clskel_file is not None:
+ # read skel.ChangeLog up to first empty line
+ for line in clskel_file:
+ line_strip = line.strip()
+ if not line_strip:
+ break
+ line = line.replace('<CATEGORY>', category)
+ line = line.replace('<PACKAGE_NAME>', package)
+ line = _update_copyright_year(year, line)
+ header_lines.append(line)
+ header_lines.append('\n')
+ clskel_file.close()
+
+ # write new ChangeLog entry
+ clnew_lines.extend(header_lines)
+ newebuild = False
+ for fn in new:
+ if not fn.endswith('.ebuild'):
+ continue
+ ebuild = fn.split(os.sep)[-1][0:-7]
+ clnew_lines.append('*%s (%s)\n' % (ebuild, date))
+ newebuild = True
+ if newebuild:
+ clnew_lines.append('\n')
+ trivial_files = ('ChangeLog', 'Manifest')
+ display_new = ['+' + elem for elem in new
+ if elem not in trivial_files]
+ display_removed = ['-' + elem for elem in removed]
+ display_changed = [elem for elem in changed
+ if elem not in trivial_files]
+ if not (display_new or display_removed or display_changed):
+ # If there's nothing else to display, show one of the
+ # trivial files.
+ for fn in trivial_files:
+ if fn in new:
+ display_new = ['+' + fn]
+ break
+ elif fn in changed:
+ display_changed = [fn]
+ break
+
+ display_new.sort()
+ display_removed.sort()
+ display_changed.sort()
+
+ mesg = '%s; %s %s:' % (date, user, ', '.join(chain(
+ display_new, display_removed, display_changed)))
+ for line in textwrap.wrap(mesg, 80, \
+ initial_indent=' ', subsequent_indent=' ', \
+ break_on_hyphens=False):
+ clnew_lines.append('%s\n' % line)
+ for line in textwrap.wrap(msg, 80, \
+ initial_indent=' ', subsequent_indent=' '):
+ clnew_lines.append('%s\n' % line)
+ # Don't append a trailing newline if the file is new.
+ if clold_file is not None:
+ clnew_lines.append('\n')
+
+ f = io.open(f, mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+
+ for line in clnew_lines:
+ f.write(line)
+
+ # append stuff from old ChangeLog
+ if clold_file is not None:
+
+ if clold_lines:
+ # clold_lines may contain a saved non-header line
+ # that we want to write first.
+ # Also, append this line to clnew_lines so that the
+ # unified_diff call doesn't show it as removed.
+ for line in clold_lines:
+ f.write(line)
+ clnew_lines.append(line)
+
+ else:
+ # ensure that there is no more than one blank
+ # line after our new entry
+ for line in clold_file:
+ if line.strip():
+ f.write(line)
+ break
+
+ # Now prepend old_header_lines to clold_lines, for use
+ # in the unified_diff call below.
+ clold_lines = old_header_lines + clold_lines
+
+ # Trim any trailing newlines.
+ lines = clold_file.readlines()
+ clold_file.close()
+ while lines and lines[-1] == '\n':
+ del lines[-1]
+ f.writelines(lines)
+ f.close()
+
+ # show diff
+ if not quiet:
+ for line in difflib.unified_diff(clold_lines, clnew_lines,
+ fromfile=cl_path, tofile=cl_path, n=0):
+ util.writemsg_stdout(line, noiselevel=-1)
+ util.writemsg_stdout("\n", noiselevel=-1)
+
+ if pretend:
+ # remove what we've done
+ os.remove(clnew_path)
+ else:
+ # rename to ChangeLog, and set permissions
+ try:
+ clold_stat = os.stat(cl_path)
+ except OSError:
+ clold_stat = None
+
+ shutil.move(clnew_path, cl_path)
+
+ if clold_stat is None:
+ util.apply_permissions(cl_path, mode=0o644)
+ else:
+ util.apply_stat_permissions(cl_path, clold_stat)
+
+ if clold_file is None:
+ return True
+ else:
+ return False
+ except IOError as e:
+ err = 'Repoman is unable to create/write to Changelog.new file: %s' % (e,)
+ logging.critical(err)
+ # try to remove if possible
+ try:
+ os.remove(clnew_path)
+ except OSError:
+ pass
+ return None
+
diff --git a/usr/sbin/archive-conf b/usr/sbin/archive-conf
new file mode 120000
index 0000000..940b11f
--- /dev/null
+++ b/usr/sbin/archive-conf
@@ -0,0 +1 @@
+../lib/portage/bin/archive-conf \ No newline at end of file
diff --git a/usr/sbin/dispatch-conf b/usr/sbin/dispatch-conf
new file mode 120000
index 0000000..fd991b5
--- /dev/null
+++ b/usr/sbin/dispatch-conf
@@ -0,0 +1 @@
+../lib/portage/bin/dispatch-conf \ No newline at end of file
diff --git a/usr/sbin/emaint b/usr/sbin/emaint
new file mode 120000
index 0000000..3be9c5e
--- /dev/null
+++ b/usr/sbin/emaint
@@ -0,0 +1 @@
+../lib/portage/bin/emaint \ No newline at end of file
diff --git a/usr/sbin/env-update b/usr/sbin/env-update
new file mode 120000
index 0000000..dc7aca4
--- /dev/null
+++ b/usr/sbin/env-update
@@ -0,0 +1 @@
+../lib/portage/bin/env-update \ No newline at end of file
diff --git a/usr/sbin/etc-update b/usr/sbin/etc-update
new file mode 120000
index 0000000..641afc6
--- /dev/null
+++ b/usr/sbin/etc-update
@@ -0,0 +1 @@
+../lib/portage/bin/etc-update \ No newline at end of file
diff --git a/usr/sbin/fixpackages b/usr/sbin/fixpackages
new file mode 120000
index 0000000..f9d5976
--- /dev/null
+++ b/usr/sbin/fixpackages
@@ -0,0 +1 @@
+../lib/portage/bin/fixpackages \ No newline at end of file
diff --git a/usr/sbin/readpecoff b/usr/sbin/readpecoff
new file mode 120000
index 0000000..0667013
--- /dev/null
+++ b/usr/sbin/readpecoff
@@ -0,0 +1 @@
+../lib/portage/bin/readpecoff \ No newline at end of file
diff --git a/usr/sbin/regenworld b/usr/sbin/regenworld
new file mode 120000
index 0000000..6316826
--- /dev/null
+++ b/usr/sbin/regenworld
@@ -0,0 +1 @@
+../lib/portage/bin/regenworld \ No newline at end of file
diff --git a/usr/share/man/man1/dispatch-conf.1 b/usr/share/man/man1/dispatch-conf.1
new file mode 100644
index 0000000..3f6f9fc
--- /dev/null
+++ b/usr/share/man/man1/dispatch-conf.1
@@ -0,0 +1,90 @@
+.TH "DISPATCH-CONF" "1" "Jan 2011" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+dispatch\-conf \- Sanely update configuration files after emerging new packages
+.SH "SYNOPSIS"
+.B dispatch\-conf
+.SH "DESCRIPTION"
+\fIdispatch\-conf\fR is designed to be run after merging new packages
+in order to see if there are updates to the configuration files.
+If a new configuration file will overwrite an old one, \fIdispatch\-conf\fR
+will prompt the user for a decision about how to resolve the discrepancy.
+Advantages of \fIdispatch\-conf\fR include easy rollback (changes to config
+files are stored either using patches or rcs) and the ability to
+automatically update config files that the user has never modified or
+that differ from the current version only in CVS cruft or white space.
+
+\fIdispatch\-conf\fR will check all directories in the \fICONFIG_PROTECT\fR
+variable. All config files found in \fICONFIG_PROTECT_MASK\fR will
+automatically be updated for you by \fIdispatch\-conf\fR. See
+\fBmake.conf\fR(5) for more information.
+.SH "OPTIONS"
+.TP
+None.
+.SH "USAGE"
+\fIdispatch\-conf\fR must be run as root, since the config files to be
+replaced are generally owned by root. Before running \fIdispatch\-conf\fR
+for the first time the settings in \fB/etc/dispatch\-conf.conf\fR
+should be edited and the archive directory specified in
+\fB/etc/dispatch\-conf.conf\fR will need to be created. All changes to
+config files will be saved in the archive directory either as patches
+or using rcs, making restoration to an earlier version rather simple.
+
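+For example, assuming \fB/etc/dispatch\-conf.conf\fR keeps its default
+\fIarchive\-dir\fR of \fI/etc/config\-archive\fR (the exact path may
+differ, particularly on prefix installs), a first run might look like:
+.nf
+	# mkdir /etc/config\-archive
+	# dispatch\-conf
+.fi
+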
+When \fIdispatch\-conf\fR finds a config file that has a new update the user
+is provided with a menu of options for how to handle the update:
+.TP
+.B u
+Update (replace) the current config file with the new config file and continue.
+.TP
+.B z
+Zap (delete) the new config file and continue.
+.TP
+.B n
+Skip to the next config file, leaving both the original config file and any
+\fICONFIG_PROTECT\fRed files.
+.TP
+.B e
+Edit the new config file, using the editor defined in \fIEDITOR\fR.
+.TP
+.B m
+Interactively merge the current and new config files.
+.TP
+.B l
+Look at the differences between the pre-merged and merged config files.
+.TP
+.B t
+Toggle between the merged and pre-merged config files (in terms of which
+should be installed using the \fBu\fR command).
+.TP
+.B h
+Display a help screen.
+.TP
+.B q
+Quit \fIdispatch\-conf\fR.
+.SH "FILE MODES"
+\fBWARNING:\fR When \fB/etc/dispatch\-conf.conf\fR is configured
+to use \fBrcs\fR(1), read and execute permissions of archived
+files may be inherited from the first check in of a working file,
+as documented in the \fBci\fR(1) man page. This means that even
+if the permissions of the working file have since changed, the
+older permissions of the first check in may be inherited. As
+mentioned in the \fBci\fR(1) man page, users can control access
+to RCS files by setting the permissions of the directory
+containing the files.
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Jeremy Wohl
+Karl Trygve Kalleberg <karltk@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Grant Goodyear <g2boojum@gentoo.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/dispatch\-conf.conf
+Configuration settings for \fIdispatch\-conf\fR are stored here.
+.SH "SEE ALSO"
+.BR make.conf (5),
+.BR ci (1),
+.BR etc-update (1),
+.BR rcs (1)
diff --git a/usr/share/man/man1/ebuild.1 b/usr/share/man/man1/ebuild.1
new file mode 100644
index 0000000..93b4fe5
--- /dev/null
+++ b/usr/share/man/man1/ebuild.1
@@ -0,0 +1,233 @@
+.TH "EBUILD" "1" "Mar 2013" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+ebuild \- a low level interface to the Portage system
+.SH "SYNOPSIS"
+.B ebuild
+.I file command [command]\fR...
+.SH "DESCRIPTION"
+The ebuild program is a direct interface to the Portage system. It
+allows for direct action upon an ebuild with specific subcommands or
+groups of commands to perform in a specific ebuild's context and
+functions. Accepting an ebuild script and one or more commands
+as arguments, the ebuild program parses the ebuild script and
+executes the specified commands. Commands exist to fetch sources,
+unpack sources, compile sources, install object files into a temporary
+directory "image", merge the image to the local filesystem, create a
+bzipped tarball package out of the image, and more.
+.SH "FILE"
+This must be a valid ebuild script. For further information read
+\fBebuild\fR(5).
+.SH "COMMANDS"
+By default, portage will execute all the functions in order up to the
+one actually specified, except for the functions that have already been
+executed in a previous invocation of ebuild. For example, simply issuing the
+command \fBcompile\fR will trigger the functions before it to also be run (such
+as \fBsetup\fR and \fBunpack\fR), unless they were run in a previous invocation
+of ebuild. If you want to make sure they are all run, you need to use
+the command \fBclean\fR first. If you wish to only have the specified command
+run, then you should use the \fInoauto\fR option in the \fBFEATURES\fR
+environment variable. See the \fBmake.conf\fR(5) man page for more
+information.
+
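+For example, using a hypothetical ebuild file, running \fBclean\fR
+followed by \fBcompile\fR guarantees that \fBsetup\fR, \fBunpack\fR and
+\fBcompile\fR are all executed afresh:
+.nf
+	# ebuild foo\-1.0.ebuild clean compile
+.fi
+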
+.TP
+.BR help
+Shows a condensed form of this man page along with a lot of package
+specific information.
+.TP
+.BR setup
+Runs all package-specific setup actions and exotic system checks.
+.TP
+.BR clean
+Cleans the temporary build directory that Portage has created for
+this particular ebuild file. The temporary build directory normally
+contains the extracted source files as well as a possible
+"install image" (all the files that will be merged to the local
+filesystem or stored in a package). The location of the build
+directory is set by the PORTAGE_TMPDIR variable. For information
+on what this variable is, run \fIemerge \-\-info\fR, or to override
+this variable, see \fBmake.conf\fR(5).
+
+Note: Portage cleans up almost everything after a package has been
+successfully merged unless FEATURES contains 'noclean'. Adding noclean
+to FEATURES will cause a lot of files to remain and will consume large
+amounts of space, very quickly. It is not recommended to leave this on,
+unless you have use for the sources post\-merge. Optionally, one may
+manually clean these files with \fIrm \-rf /var/tmp/portage\fR.
+.TP
+.BR fetch
+Checks to see if all the sources specified in SRC_URI are available in
+DISTDIR (see \fBmake.conf\fR(5) for more information) and have a valid
+checksum. If the sources aren't available, an attempt is made to
+download them from the locations specified in SRC_URI. If multiple
+download locations are listed for a particular file, Portage pings
+each location to see which location is closer. (May not be true
+presently.) The Gentoo Linux mirrors defined by GENTOO_MIRRORS are
+always considered first. If for some reason the current or
+just\-downloaded sources' checksums don't match those recorded
+in files/digest\-[package]\-[version\-rev], a warning is printed
+and ebuild exits with an error code of 1.
+.TP
+.BR digest
+This is now equivalent to the \fImanifest\fR command.
+.TP
+.BR manifest
+Updates the manifest file for the package. This creates checksums for all
+of the files found in the same directory as the current ebuild as well as
+the recursive contents of the files subdirectory. It also creates checksums
+for all of the files listed in SRC_URI for each ebuild. For further
+information regarding the behavior of this command, see the documentation for
+the \fIassume\-digests\fR value of the \fBFEATURES\fR variable in
+\fBmake.conf\fR(5). See the \fB\-\-force\fR option if you would like to
+prevent digests from being assumed.
+.TP
+.BR unpack
+Extracts the sources to a subdirectory in the \fIbuild directory\fR
+(BUILD_PREFIX) by running the \fIsrc_unpack()\fR function in the ebuild
+file. If no src_unpack() function has been specified, a default
+src_unpack() function is used that extracts all the files specified in
+SRC_URI. The sources are normally extracted to
+${BUILD_PREFIX}/[package]\-[version\-rev]/work. This particular directory
+can be referenced by using the ${WORKDIR} variable.
+
+If you're creating an ebuild, you'll want to make sure that the S
+(source directory) variable defined at the top of your ebuild script
+points to the directory that contains your extracted sources. This
+directory is defined by default to be ${WORKDIR}/${P}, so it is not
+often required. The src_unpack() function is also responsible for
+making the appropriate patches to the sources so that they're ready
+for compilation.
+.TP
+.BR prepare
+Prepares the extracted sources by running the \fIsrc_prepare()\fR
+function specified in the ebuild file. When src_prepare() starts, the
+current working directory will be set to ${S}. This function is supported
+beginning with \fBEAPI 2\fR.
+.TP
+.BR configure
+Configures the extracted sources by running the \fIsrc_configure()\fR
+function specified in the ebuild file. When src_configure() starts, the
+current working directory will be set to ${S}. This function is supported
+beginning with \fBEAPI 2\fR.
+.TP
+.BR compile
+Compiles the extracted sources by running the \fIsrc_compile()\fR
+function specified in the ebuild file. When src_compile() starts, the
+current working directory will be set to ${S}. When src_compile()
+completes, the sources should be fully compiled.
+.TP
+.BR test
+Runs package-specific test cases to verify that everything was built
+properly.
+.TP
+.BR preinst
+Runs package-specific actions that need to be done before the package
+is installed into the live filesystem.
+.TP
+.BR install
+Installs the package to the temporary \fIinstall directory\fR by running
+the \fIsrc_install()\fR function. When completed, the
+\fIinstall directory\fR (${BUILD_PREFIX}/[package]\-[version\-rev]/image)
+will contain all the files that should either be merged to the local
+filesystem or included in a binary package.
+.TP
+.BR postinst
+Runs package-specific actions that need to be done after the package
+is installed into the live filesystem. Usually helpful messages are
+shown here.
+.TP
+.BR qmerge
+This function installs all the files in the \fIinstall directory\fR
+to the live filesystem. The process works as follows: first, the
+\fIpkg_preinst()\fR function (if specified) is run. Then, the files
+are merged into the live filesystem, and the installed files'
+checksums are recorded in
+\fI/var/db/pkg/${CATEGORY}/${PN}\-${PVR}/CONTENTS\fR. After
+all the files have been merged, the \fIpkg_postinst()\fR function
+(if specified) is executed.
+.TP
+.BR merge
+Normally, to merge an ebuild, you need to \fIfetch\fR, \fIunpack\fR,
+\fIcompile\fR, \fIinstall\fR and \fIqmerge\fR. If you're simply
+interested in merging the ebuild, you can use this command, which
+will perform all these steps for you, stopping along the way if a
+particular step doesn't complete successfully.
+.TP
+.BR unmerge
+This function first executes the \fIpkg_prerm()\fR function (if specified).
+Then it removes all files from the live filesystem that have a valid
+checksum and mtime in the package contents file. Any empty directories
+are recursively removed. Finally, it runs the \fIpkg_postrm()\fR function (if
+specified). It is safe to merge a new version of a package first and
+then unmerge the old one. In fact, this is the recommended package
+upgrade method.
+.TP
+.BR prerm
+Runs package-specific actions that need to be executed before the package is
+removed from the filesystem. See also \fIunmerge\fR.
+.TP
+.BR postrm
+Runs package-specific actions that need to be executed after the package is
+removed from the filesystem. See also \fIunmerge\fR.
+.TP
+.BR config
+Runs package-specific actions that need to be executed after the emerge
+process has completed. This usually entails setup of configuration files
+or other similar setups that the user may wish to run.
+.TP
+.BR package
+This command is a lot like the \fImerge\fR command, except that after
+fetching, unpacking, compiling and installing, a .tbz2 binary package
+tarball is created and stored in \fBPKGDIR\fR (see \fBmake.conf\fR(5)).
+.TP
+.BR rpm
+Builds a RedHat RPM package from the files in the temporary
+\fIinstall directory\fR. At the moment, the ebuild's dependency
+information is not incorporated into the RPM.
+.SH "OPTIONS"
+.TP
+.BR "\-\-debug"
+Run bash with the \-x option, causing it to output verbose debugging
+information to stdout.
+.TP
+.BR "\-\-color < y | n >"
+Enable or disable color output. This option will override \fINOCOLOR\fR
+(see \fBmake.conf\fR(5)) and may also be used to force color output when stdout
+is not a tty (by default, color is disabled unless stdout is a tty).
+.TP
+.BR "\-\-force"
+When used together with the digest or manifest command,
+this option forces regeneration of
+digests for all distfiles associated with the current ebuild. Any distfiles
+that do not already exist in ${DISTDIR} will be automatically fetched.
+.TP
+.BR "\-\-ignore\-default\-opts"
+Do not use the \fIEBUILD_DEFAULT_OPTS\fR environment variable.
+.TP
+.BR "\-\-skip\-manifest"
+Skip all manifest checks.
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Achim Gottinger <achim@gentoo.org>
+Daniel Robbins <drobbins@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/portage/make.conf
+Contains variables for the build process and overrides those
+in make.globals.
+.TP
+.B /etc/portage/color.map
+Contains variables customizing colors.
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR ebuild (5),
+.BR make.conf (5),
+.BR color.map (5)
+.TP
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
+.TP
+The helper apps in \fI/usr/lib/portage/bin\fR.
diff --git a/usr/share/man/man1/egencache.1 b/usr/share/man/man1/egencache.1
new file mode 100644
index 0000000..1af83de
--- /dev/null
+++ b/usr/share/man/man1/egencache.1
@@ -0,0 +1,163 @@
+.TH "EGENCACHE" "1" "Jul 2013" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+egencache \- generate metadata cache for ebuild repositories
+.SH "SYNOPSIS"
+.B egencache
+.I [options] --update [ATOM]\fR...
+.SH "DESCRIPTION"
+The egencache program generates metadata cache for ebuild repositories and
+stores it in the \fImetadata/md5\-cache/\fR directory within the repository
+itself, for distribution.
+.SH ACTIONS
+.TP
+.BR "\-\-update [ATOM] ... "
+Update the \fImetadata/md5\-cache/\fR directory (generate metadata as
+necessary).
+If no package atoms are specified then all will be updated. See ebuild(5)
+for the details on package atom syntax.
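+
+For example, to update the cache of a repository named \fIgentoo\fR
+(substitute the \fBrepo_name\fR of your repository) using four parallel
+jobs:
+.nf
+	# egencache \-\-repo=gentoo \-\-update \-\-jobs=4
+.fi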
+.TP
+.BR "\-\-update\-changelogs"
+Update the ChangeLog files from SCM logs (supported only in git repos).
+.TP
+.BR "\-\-update\-use\-local\-desc"
+Update the \fIprofiles/use.local.desc\fR file from metadata.xml.
+.TP
+.BR "\-\-update\-manifests"
+Update manifest files, and sign them if signing is enabled. This supports
+parallelization if enabled via the \-\-jobs option. The \-\-thin\-manifests
+and \-\-sign\-manifests options may be used to manually override layout.conf
+settings.
+.SH OPTIONS
+.TP
+.BR "\-\-cache\-dir=CACHE_DIR"
+Location of the intermediate metadata cache which is stored in a different
+format that includes eclass state. See the \fBBUGS\fR section for
+information about why this is necessary.
+.br
+Defaults to /var/cache/edb/dep.
+.TP
+.BR "\-\-config\-root=PORTAGE_CONFIGROOT"
+Location of portage config files.
+.br
+Defaults to /.
+.TP
+.BR "\-\-gpg\-dir"
+Override the PORTAGE_GPG_DIR variable.
+.TP
+.BR "\-\-gpg\-key"
+Override the PORTAGE_GPG_KEY variable.
+.TP
+.BR "\-\-ignore-default-opts"
+Causes \fIEGENCACHE_DEFAULT_OPTS\fR to be ignored.
+.TP
+.BR "\-\-jobs=JOBS"
+Specifies the maximum number of ebuild processes to spawn simultaneously.
+Also see the related \fB\-\-load\-average\fR option.
+.TP
+.BR \-\-load\-average=LOAD
+Specifies the maximum load allowed when spawning multiple jobs.
+.TP
+.BR "\-\-portdir=PORTDIR"
+Override the PORTDIR variable. This option is deprecated in favor of
+the \-\-repositories\-configuration option.
+.TP
+.BR "\-\-portdir\-overlay=PORTDIR_OVERLAY"
+Override the PORTDIR_OVERLAY variable. This option is deprecated in favor of
+the \-\-repositories\-configuration option.
+.TP
+.BR "\-\-preserve\-comments"
+Preserve the comments found in the output use.local.desc file. This requires
+the output file to exist before egencache is called.
+.TP
+.BR "\-\-repo=REPO"
+Name of the repo to operate on. The name should correspond to the value of
+a \fBrepo_name\fR entry (see \fBportage\fR(5)) from one of the repositories.
+.TP
+.BR "\-\-repositories\-configuration=REPOSITORIES_CONFIGURATION"
+Override configuration of repositories. The argument of this option has
+the same format as repos.conf (see \fBportage\fR(5)).
+.TP
+.BR "\-\-rsync"
+When used together with the \fB\-\-update\fR action, this enables a workaround
+for cases in which the content of a cache entry changes and neither the file
+mtime nor size changes, preventing rsync from detecting changes. Such cases are
+handled by bumping the mtime on the ebuild (and the corresponding cache entry).
+This option should only be needed for distribution via something like
+\fBrsync\fR(1), which relies on timestamps and file sizes to detect changes
+(see \fBbug 139134\fR). It's not needed with \fBgit\fR(1) since that uses a
+more thorough mechanism which allows it to detect changed inode numbers
+(described in \fIracy-git.txt\fR in the git technical docs).
+.TP
+.BR "\-\-sign\-manifests< y | n >"
+Manually override layout.conf sign-manifests setting.
+.TP
+.BR "\-\-strict\-manifests< y | n >"
+Manually override "strict" FEATURES setting.
+.TP
+.BR "\-\-thin\-manifests< y | n >"
+Manually override layout.conf thin-manifests setting.
+.TP
+.BR "\-\-tolerant"
+Exit successfully if only minor errors occurred, such as skipped cache
+updates due to ebuilds that either fail to source or are not sourced
+due to invalid Manifest entries.
+.TP
+.BR "\-\-use\-local\-desc\-output=ULD_OUTPUT"
+Output file for use.local.desc data (or '-' for stdout).
+.SH "ENVIRONMENT OPTIONS"
+.TP
+\fBEGENCACHE_DEFAULT_OPTS\fR
+If this variable is set in \fBmake.conf\fR(5) then any options that it
+contains will be added to the beginning of the command line on every
+invocation. These options will not be added if the
+\fB\-\-ignore-default\-opts\fR option is specified.
+.SH "BUGS"
+Prior to portage-2.1.11.32, the 'pms' cache format was enabled by default.
+This 'pms' format, which is distributed in the \fImetadata/cache/\fR
+directory of the repository, has significant limitations related to the
+cache validation mechanism which involves comparison of
+a cache entry mtime to the mtime of the corresponding \fBebuild(5)\fR. This
+mechanism is unreliable in cases when eclass changes result in metadata
+changes, since no information about eclass state is available in the cache.
+Also, since the mtime of the cache entry must correspond to that of the
+ebuild, the cache format is only suitable for distribution via protocols
+that preserve timestamps (such as \fBrsync(1))\fR. For cache that is
+distributed via \fBgit(1)\fR repositories, there is currently a workaround
+implemented in \fBemerge\fR(1) \fB\-\-sync\fR which updates ebuild mtimes
+to match their corresponding cache entries (except for ebuilds that are
+modified relative to HEAD).
+
+In order to solve the above problems, the newer 'md5-dict' format has been
+enabled by default since portage-2.1.11.32. This format is distributed in
+the \fImetadata/md5-cache/\fR directory of the repository, and includes
+additional validation data in the form of digests for both the ebuild
+and its inherited eclasses. \fBWARNING:\fR Portage versions prior to
+portage-2.1.11.14 will \fBNOT\fR recognize the 'md5-dict' format unless it is
+explicitly listed in \fImetadata/layout.conf\fR (refer to \fBportage\fR(5)
+for example usage).
+
+\fBWARNING:\fR For backward compatibility, the obsolete 'pms' cache format
+will still be generated by default if the \fImetadata/cache/\fR directory
+exists in the repository. It can also be explicitly enabled via the
+cache\-formats setting in \fImetadata/layout.conf\fR (refer to \fBportage\fR(5)
+for example usage). If the 'pms' cache format is enabled and the 'md5-dict'
+format is not enabled, then it is necessary to enable
+\fBmetadata-transfer\fR in \fBFEATURES\fR (see \fBmake.conf(5)\fR).
+This causes intermediate cache (in a different format that includes
+eclass state) to be generated inside the directory which is configurable
+via the \fB\-\-cache\-dir\fR option.
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Zac Medico <zmedico@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/portage/make.conf
+Contains variables.
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR make.conf (5),
+.BR portage (5)
diff --git a/usr/share/man/man1/emaint.1 b/usr/share/man/man1/emaint.1
new file mode 100644
index 0000000..3d75b0c
--- /dev/null
+++ b/usr/share/man/man1/emaint.1
@@ -0,0 +1,77 @@
+.TH "EMAINT" "1" "Nov 2008" "Portage 2.2.14-prefix" "Portage"
+.SH NAME
+emaint \- performs system health checks and maintenance
+.SH SYNOPSIS
+.BR emaint
+[\fIoptions\fR]
+[\fBall\fR | \fBbinhost\fR | \fBcleanconfig\fR | \fBcleanresume\fR | \
+\fBlogs\fR | \fBmovebin\fR | \fBmoveinst\fR | \fBworld\fR]
+.SH DESCRIPTION
+The emaint program provides an interface to system health
+checks and maintenance.
+.SH COMMANDS
+.TP
+.BR all
+Perform all supported commands.
+.TP
+.BR binhost
+Generate a metadata index for binary packages located in \fBPKGDIR\fR (for
+download by remote clients). See the \fBPORTAGE_BINHOST\fR documentation in
+the \fBmake.conf\fR(5) man page for additional information.
+.TP
+.BR cleanconfig
+Discard no longer installed config tracker entries.
+.TP
+.BR cleanresume
+Discard merge lists saved for the \fBemerge\fR(1) \fB--resume\fR action.
+.TP
+.BR logs
+Clean out old logs from \fBPORT_LOGDIR\fR using the command defined in
+\fBPORT_LOGDIR_CLEAN\fR.
+See the \fBmake.conf\fR(5) man page for additional information, as well
+as for enabling the \fB'clean-logs'\fR feature in emerge to do this
+automatically.
+.TP
+.BR movebin
+Perform package move updates for binary packages located in \fBPKGDIR\fR.
+.TP
+.BR moveinst
+Perform package move updates for installed packages.
+.TP
+.BR world
+Fix problems in the \fIworld\fR file.
+.SH DEFAULT OPTIONS
+.TP
+.B \-c, \-\-check
+Check for any problems that may exist. (all commands)
+.TP
+.B \-f, \-\-fix
+Fix any problems that may exist. (not all commands)
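+
+For example, a typical maintenance session might first check all
+targets and then fix only the \fIworld\fR file:
+.nf
+	# emaint \-\-check all
+	# emaint \-\-fix world
+.fi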
+.SH OPTIONS
+.TP
+.B \-C, \-\-clean
+Cleans the logs from \fBPORT_LOGDIR\fR (logs command only)
+.TP
+.B \-p, \-\-pretend
+Sets pretend mode (same as \-c, \-\-check) for use with the \-C, \-\-clean
+option (logs command only)
+.TP
+.B \-t NUM, \-\-time NUM
+Changes the minimum age \fBNUM\fR (in days) of the logs to be listed or
+deleted. (logs command only)
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH AUTHORS
+.nf
+Mike Frysinger <vapier@gentoo.org>
+Brian Dolbec <dolsen@gentoo.org>
+.fi
+.SH "FILES"
+.TP
+.B /var/lib/portage/world
+Contains a list of all user\-specified packages.
+.TP
+.B /var/lib/portage/config
+Contains the paths and md5sums of all the config files being tracked.
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR portage (5)
diff --git a/usr/share/man/man1/emerge.1 b/usr/share/man/man1/emerge.1
new file mode 100644
index 0000000..6a5be06
--- /dev/null
+++ b/usr/share/man/man1/emerge.1
@@ -0,0 +1,1262 @@
+.TH "EMERGE" "1" "Oct 2014" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+emerge \- Command\-line interface to the Portage system
+.SH "SYNOPSIS"
+.TP
+.BR emerge
+[\fIoptions\fR] [\fIaction\fR] [\fIebuild\fR | \fItbz2file\fR | \fIfile\fR |
+\fI@set\fR | \fIatom\fR] ...
+.TP
+.BR emerge
+\fB\-\-sync\fR | \fB\-\-version\fR
+.TP
+.BR emerge
+\fB\-\-info\fR [\fIatom\fR]
+.TP
+.BR emerge
+\fB\-\-search\fR \fIsomestring\fR
+.TP
+.BR emerge
+\fB\-\-help\fR
+.SH "DESCRIPTION"
+\fBemerge\fR is the definitive command\-line interface to the Portage
+system. It is primarily used for installing packages, and \fBemerge\fR
+can automatically handle any dependencies that the desired package has.
+\fBemerge\fR can also update the \fBportage tree\fR, making new and
+updated packages available. \fBemerge\fR gracefully handles updating
+installed packages to newer releases as well. It handles both source
+and binary packages, and it can be used to create binary packages for
+distribution.
+.SH "EBUILDS, TBZ2S, SETS AND ATOMS"
+\fBemerge\fR primarily installs packages. You can specify
+packages to install in five possible ways: an \fIatom\fR,
+a \fIset\fR, an installed \fIfile\fR, an \fIebuild\fR, or
+a \fItbz2file\fR.
+.LP
+.TP
+.BR ebuild
+An \fIebuild\fR must be, at a minimum, a valid Portage
+package directory name without a version or category, such as
+\fBportage\fR or \fBpython\fR.
+Both categories and version numbers may be used in addition, such
+as \fBsys\-apps/portage\fR or \fB=python\-2.2.1\-r2\fR.
+\fBemerge\fR
+ignores a trailing slash so that filename completion can be used.
+The \fIebuild\fR may also be an actual filename, such as
+\fB/usr/portage/app\-admin/python/python\-2.2.1\-r2.ebuild\fR.
+\fBWARNING:\fR The implementation of \fBemerge /path/to/ebuild\fR is broken and
+so this syntax shouldn't be used.
+.TP
+.BR tbz2file
+A \fItbz2file\fR must be a valid .tbz2 created with \fBebuild
+<package>\-<version>.ebuild package\fR or \fBemerge \-\-buildpkg
+[category/]<package>\fR or \fBquickpkg /var/db/pkg/<category>/<package>\fR.
+.TP
+.BR file
+A \fIfile\fR must be a file or directory that has been installed by one or
+more packages. If an absolute path is not used, then it must begin with
+either "./" or "../". For directories that are owned by multiple packages, all
+owning packages will be selected. See the portageq(1) owners command if you
+would like to query the owners of one or more files or directories.
+.TP
+.BR set
+A \fIset\fR is a convenient shorthand for a large group of
+packages. Three sets are currently always available: \fBselected\fR,
+\fBsystem\fR and \fBworld\fR. \fBselected\fR contains the user-selected
+"world" packages that are listed in \fB/var/lib/portage/world\fR,
+and nested sets that may be listed
+in \fB/var/lib/portage/world_sets\fR. \fBsystem\fR refers to a set of
+packages deemed necessary for your system to run properly. \fBworld\fR
+encompasses both the \fBselected\fR and \fBsystem\fR sets. [See
+\fBFILES\fR below for more information.] Other sets can exist depending
+on the current configuration. The default set configuration is located
+in the \fB/usr/share/portage/config/sets\fR directory.
+User sets may be created by placing files in the \fB/etc/portage/sets/\fR
+directory (see \fBportage\fR(5)). Note that a \fIset\fR
+is generally used in conjunction with \fB\-\-update\fR. When used as
+arguments to \fBemerge\fR, sets have to be prefixed with \fB@\fR to be
+recognized. Use the \fB\-\-list\-sets\fR action to display a list of
+available package sets.
+.TP
+.BR atom
+An \fIatom\fR describes bounds on a package that you wish to install.
+\fISee ebuild(5) for the details on atom syntax.\fR For example,
+\fB>=dev\-lang/python\-2.2.1\-r2\fR matches the latest available version of
+Python greater than or equal to 2.2.1\-r2. Similarly,
+\fB<dev\-lang/python\-2.0\fR matches the latest available version of Python
+before 2.0. Note that in many shells you will need to escape characters such
+as '<' and '='; use single\- or double\-quotes around the \fIatom\fR
+to get around escaping problems. You may also constrain an atom to match a
+specific \fBSLOT\fR by appending a colon and a \fBSLOT\fR. Example:
+\fBx11\-libs/qt:3\fR.
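+
+For example, in most shells the \fB>=\fR atom shown above must be
+quoted so the shell does not interpret the special characters:
+.nf
+	# emerge ">=dev\-lang/python\-2.2.1\-r2"
+.fi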
+.SH "ACTIONS"
+.TP
+.BR "No action"
+If no action is specified, the action is to merge in the specified
+packages, satisfying any dependencies that they may have. The
+arguments can be \fIatoms\fR, \fIsets\fR, installed \fIfiles\fR,
+\fIebuilds\fR, or \fItbz2s\fR.
+\fBNote that you need to use the \-\-usepkg
+option if you want to install a tbz2\fR. The packages are added
+to the \fBworld\fR file at the end, so that they are considered for
+later updating.
+.TP
+.BR \-\-check\-news
+Scan all repositories for relevant unread GLEP 42 news items, and display
+how many are found. See
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0042.html\fR.
+.TP
+.BR \-\-clean
+Cleans up the system by examining the installed packages and removing older
+packages. This is accomplished by looking at each installed package and
+separating the installed versions by \fBslot\fR. Clean will \fBremove all but
+the most recently installed version in each slot\fR. Clean should not
+remove unslotted packages. Note: Most recently installed means most
+\fBrecent\fR, not highest version.
+.TP
+.BR "\-\-config "
+Run package specific actions needed to be executed after the emerge process
+has completed. This usually entails configuration file setup or other similar
+setups that the user may wish to run.
+.TP
+.BR "\-\-depclean (-c)"
+Cleans the system by removing packages that are not associated
+with explicitly merged packages. Depclean works by creating the
+full dependency tree from the @world set,
+then comparing it to installed packages. Packages installed, but
+not part of the dependency tree, will be uninstalled by depclean.
+See \fB\-\-with\-bdeps\fR for behavior with respect to build time dependencies
+that are not strictly required. Packages that are part of the world set will
+always be kept. They can be manually added to this set with \fIemerge
+\-\-noreplace <atom>\fR. As a safety measure, depclean will not remove any
+packages unless *all* required dependencies have been resolved. As a
+consequence, it is often necessary to run \fIemerge \-\-update \-\-newuse
+\-\-deep @world\fR prior to depclean. Also note that
+depclean may break link level dependencies, especially when the
+\fB\-\-depclean\-lib\-check\fR option is disabled. Thus, it is
+recommended to use a tool such as \fBrevdep-rebuild\fR(1)
+in order to detect such breakage.
+
+\fBWARNING:\fR
+Inexperienced users are advised to use \fB\-\-pretend\fR or \fB\-\-ask\fR
+with this option in order to see a preview of which packages
+will be uninstalled. Always study the list of packages
+to be cleaned for any obvious mistakes. Note that packages listed in
+package.provided (see \fBportage\fR(5)) may be removed by
+depclean, even if they are part of the world set.
+
+Depclean serves as a dependency aware
+version of \fB\-\-unmerge\fR. When given one or more atoms, it will
+unmerge matched packages that have no reverse dependencies. Use
+\fB\-\-depclean\fR together with \fB\-\-verbose\fR to show reverse
+dependencies.
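+
+For example, a cautious depclean session, previewed with \fB\-\-ask\fR
+as recommended above, might be:
+.nf
+	# emerge \-\-update \-\-newuse \-\-deep @world
+	# emerge \-\-ask \-\-depclean
+.fi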
+.TP
+.BR "\-\-deselect [ y | n ]"
+Remove atoms and/or sets from the world file. This action is implied
+by uninstall actions, including \fB-\-depclean\fR,
+\fB-\-prune\fR and \fB-\-unmerge\fR. Use \fB-\-deselect=n\fR
+in order to prevent uninstall actions from removing
+atoms from the world file.
+.TP
+.BR "\-\-help " (\fB\-h\fR)
+Displays help information for emerge. Adding one of the additional
+arguments listed above will give you more specific help information
+on that subject. The internal \fBemerge\fR help documentation is
+updated more frequently than this man page; check it out if you
+are having problems that this man page does not help resolve.
+.TP
+.BR \-\-info
+Produces a list of information to include in bug reports which aids the
+developers when fixing the reported problem. \fBPlease include this
+information when submitting a bug report.\fR Expanded output can be obtained
+with the \fI\-\-verbose\fR option.
+.TP
+.BR \-\-list\-sets
+Displays a list of available package sets.
+.TP
+.BR \-\-metadata
+Transfers pregenerated metadata cache from ${repository_location}/metadata/md5\-cache/
+to /var/cache/edb/dep/ as is normally done on the tail end of an rsync update using
+\fBemerge \-\-sync\fR. This process populates the cache database that Portage uses
+for pre-parsed lookups of package data. It does not populate cache for repositories
+not distributing pregenerated metadata cache. In order to generate cache for these
+repositories, use \fB\-\-regen\fR.
+In versions of portage >=2.1.5 the \-\-metadata action is totally unnecessary
+unless the user has enabled FEATURES="metadata-transfer" in \fBmake.conf\fR(5).
+.TP
+.BR "\-\-prune " (\fB\-P\fR)
+Removes all but the highest installed version of a package from your
+system. Use \fB\-\-prune\fR together with \fB\-\-verbose\fR to show
+reverse dependencies or with \fB\-\-nodeps\fR to ignore all dependencies.
+\fBWARNING: This action can remove packages from your world file! Check
+the emerge output of the next \-\-depclean run carefully! Use
+\-\-depclean to avoid this issue.\fR
+.TP
+.BR \-\-regen
+Causes portage to check and update the dependency cache of all ebuilds in the
+portage tree. The cache is used to speed up searches and the building of
+dependency trees. This command is not recommended for rsync users as rsync
+updates the cache using server\-side caches. If you do not know the
+differences between a 'rsync user' and some other user, then you are a 'rsync
+user' :). Rsync users should simply run \fBemerge \-\-sync\fR to regenerate
+the cache. After a portage update, rsync users may find it convenient to run
+\fBemerge \-\-metadata\fR to rebuild the cache as portage does at the end of
+a sync operation. In order to specify parallel \fB\-\-regen\fR behavior, use
+the \fB\-\-jobs\fR and \fB\-\-load\-average\fR options. If you would like to
+generate and distribute cache for use by others, use \fBegencache\fR(1).
+.TP
+.BR "\-\-resume" (\fB\-r\fR)
+Resumes the most recent merge list that has been aborted due to an error.
+This re\-uses the arguments and options that were given with the original
+command that's being resumed, and the user may also provide
+additional options when calling \fB\-\-resume\fR. It is an error to provide
+atoms or sets as arguments to \fB\-\-resume\fR, since the arguments from the
+resumed command are used instead.
+Please note that this operation will only return an error on failure. If there
+is nothing for portage to do, then portage will exit with a message and a
+success condition. A resume list will persist until it has been completed in
+entirety or until another aborted merge list replaces it. The resume history
+is capable of storing two merge lists. After one resume list completes, it is
+possible to invoke \-\-resume once again in order to resume an older list.
+The resume lists are stored in \fI/var/cache/edb/mtimedb\fR, and may be
+explicitly discarded by running `emaint \-\-fix cleanresume` (see
+\fBemaint\fR(1)).
+.TP
+.BR "\-\-search " (\fB\-s\fR)
+Searches for matches of the supplied string in the portage tree.
+By default emerge uses a case-insensitive simple search, but you can
+enable a regular expression search by prefixing the search string with %.
+For example, \fBemerge \-\-search "%^kde"\fR searches for any package whose
+name starts with "kde"; \fBemerge \-\-search "%gcc$"\fR searches for any
+package that ends with "gcc"; \fBemerge \-\-search "office"\fR searches for
+any package that contains the word "office". If you want to include the
+category into the search string, prepend an @: \fBemerge \-\-search
+"%@^dev-java.*jdk"\fR. If you want to search the package descriptions as well,
+use the \fB\-\-searchdesc\fR action.
+.TP
+.BR "\-\-searchdesc " (\fB\-S\fR)
+Matches the search string against the description field as well as
+the package name. \fBTake caution\fR as the descriptions are also
+matched as regular expressions.
+.TP
+.BR \-\-sync
+Updates repositories, for which sync\-type and sync\-uri attributes are
+set in repos.conf. See \fBportage\fR(5) for more information.
+The \fBPORTAGE_SYNC_STALE\fR variable configures
+warnings that are shown when emerge \-\-sync has not
+been executed recently.
+
+\fBWARNING:\fR
+The emerge \-\-sync action will revert local changes (e.g. modifications or
+additions of files) inside repositories synchronized using rsync.
+
+\fBNOTE:\fR
+The \fBemerge\-webrsync\fR program will download the entire
+portage tree as a tarball, which is much faster than emerge
+\-\-sync for first time syncs.
+
+.TP
+.BR "\-\-unmerge " (\fB\-C\fR)
+\fBWARNING: This action can remove important packages!\fR Removes
+all matching packages. This does no checking of dependencies, so
+it may remove packages necessary for the proper operation of your
+system. Its arguments can be \fIatoms\fR or
+\fIebuilds\fR. For a dependency aware version of \fB\-\-unmerge\fR,
+use \fB\-\-depclean\fR or \fB\-\-prune\fR.
+.TP
+.BR "\-\-version " (\fB\-V\fR)
+Displays the version number of \fBemerge\fR.
+.SH "OPTIONS"
+.TP
+.BR \-\-accept\-properties=ACCEPT_PROPERTIES
+This option temporarily overrides the \fBACCEPT_PROPERTIES\fR
+variable. The \fBACCEPT_PROPERTIES\fR variable is incremental,
+which means that the specified setting is appended to the
+existing value from your configuration. The special \fB-*\fR
+token can be used to discard the existing configuration
+value and start fresh. See the \fBMASKED PACKAGES\fR section
+and \fBmake.conf\fR(5) for more information about
+ACCEPT_PROPERTIES. A typical usage example for this option
+would be to use \fI\-\-accept\-properties=\-interactive\fR to
+temporarily mask interactive packages. With default
+configuration, this would result in an effective
+\fBACCEPT_PROPERTIES\fR value of "* -interactive".
+.TP
+.BR \-\-accept\-restrict=ACCEPT_RESTRICT
+This option temporarily overrides the \fBACCEPT_RESTRICT\fR
+variable. The \fBACCEPT_RESTRICT\fR variable is incremental,
+which means that the specified setting is appended to the
+existing value from your configuration. The special \fB-*\fR
+token can be used to discard the existing configuration
+value and start fresh. See the \fBMASKED PACKAGES\fR section
+and \fBmake.conf\fR(5) for more information about
+ACCEPT_RESTRICT. A typical usage example for this option
+would be to use \fI\-\-accept\-restrict=\-bindist\fR to
+temporarily mask packages that are not binary
+re\-distributable. With default
+configuration, this would result in an effective
+\fBACCEPT_RESTRICT\fR value of "* -bindist".
+.TP
+.BR "\-\-alert [ y | n ] (\-A short option)"
+Add a terminal bell character ('\\a') to all interactive prompts. This
+is especially useful if dependency resolution is taking a long time, and
+you want emerge to alert you when it is finished. If you use
+\fBemerge -auAD world\fR, emerge will courteously point out when it has
+finished calculating the graph.
+
+\fB--alert\fR may be 'y' or 'n'. 'true' and 'false' mean the same thing.
+Using \fB--alert\fR without an option is the same as using it with 'y'.
+Try it with 'emerge -aA portage'.
+
+If your terminal emulator is set up to make '\\a' into a window manager
+urgency hint, move your cursor to a different window to get the effect.
+.TP
+.BR "\-\-alphabetical "
+When displaying USE and other flag output, combines the enabled and
+disabled lists into one list and sorts the whole list alphabetically.
+.TP
+.BR "\-\-ask [ y | n ] (\-a short option)"
+Before performing the action, display what will take place (server info for
+\fB\-\-sync\fR, \fB\-\-pretend\fR output for merge, and so forth), then ask
+whether to proceed with the action or abort. Using \fB\-\-ask\fR is more
+efficient than using \fB\-\-pretend\fR and then executing the same command
+without \fB\-\-pretend\fR, as dependencies will only need to be calculated
+once. \fBWARNING: If the "Enter" key is pressed at the prompt (with no other
+input), it is interpreted as acceptance of the first choice. Note that the
+input
+buffer is not cleared prior to the prompt, so an accidental press of the
+"Enter" key at any time prior to the prompt will be interpreted as a choice!
+Use the \-\-ask\-enter\-invalid option if you want a single "Enter" key
+press to be interpreted as invalid input.\fR
+.TP
+.BR "\-\-ask\-enter\-invalid"
+When used together with the \fB\-\-ask\fR option,
+interpret a single "Enter" key press as
+invalid input. This helps prevent accidental
+acceptance of the first choice. This option is
+intended to be set in the \fBmake.conf\fR(5)
+\fBEMERGE_DEFAULT_OPTS\fR variable.
+.TP
+.BR "\-\-autounmask [ y | n ]"
+Automatically unmask packages and generate package.use
+settings as necessary to satisfy dependencies. This
+option is enabled by default. If any configuration
+changes are required, then they will be displayed
+after the merge list and emerge will immediately
+abort. If the displayed configuration changes are
+satisfactory, you should copy and paste them into
+the specified configuration file(s), or enable the
+\fB\-\-autounmask\-write\fR option. The
+\fBEMERGE_DEFAULT_OPTS\fR variable may be used to
+disable this option by default in \fBmake.conf\fR(5).
+.TP
+.BR "\-\-autounmask\-unrestricted\-atoms [ y | n ]"
+If \-\-autounmask is enabled, keyword and mask changes
+using the \'=\' operator will be written. With this
+option, \'>=\' operators will be used whenever possible.
+USE and license changes always use the latter behavior.
+.TP
+.BR "\-\-autounmask\-keep\-masks [ y | n ]"
+If \-\-autounmask is enabled, no package.unmask or ** keyword changes
+will be created. This leads to unsatisfied dependencies if
+no other solution exists.
+.TP
+.BR "\-\-autounmask\-write [ y | n ]"
+If \-\-autounmask is enabled, changes are written
+to config files, respecting \fBCONFIG_PROTECT\fR and \fB\-\-ask\fR.
+If the corresponding package.* is a file, the changes are appended to
+it; if it is a directory, the changes are written to the lexicographically
+last file. This ensures that the new changes always take
+precedence over existing changes. This option is automatically enabled with
+\-\-ask.
+.TP
+.BR \-\-backtrack=COUNT
+Specifies an integer number of times to backtrack if
+dependency calculation fails due to a conflict or an
+unsatisfied dependency (default: \'10\').
+.TP
+.BR "\-\-binpkg\-respect\-use [ y | n ]"
+Tells emerge to ignore binary packages if their use flags
+don't match the current configuration. (default: \'n\')
+.TP
+.BR "\-\-buildpkg [ y | n ] (\-b short option)"
+Tells emerge to build binary packages for all ebuilds processed in
+addition to actually merging the packages. Useful for maintainers
+or if you administrate multiple Gentoo Linux systems (build once,
+emerge tbz2s everywhere) as well as disaster recovery. The package
+will be created in the \fBPKGDIR\fR directory (see \fBmake.conf\fR(5)).
+An alternative for already\-merged
+packages is to use \fBquickpkg\fR(1) which creates a tbz2 from the
+live filesystem.
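+
+For example, to merge a package (name hypothetical) while also saving
+a binary package to \fBPKGDIR\fR:
+.nf
+	# emerge \-\-buildpkg foo
+.fi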
+.TP
+.BR "\-\-buildpkg\-exclude " ATOMS
+A space separated list of package atoms for which
+no binary packages should be built. This option overrides all
+possible ways to enable building of binary packages.
+.TP
+.BR "\-\-buildpkgonly " (\fB\-B\fR)
+Creates binary packages for all ebuilds processed without actually
+merging the packages. This comes with the caveat that all build-time
+dependencies must already be emerged on the system.
+.TP
+.BR "\-\-changed\-use " (\fB\-U\fR)
+Tells emerge to include installed packages where USE flags have
+changed since installation. This option also implies the
+\fB\-\-selective\fR option. Unlike \fB\-\-newuse\fR, the
+\fB\-\-changed\-use\fR option does not trigger reinstallation when
+flags that the user has not enabled are added or removed.
+
+NOTE: This option ignores the state of the "test" USE flag, since that flag
+has a special binding to FEATURES="test" (see \fBmake.conf\fR(5) for more
+information about \fBFEATURES\fR settings).
+.TP
+.BR "\-\-changelog " (\fB\-l\fR)
+Use this in conjunction with the \fB\-\-pretend\fR option. This will
+show the ChangeLog entries for all the packages that will be upgraded.
+.TP
+.BR "\-\-color < y | n >"
+Enable or disable color output. This option will override \fINOCOLOR\fR
+(see \fBmake.conf\fR(5)) and may also be used to force color output when stdout
+is not a tty (by default, color is disabled unless stdout is a tty).
+.TP
+.BR "\-\-columns"
+Used alongside \fB\-\-pretend\fR to cause the package name, new version,
+and old version to be displayed in an aligned format for easy cut\-n\-paste.
+.TP
+.BR "\-\-complete\-graph [ y | n ]"
+This causes \fBemerge\fR to consider the deep dependencies of all
+packages from the world set. With this option enabled,
+\fBemerge\fR will bail out if it determines that the given operation will
+break any dependencies of the packages that have been added to the
+graph. Like the \fB\-\-deep\fR option, the \fB\-\-complete\-graph\fR
+option will significantly increase the time taken for dependency
+calculations. Note that, unlike the \fB\-\-deep\fR option, the
+\fB\-\-complete\-graph\fR option does not cause any more packages to
+be updated than would have otherwise been updated with the option disabled.
+Using \fB\-\-with\-bdeps=y\fR together with \fB\-\-complete\-graph\fR makes
+the graph as complete as possible.
+.TP
+.BR "\-\-complete\-graph\-if\-new\-use < y | n >"
+Trigger the \fB\-\-complete\-graph\fR behavior if USE or IUSE will
+change for an installed package. This option is enabled by default.
+.TP
+.BR "\-\-complete\-graph\-if\-new\-ver < y | n >"
+Trigger the \fB\-\-complete\-graph\fR behavior if an installed package
+version will change (upgrade or downgrade). This option is enabled by default.
+.TP
+.BR \-\-config\-root=DIR
+Set the \fBPORTAGE_CONFIGROOT\fR environment variable.
+.TP
+.BR "\-\-debug " (\fB\-d\fR)
+Tells emerge to run the emerge command in \fB\-\-debug\fR mode. In this
+mode the bash build environment will run with the \-x option, causing
+it to output verbose debugging information to stdout. This also enables
+a plethora of other output (mostly dependency resolution messages).
+.TP
+.BR "\-\-deep [DEPTH] " (\fB\-D\fR)
+This flag forces
+\fBemerge\fR to consider the entire dependency tree of packages,
+instead of checking only the immediate dependencies of the packages.
+As an example, this catches updates in libraries that are not directly
+listed in the dependencies of a package. Also see \fB\-\-with\-bdeps\fR for
+behavior with respect to build time dependencies that are not strictly
+required.
+.TP
+.BR "\-\-depclean\-lib\-check [ y | n ]"
+Account for library link-level dependencies during
+\fB\-\-depclean\fR and \fB\-\-prune\fR actions.
+This option is enabled by default. If FEATURES="preserve\-libs" is
+enabled in \fBmake.conf\fR(5), and preserve\-libs is not restricted
+for any of the packages selected for removal, then this option is
+ignored because any libraries that have consumers will simply be
+preserved.
+.TP
+.BR \-\-digest
+Prevent corruption from being noticed. The `repoman manifest` command is the
+preferred way to generate manifests and it is capable of doing an entire
+repository or category at once (see \fBrepoman\fR(1)).
+.TP
+.BR "\-\-dynamic\-deps < y | n >"
+In dependency calculations, substitute the dependencies of installed
+packages with the dependencies of corresponding unbuilt ebuilds from
+source repositories. This causes the effective dependencies of
+installed packages to vary dynamically when source ebuild dependencies
+are modified. This option is enabled by default.
+
+\fBWARNING:\fR
+If you want to disable \-\-dynamic\-deps, then it may be necessary to
+first run \fBfixpackages\fR(1) in order to get the best results. The
+\fBfixpackages\fR(1) command performs two different operations that can
+also be performed separately by the `emaint \-\-fix moveinst` and
+`emaint \-\-fix movebin` commands (see \fBemaint\fR(1)).
+.TP
+.BR "\-\-emptytree " (\fB\-e\fR)
+Reinstalls target atoms and their entire deep
+dependency tree, as though no packages are currently
+installed. You should run this with \fB\-\-pretend\fR
+first to make sure the result is what you expect.
+.TP
+.BR "\-\-exclude " ATOMS
+A space separated list of package names or slot atoms.
+Emerge won't install any ebuild or binary package that
+matches any of the given package atoms.
+.TP
+.BR "\-\-fail\-clean [ y | n ]"
+Clean up temporary files after a build failure. This is
+particularly useful if you have \fBPORTAGE_TMPDIR\fR on
+tmpfs. If this option is enabled, you probably also want
+to enable \fBPORT_LOGDIR\fR (see \fBmake.conf\fR(5)) in
+order to save the build log.
+.TP
+.BR "\-\-fetchonly " (\fB\-f\fR)
+Instead of doing any package building, just perform fetches for all
+packages (fetch things from SRC_URI based upon USE setting).
+.TP
+.BR "\-\-fetch\-all\-uri " (\fB\-F\fR)
+Instead of doing any package building, just perform fetches for all
+packages (fetch everything in SRC_URI regardless of USE setting).
+.TP
+.BR "\-\-getbinpkg [ y | n ] (\-g short option)"
+Using the server and location defined in \fIPORTAGE_BINHOST\fR (see
+\fBmake.conf\fR(5)), portage will download the information from each binary
+package found and it will use that information to help build the dependency
+list. This option implies \fB\-k\fR. (Use \fB\-gK\fR for binary\-only
+merging.)
+.TP
+.BR "\-\-getbinpkgonly [ y | n ] (\-G short option)"
+This option is identical to \fB\-g\fR, as above, except binaries from the
+remote server are preferred over local packages if they are not identical.
+.TP
+.BR "\-\-ignore-default-opts"
+Causes \fIEMERGE_DEFAULT_OPTS\fR (see \fBmake.conf\fR(5)) to be ignored.
+.TP
+.BR "\-\-ignore\-built\-slot\-operator\-deps < y | n >"
+Ignore the slot/sub\-slot := operator parts of dependencies that have
+been recorded when packages were built. This option is intended
+only for debugging purposes, and it only affects built packages
+that specify slot/sub\-slot := operator dependencies which are
+supported beginning with \fBEAPI 5\fR.
+.TP
+.BR "-j [JOBS], \-\-jobs[=JOBS]"
+Specifies the number of packages to build simultaneously. If this option is
+given without an argument, emerge will not limit the number of jobs that can
+run simultaneously. Also see the related \fB\-\-load\-average\fR option.
+Similarly to the \-\-quiet\-build option, the \-\-jobs option causes all
+build output to be redirected to logs.
+Note that interactive packages currently force a setting
+of \fI\-\-jobs=1\fR. This issue can be temporarily avoided
+by specifying \fI\-\-accept\-properties=\-interactive\fR.
+.TP
+.BR "\-\-keep\-going [ y | n ]"
+Continue as much as possible after an error. When an error occurs,
+dependencies are recalculated for remaining packages and any with
+unsatisfied dependencies are automatically dropped. Also see
+the related \fB\-\-skipfirst\fR option.
+.TP
+.BR "\-\-load\-average [LOAD]"
+Specifies that no new builds should be started if there are other builds
+running and the load average is at least LOAD (a floating-point number).
+With no argument, removes a previous load limit.
+This option is recommended for use in combination with \fB\-\-jobs\fR in
+order to avoid excess load. See \fBmake\fR(1) for information about
+analogous options that should be configured via \fBMAKEOPTS\fR in
+\fBmake.conf\fR(5).
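+
+For example, to allow up to four parallel builds while keeping the
+load average below 5.0:
+.nf
+	# emerge \-\-jobs=4 \-\-load\-average=5.0 @world
+.fi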
+.TP
+.BR "\-\-misspell\-suggestions < y | n >"
+Enable or disable misspell suggestions. By default, emerge will show
+a list of packages with similar names when a package doesn't exist.
+The \fIEMERGE_DEFAULT_OPTS\fR variable may be used to disable this
+option by default.
+.TP
+.BR "\-\-newrepo "
+Tells emerge to recompile a package if it is now being pulled from a
+different repository. This option also implies the
+\fB\-\-selective\fR option.
+.TP
+.BR "\-\-newuse " (\fB\-N\fR)
+Tells emerge to include installed packages where USE
+flags have changed since compilation. This option
+also implies the \fB\-\-selective\fR option.
+USE flag changes include:
+
+A USE flag was added to a package.
+.br
+A USE flag was removed from a package.
+.br
+A USE flag was turned on for a package.
+.br
+A USE flag was turned off for a package.
+
+USE flags may be toggled by your profile as well as your USE and package.use
+settings. If you would like to skip rebuilds for which disabled flags have
+been added to or removed from IUSE, see the related
+\fB\-\-changed\-use\fR option. If you would like to skip rebuilds for
+specific packages, see the \fB\-\-exclude\fR option.
+
+NOTE: This option ignores the state of the "test" USE flag, since that flag
+has a special binding to FEATURES="test" (see \fBmake.conf\fR(5) for more
+information about \fBFEATURES\fR settings).
+.TP
+.BR "\-\-noconfmem"
+Causes portage to disregard merge records indicating that a config file
+inside of a \fBCONFIG_PROTECT\fR directory has been merged already. Portage
+will normally merge those files only once to prevent the user from
+dealing with the same config multiple times. This flag will cause the
+file to always be merged.
+.TP
+.BR "\-\-nodeps " (\fB\-O\fR)
+Merges specified packages without merging any dependencies. Note that
+the build may fail if the dependencies aren't satisfied.
+.TP
+.BR "\-\-noreplace " (\fB\-n\fR)
+Skips the packages specified on the command\-line that have already
+been installed. Without this option, any package atoms or package sets
+you specify on the command\-line \fBwill\fR cause Portage to remerge
+the package, even if it is already installed. Note that Portage will
+not remerge dependencies by default. This option can be used to update the
+world file without rebuilding the packages.
+.TP
+.BR "\-\-nospinner"
+Disables the spinner for the session. The spinner is active when the
+terminal device is determined to be a TTY. This flag disables it regardless.
+.TP
+.BR "\-\-usepkg\-exclude " ATOMS
+A space separated list of package names or slot atoms. Emerge will ignore
+matching binary packages.
+.TP
+.BR "\-\-rebuild\-exclude " ATOMS
+A space separated list of package names or slot atoms. Emerge will not rebuild
+matching packages due to \fB\-\-rebuild\fR.
+.TP
+.BR "\-\-rebuild\-ignore " ATOMS
+A space separated list of package names or slot atoms. Emerge will not rebuild
+packages that depend on matching packages due to \fB\-\-rebuild\fR.
+.TP
+.BR "\-\-oneshot " (\fB\-1\fR)
+Emerge as normal, but do not add the packages to the world file
+for later updating.
+.TP
+.BR "\-\-onlydeps " (\fB\-o\fR)
+Only merge (or pretend to merge) the dependencies of the packages
+specified, not the packages themselves.
+.TP
+.BR "\-\-package\-moves [ y | n ]"
+Perform package moves when necessary. This option is enabled
+by default. Package moves are typically applied immediately
+after a \fB\-\-sync\fR action. They are applied in an
+incremental fashion, using only the subset of the history of
+package moves which have been added or modified since the
+previous application of package moves.
+
+\fBWARNING:\fR This option
+should remain enabled under normal circumstances.
+Do not disable it unless you know what you are
+doing.
+
+\fBNOTE:\fR The \fBfixpackages\fR(1) command can be used to
+exhaustively apply the entire history of package moves,
+regardless of whether or not any of the package moves have
+been previously applied.
+.TP
+.BR \-\-pkg\-format
+Specify which binary package format will be created as the target.
+Currently the possible choices are tar and rpm, or their combinations.
+.TP
+.BR \-\-prefix=DIR
+Set the \fBEPREFIX\fR environment variable.
+.TP
+.BR "\-\-pretend " (\fB\-p\fR)
+Instead of actually performing the merge, simply display what *would*
+have been installed if \fB\-\-pretend\fR weren't used. Using \fB\-\-pretend\fR
+is strongly recommended before installing an unfamiliar package. In
+the printout:
+
+.TS
+lI l.
+N new (not yet installed)
+S new SLOT installation (side-by-side versions)
+U updating (to another version)
+D downgrading (best version seems lower)
+r reinstall (forced for some reason, possibly due to slot or sub\-slot)
+R replacing (remerging same version)
+F fetch restricted (must be manually downloaded)
+f fetch restricted (already downloaded)
+I interactive (requires user input)
+B blocked by another package (unresolved conflict)
+b blocked by another package (automatically resolved conflict)
+.TE
+.TP
+.BR "\-\-quiet [ y | n ] (\-q short option)"
+Results may vary, but the general outcome is a reduced or condensed
+output from portage's displays.
+.TP
+.BR "\-\-quiet\-build [ y | n ]"
+Redirect all build output to logs alone, and do not display it on
+stdout. If a build failure occurs for a single package, the build
+log will be automatically displayed on stdout (unless the
+\fI\-\-quiet\-fail\fR option is enabled). If there are multiple
+build failures (due to options like \-\-keep\-going or \-\-jobs),
+then the content of the log files will not be displayed, and instead
+the paths of the log files will be displayed together with the
+corresponding die messages.
+Note that interactive packages currently force all build output to
+be displayed on stdout. This issue can be temporarily avoided
+by specifying \fI\-\-accept\-properties=\-interactive\fR.
+.TP
+.BR "\-\-quiet\-fail [ y | n ]"
+Suppresses display of the build log on stdout when build output is hidden
+due to options such as \fI\-\-jobs\fR, \fI\-\-quiet\fR, or
+\fI\-\-quiet\-build\fR. Only the die message and the path of the build log
+will be displayed on stdout.
+.TP
+.BR "\-\-quiet\-repo\-display"
+In the package merge list display, suppress ::repository output, and
+instead use numbers to indicate which repositories packages come from.
+.TP
+.BR \-\-quiet\-unmerge\-warn
+Disable the warning message that's shown prior to
+\fB\-\-unmerge\fR actions. This option is intended
+to be set in the \fBmake.conf\fR(5)
+\fBEMERGE_DEFAULT_OPTS\fR variable.
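+
+For example, in \fBmake.conf\fR(5):
+.nf
+    EMERGE_DEFAULT_OPTS="\-\-quiet\-unmerge\-warn"
+.fi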
+.TP
+.BR "\-\-read-news [ y | n ]"
+Offer to read news via eselect if there are unread news items.
+.TP
+.BR "\-\-rebuild\-if\-new\-slot [ y | n ]"
+Automatically rebuild or reinstall packages when slot/sub\-slot :=
+operator dependencies can be satisfied by a newer slot, so that
+older package slots will become eligible for removal by the
+\-\-depclean action as soon as possible. This option only
+affects packages that specify slot/sub\-slot := dependencies
+which are supported beginning with \fBEAPI 5\fR.
+Since this option requires
+checking of reverse dependencies, it enables \-\-complete\-graph
+mode whenever a new slot is installed. This option is enabled by
+default.
+
+NOTE: If you want to skip all rebuilds involving slot\-operator
+dependencies (including those that involve sub\-slot changes alone),
+then \fI\-\-ignore\-built\-slot\-operator\-deps=y\fR is the option
+that you are looking for, since \fI\-\-rebuild\-if\-new\-slot\fR
+does not affect rebuilds triggered by sub\-slot changes alone.
+.TP
+.BR "\-\-rebuild\-if\-new\-rev [ y | n ]"
+Rebuild packages when build\-time dependencies are built from source, if the
+dependency is not already installed with the same version and revision.
+.TP
+.BR "\-\-rebuild\-if\-new\-ver [ y | n ]"
+Rebuild packages when build\-time dependencies are built from source, if the
+dependency is not already installed with the same version. Revision numbers
+are ignored.
+.TP
+.BR "\-\-rebuild\-if\-unbuilt [ y | n ]"
+Rebuild packages when build\-time dependencies are built from source.
+.TP
+.BR "\-\-rebuilt\-binaries [ y | n ]"
+Replace installed packages with binary packages that have
+been rebuilt. Rebuilds are detected by comparison of
+BUILD_TIME package metadata. This option is enabled
+automatically when using binary packages
+(\fB\-\-usepkgonly\fR or \fB\-\-getbinpkgonly\fR) together with
+\fB\-\-update\fR and \fB\-\-deep\fR.
+.TP
+.BR "\-\-rebuilt\-binaries\-timestamp=TIMESTAMP"
+This option modifies emerge's behaviour only if
+\fB\-\-rebuilt\-binaries\fR is given. Only binary packages whose
+BUILD_TIME is larger than the given TIMESTAMP and larger than that
+of the installed package will be considered by the
+rebuilt\-binaries logic.
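+
+For example, assuming GNU \fBdate\fR(1) and an illustrative @world target,
+to consider only binaries rebuilt during the last two weeks:
+.nf
+    emerge \-\-update \-\-deep \-\-usepkgonly \-\-rebuilt\-binaries \e
+        \-\-rebuilt\-binaries\-timestamp=$(date \-d '2 weeks ago' +%s) @world
+.fi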
+.TP
+.BR "\-\-reinstall changed\-use"
+This is an alias for \fB\-\-changed\-use\fR.
+.TP
+.BR "\-\-reinstall\-atoms " ATOMS
+A space separated list of package names or slot atoms. Emerge will treat
+matching packages as if they are not installed, and reinstall them if
+necessary.
+.TP
+.BR \-\-root=DIR
+Set the \fBROOT\fR environment variable.
+.TP
+.BR "\-\-root\-deps[=rdeps]"
+If no argument is given then build\-time dependencies of packages for
+\fBROOT\fR are installed to \fBROOT\fR instead of /.
+If the \fBrdeps\fR argument is given then discard all build\-time dependencies
+of packages for \fBROOT\fR.
+This option is only meaningful when used together with \fBROOT\fR and it should
+not be enabled under normal circumstances!
+
+Does not affect EAPIs that support \fBHDEPEND\fR.
+Experimental \fBEAPI 5-hdepend\fR provides \fBHDEPEND\fR as a new
+means to adjust installation into "\fI/\fR" and \fBROOT\fR.
+If ebuilds using EAPIs which \fIdo not\fR support \fBHDEPEND\fR are built in
+the same \fBemerge\fR run as those using EAPIs which \fIdo\fR support
+\fBHDEPEND\fR, this option affects only the former.
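+
+For example, when populating a separate root (the path and atom are
+illustrative):
+.nf
+    ROOT=/mnt/target emerge \-\-root\-deps=rdeps sys\-apps/busybox
+.fi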
+.TP
+.BR "\-\-select [ y | n ] (\-w short option)"
+Add specified packages to the world set (inverse of
+\fB\-\-oneshot\fR). This is useful if you want to
+use \fBEMERGE_DEFAULT_OPTS\fR to make
+\fB\-\-oneshot\fR behavior default.
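+
+For example, with \-\-oneshot made the default in \fBmake.conf\fR(5), the
+atom below (illustrative) is still added to the world set:
+.nf
+    EMERGE_DEFAULT_OPTS="\-\-oneshot"
+    emerge \-\-select app\-shells/zsh
+.fi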
+.TP
+.BR "\-\-selective [ y | n ]"
+This is identical to the \fB\-\-noreplace\fR option.
+Some options, such as \fB\-\-update\fR, imply \fB\-\-selective\fR.
+Use \fB\-\-selective=n\fR if you want to forcefully disable
+\fB\-\-selective\fR, regardless of options like \fB\-\-changed\-use\fR,
+\fB\-\-newuse\fR, \fB\-\-noreplace\fR, or \fB\-\-update\fR.
+.TP
+.BR "\-\-skipfirst"
+This option is only valid when used with \fB\-\-resume\fR. It removes the
+first package in the resume list. Dependencies are recalculated for
+remaining packages and any that have unsatisfied dependencies or are
+masked will be automatically dropped. Also see the related
+\fB\-\-keep\-going\fR option.
+.TP
+.BR "\-\-tree " (\fB\-t\fR)
+Shows the dependency tree for the given target by indenting dependencies.
+This is only really useful in combination with \fB\-\-emptytree\fR or
+\fB\-\-update\fR and \fB\-\-deep\fR.
+.TP
+.BR "\-\-unordered\-display"
+By default the displayed merge list is sorted using the
+order in which the packages will be merged. When
+\fB\-\-tree\fR is used together with this option, this
+constraint is removed, hopefully leading to a more
+readable dependency tree.
+.TP
+.BR "\-\-update " (\fB\-u\fR)
+Updates packages to the best version available, which may
+not always be the highest version number due to masking
+for testing and development. Package atoms specified on
+the command line are greedy, meaning that unspecific
+atoms may match multiple versions of slotted packages.
+.TP
+.BR "\-\-use\-ebuild\-visibility [ y | n ]"
+Use unbuilt ebuild metadata for visibility
+checks on built packages.
+.TP
+.BR "\-\-useoldpkg\-atoms " ATOMS
+A space separated list of package names or slot atoms. Emerge will prefer
+matching binary packages over newer unbuilt packages.
+.TP
+.BR "\-\-usepkg [ y | n ] (\-k short option)"
+Tells emerge to use binary packages (from $PKGDIR) if they are available, thus
+possibly avoiding some time\-consuming compiles. This option is useful for CD
+installs; you can export PKGDIR=/mnt/cdrom/packages and then use this option to
+have emerge "pull" binary packages from the CD in order to satisfy
+dependencies.
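+
+For example, following the CD scenario above (the atom is illustrative):
+.nf
+    export PKGDIR=/mnt/cdrom/packages
+    emerge \-\-usepkg app\-editors/nano
+.fi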
+.TP
+.BR "\-\-usepkgonly [ y | n ] (\-K short option)"
+Tells emerge to only use binary packages (from $PKGDIR). All the binary
+packages must be available at the time of dependency calculation or emerge
+will simply abort. Portage does not use ebuild repositories when calculating
+dependency information so all masking information is ignored.
+.TP
+.BR "\-\-verbose [ y | n ] (\-v short option)"
+Tell emerge to run in verbose mode. Currently this flag causes emerge to print
+out GNU info errors, if any, and to show the USE flags that will be used for
+each package when pretending. The following symbols are affixed to USE flags
+in order to indicate their status:
+
+.TS
+l l l
+___
+l l l.
+Symbol Location Meaning
+
+- prefix not enabled (either disabled or removed)
+* suffix transition to or from the enabled state
+% suffix newly added or removed
+() circumfix forced, masked, or removed
+{} circumfix state is bound to FEATURES settings
+.TE
+.TP
+.BR "\-\-verbose\-conflicts"
+Make slot conflicts more verbose. Note that this may in some cases output
+hundreds of packages for slot conflicts.
+.TP
+.BR "\-\-verbose\-main\-repo\-display"
+In the package merge list display, print ::repository even for the main repository.
+.TP
+.BR "\-\-verbose\-slot\-rebuilds [ y | n ]"
+Turns on/off the extra emerge output to list which packages are causing rebuilds.
+The default is set to "y" (on).
+.TP
+.BR "\-\-with\-bdeps < y | n >"
+In dependency calculations, pull in build time dependencies
+that are not strictly required. This defaults to \'n\' for
+installation actions, meaning they will not be installed, and
+\'y\' for the \fB\-\-depclean\fR action, meaning they will not be removed.
+This setting can be added to
+\fBEMERGE_DEFAULT_OPTS\fR (see make.conf(5)) and later overridden via the
+command line.
+.SH "ENVIRONMENT OPTIONS"
+.TP
+\fBEPREFIX\fR = \fI[path]\fR
+Use \fBEPREFIX\fR to specify the target prefix to be used for merging packages
+or ebuilds. This variable can be set via the \fB\-\-prefix\fR
+option or in \fBmake.conf\fR(5) (the command line overrides other settings).
+.br
+Defaults to the prefix where portage is currently installed.
+.TP
+\fBROOT\fR = \fI[path]\fR
+Use \fBROOT\fR to specify the target root filesystem to be used for
+merging packages or ebuilds. This variable can be set via the \fB\-\-root\fR
+option or in \fBmake.conf\fR(5) (the command line overrides other settings).
+.br
+Defaults to /.
+.TP
+\fBPORTAGE_CONFIGROOT\fR = \fI[path]\fR
+Use \fBPORTAGE_CONFIGROOT\fR to specify the location for various portage
+configuration files
+(see \fBFILES\fR for a detailed list of configuration files). This variable
+can be set via the \fB\-\-config\-root\fR option.
+.br
+Defaults to /.
+.SH "OUTPUT"
+When utilizing \fBemerge\fR with the \fB\-\-pretend\fR and \fB\-\-verbose\fR
+flags, the output may be a little hard to understand at first. This section
+explains the abbreviations.
+.TP
+.B [blocks B ] app\-text/dos2unix ("app\-text/dos2unix" is blocking \
+app\-text/hd2u\-0.8.0)
+Dos2unix is Blocking hd2u from being emerged. Blockers are defined when
+two packages will clobber each other's files, or otherwise cause some form
+of breakage in your system. However, blockers usually do not need to be
+simultaneously emerged because they usually provide the same functionality.
+.TP
+.B [ebuild N ] app\-games/qstat\-25c
+Qstat is New to your system, and will be emerged for the first time.
+.TP
+.B [ebuild NS ] dev-libs/glib-2.4.7
+You already have a version of glib installed, but a 'new' version in
+a different SLOT is available.
+.TP
+.B [ebuild R ] sys\-apps/sed\-4.0.5
+Sed 4.0.5 has already been emerged, but if you run the command, then
+portage will Re\-emerge the specified package (sed in this case).
+.TP
+.B [ebuild F ] media\-video/realplayer\-8\-r6
+The realplayer package requires that you Fetch the sources manually.
+When you attempt to emerge the package, if the sources are not found,
+then portage will halt and you will be provided with instructions on how
+to download the required files.
+.TP
+.B [ebuild f ] media\-video/realplayer\-8\-r6
+The realplayer package's files are already downloaded.
+.TP
+.B [ebuild U ] net\-fs/samba\-2.2.8_pre1 [2.2.7a]
+Samba 2.2.7a has already been emerged and can be Updated to version
+2.2.8_pre1.
+.TP
+.B [ebuild UD] media\-libs/libgd\-1.8.4 [2.0.11]
+Libgd 2.0.11 is already emerged, but if you run the command, then
+portage will Downgrade to version 1.8.4 for you.
+.br
+This may occur if a newer version of a package has been masked because it is
+broken or it creates a security risk on your system and a fix has not been
+released yet.
+.br
+Another reason this may occur is if a package you are trying to emerge requires
+an older version of a package in order to emerge successfully. In this case,
+libgd 2.x is incompatible with libgd 1.x. This means that packages that were
+created with libgd 1.x will not compile with 2.x, so libgd must be
+downgraded before they can be emerged.
+.TP
+.B [ebuild U ] sys\-devel/distcc\-2.16 [2.13\-r1] USE="ipv6* \-gtk \-qt%"
+Here we see that the make.conf variable \fBUSE\fR affects how this package is
+built. In this example, ipv6 optional support is enabled and both gtk and qt
+support are disabled. The asterisk following ipv6 indicates that ipv6 support
+was disabled the last time this package was installed. The percent sign
+following qt indicates that the qt option has been added to the package since
+it was last installed. For information about all \fBUSE\fR symbols, see the
+\fB\-\-verbose\fR option documentation above.
+.br
+\fB*Note:\fR Flags that haven't changed since the last install are only
+displayed when you use the \fB\-\-pretend\fR and \fB\-\-verbose\fR options.
+Using the \fB\-\-quiet\fR option will prevent all information from being
+displayed.
+.TP
+.B [ebuild r U ] dev\-libs/icu\-50.1.1:0/50.1.1 [50.1\-r2:0/50.1]
+Icu 50.1\-r2 has already been emerged and can be Updated to version
+50.1.1. The \fBr\fR symbol indicates that a sub\-slot change (from 50.1
+to 50.1.1 in this case) will force packages having slot\-operator
+dependencies on it to be rebuilt (as libxml2 will be rebuilt in the next
+example).
+.TP
+.B [ebuild rR ] dev\-libs/libxml2\-2.9.0\-r1:2 USE="icu"
+Libxml2 2.9.0\-r1 has already been emerged, but if you run the command,
+then portage will Re\-emerge it in order to satisfy a slot\-operator
+dependency which forces it to be rebuilt when the icu sub\-slot changes
+(as it changed in the previous example).
+.TP
+.B [ebuild U *] sys\-apps/portage\-2.2.0_alpha6 [2.1.9.25]
+Portage 2.1.9.25 is installed, but if you run the command, then
+portage will upgrade to version 2.2.0_alpha6. In this case,
+the \fB*\fR symbol is displayed, in order to indicate that version
+2.2.0_alpha6 is masked by a missing keyword. This type of masking
+display is disabled by the \fB\-\-quiet\fR option if the
+\fB\-\-verbose\fR option is not enabled simultaneously.
+The following symbols are used to indicate various types
+of masking:
+.TS
+l l
+__
+c l.
+Symbol Mask Type
+
+# package.mask
+* missing keyword
+~ unstable keyword
+.TE
+
+\fBNOTE:\fR The unstable keyword symbol (~) will not be shown in cases
+in which the corresponding unstable keywords have been accepted
+globally via \fBACCEPT_KEYWORDS\fR.
+.SH "NOTES"
+You should almost always precede any package install or update attempt with a
+\fB\-\-pretend\fR install or update. This lets you see how much will be
+done, and shows you any blocking packages that you will have to rectify.
+This goes doubly so for the \fBsystem\fR and \fBworld\fR sets, which can
+update a large number of packages if the portage tree has been particularly
+active.
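+.LP
+For example, a common pre\-flight check before a world update:
+.nf
+    emerge \-\-pretend \-\-update \-\-deep @world
+.fi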
+.LP
+You will also typically want to use \fB\-\-update\fR, which ignores packages
+that are already fully updated and updates those that are not.
+.LP
+When you install a package with uninstalled dependencies and do
+not explicitly state those dependencies in the list of parameters,
+they will not be added to the world file. If you want them to be
+detected for world updates, make sure to explicitly list them as
+parameters to \fBemerge\fR.
+.LP
+\fBUSE variables\fR may be specified on the command line to
+override those specified in the default locations, letting you
+avoid using some dependencies you may not want to have. \fBUSE
+flags specified on the command line are NOT remembered\fR. For
+example, \fBenv USE="\-X \-gnome" emerge mc\fR will emerge mc with
+those USE settings (on Bourne-compatible shells you may omit the \fBenv\fR
+part). If you want those USE settings to be more
+permanent, you can put them in /etc/portage/package.use instead.
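+.LP
+For example, to make those settings permanent for mc alone, a line such as
+the following could be added to /etc/portage/package.use:
+.nf
+    app\-misc/mc \-X \-gnome
+.fi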
+.LP
+If \fBemerge \-\-update @system\fR or \fBemerge \-\-update @world\fR
+fails with an error message, it may be that an ebuild uses some
+newer feature not present in this version of \fBemerge\fR. You
+can use \fBemerge \-\-update portage\fR to upgrade to the latest
+version, which should support any necessary new features.
+.SH "MASKED PACKAGES"
+\fINOTE: Please use caution when using development packages. Problems
+and bugs resulting from misusing masked packages drain Gentoo
+developer time. Please be sure you are capable of handling any problems
+that may ensue.\fR
+.LP
+Masks in \fBportage\fR have many uses: they allow a
+testing period where the packages can be used in live machines; they
+prevent the use of a package when it will fail; and they mask existing
+packages that are broken or could pose a security risk. Read below
+to find out how to unmask in various cases. Also note that if you give
+\fBemerge\fR an ebuild, then all forms of masking will be ignored and
+\fBemerge\fR will attempt to emerge the package.
+.TP
+.BR backtracking
+When packages are masked for \fBbacktracking\fR, it means that the dependency
+resolver has temporarily masked them in order to avoid dependency conflicts
+and/or unsatisfied dependencies. This type of mask is typically accompanied
+by a message about a missed package update which has been skipped in order to
+avoid dependency conflicts and/or unsatisfied dependencies.
+.TP
+.BR package.mask
+The \fBpackage.mask\fR file primarily blocks the use of packages that cause
+problems or are known to have issues on different systems. It resides in
+\fI/usr/portage/profiles\fR.
+.TP
+.BR CHOST
+Use the \fBACCEPT_CHOSTS\fR variable in \fBmake.conf\fR(5) to control
+\fBCHOST\fR acceptance.
+.TP
+.BR EAPI
+The \fBEAPI\fR variable in an \fBebuild\fR(5) file is used to mask packages
+that are not supported by the current version of portage. Packages masked by
+\fBEAPI\fR can only be installed after portage has been upgraded.
+.TP
+.BR KEYWORDS
+The \fBKEYWORDS\fR variable in an \fBebuild\fR file is also used for masking
+a package still in testing. There are architecture\-specific keywords for
+each package that let \fBportage\fR know which systems are compatible with
+the package. Packages which compile on an architecture, but have not been
+proven to be "stable", are masked with a tilde (\fB~\fR) in front of the
+architecture name. \fBemerge\fR examines the \fBACCEPT_KEYWORDS\fR environment
+variable to allow or disallow the emerging of a package masked by
+\fBKEYWORDS\fR. To inform \fBemerge\fR that it should build these 'testing'
+versions of packages, you should update your
+\fI/etc/portage/package.accept_keywords\fR
+file to list the packages for which you want the
+\'testing\' version. See \fBportage\fR(5) for more information.
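+
+For example, a line such as the following (the atom and keyword are
+illustrative) accepts the testing version of a single package:
+.nf
+    app\-editors/vim ~amd64
+.fi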
+.TP
+.BR LICENSE
+The \fBLICENSE\fR variable in an \fBebuild\fR file can be used to mask
+packages based on licensing restrictions. \fBemerge\fR examines the
+\fBACCEPT_LICENSE\fR environment variable to allow or disallow the emerging
+of a package masked by \fBLICENSE\fR. See \fBmake.conf\fR(5) for information
+about \fBACCEPT_LICENSE\fR, and see \fBportage\fR(5) for information about
+\fI/etc/portage/package.license\fR.
+.TP
+.BR PROPERTIES
+The \fBPROPERTIES\fR variable in an \fBebuild\fR file can be used to mask
+packages based on properties restrictions. \fBemerge\fR examines the
+\fBACCEPT_PROPERTIES\fR environment variable to allow or disallow the emerging
+of a package masked by \fBPROPERTIES\fR. See \fBmake.conf\fR(5) for information
+about \fBACCEPT_PROPERTIES\fR, and see \fBportage\fR(5) for information about
+\fI/etc/portage/package.properties\fR. Use the \fB\-\-accept\-properties\fR
+option to temporarily override \fBACCEPT_PROPERTIES\fR.
+.TP
+.BR RESTRICT
+The \fBRESTRICT\fR variable in an \fBebuild\fR file can be used to mask
+packages based on RESTRICT tokens. \fBemerge\fR examines the
+\fBACCEPT_RESTRICT\fR environment variable to allow or disallow the emerging
+of a package masked by \fBRESTRICT\fR. See \fBmake.conf\fR(5) for information
+about \fBACCEPT_RESTRICT\fR, and see \fBportage\fR(5) for information about
+\fI/etc/portage/package.accept_restrict\fR. Use the \fB\-\-accept\-restrict\fR
+option to temporarily override \fBACCEPT_RESTRICT\fR.
+.SH "CONFIGURATION FILES"
+Portage has a special feature called "config file protection". The purpose of
+this feature is to prevent new package installs from clobbering existing
+configuration files. By default, config file protection is turned on for /etc
+and the KDE configuration dirs; more may be added in the future.
+.LP
+When Portage installs a file into a protected directory tree like /etc, any
+existing files will not be overwritten. If a file of the same name already
+exists, Portage will change the name of the to\-be\-installed file from 'foo'
+to \'._cfg0000_foo\'. If \'._cfg0000_foo\' already exists, this name becomes
+\'._cfg0001_foo\', etc. In this way, existing files are not overwritten,
+allowing the administrator to manually merge the new config files and avoid any
+unexpected changes.
+.LP
+In addition to protecting overwritten files, Portage will not delete any files
+from a protected directory when a package is unmerged. While this may be a
+little bit untidy, it does prevent potentially valuable config files from being
+deleted, which is of paramount importance.
+.LP
+Protected directories are set using the \fICONFIG_PROTECT\fR variable, normally
+defined in make.globals. Directory exceptions to the CONFIG_PROTECTed
+directories can be specified using the \fICONFIG_PROTECT_MASK\fR variable.
+To find files that need to be updated in /etc, type \fBfind /etc \-name
+\[aq]._cfg????_*\[aq]\fR.
+.LP
+You can disable this feature by setting \fICONFIG_PROTECT="\-*"\fR in
+\fBmake.conf\fR(5).
+Then, Portage will mercilessly auto\-update your config files. Alternatively,
+you can leave Config File Protection on but tell Portage that it can overwrite
+files in certain specific /etc subdirectories. For example, if you wanted
+Portage to automatically update your rc scripts and your wget configuration,
+but didn't want any other changes made without your explicit approval, you'd
+add this to \fBmake.conf\fR(5):
+.LP
+.I CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
+.LP
+Tools such as dispatch\-conf, cfg\-update, and etc\-update are also available
+to aid in the merging of these files. They provide interactive merging and can
+auto\-merge trivial changes.
+.LP
+When an offset prefix (\fBEPREFIX\fR) is active, all paths in
+\fBCONFIG_PROTECT\fR and \fBCONFIG_PROTECT_MASK\fR are prefixed with the
+offset by Portage before they are considered. Hence, these paths never
+contain the offset prefix, and the variables can be defined in
+offset-unaware locations, such as the profiles.
+.SH "REPORTING BUGS"
+Please report any bugs you encounter through our website:
+.LP
+\fBhttp://bugs.gentoo.org/\fR
+.LP
+Please include the output of \fBemerge \-\-info\fR when you submit your
+bug report.
+.SH "AUTHORS"
+.nf
+Daniel Robbins <drobbins@gentoo.org>
+Geert Bevin <gbevin@gentoo.org>
+Achim Gottinger <achim@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Phil Bordelon <phil@thenexusproject.org>
+Mike Frysinger <vapier@gentoo.org>
+Marius Mauch <genone@gentoo.org>
+Jason Stubbs <jstubbs@gentoo.org>
+Brian Harring <ferringb@gmail.com>
+Zac Medico <zmedico@gentoo.org>
+Fabian Groffen <grobian@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "FILES"
+Here is a common list of files you will probably be interested in. For a
+complete listing, please refer to the \fBportage\fR(5) man page.
+.TP
+.B /usr/share/portage/config/sets/
+Contains the default set configuration.
+.TP
+.B /var/lib/portage/world
+Contains a list of all user\-specified packages. You can safely edit
+this file, adding packages that you want to be considered in \fBworld\fR
+set updates and removing those that you do not want to be considered.
+.TP
+.B /var/lib/portage/world_sets
+This is like the world file but instead of package atoms it contains
+packages sets which always begin with the \fB@\fR character. Use
+\fB/etc/portage/sets/\fR to define user package sets.
+.TP
+.B /etc/portage/make.conf
+Contains variables for the build process, overriding those in
+\fBmake.globals\fR.
+.TP
+.B /etc/portage/color.map
+Contains variables customizing colors.
+.TP
+.B /etc/portage/sets/
+Contains user package set definitions (see \fBportage\fR(5)).
+.TP
+.B /etc/dispatch\-conf.conf
+Contains settings to handle automatic updates/backups of configuration
+files.
+.TP
+.B /etc/portage/make.profile/make.defaults
+Contains profile\-specific variables for the build process. \fBDo not
+edit this file\fR.
+.TP
+.B /usr/portage/profiles/use.desc
+Contains the master list of USE flags with descriptions of their
+functions. \fBDo not edit this file\fR.
+.TP
+.B /etc/portage/make.profile/virtuals
+Contains a list of default packages used to resolve virtual dependencies.
+\fBDo not edit this file\fR.
+.TP
+.B /etc/portage/make.profile/packages
+Contains a list of packages used for the base system. The \fBsystem\fR
+and \fBworld\fR sets consult this file. \fBDo not edit this file\fR.
+.TP
+.B /usr/share/portage/config/make.globals
+Contains the default variables for the build process. \fBDo not edit
+this file\fR.
+.TP
+.B /var/log/emerge.log
+Contains a log of all emerge output. This file is always appended to, so if you
+want to clean it, you need to do so manually.
+.TP
+.B /var/log/emerge-fetch.log
+Contains a log of all the fetches in the previous emerge invocation.
+.TP
+.B
+/var/log/portage/elog/summary.log
+Contains the emerge summaries. Installs \fI/etc/logrotate/elog-save-summary\fR.
+.SH "SEE ALSO"
+.BR "emerge \-\-help",
+.BR quickpkg (1),
+.BR ebuild (1),
+.BR ebuild (5),
+.BR make.conf (5),
+.BR color.map (5),
+.BR portage (5)
+.LP
+A number of helper applications reside in \fI/usr/lib/portage/bin\fR.
+.LP
+The \fBapp\-portage/gentoolkit\fR package contains useful scripts such as
+\fBequery\fR (a package query tool).
diff --git a/usr/share/man/man1/env-update.1 b/usr/share/man/man1/env-update.1
new file mode 100644
index 0000000..70c2548
--- /dev/null
+++ b/usr/share/man/man1/env-update.1
@@ -0,0 +1,28 @@
+.TH "ENV-UPDATE" "1" "Aug 2008" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+env\-update \- updates environment settings automatically
+.SH "SYNOPSIS"
+\fBenv\-update\fR \fI[options]\fR
+.SH "DESCRIPTION"
+.B env\-update
+reads the files in \fI/etc/env.d\fR and automatically generates
+\fI/etc/profile.env\fR and \fI/etc/ld.so.conf\fR. Then \fBldconfig\fR(8)
+is run to update \fI/etc/ld.so.cache\fR. \fBenv-update\fR is run by
+\fBemerge\fR(1) automatically after each package merge. Also, if you
+make changes to \fI/etc/env.d\fR, you should run \fBenv-update\fR
+yourself for changes to take effect immediately. Note that this would
+only affect new processes. In order for the changes to affect your
+active shell, you will probably have to run \fIsource /etc/profile\fR
+first.
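+.PP
+For example, after editing a file in \fI/etc/env.d\fR:
+.nf
+    env\-update
+    source /etc/profile
+.fi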
+.SH "OPTIONS"
+.TP
+.B \-\-no\-ldconfig
+Do not run \fBldconfig\fR (and thus skip rebuilding the \fIld.so.cache\fR,
+etc...).
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+Daniel Robbins <drobbins@gentoo.org>
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR ldconfig (8)
diff --git a/usr/share/man/man1/etc-update.1 b/usr/share/man/man1/etc-update.1
new file mode 100644
index 0000000..b6c4f64
--- /dev/null
+++ b/usr/share/man/man1/etc-update.1
@@ -0,0 +1,52 @@
+.TH "ETC-UPDATE" "1" "Mar 2012" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+etc\-update \- handle configuration file updates
+.SH "SYNOPSIS"
+.BR etc\-update
+[\fIoptions\fR] [\fI--automode <mode>\fR] [\fIpaths to scan\fR]
+.SH "DESCRIPTION"
+\fIetc\-update\fR is supposed to be run after merging a new package to see if
+there are updates to the configuration files. If a new
+configuration file will override an old one,
+\fIetc\-update\fR will prompt the user for a decision.
+.PP
+\fIetc\-update\fR will check all directories specified on the command
+line. If no paths are given, then the \fICONFIG_PROTECT\fR variable
+will be used. All config files found in \fICONFIG_PROTECT_MASK\fR will
+automatically be updated for you by \fIetc\-update\fR.
+See \fBmake.conf\fR(5) for more information.
+.PP
+\fIetc\-update\fR respects the normal \fIPORTAGE_CONFIGROOT\fR and
+\fIEROOT\fR variables for finding the aforementioned config protect variables.
+.SH "OPTIONS"
+.TP
+.BR \-d ", " \-\-debug
+Run with shell tracing enabled.
+.TP
+.BR \-h ", " \-\-help
+Surprisingly, show the help output.
+.TP
+.BR \-p ", " \-\-preen
+Automerge trivial changes only and quit.
+.TP
+.BR \-v ", " \-\-verbose
+Show settings and important decision info while running.
+.TP
+.BR "\-\-automode <mode>"
+Select one of the automatic merge modes. Valid modes are: \-3 \-5 \-7 \-9.
+See the \fI\-\-help\fR text for more details.
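+
+For example (the mode choice is illustrative):
+.nf
+    etc\-update \-\-automode \-5
+.fi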
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Jochem Kossen and Leo Lipelis
+Karl Trygve Kalleberg <karltk@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/etc\-update.conf
+Configuration settings for \fIetc\-update\fR are stored here.
+.SH "SEE ALSO"
+.BR dispatch\-conf (1),
+.BR make.conf (5)
diff --git a/usr/share/man/man1/quickpkg.1 b/usr/share/man/man1/quickpkg.1
new file mode 100644
index 0000000..5996250
--- /dev/null
+++ b/usr/share/man/man1/quickpkg.1
@@ -0,0 +1,73 @@
+.TH "QUICKPKG" "1" "Dec 2012" "Portage 2.2.14-prefix" "Portage"
+.SH NAME
+quickpkg \- creates portage packages
+.SH SYNOPSIS
+.B quickpkg [options] <list of packages or package\-sets>
+.SH DESCRIPTION
+.I quickpkg
+can be used to quickly create a package for portage by
+utilizing the files already on your filesystem. This package
+then can be emerged on any system. To review syntax for
+emerging binary packages, review \fBemerge\fR(1). The upside
+of this process is that you don't have to wait for the package
+to unpack, configure, compile, and install before you can have
+the package ready to go. The downside is that the package will
+contain the files that exist on your filesystem even if they have been
+modified since they were first installed.
+.br
+The packages, after being created, will be placed in \fBPKGDIR\fR.
+This variable is defined in \fBmake.conf\fR(5) and defaults to
+/usr/portage/packages.
+.SH OPTIONS
+.TP
+.B <list of packages or package\-sets>
+Each package in the list can be of two forms. First you can
+give it the full path to the installed entry in the virtual
+database. That is, /var/db/pkg/<CATEGORY>/<PKG-VERSION>/.
+The second form is a portage depend atom or a portage package
+set. The atom or set is of the same form that you would give
+\fBemerge\fR if you wanted to emerge something.
+See \fBebuild\fR(5) for full definition.
+.TP
+.BR "\-\-ignore\-default\-opts"
+Causes the \fIQUICKPKG_DEFAULT_OPTS\fR environment variable to be ignored.
+.TP
+.BR "\-\-include\-config < y | n >"
+Include all files protected by CONFIG_PROTECT (as a security precaution,
+default is 'n').
+.TP
+.BR "\-\-include\-unmodified\-config < y | n >"
+Include files protected by CONFIG_PROTECT that have not been modified
+since installation (as a security precaution, default is 'n').
+.TP
+.BR \-\-umask=UMASK
+The umask used during package creation (default is 0077).
+.SH "EXAMPLES"
+.B quickpkg
+/var/db/pkg/dev-python/pyogg-1.1
+.br
+.B quickpkg
+planeshift
+.br
+.B quickpkg
+=apache-1.3.27-r1
+.br
+.B quickpkg
+=net-www/apache-2*
+.br
+.B quickpkg
+@system
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH AUTHORS
+.nf
+Terry Chan (original author)
+Mike Frysinger <vapier@gentoo.org> (revamped version)
+.fi
+.SH "FILES"
+.TP
+.B /etc/portage/make.conf
+The \fBPKGDIR\fR variable is defined here.
+.SH "SEE ALSO"
+.BR ebuild (5),
+.BR make.conf (5)
diff --git a/usr/share/man/man1/repoman.1 b/usr/share/man/man1/repoman.1
new file mode 100644
index 0000000..4d7e9ff
--- /dev/null
+++ b/usr/share/man/man1/repoman.1
@@ -0,0 +1,403 @@
+.TH "REPOMAN" "1" "Aug 2013" "Portage 2.2.14-prefix" "Portage"
+.SH NAME
+repoman \- Gentoo's program to enforce a minimal level of quality assurance in
+packages added to the portage tree
+.SH SYNOPSIS
+\fBrepoman\fR [\fIoption\fR] [\fImode\fR]
+.SH DESCRIPTION
+.BR "Quality is job zero."
+
+.BR repoman
+checks the quality of ebuild repositories.
+
+Note: \fBrepoman commit\fR only works \fIinside local\fR cvs, git, or
+subversion repositories.
+
+Note: Messages pertaining to specific lines may be inaccurate in the
+presence of continuation lines from use of the \fI\\\fR character in
+BASH.
+.SH OPTIONS
+.TP
+\fB-a\fR, \fB--ask\fR
+Request a confirmation before committing
+.TP
+\fB\-\-digest=<y|n>\fR
+Automatically update Manifest digests for modified files. This
+option triggers a behavior that is very similar to that enabled
+by FEATURES="digest" in \fBmake.conf\fR(5). In order to enable
+this behavior by default for repoman alone, add
+\fB\-\-digest=y\fR to the \fIREPOMAN_DEFAULT_OPTS\fR variable in
+\fBmake.conf\fR(5). The \fBmanifest\-check\fR mode will
+automatically ignore the \-\-digest option.
+
+\fBNOTE:\fR
+This option does not trigger update of digests for Manifest DIST
+entries that already exist. Replacement of existing Manifest
+DIST entries can be forced by using the \fBmanifest\fR mode
+together with the \fB\-\-force\fR option.
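+
+For example, in \fBmake.conf\fR(5):
+.nf
+    REPOMAN_DEFAULT_OPTS="\-\-digest=y"
+.fi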
+.TP
+\fB--force\fR
+Force commit to proceed, regardless of QA issues. For convenience, this option
+causes the most time consuming QA checks to be skipped. The commit message will
+include an indication that this option has been enabled, together with the
+usual portage version stamp.
+
+When used together with \fBmanifest\fR mode, \fB--force\fR causes existing
+digests to be replaced for any files that exist in ${DISTDIR}.
+Existing digests are assumed to be correct for files that would otherwise
+have to be downloaded in order to recompute digests. \fBWARNING:\fR When
+replacing existing digests, it is the user's responsibility to ensure that
+files contained in ${DISTDIR} have the correct identities. Especially beware
+of partially downloaded files.
+.TP
+\fB-q\fR, \fB--quiet\fR
+Be less verbose about extraneous info
+.TP
+\fB-p\fR, \fB--pretend\fR
+Don't commit or fix anything; just show what would be done
+.TP
+\fB-x\fR, \fB--xmlparse\fR
+Forces the metadata.xml parse check to be carried out
+.TP
+\fB-v\fR, \fB--verbose\fR
+Displays every package name while checking
+.TP
+\fB\-\-echangelog=<y|n|force>\fR
+For commit mode, call echangelog if ChangeLog is unmodified (or
+regardless of modification if 'force' is specified). This option
+can be enabled by default for a particular repository by setting
+"update\-changelog = true" in metadata/layout.conf (see
+\fBportage(5)\fR).
+.TP
+\fB\-\-experimental\-inherit=<y|n>\fR
+Enable experimental inherit.missing checks which may misbehave when the
+internal eclass database becomes outdated.
+.TP
+\fB\-\-if\-modified=<y|n>\fR
+Only check packages that have uncommitted modifications
+.TP
+\fB\-i\fR, \fB\-\-ignore\-arches\fR
+Ignore arch-specific failures (where arch != host)
+.TP
+\fB\-\-ignore\-default\-opts\fR
+Do not use the \fIREPOMAN_DEFAULT_OPTS\fR environment variable.
+.TP
+\fB\-I\fR, \fB\-\-ignore\-masked\fR
+Ignore masked packages (not allowed with commit mode)
+.TP
+.BR "\-\-include\-arches " ARCHES
+A space separated list of arches used to filter the selection of
+profiles for dependency checks.
+.TP
+\fB\-d\fR, \fB\-\-include\-dev\fR
+Include dev profiles in dependency checks.
+.TP
+\fB\-e <y|n>\fR, \fB\-\-include\-exp\-profiles=<y|n>\fR
+Include exp profiles in dependency checks.
+.TP
+\fB\-\-unmatched\-removal\fR
+Enable strict checking of package.mask and package.unmask files for
+unmatched removal atoms.
+.TP
+\fB\-\-without\-mask\fR
+Behave as if no package.mask entries exist (not allowed with commit mode)
+.TP
+\fB-m\fR, \fB--commitmsg\fR
+Adds a commit message via the command line
+.TP
+\fB-M\fR, \fB--commitmsgfile\fR
+Adds a commit message from the specified file
+.TP
+\fB-V\fR, \fB--version\fR
+Show version info
+.TP
+\fB-h\fR, \fB--help\fR
+Show this screen
+.SH MODES
+.TP
+.B full
+Scan directory tree for QA issues (full listing)
+.TP
+.B help
+Show this screen
+.TP
+.B scan
+Scan directory tree for QA issues (short listing)
+.TP
+.B fix
+Fix simple QA issues (stray digests, missing digests)
+.TP
+.B manifest
+Generate a Manifest (fetches distfiles if necessary). See the \fB\-\-force\fR
+option if you would like to replace existing distfiles digests.
+.TP
+.B manifest-check
+Check Manifests for missing or incorrect digests
+.TP
+.B commit
+Scan directory tree for QA issues; if OK, commit via cvs
+.SH QA KEYWORDS
+.TP
+.B CVS/Entries.IO_error
+Attempting to commit, and an I/O error was encountered while accessing the
+Entries file
+.TP
+.B DESCRIPTION.missing
+Ebuilds that have a missing or empty DESCRIPTION variable
+.TP
+.B EAPI.definition
+EAPI definition does not conform to PMS section 7.3.1 (first
+non\-comment, non\-blank line). See bug #402167.
+.TP
+.B EAPI.deprecated
+Ebuilds that use features that are deprecated in the current EAPI
+.TP
+.B EAPI.incompatible
+Ebuilds that use features that are only available with a different EAPI
+.TP
+.B EAPI.unsupported
+Ebuilds that have an unsupported EAPI version (you must upgrade portage)
+.TP
+.B HOMEPAGE.missing
+Ebuilds that have a missing or empty HOMEPAGE variable
+.TP
+.B HOMEPAGE.virtual
+Virtuals that have a non-empty HOMEPAGE variable
+.TP
+.B IUSE.invalid
+This ebuild has a variable in IUSE that is not in the use.desc or its
+metadata.xml file
+.TP
+.B IUSE.missing
+This ebuild has a USE conditional which references a flag that is not listed in
+IUSE
+.TP
+.B KEYWORDS.dropped
+Ebuilds that appear to have dropped KEYWORDS for some arch
+.TP
+.B KEYWORDS.invalid
+This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for
+which no valid profile was found
+.TP
+.B KEYWORDS.missing
+Ebuilds that have a missing or empty KEYWORDS variable
+.TP
+.B KEYWORDS.stable
+Ebuilds that have been added directly with stable KEYWORDS
+.TP
+.B KEYWORDS.stupid
+Ebuilds that use KEYWORDS=-* instead of package.mask
+.TP
+.B LICENSE.deprecated
+This ebuild is listing a deprecated license.
+.TP
+.B LICENSE.invalid
+This ebuild lists a license that doesn't exist in portage's licenses/ directory.
+.TP
+.B LICENSE.missing
+Ebuilds that have a missing or empty LICENSE variable
+.TP
+.B LICENSE.syntax
+Syntax error in LICENSE (usually an extra/missing space/parenthesis)
+.TP
+.B LICENSE.virtual
+Virtuals that have a non-empty LICENSE variable
+.TP
+.B LIVEVCS.stable
+Ebuild is a live ebuild (cvs, git, darcs, svn, etc) checkout with stable
+keywords.
+.TP
+.B LIVEVCS.unmasked
+Ebuild is a live ebuild (cvs, git, darcs, svn, etc) checkout but has keywords
+and is not masked in the global package.mask.
+.TP
+.B PDEPEND.suspect
+PDEPEND contains a package that usually only belongs in DEPEND
+.TP
+.B PROVIDE.syntax
+Syntax error in PROVIDE (usually an extra/missing space/parenthesis)
+.TP
+.B RDEPEND.implicit
+RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND
+assignment (prior to EAPI 4)
+.TP
+.B RDEPEND.suspect
+RDEPEND contains a package that usually only belongs in DEPEND
+.TP
+.B PROPERTIES.syntax
+Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)
+.TP
+.B RESTRICT.syntax
+Syntax error in RESTRICT (usually an extra/missing space/parenthesis)
+.TP
+.B SLOT.invalid
+Ebuilds that have a missing or invalid SLOT variable value
+.TP
+.B SRC_URI.mirror
+A uri listed in profiles/thirdpartymirrors is found in SRC_URI
+.TP
+.B changelog.ebuildadded
+An ebuild was added but the ChangeLog was not modified
+.TP
+.B changelog.missing
+Missing ChangeLog files
+.TP
+.B changelog.notadded
+ChangeLogs that exist but have not been added to cvs
+.TP
+.B dependency.bad
+User-visible ebuilds with unsatisfied dependencies (matched against *visible*
+ebuilds)
+.TP
+.B dependency.badindev
+User-visible ebuilds with unsatisfied dependencies (matched against *visible*
+ebuilds) in developing arch
+.TP
+.B dependency.badmasked
+Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds)
+.TP
+.B dependency.badmaskedindev
+Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds) in
+developing arch
+.TP
+.B dependency.badtilde
+Uses the ~ dep operator with a non-zero revision part, which is useless (the
+revision is ignored)
+.TP
+.B dependency.syntax
+Syntax error in dependency string (usually an extra/missing space/parenthesis)
+.TP
+.B dependency.unknown
+Ebuild has a dependency that refers to an unknown package (which may be
+valid if it is a blocker for a renamed/removed package, or is an
+alternative choice provided by an overlay)
+.TP
+.B digest.assumed
+Existing digest must be assumed correct (Package level only)
+.TP
+.B digest.missing
+Some files listed in SRC_URI aren't referenced in the Manifest
+.TP
+.B digest.unused
+Some files listed in the Manifest aren't referenced in SRC_URI
+.TP
+.B ebuild.badheader
+This ebuild has a malformed header
+.TP
+.B ebuild.invalidname
+Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1
+versioning extensions)
+.TP
+.B ebuild.majorsyn
+This ebuild has a major syntax error that may cause the ebuild to fail
+partially or fully
+.TP
+.B ebuild.minorsyn
+This ebuild has a minor syntax error that contravenes gentoo coding style
+.TP
+.B ebuild.namenomatch
+Ebuild files that do not have the same name as their parent directory
+.TP
+.B ebuild.nesteddie
+Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.
+.TP
+.B ebuild.notadded
+Ebuilds that exist but have not been added to cvs
+.TP
+.B ebuild.output
+A simple sourcing of the ebuild produces output; this breaks ebuild policy.
+.TP
+.B ebuild.patches
+PATCHES variable should be a bash array to ensure white space safety
+.TP
+.B ebuild.syntax
+Error generating cache entry for ebuild; typically caused by ebuild syntax
+error or digest verification failure.
+.TP
+.B file.UTF8
+File is not UTF8 compliant
+.TP
+.B file.executable
+Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the
+executable bit
+.TP
+.B file.name
+File/dir name must be composed of only the following chars: a-zA-Z0-9._-+:
+.TP
+.B file.size
+Files in the files directory must be under 20k
+.TP
+.B inherit.missing
+Ebuild uses functions from an eclass but does not inherit it
+.TP
+.B inherit.unused
+Ebuild inherits an eclass but does not use it
+.TP
+.B inherit.deprecated
+Ebuild inherits a deprecated eclass
+.TP
+.B java.eclassesnotused
+With virtual/jdk in DEPEND you must inherit a java eclass. Refer to
+\fIhttp://www.gentoo.org/proj/en/java/java\-devel.xml\fR for more information.
+.TP
+.B manifest.bad
+Manifest has missing or incorrect digests
+.TP
+.B metadata.bad
+Bad metadata.xml files
+.TP
+.B metadata.missing
+Missing metadata.xml files
+.TP
+.B metadata.warning
+Warnings in metadata.xml files
+.TP
+.B repo.eapi.banned
+The ebuild uses an EAPI which is banned by the repository's
+metadata/layout.conf settings.
+.TP
+.B repo.eapi.deprecated
+The ebuild uses an EAPI which is deprecated by the repository's
+metadata/layout.conf settings.
+.TP
+.B IUSE.rubydeprecated
+The ebuild sets a ruby interpreter in USE_RUBY that is no longer available as a ruby target
+.TP
+.B portage.internal
+The ebuild uses an internal Portage function or variable
+.TP
+.B upstream.workaround
+The ebuild works around an upstream bug; an upstream bug report should be
+filed and tracked on bugs.gentoo.org
+.TP
+.B usage.obsolete
+The ebuild makes use of an obsolete construct
+.TP
+.B variable.invalidchar
+A variable contains an invalid character that is not part of the ASCII
+character set.
+.TP
+.B variable.readonly
+Assigning a readonly variable
+.TP
+.B variable.usedwithhelpers
+Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers
+.TP
+.B virtual.oldstyle
+The ebuild PROVIDEs an old-style virtual (see GLEP 37). This is an error
+unless "allow\-provide\-virtuals = true" is set in metadata/layout.conf.
+.TP
+.B virtual.suspect
+Ebuild contains a package that usually should be pulled via virtual/,
+not directly.
+.TP
+.B wxwidgets.eclassnotused
+Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass. Refer to
+bug #305469 for more information.
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH AUTHORS
+.nf
+Daniel Robbins <drobbins@gentoo.org>
+Saleem Abdulrasool <compnerd@gentoo.org>
+.fi
+.SH "SEE ALSO"
+.BR emerge (1)
diff --git a/usr/share/man/man5/color.map.5 b/usr/share/man/man5/color.map.5
new file mode 100644
index 0000000..670cb09
--- /dev/null
+++ b/usr/share/man/man5/color.map.5
@@ -0,0 +1,209 @@
+.TH "COLOR.MAP" "5" "Jul 2013" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+color.map \- custom color settings for Portage
+.SH "SYNOPSIS"
+.B /etc/portage/color.map
+.SH "DESCRIPTION"
+This file contains variables that define color classes used by Portage.
+Portage will check this file first for color class settings. If no setting
+for a given color class is found in /etc/portage/color.map, Portage uses the
+default value defined internally.
+.SH "SYNTAX"
+\fBVARIABLE\fR = \fI[space delimited list of attributes or ansi code
+pattern]\fR
+.TP
+\fBATTRIBUTE\fR = \fI[space delimited list of attributes or ansi code \
+pattern]\fR
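+
+For example (illustrative settings, using attributes from the list below):
+.nf
+    HILITE="teal"
+    WARN="yellow bold"
+.fi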
+.SH "VARIABLES"
+.TP
+\fBNORMAL\fR = \fI"normal"\fR
+Defines color used for some words occurring in contexts other than those below.
+.TP
+\fBBAD\fR = \fI"red"\fR
+Defines color used for some words occurring in bad context.
+.TP
+\fBBRACKET\fR = \fI"blue"\fR
+Defines color used for brackets.
+.TP
+\fBGOOD\fR = \fI"green"\fR
+Defines color used for some words occurring in good context.
+.TP
+\fBHILITE\fR = \fI"teal"\fR
+Defines color used for highlighted words.
+.TP
+\fBINFORM\fR = \fI"darkgreen"\fR
+Defines color used for informational words.
+.TP
+\fBMERGE_LIST_PROGRESS\fR = \fI"yellow"\fR
+Defines color used for numbers indicating merge progress.
+.TP
+\fBPKG_BLOCKER\fR = \fI"red"\fR
+Defines color used for unsatisfied blockers.
+.TP
+\fBPKG_BLOCKER_SATISFIED\fR = \fI"darkblue"\fR
+Defines color used for satisfied blockers.
+.TP
+\fBPKG_MERGE\fR = \fI"darkgreen"\fR
+Defines color used for packages planned to be merged.
+.TP
+\fBPKG_MERGE_SYSTEM\fR = \fI"darkgreen"\fR
+Defines color used for system packages planned to be merged.
+.TP
+\fBPKG_MERGE_WORLD\fR = \fI"green"\fR
+Defines color used for world packages planned to be merged.
+.TP
+\fBPKG_BINARY_MERGE\fR = \fI"purple"\fR
+Defines color used for packages planned to be merged using a binary package.
+.TP
+\fBPKG_BINARY_MERGE_SYSTEM\fR = \fI"purple"\fR
+Defines color used for system packages planned to be merged using a binary
+package.
+.TP
+\fBPKG_BINARY_MERGE_WORLD\fR = \fI"fuchsia"\fR
+Defines color used for world packages planned to be merged using a binary
+package.
+.TP
+\fBPKG_NOMERGE\fR = \fI"darkblue"\fR
+Defines color used for packages not planned to be merged.
+.TP
+\fBPKG_NOMERGE_SYSTEM\fR = \fI"darkblue"\fR
+Defines color used for system packages not planned to be merged.
+.TP
+\fBPKG_NOMERGE_WORLD\fR = \fI"blue"\fR
+Defines color used for world packages not planned to be merged.
+.TP
+\fBPKG_UNINSTALL\fR = \fI"red"\fR
+Defines color used for packages planned to be uninstalled in order
+to resolve conflicts.
+.TP
+\fBPROMPT_CHOICE_DEFAULT\fR = \fI"green"\fR
+Defines color used for the default choice at a prompt.
+.TP
+\fBPROMPT_CHOICE_OTHER\fR = \fI"red"\fR
+Defines color used for a non\-default choice at a prompt.
+.TP
+\fBSECURITY_WARN\fR = \fI"red"\fR
+Defines color used for security warnings.
+.TP
+\fBUNMERGE_WARN\fR = \fI"red"\fR
+Defines color used for unmerge warnings.
+.TP
+\fBWARN\fR = \fI"yellow"\fR
+Defines color used for warnings.
+.SH "LIST OF VALID ATTRIBUTES"
+.TP
+.B Foreground colors
+.RS
+.TP
+.B black
+.TP
+.B darkgray
+.TP
+.B darkred
+.TP
+.B red
+.TP
+.B darkgreen
+.TP
+.B green
+.TP
+.B brown
+.TP
+.B yellow
+.TP
+.B darkyellow
+.TP
+.B darkblue
+.TP
+.B blue
+.TP
+.B purple
+.TP
+.B fuchsia
+.TP
+.B teal
+.TP
+\fBturquoise\fR = \fBdarkteal\fR
+.TP
+.B lightgray
+.TP
+.B white
+.RE
+.TP
+.B Background colors
+.RS
+.TP
+.B bg_black
+.TP
+.B bg_darkred
+.TP
+.B bg_darkgreen
+.TP
+\fBbg_brown\fR = \fBbg_darkyellow\fR
+.TP
+.B bg_darkblue
+.TP
+.B bg_purple
+.TP
+.B bg_teal
+.TP
+.B bg_lightgray
+.RE
+.TP
+.B Other attributes
+.RS
+.TP
+.B normal
+.TP
+.B no\-attr
+.TP
+.B reset
+.TP
+.B bold
+.TP
+.B faint
+.TP
+.B standout
+.TP
+.B no\-standout
+.TP
+.B underline
+.TP
+.B no\-underline
+.TP
+.B blink
+.TP
+.B no\-blink
+.TP
+.B overline
+.TP
+.B no\-overline
+.TP
+.B reverse
+.TP
+.B no\-reverse
+.TP
+.B invisible
+.RE
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/portage/color.map
+Contains variables customizing colors.
+.TP
+.B /etc/portage/make.conf
+Contains other variables.
+.SH "SEE ALSO"
+.BR console_codes (4),
+.BR make.conf (5),
+.BR portage (5),
+.BR emerge (1),
+.BR ebuild (1),
+.BR ebuild (5)
+.TP
+The \fI/usr/lib/portage/pym/portage/output.py\fR Python module.
diff --git a/usr/share/man/man5/ebuild.5 b/usr/share/man/man5/ebuild.5
new file mode 100644
index 0000000..1be5138
--- /dev/null
+++ b/usr/share/man/man5/ebuild.5
@@ -0,0 +1,1576 @@
+.TH "EBUILD" "5" "Jan 2014" "Portage 2.2.14-prefix" "Portage"
+
+.SH "NAME"
+ebuild \- the internal format, variables, and functions in an ebuild script
+
+.SH "DESCRIPTION"
+The \fBebuild\fR(1) program accepts a single ebuild script as an argument.
+This script contains variables and commands that specify how to download,
+unpack, patch, compile, install and merge a particular software package from
+its original sources. In addition to all of this, the ebuild script can also
+contain pre/post install/remove commands, as required. All ebuild scripts are
+written in bash.
+
+.SS "Dependencies"
+A \fIdepend atom\fR is simply a dependency that is used by portage when
+calculating relationships between packages. Please note that if the atom has
+not already been emerged, then the latest version available is matched.
+.TP
+.B Atom Bases
+The base atom is just a full category/packagename.
+
+Examples:
+.nf
+.I sys\-apps/sed
+.I sys\-libs/zlib
+.I net\-misc/dhcp
+.fi
+.TP
+.B Atom Versions
+It is nice to be more specific and say that only certain versions of atoms are
+acceptable. Note that versions must be combined with a prefix (see below).
+Hence you may add a version number as a postfix to the base.
+
+Examples:
+.nf
+ sys\-apps/sed\fI\-4.0.5\fR
+ sys\-libs/zlib\fI\-1.1.4\-r1\fR
+ net\-misc/dhcp\fI\-3.0_p2\fR
+.fi
+
+Versions are normally made up of two or three numbers separated by periods,
+such as 1.2 or 4.5.2. This string may be followed by a character such as 1.2a
+or 4.5.2z. Note that this letter is \fInot\fR meant to indicate alpha, beta,
+etc... status. For that, use the optional suffix; either _alpha, _beta, _pre
+(pre\-release), _rc (release candidate), or _p (patch). This means for the
+3rd pre\-release of a package, you would use something like 1.2_pre3. The
+suffixes here can be arbitrarily chained without limitation.
+.TP
+.B Atom Prefix Operators [> >= = <= <]
+Sometimes you want to be able to depend on general versions rather than
+specifying exact versions all the time. Hence we provide standard boolean
+operators:
+
+Examples:
+.nf
+ \fI>\fRmedia\-libs/libgd\-1.6
+ \fI>=\fRmedia\-libs/libgd\-1.6
+ \fI=\fRmedia\-libs/libgd\-1.6
+ \fI<=\fRmedia\-libs/libgd\-1.6
+ \fI<\fRmedia\-libs/libgd\-1.6
+.fi
+.TP
+.B Extended Atom Prefixes [!~] and Postfixes [*]
+Now to get even fancier, we provide the ability to define blocking packages and
+version range matching. Also note that these extended prefixes/postfixes may
+be combined in any way with the atom classes defined above.
+.RS
+.TP
+.I ~
+means match any revision of the base version specified. So in the
+example below, we would match versions '1.0.2a', '1.0.2a\-r1', '1.0.2a\-r2',
+etc...
+
+Example:
+.nf
+ \fI~\fRnet\-libs/libnet\-1.0.2a
+.fi
+.TP
+.I !
+means block packages from being installed at the same time.
+
+Example:
+.nf
+ \fI!\fRapp\-text/dos2unix
+.fi
+.TP
+.I !!
+means block packages from being installed at the same time
+and explicitly disallow them from being temporarily installed
+simultaneously during a series of upgrades. This syntax is supported
+beginning with \fBEAPI 2\fR.
+
+Example:
+.nf
+ \fI!!\fR<sys\-apps/portage\-2.1.4_rc1
+.fi
+.TP
+.I *
+means match any version of the package so long
+as the specified string prefix is matched. So with a
+version of '2*', we can match '2.1', '2.2', '2.2.1',
+etc... and not match version '1.0', '3.0', '4.1', etc...
+Beware that, due to the string matching nature, '20'
+will also be matched by '2*'. The version part
+that comes before the '*' must be a valid version in the absence of the '*'.
+For example, '2' is a valid version and '2.' is not. Therefore, '2*' is
+allowed and '2.*' is not.
+
+Examples:
+.nf
+ =dev\-libs/glib\-2\fI*\fR
+ \fI!\fR=net\-fs/samba\-2\fI*\fR
+.fi
+.RE
+.TP
+.B Atom Slots
+Beginning with \fBEAPI 1\fR, any atom can be constrained to match a specific
+\fBSLOT\fR. This is accomplished by appending a colon followed by a
+\fBSLOT\fR:
+
+Examples:
+.nf
+ x11\-libs/qt:3
+ \fI~\fRx11\-libs/qt-3.3.8:3
+ \fI>=\fRx11\-libs/qt-3.3.8:3
+ \fI=\fRx11\-libs/qt-3.3*:3
+.fi
+.TP
+.B Sub Slots
+Beginning with \fBEAPI 5\fR, a slot dependency may contain an
+optional sub\-slot part that follows the regular slot and is
+delimited by a \fI/\fR character.
+
+Examples:
+.nf
+ dev\-libs/icu:0/0
+ dev\-libs/icu:0/49
+ dev\-lang/perl:0/5.12
+ dev\-libs/glib:2/2.30
+.fi
+.TP
+.B Atom Slot Operators
+Beginning with \fBEAPI 5\fR, a slot operator dependency consists
+of a colon followed by one of the following operators:
+.RS
+.TP
+.I *
+Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will not
+break if the matched package is uninstalled and replaced by
+a different matching package in a different slot.
+
+Examples:
+.nf
+ dev\-libs/icu:*
+ dev\-lang/perl:*
+ dev-libs/glib:*
+.fi
+.TP
+.I =
+Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will
+break unless a matching package with slot and sub\-slot equal
+to the slot and sub\-slot of the best installed version at the
+time the package was installed is available.
+
+Examples:
+.nf
+ dev\-libs/icu:=
+ dev\-lang/perl:=
+ dev-libs/glib:=
+.fi
+.TP
+.I slot=
+Indicates that only a specific slot value is acceptable, and
+otherwise behaves identically to the plain equals slot operator.
+
+Examples:
+.nf
+ dev\-libs/icu:0=
+ dev\-lang/perl:0=
+ dev-libs/glib:2=
+.fi
+.PP
+To implement the equals slot operator, the package manager
+will need to store the slot/sub\-slot pair of the best installed
+version of the matching package. This syntax is only for package
+manager use and must not be used by ebuilds. The package manager
+may do this by inserting the appropriate slot/sub\-slot pair
+between the colon and equals sign when saving the package's
+dependencies. The sub\-slot part must not be omitted here
+(when the SLOT variable omits the sub\-slot part, the package
+is considered to have an implicit sub\-slot which is equal to
+the regular slot).
+
+Examples:
+.nf
+ dev\-libs/icu:0/0=
+ dev\-libs/icu:0/49=
+ dev\-lang/perl:0/5.12=
+ dev-libs/glib:2/2.30=
+.fi
+.RE
+.TP
+.B Atom USE
+Beginning with \fBEAPI 2\fR, any atom can be constrained to match specific
+\fBUSE\fR flag settings. When used together with \fBSLOT\fR dependencies,
+\fBUSE\fR dependencies appear on the right hand side of \fBSLOT\fR
+dependencies.
+.RS
+.TP
+.B Unconditional USE Dependencies
+.TS
+l l
+__
+l l.
+Example Meaning
+foo[bar] foo must have bar enabled
+foo[bar,baz] foo must have both bar and baz enabled
+foo[\-bar,baz] foo must have bar disabled and baz enabled
+.TE
+.TP
+.B Conditional USE Dependencies
+.TS
+l l
+__
+l l.
+Compact Form Equivalent Expanded Form
+foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+foo[!bar?] bar? ( foo ) !bar? ( foo[\-bar] )
+foo[bar=] bar? ( foo[bar] ) !bar? ( foo[\-bar] )
+foo[!bar=] bar? ( foo[\-bar] ) !bar? ( foo[bar] )
+.TE
+.RE
+.TP
+.B Atom USE defaults
+Beginning with \fBEAPI 4\fR, \fBUSE\fR dependencies may specify default
+assumptions about values for flags that may or may not be missing from
+the \fBIUSE\fR of the matched package. Such defaults are specified by
+immediately following a flag with either \fI(+)\fR or \fI(\-)\fR. Use
+\fI(+)\fR to behave as if a missing flag is present and enabled, or
+\fI(\-)\fR to behave as if it is present and disabled:
+
+Examples:
+.nf
+ media\-video/ffmpeg[threads(+)]
+ media\-video/ffmpeg[-threads(\-)]
+.fi
+.TP
+.B Dynamic Dependencies
+Sometimes a package's dependencies vary with the \fBUSE\fR
+variable. Portage offers a few syntaxes to handle this. Note that
+each of the following constructs counts as a single Atom in the
+scope in which it appears; an Atom can therefore both conditionally
+include multiple Atoms and be nested to arbitrary depth.
+.RS
+.TP
+.B usevar? ( Atom )
+To include the jpeg library when the user has jpeg in \fBUSE\fR, simply use the
+following syntax:
+
+jpeg? ( media\-libs/jpeg )
+.TP
+.B !usevar? ( Atom )
+If you want to include a package only if the user does not have a certain
+option in their \fBUSE\fR variable, then use the following syntax:
+
+!nophysfs? ( dev\-games/physfs )
+
+This is often useful when you want to add optional
+support for a feature and have it enabled by default.
+.TP
+.B usevar? ( Atom if true ) !usevar? ( Atom if false )
+For functionality like the ternary operator found in C, you must use
+two statements, one normal and one inverted. If a package uses
+GTK2 or GTK1, but not both, then you can handle that like this:
+
+gtk2? ( =x11\-libs/gtk+\-2* ) !gtk2? ( =x11\-libs/gtk+\-1* )
+
+That way the default is the superior GTK2 library.
+.TP
+.B || ( Atom Atom ... )
+When a package can work with a few different packages but a virtual is not
+appropriate, this syntax can easily be used.
+
+Example:
+.nf
+|| (
+ app\-games/unreal\-tournament
+ app\-games/unreal\-tournament\-goty
+)
+.fi
+
+Here we see that unreal\-tournament has a normal version and it has a goty
+version. Since they provide the same base set of files, another package can
+use either. Adding a virtual is inappropriate due to its small scope.
+
+Another good example is when a package can be built with multiple video
+interfaces, but it can only ever have just one.
+
+Example:
+.nf
+|| (
+ sdl? ( media\-libs/libsdl )
+ svga? ( media\-libs/svgalib )
+ opengl? ( virtual/opengl )
+ ggi? ( media\-libs/libggi )
+ virtual/x11
+)
+.fi
+
+Here only one of the packages will be chosen, and the order of preference is
+determined by the order in which they appear. So sdl has the best chance of
+being chosen, followed by svga, then opengl, then ggi, with a default of X if
+the user does not specify any of the previous choices.
+
+Note that if any of the packages listed are already merged, the package manager
+will use that to consider the dependency satisfied.
+
+.SS "Cross-compilation"
+Portage supports cross-compilation into a subdirectory specified by \fBROOT\fR.
+.TP
+.B Host
+\fIHost\fR in this context means the platform hosting the build process, i.e.
+what autotools calls CBUILD.
+Its packages are contained in the root of the filesystem ("\fI/\fR").
+
+If \fBROOT\fR is "\fI/\fR", all dependency types will be installed there.
+Otherwise, for EAPIs that support \fBHDEPEND\fR (experimental
+\fBEAPI 5-hdepend\fR), only \fBHDEPEND\fR is installed into "\fI/\fR".
+For EAPIs that do not support \fBHDEPEND\fR, the behaviour is controlled by the
+\fI\-\-root-deps\fR flag to \fBemerge\fR(1), defaulting to install only
+\fBDEPEND\fR into the \fIhost\fR.
+.TP
+.B Target
+\fITarget\fR refers to the platform that the package will later run on, i.e.
+what autotools calls CHOST.
+The directory housing this system is specified by \fBROOT\fR.
+If it is different from "\fI/\fR", i.e. \fIhost\fR and \fItarget\fR are not the
+same, this variable contains the path to the directory housing the \fItarget\fR
+system.
+
+For EAPIs that support \fBHDEPEND\fR (experimental \fBEAPI 5-hdepend\fR),
+\fBDEPEND\fR, \fBRDEPEND\fR, and \fBPDEPEND\fR
+list the \fItarget\fR dependencies, i.e. those to be installed into \fBROOT\fR.
+For EAPIs that do not support \fBHDEPEND\fR, the \fBemerge\fR(1) flag
+\fI\-\-root-deps\fR controls what the package manager installs there.
+Without it, \fBemerge\fR defaults to install only runtime dependencies (i.e.
+\fBRDEPEND\fR and \fBPDEPEND\fR) into \fBROOT\fR.
+.PP
+See section \fBVARIABLES\fR for more information about the \fBDEPEND\fR,
+\fBRDEPEND\fR and \fBHDEPEND\fR variables.
+.TP
+.B The targetroot USE flag
+For EAPIs that support the "\fItargetroot\fR" USE flag, that flag is
+automatically enabled by the package manager if \fIhost\fR and \fItarget\fR
+system are not the same, i.e. if the \fBROOT\fR is not "\fI/\fR".
+This is necessary where the package to be built needs an executable copy of
+itself during the build process.
+A known example is dev-lang/python, which needs to run a Python interpreter
+during compilation.
+
+.SH "VARIABLES"
+.TP
+.B Usage Notes
+\- Variables defined in \fBmake.conf\fR(5) are available for use in
+ebuilds (except Portage\-specific variables, which might not be supported by
+other package managers).
+.br
+\- When assigning values to variables in ebuilds, you \fIcannot have a
+space\fR between the variable name and the equal sign.
+.br
+\- Variable values should only contain characters that are members of the
+\fBascii\fR(7) character set. This requirement is mandated by \fBGLEP 31\fR.
+.TP
+.B P
+This variable contains the package name without the ebuild revision.
+This variable must NEVER be modified.
+
+xfree\-4.2.1\-r2.ebuild \-\-> $P=='xfree\-4.2.1'
+.TP
+.B PN
+Contains the name of the script without the version number.
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PN=='xfree'
+.TP
+.B PV
+Contains the version number without the revision.
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PV=='4.2.1'
+.TP
+.B PR
+Contains the revision number or 'r0' if no revision number exists.
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PR=='r2'
+.TP
+.B PVR
+Contains the version number with the revision.
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PVR=='4.2.1\-r2'
+.TP
+.B PF
+Contains the full package name \fBPN\fR\-\fBPVR\fR
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PF=='xfree\-4.2.1\-r2'
+.TP
+.B CATEGORY
+Contains the package category name.
+.TP
+.B A
+Contains all source files required for the package. This variable must
+not be defined. It is autogenerated from the \fBSRC_URI\fR variable.
+.TP
+.B WORKDIR\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/work"
+Contains the path to the package build root. Do not modify this variable.
+.TP
+.B FILESDIR\fR = \fI"${repository_location}/${CATEGORY}/${PN}/files"
+Contains the path to the 'files' subdirectory in the package\-specific
+location in the given repository. Do not modify this variable.
+.TP
+.B EBUILD_PHASE
+Contains the abbreviated name of the phase function that is
+currently executing, such as "setup", "unpack", "compile", or
+"preinst".
+.TP
+.B EBUILD_PHASE_FUNC
+Beginning with \fBEAPI 5\fR, contains the full name of the phase
+function that is currently executing, such as "pkg_setup",
+"src_unpack", "src_compile", or "pkg_preinst".
+.TP
+.B EPREFIX
+Beginning with \fBEAPI 3\fR, contains the offset
+that this Portage was configured for during
+installation. The offset is sometimes necessary in an ebuild or eclass,
+and is available in such cases as ${EPREFIX}. EPREFIX does not contain
+a trailing slash, therefore an absent offset is represented by the empty
+string. Do not modify this variable.
+.TP
+.B S\fR = \fI"${WORKDIR}/${P}"
+Contains the path to the temporary \fIbuild directory\fR. This variable
+is used by the functions \fIsrc_compile\fR and \fIsrc_install\fR. Both
+are executed with \fIS\fR as the current directory. This variable may
+be modified to match the extraction directory of a tarball for the package.
+.TP
+.B T\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/temp"
+Contains the path to a \fItemporary directory\fR. You may use this for
+whatever you like.
+.TP
+.B D\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/"
+Contains the path to the temporary \fIinstall directory\fR. Every write
+operation that does not involve the helper tools and functions (found below)
+should be prefixed with ${D}.
+Beginning with \fBEAPI 3\fR, the offset prefix often needs
+to be taken into account here, for which the variable
+${ED} is provided (see below).
+Do not modify this variable.
+.TP
+.B ED\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/${EPREFIX}/"
+Beginning with \fBEAPI 3\fR, contains the path
+"${D%/}${EPREFIX}/" for convenience purposes.
+For EAPI values prior to \fBEAPI 3\fR which do
+not support ED, helpers use \fBD\fR where
+they would otherwise use ED.
+Do not modify this variable.
+.TP
+.B MERGE_TYPE
+Beginning with \fBEAPI 4\fR, the MERGE_TYPE variable can be used to
+query the current merge type. This variable will contain one of the
+following possible values:
+
+.RS
+.TS
+l l
+__
+l l.
+Value Meaning
+binary previously\-built which is scheduled for merge
+buildonly source\-build which is not scheduled for merge
+source source\-build which is scheduled for merge
+.TE
+.RE
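+
+As an illustrative sketch (the message is hypothetical), an ebuild
+might branch on this value in \fBpkg_pretend\fR:
+.nf
+pkg_pretend() {
+	if [[ ${MERGE_TYPE} == binary ]]; then
+		einfo "installing from a previously built binary package"
+	fi
+}
+.fi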
+.TP
+.B PORTAGE_LOG_FILE
+Contains the path of the build log. If the \fBPORT_LOGDIR\fR variable is
+unset, then PORTAGE_LOG_FILE=\fI"${T}/build.log"\fR.
+.TP
+.B REPLACED_BY_VERSION
+Beginning with \fBEAPI 4\fR, the REPLACED_BY_VERSION variable can be
+used in pkg_prerm and pkg_postrm to query the package version that
+is replacing the current package. If there is no replacement package,
+the variable will be empty, otherwise it will contain a single version
+number.
+.TP
+.B REPLACING_VERSIONS
+Beginning with \fBEAPI 4\fR, the REPLACING_VERSIONS variable can be
+used in pkg_pretend, pkg_setup, pkg_preinst and pkg_postinst to query
+the package version(s) that the current package is replacing. If there
+are no packages to replace, the variable will be empty, otherwise it
+will contain a space\-separated list of version numbers corresponding
+to the package version(s) being replaced. Typically, this variable will
+not contain more than one version, but according to PMS it can contain
+more.
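+
+A minimal sketch (the elog message is illustrative):
+.nf
+pkg_preinst() {
+	local v
+	for v in ${REPLACING_VERSIONS}; do
+		elog "replacing version ${v}"
+	done
+}
+.fi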
+.TP
+.B ROOT\fR = \fI"/"
+Contains the path that portage should use as the root of the live filesystem.
+When packages wish to make changes to the live filesystem, they should do so in
+the tree prefixed by ${ROOT}. Often the offset prefix needs to be taken
+into account here, for which the variable ${EROOT} is provided (see
+below). Do not modify this variable.
+.TP
+.B EROOT\fR = \fI"${ROOT%/}${EPREFIX}/"
+Beginning with \fBEAPI 3\fR, contains
+"${ROOT%/}${EPREFIX}/" for convenience
+purposes. Do not modify this variable.
+.TP
+.B DESCRIPTION\fR = \fI"A happy little package"
+Should contain a short description of the package.
+.TP
+.B EAPI\fR = \fI"0"
+Defines the ebuild API version to which this package conforms. If not
+defined then it defaults to "0". If portage does not recognize the
+EAPI value then it will mask the package and refuse to perform any
+operations with it since this means that a newer version of portage
+needs to be installed first. For maximum backward compatibility, a
+package should conform to the lowest possible EAPI. Note that anyone
+who uses the \fBebuild\fR(1) and \fBrepoman\fR(1) commands with this
+package will be required to have a version of portage that recognizes
+the EAPI to which this package conforms.
+.TP
+.B SRC_URI\fR = \fI"http://example.com/path/${P}.tar.gz"
+Contains a list of URIs for the required source files. It can contain
+multiple URIs for a single source file. The list is processed in order
+if the file was not found on any of the \fIGENTOO_MIRRORS\fR.
+Beginning with \fBEAPI 2\fR, the output file name of a given URI may be
+customized with a "->" operator on the right hand side, followed by the
+desired output file name. All tokens, including the operator and output
+file name, should be separated by whitespace.
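+
+Example of the rename operator (the URI is illustrative):
+.nf
+	SRC_URI="http://example.com/path/${PN}.tgz \-> ${P}.tar.gz"
+.fi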
+.TP
+.B HOMEPAGE\fR = \fI"http://example.com/"
+Should contain a list of URIs for the package's main site(s) and any
+other pages with further package\-dependent information.
+.TP
+.B KEYWORDS\fR = \fI[\-~][x86,ppc,sparc,mips,alpha,arm,hppa]
+Should contain an appropriate list of arches on which the ebuild is known
+to work or not work. By default, if you do not know whether an ebuild runs
+under a particular arch, simply omit that KEYWORD. If the ebuild will not
+work on that arch, include it as \-ppc for example. If the ebuild is
+being submitted for inclusion, it must have ~arch set for architectures
+where it has been PROVEN TO WORK. (Packages KEYWORDed this way may be
+unmasked for testing by setting ACCEPT_KEYWORDS="~arch" on the command
+line, or in \fBmake.conf\fR(5)) For an authoritative list please review
+/usr/portage/profiles/arch.list. Please keep this list in alphabetical order.
+.TP
+.B SLOT
+This sets the SLOT for packages that may need to have multiple versions
+co\-exist. By default you should set \fBSLOT\fR="0". If you are unsure, then
+do not fiddle with this until you seek some guidance from some guru. This
+value should \fINEVER\fR be left undefined.
+
+Beginning with \fBEAPI 5\fR, the SLOT variable may contain
+an optional sub\-slot part that follows the regular slot and
+is delimited by a / character. The sub\-slot must be a valid
+slot name. The sub\-slot is used to represent cases in which
+an upgrade to a new version of a package with a different
+sub\-slot may require dependent packages to be rebuilt. When
+the sub\-slot part is omitted from the SLOT definition, the
+package is considered to have an implicit sub\-slot which is
+equal to the regular slot. Refer to the \fBAtom Slot
+Operators\fR section for more information about sub\-slot
+usage.
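+
+For example, a library whose regular slot is 2 and whose current
+ABI corresponds to sub\-slot 2.30 might define:
+.nf
+	SLOT="2/2.30"
+.fi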
+.TP
+.B LICENSE
+This should be a space delimited list of licenses that the package falls
+under. This \fB_must_\fR be set to a matching license in
+/usr/portage/licenses/. If the license does not exist in portage yet, you
+must add it first.
+.TP
+.B IUSE
+This should be a list of any and all USE flags that are leveraged within
+your build script. The only USE flags that should not be listed here are
+arch related flags (see \fBKEYWORDS\fR). Beginning with \fBEAPI 1\fR, it
+is possible to prefix flags with + or - in order to create default settings
+that respectively enable or disable the corresponding \fBUSE\fR flags. For
+details about \fBUSE\fR flag stacking order, refer to the \fBUSE_ORDER\fR
+variable in \fBmake.conf\fR(5). Given the default \fBUSE_ORDER\fR setting,
+negative IUSE default settings are effective only for negation of
+repo\-level USE settings, since profile and user configuration settings
+override them.
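+
+Example (flag names are illustrative; jpeg defaults to enabled,
+debug to disabled):
+.nf
+	IUSE="+jpeg \-debug ssl"
+.fi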
+.TP
+.B DEPEND
+This should contain a list of all packages that are required for the program
+to compile (aka \fIbuildtime\fR dependencies). These are usually libraries and
+headers.
+
+Starting from experimental \fBEAPI 5-hdepend\fR, tools should go into the
+\fBHDEPEND\fR variable instead, as \fBDEPEND\fR will only be installed into the
+\fItarget\fR system and hence cannot be executed in a cross\-compile setting.
+(See section \fBCross\-compilation\fR for more information.)
+
+You may use the syntax described above in the \fBDependencies\fR section.
+.TP
+.B RDEPEND
+This should contain a list of all packages that are required for this
+program to run (aka \fIruntime\fR dependencies). These are usually libraries.
+
+In \fBEAPI 3\fR or earlier, if this is not set, then it defaults to the value
+of \fBDEPEND\fR. In \fBEAPI 4\fR or later, \fBRDEPEND\fR will never be
+implicitly set.
+
+You may use the syntax described above in the \fBDependencies\fR section.
+.TP
+.B HDEPEND
+This should contain a list of all packages that are required to be executable
+during compilation of this program (aka \fIhost\fR buildtime dependencies).
+These are usually tools, like interpreters or (cross\-)compilers.
+
+This variable is new in experimental \fBEAPI 5-hdepend\fR and will be installed
+into the \fIhost\fR system.
+(See section \fBCross-compilation\fR for more information.)
+
+You may use the syntax described above in the \fBDependencies\fR section.
+.TP
+.B PDEPEND
+This should contain a list of all packages that should be merged after this
+one (aka \fIpost\fR merge dependencies), but which may be installed by the
+package manager at any time if merging them afterwards is not possible.
+
+.B ***WARNING***
+.br
+Use this only as last resort to break cyclic dependencies!
+
+You may use the syntax described above in the \fBDependencies\fR section.
+.TP
+.B REQUIRED_USE
+Beginning with \fBEAPI 4\fR, the \fBREQUIRED_USE\fR variable can be
+used to specify combinations of \fBUSE\fR flags that are allowed
+or not allowed. Elements can be nested when necessary.
+.TS
+l l
+__
+l l.
+Behavior Expression
+If flag1 enabled then flag2 disabled flag1? ( !flag2 )
+If flag1 enabled then flag2 enabled flag1? ( flag2 )
+If flag1 disabled then flag2 enabled !flag1? ( flag2 )
+If flag1 disabled then flag2 disabled !flag1? ( !flag2 )
+Must enable any one or more (inclusive or) || ( flag1 flag2 flag3 )
+Must enable exactly one but not more (exclusive or) ^^ ( flag1 flag2 flag3 )
+May enable at most one (EAPI 5 or later) ?? ( flag1 flag2 flag3 )
+.TE
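+
+Example (a hypothetical package that must enable exactly one
+toolkit whenever the gui flag is enabled):
+.nf
+	REQUIRED_USE="gui? ( ^^ ( gtk qt4 ) )"
+.fi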
+.TP
+.B RESTRICT\fR = \fI[strip,mirror,fetch,userpriv]
+This should be a space delimited list of portage features to restrict.
+You may use conditional syntax to vary restrictions as seen above in DEPEND.
+.PD 0
+.RS
+.TP
+.I binchecks
+Disable all QA checks for binaries. This should ONLY be used in packages
+for which binary checks make no sense (linux\-headers and kernel\-sources, for
+example, can safely be skipped since they have no binaries). If the binary
+checks need to be skipped for other reasons (such as proprietary binaries),
+see the \fBQA CONTROL VARIABLES\fR section for more specific exemptions.
+.TP
+.I bindist
+Distribution of built packages is restricted.
+.TP
+.I fetch
+like \fImirror\fR but the files will not be fetched via \fBSRC_URI\fR either.
+.TP
+.I installsources
+Disables installsources for specific packages. This is for packages with
+binaries that are not compatible with debugedit.
+.TP
+.I mirror
+files in \fBSRC_URI\fR will not be downloaded from the \fBGENTOO_MIRRORS\fR.
+.TP
+.I preserve\-libs
+Disables preserve\-libs for specific packages. Note that when a package is
+merged, RESTRICT=preserve\-libs applies if either the new instance or the
+old instance sets RESTRICT=preserve\-libs.
+.TP
+.I primaryuri
+fetch from URIs in \fBSRC_URI\fR before \fBGENTOO_MIRRORS\fR.
+.TP
+.I splitdebug
+Disables splitdebug for specific packages. This is for packages with
+binaries that trigger problems with splitdebug, such as file\-collisions
+between symlinks in /usr/lib/debug/.build-id (triggered by bundled libraries).
+.TP
+.I strip
+final binaries/libraries will not be stripped of debug symbols.
+.TP
+.I test
+do not run src_test even if user has \fBFEATURES\fR=test.
+.TP
+.I userpriv
+Disables userpriv for specific packages.
+.RE
+.PD 1
+.TP
+.B PROPERTIES\fR = \fI[interactive]
+A space delimited list of properties, with conditional syntax support.
+.PD 0
+.RS
+.TP
+.I interactive
+One or more ebuild phases will produce a prompt that requires user interaction.
+.RE
+.PD 1
+.TP
+.B PROVIDE\fR = \fI"virtual/TARGET"
+This variable should only be used when a package provides a virtual target.
+For example, blackdown\-jdk and sun\-jdk provide \fIvirtual/jdk\fR. This
+allows for packages to depend on \fIvirtual/jdk\fR rather than on blackdown
+or sun specifically.
+
+The \fBPROVIDE\fR variable has been deprecated. See
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0037.html\fR for details.
+
+.TP
+.B DOCS
+Beginning with \fBEAPI 4\fR, an array or space\-delimited list of documentation
+files for the default src_install function to install using dodoc. If
+undefined, a reasonable default list is used. See the documentation for
+src_install below.
+
+.SS "QA Control Variables:"
+.TP
+.B Usage Notes
+Several QA variables are provided which allow an ebuild to manipulate some
+of the QA checks performed by portage. Use of these variables in ebuilds
+should be kept to an absolute minimum otherwise they defeat the purpose
+of the QA checks, and their use is subject to agreement of the QA team.
+They are primarily intended for use by ebuilds that install closed\-source
+binary objects that cannot be altered.
+
+Note that objects that violate these rules may fail on some architectures.
+.TP
+.B QA_PREBUILT
+This should contain a list of file paths, relative to the image
+directory, of files that are pre\-built binaries. Paths
+listed here will be appended to each of the QA_* variables
+listed below. The paths may contain fnmatch\-like patterns
+which will be internally translated to regular expressions for
+the QA_* variables that support regular expressions instead
+of fnmatch patterns. The translation mechanism simply replaces
+"*" with ".*".
+.TP
+.B QA_TEXTRELS
+This variable can be set to a list of file paths, relative to the image
+directory, of files that contain text relocations that cannot be eliminated.
+The paths may contain fnmatch patterns.
+
+This variable is intended to be used on closed\-source binary objects that
+cannot be altered.
+.TP
+.B QA_EXECSTACK
+This should contain a list of file paths, relative to the image directory, of
+objects that require executable stack in order to run.
+The paths may contain fnmatch patterns.
+
+This variable is intended to be used on objects that truly need executable
+stack (i.e. not those marked to need it which in fact do not).
+.TP
+.B QA_WX_LOAD
+This should contain a list of file paths, relative to the image directory, of
+files that contain writable and executable segments. These are rare.
+The paths may contain fnmatch patterns.
+.TP
+.B QA_FLAGS_IGNORED
+This should contain a list of file paths, relative to the image directory, of
+files that do not contain .GCC.command.line sections or contain .hash sections.
+The paths may contain regular expressions with escape\-quoted special
+characters.
+
+This variable is intended to be used on files of binary packages which ignore
+CFLAGS, CXXFLAGS, FFLAGS, FCFLAGS, and LDFLAGS variables.
+.TP
+.B QA_IGNORE_INSTALL_NAME_FILES
+This should contain a list of file names (without path) that should be
+ignored in the install_name check. That is, if these files point to
+something not available in the image directory or live filesystem, these
+files are ignored even though they are broken.
+.TP
+.B QA_MULTILIB_PATHS
+This should contain a list of file paths, relative to the image directory, of
+files that should be ignored for the multilib\-strict checks.
+The paths may contain regular expressions with escape\-quoted special
+characters.
+.TP
+.B QA_PRESTRIPPED
+This should contain a list of file paths, relative to the image directory, of
+files that contain pre-stripped binaries. The paths may contain regular
+expressions with escape\-quoted special characters.
+.TP
+.B QA_SONAME
+This should contain a list of file paths, relative to the image directory, of
+shared libraries that lack SONAMEs. The paths may contain regular expressions
+with escape\-quoted special characters.
+.TP
+.B QA_SONAME_NO_SYMLINK
+This should contain a list of file paths, relative to the image directory, of
+shared libraries that have SONAMEs but should not have a corresponding SONAME
+symlink in the same directory. The paths may contain regular expressions
+with escape\-quoted special characters.
+.TP
+.B QA_AM_MAINTAINER_MODE
+This should contain a list of lines containing automake missing \-\-run
+commands. The lines may contain regular expressions with escape\-quoted
+special characters.
+.TP
+.B QA_CONFIGURE_OPTIONS
+This should contain a list of configure options which trigger warnings about
+unrecognized options. The options may contain regular expressions with
+escape\-quoted special characters.
+.TP
+.B QA_DT_NEEDED
+This should contain a list of file paths, relative to the image directory, of
+shared libraries that lack NEEDED entries. The paths may contain regular
+expressions with escape\-quoted special characters.
+.TP
+.B QA_DESKTOP_FILE
+This should contain a list of file paths, relative to the image directory, of
+desktop files which should not be validated. The paths may contain regular
+expressions with escape\-quoted special characters.
+
+.SH "PORTAGE DECLARATIONS"
+.TP
+.B inherit
+The inherit mechanism is portage's way of providing extra classes of
+functions that are external to ebuilds, as inheritable capabilities and
+data. Eclasses define functions and set data as drop\-in replacements and
+as expanded or simplified routines for extremely common tasks, to
+streamline the build process. A call to inherit must not depend on
+conditions which can vary in a given ebuild. Eclasses are specified by
+name only, without the \fI.eclass\fR extension. Also note that the inherit
+statement must come before other variable declarations, unless those
+variables are used in the global scope of eclasses.
+
+.SH "PHASE FUNCTIONS"
+.TP
+.B pkg_pretend
+Beginning with \fBEAPI 4\fR, this function can be defined in order to
+check that miscellaneous requirements are met. It is called as early
+as possible, before any attempt is made to satisfy dependencies. If the
+function detects a problem then it should call eerror and die. The
+environment (variables, functions, temporary directories, etc..) that
+is used to execute pkg_pretend is not saved and therefore is not
+available in phases that execute afterwards.
+.TP
+.B pkg_nofetch
+This function will be executed when the files in \fBSRC_URI\fR
+cannot be fetched for any reason. If you turn on \fIfetch\fR in
+\fBRESTRICT\fR, this is useful for displaying information to the
+user on *how* to obtain said files. All
+you have to do is output a message and let the function return. Do not
+end the function with a call to \fBdie\fR.
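+
+A minimal sketch (the wording is illustrative):
+.nf
+pkg_nofetch() {
+	einfo "Please download ${A} from:"
+	einfo "${HOMEPAGE}"
+	einfo "and place it in ${DISTDIR}"
+}
+.fi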
+.TP
+.B pkg_setup
+This function can be used if the package needs specific setup actions or
+checks to be performed before anything else.
+.br
+Initial working directory: $PORTAGE_TMPDIR
+.TP
+.B src_unpack
+This function is used to unpack all the sources in \fIA\fR to \fIWORKDIR\fR.
+If not defined in the \fIebuild script\fR it calls \fIunpack ${A}\fR. Any
+patches and other pre configure/compile modifications should be done here.
+.br
+Initial working directory: $WORKDIR
+.TP
+.B src_prepare
+All preparation of source code, such as application of patches, should be done
+here. This function is supported beginning with \fBEAPI 2\fR.
+.br
+Initial working directory: $S
+.TP
+.B src_configure
+All necessary steps for configuration should be done here. This function is
+supported beginning with \fBEAPI 2\fR.
+.br
+Initial working directory: $S
+.TP
+.B src_compile
+With less than \fBEAPI 2\fR, all necessary steps for both configuration and
+compilation should be done here. Beginning with \fBEAPI 2\fR, only compilation
+steps should be done here.
+.br
+Initial working directory: $S
+.TP
+.B src_test
+Run all package\-specific test cases. The default is to run
+\'emake check\' followed by \'emake test\'. Prior to \fBEAPI 5\fR,
+the default src_test implementation will automatically pass the
+\-j1 option as the last argument to emake, and beginning with
+\fBEAPI 5\fR it will allow the tests to run in parallel.
+.br
+Initial working directory: $S
+.TP
+.B src_install
+Should contain everything required to install the package in the temporary
+\fIinstall directory\fR.
+.br
+Initial working directory: $S
+
+Beginning with \fBEAPI 4\fR, if src_install is undefined then the
+following default implementation is used:
+
+.nf
+src_install() {
+ if [[ \-f Makefile || \-f GNUmakefile || \-f makefile ]] ; then
+ emake DESTDIR="${D}" install
+ fi
+
+ if ! declare -p DOCS &>/dev/null ; then
+ local d
+ for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \\
+ THANKS BUGS FAQ CREDITS CHANGELOG ; do
+ [[ \-s "${d}" ]] && dodoc "${d}"
+ done
+ elif [[ $(declare \-p DOCS) == "declare \-a "* ]] ; then
+ dodoc "${DOCS[@]}"
+ else
+ dodoc ${DOCS}
+ fi
+}
+.fi
+.TP
+.B pkg_preinst pkg_postinst
+All modifications required on the live\-filesystem before and after the
+package is merged should be placed here. Also commentary for the user
+should be listed here as it will be displayed last.
+.br
+Initial working directory: $PWD
+.TP
+.B pkg_prerm pkg_postrm
+Like the pkg_*inst functions but for unmerge.
+.br
+Initial working directory: $PWD
+.TP
+.B pkg_config
+This function should contain optional basic configuration steps.
+.br
+Initial working directory: $PWD
+
+.SH "HELPER FUNCTIONS"
+.SS "Phases:"
+.TP
+.B default
+Calls the default phase function implementation for the currently executing
+phase. This function is supported beginning with \fBEAPI 2\fR.
+.TP
+.B default_*
+Beginning with \fBEAPI 2\fR, the default pkg_nofetch and src_* phase
+functions are accessible via a function having a name that begins with
+default_ and ends with the respective phase function name. For example,
+a call to a function with the name default_src_compile is equivalent to
+a call to the default src_compile implementation.
+
+.RS
+.TS
+l
+_
+l.
+Default Phase Functions
+default_pkg_nofetch
+default_src_unpack
+default_src_prepare
+default_src_configure
+default_src_compile
+default_src_test
+.TE
+.RE
+
+.SS "General:"
+.TP
+.B die\fR \fI[reason]
+Causes the current emerge process to be aborted. The final display will
+include \fIreason\fR.
+
+Beginning with \fBEAPI 4\fR, all helpers automatically call \fBdie\fR
+whenever some sort of error occurs. Helper calls may be prefixed with
+the \fBnonfatal\fR helper in order to prevent errors from being fatal.
+.TP
+.B nonfatal\fR \fI<helper>
+Execute \fIhelper\fR and \fIdo not\fR call die if it fails.
+The \fBnonfatal\fR helper is available beginning with \fBEAPI 4\fR.
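+
+Example (assumes \fBEAPI 4\fR or later; the warning text is
+illustrative):
+.nf
+	nonfatal emake check || ewarn "tests failed"
+.fi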
+.TP
+.B use\fR \fI<USE item>
+If \fIUSE item\fR is in the \fBUSE\fR variable, the function will silently
+return 0 (aka shell true). If \fIUSE item\fR is not in the \fBUSE\fR
+variable, the function will silently return 1 (aka shell false). \fBusev\fR
+is a verbose version of \fBuse\fR.
+.RS
+.TP
+.I Example:
+.nf
+if use gnome ; then
+ guiconf="\-\-enable\-gui=gnome \-\-with\-x"
+elif use gtk ; then
+ guiconf="\-\-enable\-gui=gtk \-\-with\-x"
+elif use X ; then
+ guiconf="\-\-enable\-gui=athena \-\-with\-x"
+else
+ # No gui version will be built
+ guiconf=""
+fi
+.fi
+.RE
+.TP
+.B usev\fR \fI<USE item>
+Like \fBuse\fR, but also echoes \fIUSE item\fR when \fBuse\fR returns true.
+.TP
+.B usex\fR \fI<USE flag>\fR \fI[true output]\fR \fI[false output]\fR \fI[true \
+suffix]\fR \fI[false suffix]
+If USE flag is set, echo [true output][true suffix] (defaults to
+"yes"), otherwise echo [false output][false suffix] (defaults to
+"no"). The usex helper is available beginning with \fBEAPI 5\fR.
+.TP
+.B use_with\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]
+Useful for creating custom options to pass to a configure script. If \fIUSE
+item\fR is in the \fBUSE\fR variable and a \fIconfigure opt\fR is specified,
+then the string \fI\-\-with\-[configure name]=[configure opt]\fR will be
+echoed. If \fIconfigure opt\fR is not specified, then just
+\fI\-\-with\-[configure name]\fR will be echoed. If \fIUSE item\fR is not in
+the \fBUSE\fR variable, then the string \fI\-\-without\-[configure name]\fR
+will be echoed. If \fIconfigure name\fR is not specified, then \fIUSE item\fR
+will be used in its place. Beginning with \fBEAPI 4\fR, an empty \fIconfigure
+opt\fR argument is recognized. In \fBEAPI 3\fR and earlier, an empty
+\fIconfigure opt\fR argument is treated as if it weren't provided.
+.RS
+.TP
+.I Examples:
+.nf
+USE="opengl"
+myconf=$(use_with opengl)
+(myconf now has the value "\-\-with\-opengl")
+
+USE="jpeg"
+myconf=$(use_with jpeg libjpeg)
+(myconf now has the value "\-\-with\-libjpeg")
+
+USE=""
+myconf=$(use_with jpeg libjpeg)
+(myconf now has the value "\-\-without\-libjpeg")
+
+USE="sdl"
+myconf=$(use_with sdl SDL all\-plugins)
+(myconf now has the value "\-\-with\-SDL=all\-plugins")
+.fi
+.RE
+.TP
+.B use_enable\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]
+Same as \fBuse_with\fR above, except that the configure options are
+\fI\-\-enable\-\fR instead of \fI\-\-with\-\fR and \fI\-\-disable\-\fR instead
+of \fI\-\-without\-\fR. Beginning with \fBEAPI 4\fR, an empty \fIconfigure
+opt\fR argument is recognized. In \fBEAPI 3\fR and earlier, an empty
+\fIconfigure opt\fR argument is treated as if it weren't provided.
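+
+For illustration, mirroring the \fBuse_with\fR examples above:
+.RS
+.TP
+.I Examples:
+.nf
+USE="nls"
+myconf=$(use_enable nls)
+(myconf now has the value "\-\-enable\-nls")
+
+USE=""
+myconf=$(use_enable nls)
+(myconf now has the value "\-\-disable\-nls")
+.fi
+.RE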
+.TP
+.B has\fR \fI<item>\fR \fI<item list>
+If \fIitem\fR is in \fIitem list\fR, then \fBhas\fR returns
+0. Otherwise, 1 is returned. There is another version, \fBhasv\fR, that
+will conditionally echo \fIitem\fR.
+.br
+The \fIitem list\fR is delimited by the \fIIFS\fR variable. This variable
+has a default value of ' ', or a space. It is a \fBbash\fR(1) setting.
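+
+A brief sketch (the einfo message is illustrative):
+.nf
+	if has ccache ${FEATURES} ; then
+		einfo "ccache is enabled"
+	fi
+.fi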
+.TP
+.B hasv\fR \fI<item>\fR \fI<item list>
+Like \fBhas\fR, but also echoes \fIitem\fR when \fBhas\fR returns true.
+.TP
+.B has_version\fR \fI[\-\-host\-root]\fR \fI<category/package\-version>
+Check to see if \fIcategory/package\-version\fR is installed on the system.
+The parameter accepts all values that are acceptable in the \fBDEPEND\fR
+variable. The function returns 0 if \fIcategory/package\-version\fR is
+installed, 1 otherwise. Beginning with \fBEAPI 5\fR, the
+\-\-host\-root option may be used in order to cause the query
+to apply to the host root instead of ${ROOT}.
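+
+Example (the atom is illustrative):
+.nf
+	if has_version ">=dev\-libs/openssl\-1.0.0" ; then
+		einfo "a recent OpenSSL is already installed"
+	fi
+.fi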
+.TP
+.B best_version\fR \fI[\-\-host\-root]\fR \fI<package name>
+This function will look up \fIpackage name\fR in the database of currently
+installed programs and echo the "best version" of the package that is
+currently installed. Beginning with \fBEAPI 5\fR, the
+\-\-host\-root option may be used in order to cause the query
+to apply to the host root instead of ${ROOT}.
+
+Example:
+.nf
+ VERINS="$(best_version net\-ftp/glftpd)"
+ (VERINS now has the value "net\-ftp/glftpd\-1.27" if glftpd\-1.27 is \
+ installed)
+.fi
+
+.SS "Hooks:"
+.TP
+.B register_die_hook\fR \fI[list of function names]
+Register one or more functions to call when the ebuild fails for any reason,
+including file collisions with other packages.
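+
+A minimal sketch (the hook name and message are illustrative):
+.nf
+	my_die_hook() {
+		eerror "failure during the ${EBUILD_PHASE} phase"
+	}
+	register_die_hook my_die_hook
+.fi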
+.TP
+.B register_success_hook\fR \fI[list of function names]
+Register one or more functions to call when the ebuild builds and/or installs
+successfully.
+
+.SS "Output:"
+.TP
+.B einfo\fR \fI"disposable message"
+Same as \fBelog\fR, but should be used when the message isn't important to the
+user (like progress or status messages during the build process).
+.TP
+.B elog\fR \fI"informative message"
+If you need to display a message that you wish the user to read and take
+notice of, then use \fBelog\fR. It works just like \fBecho\fR(1), but
+adds a little more to the output so as to catch the user's eye. The message
+will also be logged by portage for later review.
+.TP
+.B ewarn\fR \fI"warning message"
+Same as \fBeinfo\fR, but should be used when showing a warning to the user.
+.TP
+.B eqawarn\fR \fI"QA warning message"
+Same as \fBeinfo\fR, but should be used when showing a QA warning to the user.
+.TP
+.B eerror\fR \fI"error message"
+Same as \fBeinfo\fR, but should be used when showing an error to the user.
+.TP
+.B ebegin\fR \fI"helpful message"
+Like \fBeinfo\fR, we output a \fIhelpful message\fR and then hint that the
+following operation may take some time to complete. Once the task is
+finished, you need to call \fBeend\fR.
+.TP
+.B eend\fR \fI<status>\fR \fI["error message"]
+Follow up the \fBebegin\fR message with an appropriate "OK" or "!!" (for
+errors) marker. If \fIstatus\fR is non\-zero, then the additional \fIerror
+message\fR is displayed.
+
+.SS "Unpack:"
+.TP
+.B unpack\fR \fI<source>\fR \fI[list of more sources]
+This function uncompresses and/or untars a list of sources into the current
+directory. The function will append \fIsource\fR to the \fBDISTDIR\fR variable.
+
+.SS "Compile:"
+.TP
+.B econf\fR \fI[configure options]
+This is used as a replacement for configure. Performs:
+.nf
+${\fIECONF_SOURCE\fR:-.}/configure \\
+ ${CBUILD:+\-\-build=${CBUILD}} \\
+ \-\-datadir="${EPREFIX}"/usr/share \\
+ \-\-host=${CHOST} \\
+ \-\-infodir="${EPREFIX}"/usr/share/info \\
+ \-\-localstatedir="${EPREFIX}"/var/lib \\
+ \-\-prefix="${EPREFIX}"/usr \\
+ \-\-mandir="${EPREFIX}"/usr/share/man \\
+ \-\-sysconfdir="${EPREFIX}"/etc \\
+ ${CTARGET:+\-\-target=${CTARGET}} \\
+ \-\-disable\-dependency\-tracking \\
+ \fI${EXTRA_ECONF}\fR \\
+ \fIconfigure options\fR || die "econf failed"
+.fi
+Note that the \fIEXTRA_ECONF\fR is for users only, not for ebuild
+writers. If you wish to pass more options to configure, just pass the
+extra arguments to \fBeconf\fR. Also note that \fBeconf\fR automatically
+calls \fBdie\fR if the configure script fails.
+Beginning with \fBEAPI 3\fR, \fBeconf\fR uses the \fB${EPREFIX}\fR
+variable which is disregarded for prior \fBEAPI\fR values.
+Beginning with \fBEAPI 4\fR, \fBeconf\fR adds
+\fI\-\-disable\-dependency\-tracking\fR to the arguments if the
+string \fIdisable\-dependency\-tracking\fR occurs in the output
+of \fIconfigure \-\-help\fR.
+Beginning with \fBEAPI 5\fR, \fBeconf\fR adds
+\fIdisable\-silent\-rules\fR to the arguments if the
+string \fIdisable\-silent\-rules\fR occurs in the output
+of \fIconfigure \-\-help\fR.
+.TP
+.B emake\fR \fI[make options]
+This is used as a replacement for make. Performs 'make ${MAKEOPTS}
+\fImake options\fR'; \fBMAKEOPTS\fR is set in make.globals and defaults
+to "\-j2".
+
+.B ***WARNING***
+.br
+if you are going to use \fBemake\fR, make sure your build is happy with
+parallel makes (make \-j2). It should be tested thoroughly as parallel
+makes are notorious for failing _sometimes_ but not always. If you determine
+that your package fails to build in parallel, and you are unable to resolve
+the issue, then you should run '\fBemake\fR \-j1' instead of 'make'.
+
+.SS "Install:"
+.TP
+.B einstall\fR \fI[make options]
+This is used as a replacement for make install. Performs:
+.nf
+make \\
+ prefix=${ED}/usr \\
+ datadir=${ED}/usr/share \\
+ infodir=${ED}/usr/share/info \\
+ localstatedir=${ED}/var/lib \\
+ mandir=${ED}/usr/share/man \\
+ sysconfdir=${ED}/etc \\
+ \fI${EXTRA_EINSTALL}\fR \\
+ \fImake options\fR \\
+ install
+.fi
+Please do \fBnot\fR use this in place of 'emake install DESTDIR=${D}'.
+That is the preferred way of installing make\-based packages. Also, do
+not utilize the \fIEXTRA_EINSTALL\fR variable since it is for users.
+
+.PD 0
+.TP
+.B prepall
+.TP
+.B prepalldocs
+.TP
+.B prepallinfo
+.TP
+.B prepallman
+.TP
+.B prepallstrip
+.PD 1
+Useful for when a package installs into \fB${D}\fR via scripts
+(i.e. makefiles). If you want to be sure that libraries are executable,
+aclocal files are installed into the right place, doc/info/man files are
+all compressed, and that executables are all stripped of debugging symbols,
+then use this suite of functions.
+.RS
+.PD 0
+.TP
+.B prepall:
+Runs \fBprepallman\fR, \fBprepallinfo\fR, \fBprepallstrip\fR, sets
+libraries +x, and then checks aclocal directories. Please note this
+does \fI*not*\fR run \fBprepalldocs\fR.
+.TP
+.B prepalldocs:
+Compresses all doc files in ${ED}/usr/share/doc.
+.TP
+.B prepallinfo:
+Compresses all info files in ${ED}/usr/share/info.
+.TP
+.B prepallman:
+Compresses all man files in ${ED}/usr/share/man.
+.TP
+.B prepallstrip:
+Strips all executable files of debugging symbols. This includes libraries.
+.RE
+
+.TP
+.B prepinfo\fR \fI[dir]
+.TP
+.B prepman\fR \fI[dir]
+.TP
+.B prepstrip\fR \fI[dir]
+.PD 1
+Similar to the \fBprepall\fR functions, these are subtle in their differences.
+.RS
+.PD 0
+.TP
+.B prepinfo:
+If a \fIdir\fR is not specified, then \fBprepinfo\fR will assume the dir
+\fIusr\fR. \fBprepinfo\fR will then compress all the files in
+${ED}/\fIdir\fR/info.
+.TP
+.B prepman:
+If a \fIdir\fR is not specified, then \fBprepman\fR will assume the dir
+\fIusr\fR. \fBprepman\fR will then compress all the files in
+${ED}/\fIdir\fR/man/*/.
+.TP
+.B prepstrip:
+All the files found in ${ED}/\fIdir\fR will be stripped. You may specify
+multiple directories.
+.RE
+.PD 1
+.TP
+.B docompress\fR \fI[\-x] <path> [list of more paths]
+.RS
+Beginning with \fBEAPI 4\fR, the \fBdocompress\fR helper is used to
+manage lists of files to be included or excluded from optional compression.
+If the first argument is \fB\-x\fR, add each of its subsequent arguments to
+the exclusion list. Otherwise, add each argument to the inclusion list.
+The inclusion list initially contains \fI/usr/share/doc\fR,
+\fI/usr/share/info\fR, and \fI/usr/share/man\fR. The exclusion list
+initially contains \fI/usr/share/doc/${PF}/html\fR.
+
+The optional compression shall be carried out after \fBsrc_install\fR
+has completed, and before the execution of any subsequent phase
+function. For each item in the inclusion list, pretend it has the
+value of the \fBD\fR variable prepended, then:
+
+.RS
+If it is a directory, act as if every file or directory immediately
+under this directory were in the inclusion list.
+
+If the item is a file, it may be compressed unless it has been
+excluded as described below.
+
+If the item does not exist, it is ignored.
+.RE
+
+Whether an item is to be excluded is determined as follows: For each
+item in the exclusion list, pretend it has the value of the \fBD\fR
+variable prepended, then:
+
+.RS
+If it is a directory, act as if every file or directory immediately
+under this directory were in the exclusion list.
+
+If the item is a file, it shall not be compressed.
+
+If the item does not exist, it is ignored.
+.RE
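+
+Example (excluding a hypothetical examples directory from
+compression):
+.nf
+	docompress \-x /usr/share/doc/${PF}/examples
+.fi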
+.RE
+.TP
+.B dosed\fR \fI"s:orig:change:g" <filename>
+Beginning with \fBEAPI 4\fR, the \fBdosed\fR helper no longer exists. Ebuilds
+should call \fBsed(1)\fR directly (and assume that it is GNU sed).
+
+Performs sed in place on \fIfilename\fR inside ${ED}. If no expression is
+given then \fI"s:${D}::g"\fR is used as the default expression. Note
+that this expression does \fBNOT\fR use the offset prefix.
+.br
+.BR 'dosed\ "s:/usr/local:/usr:g"\ /usr/bin/some\-script'
+runs sed on ${ED}/usr/bin/some\-script
+.TP
+.B dodir\fR \fI<path> [more paths]
+Creates directories inside of ${ED}.
+.br
+.BR 'dodir\ /usr/lib/apache'
+creates ${ED}/usr/lib/apache. Note that the do* functions will run
+\fBdodir\fR for you.
+.TP
+.B diropts\fR \fI[options for install(1)]
+Can be used to define options for the install function used in
+\fBdodir\fR. The default is \fI\-m0755\fR.
+.TP
+.B into\fR \fI<path>
+Sets the root (\fIDESTTREE\fR) for other functions like \fBdobin\fR,
+\fBdosbin\fR, \fBdoman\fR, \fBdoinfo\fR, \fBdolib\fR.
+.br
+The default root is /usr.
+.TP
+.B keepdir\fR \fI<path> [more paths]
+Tells portage to leave directories behind even if they're empty. Functions
+the same as \fBdodir\fR.
+.TP
+.B dobin\fR \fI<binary> [list of more binaries]
+Installs a \fIbinary\fR or a list of binaries into \fIDESTTREE\fR/bin.
+Creates all necessary dirs.
+.TP
+.B dosbin\fR \fI<binary> [list of more binaries]
+Installs a \fIbinary\fR or a list of binaries into \fIDESTTREE\fR/sbin.
+Creates all necessary dirs.
+.TP
+.B doinitd\fR \fI<init.d script> [list of more init.d scripts]
+Install Gentoo \fIinit.d scripts\fR. They will be installed into the
+correct location for Gentoo init.d scripts (/etc/init.d/). Creates all
+necessary dirs.
+.TP
+.B doconfd\fR \fI<conf.d file> [list of more conf.d files]
+Install Gentoo \fIconf.d files\fR. They will be installed into the
+correct location for Gentoo conf.d files (/etc/conf.d/). Creates all
+necessary dirs.
+.TP
+.B doenvd\fR \fI<env.d entry> [list of more env.d entries]
+Install Gentoo \fIenv.d entries\fR. They will be installed into the
+correct location for Gentoo env.d entries (/etc/env.d/). Creates all
+necessary dirs.
+
+.PD 0
+.TP
+.B dolib\fR \fI<library>\fR \fI[list of more libraries]
+.TP
+.B dolib.a\fR \fI<library>\fR \fI[list of more libraries]
+.TP
+.B dolib.so\fR \fI<library>\fR \fI[list of more libraries]
+.PD 1
+Installs a library or a list of libraries into \fIDESTTREE\fR/lib.
+Creates all necessary dirs.
+.TP
+.B libopts\fR \fI[options for install(1)]
+Can be used to define options for the install function used in
+the \fBdolib\fR functions. The default is \fI\-m0644\fR.
+.TP
+.B doman\fR \fI[\-i18n=<locale>]\fR \fI<man\-page> [list of more man\-pages]
+Installs manual\-pages into /usr/share/man/man[0\-9n] depending on the
+manual file ending. The files are compressed if they are not already. You
+can specify locale\-specific manpages with the \fI\-i18n\fR option. Then the
+man\-page will be installed into /usr/share/man/\fI<locale>\fR/man[0\-9n].
+Beginning with \fBEAPI 2\fR, a locale\-specific manpage which contains a locale
+in the file name will be installed in /usr/share/man/\fI<locale>\fR/man[0\-9n],
+with the locale portion of the file name removed, and the \fI\-i18n\fR option
+has no effect. For example, with \fBEAPI 2\fR, a manpage named
+foo.\fI<locale>\fR.1 will be installed as
+/usr/share/man/\fI<locale>\fR/man1/foo.1. Beginning with \fBEAPI 4\fR,
+the \fI\-i18n\fR option takes precedence over the locale suffix of the
+file name.
+
+.PD 0
+.TP
+.B dohard\fR \fI<filename> <linkname>
+Beginning with \fBEAPI 4\fR, the \fBdohard\fR helper no longer exists. Ebuilds
+should call \fBln(1)\fR directly.
+.TP
+.B dosym\fR \fI<filename> <linkname>
+.PD 1
+Performs the ln command to create a symlink.
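+
+Example (library names are illustrative):
+.nf
+	dosym libfoo.so.1 /usr/lib/libfoo.so
+.fi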
+.TP
+.B doheader\fR \fI[\-r] <file> [list of more files]
+Installs the given header files into /usr/include/, by default
+with file mode \fI0644\fR (this can be overridden with the
+\fBinsopts\fR function). Setting \-r sets recursive. The
+\fBdoheader\fR helper is available beginning with \fBEAPI 5\fR.
+.TP
+.B dohtml\fR \fI [\-a filetypes] [\-r] [\-x list\-of\-dirs\-to\-ignore] \
+[list\-of\-files\-and\-dirs]
+Installs the files in the list of files (space\-separated list) into
+/usr/share/doc/${PF}/html provided the file ends in .htm, .html, .css, .js, \
+.gif, .jpeg, .jpg, or .png.
+Setting \fI\-a\fR limits what types of files will be included,
+\fI\-A\fR appends to the default list, setting \fI\-x\fR sets which dirs to
+exclude (CVS excluded by default), \fI\-p\fR sets a document prefix,
+\fI\-r\fR sets recursive.
+.TP
+.B doinfo\fR \fI<info\-file> [list of more info\-files]
+Installs info\-pages into \fIDESTTREE\fR/info. Files are automatically
+gzipped. Creates all necessary dirs.
+.TP
+.B domo\fR \fI<locale\-file> [list of more locale\-files]
+Installs locale\-files into \fIDESTTREE\fR/usr/share/locale/[LANG]
+depending on the locale\-file's ending. Creates all necessary dirs.
+
+.PD 0
+.TP
+.B fowners\fR \fI<permissions> <file> [files]
+.TP
+.B fperms\fR \fI<permissions> <file> [files]
+.PD 1
+Performs chown (\fBfowners\fR) or chmod (\fBfperms\fR), applying
+\fIpermissions\fR to \fIfiles\fR.
+.TP
+.B insinto\fR \fI[path]
+Sets the destination path for the \fBdoins\fR function.
+.br
+The default path is /.
+.TP
+.B insopts\fR \fI[options for install(1)]
+Can be used to define options for the install function used in
+\fBdoins\fR. The default is \fI\-m0644\fR.
+.TP
+.B doins\fR \fI[\-r] <file> [list of more files]
+Installs files into the path controlled by \fBinsinto\fR. This function
+uses \fBinstall\fR(1). Creates all necessary dirs.
+Setting \-r sets recursive. Beginning with \fBEAPI 4\fR, both
+\fBdoins\fR and \fBnewins\fR preserve symlinks. In \fBEAPI 3\fR and
+earlier, symlinks are dereferenced rather than preserved.
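+
+Example (paths and file names are illustrative):
+.nf
+	insinto /usr/share/${PN}
+	doins data/foo.dat
+.fi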
+.TP
+.B exeinto\fR \fI[path]
+Sets the destination path for the \fBdoexe\fR function.
+.br
+The default path is /.
+.TP
+.B exeopts\fR \fI[options for install(1)]
+Can be used to define options for the install function used in \fBdoexe\fR.
+The default is \fI\-m0755\fR.
+.TP
+.B doexe\fR \fI<executable> [list of more executables]
+Installs executables into the path controlled by \fBexeinto\fR. This function
+uses \fBinstall\fR(1). Creates all necessary dirs.
+.TP
+.B docinto\fR \fI[path]
+Sets the subdir used by \fBdodoc\fR and \fBdohtml\fR
+when installing into the document tree
+(based in /usr/share/doc/${PF}/). Default is no subdir, or just "".
+.TP
+.B dodoc\fR \fI[-r] <document> [list of more documents]
+Installs a document or a list of documents into
+/usr/share/doc/${PF}/\fI<docinto path>\fR.
+Documents are marked for compression. Creates all necessary dirs.
+Beginning with \fBEAPI 4\fR, there is support for recursion, enabled by the
+new \fI\-r\fR option.
+
+.PD 0
+.TP
+.B newbin\fR \fI<old file> <new filename>
+.TP
+.B newsbin\fR \fI<old file> <new filename>
+.TP
+.B newinitd\fR \fI<old file> <new filename>
+.TP
+.B newconfd\fR \fI<old file> <new filename>
+.TP
+.B newenvd\fR \fI<old file> <new filename>
+.TP
+.B newlib.so\fR \fI<old file> <new filename>
+.TP
+.B newlib.a\fR \fI<old file> <new filename>
+.TP
+.B newman\fR \fI<old file> <new filename>
+.TP
+.B newins\fR \fI<old file> <new filename>
+.TP
+.B newexe\fR \fI<old file> <new filename>
+.TP
+.B newdoc\fR \fI<old file> <new filename>
+.PD 1
+All these functions act like the do* functions, but they only work with one
+file and the file is installed as \fI[new filename]\fR.
+Beginning with \fBEAPI 5\fR, standard input is read when the
+first parameter is \- (a hyphen).
+
+.SH "EXAMPLES"
+.DS
+.nf
+# Copyright 1999\-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+EAPI="5"
+
+inherit some_eclass another_eclass
+
+DESCRIPTION="Super\-useful stream editor (sed)"
+HOMEPAGE="http://www.gnu.org/software/sed/sed.html"
+SRC_URI="ftp://alpha.gnu.org/pub/gnu/${PN}/${P}.tar.gz"
+
+LICENSE="GPL\-2"
+SLOT="0"
+KEYWORDS="~x86"
+IUSE=""
+
+RDEPEND=""
+DEPEND="nls? ( sys-devel/gettext )"
+
+src_configure() {
+ econf \\
+ \-\-bindir="${EPREFIX}"/bin
+}
+
+src_install() {
+ emake DESTDIR="${D}" install
+ dodoc NEWS README* THANKS AUTHORS BUGS ChangeLog
+}
+.fi
+.DE
+
+.SH "FILES"
+.TP
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
+.TP
+The helper apps in \fI/usr/lib/portage/bin\fR.
+.TP
+.B /etc/portage/make.conf
+Contains variables for the build\-process and overwrites those in
+make.defaults.
+.TP
+.B /usr/share/portage/config/make.globals
+Contains the default variables for the build\-process, you should edit
+\fI/etc/portage/make.conf\fR instead.
+.TP
+.B /etc/portage/color.map
+Contains variables customizing colors.
+
+.SH "SEE ALSO"
+.BR ebuild (1),
+.BR make.conf (5),
+.BR color.map (5)
+
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+
+.SH "AUTHORS"
+.nf
+Achim Gottinger <achim@gentoo.org>
+Mark Guertin <gerk@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+Fabian Groffen <grobian@gentoo.org>
+.fi
diff --git a/usr/share/man/man5/make.conf.5 b/usr/share/man/man5/make.conf.5
new file mode 100644
index 0000000..e18113f
--- /dev/null
+++ b/usr/share/man/man5/make.conf.5
@@ -0,0 +1,1120 @@
+.TH "MAKE.CONF" "5" "Apr 2014" "Portage 2.2.14-prefix" "Portage"
+.SH "NAME"
+make.conf \- custom settings for Portage
+.SH "SYNOPSIS"
+\fB/etc/make.conf\fR and \fB/etc/portage/make.conf\fR
+.SH "DESCRIPTION"
+
+This file contains various variables that are used by Portage. The file has a
+newline\-delimited list of \fI<key>=<value>\fR pairs (see the default file for
+examples) which are accessible from the environment of ebuilds. It supports
+simple shell\-like expansion of the form \fIvar="${var}"\fR, the source
+keyword and variable substitution, but not some of the more advanced BASH
+features like arrays and special parameter expansions. For more details, see
+the Simple lexical analysis documentation:
+\fLhttp://docs.python.org/3/library/shlex.html\fR. Note that if you source
+files, they need to be in the same shlex syntax for portage to read them.
+.br
+Portage will check the currently\-defined environment variables
+first for any settings. If no environment settings are found,
+Portage then checks the make.conf files. Both /etc/make.conf and
+/etc/portage/make.conf are checked (if present), and settings from
+/etc/portage/make.conf will override settings from /etc/make.conf.
+If no setting is found in the make.conf files, Portage checks
+make.globals. If no
+setting is found there, the profile's default setting is grabbed
+from /etc/portage/make.profile/make.defaults. Please note that all user
+settings should be made in the environment or in the make.conf
+files, which are intended to be customized by the user.
+.br
+Exceptions are incremental variables such as USE, CONFIG_PROTECT*,
+and ACCEPT_KEYWORDS. Incremental variables are propagated down from
+make.defaults to make.globals to make.conf to the environment
+settings. Clearing these variables requires a clear\-all as in:
+export USE="\-*"
+.br
+In order to create per\-package environment settings, refer to
+\fBpackage.env\fR in \fBportage\fR(5).
+.SH "VARIABLES"
+.TP
+\fBACCEPT_CHOSTS\fR = \fI[space delimited list of CHOST values]\fR
+Specifies acceptable \fBCHOST\fR values. Regular
+expression syntax is supported, so it is necessary to escape
+\fBCHOST\fR characters if they have special meaning in regular expressions.
+.br
+Defaults to the value of $CHOST.
+.TP
+\fBACCEPT_KEYWORDS\fR = \fI[space delimited list of KEYWORDS]\fR
+Enable testing of ebuilds that have not yet been deemed 'stable'. Users
+of the 'x86' architecture would set this to '~x86' while ppc users would
+set this to '~ppc'. This is an incremental variable. Only define a
+~arch.
+.br
+Defaults to the value of $ARCH.
+.TP
+\fBACCEPT_LICENSE\fR = \fI[space delimited list of licenses or groups]\fR
+This variable is used to mask packages based on licensing restrictions. It
+may contain both license and group names, where group names are prefixed
+with the '@' symbol. License groups are defined in the \fIlicense_groups\fR
+file (see \fBportage\fR(5)). In addition to license and group names, the
+\fI*\fR and \fI-*\fR wildcard tokens are also supported. Refer to GLEP 23
+for further information:
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0023.html\fR.
+.br
+Defaults to the value of * -@EULA.
+.br
+.I Examples:
+.nf
+# Only accept licenses in the FREE license group (i.e. Free Software)
+ACCEPT_LICENSE="-* @FREE"
+# As before, but exclude the "Artistic" license
+ACCEPT_LICENSE="-* @FREE -Artistic"
+# Accept any license except those in the EULA license group (default)
+ACCEPT_LICENSE="* -@EULA"
+.fi
+.TP
+\fBACCEPT_PROPERTIES\fR = \fI[space delimited list of properties]\fR
+This variable is used to mask packages based on PROPERTIES restrictions.
+In addition to property names, the \fI*\fR and \fI-*\fR wildcard tokens are
+also supported. This variable can be temporarily overridden using the
+\fB\-\-accept\-properties\fR option of \fBemerge\fR(1).
+See \fBebuild\fR(5) for more information about PROPERTIES.
+.br
+Defaults to the value of *.
+.br
+.I Examples:
+.nf
+# Accept any properties
+ACCEPT_PROPERTIES="*"
+# Accept any properties except the "interactive" property
+ACCEPT_PROPERTIES="* -interactive"
+.fi
+.TP
+\fBACCEPT_RESTRICT\fR = \fI[space delimited list of RESTRICT tokens]\fR
+This variable is used to mask packages based on RESTRICT tokens.
+In addition to RESTRICT tokens, the \fI*\fR and \fI-*\fR wildcard tokens are
+also supported. This variable can be temporarily overridden using the
+\fB\-\-accept\-restrict\fR option of \fBemerge\fR(1).
+See \fBebuild\fR(5) for more information about RESTRICT.
+.br
+Defaults to the value of *.
+.br
+.I Examples:
+.nf
+# Accept any restrict tokens
+ACCEPT_RESTRICT="*"
+# Accept any tokens except "bindist"
+ACCEPT_RESTRICT="* -bindist"
+.fi
+.TP
+.B CBUILD
+This variable is passed by the \fIebuild scripts\fR to the \fIconfigure\fR
+step as \fI\-\-build=${CBUILD}\fR only if it is defined. Do not set this
+yourself unless you know what you are doing.
+.TP
+\fBCCACHE_DIR\fR = \fI[path]\fR
+Defines the location of the ccache working directory. See the \fBccache\fR(1)
+man page for more information.
+.br
+Defaults to /var/tmp/ccache
+.TP
+\fBCCACHE_SIZE\fR = \fI"size"\fR
+This controls the space usage limit for ccache. The default is 2 gigabytes
+('2G'). Sizes are specified with 'G', 'M', or 'K'.
+.TP
+.B CFLAGS CXXFLAGS
+Use these variables to set the desired optimization/CPU instruction settings
+for applications that you compile. These two variables are passed to the C
+and C++ compilers, respectively. (CXX is used to refer to the C++ compiler
+within many buildsystems.) Nearly all ebuild files will take advantage
+of your custom settings, resulting in a Gentoo Linux that is fully customized
+to your specifications. Please use sane settings as some packages will fail to
+compile/run if the optimizations are too extreme.
+
+For more information, see the \fIInvoking GCC\fR section of the gcc manual:
+.br
+http://gcc.gnu.org/onlinedocs/
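+.br
+A conservative, commonly used setting might look like:
+.nf
+CFLAGS="\-O2 \-pipe"
+CXXFLAGS="${CFLAGS}"
+.fi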
+.TP
+.B CHOST
+This variable is passed by the \fIebuild scripts\fR to the \fIconfigure\fR
+step as \fI\-\-host=${CHOST}\fR. This way you can force the build\-host.
+
+For more information:
+.br
+http://gcc.gnu.org/onlinedocs/gcc\-4.1.1/gcc/Submodel\-Options.html
+.br
+http://gcc.gnu.org/onlinedocs/gcc\-3.3/gcc/Submodel\-Options.html
+.br
+http://gcc.gnu.org/onlinedocs/gcc\-3.2/gcc/Submodel\-Options.html
+.br
+http://gcc.gnu.org/onlinedocs/gcc\-2.95.3/gcc_2.html
+.TP
+\fBCLEAN_DELAY\fR = \fIinteger\fR
+Determines how long the countdown delay will be after running
+`emerge --unmerge`.
+.br
+Defaults to 5 seconds.
+.TP
+\fBCOLLISION_IGNORE\fR = \fI[space delimited list of fnmatch patterns]\fR
+This variable allows the user to disable \fIcollision\-protect\fR and
+\fIprotect\-owned\fR for specific \fBfnmatch\fR(3) patterns. For backward
+compatibility, directories that are listed without a fnmatch pattern will
+automatically have /* appended to them.
+.br
+Defaults to "/lib/modules/* *.py[co]".
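+.br
+.I Example (the extra pattern is illustrative):
+.nf
+# keep the defaults and additionally ignore a locally managed directory
+COLLISION_IGNORE="/lib/modules/* *.py[co] /usr/local/overrides/*"
+.fi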
+.TP
+\fBCONFIG_PROTECT\fR = \fI[space delimited list of files and/or directories]\fR
+All files and/or directories that are defined here will have "config file
+protection" enabled for them. See the \fBCONFIGURATION FILES\fR section
+of \fBemerge\fR(1) for more information.
+Note that if an offset prefix (\fBEPREFIX\fR) is activated, all paths defined
+in \fBCONFIG_PROTECT\fR are prefixed by Portage with the offset before
+they are used.
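+.br
+.I Example (the path is illustrative):
+.nf
+# protect an additional directory; CONFIG_PROTECT is incremental
+CONFIG_PROTECT="/usr/share/myapp/config"
+.fi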
+.TP
+\fBCONFIG_PROTECT_MASK\fR = \fI[space delimited list of files and/or \
+directories]\fR
+All files and/or directories that are defined here will have "config file
+protection" disabled for them. See the \fBCONFIGURATION FILES\fR section
+of \fBemerge\fR(1) for more information.
+.TP
+.B CTARGET
+This variable is passed by the \fIebuild scripts\fR to the \fIconfigure\fR
+step as \fI\-\-target=${CTARGET}\fR only if it is defined.
+.TP
+.B DCO_SIGNED_OFF_BY
+This variable may contain a name and email address which will be used by
+\fBrepoman\fR(1) to add a Signed\-off\-by line to each commit message.
+.TP
+\fBDISTDIR\fR = \fI[path]\fR
+Defines the location of your local source file repository. After packages
+are built, it is safe to remove any and all files from this directory since
+they will be automatically fetched on demand for a given build. If you would
+like to selectively prune obsolete files from this directory, see
+\fBeclean\fR(1) from the gentoolkit package.
+
+Use the \fBPORTAGE_RO_DISTDIRS\fR variable to specify one or
+more read-only directories containing distfiles.
+
+Note
+that locations under /usr/portage are not necessarily safe for data storage.
+See the \fBPORTDIR\fR documentation for more information.
+.br
+Defaults to /usr/portage/distfiles.
+.TP
+.B DOC_SYMLINKS_DIR
+If this variable contains a directory then symlinks to html documentation will
+be installed into it.
+.TP
+.B EBEEP_IGNORE
+Defines whether or not to ignore audible beeps when displaying important
+informational messages. This variable is unset by default.
+.TP
+.B EMERGE_DEFAULT_OPTS
+Options to append to the end of the \fBemerge\fR(1) command line on every
+invocation. These options will not be appended to the command line if
+\-\-ignore\-default\-opts is specified.
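+.br
+.I Example:
+.nf
+# always ask for confirmation and show verbose output
+EMERGE_DEFAULT_OPTS="\-\-ask \-\-verbose"
+.fi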
+.TP
+.B EMERGE_LOG_DIR
+Controls the location of emerge.log and emerge-fetch.log.
+.br
+Defaults to /var/log.
+.TP
+\fBEMERGE_WARNING_DELAY\fR = \fIinteger\fR
+Determines how long the countdown delay will be after running
+`emerge --unmerge` for a system package.
+.br
+Defaults to 10 seconds.
+.TP
+.B EPAUSE_IGNORE
+Defines whether or not to ignore short pauses that occur when displaying
+important informational messages. This variable is unset by default.
+If it is set to any value, pauses are ignored.
+.TP
+\fBEXTRA_ECONF\fR = \fI[configure options string]\fR
+Contains additional options that \fBeconf\fR will append to configure
+script arguments (see \fBebuild\fR(5)).
+.TP
+\fBFEATURES\fR = \fI"sandbox"\fR
+Defines actions portage takes by default. This is an incremental variable.
+Most of these settings are for developer use, but some are available to
+non-developers as well. The \fBsandbox\fR feature is very important and
+should not be disabled by default.
+.RS
+.TP
+.B assume\-digests
+When committing work to cvs with \fBrepoman\fR(1), assume that all existing
+SRC_URI digests are correct. This feature also affects digest generation via
+\fBebuild\fR(1) and \fBemerge\fR(1) (emerge generates digests only when the
+\fIdigest\fR feature is enabled). Existing digests for files that do not exist
+in ${DISTDIR} will be automatically assumed even when \fIassume\-digests\fR is
+not enabled. If a file exists in ${DISTDIR} but its size does not match the
+existing digest, the digest will be regenerated regardless of whether or
+not \fIassume\-digests\fR is enabled. The \fBebuild\fR(1) \fBdigest\fR command
+has a \fB\-\-force\fR option that can be used to force regeneration of digests.
+.TP
+.B binpkg\-logs
+Keep logs from successful binary package merges. This is relevant only when
+\fBPORT_LOGDIR\fR is set.
+.TP
+.B buildpkg
+Binary packages will be created for all packages that are merged. Also see
+\fBquickpkg\fR(1) and \fBemerge\fR(1) \fB\-\-buildpkg\fR and
+\fB\-\-buildpkgonly\fR options.
+.TP
+.B buildsyspkg
+Build binary packages for just packages in the system set.
+.TP
+.B candy
+Enable a special progress indicator when \fBemerge\fR(1) is calculating
+dependencies.
+.TP
+.B case\-insensitive\-fs
+Use case\-insensitive file name comparisons when merging and unmerging
+files.
+.TP
+.B ccache
+Enable portage support for the ccache package. If the ccache dir is not
+present in the user's environment, then portage will default to
+${PORTAGE_TMPDIR}/ccache.
+
+\fBWarning\fR: This feature is known to cause numerous compilation failures.
+Sometimes ccache will retain stale code objects or corrupted files, which can
+lead to packages that cannot be emerged. If this happens (if you receive errors
+like "File not recognized: File truncated"), try recompiling the application
+with ccache disabled before reporting a bug. Unless you are doing development
+work, do not enable ccache.
+.TP
+.B cgroup
+Use a Linux control group to control processes spawned by ebuilds. This allows
+emerge to safely kill all subprocesses when an ebuild phase exits.
+.TP
+.B clean\-logs
+Enable automatic execution of the command specified by the
+PORT_LOGDIR_CLEAN variable. The default PORT_LOGDIR_CLEAN setting will
+remove all files from PORT_LOGDIR that were last modified at least 7
+days ago.
+.TP
+.B collision\-protect
+A QA\-feature to ensure that a package doesn't overwrite files it doesn't own.
+The \fICOLLISION_IGNORE\fR variable can be used to selectively disable this
+feature. Also see the related \fIprotect\-owned\fR feature.
+.TP
+.B compress\-build\-logs
+This causes all build logs to be compressed while they are being written.
+Log file names have an extension that is appropriate for the compression
+type. Currently, only \fBgzip\fR(1) compression is supported, so build
+logs will have a '.gz' extension when this feature is enabled.
+.TP
+.B compress\-index
+If set, a compressed copy of the 'Packages' index file will be written.
+This feature is intended for Gentoo binhosts using certain webservers
+(such as, but not limited to, Nginx with the gzip_static module) to avoid
+redundant on\-the\-fly compression. The resulting file will be called
+'Packages.gz' and its modification time will match that of 'Packages'.
+.TP
+.B compressdebug
+Compress the debug sections in the split debug files with zlib to save
+space. Make sure you have built both binutils and gdb with USE=zlib
+support for this to work. See \fBsplitdebug\fR for general split debug
+information (upon which this feature depends).
+.TP
+.B config\-protect\-if\-modified
+This causes the \fBCONFIG_PROTECT\fR behavior to be skipped for files
+that have not been modified since they were installed. This feature is
+enabled by default.
+.TP
+.B digest
+Autogenerate digests for packages when running the
+\fBemerge\fR(1), \fBebuild\fR(1), or \fBrepoman\fR(1) commands. If
+the \fIassume\-digests\fR feature is also enabled then existing SRC_URI digests
+will be reused whenever they are available.
+.TP
+.B distcc
+Enable portage support for the distcc package.
+.TP
+.B distcc\-pump
+Enable portage support for the distcc package with pump mode.
+.TP
+.B distlocks
+Portage uses lockfiles to ensure competing instances don't clobber
+each other's files. This feature is enabled by default but may cause
+heartache on less intelligent remote filesystems like NFSv2 and some
+strangely configured Samba servers (oplocks off, NFS re\-export). A tool
+/usr/lib/portage/bin/clean_locks exists to help handle lock issues
+when a problem arises (normally due to a crash or disconnect).
+.TP
+.B downgrade\-backup
+When a package is downgraded to a lower version, call \fBquickpkg\fR(1)
+in order to create a backup of the installed version before it is
+unmerged (if a binary package of the same version does not already
+exist). Also see the related \fIunmerge\-backup\fR feature.
+.TP
+.B ebuild\-locks
+Use locks to ensure that unsandboxed ebuild phases never execute
+concurrently. Also see \fIparallel\-install\fR.
+.TP
+.B fail\-clean
+Clean up temporary files after a build failure. This is particularly useful
+if you have \fBPORTAGE_TMPDIR\fR on tmpfs. If this feature is enabled, you
+probably also want to enable \fBPORT_LOGDIR\fR in order to save the build log.
+Both the \fBebuild\fR(1) command and the \fInoclean\fR feature cause the
+\fIfail\-clean\fR feature to be automatically disabled.
+.TP
+.B fakeroot
+Enable fakeroot for the install and package phases when a non-root user runs
+the \fBebuild\fR(1) command.
+.TP
+.B fixlafiles
+Modifies .la files so that they do not include other .la files, and applies
+some other fixes (order of flags, duplicated entries, ...).
+.TP
+.B force\-mirror
+Only fetch files from configured mirrors, ignoring \fBSRC_URI\fR,
+except when \fImirror\fR is in the \fBebuild\fR(5) \fBRESTRICT\fR variable.
+.TP
+.B force\-prefix
+Enable prefix support for all ebuilds, regardless of EAPI, since older EAPIs
+would otherwise be useless with prefix configurations. This brings
+compatibility with the prefix branch of portage, which also supports EPREFIX
+for all EAPIs (for obvious reasons).
+.TP
+.B getbinpkg
+Force emerges to always try to fetch files from the \fIPORTAGE_BINHOST\fR. See
+\fBmake.conf\fR(5) for more information.
+.TP
+.B installsources
+Install source code into /usr/src/debug/${CATEGORY}/${PF} (also see
+\fBsplitdebug\fR). This feature works only if debugedit is installed and CFLAGS
+is set to include debug information (such as with the \-ggdb flag).
+.TP
+.B ipc\-sandbox
+Isolate the ebuild phase functions from the host IPC namespace. Supported
+only on Linux. Requires IPC namespace support in the kernel.
+.TP
+.B keeptemp
+Do not delete the ${T} directory after the merge process.
+.TP
+.B keepwork
+Do not delete the ${WORKDIR} directory after the merge process. ${WORKDIR} can
+then be reused since this feature disables most of the clean phase that runs
+prior to each build. Due to lack of proper cleanup, this feature can
+interfere with normal emerge operation and therefore it should not be left
+enabled for more than a short period of time.
+.TP
+.B lmirror
+When \fImirror\fR is enabled in \fBFEATURES\fR, fetch files even
+when \fImirror\fR is also in the \fBebuild\fR(5) \fBRESTRICT\fR variable.
+Do \fBNOT\fR use \fIlmirror\fR for clients that need to override \fBRESTRICT\fR
+when fetching from a local mirror, but instead use a "local" mirror setting
+in \fI/etc/portage/mirrors\fR, as described in \fBportage\fR(5).
+.TP
+.B merge\-sync
+After a package is merged or unmerged, sync relevant files to
+disk in order to avoid data\-loss in the event of a power failure.
+This feature is enabled by default.
+.TP
+.B metadata\-transfer
+Automatically perform a metadata transfer when `emerge \-\-sync` is run.
+In versions of portage >=2.1.5, this feature is disabled by
+default. When metadata\-transfer is disabled, metadata cache from the
+${repository_location}/metadata/md5\-cache/ directory will be used directly
+(if available).
+.TP
+.B mirror
+Fetch everything in \fBSRC_URI\fR regardless of \fBUSE\fR settings,
+except do not fetch anything when \fImirror\fR is in \fBRESTRICT\fR.
+.TP
+.B multilib\-strict
+Many Makefiles assume that their libraries should go to /usr/lib, or
+$(prefix)/lib. This assumption can cause a serious mess if /usr/lib
+isn't a symlink to /usr/lib64. To find the bad packages, we have a
+portage feature called \fImultilib\-strict\fR. It will prevent emerge
+from putting 64bit libraries into anything other than (/usr)/lib64.
+.TP
+.B network\-sandbox
+Isolate the ebuild phase functions from the host network interfaces.
+Supported only on Linux. Requires network namespace support in the kernel.
+.TP
+.B news
+Enable GLEP 42 news support. See
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0042.html\fR.
+.TP
+.B noauto
+When utilizing \fBebuild\fR(1), only run the function requested. Also, forces
+the corresponding ebuild and eclasses to be sourced again for each phase, in
+order to account for modifications.
+.TP
+.B noclean
+Do not delete the source and temporary files after the merge process.
+.TP
+.B nodoc
+Do not install doc files (/usr/share/doc).
+.TP
+.B noinfo
+Do not install info pages.
+.TP
+.B noman
+Do not install manpages.
+.TP
+.B nostrip
+Prevents the stripping of binaries that are merged to the live filesystem.
+.TP
+.B notitles
+Disables xterm titlebar updates (which contain status info).
+.TP
+.B parallel\-fetch
+Fetch in the background while compiling. Run
+`tail \-f /var/log/emerge\-fetch.log` in a
+terminal to view parallel-fetch progress.
+.TP
+.B parallel\-install
+Use finer\-grained locks when installing packages, allowing for greater
+parallelization. For additional parallelization, disable
+\fIebuild\-locks\fR.
+.TP
+.B prelink\-checksums
+If \fBprelink\fR(8) is installed then use it to undo any prelinks on files
+before computing checksums for merge and unmerge. This feature is
+useful only if \fBprelink\fR(8) is installed and accurate checksums (despite
+prelinking) are needed for some reason such as for checking the integrity of
+installed files or because the \fIunmerge\-orphans\fR feature is disabled.
+
+Note that normal emerging of packages from source computes the
+checksums before anything is prelinked, so in such cases this
+feature isn't required either. Undoing prelinking while merging is
+only required when using tools like \fBquickpkg\fR(1) which can cause
+already prelinked files to be merged.
+.TP
+.B preserve\-libs
+Preserve libraries when the sonames change during upgrade or downgrade.
+Libraries are preserved only if consumers of those libraries are detected.
+Preserved libraries are automatically removed when there are no remaining
+consumers. Run `emerge @preserved\-rebuild` in order to rebuild all
+consumers of preserved libraries.
+.TP
+.B protect\-owned
+This is identical to the \fIcollision\-protect\fR feature except that files
+may be overwritten if they are not explicitly listed in the contents of a
+currently installed package. This is particularly useful on systems that
+have lots of orphan files that have been left behind by older versions
+of portage that did not support the \fIunmerge\-orphans\fR feature. Like
+\fIcollision\-protect\fR, the \fICOLLISION_IGNORE\fR variable can be used to
+selectively disable this feature. It is recommended to leave either
+\fIprotect\-owned\fR or \fIcollision\-protect\fR enabled at all times,
+since otherwise file collisions between packages may result in files being
+overwritten or uninstalled at inappropriate times.
+If \fIcollision\-protect\fR is enabled then it takes precedence over
+\fIprotect\-owned\fR.
+.TP
+.B python\-trace
+Output a verbose trace of python execution to stderr when a command's
+\-\-debug option is enabled.
+.TP
+.B sandbox
+Enable sandbox\-ing when running \fBemerge\fR(1) and \fBebuild\fR(1).
+On Mac OS X platforms that have /usr/bin/sandbox-exec available (10.5
+and later), this particular sandbox implementation is used instead of
+sys-apps/sandbox.
+.TP
+.B sesandbox
+Enable SELinux sandbox\-ing. Do not toggle this \fBFEATURE\fR yourself.
+.TP
+.B sfperms
+Stands for Smart Filesystem Permissions. Before merging packages to the
+live filesystem, automatically search for and set permissions on setuid
+and setgid files. Files that are setuid have the group and other read
+bits removed while files that are setgid have the other read bit removed.
+See also \fIsuidctl\fR below.
+.TP
+.B sign
+When committing work to cvs with \fBrepoman\fR(1), sign the Manifest with
+a GPG key. Read about the \fIPORTAGE_GPG_KEY\fR variable in
+\fBmake.conf\fR(5).
+.TP
+.B skiprocheck
+Skip write access checks on \fBDISTDIR\fR when fetching files. This is
+useful when \fBFETCHCOMMAND\fR and \fBRESUMECOMMAND\fR are used to
+forward fetch requests to a server that exposes \fBDISTDIR\fR as
+a read-only NFS share. A read-only \fBDISTDIR\fR is not compatible with the
+\fIdistlocks\fR feature, so it is recommended to also add "-distlocks" to
+\fBFEATURES\fR in order to avoid warning messages that are triggered by this
+incompatibility.
+.TP
+.B split\-elog
+Store logs created by \fBPORTAGE_ELOG_SYSTEM="save"\fR in category
+subdirectories of \fBPORT_LOGDIR/elog\fR, instead of using
+\fBPORT_LOGDIR/elog\fR directly.
+.TP
+.B split\-log
+Store build logs in category subdirectories of \fBPORT_LOGDIR/build\fR,
+instead of using \fBPORT_LOGDIR\fR directly.
+.TP
+.B splitdebug
+Prior to stripping ELF ET_DYN and ET_EXEC files, the debugging info is
+stored for later use by various debuggers. This feature is disabled by
+\fBnostrip\fR. You should also consider setting \fBcompressdebug\fR so
+the files don't suck up a lot of space. For installation of source code,
+see \fBinstallsources\fR.
+.TP
+.B strict
+Have portage react strongly to conditions that have the potential to be
+dangerous (like missing or incorrect digests for ebuilds).
+.TP
+.B stricter
+Have portage react strongly to conditions that may conflict with system
+security provisions (for example textrels, executable stack). Read about
+the \fIQA_STRICT_*\fR variables in \fBmake.conf\fR(5).
+.TP
+.B suidctl
+Before merging packages to the live filesystem, automatically strip setuid
+bits from any file that is not listed in \fI/etc/portage/suidctl.conf\fR.
+.TP
+.B test
+Run package\-specific tests during each merge to help make sure
+the package compiled properly. See \fItest\fR in \fBebuild\fR(1)
+and \fIsrc_test()\fR in \fBebuild\fR(5). This feature implies the "test"
+\fBUSE\fR flag if it is a member of \fBIUSE\fR, either explicitly or
+implicitly (see \fBebuild\fR(5) for more information about \fBIUSE\fR).
+The "test" \fBUSE\fR flag is also automatically disabled when the
+"test" feature is disabled.
+.TP
+.B test\-fail\-continue
+If "test" is enabled \fBFEATURES\fR and the test phase of an ebuild fails,
+continue to execute the remaining phases as if the failure had not occurred.
+Note that the test phase for a specific package may be disabled by masking
+the "test" \fBUSE\fR flag in \fBpackage.use.mask\fR (see \fBportage\fR(5)).
+.TP
+.B unknown\-features\-filter
+Filter out any unknown values that the FEATURES variable contains.
+.TP
+.B unknown\-features\-warn
+Warn if FEATURES contains one or more unknown values.
+.TP
+.B unmerge\-backup
+Call \fBquickpkg\fR(1) to create a backup of each package before it is
+unmerged (if a binary package of the same version does not already exist).
+Also see the related \fIdowngrade\-backup\fR feature.
+.TP
+.B unmerge\-logs
+Keep logs from successful unmerge phases. This is relevant only when
+\fBPORT_LOGDIR\fR is set.
+.TP
+.B unmerge\-orphans
+If a file is not claimed by another package in the same slot and it is not
+protected by \fICONFIG_PROTECT\fR, unmerge it even if the modification time or
+checksum differs from the file that was originally installed.
+.TP
+.B userfetch
+When portage is run as root, drop privileges to portage:portage during the
+fetching of package sources.
+.TP
+.B userpriv
+Allow portage to drop root privileges and compile packages as
+portage:portage without a sandbox (unless \fIusersandbox\fR is also used).
+.TP
+.B usersandbox
+Enable the sandbox in the compile phase, when running without root privs
+(\fIuserpriv\fR).
+.TP
+.B usersync
+Drop privileges to the owner of ${repository_location} for \fBemerge\fR(1) \fB\-\-sync\fR
+operations. Note that this feature assumes that all subdirectories of
+${repository_location} have the same ownership as ${repository_location} itself.
+It is the user's responsibility to ensure correct ownership, since otherwise
+Portage would have to waste time validating ownership for each and every sync
+operation.
+.TP
+.B webrsync-gpg
+Enable GPG verification when using \fIemerge\-webrsync\fR.
+.TP
+.B xattr
+Preserve extended attributes (filesystem-stored metadata) when installing
+files (see \fBattr\fR(1)). The \fBPORTAGE_XATTR_EXCLUDE\fR variable may be
+used to exclude specific attributes from being preserved.
+.RE
+.TP
+.B FETCHCOMMAND
+This variable contains the command used for fetching package sources from
+the internet. It must contain the full path to the executable as well as the
+place\-holders \\${DISTDIR}, \\${FILE} and \\${URI}. The command should be
+written to place the fetched file at \\${DISTDIR}/\\${FILE}.
+Also see \fBRESUMECOMMAND\fR.
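+.br
+A \fBwget\fR(1) based setting, similar in spirit to the stock default,
+might look like:
+.nf
+FETCHCOMMAND="wget \-t 3 \-T 60 \-O \\"\\${DISTDIR}/\\${FILE}\\" \\"\\${URI}\\""
+.fi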
+.TP
+.B FFLAGS FCFLAGS
+Use these variables to set the desired optimization/CPU instruction settings
+for applications that you compile with a FORTRAN compiler. FFLAGS is usually
+passed to the FORTRAN 77 compiler, and FCFLAGS to any FORTRAN compiler in
+more modern build systems.
+
+For more information, see the \fIInvoking GCC\fR section of the gcc manual:
+.br
+http://gcc.gnu.org/onlinedocs/
+.TP
+\fBGENTOO_MIRRORS\fR = \fI[URIs]\fR
+Insert your space\-separated list of local mirrors here. These
+locations are used to download files before the ones listed in
+the \fIebuild scripts\fR. Merging 'mirrorselect' can help. Entries in this
+variable that have no protocol and simply start with a '/' path separator may
+be used to specify mounted filesystem mirrors.
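+.br
+.I Example (hosts and paths are illustrative):
+.nf
+GENTOO_MIRRORS="http://mirror.example.com/gentoo /mnt/gentoo\-mirror"
+.fi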
+.TP
+\fBhttp_proxy ftp_proxy RSYNC_PROXY\fR = \fI[protocol://host:port]\fR
+These variables are used by network clients such as \fBwget\fR(1) and
+\fBrsync\fR(1). They are only required if you use a
+proxy server for internet access.
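+.br
+.I Example (host and port are illustrative):
+.nf
+http_proxy="http://proxy.example.com:8080"
+ftp_proxy="${http_proxy}"
+.fi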
+.TP
+\fBINSTALL_MASK\fR = \fI[space delimited list of file names]\fR
+Use this variable if you want to selectively prevent certain files from being
+copied into your file system tree. This does not work on symlinks, but only on
+actual files. Useful if you wish to filter out files like HACKING.gz and
+TODO.gz. The \fBINSTALL_MASK\fR is processed just before a package is merged.
+Also supported is a \fBPKG_INSTALL_MASK\fR variable that behaves exactly like
+\fBINSTALL_MASK\fR except that it is processed just before creation of a binary
+package.
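+.br
+.I Example:
+.nf
+# do not install HACKING.gz or TODO.gz files
+INSTALL_MASK="HACKING.gz TODO.gz"
+.fi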
+.TP
+.B LDFLAGS
+A list of flags to pass to the compiler when the linker is invoked. See
+\fBld\fR(1) for linker flags, but don't forget that these flags will be passed
+directly to the compiler. Thus, you must use '\-Wl,' to escape flags
+which only the linker understands (see \fBgcc\fR(1)).
+
+\fB***warning***\fR
+.br
+Setting this and other *FLAGS variables arbitrarily may cause compile or
+runtime failures. Bug reports submitted when nonstandard values are
+enabled for these flags may be closed as INVALID.
+.TP
+.B MAKEOPTS
+Use this variable if you want to use parallel make. For example, if you
+have a dual\-processor system, set this variable to "\-j2" or "\-j3" for
+enhanced build performance with many packages. Suggested settings are
+between \fICPUs+1\fR and \fI2*CPUs+1\fR. In order to avoid
+excess load, the \fB\-\-load\-average\fR option is recommended.
+For more information, see \fBmake\fR(1). Also see \fBemerge\fR(1) for
+information about analogous \fB\-\-jobs\fR and \fB\-\-load\-average\fR options.
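+.br
+.I Example:
+.nf
+# three parallel jobs, backing off above a load average of 4
+MAKEOPTS="\-j3 \-\-load\-average=4"
+.fi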
+.TP
+\fBNOCOLOR\fR = \fI["true" | "false"]\fR
+Defines if color should be disabled by default.
+.br
+Defaults to false.
+.TP
+\fBPKGDIR\fR = \fI[path]\fR
+Defines the location where created .tbz2 binary packages will be
+stored when the \fBemerge\fR(1) \fB\-\-buildpkg\fR option is enabled.
+By default, a given package is stored in a subdirectory corresponding
+to its category. However, for backward compatibility with the layout
+used by older versions of portage, if the \fI${PKGDIR}/All\fR directory
+exists then all packages will be stored inside of it and symlinks to
+the packages will be created in the category subdirectories. Note
+that locations under /usr/portage are not necessarily safe for data storage.
+See the \fBPORTDIR\fR documentation for more information.
+.br
+Defaults to /usr/portage/packages.
+.TP
+.B PORT_LOGDIR
+This variable defines the directory in which per\-ebuild logs are kept.
+Logs are created only when this is set. They are stored as
+${CATEGORY}:${PF}:YYYYMMDD\-HHMMSS.log in the directory specified. If the
+directory does not exist, it will be created automatically and group
+permissions will be applied to it. If the directory already exists, portage
+will not modify its permissions.
+.TP
+.B PORT_LOGDIR_CLEAN
+This variable should contain a command for portage to call in order
+to clean PORT_LOGDIR. The command string should contain a
+\\${PORT_LOGDIR} place\-holder that will be substituted
+with the value of that variable. This variable will have no effect
+unless \fBclean\-logs\fR is enabled in \fBFEATURES\fR.
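+.br
+An illustrative setting implementing the 7\-day cleanup described above:
+.nf
+PORT_LOGDIR_CLEAN="find \\${PORT_LOGDIR} \-type f \-mtime +7 \-delete"
+.fi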
+.TP
+\fBPORTAGE_BINHOST\fR = \fI[space delimited URI list]\fR
+This is a list of hosts from which portage will grab prebuilt\-binary packages.
+Each entry in the list must specify the full address of a directory
+serving tbz2's for your system (this directory must contain a 'Packages' index
+file). This is only used when one of the get\-binary\-pkg options
+(\fB\-\-getbinpkg\fR or \fB\-\-getbinpkgonly\fR) is given to \fBemerge\fR.
+Review \fBemerge\fR(1) for more information.
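+.br
+.I Example (URI is illustrative):
+.nf
+PORTAGE_BINHOST="http://binhost.example.com/packages/x86"
+.fi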
+.TP
+\fBPORTAGE_BINHOST_HEADER_URI\fR = \
+\fI"ftp://login:pass@grp.mirror.site/pub/grp/i686/athlon\-xp/"\fR
+This variable only makes sense on a system that will serve as a binhost and
+build packages for clients. It defines the URI header field for the package
+index file which is located at ${PKGDIR}/Packages. Clients that have
+\fBPORTAGE_BINHOST\fR properly configured will be able to fetch the index and
+use the URI header field as a base URI for fetching binary packages. If the URI
+header field is not defined then the client will use its ${PORTAGE_BINHOST}
+setting as the base URI.
+.TP
+.B PORTAGE_BINPKG_FORMAT
+This variable sets the default format used for binary packages. Possible
+values are 'tar' and 'rpm', or both.
+.TP
+.B PORTAGE_BINPKG_TAR_OPTS
+This variable contains options to be passed to the tar command for creation
+of binary packages.
+.TP
+\fBPORTAGE_BUNZIP2_COMMAND\fR = \fI[bunzip2 command string]\fR
+This variable should contain a command that is suitable for portage to call
+for bunzip2 extraction operations.
+.TP
+\fBPORTAGE_BZIP2_COMMAND\fR = \fI[bzip2 command string]\fR
+This variable should contain a command that is suitable for portage to call
+for bzip2 compression operations. \fBPORTAGE_BZIP2_COMMAND\fR will also be
+called for extraction operations, with \-d appended, unless the
+\fBPORTAGE_BUNZIP2_COMMAND\fR variable is set.
+.TP
+\fBPORTAGE_CHECKSUM_FILTER\fR = \fI[space delimited list of hash names]\fR
+This variable may be used to filter the hash functions that are used to
+verify integrity of files. Hash function names are case\-insensitive, and
+the \fI*\fR and \fI\-*\fR wildcard tokens are supported.
+.br
+Defaults to the value of *.
+.br
+.I Examples:
+.nf
+# Use all available hash functions
+PORTAGE_CHECKSUM_FILTER="*"
+# Use any function except whirlpool
+PORTAGE_CHECKSUM_FILTER="* \-whirlpool"
+# Only use sha256
+PORTAGE_CHECKSUM_FILTER="\-* sha256"
+.fi
+.TP
+\fBPORTAGE_COMPRESS\fR = \fI"bzip2"\fR
+This variable contains the command used to compress documentation during the
+install phase.
+.TP
+\fBPORTAGE_COMPRESS_EXCLUDE_SUFFIXES\fR = \fI"gif htm[l]? jp[e]?g pdf png"\fR
+This variable contains a space delimited list of file suffixes for which
+matching files are excluded when the \fBPORTAGE_COMPRESS\fR command is
+called. Regular expressions are supported and the match is performed only
+against the portion of the file name which follows the last period character.
+.TP
+\fBPORTAGE_COMPRESS_FLAGS\fR = \fI"\-9"\fR
+This variable contains flags for the \fBPORTAGE_COMPRESS\fR command.
+.TP
+.B PORTAGE_ELOG_CLASSES
+.TP
+.B PORTAGE_ELOG_COMMAND
+.TP
+.B PORTAGE_ELOG_MAILFROM
+.TP
+.B PORTAGE_ELOG_MAILSUBJECT
+.TP
+.B PORTAGE_ELOG_MAILURI
+.TP
+.B PORTAGE_ELOG_SYSTEM
+Please see /usr/share/portage/config/make.conf.example for elog documentation.
+.TP
+\fBPORTAGE_FETCH_CHECKSUM_TRY_MIRRORS\fR = \fI5\fR
+Number of mirrors to try when a downloaded file has an incorrect checksum.
+.TP
+\fBPORTAGE_FETCH_RESUME_MIN_SIZE\fR = \fI350K\fR
+Minimum size of existing file for \fBRESUMECOMMAND\fR to be called. Files
+smaller than this size will be removed and \fBFETCHCOMMAND\fR will be called
+to download the file from the beginning. This is useful for helping to ensure
+that small garbage files such as html 404 pages are properly discarded. The
+variable should contain an integer number of bytes and may have a suffix such
+as K, M, or G.
+.TP
+.B PORTAGE_GPG_DIR
+The \fBgpg\fR(1) home directory that is used by \fBrepoman\fR(1)
+when \fBsign\fR is in \fBFEATURES\fR.
+.br
+Defaults to $HOME/.gnupg.
+.TP
+.B PORTAGE_GPG_KEY
+The \fBgpg\fR(1) key used by \fBrepoman\fR(1) to sign manifests
+when \fBsign\fR is in \fBFEATURES\fR. In order to sign commits with
+\fBgit\fR(1), you will need Git >=1.7.9 and your commit key will have
+to be configured by \fI`git config user.signingkey key_id`\fR.
+.TP
+.B PORTAGE_GPG_SIGNING_COMMAND
+The command used by \fBrepoman\fR(1) to sign manifests when \fBsign\fR is
+in \fBFEATURES\fR.
+.TP
+\fBPORTAGE_GRPNAME\fR = \fI[group]\fR
+Defines the groupname to use when executing in userpriv/etc... modes (i.e.
+non-root).
+.br
+Defaults to portage.
+.TP
+\fBPORTAGE_INST_GID\fR = \fI[gid]\fR
+Defines the group id when installing files via dobin/dosbin. Useful when
+running ebuild as yourself.
+.br
+Defaults to 0.
+.TP
+\fBPORTAGE_INST_UID\fR = \fI[uid]\fR
+Defines the user id when installing files via dobin/dosbin. Useful when
+running ebuild as yourself.
+.br
+Defaults to 0.
+.TP
+\fBPORTAGE_IONICE_COMMAND\fR = \fI[ionice command string]\fR
+This variable should contain a command for portage to call in order
+to adjust the io priority of portage and its subprocesses. The command
+string should contain a \\${PID} place-holder that will be substituted
+with an integer pid. For example, a value of "ionice \-c 3 \-p \\${PID}"
+will set idle io priority. For more information about ionice, see
+\fBionice\fR(1). This variable is unset by default.
+.TP
+\fBPORTAGE_NICENESS\fR = \fI[number]\fR
+The value of this variable will be added to the current nice level that
+emerge is running at. In other words, this will not set the nice level,
+it will increment it. For more information about nice levels and what
+ranges are acceptable, see \fBnice\fR(1).
+.TP
+\fBPORTAGE_RO_DISTDIRS\fR = \fI[space delimited list of directories]\fR
+When a given file does not exist in \fBDISTDIR\fR, search for the file
+in this list of directories. Search order is from left to right. Note
+that the current implementation works by creating a symlink inside
+\fBDISTDIR\fR, but that may change in the future.
+.TP
+\fBPORTAGE_RSYNC_EXTRA_OPTS\fR = \fI[rsync options string]\fR
+Additional rsync options to be used by \fBemerge \-\-sync\fR.
+.br
+Defaults to no value.
+.TP
+\fBPORTAGE_RSYNC_INITIAL_TIMEOUT\fR = \fIinteger\fR
+Used by \fBemerge \-\-sync\fR as a timeout for the initial connection to an
+rsync server.
+.br
+Defaults to 15 seconds.
+.TP
+\fBPORTAGE_RSYNC_OPTS\fR = \fI[rsync options string]\fR
+Default rsync options to be used by \fBemerge \-\-sync\fR.
+.br
+\fBDon't change this unless you know exactly what you're doing!\fR
+.br
+Defaults to "\-\-recursive \-\-links \-\-safe\-links \-\-perms \-\-times
+\-\-compress \-\-force \-\-whole\-file \-\-delete \-\-stats
+\-\-timeout=180 \-\-exclude='/distfiles' \-\-exclude='/local'
+\-\-exclude='/packages'"
+.TP
+\fBPORTAGE_RSYNC_RETRIES\fR = \fI[NUMBER]\fR
+The number of times rsync should retry on failed connections before
+giving up. If set to a negative number, then retry until all possible
+addresses are exhausted.
+.br
+Defaults to -1.
+.TP
+\fBPORTAGE_SSH_OPTS\fR = \fI[list of ssh options]\fR
+Additional ssh options to be used when portage executes ssh or sftp.
+This variable supports use of embedded quote characters to quote
+whitespace or special shell characters within arguments (embedded
+quotes must be escaped in make.conf settings).
+.br
+Defaults to no value.
+.TP
+\fBPORTAGE_SYNC_STALE\fR = \fI[NUMBER]\fR
+Defines the number of days after the last `emerge \-\-sync` that a warning
+message should be produced. A value of 0 will disable warnings.
+.br
+Defaults to 30.
+.TP
+\fBPORTAGE_TMPDIR\fR = \fI[path]\fR
+Defines the location of the temporary build directories.
+.br
+Defaults to /var/tmp.
+
+This should not be set to point anywhere under the location of any repository.
+.TP
+\fBPORTAGE_USERNAME\fR = \fI[user]\fR
+Defines the username to use when executing in userpriv/etc... modes (i.e.
+non-root).
+.br
+Defaults to portage.
+.TP
+\fBPORTAGE_WORKDIR_MODE\fR = \fI"0700"\fR
+This variable controls permissions for \fIWORKDIR\fR (see \fBebuild\fR(5)).
+.TP
+\fBPORTAGE_XATTR_EXCLUDE\fR = \fI[space delimited list of fnmatch patterns]\fR
+This variable may be used to exclude specific attributes from being preserved
+when \fBxattr\fR is in \fBFEATURES\fR.
+.br
+Defaults to "security.*" (security labels are special, see bug #461868).
+.TP
+\fBPORTDIR\fR = \fI[path]\fR
+Defines the location of the main repository. This variable is deprecated in
+favor of settings in \fBrepos.conf\fR. If you change this, you must update
+your /etc/portage/make.profile symlink accordingly.
+.br
+Defaults to /usr/portage.
+.br
+\fB***Warning***\fR
+.br
+Data stored inside \fBPORTDIR\fR is in peril of being overwritten or deleted by
+the emerge \-\-sync command. The default value of
+\fBPORTAGE_RSYNC_OPTS\fR will protect the default locations of
+\fBDISTDIR\fR and \fBPKGDIR\fR, but users are warned that any other locations
+inside \fBPORTDIR\fR are not necessarily safe for data storage. You should not
+put other data (such as overlays) in your \fBPORTDIR\fR. Portage will walk
+directory structures and may arbitrarily add invalid categories as packages.
+.TP
+\fBPORTDIR_OVERLAY\fR = \fI"[path] [different\-path] [etc...]"\fR
+Defines the locations of other repositories. This variable is deprecated in
+favor of settings in \fBrepos.conf\fR. This variable is a space\-delimited list of
+directories.
+.br
+Defaults to no value.
+.TP
+\fBQA_STRICT_EXECSTACK\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_EXECSTACK\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+\fBQA_STRICT_FLAGS_IGNORED\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_FLAGS_IGNORED\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+\fBQA_STRICT_MULTILIB_PATHS\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_MULTILIB_PATHS\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+\fBQA_STRICT_PRESTRIPPED\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_PRESTRIPPED\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+\fBQA_STRICT_TEXTRELS\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_TEXTREL\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+\fBQA_STRICT_WX_LOAD\fR = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_WX_LOAD\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
+.TP
+.B RESUMECOMMAND
+This variable contains the command used for resuming package sources that
+have been partially downloaded. It should be defined using the same format
+as \fBFETCHCOMMAND\fR, and must include any additional option(s) that may
+be necessary in order to continue a partially downloaded file located at
+\\${DISTDIR}/\\${FILE}.
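+.br
+A \fBwget\fR(1) based sketch matching the \fBFETCHCOMMAND\fR example above,
+with \-c added to continue partial downloads:
+.nf
+RESUMECOMMAND="wget \-c \-t 3 \-T 60 \-O \\"\\${DISTDIR}/\\${FILE}\\" \\"\\${URI}\\""
+.fi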
+.TP
+\fBROOT\fR = \fI[path]\fR
+Use \fBROOT\fR to specify the target root filesystem to be used for merging
+packages or ebuilds.
+Typically, you should set this variable in the environment rather than in
+\fImake.conf\fR itself. It's commonly used for creating new build
+images. Make sure you use an absolute path. Refer to the
+\fBCross-compilation\fR section of \fBebuild\fR(5) for information about
+how dependencies are handled for \fBROOT\fR.
+.br
+Defaults to /.
+.TP
+\fBRPMDIR\fR = \fI[path]\fR
+Defines the location where created RPM packages will be stored.
+.br
+Defaults to /usr/portage/rpm.
+.TP
+\fBSYNC\fR = \fI[RSYNC]\fR
+Insert your preferred rsync mirror here. This rsync server
+is used to sync the local portage tree when `emerge \-\-sync` is run.
+
+Note that the \fBSYNC\fR variable is now deprecated, and instead the
+sync\-type and sync\-uri attributes in repos.conf should be used. See
+\fBportage\fR(5) for more information.
+
+Defaults to rsync://rsync.gentoo.org/gentoo\-portage
+.RS
+.TP
+.B Usage:
+(rsync|ssh)://[username@]hostname[:port]/(module|path)
+.TP
+.B Examples:
+rsync://private\-mirror.com/portage\-module
+.br
+rsync://rsync\-user@private\-mirror.com:873/gentoo\-portage
+.br
+ssh://ssh\-user@192.168.0.1:22/usr/portage
+.br
+ssh://ssh\-user@192.168.0.1:22/\\${HOME}/portage\-storage
+.TP
+Note: For the ssh:// scheme, key\-based authentication might be of interest.
+.RE
+.TP
+\fBUNINSTALL_IGNORE\fR = \fI[space delimited list of fnmatch patterns]\fR
+This variable prevents uninstallation of files that match
+specific \fBfnmatch\fR(3) patterns. In order to ignore file
+collisions with these files at install time, the same patterns
+can be added to the \fBCOLLISION_IGNORE\fR variable.
+.br
+Defaults to "/lib/modules/*".
+.TP
+\fBUSE\fR = \fI[space delimited list of USE items]\fR
+This variable contains options that control the build behavior of several
+packages. More information can be found in \fBebuild\fR(5). Possible USE
+values can be found in \fI/usr/portage/profiles/use.desc\fR.
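+.br
+.I Example (flags shown are illustrative):
+.nf
+# enable X and gtk support, disable kde support globally
+USE="X gtk \-kde"
+.fi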
+.TP
+\fBUSE_ORDER\fR = \fI"env:pkg:conf:defaults:pkginternal:repo:env.d"\fR
+Determines the precedence of layers in the incremental stacking of the USE
+variable. Precedence decreases from left to right such that env overrides
+pkg, pkg overrides conf, and so forth.
+
+.B ***warning***
+.br
+Do not modify this value unless you're a developer and you know what
+you're doing. If you change this and something breaks, we will not help
+you fix it.
+.br
+.RS
+.TP
+.B env
+USE from the current environment variables (USE and those listed in USE_EXPAND)
+.TP
+.B pkg
+Per\-package USE from \fB/etc/portage/package.use\fR (see \fBportage\fR(5))
+.TP
+.B conf
+USE from make.conf
+.TP
+.B defaults
+USE from make.defaults and package.use in the profile
+(e.g. /etc/portage/make.profile/package.use) (see \fBportage\fR(5))
+.TP
+.B pkginternal
+USE from \fBebuild\fR(5) IUSE defaults
+.TP
+.B repo
+USE from make.defaults and package.use in the repo's profiles/ top dir
+(e.g. /usr/portage/profiles/package.use) (see \fBportage\fR(5))
+.TP
+.B env.d
+USE from the environment variables, such as LINGUAS, defined by files in
+\fI/etc/env.d/\fR
+.RE
+
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Daniel Robbins <drobbins@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Saleem Abdulrasool <compnerd@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "FILES"
+.TP
+\fB/etc/make.conf\fR and \fB/etc/portage/make.conf\fR
+Contains variables for the build\-process and overrides those in
+make.defaults.
+.TP
+.B /usr/share/portage/config/make.globals
+Contains the default variables for the build\-process; you should edit
+\fI/etc/portage/make.conf\fR instead.
+.TP
+.B /etc/portage/color.map
+Contains variables customizing colors.
+.TP
+.B /usr/portage/profiles/use.desc
+Contains a list of all global USE flags.
+.TP
+.B /usr/portage/profiles/use.local.desc
+Contains a list of all local USE variables.
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR portage (5),
+.BR ebuild (1),
+.BR ebuild (5)
+.TP
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
+.TP
+The helper apps in \fI/usr/lib/portage/bin\fR.
diff --git a/usr/share/man/man5/portage.5 b/usr/share/man/man5/portage.5
new file mode 100644
index 0000000..e3650ce
--- /dev/null
+++ b/usr/share/man/man5/portage.5
@@ -0,0 +1,1410 @@
+.TH "PORTAGE" "5" "Feb 2014" "Portage 2.2.14-prefix" "Portage"
+.SH NAME
+portage \- the heart of Gentoo
+.SH "DESCRIPTION"
+The current portage code uses many different configuration files, most of which
+are unknown to users and normal developers. Here we will try to collect all
+the odds and ends so as to help users more effectively utilize portage. This
+is a reference only for files which do not already have a man page.
+
+All files in the make.profile directory may be tweaked via parent profiles
+when using cascading profiles. For more info, please see
+http://www.gentoo.org/proj/en/releng/docs/cascading-profiles.xml
+.IP Note:
+If you are looking for information on how to emerge something, please see
+.BR emerge (1).
+.SH "SYNOPSIS"
+.TP
+\fB/etc/portage/make.profile/\fR or \fB/etc/make.profile/\fR
+site\-specific overrides go in \fB/etc/portage/profile/\fR
+.nf
+deprecated
+eapi
+make.defaults
+packages
+packages.build
+package.accept_keywords
+package.keywords
+package.mask
+package.provided
+package.unmask
+package.use
+package.use.force
+package.use.mask
+package.use.stable.force
+package.use.stable.mask
+parent
+profile.bashrc
+use.force
+use.mask
+use.stable.mask
+use.stable.force
+virtuals
+.fi
+.TP
+.BR /etc/portage/
+.nf
+bashrc
+categories
+color.map
+license_groups
+.BR make.conf (5)
+mirrors
+modules
+package.accept_keywords
+package.accept_restrict
+package.env
+package.keywords
+package.license
+package.mask
+package.properties
+package.unmask
+package.use
+repos.conf
+.fi
+.TP
+.BR /etc/portage/env/
+package-specific bashrc files
+.TP
+.BR /etc/portage/profile/
+site-specific overrides of \fB/etc/portage/make.profile/\fR
+.TP
+.BR /etc/portage/sets/
+user\-defined package sets
+.TP
+.BR /usr/portage/metadata/
+.nf
+layout.conf
+.fi
+.TP
+.BR /usr/portage/profiles/
+.nf
+arch.list
+categories
+info_pkgs
+info_vars
+license_groups
+make.defaults
+package.mask
+package.unmask
+package.use
+package.use.force
+package.use.mask
+package.use.stable.force
+package.use.stable.mask
+profiles.desc
+repo_name
+thirdpartymirrors
+use.desc
+use.force
+use.local.desc
+use.mask
+use.stable.mask
+use.stable.force
+.fi
+.TP
+.BR /usr/share/portage/config/
+.nf
+make.globals
+repos.conf
+.fi
+.TP
+.BR /var/cache/edb/
+misc internal cache files
+.TP
+.BR /var/db/pkg/
+database to track installed packages
+.TP
+.BR /var/lib/portage/
+.nf
+config
+world
+world_sets
+.fi
+.SH "GLOSSARY"
+In the following sections, some terminology may be foreign to you or used
+with a meaning specific to Portage. Please see the referenced manpages for
+more detailed explanations.
+.RS
+.TP
+.B DEPEND atom
+An atom is either of the form category/package or consists of an operator
+followed by category/package followed by a hyphen and a version specification.
+An atom might be suffixed by a slot specification.
+.br
+More reading:
+.BR ebuild (5)
+
+.B Extended Atom Syntax
+.br
+The following atom syntax extensions are only supported in user
+configuration files and command line arguments for programs such as
+\fBemerge(1)\fR:
+.RS
+.TP
+.B Repository Constraints
+Atoms with repository constraints have a '::' separator appended to the
+right side, followed by a repository name. Each repository name should
+correspond to the value of a \fBrepo_name\fR entry from one of the
+repositories configured in the \fBrepos.conf\fR file.
+
+.I Examples:
+.nf
+# match sed from the 'gentoo' repository
+sys\-apps/sed::gentoo
+# match kdelibs from the 'kde\-testing' repository
+kde\-base/kdelibs::kde\-testing
+# match empathy from the 'gnome' repository
+net\-im/empathy::gnome
+.fi
+.TP
+.B Wildcard Patterns
+Atoms containing wildcard patterns are of the form category/package, where
+the special '*' wildcard character substitutes for an arbitrary number
+of normal characters. More than one '*' character is allowed, but not two
+next to each other.
+
+.I Examples:
+.nf
+# match anything with a version containing 9999, which can be used in
+# package.mask to prevent emerge --autounmask from selecting live ebuilds
+=*/*-*9999*
+# match anything with a version containing _beta
+=*/*-*_beta*
+# match anything from the 'sys\-apps' category
+sys\-apps/*
+# match packages named 'zlib' from any category
+*/zlib
+# match any package from a category that begins with 'net\-'
+net\-*/*
+# match any package name from any category
+*/*
+# match any package from the 'gentoo' repository
+*/*::gentoo
+.fi
+.RE
+.TP
+.B KEYWORD
+Each architecture has a unique KEYWORD.
+.br
+More reading:
+.BR ebuild (5)
+.TP
+.B virtual
+A DEPEND atom that is part of the "virtual" category. Virtuals are used
+when different packages can satisfy a dependency and only one of them is
+needed.
+.br
+More reading:
+.BR ebuild (5)
+.RE
+.SH "SPECIFIC FILE DESCRIPTIONS"
+.TP
+\fB/etc/portage/make.profile/\fR or \fB/etc/make.profile/\fR
+This is usually just a symlink to the correct profile in
+\fB/usr/portage/profiles/\fR. Since it is part of the portage tree, it
+may easily be updated/regenerated by running `emerge \-\-sync`. It defines
+what a profile is (usually arch specific stuff). If you need a custom
+profile, then you should make your own \fBmake.profile\fR
+directory and populate it. However, if you just wish to override some
+settings, use \fB/etc/portage/profile/\fR (it supports all of the same file
+types that \fBmake.profile\fR does, except parent). Do NOT edit the
+settings in \fBmake.profile\fR because they WILL be lost with the next
+`emerge \-\-sync`. If both \fB/etc/portage/make.profile/\fR and
+\fB/etc/make.profile/\fR exist, then \fB/etc/portage/make.profile/\fR
+will be preferred.
+
+Any file in this directory, in the directories of other profiles, or in the
+top-level "profiles" directory that begins with "package." or "use." can be
+more than just a flat file. If it is a directory, then all the files in that
+directory will be sorted in ascending alphabetical order by file name and
+summed together as if they were a single file. Note that this behavior is
+only supported since portage-2.1.6.7, and it is not included in PMS at this
+time.
+
+.I Example:
+.nf
+${repository_location}/profiles/package.mask/removals
+${repository_location}/profiles/package.mask/testing
+.fi
+.RS
+.TP
+.BR deprecated
+The existence of this file marks a profile as deprecated, meaning it is
+not supported by Gentoo anymore. The first line must be the profile to which
+users are encouraged to upgrade, optionally followed by some instructions
+explaining how they can upgrade.
+
+.I Example:
+.nf
+default-linux/x86/2005.0
+# emerge -n '>=sys-apps/portage-2.0.51'
+# rm -f /etc/portage/make.profile
+# ln -s /usr/portage/profiles/default-linux/alpha/2005.0 \
+/etc/portage/make.profile
+.fi
+.TP
+.BR eapi
+The first line of this file specifies the \fBEAPI\fR to which files in the
+same directory conform. See \fBebuild\fR(5) for information about \fBEAPI\fR
+and related features. Beginning with \fBEAPI 5\fR, new USE
+configuration files are supported: use.stable.mask,
+use.stable.force, package.use.stable.mask and
+package.use.stable.force. These files behave similarly to
+previously supported USE configuration files, except that they
+only influence packages that are merged due to a stable keyword.
+.TP
+.BR make.defaults
+The profile default settings for Portage. The general format is described
+in \fBmake.conf\fR(5). The \fImake.defaults\fR for your profile defines a
+few specific variables too:
+
+.PD 0
+.RS
+.TP
+.BR ARCH
+Architecture type (x86/ppc/hppa/etc...).
+.TP
+\fBIUSE_IMPLICIT\fR = \fI[space delimited list of USE flags]\fR
+Defines implicit \fBIUSE\fR for ebuilds using \fBEAPI 5\fR or
+later. Flags that come from \fBUSE_EXPAND\fR or
+\fBUSE_EXPAND_UNPREFIXED\fR variables do not belong in
+\fBIUSE_IMPLICIT\fR, since \fBUSE_EXPAND_VALUES_*\fR variables
+are used to define implicit \fBIUSE\fR for those flags. See
+\fBebuild\fR(5) for more information about \fBIUSE\fR.
+.TP
+.B USERLAND = \fI"GNU"\fR
+Support BSD/cygwin/etc...
+.TP
+\fBUSE_EXPAND\fR = \fI[space delimited list of variable names]\fR
+Any variable listed here will be used to augment USE by inserting a new flag
+for every value in that variable, so USE_EXPAND="FOO" and FOO="bar bla" results
+in USE="foo_bar foo_bla".
+.TP
+\fBUSE_EXPAND_HIDDEN\fR = \fI[space delimited list of variable names]\fR
+Names of \fBUSE_EXPAND\fR variables that should not be shown in the verbose
+merge list output of the \fBemerge\fR(1) command.
+.TP
+\fBUSE_EXPAND_IMPLICIT\fR = \fI[space delimited list of variable names]\fR
+Defines \fBUSE_EXPAND\fR and \fBUSE_EXPAND_UNPREFIXED\fR
+variables for which the corresponding USE flags may have
+implicit \fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_UNPREFIXED\fR = \fI[space delimited list of variable names]\fR
+Any variable listed here will be used to augment USE by
+inserting a new flag for every value in that variable, so
+USE_EXPAND_UNPREFIXED="FOO" and FOO="bar bla" results in
+USE="bar bla".
+.TP
+\fBUSE_EXPAND_VALUES_ARCH\fR = \fI[space delimited list of ARCH values]\fR
+Defines ARCH values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_ELIBC\fR = \fI[space delimited list of ELIBC values]\fR
+Defines ELIBC values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_KERNEL\fR = \fI[space delimited list of KERNEL values]\fR
+Defines KERNEL values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_USERLAND\fR = \fI[space delimited list of USERLAND \
+values]\fR
+Defines USERLAND values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+.B ELIBC = \fI"glibc"\fR
+Support uClibc/BSD libc/etc...
+.TP
+.B PROFILE_ONLY_VARIABLES = \fI"ARCH"\fR
+Prevent critical variables from being changed by the user in make.conf
+or the env.
+.TP
+.BR PROFILE_ARCH
+Distinguish machine classes that have the same \fBARCH\fR. All sparc
+machines have ARCH=sparc but set this to either 'sparc32' or 'sparc64'.
+.TP
+.BR BOOTSTRAP_USE
+Special USE flags which may be needed when bootstrapping from stage1 to stage2.
+.RE
+.PD 1
+.TP
+.BR packages
+Provides the list of packages that compose the special \fIsystem\fR set.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line
+\- packages to be added to the system set begin with a *
+\- atoms without * only appear for legacy reasons
+.fi
+.I Note:
+In a cascading profile setup, you can remove packages in children
+profiles which were added by parent profiles by prefixing the atom with
+a '\-'.
+
+.I Example:
+.nf
+# i am a comment !
+# pull in a version of glibc less than 2.3
+*<sys\-libs/glibc\-2.3
+# pull in any version of bash
+*app\-shells/bash
+# pull in a version of readline earlier than 4.2
+*<sys\-libs/readline\-4.2
+.fi
+.TP
+.BR packages.build
+A list of packages (one per line) that make up a stage1 tarball. Really only
+useful for stage builders.
+.TP
+.BR package.provided
+A list of packages (one per line) that portage should assume have been
+provided. Useful for porting to non-Linux systems. Basically, it's a
+list that replaces the \fBemerge \-\-inject\fR syntax.
+
+For example, if you manage your own copy of a 2.6 kernel, then you can
+tell portage that 'sys-kernel/development-sources-2.6.7' is already taken
+care of and it should get off your back about it.
+
+Portage will not attempt to update a package that is listed here unless
+another package explicitly requires a version that is newer than what
+has been listed. Dependencies that are satisfied by package.provided
+entries may cause installed packages satisfying equivalent dependencies
+to be removed by \fBemerge\fR(1) \fB\-\-depclean\fR actions (see the
+\fBACTIONS\fR section of the \fBemerge\fR(1) man page for more information).
+
+Virtual packages (virtual/*) should not be specified in package.provided,
+since virtual packages themselves do not provide any files, and
+package.provided is intended to represent packages that do provide files.
+Depending on the type of virtual, it may be necessary to add an entry to the
+virtuals file and/or add a package that satisfies a virtual to
+package.provided.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line
+\- relational operators are not allowed
+\- must include a version
+.fi
+
+.I Example:
+.nf
+# you take care of the kernel
+sys-kernel/development-sources-2.6.7
+
+# you installed your own special copy of QT
+x11-libs/qt-3.3.0
+
+# you have modular X but packages want monolithic
+x11-base/xorg-x11-6.8
+.fi
+.TP
+\fBpackage.use.force\fR and \fBpackage.use.stable.force\fR
+Per\-package USE flag forcing.
+
+.I Note:
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
+a '\-'.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line with space-delimited USE flags
+.fi
+
+.I Example:
+.nf
+# force docs for GTK 2.x
+=x11\-libs/gtk+\-2* doc
+# unforce mysql support for QT
+x11\-libs/qt \-mysql
+.fi
+.TP
+\fBpackage.use.mask\fR and \fBpackage.use.stable.mask\fR
+Per\-package USE flag masks.
+
+.I Note:
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
+a '\-'.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line with space-delimited USE flags
+.fi
+
+.I Example:
+.nf
+# mask docs for GTK 2.x
+=x11\-libs/gtk+\-2* doc
+# unmask mysql support for QT
+x11\-libs/qt \-mysql
+.fi
+.TP
+.BR parent
+This contains paths to the parent profiles (one per line). They may be either
+relative (to the location of the profile) or absolute. Most commonly this file
+contains '..' to indicate the directory above. Utilized only in cascading
+profiles.
+
+When multiple parent profiles are specified, they are inherited in order from
+the first line to the last.
+
+If \fBlayout.conf\fR is new enough, you can also use the <repo>:<path>
+syntax. The <repo> is the same string as is stored in the \fBrepo_name\fR
+file (or omitted to refer to the current repo), and <path> is a subdir starting
+at profiles/.
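+
+.I Example (the second line's repository and path are illustrative):
+.nf
+..
+gentoo:targets/desktop
+.fi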
+.TP
+.BR profile.bashrc
+If needed, this file can be used to set up a special environment for ebuilds,
+different from the standard root environment. The syntax is the same as for
+any other bash script.
+.TP
+\fBuse.force\fR and \fBuse.stable.force\fR
+Some USE flags don't make sense to disable under certain conditions. Here we
+list forced flags.
+
+.I Note:
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
+a '\-'.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one USE flag per line
+.fi
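+
+.I Example:
+.nf
+# force a flag (the flags below are illustrative)
+doc
+# unforce a flag forced by a parent profile
+\-prefix
+.fi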
+.TP
+\fBuse.mask\fR and \fBuse.stable.mask\fR
+Some USE flags don't make sense on some archs (for example altivec on
+non\-ppc or mmx on non\-x86), or haven't yet been tested. Here we list
+the masked ones.
+
+.I Note:
+In a cascading profile setup, you can remove USE flags in child
+profiles that were added by parent profiles by prefixing the flag with
+a '\-'.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one USE flag per line
+.fi
+
+.I Example:
+.nf
+# mask doc
+doc
+# unmask mysql
+\-mysql
+.fi
+.TP
+.BR virtuals
+The virtuals file controls default preferences for virtuals that
+are defined via the \fBPROVIDE\fR ebuild variable (see
+\fBebuild\fR(5)). Since Gentoo now uses \fBGLEP 37\fR virtuals
+instead of \fBPROVIDE\fR virtuals, the virtuals file is
+irrelevant for all Gentoo ebuilds. However, it is still possible
+for third\-parties to distribute ebuilds that make use of
+\fBPROVIDE\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one virtual and DEPEND atom base pair per line
+.fi
+
+.I Example:
+.nf
+# use net\-mail/ssmtp as the default mta
+virtual/mta net\-mail/ssmtp
+# use app\-dicts/aspell\-en as the default dictionary
+virtual/aspell\-dict app\-dicts/aspell\-en
+.fi
+.RE
+.TP
+.BR /etc/portage/
+Any file in this directory that begins with "package." or is repos.conf can be
+more than just a flat file. If it is a directory, then all the files in that
+directory will be sorted in ascending alphabetical order by file name and
+concatenated as if they were a single file.
+
+.I Example:
+.nf
+/etc/portage/package.accept_keywords/common
+/etc/portage/package.accept_keywords/e17
+/etc/portage/package.accept_keywords/kde
+.fi
+.RS
+.TP
+.BR bashrc
+If needed, this file can be used to set up a special environment for ebuilds,
+different from the standard root environment. The syntax is the same as for
+any other bash script.
+
+Additional package-specific bashrc files can be created in /etc/portage/env.
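+
+As a minimal sketch, a bashrc hook might print a message during one build
+phase (the phase test below is illustrative; see \fBebuild\fR(5) for the
+phase names):
+.nf
+if [[ ${EBUILD_PHASE} == "compile" ]] ; then
+	einfo "Building ${CATEGORY}/${PF}"
+fi
+.fi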
+.TP
+.BR categories
+A simple list of valid categories that may be used in repositories and PKGDIR
+(see \fBmake.conf\fR(5)). This allows for custom categories to be created.
+
+.I Format:
+.nf
+\- one category per line
+.fi
+
+.I Example:
+.nf
+app\-hackers
+media\-other
+.fi
+.TP
+.BR color.map
+Contains variables customizing colors. See \fBcolor.map\fR(5).
+.TP
+.BR make.conf
+The global custom settings for Portage. See \fBmake.conf\fR(5).
+.TP
+.BR mirrors
+Whenever portage encounters a mirror:// style URI it will look up the actual
+hosts here. If the mirror set is not found here, it will check the global
+mirrors file at /usr/portage/profiles/thirdpartymirrors. You may also set a
+special mirror type called "local". This list of mirrors will be checked
+before GENTOO_MIRRORS and will be used even if the package has
+RESTRICT="mirror" or RESTRICT="fetch".
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- mirror type followed by a list of hosts
+.fi
+
+.I Example:
+.nf
+# local private mirrors used only by my company
+local ftp://192.168.0.3/mirrors/gentoo http://192.168.0.4/distfiles
+
+# people in Japan would want to use the Japanese mirror first
+sourceforge http://keihanna.dl.sourceforge.net/sourceforge
+
+# people in Taiwan would want to use the local GNU mirror first
+gnu ftp://ftp.nctu.edu.tw/UNIX/gnu/
+.fi
+.TP
+.BR modules
+This file can be used to override the metadata cache implementation. In
+practice, portdbapi.auxdbmodule is the only variable that the user will want to
+override.
+
+.I Example:
+.nf
+portdbapi.auxdbmodule = portage.cache.sqlite.database
+.fi
+
+After changing the portdbapi.auxdbmodule setting, it may be necessary to
+transfer or regenerate metadata cache. Users of the rsync tree need to
+run `emerge \-\-metadata` if they have enabled FEATURES="metadata-transfer"
+in \fBmake.conf\fR(5). In order to regenerate metadata for repositories
+not distributing pregenerated metadata cache, run `emerge \-\-regen`
+(see \fBemerge\fR(1)). If you use something like the sqlite module and want
+to keep all metadata in that format alone (useful for querying), enable
+FEATURES="metadata-transfer" in \fBmake.conf\fR(5).
+.TP
+\fBpackage.accept_keywords\fR and \fBpackage.keywords\fR
+Per\-package ACCEPT_KEYWORDS. Useful for mixing unstable packages in with a
+normally stable system or vice versa. This will allow ACCEPT_KEYWORDS to be
+augmented for a single package. If both \fBpackage.accept_keywords\fR and
+\fBpackage.keywords\fR are present, both of them will be used, and values
+from \fBpackage.accept_keywords\fR will override values from
+\fBpackage.keywords\fR. The \fBpackage.accept_keywords\fR file is
+intended to replace the \fBpackage.keywords\fR file, since
+profiles support a different form of \fBpackage.keywords\fR which
+modifies effective KEYWORDS (rather than ACCEPT_KEYWORDS).
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional KEYWORDS
+\- lines without any KEYWORDS imply unstable host arch
+.fi
+
+.I Example:
+.nf
+# always use unstable libgd
+media\-libs/libgd ~x86
+# only use stable mplayer
+media\-video/mplayer \-~x86
+# always use unstable netcat
+net-analyzer/netcat
+.fi
+
+.I Note:
+In addition to the normal values from ACCEPT_KEYWORDS, package.keywords supports
+three special tokens:
+
+.nf
+\fB*\fR package is visible if it is stable on any architecture
+\fB~*\fR package is visible if it is in testing on any architecture
+\fB**\fR package is always visible (KEYWORDS are ignored completely)
+.fi
+
+.I Additional Note:
+If you encounter the \fB-*\fR KEYWORD, this indicates that the package is known
+to be broken on all systems which are not otherwise listed in KEYWORDS. For
+example, a binary\-only package built for x86 will look like:
+
+games-fps/quake3-demo-1.11.ebuild:KEYWORDS="-* x86"
+
+If you wish to accept this package anyway, then use one of the other keywords
+in your package.accept_keywords like this:
+
+games-fps/quake3-demo x86
+
+.TP
+.BR package.accept_restrict
+This will allow ACCEPT_RESTRICT (see \fBmake.conf\fR(5)) to be augmented for a
+single package.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional RESTRICT tokens
+.fi
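+
+.I Example:
+.nf
+# accept a fetch\-restricted package (the entry is illustrative)
+games\-fps/quake3\-demo fetch
+.fi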
+.TP
+.BR package.env
+Per\-package environment variable settings. Entries refer to
+environment files that are placed in the \fB/etc/portage/env/\fR
+directory and have the same format as \fBmake.conf\fR(5). Note that these
+files are interpreted much earlier than the package\-specific \fIbashrc\fR
+files which are described in a later section about \fB/etc/portage/env/\fR.
+Beginners should be careful to recognize the difference between these two types
+of files. When environment variable settings are all that's needed,
+\fBpackage.env\fR is the recommended approach to use.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by name(s) of environment file(s)
+.fi
+
+.I Example:
+.nf
+# use environment variables from /etc/portage/env/glibc.conf for the glibc \
+package
+sys\-libs/glibc glibc.conf
+.fi
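+
+The referenced file (here /etc/portage/env/glibc.conf) uses \fBmake.conf\fR(5)
+syntax; an illustrative sketch of its contents:
+.nf
+MAKEOPTS="\-j1"
+.fi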
+
+.TP
+.BR package.license
+This will allow ACCEPT_LICENSE (see \fBmake.conf\fR(5)) to be augmented for a
+single package.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional licenses or groups
+.fi
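+
+.I Example:
+.nf
+# accept a specific license for one package (the entry is illustrative)
+app\-arch/unrar unRAR
+.fi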
+.TP
+.BR package.mask
+A list of package atoms to mask. Useful if specific versions of packages do
+not work well for you. For example, you swear by the Nvidia drivers, but only
+versions earlier than 1.0.4496. No problem!
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line
+.fi
+
+.I Example:
+.nf
+# mask out versions 1.0.4496 and later of
+# the nvidia drivers
+>=media\-video/nvidia\-kernel\-1.0.4496
+>=media\-video/nvidia\-glx\-1.0.4496
+.fi
+.TP
+.BR package.properties
+This will allow ACCEPT_PROPERTIES (see \fBmake.conf\fR(5)) to be augmented for
+a single package.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional properties
+.fi
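+
+.I Example:
+.nf
+# accept the interactive property for one package (entry is illustrative)
+games\-fps/quake3\-demo interactive
+.fi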
+.TP
+.BR package.unmask
+Just like package.mask above, except here you list packages you want to
+unmask. Useful for overriding the global package.mask file (see
+above). Note that this does not override packages that are masked via
+KEYWORDS.
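+
+.I Example:
+.nf
+# unmask a version that the profile's package.mask masks (illustrative)
+=kde\-base/kde\-3.2.0_beta1
+.fi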
+.TP
+.BR package.use
+Per\-package USE flags. Useful for tracking local USE flags or for
+enabling USE flags for certain packages only. Perhaps you develop GTK
+and thus you want documentation for it, but you don't want
+documentation for QT. Easy as pie my friend!
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line with space-delimited USE flags
+.fi
+
+.I Example:
+.nf
+# turn on docs for GTK 2.x
+=x11\-libs/gtk+\-2* doc
+# disable mysql support for QT
+x11\-libs/qt \-mysql
+.fi
+.TP
+.BR repos.conf
+Specifies \fIsite\-specific\fR repository configuration information.
+.br
+Configuration specified in \fBrepos.conf\fR can be overridden by the
+\fBPORTAGE_REPOSITORIES\fR environment variable, which has the same format as
+\fBrepos.conf\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- configuration of each repository is specified in a section starting with \
+"[${repository_name}]"
+\- attributes are specified in "${attribute} = ${value}" format
+.fi
+
+.I Attributes supported in DEFAULT section:
+.RS
+.RS
+.TP
+.B main\-repo
+Specifies main repository.
+.TP
+.B eclass\-overrides
+Makes all repositories inherit eclasses from specified repositories.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = eclass\-overrides' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B force
+Specifies the names of attributes which should be unconditionally respected by
+\fBegencache\fR(1), \fBemirrordist\fR(1) and \fBrepoman\fR(1).
+.br
+Valid values: aliases, eclass\-overrides, masters
+.RE
+
+.I Attributes supported in sections of repositories:
+.RS
+.TP
+.B aliases
+Specifies aliases of given repository.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = aliases' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B eclass\-overrides
+Makes given repository inherit eclasses from specified repositories.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = eclass\-overrides' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B force
+Specifies the names of attributes which should be unconditionally respected by
+\fBegencache\fR(1), \fBemirrordist\fR(1) and \fBrepoman\fR(1).
+.br
+Valid values: aliases, eclass\-overrides, masters
+.TP
+.B location
+Specifies location of given repository.
+.TP
+.B masters
+Specifies master repositories of given repository.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = masters' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B priority
+Specifies priority of given repository.
+.TP
+.B sync\-cvs\-repo
+Specifies CVS repository.
+.TP
+.B sync\-type
+Specifies type of synchronization performed by `emerge \-\-sync`.
+.br
+Valid non\-empty values: cvs, git, rsync
+.br
+This attribute can be set to empty value to disable synchronization of given
+repository. Empty value is default.
+.TP
+.B sync\-uri
+Specifies URI of repository used for synchronization performed by `emerge
+\-\-sync`.
+.br
+This attribute can be set to empty value to disable synchronization of given
+repository. Empty value is default.
+.RS
+.TP
+Syntax:
+cvs: [cvs://]:access_method:[username@]hostname[:port]:/path
+.br
+git: (git|git+ssh|http|https)://[username@]hostname[:port]/path
+.br
+rsync: (rsync|ssh)://[username@]hostname[:port]/(module|path)
+.TP
+Examples:
+.RS
+rsync://private\-mirror.com/portage\-module
+.br
+rsync://rsync\-user@private\-mirror.com:873/gentoo\-portage
+.br
+ssh://ssh\-user@192.168.0.1:22/usr/portage
+.br
+ssh://ssh\-user@192.168.0.1:22/\\${HOME}/portage\-storage
+.RE
+.TP
+Note: For the ssh:// scheme, key\-based authentication might be of interest.
+.RE
+.RE
+
+.I Example:
+.nf
+[DEFAULT]
+# make gentoo the main repository, which makes it the default master
+# repository for repositories that do not specify masters
+main\-repo = gentoo
+# make all repositories inherit eclasses from the java\-overlay and
+# java\-experimental repositories, with eclasses from java\-experimental
+# taking precedence over those from java\-overlay
+eclass\-overrides = java\-overlay java\-experimental
+
+[gentoo]
+# repos with higher priorities are preferred when ebuilds with equal versions
+# are found in multiple repos (see the `emerge \-\-info \-\-verbose` repo
+# display for a listing of repos and their corresponding priorities).
+priority = 9999
+# disable all eclass overrides for ebuilds from the gentoo repository
+eclass\-overrides =
+# when processing metadata/layout.conf from other repositories, substitute
+# 'gentoo' in place of references to repositories named 'foo' and 'bar',
+# and discard the 'baz' alias contained in gentoo's layout.conf
+aliases = foo bar -baz
+
+[kde-testing]
+# override the metadata/layout.conf masters setting from the kde-testing repo
+masters = gentoo kde
+
+[python]
+# override the metadata/layout.conf masters setting from the python repo,
+# so that settings won't be inherited from those masters, and so that
+# those master repos won't be required as dependencies (the user must
+# ensure that any required dependencies such as eclasses are satisfied)
+masters =
+
+# Repository 'gentoo' synchronized using CVS
+[gentoo]
+location = /usr/portage
+sync\-type = cvs
+sync\-uri = :pserver:anonymous@anoncvs.gentoo.org:/var/cvsroot
+sync\-cvs\-repo = gentoo\-x86
+.fi
+.RE
+.RE
+.TP
+.BR /etc/portage/env/
+.RS
+In this directory additional package\-specific bashrc files can be created.
+Note that if package\-specific environment variable settings are all that's
+needed, then \fB/etc/portage/package.env\fR should be used instead of the
+bashrc approach that is described here. Also note that special variables
+such as \fBFEATURES\fR and \fBINSTALL_MASK\fR will not produce the intended
+results if they are set in bashrc, and therefore
+\fB/etc/portage/package.env\fR should be used instead. Lastly, note that these
+files are interpreted much later than the portage environment file
+\fBpackage.env\fR.
+
+Portage will source all of these bashrc files after \fB/etc/portage/bashrc\fR
+in the following order:
+.nr step 1 1
+.IP \n[step]. 3
+/etc/portage/env/${CATEGORY}/${PN}
+.IP \n+[step].
+/etc/portage/env/${CATEGORY}/${PN}:${SLOT}
+.IP \n+[step].
+/etc/portage/env/${CATEGORY}/${P}
+.IP \n+[step].
+/etc/portage/env/${CATEGORY}/${PF}
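+
+For a hypothetical installed package sys\-libs/foo\-1.2\-r1 in SLOT 0, the
+candidate files would be foo, foo:0, foo\-1.2 and foo\-1.2\-r1, all under
+/etc/portage/env/sys\-libs/.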
+.RE
+.TP
+.BR /etc/portage/sets/
+.RS
+For each file in this directory, a package set is created with its name
+corresponding to the name of the file. Each file should contain a list
+of package atoms and nested package sets, one per line. When a package
+set is referenced as an \fBemerge\fR(1) argument or when it is
+referenced as a nested package set (inside of another package set), the
+set name is prefixed with \fB@\fR.
+
+Also see \fB/var/lib/portage/world_sets\fR and the \fBemerge\fR(1)
+\fB\-\-list\-sets\fR option.
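+
+As an illustration, a hypothetical file /etc/portage/sets/mydesktop containing
+the following lines defines a set referenced as @mydesktop:
+.nf
+@kde
+app\-editors/vim
+.fi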
+.RE
+.TP
+.BR /usr/portage/metadata/
+.RS
+.TP
+.BR layout.conf
+Specifies information about the repository layout.
+\fISite-specific\fR overrides to \fBlayout.conf\fR settings may be specified in
+\fB/etc/portage/repos.conf\fR.
+Settings in \fBrepos.conf\fR take precedence over settings in
+\fBlayout.conf\fR, except tools such as \fBrepoman\fR(1) and \fBegencache\fR(1)
+ignore "aliases", "eclass-overrides" and "masters" attributes set in
+\fBrepos.conf\fR since their operations are inherently \fBnot\fR
+\fIsite\-specific\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- attributes are specified in "${attribute} = ${value}" format
+.fi
+
+.I Supported attributes:
+.RS
+.RS
+.TP
+.BR aliases
+Behaves like an "aliases" attribute in \fBrepos.conf\fR.
+.TP
+.BR eapis\-banned
+List of EAPIs which are not allowed in this repo.
+.TP
+.BR eapis\-deprecated
+List of EAPIs which are allowed but generate warnings when used.
+.TP
+.BR masters
+Names of repositories which satisfy dependencies on eclasses and from which
+settings specified in various repository\-level files (\fBpackage.mask\fR,
+\fBpackage.use.mask\fR, \fBuse.mask\fR etc.) are inherited. Each repository
+name should correspond to the value of a \fBrepo_name\fR entry from one of
+the repositories that is configured in \fBrepos.conf\fR file. Repositories
+listed toward the right of the \fBmasters\fR list take precedence over those
+listed toward the left of the list.
+.TP
+.BR repo\-name " = <value of profiles/repo_name>"
+The name of this repository (overrides profiles/repo_name if it exists).
+.TP
+.BR sign\-commits " = [true|" false "]"
+Boolean value whether we should sign commits in this repo.
+.TP
+.BR sign\-manifests " = [" true "|false]"
+Boolean value whether we should sign Manifest files in this repo.
+.TP
+.BR thin\-manifests " = [true|" false "]"
+Boolean value whether Manifest files contain only DIST entries.
+.TP
+.BR use\-manifests " = [" strict "|true|false]"
+How Manifest files get used. Possible values are "strict" (require an entry
+for every file), "true" (if an entry exists for a file, enforce it), or "false"
+(don't check Manifest files at all).
+.TP
+.BR manifest\-hashes
+List of hashes to generate/check in Manifest files. Valid hashes depend on the
+current version of portage; see the portage.const.MANIFEST2_HASH_FUNCTIONS
+constant for the current list.
+.TP
+.BR update\-changelog " = [true|" false "]"
+The default setting for repoman's --echangelog option.
+.TP
+.BR cache\-formats " = [pms] [md5-dict]"
+The cache formats supported in the metadata tree. There is the old "pms" format
+and the newer/faster "md5-dict" format. By default, the formats present are
+detected from the existing cache directories.
+.TP
+.BR profile\-formats " = [pms|portage-1|portage-2]"
+Control functionality available to profiles in this repo such as which files
+may be dirs, or the syntax available in parent files. Use "portage-2" if you're
+unsure. The default is "portage-1-compat" mode which is meant to be compatible
+with old profiles, but is not allowed to be opted into directly.
+.RE
+.RE
+
+.RS
+.I Example:
+.nf
+# Specify the repository name (overriding profiles/repo_name).
+repo\-name = foo-overlay
+
+# eclasses provided by java-overlay take precedence over identically named
+# eclasses that are provided by gentoo
+masters = gentoo java-overlay
+
+# indicate that this repo can be used as a substitute for foo-overlay
+aliases = foo-overlay
+
+# indicate that ebuilds with the specified EAPIs are banned
+eapis\-banned = 0 1
+
+# indicate that ebuilds with the specified EAPIs are deprecated
+eapis\-deprecated = 2 3
+
+# sign commits in this repo, which requires Git >=1.7.9, and
+# key configured by `git config user.signingkey key_id`
+sign\-commits = true
+
+# do not sign Manifest files in this repo
+sign\-manifests = false
+
+# Manifest files only contain DIST entries
+thin\-manifests = true
+
+# indicate that this repo requires manifests for each package, and is
+# considered a failure if a manifest file is missing/incorrect
+use\-manifests = strict
+
+# customize the set of hashes generated for Manifest entries
+manifest\-hashes = SHA256 SHA512 WHIRLPOOL
+
+# indicate that this repo enables repoman's --echangelog=y option automatically
+update\-changelog = true
+
+# indicate that this repo contains both md5-dict and pms cache formats,
+# which may be generated by egencache(1)
+cache\-formats = md5-dict pms
+
+# indicate that this repo contains profiles that may use directories for
+# package.mask, package.provided, package.use, package.use.force,
+# package.use.mask, package.use.stable.force, package.use.stable.mask,
+# use.force, use.mask, use.stable.force, and use.stable.mask.
+# profile\-formats = portage-1
+# indicate that paths such as 'gentoo:targets/desktop' or ':targets/desktop' in
+# profile parent files can be used to express paths relative to the root
+# 'profiles' directory of a repository (when the repo name is omitted before
+# the colon, it refers to the current repository the parent file is inside)
+profile\-formats = portage-2
+.fi
+.RE
+.RE
+.TP
+.BR /usr/portage/profiles/
+Global Gentoo settings that are controlled by the developers. To override
+these settings, you can use the files in \fB/etc/portage/\fR.
+.RS
+.TP
+.BR arch.list
+A list of all valid KEYWORDS. This does not include modifiers.
+
+.I Format:
+.nf
+\- one KEYWORD per line
+.fi
+
+.I Example:
+.nf
+x86
+ppc
+sparc
+.fi
+.TP
+.BR categories
+A simple list of valid categories that may be used in repositories and PKGDIR
+(see \fBmake.conf\fR(5)).
+
+.I Format:
+.nf
+\- one category per line
+.fi
+
+.I Example:
+.nf
+app\-admin
+dev\-lang
+games\-strategy
+sys\-kernel
+.fi
+.TP
+.BR info_pkgs
+A list of all the packages which will be displayed when you run `emerge info`.
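+
+Illustrative entries, one package per line:
+.nf
+sys\-devel/gcc
+sys\-libs/glibc
+.fi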
+.TP
+.BR info_vars
+A list of all the variables which will be displayed when you run `emerge info`.
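+
+Illustrative entries, one variable name per line:
+.nf
+CFLAGS
+GENTOO_MIRRORS
+.fi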
+.TP
+.BR license_groups
+This contains groups of licenses that may be specified in the
+\fBACCEPT_LICENSE\fR variable (see \fBmake.conf\fR(5)). Refer
+to GLEP 23 for further information:
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0023.html\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one group name, followed by list of licenses and nested groups
+\- nested groups are prefixed with the '@' symbol
+.fi
+
+.I Example:
+.nf
+# The FSF-APPROVED group includes the entire GPL-COMPATIBLE group and more.
+FSF-APPROVED @GPL-COMPATIBLE Apache-1.1 BSD-4 MPL-1.0 MPL-1.1
+# The GPL-COMPATIBLE group includes all licenses compatible with the GNU GPL.
+GPL-COMPATIBLE Apache-2.0 BSD BSD-2 GPL-2 GPL-3 LGPL-2.1 LGPL-3 X11 ZLIB
+.fi
+.TP
+.BR package.accept_keywords
+Per\-package ACCEPT_KEYWORDS for profiles. This has the same format and
+behavior as /etc/portage/package.accept_keywords, including the ability
+to list atoms without any keywords in order to accept unstable variants
+of all stable keywords listed in ACCEPT_KEYWORDS.
+.TP
+.BR package.keywords
+Per\-profile KEYWORDS. Useful for cases in which the effective KEYWORDS of a
+given package should vary depending on which profile the user has selected.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional KEYWORDS
+.fi
+
+.I Example:
+.nf
+# add stable keyword to libgd
+media\-libs/libgd x86
+# remove stable keyword from mplayer and add unstable keyword
+media\-video/mplayer \-x86 ~x86
+# remove all keywords from netcat
+net-analyzer/netcat -*
+.fi
+.TP
+.BR package.mask
+This contains a list of DEPEND atoms for packages that should not be installed
+in any profile. Useful for adding the latest KDE betas and making sure no
+one accidentally upgrades to them. Also useful for quickly masking specific
+versions due to security issues. ALWAYS include a comment explaining WHY the
+package has been masked and WHO is doing the masking.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one DEPEND atom per line
+.fi
+
+.I Example:
+.nf
+# masked for security reasons
+<sys\-libs/zlib\-1.1.4
+# <caleb@gentoo.org> (10 Sep 2003)
+# new kde betas
+=kde\-base/kde\-3.2.0_beta1
+=kde\-base/kdeaccessibility\-3.2.0_beta1
+.fi
+.TP
+.BR profiles.desc
+List all the current stable and development profiles. If a profile is listed
+here, then it will be checked by repoman.
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- one profile list per line in format: arch dir status
+\- arch must be listed in arch.list
+\- dir is relative to profiles.desc
+\- status must be 'stable', 'dev', or 'exp'
+.fi
+
+.I Example:
+.nf
+alpha default/linux/alpha/10.0 stable
+m68k default/linux/m68k/10.0 dev
+x86 default/linux/x86/10.0 stable
+x86-linux prefix/linux/x86 exp
+.fi
+.TP
+.BR repo_name
+The first line of the file should define a unique repository name. The name
+may contain any of the characters [A\-Za\-z0\-9_\-]. It must not begin with a
+hyphen. If the repo\-name attribute is specified in layout.conf, then that
+setting will take precedence.
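+
+An illustrative repo_name file contains just the name:
+.nf
+my\-overlay
+.fi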
+.TP
+.BR thirdpartymirrors
+Controls the mapping of mirror:// style URIs to actual lists of
+mirrors. Keeps us from overloading a single server.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- mirror type followed by a list of hosts
+.fi
+
+.I Example:
+.nf
+sourceforge http://aleron.dl.sourceforge.net/sourceforge \
+http://unc.dl.sourceforge.net/sourceforge
+
+gentoo http://distro.ibiblio.org/pub/linux/distributions/gentoo/distfiles/ \
+ftp://ftp.gtlib.cc.gatech.edu/pub/gentoo/distfiles
+
+kernel http://www.kernel.org/pub http://www.us.kernel.org/pub
+.fi
+.TP
+.BR use.desc
+All global USE flags must be listed here with a description of what they do.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- use flag \- some description
+.fi
+
+.I Example:
+.nf
+3dfx \- Adds support for 3dfx video cards
+acl \- Adds support for Access Control Lists
+doc \- Adds extra documentation
+.fi
+.TP
+.BR use.local.desc
+All local USE flags are listed here along with the package and a
+description. This file is automatically generated from the
+metadata.xml files that are included with each individual package.
+Refer to GLEP 56 for further information:
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0056.html\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- package:use flag \- description
+.fi
+
+.I Example:
+.nf
+app\-editors/nano:justify \- Toggles the justify option
+dev\-libs/DirectFB:fusion \- Adds Multi Application support
+games\-emulation/xmess:net \- Adds network support
+.fi
+.RE
+.TP
+.BR /usr/share/portage/config/
+.RS
+.TP
+.BR make.globals
+The global default settings for Portage. This comes from the portage package
+itself. Settings in \fBmake.conf\fR or \fBpackage.env\fR override values set
+here. The format is described extensively in \fBmake.conf\fR(5).
+.TP
+.BR repos.conf
+The default configuration of repositories for Portage. This comes from
+the portage package itself. Settings in \fB/etc/portage/repos.conf\fR
+override values set here. The format is described extensively in section
+for \fB/etc/portage/repos.conf\fR.
+.RE
+.TP
+.BR /var/cache/edb/
+.RS
+This directory is used to store internal portage cache files. The names and
+purpose of these files are deliberately left undocumented in order to reduce
+bitrot as internals change. If you aren't working on portage internals, then
+the details most likely do not matter to you.
+
+This entire directory can be safely deleted. Doing so is not recommended,
+however, since regenerating its contents can be time consuming.
+.RE
+.TP
+.BR /var/db/pkg/
+.RS
+All installed package information is recorded here. If portage thinks you have
+a package installed, it is usually because it is listed here.
+
+The format somewhat closely follows that of the portage tree. There is a
+directory for each category and a package-version subdirectory for each package
+you have installed.
+
+Inside each package directory are misc files that describe the installed
+contents of the package as well as build time information (so that the package
+can be unmerged without needing the portage tree).
+
+The exact file contents and format are not described here, again so that
+things can be changed quickly. Generally, though, there is one file per
+environment variable that "matters" (like CFLAGS), with its contents stored inside.
+Another common file is the CONTENTS file which lists the path and hashes of
+all objects that the package installed onto your system.
+.RE
+.TP
+.BR /var/lib/portage/
+.RS
+.TP
+.BR config
+Hashes which are used to determine whether files in config protected
+directories have been modified since being installed. Files which have not
+been modified will automatically be unmerged.
+.TP
+.BR world
+Every time you emerge a package, the package that you requested is
+recorded here. Then when you run `emerge world \-up`, the list of
+packages is read from this file. Note that this does not mean that the
+packages that were installed as dependencies are listed here. For
+example, if you run `emerge mod_wsgi` and you do not have apache
+already, then "www\-apache/mod_wsgi" is recorded in the world file but
+"www\-servers/apache" is not. For more information, review \fBemerge\fR(1).
+
+.I Format:
+.nf
+\- one DEPEND atom base per line
+.fi
+
+.I Example:
+.nf
+games\-misc/fortune\-mod\-gentoo\-dev
+dev\-libs/uclibc
+app\-cdr/cdemu
+.fi
+.TP
+.BR world_sets
+This is like the world file but instead of package atoms it contains
+package sets, which always begin with the \fB@\fR character. Use
+\fB/etc/portage/sets/\fR to define user package sets.
+
+.I Example:
+.nf
+@kde
+.fi
+.RE
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "AUTHORS"
+.nf
+Marius Mauch <genone@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Drake Wyrm <wyrm@haell.com>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "SEE ALSO"
+.BR emerge (1),
+.BR ebuild (1),
+.BR ebuild (5),
+.BR make.conf (5),
+.BR color.map (5)
diff --git a/usr/share/portage/config/make.conf.example b/usr/share/portage/config/make.conf.example
new file mode 100644
index 0000000..cbd8e55
--- /dev/null
+++ b/usr/share/portage/config/make.conf.example
@@ -0,0 +1,368 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Contains local system settings for Portage system
+
+# Please review 'man make.conf' for more information.
+
+# Build-time functionality
+# ========================
+#
+# The USE variable is used to enable optional build-time functionality. For
+# example, quite a few packages have optional X, gtk or GNOME functionality
+# that can only be enabled or disabled at compile-time. Gentoo Linux has a
+# very extensive set of USE variables described in our USE variable HOWTO at
+# http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?part=2&chap=1
+#
+# The available list of use flags with descriptions is in your portage tree.
+# Use 'less' to view them: --> less /usr/portage/profiles/use.desc <--
+#
+# 'ufed' is an ncurses/dialog interface available in portage to make handling
+# USE flags easier. Install it with 'emerge app-portage/ufed'.
+#
+# Example:
+#USE="X gtk gnome -alsa"
+
+# Host and optimization settings
+# ==============================
+#
+# For optimal performance, enable a CFLAGS setting appropriate for your CPU.
+#
+# Please note that if you experience strange issues with a package, it may be
+# due to gcc's optimizations interacting in a strange way. Please test the
+# package (and in some cases the libraries it uses) at default optimizations
+# before reporting errors to developers.
+#
+# If your gcc supports it, you can add -frecord-gcc-switches to all of the
+# following *FLAGS in order to enable *FLAGS ignorance checking for ebuilds:
+# CFLAGS, CXXFLAGS, FFLAGS, and FCFLAGS.
+# Note that this check is only enabled if every one of these variables contains
+# -frecord-gcc-switches, since otherwise the check could result in false
+# positive results.
+#
+# Please refer to the GCC manual for a list of possible values.
+#
+#CFLAGS="-O2 -pipe"
+#
+# If you set CFLAGS above, then this line will set your default C++ flags to
+# the same settings.
+#CXXFLAGS="${CFLAGS}"
+#
+# If you set CFLAGS above, then this line will set your default FORTRAN 77
+# flags to the same settings.
+#FFLAGS="${CFLAGS}"
+#
+# If you set FFLAGS above, then this line will set your default FORTRAN
+# flags (for modern build systems) to the same settings.
+#FCFLAGS="${FFLAGS}"
+
+
+# Advanced Masking
+# ================
+#
+# Gentoo is using a new masking system to allow for easier stability testing
+# on packages. KEYWORDS are used in ebuilds to mask and unmask packages based
+# on the platform they are set for. A special form has been added that
+# indicates packages and revisions that are expected to work, but have not yet
+# been approved for the stable set. '~arch' is a superset of 'arch' that
+# includes the unstable, in-testing packages. Users of the 'x86' architecture
+# would add '~x86' to ACCEPT_KEYWORDS to enable unstable/testing packages.
+# '~ppc', '~sparc' are the unstable KEYWORDS for their respective platforms.
+#
+# Please note that this is not for development, alpha, beta, or cvs release
+# packages. "Broken" packages will not be added to testing and should not be
+# requested to be added. Alternative routes are available to developers
+# for experimental packages, and it is at their discretion to use them.
+#
+# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
+# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
+#
+#ACCEPT_KEYWORDS="~arch"
+
+# ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+# It may contain both license and group names, where group names are
+# prefixed with the '@' symbol. License groups are defined in the
+# license_groups file (see portage(5) man page). In addition to license
+# and group names, the * and -* wildcard tokens are also supported.
+#
+# Accept any license except those in the EULA license group (default).
+#ACCEPT_LICENSE="* -@EULA"
+#
+# Only accept licenses in the FREE license group (i.e. Free Software).
+#ACCEPT_LICENSE="-* @FREE"
+
+# Portage Directories
+# ===================
+#
+# Each of these settings controls an aspect of portage's storage and file
+# system usage. If you change any of these, be sure it is available when
+# you try to use portage. *** DO NOT INCLUDE A TRAILING "/" ***
+#
+# PORTAGE_TMPDIR is the location portage will use for compilations and
+# temporary storage of data. This can get VERY large depending upon
+# the application being installed.
+#PORTAGE_TMPDIR=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/var/tmp
+#
+# PORTDIR is the location of the portage tree. This is the repository
+# for all profile information as well as all ebuilds. If you change
+# this, you must update your /etc/portage/make.profile symlink accordingly.
+# ***Warning***
+# Data stored inside PORTDIR is in peril of being overwritten or deleted by
+# the emerge --sync command. The default value of PORTAGE_RSYNC_OPTS
+# will protect the default locations of DISTDIR and PKGDIR, but users are
+# warned that any other locations inside PORTDIR are not necessarily safe
+# for data storage.
+#PORTDIR=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage
+#
+# DISTDIR is where all of the source code tarballs will be placed for
+# emerges. After packages are built, it is safe to remove any and
+# all files from this directory since they will be automatically
+# fetched on demand for a given build. If you would like to
+# selectively prune obsolete files from this directory, see
+# eclean from the gentoolkit package. Note that locations under
+# /usr/portage are not necessarily safe for data storage. See the
+# PORTDIR documentation for more information.
+#DISTDIR=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage/distfiles
+#
+# PKGDIR is the location of binary packages that you can have created
+# with '--buildpkg' or '-b' while emerging a package. This can get
+# up to several hundred megs, or even a few gigs. Note that
+# locations under /usr/portage are not necessarily safe for data
+# storage. See the PORTDIR documentation for more information.
+#PKGDIR=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage/packages
+#
+# PORT_LOGDIR is the location where portage will store all the logs it
+# creates from each individual merge. They are stored as
+# ${CATEGORY}:${PF}:YYYYMMDD-HHMMSS.log in the directory specified.
+# If the directory does not exist, it will be created automatically and
+# group permissions will be applied to it. If the directory already
+# exists, portage will not modify its permissions.
+#PORT_LOGDIR=""
+#
+# PORTDIR_OVERLAY is a directory where local ebuilds may be stored without
+# concern that they will be deleted by rsync updates. Default is not
+# defined.
+#PORTDIR_OVERLAY=/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/local/portage
+
+# Fetching files
+# ==============
+#
+# If you need to set a proxy for wget or lukemftp, add the appropriate "export
+# ftp_proxy=<proxy>" and "export http_proxy=<proxy>" lines to /etc/profile if
+# all users on your system should use them.
+#
+# Portage uses wget by default. Here are some settings for some alternate
+# downloaders -- note that you need to merge these programs first before they
+# will be available. The command should be written to place the fetched file
+# at \${DISTDIR}/\${FILE}.
+#
+# Default fetch command (3 tries, passive ftp for firewall compatibility)
+#FETCHCOMMAND="wget -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#RESUMECOMMAND="wget -c -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#
+# Using wget, ratelimiting downloads
+#FETCHCOMMAND="wget -t 3 -T 60 --passive-ftp --limit-rate=200k -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#RESUMECOMMAND="wget -c -t 3 -T 60 --passive-ftp --limit-rate=200k -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#
+# Lukemftp (BSD ftp):
+#FETCHCOMMAND="lukemftp -s -a -o \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#RESUMECOMMAND="lukemftp -s -a -R -o \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+#
+# Portage uses GENTOO_MIRRORS to specify mirrors to use for source retrieval.
+# The list is a space separated list which is read left to right. If you use
+# another mirror we highly recommend leaving the default mirror at the end of
+# the list so that portage will fall back to it if the files cannot be found
+# on your specified mirror. We _HIGHLY_ recommend that you change this setting
+# to a nearby mirror by merging and using the 'mirrorselect' tool.
+#GENTOO_MIRRORS="<your_mirror_here> http://distfiles.gentoo.org http://www.ibiblio.org/pub/Linux/distributions/gentoo"
+#
+# Portage uses PORTAGE_BINHOST to specify mirrors for prebuilt-binary packages.
+# The list is a single entry specifying the full address of the directory
+# serving the tbz2's for your system. Running emerge with either '--getbinpkg'
+# or '--getbinpkgonly' will cause portage to retrieve the metadata from all
+# packages in the directory specified, and use that data to determine what will
+# be downloaded and merged. '-g' or '-gK' are the recommended parameters. Please
+# consult the man pages and 'emerge --help' for more information. For FTP, the
+# default connection is passive -- If you require an active connection, affix
+# an asterisk (*) to the end of the host:port string before the path.
+#PORTAGE_BINHOST="http://grp.mirror.site/gentoo/grp/1.4/i686/athlon-xp/"
+# This ftp connection is passive ftp.
+#PORTAGE_BINHOST="ftp://login:pass@grp.mirror.site/pub/grp/i686/athlon-xp/"
+# This ftp connection is active ftp.
+#PORTAGE_BINHOST="ftp://login:pass@grp.mirror.site:21*/pub/grp/i686/athlon-xp/"
+
+# Synchronizing Portage
+# =====================
+#
+# Each of these settings affects how Gentoo synchronizes your Portage tree.
+# Synchronization is handled by rsync and these settings allow some control
+# over how it is done.
+#
+# SYNC is the server used by rsync to retrieve a localized rsync mirror
+# rotation. This allows you to select servers that are geographically
+# close to you, yet still distribute the load over a number of servers.
+# Please do not single out specific rsync mirrors. Doing so places undue
+# stress on particular mirrors. Instead you may use one of the following
+# continent specific rotations:
+#
+# Default: "rsync://rsync.gentoo.org/gentoo-portage"
+# North America: "rsync://rsync.namerica.gentoo.org/gentoo-portage"
+# South America: "rsync://rsync.samerica.gentoo.org/gentoo-portage"
+# Europe: "rsync://rsync.europe.gentoo.org/gentoo-portage"
+# Asia: "rsync://rsync.asia.gentoo.org/gentoo-portage"
+# Australia: "rsync://rsync.au.gentoo.org/gentoo-portage"
+#
+# If you have multiple Gentoo boxes, it is probably a good idea to have only
+# one of them sync from the rotations above. The other boxes can then rsync
+# from the local rsync server, reducing the load on the mirrors.
+# Instructions for setting up a local rsync server are available here:
+# http://www.gentoo.org/doc/en/rsync.xml
+#
+# For Gentoo Prefix, use the following URL:
+#
+# Default: "rsync://rsync.prefix.bitzolder.nl/gentoo-portage-prefix
+#
+#SYNC="rsync://rsync.gentoo.org/gentoo-portage"
+#
+# PORTAGE_RSYNC_RETRIES sets the number of times portage will attempt to retrieve
+# a current portage tree before it exits with an error. This usually allows
+# retrieval to succeed without user intervention.
+# If set to a negative number, then retry until all possible addresses are
+# exhausted.
+#PORTAGE_RSYNC_RETRIES="-1"
+#
+# PORTAGE_RSYNC_EXTRA_OPTS can be used to feed additional options to the rsync
+# command used by `emerge --sync`. This will not change the default options
+# which are set by PORTAGE_RSYNC_OPTS (don't change those unless you know
+# exactly what you're doing).
+#PORTAGE_RSYNC_EXTRA_OPTS=""
+#
+# Advanced Features
+# =================
+#
+# EMERGE_DEFAULT_OPTS allows emerge to act as if certain options are
+# specified on every run. Useful options include --ask, --verbose,
+# --usepkg and many others. Options that are not useful, such as --help,
+# are not filtered.
+#EMERGE_DEFAULT_OPTS=""
+#
+# INSTALL_MASK allows certain files to not be installed into your file system.
+# This is useful when you wish to filter out a certain set of files from
+# ever being installed, such as INSTALL.gz or TODO.gz
+#INSTALL_MASK=""
+#
+# MAKEOPTS provides extra options that may be passed to 'make' when a
+# program is compiled. Presently the only use is for specifying
+# the number of parallel makes (-j) to perform. The suggested number
+# for parallel makes is CPUs+1.
+#MAKEOPTS="-j2"
+#
+# PORTAGE_NICENESS provides a default increment to emerge's niceness level.
+# Note: This is an increment. Running emerge in a niced environment will
+# reduce it further. Default is unset.
+#PORTAGE_NICENESS=3
+#
+# PORTAGE_IONICE_COMMAND provides a command for portage to call in order to
+# adjust the I/O priority of portage and its subprocesses. Default is
+# unset.
+#PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"
+#
+# AUTOCLEAN enables portage to automatically clean out older or overlapping
+# packages from the system after every successful merge. This is the
+# same as running 'emerge -c' after every merge. Set with: "yes" or "no".
+# This does not affect the unpacked source. See 'noclean' below.
+#
+# Warning: AUTOCLEAN="no" can cause serious problems due to overlapping
+# packages. Do not use it unless absolutely necessary!
+#AUTOCLEAN="yes"
+#
+# FEATURES defines actions portage takes by default. This is an incremental
+# variable. See the make.conf(5) man page for a complete list of supported
+# values and their respective meanings.
+#FEATURES="ccache distcc installsources \
+# splitdebug test userpriv usersandbox"
+
+# CCACHE_SIZE and CCACHE_DIR are used to control the behavior of ccache,
+# and are only used if "ccache" is in FEATURES.
+#
+# CCACHE_SIZE sets the space limitations for ccache. The default size is
+# "2G", or 2 gigabytes. Units are specified with 'G', 'M', or 'K'.
+#
+#CCACHE_SIZE="512M"
+#
+# CCACHE_DIR sets the ccache path. If not specified, portage will default
+# to "${PORTAGE_TMPDIR}/ccache".
+#
+# Note that to display ccache statistics outside of portage, you must
+# remember to give the correct path to the cache.
+#
+# $ CCACHE_DIR=/var/tmp/ccache ccache -s
+#
+#CCACHE_DIR="${PORTAGE_TMPDIR}/ccache"
+
+# DISTCC_DIR sets the temporary space used by distcc.
+#DISTCC_DIR="${PORTAGE_TMPDIR}/.distcc"
+
+# logging related variables:
+# PORTAGE_ELOG_CLASSES: selects messages to be logged, possible values are:
+# info, warn, error, log, qa, *
+#PORTAGE_ELOG_CLASSES="log warn error"
+
+# PORTAGE_ELOG_SYSTEM: selects the module(s) to process the log messages. Modules
+# included in portage are (empty means logging is disabled):
+# echo (display messages again when emerge exits)
+# save (saves one log per package in $PORT_LOGDIR/elog,
+# /var/log/portage/elog if $PORT_LOGDIR is unset)
+# custom (passes all messages to $PORTAGE_ELOG_COMMAND)
+# syslog (sends all messages to syslog)
+# mail (send all messages to the mailserver defined
+# in $PORTAGE_ELOG_MAILURI)
+# save_summary (like "save" but merges all messages
+# in $PORT_LOGDIR/elog/summary.log,
+# /var/log/portage/elog/summary.log if
+# $PORT_LOGDIR is unset)
+# mail_summary (like "mail" but sends all messages in
+# a single mail when emerge exits)
+# To use elog, you should enable at least one module.
+# The module name may be followed by a colon and a comma-separated
+# list of loglevels to override PORTAGE_ELOG_CLASSES
+# for this module (e.g.
+# PORTAGE_ELOG_SYSTEM="mail:warn,error syslog:* save")
+#PORTAGE_ELOG_SYSTEM="save_summary:log,warn,error,qa echo"
+
+# PORTAGE_ELOG_COMMAND: only used with the "custom" logging module. Specifies a command
+# to process log messages. Two variables are expanded:
+# ${PACKAGE} - expands to the cpv entry of the processed
+# package (see $PVR in ebuild(5))
+# ${LOGFILE} - absolute path to the logfile
+# Both variables have to be quoted with single quotes
+#PORTAGE_ELOG_COMMAND="/path/to/logprocessor -p '\${PACKAGE}' -f '\${LOGFILE}'"
+
+# PORTAGE_ELOG_MAILURI: this variable holds all important settings for the mail
+# module. In most cases listing the recipient address and
+# the receiving mailserver should be sufficient, but you can
+# also use advanced settings like authentication or TLS. The
+# full syntax is:
+# address [[user:passwd@]mailserver[:port]]
+# where
+# address: recipient address
+# user: username for smtp auth (defaults to none)
+# passwd: password for smtp auth (defaults to none)
+# mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+# alternatively this can also be the path to a sendmail binary if you don't want to use smtp
+# port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
+# Examples:
+#PORTAGE_ELOG_MAILURI="root@localhost localhost" (this is also the default setting)
+#PORTAGE_ELOG_MAILURI="user@some.domain mail.some.domain" (sends mails to user@some.domain using the mailserver mail.some.domain)
+#PORTAGE_ELOG_MAILURI="user@some.domain user:secret@mail.some.domain:100465" (this is left uncommented as a reader exercise ;)
+
+# PORTAGE_ELOG_MAILFROM: you can set the from-address of logmails with this variable,
+# if unset mails are sent by "portage" (this default may fail
+# in some environments).
+#PORTAGE_ELOG_MAILFROM="portage@some.domain"
+
+# PORTAGE_ELOG_MAILSUBJECT: template string to be used as subject for logmails. The following
+# variables are expanded:
+# ${ACTION} - merged, unmerged, or unknown
+# ${PACKAGE} - see description of PORTAGE_ELOG_COMMAND
+# ${HOST} - FQDN of the host portage is running on
+#PORTAGE_ELOG_MAILSUBJECT="[portage] ebuild log for \${PACKAGE} on \${HOST}"
diff --git a/usr/share/portage/config/make.globals b/usr/share/portage/config/make.globals
new file mode 100644
index 0000000..e901e6b
--- /dev/null
+++ b/usr/share/portage/config/make.globals
@@ -0,0 +1,179 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# System-wide defaults for the Portage system
+
+# *****************************
+# ** DO NOT EDIT THIS FILE **
+# ***************************************************
+# **** CHANGES TO make.conf *OVERRIDE* THIS FILE ****
+# ***************************************************
+# ** Incremental Variables Accumulate Across Files **
+# ** USE, CONFIG_*, and FEATURES are incremental **
+# ***************************************************
+
+# When compiler flags are unset, many packages will substitute their own
+# implicit flags. For uniformity, use an empty string as the default.
+CFLAGS=""
+CXXFLAGS=""
+LDFLAGS=""
+FFLAGS=""
+FCFLAGS=""
+
+# Default distfiles mirrors. This rotation has multiple hosts and is reliable.
+# Approved by the mirror-admin team.
+GENTOO_MIRRORS="http://distfiles.gentoo.org"
+
+ACCEPT_LICENSE="* -@EULA"
+ACCEPT_PROPERTIES="*"
+ACCEPT_RESTRICT="*"
+
+# Miscellaneous paths
+DISTDIR="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage/distfiles"
+PKGDIR="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage/packages"
+RPMDIR="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage/rpm"
+
+# Temporary build directory
+PORTAGE_TMPDIR="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/var/tmp"
+
+# Fetching command (3 tries, passive ftp for firewall compatibility)
+FETCHCOMMAND="wget -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+RESUMECOMMAND="wget -c -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+
+FETCHCOMMAND_RSYNC="rsync -avP \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
+RESUMECOMMAND_RSYNC="rsync -avP \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
+
+# NOTE: rsync will evaluate quotes embedded inside PORTAGE_SSH_OPTS
+FETCHCOMMAND_SSH="bash -c \"x=\\\${2#ssh://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; exec rsync --rsh=\\\"ssh -p\\\${port} \\\${3}\\\" -avP \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" rsync \"\${DISTDIR}/\${FILE}\" \"\${URI}\" \"\${PORTAGE_SSH_OPTS}\""
+RESUMECOMMAND_SSH=${FETCHCOMMAND_SSH}
+
+# NOTE: bash eval is used to evaluate quotes embedded inside PORTAGE_SSH_OPTS
+FETCHCOMMAND_SFTP="bash -c \"x=\\\${2#sftp://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; eval \\\"declare -a ssh_opts=(\\\${3})\\\" ; exec sftp -P \\\${port} \\\"\\\${ssh_opts[@]}\\\" \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" sftp \"\${DISTDIR}/\${FILE}\" \"\${URI}\" \"\${PORTAGE_SSH_OPTS}\""
+
+# Default user options
+FEATURES="assume-digests binpkg-logs
+ config-protect-if-modified distlocks ebuild-locks
+ fixlafiles merge-sync news parallel-fetch preserve-libs protect-owned
+ sandbox sfperms strict unknown-features-warn unmerge-logs
+ unmerge-orphans userfetch userpriv usersandbox usersync"
+
+# Ignore file collisions in /lib/modules since files inside this directory
+# are never unmerged, and therefore collisions must be ignored in order for
+# FEATURES=protect-owned to operate smoothly in all cases.
+# Ignore file collisions for unowned *.pyo and *.pyc files; this helps during
+# the transition from compiling python modules in the live file system to
+# compiling them in the src_install() function.
+COLLISION_IGNORE="/lib/modules/* *.py[co] *\$py.class"
+UNINSTALL_IGNORE="/lib/modules/*"
+
+# Prefix: we want preserve-libs, not sure how mainline goes about this
+FEATURES="${FEATURES} preserve-libs"
+
+# Force EPREFIX, ED and EROOT to exist in all EAPIs, not just 3 and up
+FEATURES="${FEATURES} force-prefix"
+
+# Avoid problems due to case-insensitivity, bug #524236
+FEATURES="${FEATURES} case-insensitive-fs"
+
+# By default wait 5 secs before cleaning a package
+CLEAN_DELAY="5"
+
+# By default wait 10 secs on an important warning
+EMERGE_WARNING_DELAY="10"
+
+# Automatically clean installed packages after they are updated.
+# This option will be removed and forced to yes.
+AUTOCLEAN="yes"
+
+PORTAGE_BZIP2_COMMAND="bzip2"
+
+# Don't compress files with these suffixes.
+PORTAGE_COMPRESS_EXCLUDE_SUFFIXES="css gif htm[l]? jp[e]?g js pdf png"
+
+# Number of mirrors to try when a downloaded file has an incorrect checksum.
+PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS="5"
+
+# Minimum size of existing file for RESUMECOMMAND to be called.
+PORTAGE_FETCH_RESUME_MIN_SIZE="350K"
+
+# Number of times 'emerge --sync' will run before giving up.
+PORTAGE_RSYNC_RETRIES="-1"
+
+# Number of seconds rsync will wait before timing out.
+#RSYNC_TIMEOUT="180"
+
+PORTAGE_RSYNC_OPTS="--recursive --links --safe-links --perms --times --omit-dir-times --compress --force --whole-file --delete --stats --human-readable --timeout=180 --exclude=/distfiles --exclude=/local --exclude=/packages"
+
+# The number of days after the last `emerge --sync` that a warning
+# message should be produced.
+PORTAGE_SYNC_STALE="30"
+
+# Executed before emerge exit if FEATURES=clean-logs is enabled.
+PORT_LOGDIR_CLEAN="find \"\${PORT_LOGDIR}\" -type f ! -name \"summary.log*\" -mtime +7 -delete"
+
+# Minimal CONFIG_PROTECT
+# NOTE: in Prefix, these are NOT prefixed on purpose, because the
+# profiles define them too
+CONFIG_PROTECT="/etc"
+CONFIG_PROTECT_MASK="/etc/env.d"
+
+# Disable auto-use
+USE_ORDER="env:pkg:conf:defaults:pkginternal:repo:env.d"
+
+# Default portage user/group
+PORTAGE_USER='vapier'
+PORTAGE_GROUP='eng'
+PORTAGE_ROOT_USER='vapier'
+
+# Default ownership of installed files.
+PORTAGE_INST_UID="145691"
+PORTAGE_INST_GID="5000"
+
+# Default PATH for ebuild env
+DEFAULT_PATH="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/sbin:/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/bin:/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/sbin:/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/bin"
+# Any extra PATHs to add to the ebuild environment's PATH (if any)
+EXTRA_PATH=""
+
+# The offset prefix this Portage was configured with (not used by
+# Portage itself)
+CONFIGURE_EPREFIX="/usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir"
+
+# Mode bits for ${WORKDIR} (see ebuild.5).
+PORTAGE_WORKDIR_MODE="0700"
+
+# Some defaults for elog
+PORTAGE_ELOG_CLASSES="log warn error"
+PORTAGE_ELOG_SYSTEM="save_summary:log,warn,error,qa echo"
+
+PORTAGE_ELOG_MAILURI="vapier"
+PORTAGE_ELOG_MAILSUBJECT="[portage] ebuild log for \${PACKAGE} on \${HOST}"
+PORTAGE_ELOG_MAILFROM="vapier@localhost"
+
+# Signing command used by repoman
+PORTAGE_GPG_SIGNING_COMMAND="gpg --sign --digest-algo SHA256 --clearsign --yes --default-key \"\${PORTAGE_GPG_KEY}\" --homedir \"\${PORTAGE_GPG_DIR}\" \"\${FILE}\""
+
+# Security labels are special, see bug #461868.
+# system.nfs4_acl attributes are irrelevant, see bug #475496.
+PORTAGE_XATTR_EXCLUDE="security.* system.nfs4_acl"
+
+# Writeable paths for Mac OS X seatbelt sandbox
+#
+# If path ends in a slash (/), access will recursively be allowed to directory
+# contents (using a regex), not the directory itself. Without a slash, access
+# to the directory or file itself will be allowed (using a literal), so it can
+# be created, removed and changed. If both are needed, the directory needs to be
+# given twice, once with and once without the slash. Obviously this only makes
+# sense for directories, not files.
+#
+# An empty value for either variable will disable all restrictions on the
+# corresponding operation.
+MACOSSANDBOX_PATHS="/dev/fd/ /private/tmp/ /private/var/tmp/ @@PORTAGE_BUILDDIR@@/ @@PORTAGE_ACTUAL_DISTDIR@@/"
+MACOSSANDBOX_PATHS_CONTENT_ONLY="/dev/null /dev/dtracehelper /dev/tty /private/var/run/syslog"
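+# Example (illustrative path): adding "/opt/scratch/ /opt/scratch" to
+# MACOSSANDBOX_PATHS would permit both modifying files under /opt/scratch
+# and creating or removing /opt/scratch itself, while "/opt/scratch/"
+# alone would only permit changes to its contents.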
+
+# ***************************************************
+# **             DO NOT EDIT THIS FILE             **
+# ***************************************************
+# **** CHANGES TO make.conf *OVERRIDE* THIS FILE ****
+# ***************************************************
+# ** Incremental Variables Accumulate Across Files **
+# **  USE, CONFIG_*, and FEATURES are incremental  **
+# ***************************************************
diff --git a/usr/share/portage/config/repos.conf b/usr/share/portage/config/repos.conf
new file mode 100644
index 0000000..20bffe4
--- /dev/null
+++ b/usr/share/portage/config/repos.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+main-repo = gentoo_prefix
+
+[gentoo_prefix]
+location = /usr/local/google/home/vapier/src/android/mdk/build/portage/tmp/prefix-portage-2.2.14/destdir/usr/portage
+sync-type = rsync
+sync-uri = rsync://rsync.prefix.bitzolder.nl/gentoo-portage-prefix
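+
+# (These defaults can be overridden per repository from the user's
+# etc/portage/repos.conf under the configured prefix, for example to
+# point sync-uri at a closer mirror.)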
diff --git a/usr/share/portage/config/sets/portage.conf b/usr/share/portage/config/sets/portage.conf
new file mode 100644
index 0000000..fd2c387
--- /dev/null
+++ b/usr/share/portage/config/sets/portage.conf
@@ -0,0 +1,91 @@
+# WARNING: default set configuration, DO NOT CHANGE.
+# If you want to change anything redefine the relevant section in
+# /etc/portage/sets.conf. Any changes to this file will be lost on the next
+# portage update, and configuration errors here might upset portage in
+# unexpected ways.
+
+# Not much can be changed for world, so it is best left alone
+[world]
+class = portage.sets.base.DummyPackageSet
+packages = @selected @system
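+
+# (Sets are referenced on the emerge command line with a leading '@',
+# e.g. 'emerge --update --deep @world'.)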
+
+# Not much can be changed for selected either, so it is best left alone
+[selected]
+class = portage.sets.files.WorldSelectedSet
+
+# Same as for world, though later portage versions might use a different class
+[system]
+class = portage.sets.profiles.PackagesSystemSet
+
+# For security, multiple classes are available, but the differences are
+# rather small (normally there should be no visible difference):
+# - AffectedSet: include all GLSAs that cover a vulnerable package
+# - NewAffectedSet: include all GLSAs that cover a vulnerable package and
+# haven't been applied previously
+# - NewGlsaSet: include all GLSAs that haven't been applied
+# - SecuritySet: include all GLSAs
+[security]
+class = portage.sets.security.NewAffectedSet
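+
+# (Illustrative usage: 'emerge --pretend @security' previews the GLSA
+# updates this set would apply.)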
+
+# A superset of the classic world target: a set created by this class
+# contains SLOT atoms matching all installed packages. Note that using
+# this set makes it impossible for emerge to solve blockers by
+# automatically uninstalling blocked packages.
+[installed]
+class = portage.sets.dbapi.EverythingSet
+
+# The following treats each file in /etc/portage/sets as a package set
+# named after the file. Note: "%(PORTAGE_CONFIGROOT)s" in the directory
+# value below is an interpolation placeholder whose conversion flag "s"
+# abuts "etc", so ")setc" is not a typo.
+[usersets]
+class = portage.sets.files.StaticFileSet
+multiset = true
+directory = %(PORTAGE_CONFIGROOT)setc/portage/sets
+world-candidate = True
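+
+# (Illustrative example: a file named "mytools" in that directory,
+# listing one atom per line such as
+#   app-editors/vim
+#   app-misc/screen
+# becomes available as the package set @mytools.)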
+
+# Set used to rebuild all packages that still need a preserved library
+# which remains installed only due to FEATURES=preserve-libs
+[preserved-rebuild]
+class = portage.sets.libs.PreservedLibraryConsumerSet
+
+# Installed ebuilds that inherit from known live eclasses.
+[live-rebuild]
+class = portage.sets.dbapi.VariableSet
+variable = INHERITED
+includes = bzr cvs darcs git git-2 git-r3 mercurial subversion tla
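+
+# (Illustrative usage: 'emerge @live-rebuild' re-merges installed
+# packages that inherit one of the eclasses above.)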
+
+# Installed packages that own files inside /lib/modules.
+[module-rebuild]
+class = portage.sets.dbapi.OwnerSet
+files = /lib/modules
+
+# Installed packages that own files inside /usr/lib/xorg/modules,
+# excluding the package that owns /usr/bin/Xorg.
+[x11-module-rebuild]
+class = portage.sets.dbapi.OwnerSet
+files = /usr/lib/xorg/modules
+exclude-files = /usr/bin/Xorg
+
+# Binary packages that have a different build time from a currently
+# installed package of the exact same version.
+[rebuilt-binaries]
+class = portage.sets.dbapi.RebuiltBinaries
+
+# Installed packages for which the highest visible ebuild
+# version is lower than the currently installed version.
+[downgrade]
+class = portage.sets.dbapi.DowngradeSet
+
+# Installed packages for which there are no visible ebuilds
+# corresponding to the same $CATEGORY/$PN:$SLOT.
+[unavailable]
+class = portage.sets.dbapi.UnavailableSet
+
+# Installed packages for which corresponding binary packages
+# are not available.
+[unavailable-binaries]
+class = portage.sets.dbapi.UnavailableBinaries
+
+# Installed packages for which vdb *DEPEND entries are outdated compared
+# to the matching portdb entry.
+[changed-deps]
+class = portage.sets.dbapi.ChangedDepsSet