aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Hubbard <dsp@google.com>2022-12-13 11:02:59 -0800
committerDavid Hubbard <dsp@google.com>2022-12-13 12:23:58 -0800
commitb897bf6b054bde97e163d1c22bf5c3e23c50f29e (patch)
treec5887d606e4d033bec1d7d2dcfa5b5685d376455
parent70e62ae53f1999a250beda05386c8a496cb15410 (diff)
downloadninja-b897bf6b054bde97e163d1c22bf5c3e23c50f29e.tar.gz
Auto-merge duplicate pools between subninja chdirs
Note: Pools cannot have the same name, except in the very rare case that a subninja chdir invokes an entirely separate build which then has a pool with the same name. Even then, the pools are silently merged across subninja chdirs, unless the depth value does not match. The special predefined 'console' pool will always have a depth of 1, so references to it get merged by the same logic. Pools are *not* supposed to break the build due to these subtle nuances of when they are merged and when they are not. Pools are supposed to be a useful decorator that optimizes the build for some rare rules that need to have *less* parallelism. Sometimes the different trees being built cannot avoid having duplicate pool names because the sources are maintained by different parties. The goal then is to *not* break the build, by either silently merging pools as much as possible, or otherwise allowing pools with the same name to silently coexist. Each pool must have a unique tuple ( Scope* Pool::parent_, HashedStr name_ ). Change-Id: Id220ed9baa41af3ddb902e8e20bd03f6acbb4441
-rw-r--r--src/build_test.cc5
-rw-r--r--src/manifest_parser.cc10
-rw-r--r--src/manifest_parser_test.cc4
-rw-r--r--src/state.cc81
-rw-r--r--src/state.h35
5 files changed, 113 insertions, 22 deletions
diff --git a/src/build_test.cc b/src/build_test.cc
index 17ec834..6957bb4 100644
--- a/src/build_test.cc
+++ b/src/build_test.cc
@@ -1564,7 +1564,10 @@ TEST_F(BuildTest, PoolEdgesReadyButNotWanted) {
State save_state;
RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state);
- EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0);
+ Pool* pool = save_state.LookupPool("some_pool",
+ save_state.edges_[0]->pos_.scope_pos());
+ ASSERT_TRUE(pool != nullptr);
+ EXPECT_GE(pool->current_use(), 0);
}
struct BuildWithLogTest : public BuildTest {
diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc
index 430f28d..d7fabe5 100644
--- a/src/manifest_parser.cc
+++ b/src/manifest_parser.cc
@@ -144,7 +144,7 @@ private:
bool LoadIncludeOrSubninja(Include& include, const LoadedFile& file,
Scope* scope, std::vector<Clump*>* out_clumps,
std::string* err);
- bool HandlePool(Pool* pool, const LoadedFile& file, std::string* err);
+ bool HandlePool(Pool* pool, Scope* scope, const LoadedFile& file, std::string* err);
bool HandleClump(Clump* clump, const LoadedFile& file, Scope* scope,
std::string* err);
@@ -275,7 +275,7 @@ bool DfsParser::LoadIncludeOrSubninja(Include& include, const LoadedFile& file,
return true;
}
-bool DfsParser::HandlePool(Pool* pool, const LoadedFile& file,
+bool DfsParser::HandlePool(Pool* pool, Scope* scope, const LoadedFile& file,
std::string* err) {
std::string depth_string;
EvaluateBindingInScope(&depth_string, pool->parse_state_.depth,
@@ -285,7 +285,7 @@ bool DfsParser::HandlePool(Pool* pool, const LoadedFile& file,
return DecorateError(file, pool->parse_state_.depth_diag_pos,
"invalid pool depth", err);
}
- if (!state_->AddPool(pool)) {
+ if (!state_->AddPool(pool, scope)) {
return DecorateError(file, pool->parse_state_.pool_name_diag_pos,
"duplicate pool '" + pool->name() + "'", err);
}
@@ -319,7 +319,7 @@ bool DfsParser::HandleClump(Clump* clump, const LoadedFile& file, Scope* scope,
}
}
for (Pool* pool : clump->pools_) {
- if (!HandlePool(pool, file, err)) {
+ if (!HandlePool(pool, scope, file, err)) {
return false;
}
}
@@ -453,7 +453,7 @@ bool ManifestLoader::AddEdgeToGraph(Edge* edge, const LoadedFile& file,
if (pool_name.empty()) {
edge->pool_ = &State::kDefaultPool;
} else {
- edge->pool_ = state_->LookupPoolAtPos(pool_name, edge->pos_.dfs_location());
+ edge->pool_ = state_->LookupPoolAtPos(pool_name, edge_pos, edge->pos_.dfs_location());
if (edge->pool_ == nullptr) {
return DecorateError(file, edge->parse_state_.final_diag_pos,
"unknown pool name '" + pool_name + "'", err);
diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc
index acb3abd..7944ab1 100644
--- a/src/manifest_parser_test.cc
+++ b/src/manifest_parser_test.cc
@@ -1655,8 +1655,8 @@ TEST_F(ParserTest, IncludeUsingAVariable) {
"path = nonexistent.ninja\n"));
}
-TEST_F(ParserTest, UnscopedPool) {
- // Pools aren't scoped.
+TEST_F(ParserTest, CrossFilePoolScope) {
+ // Pool scope is across all ninja files within this chdir.
fs_.Create("foo.ninja", "pool link\n"
" depth = 3\n");
ASSERT_NO_FATAL_FAILURE(AssertParse(
diff --git a/src/state.cc b/src/state.cc
index 2ae3dde..aa46912 100644
--- a/src/state.cc
+++ b/src/state.cc
@@ -74,12 +74,56 @@ State::State() {
root_scope_.AllocDecls(1);
root_scope_.AddAllBuiltinRules();
- AddPool(&kDefaultPool);
- AddPool(&kConsolePool);
+ AddPool(&kDefaultPool, &root_scope_);
+ AddPool(&kConsolePool, &root_scope_);
}
-bool State::AddPool(Pool* pool) {
- return pools_.insert({ pool->name_hashed(), pool }).second;
+bool State::AddPool(Pool* pool, Scope* scope) {
+ // Find root scope of this subninja chdir.
+ while (scope->parent() != nullptr) {
+ scope = scope->parent();
+ }
+ pool->parent_ = scope;
+
+ auto r = pools_.insert({ pool->name_hashed(), {} });
+ if (r.second) {
+ // This pool has a unique name, so a new SameNamePools was inserted.
+ bool poolUnique = r.first->second.pool.insert({ scope, pool }).second;
+ if (!poolUnique) {
+ Fatal("SameNamePools should be empty, but says it has a duplicate.");
+ return false;
+ }
+ return true;
+ }
+
+ // This pool does not have a unique name. Nothing was changed in pools_.
+ SameNamePools& sims = r.first->second;
+ auto i = sims.pool.find(scope);
+ if (i != sims.pool.end()) {
+ return false; // Found a duplicate in the same subninja chdir.
+ }
+ // This pool is unique within its subninja chdir.
+ // If it has the same depth as any pool in r.first.pool, merge it.
+ for (i = sims.pool.begin(); i != sims.pool.end(); ++i) {
+ if (i->second->depth_ == pool->depth_) {
+ // The Pool* pool is not used. The previous Pool* in i->second is kept.
+ // However, the previous Pool* is now available in this scope.
+ bool poolUnique = sims.pool.insert({ scope, i->second }).second;
+ if (!poolUnique) {
+ Fatal("SameNamePools should not have a duplicate, but does.");
+ return false;
+ }
+ return true;
+ }
+ }
+
+ // This pool is unique within its subninja chdir and unique globally.
+ bool poolUnique = sims.pool.insert({ scope, pool }).second;
+ if (!poolUnique) {
+ Fatal("SameNamePools did not accept another pool.");
+ return false;
+ }
+ return true;
}
Edge* State::AddEdge(const Rule* rule) {
@@ -93,16 +137,31 @@ Edge* State::AddEdge(const Rule* rule) {
return edge;
}
-Pool* State::LookupPool(const HashedStrView& pool_name) {
+Pool* State::LookupPool(const HashedStrView& pool_name, const ScopePosition edge_pos) {
+ if (edge_pos.scope == nullptr) {
+ Fatal("LookupPool(%s): edge_pos.scope is NULL",
+ pool_name.str_view().AsString().c_str());
+ return nullptr;
+ }
+ // Find root scope of this subninja chdir.
+ Scope* parent = edge_pos.scope;
+ while (parent->parent() != nullptr) {
+ parent = parent->parent();
+ }
+
auto i = pools_.find(pool_name);
if (i == pools_.end())
return nullptr;
- return i->second;
+ auto j = i->second.pool.find(parent);
+ if (j == i->second.pool.end()) {
+ return nullptr;
+ }
+ return j->second;
}
-Pool* State::LookupPoolAtPos(const HashedStrView& pool_name,
+Pool* State::LookupPoolAtPos(const HashedStrView& pool_name, const ScopePosition edge_pos,
DeclIndex dfs_location) {
- Pool* result = LookupPool(pool_name);
+ Pool* result = LookupPool(pool_name, edge_pos);
if (result == nullptr) return nullptr;
return result->dfs_location() < dfs_location ? result : nullptr;
}
@@ -222,8 +281,10 @@ void State::Dump() {
if (!pools_.empty()) {
printf("resource_pools:\n");
for (auto it = pools_.begin(); it != pools_.end(); ++it) {
- if (!it->second->name().empty()) {
- it->second->Dump();
+ for (auto sameIt = it->second.pool.begin(); sameIt != it->second.pool.end(); ++sameIt) {
+ if (!sameIt->second->name().empty()) {
+ sameIt->second->Dump();
+ }
}
}
}
diff --git a/src/state.h b/src/state.h
index 7e000a6..6d2cd2d 100644
--- a/src/state.h
+++ b/src/state.h
@@ -77,6 +77,7 @@ struct Pool {
HashedStr name_;
int depth_ = 0;
RelativePosition pos_;
+ Scope* parent_;
// Temporary fields used only during manifest parsing.
struct {
@@ -103,6 +104,29 @@ private:
DelayedEdges delayed_;
};
+// SameNamePools contains all pools with the same name.
+// Note: Pools cannot have the same name, except in the very rare case
+// that a subninja chdir invokes an entirely separate build which then
+// has a pool with the same name.
+//
+// Even then, the pools are silently merged across subninja chdirs, unless the
+// depth value does not match. The special predefined 'console' pool will
+// always have a depth of 1, so references to it get merged by the same logic.
+//
+// Pools are *not* supposed to break the build due to these subtle nuances of
+// when they are merged and when they are not. Pools are supposed to be a
+// useful decorator that optimizes the build for some rare rules that need to
+// have *less* parallelism. Sometimes the different trees being built cannot
+// avoid having duplicate pool names because the sources are maintained by
+// different parties. The goal then is to *not* break the build, by either
+// silently merging pools as much as possible, or otherwise allowing pools
+// with the same name to silently coexist.
+//
+// Each pool must have a unique tuple ( Scope* Pool::parent_, HashedStr name_ )
+struct SameNamePools {
+ std::unordered_map<Scope*, Pool*> pool;
+};
+
/// Global state (file status) for a single run.
struct State {
/// The built-in pools and rules use this dummy built-in scope to initialize
@@ -116,14 +140,17 @@ struct State {
State();
void AddBuiltinRule(Rule* rule);
- bool AddPool(Pool* pool);
+ bool AddPool(Pool* pool, Scope* scope);
Edge* AddEdge(const Rule* rule);
- Pool* LookupPool(const HashedStrView& pool_name);
+ Pool* LookupPool(const HashedStrView& pool_name,
+ const ScopePosition edge_pos);
/// Lookup a pool at a DFS position. Pools are scoped to the root scope of
/// their subninja chdir: a pool's name must be unique within that chdir, and
/// it must be declared before an edge references it.
- Pool* LookupPoolAtPos(const HashedStrView& pool_name, DeclIndex dfs_location);
+ Pool* LookupPoolAtPos(const HashedStrView& pool_name,
+ const ScopePosition edge_pos,
+ DeclIndex dfs_location);
/// Creates the node if it doesn't exist. Never returns nullptr. Thread-safe.
/// 'path' must be the Scope::GlobalPath(), globally unique.
@@ -166,7 +193,7 @@ struct State {
Paths paths_;
/// All the pools used in the graph.
- std::unordered_map<HashedStrView, Pool*> pools_;
+ std::unordered_map<HashedStrView, SameNamePools> pools_;
/// All the edges of the graph.
vector<Edge*> edges_;