forked from OSchip/llvm-project
[clangd] Speed up a slow sleeping testcase.
This testcase runs slowly due to 3.2s of sleeps (2s + 1s + 0.2s). After this patch it takes only 0.55s. The time was reduced by: (1) observing that the last test was bogus — we were sleeping until the queue was idle, making it effectively just a second copy of the first test, which avoids a 1s sleep; (2) when waiting for debounce, sleeping only until the test passes rather than for enough time to be safe (in practice this was 2x the debounce time, now 1x); and (3) scaling delays down by a factor of 2 (note: a factor of 10 caused bot failures). Differential Revision: https://reviews.llvm.org/D125103
This commit is contained in:
parent
fffb6e6afd
commit
c468635b7d
|
@ -34,9 +34,9 @@ void Notification::notify() {
|
|||
}
|
||||
}
|
||||
|
||||
void Notification::wait() const {
|
||||
bool Notification::wait(Deadline D) const {
|
||||
std::unique_lock<std::mutex> Lock(Mu);
|
||||
CV.wait(Lock, [this] { return Notified; });
|
||||
return clangd::wait(Lock, CV, D, [&] { return Notified; });
|
||||
}
|
||||
|
||||
Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}
|
||||
|
|
|
@ -24,20 +24,6 @@
|
|||
namespace clang {
|
||||
namespace clangd {
|
||||
|
||||
/// A threadsafe flag that is initially clear.
|
||||
class Notification {
|
||||
public:
|
||||
// Sets the flag. No-op if already set.
|
||||
void notify();
|
||||
// Blocks until flag is set.
|
||||
void wait() const;
|
||||
|
||||
private:
|
||||
bool Notified = false;
|
||||
mutable std::condition_variable CV;
|
||||
mutable std::mutex Mu;
|
||||
};
|
||||
|
||||
/// Limits the number of threads that can acquire the lock at the same time.
|
||||
class Semaphore {
|
||||
public:
|
||||
|
@ -100,6 +86,21 @@ LLVM_NODISCARD bool wait(std::unique_lock<std::mutex> &Lock,
|
|||
return true;
|
||||
}
|
||||
|
||||
/// A threadsafe flag that is initially clear.
|
||||
class Notification {
|
||||
public:
|
||||
// Sets the flag. No-op if already set.
|
||||
void notify();
|
||||
// Blocks until flag is set.
|
||||
void wait() const { (void)wait(Deadline::infinity()); }
|
||||
LLVM_NODISCARD bool wait(Deadline D) const;
|
||||
|
||||
private:
|
||||
bool Notified = false;
|
||||
mutable std::condition_variable CV;
|
||||
mutable std::mutex Mu;
|
||||
};
|
||||
|
||||
/// Runs tasks on separate (detached) threads and wait for all tasks to finish.
|
||||
/// Objects that need to spawn threads can own an AsyncTaskRunner to ensure they
|
||||
/// all complete on destruction.
|
||||
|
|
|
@ -247,28 +247,31 @@ TEST_F(TUSchedulerTests, WantDiagnostics) {
|
|||
}
|
||||
|
||||
TEST_F(TUSchedulerTests, Debounce) {
|
||||
std::atomic<int> CallbackCount(0);
|
||||
{
|
||||
auto Opts = optsForTest();
|
||||
Opts.UpdateDebounce = DebouncePolicy::fixed(std::chrono::seconds(1));
|
||||
TUScheduler S(CDB, Opts, captureDiags());
|
||||
// FIXME: we could probably use timeouts lower than 1 second here.
|
||||
auto Path = testPath("foo.cpp");
|
||||
updateWithDiags(S, Path, "auto (debounced)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) {
|
||||
ADD_FAILURE()
|
||||
<< "auto should have been debounced and canceled";
|
||||
});
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(200));
|
||||
updateWithDiags(S, Path, "auto (timed out)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) { ++CallbackCount; });
|
||||
std::this_thread::sleep_for(std::chrono::seconds(2));
|
||||
updateWithDiags(S, Path, "auto (shut down)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) { ++CallbackCount; });
|
||||
auto Opts = optsForTest();
|
||||
Opts.UpdateDebounce = DebouncePolicy::fixed(std::chrono::milliseconds(500));
|
||||
TUScheduler S(CDB, Opts, captureDiags());
|
||||
auto Path = testPath("foo.cpp");
|
||||
// Issue a write that's going to be debounced away.
|
||||
updateWithDiags(S, Path, "auto (debounced)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) {
|
||||
ADD_FAILURE()
|
||||
<< "auto should have been debounced and canceled";
|
||||
});
|
||||
// Sleep a bit to verify that it's really debounce that's holding diagnostics.
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
|
||||
ASSERT_TRUE(S.blockUntilIdle(timeoutSeconds(10)));
|
||||
}
|
||||
EXPECT_EQ(2, CallbackCount);
|
||||
// Issue another write, this time we'll wait for its diagnostics.
|
||||
Notification N;
|
||||
updateWithDiags(S, Path, "auto (timed out)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) { N.notify(); });
|
||||
EXPECT_TRUE(N.wait(timeoutSeconds(1)));
|
||||
|
||||
// Once we start shutting down the TUScheduler, this one becomes a dead write.
|
||||
updateWithDiags(S, Path, "auto (discarded)", WantDiagnostics::Auto,
|
||||
[&](std::vector<Diag>) {
|
||||
ADD_FAILURE()
|
||||
<< "auto should have been discarded (dead write)";
|
||||
});
|
||||
}
|
||||
|
||||
TEST_F(TUSchedulerTests, Cancellation) {
|
||||
|
|
Loading…
Reference in New Issue