From 2403af917b62af85e06c3e26a4665ac3c173f533 Mon Sep 17 00:00:00 2001
From: Aliaksey Kandratsenka
Date: Thu, 4 Apr 2013 17:14:51 -0700
Subject: fix excessive CPU consumption of timer_server

I've found stdlib's timer to burn CPU without good reason. Here's what
happens.

The problem is that it sleeps in milliseconds but computes time in
microseconds, and there is a bug in the code that computes how many
milliseconds to sleep. It takes the difference in microseconds between
now and the nearest timer event and then does a _truncating_ division
by 1000, so on average it sleeps 500 microseconds _less than needed_.
On wakeup it checks whether a timer tick has already occurred. It
hasn't, so it asks how long it still needs to sleep, does that same bad
computation again, and gets 0 milliseconds. The next gen_server timeout
therefore fires right away, only to find that we are still before the
closest timer tick and to decide to sleep 0 milliseconds yet again. And
again and again.

This commit changes the division to take the ceiling of the ratio
rather than the floor, so that we always sleep at least as long as the
difference between now and the closest event time.
---
 lib/stdlib/src/timer.erl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/stdlib/src/timer.erl b/lib/stdlib/src/timer.erl
index 689e42051f..e11fb046e9 100644
--- a/lib/stdlib/src/timer.erl
+++ b/lib/stdlib/src/timer.erl
@@ -354,7 +354,7 @@ timer_timeout(SysTime) ->
     '$end_of_table' ->
         infinity;
     {Time, _Ref} when Time > SysTime ->
-        Timeout = (Time - SysTime) div 1000,
+        Timeout = (Time - SysTime + 999) div 1000,
         %% Returned timeout must fit in a small int
         erlang:min(Timeout, ?MAX_TIMEOUT);
     Key ->
@@ -414,7 +414,7 @@ next_timeout() ->
     '$end_of_table' ->
         infinity;
     {Time, _} ->
-        erlang:min(positive((Time - system_time()) div 1000), ?MAX_TIMEOUT)
+        erlang:min(positive((Time - system_time() + 999) div 1000), ?MAX_TIMEOUT)
     end.
 
 %% Help functions
-- 
cgit v1.2.3
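
The arithmetic behind the fix can be seen in isolation. Below is a minimal,
hypothetical Erlang sketch (module, function names, and values are invented
for illustration; it is not part of the patch or of timer.erl) contrasting the
old floor division with the new ceiling division of a microsecond difference:

-module(div_demo).
-export([demo/0]).

%% Old behaviour: truncating (floor) division rounds the timeout down,
%% so on average the sleep is about 500 us shorter than the real gap.
floor_ms(DiffUs) -> DiffUs div 1000.

%% Patched behaviour: adding 999 before dividing yields the ceiling,
%% so the computed timeout never undershoots the real difference.
ceil_ms(DiffUs) -> (DiffUs + 999) div 1000.

demo() ->
    %% 2500 us until the next timer event: floor says sleep 2 ms
    %% (500 us short), ceiling says sleep 3 ms.
    io:format("floor: ~p ms, ceil: ~p ms~n", [floor_ms(2500), ceil_ms(2500)]),
    %% Having woken 500 us early: floor says sleep 0 ms (the busy loop
    %% described above), ceiling says sleep 1 ms.
    io:format("floor: ~p ms, ceil: ~p ms~n", [floor_ms(500), ceil_ms(500)]).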