Replaced usage of clock() with the much more precise tbb::tick_count::now().

parent 13830f64
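
For reference: clock() measures CPU time in CLOCKS_PER_SEC ticks (often coarse, and it does not advance while a thread sleeps or blocks), whereas tbb::tick_count samples a high-resolution wall clock. A minimal standalone sketch contrasting the two patterns — this is not CAMPVis code, and the work being timed is a placeholder:

```cpp
#include <cstdio>
#include <ctime>
#include <tbb/tick_count.h>

int main() {
    // Old pattern: CPU time, coarse CLOCKS_PER_SEC resolution, manual unit math.
    clock_t cStart = clock();
    // ... work to be measured ...
    clock_t cEnd = clock();
    std::printf("clock():    %ld ms\n",
                static_cast<long>((cEnd - cStart) * 1000 / CLOCKS_PER_SEC));

    // New pattern: wall-clock time; subtracting two tick_counts yields an
    // interval_t that converts to a double number of seconds via seconds().
    tbb::tick_count tStart = tbb::tick_count::now();
    // ... work to be measured ...
    tbb::tick_count tEnd = tbb::tick_count::now();
    std::printf("tick_count: %f s\n", (tEnd - tStart).seconds());
    return 0;
}
```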
@@ -25,6 +25,8 @@
 #include "abstractpipeline.h"
+
+#include <tbb/tick_count.h>
 #include "tgt/exception.h"
 #include "tgt/glcanvas.h"
 #include "tgt/tgt_gl.h"
@@ -35,8 +37,6 @@
 #include "core/tools/opengljobprocessor.h"
 #include "core/tools/simplejobprocessor.h"
-
-#include <ctime>

 #ifdef CAMPVIS_DEBUG
 // Anonymous OpenGL helper functions
@@ -151,7 +151,9 @@ namespace campvis {
         // execute processor if needed
         if (processor->getEnabled() && !processor->isLocked()) {
             if (! processor->isValid()) {
-                clock_t startTime = clock();
+                tbb::tick_count startTime;
+                if (processor->getClockExecutionTime())
+                    startTime = tbb::tick_count::now();
                 try {
                     processor->process(*_data);
@@ -164,8 +166,8 @@ namespace campvis {
                 }

                 if (processor->getClockExecutionTime()) {
-                    clock_t endTime = clock();
-                    LINFO("Executed processor " << processor->getName() << " duration: " << (endTime - startTime));
+                    tbb::tick_count endTime = tbb::tick_count::now();
+                    LINFO("Executed processor " << processor->getName() << " duration: " << (endTime - startTime).seconds());
                 }
             }
         }
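In the pipeline hunks above, the start time is now only sampled when getClockExecutionTime() is set, so disabled timing costs nothing beyond a default-constructed tbb::tick_count. A condensed sketch of that guarded-timing pattern; Processor here is a hypothetical stand-in for the CAMPVis interface, not the real class:

```cpp
#include <iostream>
#include <string>
#include <tbb/tick_count.h>

// Hypothetical stand-in for the CAMPVis processor interface.
struct Processor {
    std::string name;
    bool clockExecutionTime = true;
    void process() { /* ... real work ... */ }
};

void executeAndTime(Processor& p) {
    tbb::tick_count start;               // default-constructed; meaningful only once assigned
    if (p.clockExecutionTime)
        start = tbb::tick_count::now();  // sample only when timing was requested
    p.process();
    if (p.clockExecutionTime) {
        tbb::tick_count end = tbb::tick_count::now();
        std::cout << "Executed processor " << p.name
                  << " duration: " << (end - start).seconds() << " s\n";
    }
}
```

Note that the old LINFO line logged a raw clock_t difference in unspecified units; with (endTime - startTime).seconds() the logged duration is an actual number of seconds.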
@@ -24,6 +24,8 @@
 #include "opengljobprocessor.h"
+
+#include <tbb/tick_count.h>
 #include "tgt/assert.h"
 #include "tgt/logmanager.h"
 #include "tgt/openglgarbagecollector.h"
@@ -83,16 +85,16 @@ namespace campvis {
         _this_thread_id = std::this_thread::get_id();

         std::unique_lock<std::mutex> lock(tgt::GlContextManager::getRef().getGlMutex());
-        clock_t lastCleanupTime = clock() * 1000 / CLOCKS_PER_SEC;
+        tbb::tick_count lastCleanupTime = tbb::tick_count::now();

         while (! _stopExecution) {
             // this is a simple round-robin scheduling between all contexts:
             bool hadWork = false;
             // TODO: consider only non-empty context queues here
-            clock_t maxTimePerContext = static_cast<clock_t>(30 / _contexts.size());
+            double maxTimePerContext = (1.0 / 30.0) / _contexts.size();

             for (size_t i = 0; i < _contexts.size(); ++i) {
-                clock_t startTimeCurrentContext = clock() * 1000 / CLOCKS_PER_SEC;
+                tbb::tick_count startTimeCurrentContext = tbb::tick_count::now();
                 tgt::GLCanvas* context = _contexts[i];
                 tbb::concurrent_hash_map<tgt::GLCanvas*, PerContextJobQueue*>::const_accessor a;
@@ -121,7 +123,7 @@
                 // now comes the per-context scheduling strategy:
                 // first: perform as many serial jobs as possible:
                 AbstractJob* jobToDo = 0;
-                while ((clock() * 1000 / CLOCKS_PER_SEC) - startTimeCurrentContext < maxTimePerContext) {
+                while ((tbb::tick_count::now() - startTimeCurrentContext).seconds() < maxTimePerContext) {
                     // try to fetch a job
                     if (! a->second->_serialJobs.try_pop(jobToDo)) {
                         // no job to do, exit the while loop
@@ -147,15 +149,15 @@
                 // fourth: start the GC if it's time
-                if (clock() * 1000 / CLOCKS_PER_SEC - lastCleanupTime > 250) {
+                if ((tbb::tick_count::now() - lastCleanupTime).seconds() > 0.25) {
                     GLGC.deleteGarbage();
-                    lastCleanupTime = clock();
+                    lastCleanupTime = tbb::tick_count::now();
                 }
             }

             while (_pause > 0) {
                 GLGC.deleteGarbage();
-                lastCleanupTime = clock();
+                lastCleanupTime = tbb::tick_count::now();
                 tgt::GlContextManager::getRef().releaseCurrentContext();
                 _evaluationCondition.wait(lock);
                 tgt::GlContextManager::getRef().acquireContext(_currentContext);
@@ -165,7 +167,7 @@
             if (! hadWork) {
                 if (_currentContext != 0) {
                     GLGC.deleteGarbage();
-                    lastCleanupTime = clock();
+                    lastCleanupTime = tbb::tick_count::now();
                 }
                 tgt::GlContextManager::getRef().releaseCurrentContext();
                 _evaluationCondition.wait(lock);
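The scheduler changes above replace millisecond arithmetic with two explicit deadlines: each context may drain serial jobs only for its share of a 1/30 s frame budget, and garbage collection runs at most once every 0.25 s. A standalone sketch of that time-budgeted round-robin loop, assuming simple stand-in queue and job types rather than the CAMPVis/TBB containers:

```cpp
#include <deque>
#include <functional>
#include <vector>
#include <tbb/tick_count.h>

using Job = std::function<void()>;

// Round-robin over per-context job queues, giving each context an equal slice
// of one 1/30 s frame, mirroring (1.0 / 30.0) / _contexts.size() in the diff.
void scheduleOneFrame(std::vector<std::deque<Job>>& queues,
                      tbb::tick_count& lastCleanupTime) {
    if (queues.empty())
        return;  // avoid dividing the budget by zero
    const double maxTimePerContext = (1.0 / 30.0) / queues.size();

    for (auto& q : queues) {
        tbb::tick_count contextStart = tbb::tick_count::now();
        // Run jobs until this context's time slice is exhausted.
        while (!q.empty() &&
               (tbb::tick_count::now() - contextStart).seconds() < maxTimePerContext) {
            Job job = std::move(q.front());
            q.pop_front();
            job();
        }
    }
    // Throttle cleanup to at most once every 250 ms, as the diff does for GLGC.
    if ((tbb::tick_count::now() - lastCleanupTime).seconds() > 0.25) {
        // deleteGarbage();  // stand-in for GLGC.deleteGarbage()
        lastCleanupTime = tbb::tick_count::now();
    }
}
```

One subtlety the diff preserves: the budget is divided by the total number of contexts, including ones with empty queues, which is what the TODO about non-empty context queues refers to.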
@@ -44,8 +44,6 @@
 #include "core/coreapi.h"
 #include "core/tools/job.h"
-
-#include <ctime>

 namespace tgt {
     class GLCanvas;