Mirror of https://github.com/xmrig/xmrig.git (synced 2026-01-22 22:42:53 -05:00)
feat: stability improvements, see details below
Key stability improvements made (deterministic + bounded)

1) Bounded memory usage in long-running stats

Fixed unbounded growth in NetworkState latency tracking: replaced std::vector<uint16_t> m_latency + push_back() with a fixed-size ring buffer (kLatencyWindow = 1024) and explicit counters. Median latency computation now operates on at most 1024 samples, preventing memory growth and avoiding performance cliffs from ever-growing copies/sorts. (A standalone sketch of this ring-buffer median follows this message.)

2) Prevent crash/UAF on shutdown + more predictable teardown

- Controller shutdown ordering (Controller::stop()): now stops m_miner before destroying m_network. This reduces the chance of worker threads submitting results into a network listener that is already destroyed.
- Thread teardown hardening (backend/common/Thread.h): the destructor now checks std::thread::joinable() before join(), avoiding std::terminate() if a thread object exists but never started due to early exit/error paths.

3) Fixed real leaks (including executable memory)

- Executable memory leak fixed (crypto/cn/CnCtx.cpp): CnCtx::create() allocates executable memory for generated_code via VirtualMemory::allocateExecutableMemory(0x4000, ...). Previously CnCtx::release() only _mm_free()'d the struct, leaking the executable mapping. Now CnCtx::release() frees generated_code before freeing the ctx.
- GPU verification leak fixed (net/JobResults.cpp): in getResults() (GPU result verification), a cryptonight_ctx was created via CnCtx::create() but never released. Added CnCtx::release(ctx, 1).

4) JobResults: bounded queues + backpressure + safe shutdown semantics

The old JobResults could:
- enqueue unlimited std::list items (m_results, m_bundles) → unbounded RAM,
- call uv_queue_work per async batch → unbounded libuv threadpool backlog,
- delete the handler directly while worker threads might still submit → potential crash/UAF.

Changes made:
- Hard queue limits: kMaxQueuedResults = 4096, kMaxQueuedBundles = 256. Excess is dropped (bounded behavior under load).
- Async coalescing: only one pending async notification at a time (m_pendingAsync), reducing eventfd/uv wake storms.
- Bounded libuv work scheduling: only one uv_queue_work is scheduled at a time (m_workScheduled), preventing CPU starvation and unpredictable backlog.
- Safe shutdown: JobResults::stop() now detaches the global handler first, then calls handler->stop(). Shutdown detaches m_listener, clears queues, and defers deletion until in-flight work is done.
- Defensive bound on GPU result count: count is clamped to 0xFF inside JobResults as well, not just in the caller, to guard against corrupted kernels/drivers.

(Simplified sketches of the bounded-queue/coalescing and deferred-deletion patterns follow the corresponding JobResults hunks below.)

5) Idempotent cleanup

VirtualMemory::destroy() now sets pool = nullptr after delete: prevents accidental double-delete on repeated teardown paths.

Verification performed
- codespell . --config ./.codespellrc: clean
- CMake configure + build completed successfully (Release build)

Signed-off-by: rezky_nightky <with.rezky@gmail.com>
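A minimal standalone sketch of the bounded ring-buffer median described in item 1. This is not the patched NetworkState code (that appears in the diff below); the names LatencyWindow, add() and median() are illustrative only, and single-threaded use is assumed:

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

class LatencyWindow {
public:
    static constexpr size_t kWindow = 1024;

    void add(uint64_t elapsedMs) {
        // Clamp to 16 bits and overwrite the oldest slot once the window is full.
        m_samples[m_pos] = elapsedMs > 0xFFFF ? 0xFFFF : static_cast<uint16_t>(elapsedMs);
        m_pos = (m_pos + 1) % kWindow;
        if (m_count < kWindow) {
            ++m_count;
        }
    }

    uint16_t median() const {
        if (m_count == 0) {
            return 0;
        }
        // Copy at most kWindow samples; nth_element then runs on the copy only.
        std::array<uint16_t, kWindow> v;
        const size_t start = (m_pos + kWindow - m_count) % kWindow;
        for (size_t i = 0; i < m_count; ++i) {
            v[i] = m_samples[(start + i) % kWindow];
        }
        std::nth_element(v.begin(), v.begin() + m_count / 2, v.begin() + m_count);
        return v[m_count / 2];
    }

private:
    std::array<uint16_t, kWindow> m_samples{};
    size_t m_count = 0;
    size_t m_pos   = 0;
};

int main() {
    LatencyWindow w;
    for (uint64_t i = 1; i <= 2000; ++i) {
        w.add(i);                         // only the last 1024 samples are retained
    }
    std::printf("median: %u\n", static_cast<unsigned>(w.median()));  // 1489 (median of 977..2000)
    return 0;
}

With a fixed window, both memory use and the cost of each median query are capped, regardless of how long the miner runs.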
backend/common/Thread.h
@@ -65,7 +65,7 @@ public:
 }
 }
 # else
-inline ~Thread() { m_thread.join(); delete m_worker; }
+inline ~Thread() { if (m_thread.joinable()) { m_thread.join(); } delete m_worker; }

 inline void start(void *(*callback)(void *)) { m_thread = std::thread(callback, this); }
 # endif
NetworkState.cpp
@@ -210,7 +210,7 @@ void xmrig::NetworkState::printResults() const
 printHashes(m_accepted, m_hashes);
 printDiff(m_diff);

-if (m_active && !m_latency.empty()) {
+if (m_active && m_latencyCount > 0) {
 printAvgTime(avgTime());
 }

@@ -298,13 +298,19 @@ void xmrig::NetworkState::onResultAccepted(IStrategy *strategy, IClient *client,

 uint32_t xmrig::NetworkState::latency() const
 {
-const size_t calls = m_latency.size();
+const size_t calls = m_latencyCount;
 if (calls == 0) {
 return 0;
 }

-auto v = m_latency;
-std::nth_element(v.begin(), v.begin() + calls / 2, v.end());
+std::array<uint16_t, kLatencyWindow> v;
+const size_t start = (m_latencyPos + kLatencyWindow - calls) % kLatencyWindow;
+
+for (size_t i = 0; i < calls; ++i) {
+v[i] = m_latency[(start + i) % kLatencyWindow];
+}
+
+std::nth_element(v.begin(), v.begin() + calls / 2, v.begin() + calls);

 return v[calls / 2];
 }

@@ -312,11 +318,11 @@ uint32_t xmrig::NetworkState::latency() const

 uint64_t xmrig::NetworkState::avgTime() const
 {
-if (m_latency.empty()) {
+if (m_latencyCount == 0) {
 return 0;
 }

-return connectionTime() / m_latency.size();
+return connectionTime() / m_latencyCount;
 }

@@ -342,7 +348,12 @@ void xmrig::NetworkState::add(const SubmitResult &result, const char *error)
 std::sort(m_topDiff.rbegin(), m_topDiff.rend());
 }

-m_latency.push_back(result.elapsed > 0xFFFF ? 0xFFFF : static_cast<uint16_t>(result.elapsed));
+m_latency[m_latencyPos] = result.elapsed > 0xFFFF ? 0xFFFF : static_cast<uint16_t>(result.elapsed);
+m_latencyPos = (m_latencyPos + 1) % kLatencyWindow;
+
+if (m_latencyCount < kLatencyWindow) {
+m_latencyCount++;
+}
 }

@@ -355,5 +366,6 @@ void xmrig::NetworkState::stop()
 m_fingerprint = nullptr;

 m_failures++;
-m_latency.clear();
+m_latencyCount = 0;
+m_latencyPos = 0;
 }
NetworkState.h
@@ -27,7 +27,6 @@

 #include <array>
 #include <string>
-#include <vector>


 namespace xmrig {

@@ -60,6 +59,8 @@ protected:
 void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) override;

 private:
+constexpr static size_t kLatencyWindow = 1024;
+
 uint32_t latency() const;
 uint64_t avgTime() const;
 uint64_t connectionTime() const;

@@ -70,7 +71,9 @@ private:
 bool m_active = false;
 char m_pool[256]{};
 std::array<uint64_t, 10> m_topDiff { { } };
-std::vector<uint16_t> m_latency;
+std::array<uint16_t, kLatencyWindow> m_latency { { } };
+size_t m_latencyCount = 0;
+size_t m_latencyPos = 0;
 String m_fingerprint;
 String m_ip;
 String m_tls;
Controller.cpp
@@ -76,10 +76,12 @@ void xmrig::Controller::stop()
 {
 Base::stop();

-m_network.reset();
+if (m_miner) {

 m_miner->stop();
 m_miner.reset();
+}
+
+m_network.reset();
 }

crypto/cn/CnCtx.cpp
@@ -49,6 +49,15 @@ void xmrig::CnCtx::release(cryptonight_ctx **ctx, size_t count)
 }

 for (size_t i = 0; i < count; ++i) {
+if (ctx[i] && ctx[i]->generated_code) {
+# ifdef XMRIG_OS_WIN
+VirtualMemory::freeLargePagesMemory(reinterpret_cast<void *>(ctx[i]->generated_code), 0);
+# else
+VirtualMemory::freeLargePagesMemory(reinterpret_cast<void *>(ctx[i]->generated_code), 0x4000);
+# endif
+ctx[i]->generated_code = nullptr;
+}
+
 _mm_free(ctx[i]);
 }
 }
VirtualMemory.cpp
@@ -124,6 +124,7 @@ uint32_t xmrig::VirtualMemory::bindToNUMANode(int64_t)
 void xmrig::VirtualMemory::destroy()
 {
 delete pool;
+pool = nullptr;
 }

net/JobResults.cpp
@@ -56,6 +56,7 @@


 #include <cassert>
+#include <atomic>
 #include <list>
 #include <memory>
 #include <mutex>

@@ -66,6 +67,9 @@ namespace xmrig {


 #if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+class JobResultsPrivate;
+
+
 class JobBundle
 {
 public:

@@ -86,14 +90,14 @@ public:
 class JobBaton : public Baton<uv_work_t>
 {
 public:
-inline JobBaton(std::list<JobBundle> &&bundles, IJobResultListener *listener, bool hwAES) :
+inline JobBaton(std::list<JobBundle> &&bundles, JobResultsPrivate *owner, bool hwAES) :
 hwAES(hwAES),
-listener(listener),
+owner(owner),
 bundles(std::move(bundles))
 {}

 const bool hwAES;
-IJobResultListener *listener;
+JobResultsPrivate *owner;
 std::list<JobBundle> bundles;
 std::vector<JobResult> results;
 uint32_t errors = 0;

@@ -188,6 +192,8 @@ static void getResults(JobBundle &bundle, std::vector<JobResult> &results, uint3

 checkHash(bundle, results, nonce, hash, errors);
 }

+CnCtx::release(ctx, 1);
 }

 delete memory;

@@ -200,6 +206,11 @@ class JobResultsPrivate : public IAsyncListener
 public:
 XMRIG_DISABLE_COPY_MOVE_DEFAULT(JobResultsPrivate)

+constexpr static size_t kMaxQueuedResults = 4096;
+# if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+constexpr static size_t kMaxQueuedBundles = 256;
+# endif
+
 inline JobResultsPrivate(IJobResultListener *listener, bool hwAES) :
 m_hwAES(hwAES),
 m_listener(listener)
@@ -214,23 +225,76 @@ public:
 inline void submit(const JobResult &result)
 {
 std::lock_guard<std::mutex> lock(m_mutex);

+if (m_stopping) {
+return;
+}
+
+if (m_results.size() >= kMaxQueuedResults) {
+return;
+}
+
 m_results.push_back(result);

+if (m_async && !m_pendingAsync.exchange(true)) {
 m_async->send();
 }
+}


 # if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
 inline void submit(const Job &job, uint32_t *results, size_t count, uint32_t device_index)
 {
 std::lock_guard<std::mutex> lock(m_mutex);

+if (count > 0xFF) {
+count = 0xFF;
+}
+
+if (m_stopping) {
+return;
+}
+
+if (m_bundles.size() >= kMaxQueuedBundles) {
+return;
+}
+
 m_bundles.emplace_back(job, results, count, device_index);

+if (m_async && !m_pendingAsync.exchange(true)) {
 m_async->send();
 }
+}
 # endif


+inline void stop()
+{
+bool deleteNow = false;
+
+{
+std::lock_guard<std::mutex> lock(m_mutex);
+m_stopping = true;
+m_listener = nullptr;
+m_results.clear();
+
+# if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+m_bundles.clear();
+m_workScheduled = false;
+m_deleteWhenDone = true;
+deleteNow = (m_pendingWork == 0);
+# else
+deleteNow = true;
+# endif
+}
+
+if (deleteNow) {
+m_async.reset();
+delete this;
+}
+}


 protected:
 inline void onAsync() override { submit(); }
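For context on the two submit() overloads above, a simplified standalone sketch of the bounded-queue-plus-coalesced-wake-up pattern. It is not xmrig code: BoundedQueue, wakeConsumer() and drain() are illustrative stand-ins (wakeConsumer() plays the role of Async::send()), and the payload is a plain int:

#include <atomic>
#include <cstdio>
#include <list>
#include <mutex>

class BoundedQueue {
public:
    static constexpr size_t kMaxQueued = 4096;

    // Called by producer threads. Returns false if the item was dropped.
    bool submit(int item) {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_stopping || m_items.size() >= kMaxQueued) {
            return false;                       // bounded: drop under overload
        }
        m_items.push_back(item);
        if (!m_pendingWake.exchange(true)) {
            wakeConsumer();                     // coalesced: one wake-up per batch
        }
        return true;
    }

    // Called on the consumer side in response to a wake-up.
    void drain() {
        m_pendingWake.store(false);             // allow the next wake-up
        std::list<int> batch;
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_items.swap(batch);
        }
        for (int item : batch) {
            std::printf("processing %d\n", item);
        }
    }

private:
    void wakeConsumer() { /* stand-in for the async notification (e.g. uv_async_send) */ }

    std::mutex m_mutex;
    std::list<int> m_items;
    std::atomic<bool> m_pendingWake{false};
    bool m_stopping = false;
};

int main() {
    BoundedQueue q;
    q.submit(1);
    q.submit(2);   // no second wake-up: the first one is still pending
    q.drain();
    return 0;
}

Dropping excess items keeps memory bounded under load, and exchange(true) on the pending flag ensures at most one wake-up is outstanding no matter how many producers submit.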
@@ -239,23 +303,33 @@ private:
 # if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
 inline void submit()
 {
+m_pendingAsync.store(false);
+
 std::list<JobBundle> bundles;
 std::list<JobResult> results;

 m_mutex.lock();
-m_bundles.swap(bundles);
 m_results.swap(results);

+const bool canScheduleWork = !m_workScheduled && !m_stopping && !m_bundles.empty();
+if (canScheduleWork) {
+m_bundles.swap(bundles);
+m_workScheduled = true;
+m_pendingWork++;
+}
 m_mutex.unlock();

 for (const auto &result : results) {
+if (m_listener) {
 m_listener->onJobResult(result);
 }
+}

 if (bundles.empty()) {
 return;
 }

-auto baton = new JobBaton(std::move(bundles), m_listener, m_hwAES);
+auto baton = new JobBaton(std::move(bundles), this, m_hwAES);

 uv_queue_work(uv_default_loop(), &baton->req,
 [](uv_work_t *req) {
@@ -268,8 +342,67 @@ private:
 [](uv_work_t *req, int) {
 auto baton = static_cast<JobBaton*>(req->data);

-for (const auto &result : baton->results) {
-baton->listener->onJobResult(result);
+if (baton->owner) {
+baton->owner->onBatonDone(std::move(baton->results));
+}
+
+delete baton;
+}
+);
+}
+
+
+inline void onBatonDone(std::vector<JobResult> &&results)
+{
+for (const auto &result : results) {
+if (m_listener) {
+m_listener->onJobResult(result);
+}
+}
+
+std::list<JobBundle> bundles;
+
+m_mutex.lock();
+
+m_pendingWork--;
+
+const bool canScheduleWork = !m_stopping && !m_bundles.empty();
+if (canScheduleWork) {
+m_bundles.swap(bundles);
+m_pendingWork++;
+}
+else {
+m_workScheduled = false;
+}
+
+const bool canDelete = m_deleteWhenDone && m_pendingWork == 0;
+m_mutex.unlock();
+
+if (canDelete) {
+m_async.reset();
+delete this;
+return;
+}
+
+if (bundles.empty()) {
+return;
+}
+
+auto baton = new JobBaton(std::move(bundles), this, m_hwAES);
+
+uv_queue_work(uv_default_loop(), &baton->req,
+[](uv_work_t *req) {
+auto baton = static_cast<JobBaton*>(req->data);
+
+for (JobBundle &bundle : baton->bundles) {
+getResults(bundle, baton->results, baton->errors, baton->hwAES);
+}
+},
+[](uv_work_t *req, int) {
+auto baton = static_cast<JobBaton*>(req->data);
+
+if (baton->owner) {
+baton->owner->onBatonDone(std::move(baton->results));
 }

 delete baton;
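Likewise, a simplified standalone sketch of the deferred-deletion pattern that stop() and onBatonDone() implement above: the object is only deleted once the pending-work counter reaches zero. Worker and its method names are illustrative, not the actual JobResultsPrivate API:

#include <cstdio>
#include <mutex>

class Worker {
public:
    // Called when a background batch is handed to the thread pool.
    void onWorkScheduled() {
        std::lock_guard<std::mutex> lock(m_mutex);
        ++m_pendingWork;
    }

    // Called from the completion callback of each batch.
    void onWorkDone() {
        bool deleteNow = false;
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            --m_pendingWork;
            deleteNow = m_deleteWhenDone && m_pendingWork == 0;
        }
        if (deleteNow) {
            delete this;                 // safe: no batch references this object any more
        }
    }

    // Called once on shutdown; deletes immediately only if nothing is in flight.
    void stop() {
        bool deleteNow = false;
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_deleteWhenDone = true;
            deleteNow = (m_pendingWork == 0);
        }
        if (deleteNow) {
            delete this;
        }
    }

private:
    ~Worker() { std::puts("Worker destroyed"); }

    std::mutex m_mutex;
    unsigned m_pendingWork = 0;
    bool m_deleteWhenDone = false;
};

int main() {
    auto *w = new Worker();
    w->onWorkScheduled();
    w->stop();          // deletion deferred: one batch is still in flight
    w->onWorkDone();    // last batch finishes, object deletes itself
    return 0;
}

stop() merely marks the object for deletion while work is still in flight; the last completion callback performs the actual delete, so no background batch can touch a destroyed object.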
@@ -279,6 +412,8 @@ private:
 # else
 inline void submit()
 {
+m_pendingAsync.store(false);
+
 std::list<JobResult> results;

 m_mutex.lock();

@@ -286,9 +421,11 @@ private:
 m_mutex.unlock();

 for (const auto &result : results) {
+if (m_listener) {
 m_listener->onJobResult(result);
 }
 }
+}
 # endif

 const bool m_hwAES;

@@ -296,9 +433,14 @@ private:
 std::list<JobResult> m_results;
 std::mutex m_mutex;
 std::shared_ptr<Async> m_async;
+std::atomic<bool> m_pendingAsync{ false };
+bool m_stopping = false;

 # if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
 std::list<JobBundle> m_bundles;
+bool m_workScheduled = false;
+uint32_t m_pendingWork = 0;
+bool m_deleteWhenDone = false;
 # endif
 };

@@ -325,11 +467,12 @@ void xmrig::JobResults::setListener(IJobResultListener *listener, bool hwAES)

 void xmrig::JobResults::stop()
 {
-assert(handler != nullptr);
+auto h = handler;

-delete handler;
-
 handler = nullptr;

+if (h) {
+h->stop();
+}
 }

@@ -347,8 +490,6 @@ void xmrig::JobResults::submit(const Job& job, uint32_t nonce, const uint8_t* re

 void xmrig::JobResults::submit(const JobResult &result)
 {
-assert(handler != nullptr);
-
 if (handler) {
 handler->submit(result);
 }