
feat: stability improvements, see details below

Key stability improvements made (deterministic + bounded)
1) Bounded memory usage in long-running stats
Fixed unbounded growth in NetworkState latency tracking:
Replaced std::vector<uint16_t> m_latency + push_back() with a fixed-size ring buffer (kLatencyWindow = 1024) and explicit counters.
Median latency computation now operates on at most 1024 samples, preventing memory growth and avoiding performance cliffs from ever-growing copies/sorts (see the sketch below).
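A minimal sketch of the pattern, assuming a standalone ring buffer (the real change lives inside NetworkState, whose field names differ):

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

class LatencyWindow {
public:
    constexpr static size_t kLatencyWindow = 1024;

    void add(uint16_t ms)
    {
        m_samples[m_pos] = ms;                 // overwrite the oldest slot
        m_pos = (m_pos + 1) % kLatencyWindow;  // ring-buffer wrap-around
        if (m_count < kLatencyWindow) {
            ++m_count;                         // grows only until the window is full
        }
    }

    uint16_t median() const
    {
        if (m_count == 0) {
            return 0;
        }
        // Copy at most kLatencyWindow samples: the selection cost stays
        // bounded no matter how long the miner runs.
        std::array<uint16_t, kLatencyWindow> tmp{};
        std::copy_n(m_samples.begin(), m_count, tmp.begin());
        std::nth_element(tmp.begin(), tmp.begin() + m_count / 2, tmp.begin() + m_count);
        return tmp[m_count / 2];  // upper median for even counts
    }

private:
    std::array<uint16_t, kLatencyWindow> m_samples{};
    size_t m_pos   = 0;
    size_t m_count = 0;
};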
2) Prevent crash/UAF on shutdown + more predictable teardown
Controller shutdown ordering (Controller::stop()):
Now stops m_miner before destroying m_network.
This closes a window in which worker threads could submit results into a network listener that had already been destroyed.
Thread teardown hardening (backend/common/Thread.h):
Destructor now checks std::thread::joinable() before join().
Avoids std::terminate() when a thread object exists but was never started (early-exit/error paths); see the sketch below.
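The join guard reduces to this pattern; a generic sketch, not the actual Thread.h wrapper:

#include <thread>
#include <utility>

class ScopedThread {
public:
    ScopedThread() = default;

    template<class Fn>
    explicit ScopedThread(Fn &&fn) : m_thread(std::forward<Fn>(fn)) {}

    ~ScopedThread()
    {
        // join() on a non-joinable thread (never started, or already
        // joined/detached) calls std::terminate(), so check first.
        if (m_thread.joinable()) {
            m_thread.join();
        }
    }

private:
    std::thread m_thread;
};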
3) Fixed real leaks (including executable memory)
Executable memory leak fixed (crypto/cn/CnCtx.cpp):
CnCtx::create() allocates executable memory for generated_code via VirtualMemory::allocateExecutableMemory(0x4000, ...).
Previously CnCtx::release() only _mm_free()’d the struct, leaking the executable mapping.
Now CnCtx::release() frees generated_code before freeing the ctx.
GPU verification leak fixed (net/JobResults.cpp):
In getResults() (GPU result verification), a cryptonight_ctx was created via CnCtx::create() but never released.
Added the missing CnCtx::release(ctx, 1) (a condensed sketch of both leak fixes follows this list).
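Both leaks reduce to an unbalanced create/release pair. A condensed sketch of the corrected release path, with a stand-in struct and a hypothetical free_executable_memory() helper (the real code goes through VirtualMemory and _mm_free()):

#include <cstddef>
#include <cstdlib>

// Stand-in for the real cryptonight_ctx; only the leaked field is shown.
struct cryptonight_ctx { void *generated_code; };

// Hypothetical counterpart to allocateExecutableMemory(); the exact
// function the commit calls is not shown here.
void free_executable_memory(void *p, size_t size);

void cn_ctx_release(cryptonight_ctx **ctx, size_t count)
{
    for (size_t i = 0; i < count; ++i) {
        if (ctx[i] == nullptr) {
            continue;
        }
        // The fix: free the 0x4000-byte executable mapping allocated in
        // create() before freeing the struct, instead of leaking it.
        free_executable_memory(ctx[i]->generated_code, 0x4000);
        std::free(ctx[i]);  // the real code uses _mm_free()
        ctx[i] = nullptr;
    }
}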
4) JobResults: bounded queues + backpressure + safe shutdown semantics
The old JobResults could:

enqueue unlimited std::list items (m_results, m_bundles) → unbounded RAM,
call uv_queue_work per async batch → unbounded libuv threadpool backlog,
delete handler directly while worker threads might still submit → potential crash/UAF.
Changes made:

Hard queue limits:
kMaxQueuedResults = 4096
kMaxQueuedBundles = 256
Excess submissions are dropped, keeping behavior bounded under load (see the sketch after this list).
Async coalescing:
Only one pending async notification at a time (m_pendingAsync), reducing eventfd/uv wake storms.
Bounded libuv work scheduling:
Only one uv_queue_work is scheduled at a time (m_workScheduled), preventing CPU starvation and unpredictable backlog.
Safe shutdown:
JobResults::stop() now detaches the global handler first, then calls handler->stop().
Shutdown detaches m_listener, clears queues, and defers deletion until in-flight work is done.
Defensive bound on GPU result count:
Clamp count to 0xFF inside JobResults as well, not just in the caller, to guard against corrupted kernels/drivers.
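A condensed sketch of the queue rules above: bounded size, drop on overflow, and one coalesced wakeup at a time (Result and notify_event_loop() are placeholders for JobResult and Async::send()):

#include <atomic>
#include <cstddef>
#include <list>
#include <mutex>

struct Result {};

class BoundedQueue {
public:
    constexpr static size_t kMaxQueuedResults = 4096;

    void submit(const Result &r)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_stopping || m_queue.size() >= kMaxQueuedResults) {
            return;  // backpressure: drop instead of growing without bound
        }
        m_queue.push_back(r);
        // Coalescing: only the first producer since the last drain wakes the
        // event loop; later producers piggyback on the pending notification.
        if (!m_pendingAsync.exchange(true)) {
            notify_event_loop();
        }
    }

    void drain(std::list<Result> &out)
    {
        m_pendingAsync.store(false);  // re-arm before taking the batch
        std::lock_guard<std::mutex> lock(m_mutex);
        m_queue.swap(out);
    }

private:
    void notify_event_loop();  // placeholder for Async::send()

    std::mutex m_mutex;
    std::list<Result> m_queue;
    std::atomic<bool> m_pendingAsync{false};
    bool m_stopping = false;
};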
5) Idempotent cleanup
VirtualMemory::destroy() now sets pool = nullptr after delete:
prevents accidental double-delete on repeated teardown paths (sketched below).
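The whole change, as a minimal sketch (SomePool stands in for the real pool type):

struct SomePool {};

static SomePool *pool = nullptr;

void destroy()
{
    delete pool;     // safe even when pool is already nullptr (no-op)
    pool = nullptr;  // prevents a second destroy() from double-deleting
}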
Verification performed
codespell . --config ./.codespellrc: clean
CMake configure + build completed successfully (Release build)

Signed-off-by: rezky_nightky <with.rezky@gmail.com>
rezky_nightky
2026-01-21 21:22:43 +07:00
parent cb7511507f
commit 5ca4828255
7 changed files with 199 additions and 31 deletions

net/JobResults.cpp

@@ -56,6 +56,7 @@
 #include <cassert>
+#include <atomic>
 #include <list>
 #include <memory>
 #include <mutex>
@@ -66,6 +67,9 @@ namespace xmrig {
 #if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+class JobResultsPrivate;
+
 class JobBundle
 {
 public:
@@ -86,14 +90,14 @@ public:
 class JobBaton : public Baton<uv_work_t>
 {
 public:
-    inline JobBaton(std::list<JobBundle> &&bundles, IJobResultListener *listener, bool hwAES) :
+    inline JobBaton(std::list<JobBundle> &&bundles, JobResultsPrivate *owner, bool hwAES) :
         hwAES(hwAES),
-        listener(listener),
+        owner(owner),
         bundles(std::move(bundles))
     {}

     const bool hwAES;
-    IJobResultListener *listener;
+    JobResultsPrivate *owner;
     std::list<JobBundle> bundles;
     std::vector<JobResult> results;
     uint32_t errors = 0;
@@ -188,6 +192,8 @@ static void getResults(JobBundle &bundle, std::vector<JobResult> &results, uint3
             checkHash(bundle, results, nonce, hash, errors);
         }
+
+        CnCtx::release(ctx, 1);
     }

     delete memory;
@@ -200,6 +206,11 @@ class JobResultsPrivate : public IAsyncListener
 public:
     XMRIG_DISABLE_COPY_MOVE_DEFAULT(JobResultsPrivate)

+    constexpr static size_t kMaxQueuedResults = 4096;
+# if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+    constexpr static size_t kMaxQueuedBundles = 256;
+# endif
+
     inline JobResultsPrivate(IJobResultListener *listener, bool hwAES) :
         m_hwAES(hwAES),
         m_listener(listener)
@@ -214,9 +225,20 @@ public:
     inline void submit(const JobResult &result)
     {
         std::lock_guard<std::mutex> lock(m_mutex);
+
+        if (m_stopping) {
+            return;
+        }
+
+        if (m_results.size() >= kMaxQueuedResults) {
+            return;
+        }
+
         m_results.push_back(result);
-        m_async->send();
+
+        if (m_async && !m_pendingAsync.exchange(true)) {
+            m_async->send();
+        }
     }
@@ -224,13 +246,55 @@ public:
     inline void submit(const Job &job, uint32_t *results, size_t count, uint32_t device_index)
     {
         std::lock_guard<std::mutex> lock(m_mutex);
+
+        if (count > 0xFF) {
+            count = 0xFF;
+        }
+
+        if (m_stopping) {
+            return;
+        }
+
+        if (m_bundles.size() >= kMaxQueuedBundles) {
+            return;
+        }
+
         m_bundles.emplace_back(job, results, count, device_index);
-        m_async->send();
+
+        if (m_async && !m_pendingAsync.exchange(true)) {
+            m_async->send();
+        }
     }
 # endif

+    inline void stop()
+    {
+        bool deleteNow = false;
+
+        {
+            std::lock_guard<std::mutex> lock(m_mutex);
+            m_stopping = true;
+            m_listener = nullptr;
+            m_results.clear();
+
+# if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
+            m_bundles.clear();
+            m_workScheduled = false;
+            m_deleteWhenDone = true;
+            deleteNow = (m_pendingWork == 0);
+# else
+            deleteNow = true;
+# endif
+        }
+
+        if (deleteNow) {
+            m_async.reset();
+            delete this;
+        }
+    }
+
 protected:
     inline void onAsync() override { submit(); }
@@ -239,23 +303,33 @@ private:
 # if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
     inline void submit()
     {
+        m_pendingAsync.store(false);
+
         std::list<JobBundle> bundles;
         std::list<JobResult> results;

         m_mutex.lock();
-        m_bundles.swap(bundles);
         m_results.swap(results);
+
+        const bool canScheduleWork = !m_workScheduled && !m_stopping && !m_bundles.empty();
+        if (canScheduleWork) {
+            m_bundles.swap(bundles);
+            m_workScheduled = true;
+            m_pendingWork++;
+        }
         m_mutex.unlock();

         for (const auto &result : results) {
-            m_listener->onJobResult(result);
+            if (m_listener) {
+                m_listener->onJobResult(result);
+            }
         }

         if (bundles.empty()) {
             return;
         }

-        auto baton = new JobBaton(std::move(bundles), m_listener, m_hwAES);
+        auto baton = new JobBaton(std::move(bundles), this, m_hwAES);

         uv_queue_work(uv_default_loop(), &baton->req,
             [](uv_work_t *req) {
@@ -268,8 +342,67 @@ private:
             [](uv_work_t *req, int) {
                 auto baton = static_cast<JobBaton*>(req->data);

-                for (const auto &result : baton->results) {
-                    baton->listener->onJobResult(result);
-                }
+                if (baton->owner) {
+                    baton->owner->onBatonDone(std::move(baton->results));
+                }

                 delete baton;
             }
         );
     }

+    inline void onBatonDone(std::vector<JobResult> &&results)
+    {
+        for (const auto &result : results) {
+            if (m_listener) {
+                m_listener->onJobResult(result);
+            }
+        }
+
+        std::list<JobBundle> bundles;
+
+        m_mutex.lock();
+        m_pendingWork--;
+
+        const bool canScheduleWork = !m_stopping && !m_bundles.empty();
+        if (canScheduleWork) {
+            m_bundles.swap(bundles);
+            m_pendingWork++;
+        }
+        else {
+            m_workScheduled = false;
+        }
+
+        const bool canDelete = m_deleteWhenDone && m_pendingWork == 0;
+        m_mutex.unlock();
+
+        if (canDelete) {
+            m_async.reset();
+            delete this;
+            return;
+        }
+
+        if (bundles.empty()) {
+            return;
+        }
+
+        auto baton = new JobBaton(std::move(bundles), this, m_hwAES);
+
+        uv_queue_work(uv_default_loop(), &baton->req,
+            [](uv_work_t *req) {
+                auto baton = static_cast<JobBaton*>(req->data);
+                for (JobBundle &bundle : baton->bundles) {
+                    getResults(bundle, baton->results, baton->errors, baton->hwAES);
+                }
+            },
+            [](uv_work_t *req, int) {
+                auto baton = static_cast<JobBaton*>(req->data);
+                if (baton->owner) {
+                    baton->owner->onBatonDone(std::move(baton->results));
+                }
+
+                delete baton;
+            }
+        );
+    }
@@ -279,6 +412,8 @@ private:
 # else
     inline void submit()
     {
+        m_pendingAsync.store(false);
+
         std::list<JobResult> results;

         m_mutex.lock();
@@ -286,7 +421,9 @@ private:
         m_mutex.unlock();

         for (const auto &result : results) {
-            m_listener->onJobResult(result);
+            if (m_listener) {
+                m_listener->onJobResult(result);
+            }
         }
     }
 # endif
@@ -296,9 +433,14 @@ private:
     std::list<JobResult> m_results;
     std::mutex m_mutex;
     std::shared_ptr<Async> m_async;
+    std::atomic<bool> m_pendingAsync{ false };
+    bool m_stopping = false;

 # if defined(XMRIG_FEATURE_OPENCL) || defined(XMRIG_FEATURE_CUDA)
     std::list<JobBundle> m_bundles;
+    bool m_workScheduled = false;
+    uint32_t m_pendingWork = 0;
+    bool m_deleteWhenDone = false;
 # endif
 };
@@ -325,11 +467,12 @@ void xmrig::JobResults::setListener(IJobResultListener *listener, bool hwAES)
 void xmrig::JobResults::stop()
 {
     assert(handler != nullptr);

-    delete handler;
+    auto h = handler;
     handler = nullptr;
+
+    if (h) {
+        h->stop();
+    }
 }
@@ -347,8 +490,6 @@ void xmrig::JobResults::submit(const Job& job, uint32_t nonce, const uint8_t* re
 void xmrig::JobResults::submit(const JobResult &result)
 {
     assert(handler != nullptr);

-    handler->submit(result);
+    if (handler) {
+        handler->submit(result);
+    }
 }