mirror of https://github.com/xmrig/xmrig.git
@@ -109,6 +109,11 @@ void xmrig::Workers<T>::start(const std::vector<T> &data)
 
     for (Thread<T> *worker : m_workers) {
         worker->start(Workers<T>::onReady);
+
+        // This sleep is important for optimal caching!
+        // Threads must allocate scratchpads in order so that adjacent cores will use adjacent scratchpads
+        // Sub-optimal caching can result in up to 0.5% hashrate penalty
+        std::this_thread::sleep_for(std::chrono::milliseconds(20));
     }
 }
 
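The 20 ms stagger above serializes the workers' large scratchpad allocations so that threads pinned to adjacent cores end up with adjacent memory. A standalone sketch of the same idea, using placeholder names (Worker, run, the 2 MiB buffer) rather than the real xmrig types:

#include <chrono>
#include <thread>
#include <vector>

struct Worker {
    std::vector<char> scratchpad;               // stand-in for the per-thread RandomX scratchpad
    void run() {
        scratchpad.resize(2 * 1024 * 1024);     // allocation happens shortly after the thread starts
        // ... hashing loop would go here ...
    }
};

int main() {
    std::vector<Worker> workers(4);
    std::vector<std::thread> threads;

    for (Worker &w : workers) {
        threads.emplace_back(&Worker::run, &w);
        // Stagger the starts so allocations land in launch order,
        // mirroring the sleep added in Workers<T>::start() above.
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
    }

    for (std::thread &t : threads) {
        t.join();
    }

    return 0;
}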
@@ -163,7 +168,7 @@ void xmrig::Workers<T>::onReady(void *arg)
     assert(worker != nullptr);
 
     if (!worker || !worker->selfTest()) {
-        LOG_ERR("%s " RED("thread ") RED_BOLD("#%zu") RED(" self-test failed"), T::tag(), worker->id());
+        LOG_ERR("%s " RED("thread ") RED_BOLD("#%zu") RED(" self-test failed"), T::tag(), worker ? worker->id() : 0);
 
         handle->backend()->start(worker, false);
         delete worker;
@@ -24,6 +24,9 @@
 #define XMRIG_IRXSTORAGE_H
 
 
+#include "crypto/rx/RxConfig.h"
+
+
 #include <cstdint>
 #include <utility>
 
@@ -41,9 +44,9 @@ class IRxStorage
 public:
     virtual ~IRxStorage() = default;
 
     virtual RxDataset *dataset(const Job &job, uint32_t nodeId) const = 0;
     virtual std::pair<uint32_t, uint32_t> hugePages() const = 0;
-    virtual void init(const RxSeed &seed, uint32_t threads, bool hugePages) = 0;
+    virtual void init(const RxSeed &seed, uint32_t threads, bool hugePages, RxConfig::Mode mode) = 0;
 };
 
 
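For context, adding the RxConfig::Mode parameter to the pure virtual init() means every concrete storage class now has to accept the selected RandomX mode. A compilable sketch with stand-in types (Mode, IStorage, BasicStorage are illustrative, not the real xmrig classes); the fast/light split follows the usual RandomX convention of full dataset vs. cache-only:

#include <cstdint>
#include <utility>

enum class Mode : uint32_t { Auto, Fast, Light };   // stand-in for RxConfig::Mode

class IStorage {
public:
    virtual ~IStorage() = default;

    virtual std::pair<uint32_t, uint32_t> hugePages() const        = 0;
    virtual void init(uint32_t threads, bool hugePages, Mode mode) = 0;
};

class BasicStorage : public IStorage {
public:
    std::pair<uint32_t, uint32_t> hugePages() const override { return { 0, 0 }; }

    void init(uint32_t threads, bool hugePages, Mode mode) override
    {
        // Fast/Auto builds the full dataset, Light keeps only the cache.
        m_fullDataset = (mode != Mode::Light);
        (void)threads;
        (void)hugePages;
    }

private:
    bool m_fullDataset = false;
};

int main() {
    BasicStorage storage;
    storage.init(4, true, Mode::Fast);
    return 0;
}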
@@ -186,8 +186,20 @@ void xmrig::CpuWorker<N>::start()
             consumeJob();
         }
 
+        uint64_t storeStatsMask = 7;
+
+#       ifdef XMRIG_ALGO_RANDOMX
+        bool first = true;
+        uint64_t tempHash[8] = {};
+
+        // RandomX is faster, we don't need to store stats so often
+        if (m_job.currentJob().algorithm().family() == Algorithm::RANDOM_X) {
+            storeStatsMask = 63;
+        }
+#       endif
+
         while (!Nonce::isOutdated(Nonce::CPU, m_job.sequence())) {
-            if ((m_count & 0x7) == 0) {
+            if ((m_count & storeStatsMask) == 0) {
                 storeStats();
             }
 
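A quick standalone check of the mask arithmetic above (illustrative values only): with the old mask 7 the stats path runs every 8th pass through the loop, with 63 it runs every 64th, i.e. 8 times less often on the RandomX path.

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t oldMask = 7;    // store stats every 8 rounds
    const uint64_t rxMask  = 63;   // RandomX path: store stats every 64 rounds
    int oldStores = 0;
    int rxStores  = 0;

    for (uint64_t count = 0; count < 1024; ++count) {
        if ((count & oldMask) == 0) { ++oldStores; }
        if ((count & rxMask) == 0)  { ++rxStores; }
    }

    // Prints "128 vs 16": the wider mask hits the stats code 8x less often.
    std::printf("%d vs %d\n", oldStores, rxStores);
    return 0;
}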
@@ -197,30 +209,43 @@ void xmrig::CpuWorker<N>::start()
                 break;
             }
 
+            uint32_t current_job_nonces[N];
+            for (size_t i = 0; i < N; ++i) {
+                current_job_nonces[i] = *m_job.nonce(i);
+            }
+
 #           ifdef XMRIG_ALGO_RANDOMX
             if (job.algorithm().family() == Algorithm::RANDOM_X) {
                 if (job.algorithm() == Algorithm::DEFYX) {
-                    defyx_calculate_hash(m_vm->get(), m_job.blob(), job.size(), m_hash);
+                    if (first) {
+                        first = false;
+                        defyx_calculate_hash_first(m_vm->get(), tempHash, m_job.blob(), job.size());
+                    }
+                    m_job.nextRound(kReserveCount, 1);
+                    defyx_calculate_hash_next(m_vm->get(), tempHash, m_job.blob(), job.size(), m_hash);
                 } else {
-                    randomx_calculate_hash(m_vm->get(), m_job.blob(), job.size(), m_hash);
+                    if (first) {
+                        first = false;
+                        randomx_calculate_hash_first(m_vm->get(), tempHash, m_job.blob(), job.size());
+                    }
+                    m_job.nextRound(kReserveCount, 1);
+                    randomx_calculate_hash_next(m_vm->get(), tempHash, m_job.blob(), job.size(), m_hash);
                 }
             }
             else
 #           endif
             {
                 fn(job.algorithm())(m_job.blob(), job.size(), m_hash, m_ctx, job.height());
+                m_job.nextRound(kReserveCount, 1);
             }
 
             for (size_t i = 0; i < N; ++i) {
                 if (*reinterpret_cast<uint64_t*>(m_hash + (i * 32) + 24) < job.target()) {
-                    JobResults::submit(job, *m_job.nonce(i), m_hash + (i * 32));
+                    JobResults::submit(job, current_job_nonces[i], m_hash + (i * 32));
                 }
             }
 
-            m_job.nextRound(kReserveCount, 1);
             m_count += N;
 
             std::this_thread::yield();
         }
 
         consumeJob();
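The first/next split above pipelines RandomX hashing: _first only seeds the state for the current blob, nextRound() then advances the nonce, and each _next call outputs the hash of the blob from the previous round while seeding the new one. That is why the nonces are copied into current_job_nonces before nextRound(), so each result is submitted with the nonce it was actually computed for. A standalone mock of the pattern (mock_hash and PipelinedHasher are illustrative, not the RandomX API):

#include <cstdint>
#include <cstdio>

// Toy stand-in for a real hash function.
static uint64_t mock_hash(uint64_t blob) { return blob * 0x9E3779B97F4A7C15ULL; }

// Mimics the calculate_hash_first / calculate_hash_next calling convention:
// first() seeds the pipeline, next() returns the hash of the previously seeded
// blob and seeds the one passed in.
struct PipelinedHasher {
    uint64_t pending = 0;                       // plays the role of tempHash
    void first(uint64_t blob) { pending = blob; }
    uint64_t next(uint64_t blob) {
        const uint64_t out = mock_hash(pending);
        pending = blob;
        return out;
    }
};

int main() {
    PipelinedHasher vm;
    uint64_t nonce = 0;                         // the "blob" is just the nonce here
    bool first = true;

    for (int round = 0; round < 4; ++round) {
        const uint64_t current_nonce = nonce;   // capture before advancing, as in the diff
        if (first) {
            first = false;
            vm.first(current_nonce);            // seed the pipeline once
        }
        ++nonce;                                // m_job.nextRound() equivalent
        const uint64_t hash = vm.next(nonce);   // hash of current_nonce's blob

        std::printf("nonce %llu -> %016llx\n",
                    static_cast<unsigned long long>(current_nonce),
                    static_cast<unsigned long long>(hash));
    }

    return 0;
}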
@@ -64,7 +64,7 @@ namespace xmrig {
 extern template class Threads<CudaThreads>;
 
 
-constexpr const size_t oneMiB = 1024u * 1024u;
+constexpr const size_t oneMiB = 1024U * 1024U;
 static const char *kLabel = "CUDA";
 static const char *tag = GREEN_BG_BOLD(WHITE_BOLD_S " nv ");
 static const String kType = "cuda";
@@ -249,7 +249,7 @@ public:
 
         std::string fans;
         if (!health.fanSpeed.empty()) {
-            for (uint32_t i = 0; i < health.fanSpeed.size(); ++i) {
+            for (size_t i = 0; i < health.fanSpeed.size(); ++i) {
                 fans += " fan" + std::to_string(i) + ":" CYAN_BOLD_S + std::to_string(health.fanSpeed[i]) + "%" CLEAR;
             }
         }
@@ -58,7 +58,7 @@ namespace xmrig {
 extern template class Threads<OclThreads>;
 
 
-constexpr const size_t oneMiB = 1024u * 1024u;
+constexpr const size_t oneMiB = 1024U * 1024U;
 static const char *tag = MAGENTA_BG_BOLD(WHITE_BOLD_S " ocl ");
 static const String kType = "opencl";
 static std::mutex mutex;