mirror of https://github.com/xmrig/xmrig.git synced 2025-12-12 01:42:48 -05:00

Linux: added support for transparent huge pages

Author: SChernykh
Date:   2025-12-11 11:23:18 +01:00
Parent: 856813c1ae
Commit: 482a1f0b40

8 changed files with 32 additions and 5 deletions
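For context: transparent huge pages (THP) let the Linux kernel back ordinary anonymous memory with 2 MB pages without reserving explicit huge pages up front; a process opts a range in with madvise(MADV_HUGEPAGE). The diff below wires this in as a fallback when explicit huge pages cannot be allocated. A minimal standalone sketch of the mechanism, not taken from the commit (the buffer size and the 2 MB page size are illustrative assumptions):

// Minimal sketch, not xmrig code: allocate a 2 MB-aligned anonymous buffer and
// ask the kernel to back it with transparent huge pages via madvise().
#include <sys/mman.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    constexpr size_t kHugePageSize = 2u * 1024u * 1024u;   // assumed THP size on x86-64
    constexpr size_t kSize         = 16u * kHugePageSize;  // must be a multiple of the alignment

    void *p = std::aligned_alloc(kHugePageSize, kSize);    // alignment lets the kernel map whole huge pages
    if (!p) {
        return 1;
    }

    // MADV_HUGEPAGE is only a hint; it fails if THP is disabled system-wide.
    const bool ok = (madvise(p, kSize, MADV_HUGEPAGE) == 0);
    std::printf("madvise(MADV_HUGEPAGE): %s\n", ok ? "accepted" : "rejected");

    std::free(p);
    return 0;
}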

View File

@@ -87,14 +87,14 @@ xmrig::CpuWorker<N>::CpuWorker(size_t id, const CpuLaunchData &data) :
         if (!cn_heavyZen3Memory) {
             // Round up number of threads to the multiple of 8
             const size_t num_threads = ((m_threads + 7) / 8) * 8;
-            cn_heavyZen3Memory = new VirtualMemory(m_algorithm.l3() * num_threads, data.hugePages, false, false, node());
+            cn_heavyZen3Memory = new VirtualMemory(m_algorithm.l3() * num_threads, data.hugePages, false, false, node(), VirtualMemory::kDefaultHugePageSize);
         }
         m_memory = cn_heavyZen3Memory;
     }
     else
 #   endif
     {
-        m_memory = new VirtualMemory(m_algorithm.l3() * N, data.hugePages, false, true, node());
+        m_memory = new VirtualMemory(m_algorithm.l3() * N, data.hugePages, false, true, node(), VirtualMemory::kDefaultHugePageSize);
     }
 
 #   ifdef XMRIG_ALGO_GHOSTRIDER

View File

@@ -49,7 +49,7 @@ xmrig::MemoryPool::MemoryPool(size_t size, bool hugePages, uint32_t node)
     constexpr size_t alignment = 1 << 24;
 
-    m_memory = new VirtualMemory(size * pageSize + alignment, hugePages, false, false, node);
+    m_memory = new VirtualMemory(size * pageSize + alignment, hugePages, false, false, node, VirtualMemory::kDefaultHugePageSize);
 
     m_alignOffset = (alignment - (((size_t)m_memory->scratchpad()) % alignment)) % alignment;
 }

View File

@@ -75,6 +75,16 @@ xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, bool oneGbPages
     }
 
     m_scratchpad = static_cast<uint8_t*>(_mm_malloc(m_size, alignSize));
+
+    // Huge pages failed to allocate, but try to enable transparent huge pages for the range
+    if (alignSize >= kDefaultHugePageSize) {
+        if (m_scratchpad) {
+            adviseLargePages(m_scratchpad, m_size);
+        }
+        else {
+            m_scratchpad = static_cast<uint8_t*>(_mm_malloc(m_size, 64));
+        }
+    }
 }
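The hunk above is the core of the change: when explicit huge pages fail, the scratchpad is allocated with a regular _mm_malloc aligned to the huge-page size and the range is advised with adviseLargePages(); only if that aligned allocation itself fails does it retry with plain 64-byte alignment. Whether the kernel actually promoted the advised range can be seen in the AnonHugePages counters of /proc/self/smaps; a hedged sketch (the helper name is made up, not part of xmrig):

// Hypothetical helper, for illustration only: sum the AnonHugePages fields in
// /proc/self/smaps to see how many kB of the process are backed by THP.
#include <cstddef>
#include <fstream>
#include <sstream>
#include <string>

static size_t anonHugePagesKb()
{
    std::ifstream smaps("/proc/self/smaps");
    std::string line;
    size_t total = 0;

    while (std::getline(smaps, line)) {
        if (line.compare(0, 14, "AnonHugePages:") == 0) {
            std::istringstream value(line.substr(14));
            size_t kb = 0;
            value >> kb;       // reported in kB
            total += kb;
        }
    }

    return total;
}

Calling such a helper before and after the allocation shows whether the madvise() hint had any effect.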

View File

@@ -65,6 +65,7 @@ public:
     static void *allocateExecutableMemory(size_t size, bool hugePages);
     static void *allocateLargePagesMemory(size_t size);
     static void *allocateOneGbPagesMemory(size_t size);
+    static bool adviseLargePages(void *p, size_t size);
     static void destroy();
     static void flushInstructionCache(void *p, size_t size);
     static void freeLargePagesMemory(void *p, size_t size);

View File

@@ -276,6 +276,16 @@ bool xmrig::VirtualMemory::allocateOneGbPagesMemory()
 }
 
 
+bool xmrig::VirtualMemory::adviseLargePages(void *p, size_t size)
+{
+#   ifdef XMRIG_OS_LINUX
+    return (madvise(p, size, MADV_HUGEPAGE) == 0);
+#   else
+    return false;
+#   endif
+}
+
+
 void xmrig::VirtualMemory::freeLargePagesMemory()
 {
     if (m_flags.test(FLAG_LOCK)) {
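The new adviseLargePages() above is a thin wrapper over madvise(2); the hint only takes effect when the kernel's THP mode is "always" or "madvise". A hedged sketch of how that mode could be checked (the function is illustrative, not part of the commit):

// Illustration only: returns true when the system THP mode (the bracketed entry
// in the sysfs file, e.g. "always [madvise] never") allows madvise(MADV_HUGEPAGE)
// to take effect.
#include <fstream>
#include <string>

static bool thpHintUsable()
{
    std::ifstream f("/sys/kernel/mm/transparent_hugepage/enabled");
    std::string mode;
    std::getline(f, mode);

    return mode.find("[always]")  != std::string::npos ||
           mode.find("[madvise]") != std::string::npos;
}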

View File

@@ -260,6 +260,12 @@ bool xmrig::VirtualMemory::allocateOneGbPagesMemory()
 }
 
 
+bool xmrig::VirtualMemory::adviseLargePages(void *p, size_t size)
+{
+    return false;
+}
+
+
 void xmrig::VirtualMemory::freeLargePagesMemory()
 {
     freeLargePagesMemory(m_scratchpad, m_size);
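On platforms without madvise(), such as Windows, the stub above simply reports that the hint is unavailable; large pages there have to be requested explicitly at allocation time rather than advised after the fact. A hedged sketch of that explicit route (illustrative only, not part of the commit, and it assumes SeLockMemoryPrivilege is already enabled for the process):

// Illustration only: explicit large-page allocation on Windows. Requires
// SeLockMemoryPrivilege to already be held, otherwise VirtualAlloc fails.
#include <windows.h>

static void *allocLargePages(size_t size)
{
    const size_t granularity = GetLargePageMinimum();        // typically 2 MB; 0 if unsupported
    if (granularity == 0) {
        return nullptr;
    }

    size = (size + granularity - 1) & ~(granularity - 1);    // round up to a whole large page

    return VirtualAlloc(nullptr, size,
                        MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES,
                        PAGE_READWRITE);
}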

View File

@@ -215,7 +215,7 @@ void xmrig::RxDataset::allocate(bool hugePages, bool oneGbPages)
         return;
     }
 
-    m_memory = new VirtualMemory(maxSize(), hugePages, oneGbPages, false, m_node);
+    m_memory = new VirtualMemory(maxSize(), hugePages, oneGbPages, false, m_node, VirtualMemory::kDefaultHugePageSize);
 
     if (m_memory->isOneGbPages()) {
         m_scratchpadOffset = maxSize() + RANDOMX_CACHE_MAX_SIZE;

View File

@@ -115,7 +115,7 @@ static inline void checkHash(const JobBundle &bundle, std::vector<JobResult> &re
 static void getResults(JobBundle &bundle, std::vector<JobResult> &results, uint32_t &errors, bool hwAES)
 {
     const auto &algorithm = bundle.job.algorithm();
-    auto memory = new VirtualMemory(algorithm.l3(), false, false, false);
+    auto memory = new VirtualMemory(algorithm.l3(), false, false, false, 0, VirtualMemory::kDefaultHugePageSize);
     alignas(16) uint8_t hash[32]{ 0 };
 
     if (algorithm.family() == Algorithm::RANDOM_X) {