Linux: added support for transparent huge pages
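The change teaches VirtualMemory about transparent huge pages (THP): each construction site now passes the huge-page size (VirtualMemory::kDefaultHugePageSize) as the scratchpad alignment, a new adviseLargePages(void *p, size_t size) helper is declared, and when an explicit huge-page allocation is not used the Linux build asks the kernel to back the plain allocation with THP via madvise(MADV_HUGEPAGE). The non-Linux build gets a stub that reports failure.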
@@ -87,14 +87,14 @@ xmrig::CpuWorker<N>::CpuWorker(size_t id, const CpuLaunchData &data) :
         if (!cn_heavyZen3Memory) {
             // Round up number of threads to the multiple of 8
             const size_t num_threads = ((m_threads + 7) / 8) * 8;
-            cn_heavyZen3Memory = new VirtualMemory(m_algorithm.l3() * num_threads, data.hugePages, false, false, node());
+            cn_heavyZen3Memory = new VirtualMemory(m_algorithm.l3() * num_threads, data.hugePages, false, false, node(), VirtualMemory::kDefaultHugePageSize);
         }
         m_memory = cn_heavyZen3Memory;
     }
     else
 # endif
     {
-        m_memory = new VirtualMemory(m_algorithm.l3() * N, data.hugePages, false, true, node());
+        m_memory = new VirtualMemory(m_algorithm.l3() * N, data.hugePages, false, true, node(), VirtualMemory::kDefaultHugePageSize);
     }

 # ifdef XMRIG_ALGO_GHOSTRIDER
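Passing VirtualMemory::kDefaultHugePageSize explicitly here (and at the other construction sites below) presumably ensures the plain _mm_malloc fallback inside the constructor is aligned on a huge-page boundary, which is what makes the new madvise() path effective when explicit huge pages could not be reserved.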
@@ -49,7 +49,7 @@ xmrig::MemoryPool::MemoryPool(size_t size, bool hugePages, uint32_t node)

     constexpr size_t alignment = 1 << 24;

-    m_memory = new VirtualMemory(size * pageSize + alignment, hugePages, false, false, node);
+    m_memory = new VirtualMemory(size * pageSize + alignment, hugePages, false, false, node, VirtualMemory::kDefaultHugePageSize);

     m_alignOffset = (alignment - (((size_t)m_memory->scratchpad()) % alignment)) % alignment;
 }
@@ -75,6 +75,16 @@ xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, bool oneGbPages
     }

     m_scratchpad = static_cast<uint8_t*>(_mm_malloc(m_size, alignSize));

+    // Huge pages failed to allocate, but try to enable transparent huge pages for the range
+    if (alignSize >= kDefaultHugePageSize) {
+        if (m_scratchpad) {
+            adviseLargePages(m_scratchpad, m_size);
+        }
+        else {
+            m_scratchpad = static_cast<uint8_t*>(_mm_malloc(m_size, 64));
+        }
+    }
 }


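The logic added above is: when the requested alignment is at least one huge page, a successful aligned allocation is advised to use THP, and a failed one falls back to a plain 64-byte-aligned allocation. A minimal standalone sketch of the same pattern, Linux-only; the 2 MiB page size, allocScratchpad and main are illustrative and not xmrig code:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <mm_malloc.h>    // _mm_malloc / _mm_free (GCC and Clang on x86)
    #include <sys/mman.h>     // madvise, MADV_HUGEPAGE (Linux)

    static constexpr size_t kHugePageSize = 2u * 1024u * 1024u;  // typical x86-64 THP size

    // Allocate `size` bytes, preferring a huge-page-aligned block that the kernel
    // may back with transparent huge pages; otherwise fall back to 64-byte alignment.
    static uint8_t *allocScratchpad(size_t size, size_t alignSize)
    {
        auto *p = static_cast<uint8_t *>(_mm_malloc(size, alignSize));

        if (alignSize >= kHugePageSize) {
            if (p) {
                madvise(p, size, MADV_HUGEPAGE);   // best effort, failure is harmless
            }
            else {
                p = static_cast<uint8_t *>(_mm_malloc(size, 64));
            }
        }

        return p;
    }

    int main()
    {
        uint8_t *scratchpad = allocScratchpad(8 * kHugePageSize, kHugePageSize);
        if (!scratchpad) {
            return 1;
        }

        std::printf("scratchpad at %p\n", static_cast<void *>(scratchpad));
        _mm_free(scratchpad);
        return 0;
    }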
@@ -65,6 +65,7 @@ public:
     static void *allocateExecutableMemory(size_t size, bool hugePages);
     static void *allocateLargePagesMemory(size_t size);
     static void *allocateOneGbPagesMemory(size_t size);
+    static bool adviseLargePages(void *p, size_t size);
     static void destroy();
     static void flushInstructionCache(void *p, size_t size);
     static void freeLargePagesMemory(void *p, size_t size);
@@ -276,6 +276,16 @@ bool xmrig::VirtualMemory::allocateOneGbPagesMemory()
 }


+bool xmrig::VirtualMemory::adviseLargePages(void *p, size_t size)
+{
+# ifdef XMRIG_OS_LINUX
+    return (madvise(p, size, MADV_HUGEPAGE) == 0);
+# else
+    return false;
+# endif
+}
+
+
 void xmrig::VirtualMemory::freeLargePagesMemory()
 {
     if (m_flags.test(FLAG_LOCK)) {
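madvise(MADV_HUGEPAGE) is only a hint: it succeeds when THP is compiled into the kernel and the system-wide mode in /sys/kernel/mm/transparent_hugepage/enabled is "always" or "madvise", but the kernel still decides per fault whether a huge page is actually installed. A rough, Linux-only way to verify is to read the AnonHugePages counter of the mapping in /proc/self/smaps; the printThpUsage helper below is an illustrative sketch, not part of xmrig:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <sys/mman.h>

    // Print the AnonHugePages counter of the /proc/self/smaps entry containing `addr`.
    // Purely diagnostic; returns true if the mapping was found.
    static bool printThpUsage(const void *addr)
    {
        std::FILE *f = std::fopen("/proc/self/smaps", "r");
        if (!f) {
            return false;
        }

        const std::uintptr_t target = reinterpret_cast<std::uintptr_t>(addr);
        bool inMapping = false;
        bool found     = false;
        char line[512];

        while (std::fgets(line, sizeof(line), f)) {
            std::uintptr_t start = 0;
            std::uintptr_t end   = 0;

            // Mapping headers look like "7f1c40000000-7f1c42000000 rw-p ...".
            if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR " ", &start, &end) == 2) {
                inMapping = (target >= start && target < end);
            }
            else if (inMapping && std::strncmp(line, "AnonHugePages:", 14) == 0) {
                std::printf("%s", line);   // e.g. "AnonHugePages:     16384 kB"
                found = true;
                break;
            }
        }

        std::fclose(f);
        return found;
    }

    int main()
    {
        constexpr size_t kHugePageSize = 2u * 1024u * 1024u;
        const size_t size = 16 * kHugePageSize;

        void *p = std::aligned_alloc(kHugePageSize, size);
        if (!p) {
            return 1;
        }

        madvise(p, size, MADV_HUGEPAGE);
        std::memset(p, 1, size);   // fault the pages in first

        printThpUsage(p);          // a non-zero value means THP actually kicked in

        std::free(p);
        return 0;
    }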
@@ -260,6 +260,12 @@ bool xmrig::VirtualMemory::allocateOneGbPagesMemory()
 }


+bool xmrig::VirtualMemory::adviseLargePages(void *p, size_t size)
+{
+    return false;
+}
+
+
 void xmrig::VirtualMemory::freeLargePagesMemory()
 {
     freeLargePagesMemory(m_scratchpad, m_size);
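Transparent huge pages are a Linux kernel feature, so the non-Linux implementation of adviseLargePages() is just a stub that reports failure; explicit large pages on those platforms presumably continue to go through the existing allocateLargePagesMemory() path.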
@@ -215,7 +215,7 @@ void xmrig::RxDataset::allocate(bool hugePages, bool oneGbPages)
         return;
     }

-    m_memory = new VirtualMemory(maxSize(), hugePages, oneGbPages, false, m_node);
+    m_memory = new VirtualMemory(maxSize(), hugePages, oneGbPages, false, m_node, VirtualMemory::kDefaultHugePageSize);

     if (m_memory->isOneGbPages()) {
         m_scratchpadOffset = maxSize() + RANDOMX_CACHE_MAX_SIZE;
@@ -115,7 +115,7 @@ static inline void checkHash(const JobBundle &bundle, std::vector<JobResult> &re
 static void getResults(JobBundle &bundle, std::vector<JobResult> &results, uint32_t &errors, bool hwAES)
 {
     const auto &algorithm = bundle.job.algorithm();
-    auto memory = new VirtualMemory(algorithm.l3(), false, false, false);
+    auto memory = new VirtualMemory(algorithm.l3(), false, false, false, 0, VirtualMemory::kDefaultHugePageSize);
     alignas(16) uint8_t hash[32]{ 0 };

     if (algorithm.family() == Algorithm::RANDOM_X) {