mirror of https://github.com/xmrig/xmrig.git synced 2025-12-09 16:52:40 -05:00

Compare commits


11 Commits

Author SHA1 Message Date
Tony Butler
735e6e9814 How about this way 2023-05-23 16:49:40 -06:00
Tony Butler
5e207f1a2d CN: Consistency cleanup 2023-05-23 16:49:40 -06:00
xmrig
02d45834e1 Merge pull request #3273 from SChernykh/dev
RandomX: fixed undefined behavior
2023-05-23 20:18:32 +07:00
SChernykh
1252a4710e RandomX: fixed undefined behavior
Using an inactive member of a `union` is undefined behavior in C++
2023-05-23 14:40:12 +02:00
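For readers unfamiliar with this bug class: unlike C, C++ makes reading a union member other than the one most recently written undefined behavior. A minimal sketch of the pattern and a well-defined replacement; the function names are illustrative, not taken from the XMRig source.

#include <cstdint>
#include <cstring>

// UB in C++: writing one union member and reading another
// (type punning through a union) accesses an inactive member.
uint32_t bits_ub(float f) {
    union { float f32; uint32_t u32; } pun;
    pun.f32 = f;
    return pun.u32; // undefined behavior in C++
}

// Well-defined alternative: memcpy between same-sized objects.
// Optimizing compilers lower this to a single register move.
uint32_t bits_ok(float f) {
    static_assert(sizeof(float) == sizeof(uint32_t), "size mismatch");
    uint32_t u;
    std::memcpy(&u, &f, sizeof(u));
    return u;
}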
xmrig
5891f1f06b Merge pull request #3271 from SChernykh/opt_genprog
RandomX: optimized program generation
2023-05-22 05:25:32 +07:00
SChernykh
5dcbab7e3a RandomX: optimized program generation 2023-05-21 17:44:20 +02:00
xmrig
7b51e23aa0 Merge pull request #3254 from SChernykh/dev
Tweaked auto-tuning for Intel CPUs
2023-04-19 12:29:58 +07:00
SChernykh
7f7fc363e1 Tweaked auto-tuning for Intel CPUs
Alder Lake and newer CPUs have exclusive L3 cache and benefit from more threads until L3+L2 is filled.
2023-04-18 21:20:45 +02:00
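As a worked example of the new heuristic (the cache figures below are assumptions for a hybrid 8P+4E part, not taken from the diff; RandomX uses a 2 MiB scratchpad per hash):

#include <cstddef>
#include <cstdio>

int main() {
    const size_t L3         = 25u * 1024 * 1024; // exclusive L3 (assumed)
    const size_t L2         = 12u * 1024 * 1024; // total per-core L2 (assumed)
    const size_t scratchpad =  2u * 1024 * 1024; // RandomX scratchpad size

    const size_t old_threads = L3 / scratchpad;        // 12 with these figures
    const size_t new_threads = (L3 + L2) / scratchpad; // 18 with these figures

    std::printf("old=%zu new=%zu\n", old_threads, new_threads);
    return 0;
}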
XMRig
c4e1363148 #3245 Improved algorithm negotiation for donation rounds by sending extra information about current mining job. 2023-04-07 23:35:05 +07:00
XMRig
a2e9b3456d v6.19.3-dev 2023-04-04 00:34:54 +07:00
XMRig
4790318685 Merge branch 'master' into dev 2023-04-04 00:34:22 +07:00
13 changed files with 309 additions and 287 deletions

View File

@@ -298,8 +298,10 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
cores.reserve(m_cores);
findByType(cache, HWLOC_OBJ_CORE, [&cores](hwloc_obj_t found) { cores.emplace_back(found); });
const bool L3_exclusive = isCacheExclusive(cache);
# ifdef XMRIG_ALGO_GHOSTRIDER
if ((algorithm == Algorithm::GHOSTRIDER_RTM) && (PUs > cores.size()) && (PUs < cores.size() * 2)) {
if ((algorithm == Algorithm::GHOSTRIDER_RTM) && L3_exclusive && (PUs > cores.size()) && (PUs < cores.size() * 2)) {
// Don't use E-cores on Alder Lake
cores.erase(std::remove_if(cores.begin(), cores.end(), [](hwloc_obj_t c) { return hwloc_bitmap_weight(c->cpuset) == 1; }), cores.end());
@@ -311,7 +313,6 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
# endif
size_t L3 = cache->attr->cache.size;
const bool L3_exclusive = isCacheExclusive(cache);
size_t L2 = 0;
int L2_associativity = 0;
size_t extra = 0;
@@ -349,6 +350,10 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith
}
# ifdef XMRIG_ALGO_RANDOMX
if ((algorithm.family() == Algorithm::RANDOM_X) && L3_exclusive && (PUs > cores.size()) && (PUs < cores.size() * 2)) {
// Use all L3+L2 on latest Intel CPUs with P-cores, E-cores and exclusive L3 cache
cacheHashes = (L3 + L2) / scratchpad;
}
if (extra == 0 && algorithm.l2() > 0) {
cacheHashes = std::min<size_t>(std::max<size_t>(L2 / algorithm.l2(), cores.size()), cacheHashes);
}

View File

@@ -37,14 +37,36 @@ class CnAlgo
public:
constexpr CnAlgo() {};
constexpr inline Algorithm::Id base() const { static_assert(Algorithm::isCN(ALGO), "invalid CRYPTONIGHT algorithm"); return Algorithm::base(ALGO); }
constexpr inline bool isHeavy() const { return Algorithm::family(ALGO) == Algorithm::CN_HEAVY; }
constexpr inline bool isR() const { return ALGO == Algorithm::CN_R; }
constexpr inline size_t memory() const { static_assert(Algorithm::isCN(ALGO), "invalid CRYPTONIGHT algorithm"); return Algorithm::l3(ALGO); }
constexpr inline uint32_t iterations() const { static_assert(Algorithm::isCN(ALGO), "invalid CRYPTONIGHT algorithm"); return CN_ITER; }
# define ASSERT_CN static_assert(Algorithm::isCN(ALGO), "invalid CRYPTONIGHT algorithm")
constexpr inline Algorithm::Id base() const { ASSERT_CN; return Algorithm::base(ALGO); }
constexpr inline size_t memory() const { ASSERT_CN; return Algorithm::l3(ALGO); }
constexpr inline uint32_t iterations() const { ASSERT_CN; return CN_ITER; }
constexpr inline uint32_t mask() const { return static_cast<uint32_t>(((memory() - 1) / 16) * 16); }
constexpr inline uint32_t half_mem() const { return mask() < memory() / 2; }
constexpr inline bool isBase1() const { ASSERT_CN; return Algorithm::base(ALGO) == Algorithm::CN_1; }
constexpr inline bool isBase2() const { ASSERT_CN; return Algorithm::base(ALGO) == Algorithm::CN_2; }
constexpr inline bool is2() const { return ALGO == Algorithm::CN_2; }
constexpr inline bool isR() const { return ALGO == Algorithm::CN_R; }
constexpr inline bool isHalf() const { return ALGO == Algorithm::CN_HALF; }
constexpr inline bool isRTO() const { return ALGO == Algorithm::CN_RTO; }
constexpr inline bool isRWZ() const { return ALGO == Algorithm::CN_RWZ; }
constexpr inline bool isZLS() const { return ALGO == Algorithm::CN_ZLS; }
constexpr inline bool isDouble() const { return ALGO == Algorithm::CN_DOUBLE; }
constexpr inline bool isCCX() const { return ALGO == Algorithm::CN_CCX; }
constexpr inline bool isHeavy() const { ASSERT_CN; return Algorithm::family(ALGO) == Algorithm::CN_HEAVY; }
constexpr inline bool isHeavyTube() const { return ALGO == Algorithm::CN_HEAVY_TUBE; }
constexpr inline bool isHeavyXHV() const { return ALGO == Algorithm::CN_HEAVY_XHV; }
constexpr inline bool isPico0() const { return ALGO == Algorithm::CN_PICO_0; }
constexpr inline bool isPicoTLO() const { return ALGO == Algorithm::CN_PICO_TLO; }
constexpr inline bool isUPX2() const { return ALGO == Algorithm::CN_UPX2; }
constexpr inline bool isGR0() const { return ALGO == Algorithm::CN_GR_0; }
constexpr inline bool isGR1() const { return ALGO == Algorithm::CN_GR_1; }
constexpr inline bool isGR2() const { return ALGO == Algorithm::CN_GR_2; }
constexpr inline bool isGR3() const { return ALGO == Algorithm::CN_GR_3; }
constexpr inline bool isGR4() const { return ALGO == Algorithm::CN_GR_4; }
constexpr inline bool isGR5() const { return ALGO == Algorithm::CN_GR_5; }
inline static uint32_t iterations(Algorithm::Id algo)
{
switch (algo) {

View File

@@ -603,7 +603,7 @@ static inline void cryptonight_monero_tweak(uint64_t *mem_out, const uint8_t *l,
constexpr CnAlgo<ALGO> props;
if (props.base() == Algorithm::CN_2) {
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, ((props.isRWZ() || props.isUPX2()) ? 1 : 0));
_mm_store_si128(reinterpret_cast<__m128i *>(mem_out), _mm_xor_si128(bx0, cx));
} else {
__m128i tmp = _mm_xor_si128(bx0, cx);
@@ -665,15 +665,8 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
constexpr CnAlgo<ALGO> props;
constexpr size_t MASK = props.mask();
constexpr Algorithm::Id BASE = props.base();
# ifdef XMRIG_ALGO_CN_HEAVY
constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE;
# else
constexpr bool IS_CN_HEAVY_TUBE = false;
# endif
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32);
return;
}
@@ -694,10 +687,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
V4_Instruction code[256];
const int code_size = v4_random_math_init<ALGO>(code, height);
if (ALGO == Algorithm::CN_R) {
v4_soft_aes_compile_code(code, code_size, reinterpret_cast<void*>(ctx[0]->generated_code), Assembly::NONE);
}
v4_soft_aes_compile_code(code, code_size, reinterpret_cast<void*>(ctx[0]->generated_code), Assembly::NONE);
ctx[0]->generated_code_data = { ALGO, height };
}
@@ -718,26 +708,26 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx1 = _mm_set_epi64x(static_cast<int64_t>(h0[9] ^ h0[11]), static_cast<int64_t>(h0[8] ^ h0[10]));
__m128 conc_var;
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
conc_var = _mm_setzero_ps();
RESTORE_ROUNDING_MODE();
}
for (size_t i = 0; i < props.iterations(); i++) {
__m128i cx;
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
if (props.isHeavyTube() || !SOFT_AES) {
cx = _mm_load_si128(reinterpret_cast<const __m128i *>(&l0[interleaved_index<interleave>(idx0 & MASK)]));
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
cryptonight_conceal_tweak(cx, conc_var);
}
}
const __m128i ax0 = _mm_set_epi64x(static_cast<int64_t>(ah0), static_cast<int64_t>(al0));
if (IS_CN_HEAVY_TUBE) {
if (props.isHeavyTube()) {
cx = aes_round_tweak_div(cx, ax0);
}
else if (SOFT_AES) {
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
cx = _mm_load_si128(reinterpret_cast<const __m128i*>(&l0[interleaved_index<interleave>(idx0 & MASK)]));
cryptonight_conceal_tweak(cx, conc_var);
cx = soft_aesenc(&cx, ax0, reinterpret_cast<const uint32_t*>(saes_table));
@@ -750,7 +740,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
cx = _mm_aesenc_si128(cx, ax0);
}
if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) {
if (props.isBase1() || props.isBase2()) {
cryptonight_monero_tweak<ALGO>(reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)]), l0, idx0 & MASK, ax0, bx0, bx1, cx);
} else {
_mm_store_si128(reinterpret_cast<__m128i *>(&l0[interleaved_index<interleave>(idx0 & MASK)]), _mm_xor_si128(bx0, cx));
@@ -762,13 +752,11 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
cl = (reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)]))[0];
ch = (reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)]))[1];
if (BASE == Algorithm::CN_2) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx0, bx1);
if (ALGO == Algorithm::CN_R) {
al0 ^= r0[2] | (static_cast<uint64_t>(r0[3]) << 32);
ah0 ^= r0[0] | (static_cast<uint64_t>(r0[1]) << 32);
}
al0 ^= r0[2] | (static_cast<uint64_t>(r0[3]) << 32);
ah0 ^= r0[0] | (static_cast<uint64_t>(r0[1]) << 32);
} else {
VARIANT2_INTEGER_MATH(0, cl, cx);
}
@@ -776,11 +764,11 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
lo = __umul128(idx0, cl, &hi);
if (BASE == Algorithm::CN_2) {
if (ALGO == Algorithm::CN_R) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx0, bx1, cx, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, ((props.isRWZ() || props.isUPX2()) ? 1 : 0));
}
}
@@ -789,9 +777,9 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)])[0] = al0;
if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) {
if (props.isHeavyTube() || props.isRTO()) {
reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)])[1] = ah0 ^ tweak1_2_0 ^ al0;
} else if (BASE == Algorithm::CN_1) {
} else if (props.isBase1()) {
reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)])[1] = ah0 ^ tweak1_2_0;
} else {
reinterpret_cast<uint64_t*>(&l0[interleaved_index<interleave>(idx0 & MASK)])[1] = ah0;
@@ -819,7 +807,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
((int64_t*)&l0[interleaved_index<interleave>(idx0 & MASK)])[0] = n ^ q;
if (ALGO == Algorithm::CN_HEAVY_XHV) {
if (props.isHeavyXHV()) {
d = ~d;
}
@@ -827,7 +815,7 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
}
# endif
if (BASE == Algorithm::CN_2) {
if (props.isBase2()) {
bx1 = bx0;
}
@@ -960,7 +948,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
}
cn_explode_scratchpad<ALGO, false, 0>(ctx[0]);
if (ALGO == Algorithm::CN_2) {
if (props.is2()) {
if (ASM == Assembly::INTEL) {
cnv2_mainloop_ivybridge_asm(ctx);
}
@@ -971,7 +959,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
cnv2_mainloop_bulldozer_asm(ctx);
}
}
else if (ALGO == Algorithm::CN_HALF) {
else if (props.isHalf()) {
if (ASM == Assembly::INTEL) {
cn_half_mainloop_ivybridge_asm(ctx);
}
@@ -983,7 +971,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
}
}
# ifdef XMRIG_ALGO_CN_PICO
else if (ALGO == Algorithm::CN_PICO_0) {
else if (props.isPico0()) {
if (ASM == Assembly::INTEL) {
cn_trtl_mainloop_ivybridge_asm(ctx);
}
@@ -994,7 +982,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
cn_trtl_mainloop_bulldozer_asm(ctx);
}
}
else if (ALGO == Algorithm::CN_PICO_TLO) {
else if (props.isPicoTLO()) {
if (ASM == Assembly::INTEL) {
cn_tlo_mainloop_ivybridge_asm(ctx);
}
@@ -1006,10 +994,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
}
}
# endif
else if (ALGO == Algorithm::CN_RWZ) {
else if (props.isRWZ()) {
cnv2_rwz_mainloop_asm(ctx);
}
else if (ALGO == Algorithm::CN_ZLS) {
else if (props.isZLS()) {
if (ASM == Assembly::INTEL) {
cn_zls_mainloop_ivybridge_asm(ctx);
}
@@ -1020,7 +1008,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
cn_zls_mainloop_bulldozer_asm(ctx);
}
}
else if (ALGO == Algorithm::CN_DOUBLE) {
else if (props.isDouble()) {
if (ASM == Assembly::INTEL) {
cn_double_mainloop_ivybridge_asm(ctx);
}
@@ -1032,7 +1020,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_
}
}
# ifdef XMRIG_ALGO_CN_FEMTO
else if (ALGO == Algorithm::CN_UPX2) {
else if (props.isUPX2()) {
cn_upx2_mainloop_asm(ctx);
}
# endif
@@ -1078,22 +1066,22 @@ inline void cryptonight_double_hash_asm(const uint8_t *__restrict__ input, size_
cn_explode_scratchpad<ALGO, false, 0>(ctx[1]);
}
if (ALGO == Algorithm::CN_2) {
if (props.is2()) {
cnv2_double_mainloop_sandybridge_asm(ctx);
}
else if (ALGO == Algorithm::CN_HALF) {
else if (props.isHalf()) {
cn_half_double_mainloop_sandybridge_asm(ctx);
}
# ifdef XMRIG_ALGO_CN_PICO
else if (ALGO == Algorithm::CN_PICO_0) {
else if (props.isPico0()) {
cn_trtl_double_mainloop_sandybridge_asm(ctx);
}
else if (ALGO == Algorithm::CN_PICO_TLO) {
else if (props.isPicoTLO()) {
cn_tlo_double_mainloop_sandybridge_asm(ctx);
}
# endif
# ifdef XMRIG_ALGO_CN_FEMTO
else if (ALGO == Algorithm::CN_UPX2) {
else if (props.isUPX2()) {
if (Cpu::info()->arch() == ICpuInfo::ARCH_ZEN3) {
cnv2_upx_double_mainloop_zen3_asm(ctx);
}
@@ -1102,13 +1090,13 @@ inline void cryptonight_double_hash_asm(const uint8_t *__restrict__ input, size_
}
}
# endif
else if (ALGO == Algorithm::CN_RWZ) {
else if (props.isRWZ()) {
cnv2_rwz_double_mainloop_asm(ctx);
}
else if (ALGO == Algorithm::CN_ZLS) {
else if (props.isZLS()) {
cn_zls_double_mainloop_sandybridge_asm(ctx);
}
else if (ALGO == Algorithm::CN_DOUBLE) {
else if (props.isDouble()) {
cn_double_double_mainloop_sandybridge_asm(ctx);
}
else if (props.isR()) {
@@ -1146,9 +1134,8 @@ template<Algorithm::Id ALGO>
static NOINLINE void cryptonight_single_hash_gr_sse41(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, cryptonight_ctx** __restrict__ ctx, uint64_t height)
{
constexpr CnAlgo<ALGO> props;
constexpr Algorithm::Id BASE = props.base();
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32);
return;
}
@@ -1163,12 +1150,12 @@ static NOINLINE void cryptonight_single_hash_gr_sse41(const uint8_t* __restrict_
VARIANT1_INIT(0);
ctx[0]->tweak1_2 = tweak1_2_0;
ctx[0]->tweak1_table = tweak1_table;
if (ALGO == Algorithm::CN_GR_0) cn_gr0_single_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_1) cn_gr1_single_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_2) cn_gr2_single_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_3) cn_gr3_single_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_4) cn_gr4_single_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_5) cn_gr5_single_mainloop_asm(ctx);
if (props.isGR0()) cn_gr0_single_mainloop_asm(ctx);
if (props.isGR1()) cn_gr1_single_mainloop_asm(ctx);
if (props.isGR2()) cn_gr2_single_mainloop_asm(ctx);
if (props.isGR3()) cn_gr3_single_mainloop_asm(ctx);
if (props.isGR4()) cn_gr4_single_mainloop_asm(ctx);
if (props.isGR5()) cn_gr5_single_mainloop_asm(ctx);
cn_implode_scratchpad<ALGO, false, 0>(ctx[0]);
keccakf(reinterpret_cast<uint64_t*>(ctx[0]->state), 24);
@@ -1180,9 +1167,8 @@ template<Algorithm::Id ALGO>
static NOINLINE void cryptonight_double_hash_gr_sse41(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height)
{
constexpr CnAlgo<ALGO> props;
constexpr Algorithm::Id BASE = props.base();
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 64);
return;
}
@@ -1196,7 +1182,7 @@ static NOINLINE void cryptonight_double_hash_gr_sse41(const uint8_t *__restrict_
}
# ifdef XMRIG_VAES
if (!props.isHeavy() && cn_vaes_enabled) {
if (cn_vaes_enabled) {
cn_explode_scratchpad_vaes_double(ctx[0], ctx[1], props.memory(), props.half_mem());
}
else
@@ -1214,15 +1200,15 @@ static NOINLINE void cryptonight_double_hash_gr_sse41(const uint8_t *__restrict_
ctx[0]->tweak1_table = tweak1_table;
if (ALGO == Algorithm::CN_GR_0) cn_gr0_double_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_1) cn_gr1_double_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_2) cn_gr2_double_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_3) cn_gr3_double_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_4) cn_gr4_double_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_5) cn_gr5_double_mainloop_asm(ctx);
if (props.isGR0()) cn_gr0_double_mainloop_asm(ctx);
if (props.isGR1()) cn_gr1_double_mainloop_asm(ctx);
if (props.isGR2()) cn_gr2_double_mainloop_asm(ctx);
if (props.isGR3()) cn_gr3_double_mainloop_asm(ctx);
if (props.isGR4()) cn_gr4_double_mainloop_asm(ctx);
if (props.isGR5()) cn_gr5_double_mainloop_asm(ctx);
# ifdef XMRIG_VAES
if (!props.isHeavy() && cn_vaes_enabled) {
if (cn_vaes_enabled) {
cn_implode_scratchpad_vaes_double(ctx[0], ctx[1], props.memory(), props.half_mem());
}
else
@@ -1267,15 +1253,8 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
constexpr CnAlgo<ALGO> props;
constexpr size_t MASK = props.mask();
constexpr Algorithm::Id BASE = props.base();
# ifdef XMRIG_ALGO_CN_HEAVY
constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE;
# else
constexpr bool IS_CN_HEAVY_TUBE = false;
# endif
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 64);
return;
}
@@ -1323,7 +1302,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
__m128i bx11 = _mm_set_epi64x(h1[9] ^ h1[11], h1[8] ^ h1[10]);
__m128 conc_var0, conc_var1;
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
conc_var0 = _mm_setzero_ps();
conc_var1 = _mm_setzero_ps();
RESTORE_ROUNDING_MODE();
@@ -1334,10 +1313,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
for (size_t i = 0; i < props.iterations(); i++) {
__m128i cx0, cx1;
if (IS_CN_HEAVY_TUBE || !SOFT_AES) {
if (props.isHeavyTube() || !SOFT_AES) {
cx0 = _mm_load_si128(reinterpret_cast<const __m128i *>(&l0[idx0 & MASK]));
cx1 = _mm_load_si128(reinterpret_cast<const __m128i *>(&l1[idx1 & MASK]));
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
cryptonight_conceal_tweak(cx0, conc_var0);
cryptonight_conceal_tweak(cx1, conc_var1);
}
@@ -1345,12 +1324,12 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
const __m128i ax0 = _mm_set_epi64x(ah0, al0);
const __m128i ax1 = _mm_set_epi64x(ah1, al1);
if (IS_CN_HEAVY_TUBE) {
if (props.isHeavyTube()) {
cx0 = aes_round_tweak_div(cx0, ax0);
cx1 = aes_round_tweak_div(cx1, ax1);
}
else if (SOFT_AES) {
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
cx0 = _mm_load_si128(reinterpret_cast<const __m128i*>(&l0[idx0 & MASK]));
cx1 = _mm_load_si128(reinterpret_cast<const __m128i*>(&l1[idx1 & MASK]));
cryptonight_conceal_tweak(cx0, conc_var0);
@@ -1368,7 +1347,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
cx1 = _mm_aesenc_si128(cx1, ax1);
}
if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) {
if (props.isBase1() || props.isBase2()) {
cryptonight_monero_tweak<ALGO>((uint64_t*)&l0[idx0 & MASK], l0, idx0 & MASK, ax0, bx00, bx01, cx0);
cryptonight_monero_tweak<ALGO>((uint64_t*)&l1[idx1 & MASK], l1, idx1 & MASK, ax1, bx10, bx11, cx1);
} else {
@@ -1383,13 +1362,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
cl = ((uint64_t*) &l0[idx0 & MASK])[0];
ch = ((uint64_t*) &l0[idx0 & MASK])[1];
if (BASE == Algorithm::CN_2) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx01);
if (ALGO == Algorithm::CN_R) {
al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
}
al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32);
ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32);
} else {
VARIANT2_INTEGER_MATH(0, cl, cx0);
}
@@ -1397,11 +1374,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
lo = __umul128(idx0, cl, &hi);
if (BASE == Algorithm::CN_2) {
if (ALGO == Algorithm::CN_R) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx00, bx01, cx0, 0);
} else {
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, ((props.isRWZ() || props.isUPX2()) ? 1 : 0));
}
}
@@ -1410,9 +1387,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
((uint64_t*)&l0[idx0 & MASK])[0] = al0;
if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) {
if (props.isHeavyTube() || props.isRTO()) {
((uint64_t*) &l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0;
} else if (BASE == Algorithm::CN_1) {
} else if (props.isBase1()) {
((uint64_t*) &l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0;
} else {
((uint64_t*) &l0[idx0 & MASK])[1] = ah0;
@@ -1430,7 +1407,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
((int64_t*)&l0[idx0 & MASK])[0] = n ^ q;
if (ALGO == Algorithm::CN_HEAVY_XHV) {
if (props.isHeavyXHV()) {
d = ~d;
}
@@ -1441,13 +1418,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
cl = ((uint64_t*) &l1[idx1 & MASK])[0];
ch = ((uint64_t*) &l1[idx1 & MASK])[1];
if (BASE == Algorithm::CN_2) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx10, bx11);
if (ALGO == Algorithm::CN_R) {
al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
}
al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32);
ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32);
} else {
VARIANT2_INTEGER_MATH(1, cl, cx1);
}
@@ -1455,11 +1430,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
lo = __umul128(idx1, cl, &hi);
if (BASE == Algorithm::CN_2) {
if (ALGO == Algorithm::CN_R) {
if (props.isBase2()) {
if (props.isR()) {
VARIANT2_SHUFFLE(l1, idx1 & MASK, ax1, bx10, bx11, cx1, 0);
} else {
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0));
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, ((props.isRWZ() || props.isUPX2()) ? 1 : 0));
}
}
@@ -1468,9 +1443,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
((uint64_t*)&l1[idx1 & MASK])[0] = al1;
if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) {
if (props.isHeavyTube() || props.isRTO()) {
((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1 ^ al1;
} else if (BASE == Algorithm::CN_1) {
} else if (props.isBase1()) {
((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1;
} else {
((uint64_t*)&l1[idx1 & MASK])[1] = ah1;
@@ -1488,7 +1463,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
((int64_t*)&l1[idx1 & MASK])[0] = n ^ q;
if (ALGO == Algorithm::CN_HEAVY_XHV) {
if (props.isHeavyXHV()) {
d = ~d;
}
@@ -1496,7 +1471,7 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si
}
# endif
if (BASE == Algorithm::CN_2) {
if (props.isBase2()) {
bx01 = bx00;
bx11 = bx10;
}
@@ -1529,9 +1504,8 @@ template<Algorithm::Id ALGO>
static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__ input, size_t size, uint8_t* __restrict__ output, cryptonight_ctx** __restrict__ ctx, uint64_t height)
{
constexpr CnAlgo<ALGO> props;
constexpr Algorithm::Id BASE = props.base();
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32 * 4);
return;
}
@@ -1549,7 +1523,7 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
}
# ifdef XMRIG_VAES
if (!props.isHeavy() && cn_vaes_enabled) {
if (cn_vaes_enabled) {
cn_explode_scratchpad_vaes_double(ctx[0], ctx[1], props.memory(), props.half_mem());
cn_explode_scratchpad_vaes_double(ctx[2], ctx[3], props.memory(), props.half_mem());
}
@@ -1569,15 +1543,15 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
ctx[0]->tweak1_table = tweak1_table;
if (ALGO == Algorithm::CN_GR_0) cn_gr0_quad_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_1) cn_gr1_quad_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_2) cn_gr2_quad_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_3) cn_gr3_quad_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_4) cn_gr4_quad_mainloop_asm(ctx);
if (ALGO == Algorithm::CN_GR_5) cn_gr5_quad_mainloop_asm(ctx);
if (props.isGR0()) cn_gr0_quad_mainloop_asm(ctx);
if (props.isGR1()) cn_gr1_quad_mainloop_asm(ctx);
if (props.isGR2()) cn_gr2_quad_mainloop_asm(ctx);
if (props.isGR3()) cn_gr3_quad_mainloop_asm(ctx);
if (props.isGR4()) cn_gr4_quad_mainloop_asm(ctx);
if (props.isGR5()) cn_gr5_quad_mainloop_asm(ctx);
# ifdef XMRIG_VAES
if (!props.isHeavy() && cn_vaes_enabled) {
if (cn_vaes_enabled) {
cn_implode_scratchpad_vaes_double(ctx[0], ctx[1], props.memory(), props.half_mem());
cn_implode_scratchpad_vaes_double(ctx[2], ctx[3], props.memory(), props.half_mem());
}
@@ -1606,14 +1580,14 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
#define CN_STEP1(a, b0, b1, c, l, ptr, idx, conc_var) \
ptr = reinterpret_cast<__m128i*>(&l[idx & MASK]); \
c = _mm_load_si128(ptr); \
if (ALGO == Algorithm::CN_CCX) { \
if (props.isCCX()) { \
cryptonight_conceal_tweak(c, conc_var); \
}
#define CN_STEP2(a, b0, b1, c, l, ptr, idx) \
if (IS_CN_HEAVY_TUBE) { \
if (props.isHeavyTube()) { \
c = aes_round_tweak_div(c, a); \
} \
else if (SOFT_AES) { \
@@ -1622,7 +1596,7 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
c = _mm_aesenc_si128(c, a); \
} \
\
if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { \
if (props.isBase1() || props.isBase2()) { \
cryptonight_monero_tweak<ALGO>((uint64_t*)ptr, l, idx & MASK, a, b0, b1, c); \
} else { \
_mm_store_si128(ptr, _mm_xor_si128(b0, c)); \
@@ -1638,36 +1612,34 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
#define CN_STEP4(part, a, b0, b1, c, l, mc, ptr, idx) \
uint64_t al##part, ah##part; \
if (BASE == Algorithm::CN_2) { \
if (props.isBase2()) { \
if (props.isR()) { \
al##part = _mm_cvtsi128_si64(a); \
ah##part = _mm_cvtsi128_si64(_mm_srli_si128(a, 8)); \
VARIANT4_RANDOM_MATH(part, al##part, ah##part, cl##part, b0, b1); \
if (ALGO == Algorithm::CN_R) { \
al##part ^= r##part[2] | ((uint64_t)(r##part[3]) << 32); \
ah##part ^= r##part[0] | ((uint64_t)(r##part[1]) << 32); \
} \
al##part ^= r##part[2] | ((uint64_t)(r##part[3]) << 32); \
ah##part ^= r##part[0] | ((uint64_t)(r##part[1]) << 32); \
} else { \
VARIANT2_INTEGER_MATH(part, cl##part, c); \
} \
} \
lo = __umul128(idx, cl##part, &hi); \
if (BASE == Algorithm::CN_2) { \
if (ALGO == Algorithm::CN_R) { \
if (props.isBase2()) { \
if (props.isR()) { \
VARIANT2_SHUFFLE(l, idx & MASK, a, b0, b1, c, 0); \
} else { \
VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, (((ALGO == Algorithm::CN_RWZ) || (ALGO == Algorithm::CN_UPX2)) ? 1 : 0)); \
VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, ((props.isRWZ() || props.isUPX2()) ? 1 : 0)); \
} \
} \
if (ALGO == Algorithm::CN_R) { \
if (props.isR()) { \
a = _mm_set_epi64x(ah##part, al##part); \
} \
a = _mm_add_epi64(a, _mm_set_epi64x(lo, hi)); \
\
if (BASE == Algorithm::CN_1) { \
if (props.isBase1()) { \
_mm_store_si128(ptr, _mm_xor_si128(a, mc)); \
\
if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { \
if (props.isHeavyTube() || props.isRTO()) { \
((uint64_t*)ptr)[1] ^= ((uint64_t*)ptr)[0]; \
} \
} else { \
@@ -1681,13 +1653,13 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
int32_t d = ((int32_t*)&l[idx & MASK])[2]; \
int64_t q = n / (d | 0x5); \
((int64_t*)&l[idx & MASK])[0] = n ^ q; \
if (IS_CN_HEAVY_XHV) { \
if (props.isHeavyXHV()) { \
d = ~d; \
} \
\
idx = d ^ q; \
} \
if (BASE == Algorithm::CN_2) { \
if (props.isBase2()) { \
b1 = b0; \
} \
b0 = c;
@@ -1697,11 +1669,11 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
__m128i mc##n; \
__m128i division_result_xmm_##n; \
__m128i sqrt_result_xmm_##n; \
if (BASE == Algorithm::CN_1) { \
if (props.isBase1()) { \
mc##n = _mm_set_epi64x(*reinterpret_cast<const uint64_t*>(input + n * size + 35) ^ \
*(reinterpret_cast<const uint64_t*>((ctx)->state) + 24), 0); \
} \
if (BASE == Algorithm::CN_2) { \
if (props.isBase2()) { \
division_result_xmm_##n = _mm_cvtsi64_si128(h##n[12]); \
sqrt_result_xmm_##n = _mm_cvtsi64_si128(h##n[13]); \
} \
@@ -1710,7 +1682,7 @@ static NOINLINE void cryptonight_quad_hash_gr_sse41(const uint8_t* __restrict__
__m128i bx##n##1 = _mm_set_epi64x(h##n[9] ^ h##n[11], h##n[8] ^ h##n[10]); \
__m128i cx##n = _mm_setzero_si128(); \
__m128 conc_var##n; \
if (ALGO == Algorithm::CN_CCX) { \
if (props.isCCX()) { \
conc_var##n = _mm_setzero_ps(); \
} \
VARIANT4_RANDOM_MATH_INIT(n);
@@ -1721,17 +1693,8 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si
{
constexpr CnAlgo<ALGO> props;
constexpr size_t MASK = props.mask();
constexpr Algorithm::Id BASE = props.base();
# ifdef XMRIG_ALGO_CN_HEAVY
constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE;
constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV;
# else
constexpr bool IS_CN_HEAVY_TUBE = false;
constexpr bool IS_CN_HEAVY_XHV = false;
# endif
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32 * 3);
return;
}
@@ -1755,7 +1718,7 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si
CONST_INIT(ctx[1], 1);
CONST_INIT(ctx[2], 2);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
RESTORE_ROUNDING_MODE();
}
@@ -1819,17 +1782,8 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size
constexpr CnAlgo<ALGO> props;
constexpr size_t MASK = props.mask();
constexpr Algorithm::Id BASE = props.base();
# ifdef XMRIG_ALGO_CN_HEAVY
constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE;
constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV;
# else
constexpr bool IS_CN_HEAVY_TUBE = false;
constexpr bool IS_CN_HEAVY_XHV = false;
# endif
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32 * 4);
return;
}
@@ -1869,7 +1823,7 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size
CONST_INIT(ctx[2], 2);
CONST_INIT(ctx[3], 3);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
RESTORE_ROUNDING_MODE();
}
@@ -1930,17 +1884,8 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz
{
constexpr CnAlgo<ALGO> props;
constexpr size_t MASK = props.mask();
constexpr Algorithm::Id BASE = props.base();
# ifdef XMRIG_ALGO_CN_HEAVY
constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE;
constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV;
# else
constexpr bool IS_CN_HEAVY_TUBE = false;
constexpr bool IS_CN_HEAVY_XHV = false;
# endif
if (BASE == Algorithm::CN_1 && size < 43) {
if (props.isBase1() && size < 43) {
memset(output, 0, 32 * 5);
return;
}
@@ -1970,7 +1915,7 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz
CONST_INIT(ctx[3], 3);
CONST_INIT(ctx[4], 4);
VARIANT2_SET_ROUNDING_MODE();
if (ALGO == Algorithm::CN_CCX) {
if (props.isCCX()) {
RESTORE_ROUNDING_MODE();
}

View File

@@ -7,8 +7,8 @@
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 tevador <tevador@gmail.com>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
#define XMRIG_KP_HASH_H
#include <stdint.h>
#include <cstdint>
namespace xmrig
@@ -43,16 +43,16 @@ class KPHash
public:
static constexpr uint32_t EPOCH_LENGTH = 7500;
static constexpr uint32_t PERIOD_LENGTH = 3;
static constexpr int CNT_CACHE = 11;
static constexpr int CNT_MATH = 18;
static constexpr uint32_t REGS = 32;
static constexpr uint32_t LANES = 16;
static constexpr int CNT_CACHE = 11;
static constexpr int CNT_MATH = 18;
static constexpr uint32_t REGS = 32;
static constexpr uint32_t LANES = 16;
static void calculate(const KPCache& light_cache, uint32_t block_height, const uint8_t (&header_hash)[32], uint64_t nonce, uint32_t (&output)[8], uint32_t (&mix_hash)[8]);
};
} /* namespace xmrig */
} // namespace xmrig
#endif /* XMRIG_KP_HASH_H */
#endif // XMRIG_KP_HASH_H

View File

@@ -34,6 +34,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "base/tools/Chrono.h"
#include "crypto/randomx/randomx.h"
#include "crypto/randomx/soft_aes.h"
#include "crypto/randomx/instruction.hpp"
#include "crypto/randomx/common.hpp"
#include "crypto/rx/Profiler.h"
#define AES_HASH_1R_STATE0 0xd7983aad, 0xcc82db47, 0x9fa856de, 0x92b52c0d
@@ -165,6 +167,9 @@ void fillAes1Rx4(void *state, size_t outputSize, void *buffer) {
template void fillAes1Rx4<true>(void *state, size_t outputSize, void *buffer);
template void fillAes1Rx4<false>(void *state, size_t outputSize, void *buffer);
static constexpr randomx::Instruction inst{ 0xFF, 7, 7, 0xFF, 0xFFFFFFFFU };
alignas(16) static const randomx::Instruction inst_mask[2] = { inst, inst };
template<int softAes>
void fillAes4Rx4(void *state, size_t outputSize, void *buffer) {
const uint8_t* outptr = (uint8_t*)buffer;
@@ -187,32 +192,42 @@ void fillAes4Rx4(void *state, size_t outputSize, void *buffer) {
state2 = rx_load_vec_i128((rx_vec_i128*)state + 2);
state3 = rx_load_vec_i128((rx_vec_i128*)state + 3);
while (outptr < outputEnd) {
state0 = aesdec<softAes>(state0, key0);
state1 = aesenc<softAes>(state1, key0);
state2 = aesdec<softAes>(state2, key4);
state3 = aesenc<softAes>(state3, key4);
state0 = aesdec<softAes>(state0, key1);
state1 = aesenc<softAes>(state1, key1);
state2 = aesdec<softAes>(state2, key5);
state3 = aesenc<softAes>(state3, key5);
state0 = aesdec<softAes>(state0, key2);
state1 = aesenc<softAes>(state1, key2);
state2 = aesdec<softAes>(state2, key6);
state3 = aesenc<softAes>(state3, key6);
state0 = aesdec<softAes>(state0, key3);
state1 = aesenc<softAes>(state1, key3);
state2 = aesdec<softAes>(state2, key7);
state3 = aesenc<softAes>(state3, key7);
#define TRANSFORM do { \
state0 = aesdec<softAes>(state0, key0); \
state1 = aesenc<softAes>(state1, key0); \
state2 = aesdec<softAes>(state2, key4); \
state3 = aesenc<softAes>(state3, key4); \
state0 = aesdec<softAes>(state0, key1); \
state1 = aesenc<softAes>(state1, key1); \
state2 = aesdec<softAes>(state2, key5); \
state3 = aesenc<softAes>(state3, key5); \
state0 = aesdec<softAes>(state0, key2); \
state1 = aesenc<softAes>(state1, key2); \
state2 = aesdec<softAes>(state2, key6); \
state3 = aesenc<softAes>(state3, key6); \
state0 = aesdec<softAes>(state0, key3); \
state1 = aesenc<softAes>(state1, key3); \
state2 = aesdec<softAes>(state2, key7); \
state3 = aesenc<softAes>(state3, key7); \
} while (0)
for (int i = 0; i < 2; ++i, outptr += 64) {
TRANSFORM;
rx_store_vec_i128((rx_vec_i128*)outptr + 0, state0);
rx_store_vec_i128((rx_vec_i128*)outptr + 1, state1);
rx_store_vec_i128((rx_vec_i128*)outptr + 2, state2);
rx_store_vec_i128((rx_vec_i128*)outptr + 3, state3);
}
static_assert(sizeof(inst_mask) == sizeof(rx_vec_i128), "Incorrect inst_mask size");
const rx_vec_i128 mask = *reinterpret_cast<const rx_vec_i128*>(inst_mask);
while (outptr < outputEnd) {
TRANSFORM;
rx_store_vec_i128((rx_vec_i128*)outptr + 0, rx_and_vec_i128(state0, mask));
rx_store_vec_i128((rx_vec_i128*)outptr + 1, rx_and_vec_i128(state1, mask));
rx_store_vec_i128((rx_vec_i128*)outptr + 2, rx_and_vec_i128(state2, mask));
rx_store_vec_i128((rx_vec_i128*)outptr + 3, rx_and_vec_i128(state3, mask));
outptr += 64;
}
}
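
The optimization above, sketched: RandomX uses 8 integer registers, and for unsigned values `x % 8` equals `x & 7`, so masking the raw AES output with the `inst_mask` pattern `{ 0xFF, 7, 7, 0xFF, 0xFFFFFFFF }` leaves the `src` and `dst` bytes of each generated instruction already reduced into register range. That is why the per-instruction `instr.src %= RegistersCount` / `instr.dst %= RegistersCount` lines can be deleted from the JIT compilers further down. A minimal check of the identity:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t RegistersCount = 8; // power of two
    for (uint32_t x = 0; x < 256; ++x) {
        // For unsigned x and power-of-two N: x % N == x & (N - 1).
        assert(x % RegistersCount == (x & (RegistersCount - 1)));
    }
    return 0;
}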

View File

@@ -126,6 +126,7 @@ FORCE_INLINE rx_vec_f128 rx_set1_vec_f128(uint64_t x) {
#define rx_xor_vec_f128 _mm_xor_pd
#define rx_and_vec_f128 _mm_and_pd
#define rx_and_vec_i128 _mm_and_si128
#define rx_or_vec_f128 _mm_or_pd
#ifdef __AES__
@@ -278,6 +279,10 @@ FORCE_INLINE rx_vec_f128 rx_and_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return (rx_vec_f128)vec_and(a,b);
}
FORCE_INLINE rx_vec_i128 rx_and_vec_i128(rx_vec_i128 a, rx_vec_i128 b) {
return (rx_vec_i128)vec_and(a, b);
}
FORCE_INLINE rx_vec_f128 rx_or_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return (rx_vec_f128)vec_or(a,b);
}
@@ -444,6 +449,8 @@ FORCE_INLINE rx_vec_f128 rx_and_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return vreinterpretq_f64_u8(vandq_u8(vreinterpretq_u8_f64(a), vreinterpretq_u8_f64(b)));
}
#define rx_and_vec_i128 vandq_u8
FORCE_INLINE rx_vec_f128 rx_or_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return vreinterpretq_f64_u8(vorrq_u8(vreinterpretq_u8_f64(a), vreinterpretq_u8_f64(b)));
}
@@ -635,6 +642,13 @@ FORCE_INLINE rx_vec_f128 rx_and_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
return x;
}
FORCE_INLINE rx_vec_i128 rx_and_vec_i128(rx_vec_i128 a, rx_vec_i128 b) {
rx_vec_i128 x;
x.u64[0] = a.u64[0] & b.u64[0];
x.u64[1] = a.u64[1] & b.u64[1];
return x;
}
FORCE_INLINE rx_vec_f128 rx_or_vec_f128(rx_vec_f128 a, rx_vec_f128 b) {
rx_vec_f128 x;
x.i.u64[0] = a.i.u64[0] | b.i.u64[0];

View File

@@ -144,8 +144,6 @@ void JitCompilerA64::generateProgram(Program& program, ProgramConfiguration& con
for (uint32_t i = 0; i < program.getSize(); ++i)
{
Instruction& instr = program(i);
instr.src %= RegistersCount;
instr.dst %= RegistersCount;
(this->*engine[instr.opcode])(instr, codePos);
}
@@ -204,8 +202,6 @@ void JitCompilerA64::generateProgramLight(Program& program, ProgramConfiguration
for (uint32_t i = 0; i < program.getSize(); ++i)
{
Instruction& instr = program(i);
instr.src %= RegistersCount;
instr.dst %= RegistersCount;
(this->*engine[instr.opcode])(instr, codePos);
}

View File

@@ -312,11 +312,19 @@ namespace randomx {
freePagedMemory(allocatedCode, allocatedSize);
}
template<size_t N>
static FORCE_INLINE void prefetch_data(const void* data) {
rx_prefetch_nta(data);
prefetch_data<N - 1>(reinterpret_cast<const char*>(data) + 64);
}
template<> FORCE_INLINE void prefetch_data<0>(const void*) {}
template<typename T> static FORCE_INLINE void prefetch_data(const T& data) { prefetch_data<(sizeof(T) + 63) / 64>(&data); }
void JitCompilerX86::prepare() {
for (size_t i = 0; i < sizeof(engine); i += 64)
rx_prefetch_nta((const char*)(&engine) + i);
for (size_t i = 0; i < sizeof(RandomX_CurrentConfig); i += 64)
rx_prefetch_nta((const char*)(&RandomX_CurrentConfig) + i);
prefetch_data(engine);
prefetch_data(RandomX_CurrentConfig);
}
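
A self-contained sketch of the unrolled-prefetch pattern introduced above: the compile-time recursion issues one prefetch per 64-byte cache line covering the object, replacing the two hand-written byte-stepping loops. `prefetch_line` below is an illustrative stand-in for `rx_prefetch_nta`, which in the real source wraps the platform prefetch intrinsic.

#include <cstddef>

// Stand-in for rx_prefetch_nta (assumption for illustration).
static inline void prefetch_line(const void* p) {
    __builtin_prefetch(p, 0 /*read*/, 0 /*non-temporal*/);
}

template<size_t N>
static inline void prefetch_data(const void* data) {
    prefetch_line(data);
    prefetch_data<N - 1>(static_cast<const char*>(data) + 64);
}

template<> inline void prefetch_data<0>(const void*) {}

// The line count (sizeof(T) + 63) / 64 is a compile-time constant,
// so each call site is fully unrolled.
template<typename T>
static inline void prefetch_data(const T& data) {
    prefetch_data<(sizeof(T) + 63) / 64>(&data);
}

int main() {
    int table[1024] = {};
    prefetch_data(table); // unrolls to 64 prefetch instructions
}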
void JitCompilerX86::generateProgram(Program& prog, ProgramConfiguration& pcfg, uint32_t flags) {
@@ -748,7 +756,7 @@ namespace randomx {
template void JitCompilerX86::genAddressReg<true>(const Instruction& instr, const uint32_t src, uint8_t* code, uint32_t& codePos);
FORCE_INLINE void JitCompilerX86::genAddressRegDst(const Instruction& instr, uint8_t* code, uint32_t& codePos) {
const uint32_t dst = static_cast<uint32_t>(instr.dst % RegistersCount) << 16;
const uint32_t dst = static_cast<uint32_t>(instr.dst) << 16;
*(uint32_t*)(code + codePos) = 0x24808d41 + dst;
codePos += (dst == (RegisterNeedsSib << 16)) ? 4 : 3;
@@ -768,8 +776,8 @@ namespace randomx {
uint32_t pos = codePos;
uint8_t* const p = code + pos;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t sib = (instr.getModShift() << 6) | ((instr.src % RegistersCount) << 3) | dst;
const uint32_t dst = instr.dst;
const uint32_t sib = (instr.getModShift() << 6) | (instr.src << 3) | dst;
uint32_t k = 0x048d4f + (dst << 19);
if (dst == RegisterNeedsDisplacement)
@@ -788,8 +796,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
if (src != dst) {
genAddressReg<true>(instr, src, p, pos);
@@ -809,8 +817,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
if (src != dst) {
*(uint32_t*)(p + pos) = 0xc02b4d + (dst << 19) + (src << 16);
@@ -830,8 +838,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
if (src != dst) {
genAddressReg<true>(instr, src, p, pos);
@@ -851,8 +859,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
if (src != dst) {
emit32(0xc0af0f4d + ((dst * 8 + src) << 24), p, pos);
@@ -871,8 +879,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
genAddressReg<true>(instr, src, p, pos);
@@ -892,8 +900,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
*(uint32_t*)(p + pos) = 0xc08b49 + (dst << 16);
*(uint32_t*)(p + pos + 3) = 0xe0f749 + (src << 16);
@@ -908,8 +916,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
*(uint32_t*)(p + pos) = 0xC4D08B49 + (dst << 16);
*(uint32_t*)(p + pos + 4) = 0xC0F6FB42 + (dst << 27) + (src << 24);
@@ -923,8 +931,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
genAddressReg<false>(instr, src, p, pos);
@@ -947,8 +955,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
genAddressReg<false>(instr, src, p, pos);
@@ -970,8 +978,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
*(uint64_t*)(p + pos) = 0x8b4ce8f749c08b49ull + (dst << 16) + (src << 40);
pos += 8;
@@ -985,8 +993,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
genAddressReg<false>(instr, src, p, pos);
@@ -1011,7 +1019,7 @@ namespace randomx {
uint64_t divisor = instr.getImm32();
if (!isZeroOrPowerOf2(divisor)) {
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t dst = instr.dst;
const uint64_t reciprocal = randomx_reciprocal_fast(divisor);
if (imul_rcp_storage_used < 16) {
@@ -1040,7 +1048,7 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t dst = instr.dst;
*(uint32_t*)(p + pos) = 0xd8f749 + (dst << 16);
pos += 3;
@@ -1052,8 +1060,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
*(uint32_t*)(p + pos) = 0xc0334d + (((dst << 3) + src) << 16);
@@ -1073,8 +1081,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
genAddressReg<true>(instr, src, p, pos);
@@ -1094,8 +1102,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
*(uint64_t*)(p + pos) = 0xc8d349c88b41ull + (src << 16) + (dst << 40);
@@ -1115,8 +1123,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t src = instr.src;
const uint64_t dst = instr.dst;
if (src != dst) {
*(uint64_t*)(p + pos) = 0xc0d349c88b41ull + (src << 16) + (dst << 40);
@@ -1136,8 +1144,8 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t dst = instr.dst % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst;
if (src != dst) {
*(uint32_t*)(p + pos) = 0xc0874d + (((dst << 3) + src) << 16);
@@ -1153,7 +1161,7 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const uint64_t dst = instr.dst % RegistersCount;
const uint64_t dst = instr.dst;
*(uint64_t*)(p + pos) = 0x01c0c60f66ull + (((dst << 3) + dst) << 24);
pos += 5;
@@ -1182,7 +1190,7 @@ namespace randomx {
prevFPOperation = pos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst % RegisterCountFlt;
genAddressReg<true>(instr, src, p, pos);
@@ -1214,7 +1222,7 @@ namespace randomx {
prevFPOperation = pos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t src = instr.src;
const uint32_t dst = instr.dst % RegisterCountFlt;
genAddressReg<true>(instr, src, p, pos);
@@ -1257,7 +1265,7 @@ namespace randomx {
prevFPOperation = pos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t src = instr.src;
const uint64_t dst = instr.dst % RegisterCountFlt;
genAddressReg<true>(instr, src, p, pos);
@@ -1307,7 +1315,7 @@ namespace randomx {
uint32_t pos = codePos;
prevCFROUND = pos;
const uint32_t src = instr.src % RegistersCount;
const uint32_t src = instr.src;
*(uint32_t*)(p + pos) = 0x00C08B49 + (src << 16);
const int rotate = (static_cast<int>(instr.getImm32() & 63) - 2) & 63;
@@ -1343,7 +1351,7 @@ namespace randomx {
uint32_t pos = codePos;
prevCFROUND = pos;
const uint64_t src = instr.src % RegistersCount;
const uint64_t src = instr.src;
const uint64_t rotate = (static_cast<int>(instr.getImm32() & 63) - 2) & 63;
*(uint64_t*)(p + pos) = 0xC0F0FBC3C4ULL | (src << 32) | (rotate << 40);
@@ -1367,7 +1375,7 @@ namespace randomx {
uint8_t* const p = code;
uint32_t pos = codePos;
const int reg = instr.dst % RegistersCount;
const int reg = instr.dst;
int32_t jmp_offset = registerUsage[reg];
// if it jumps over the previous FP instruction that uses rounding, treat it as if FP instruction happened now
@@ -1426,7 +1434,7 @@ namespace randomx {
uint32_t pos = codePos;
genAddressRegDst(instr, p, pos);
emit32(0x0604894c + (static_cast<uint32_t>(instr.src % RegistersCount) << 19), p, pos);
emit32(0x0604894c + (static_cast<uint32_t>(instr.src) << 19), p, pos);
codePos = pos;
}

View File

@@ -1,7 +1,7 @@
/* XMRig
* Copyright (c) 2019 Howard Chu <https://github.com/hyc>
* Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -292,8 +292,7 @@ void xmrig::Network::setJob(IClient *client, const Job &job, bool donate)
}
if (!donate && m_donate) {
m_donate->setAlgo(job.algorithm());
m_donate->setProxy(client->pool().proxy());
static_cast<DonateStrategy *>(m_donate)->update(client, job);
}
m_controller->miner()->setJob(job, donate);

View File

@@ -1,7 +1,7 @@
/* XMRig
* Copyright (c) 2019 Howard Chu <https://github.com/hyc>
* Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -89,7 +89,7 @@ private:
};
} /* namespace xmrig */
} // namespace xmrig
#endif /* XMRIG_NETWORK_H */
#endif // XMRIG_NETWORK_H

View File

@@ -1,6 +1,6 @@
/* XMRig
* Copyright (c) 2018-2022 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2022 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,7 +48,7 @@ static const char *kDonateHost = "donate.v2.xmrig.com";
static const char *kDonateHostTls = "donate.ssl.xmrig.com";
#endif
} /* namespace xmrig */
} // namespace xmrig
xmrig::DonateStrategy::DonateStrategy(Controller *controller, IStrategyListener *listener) :
@@ -98,6 +98,17 @@ xmrig::DonateStrategy::~DonateStrategy()
}
void xmrig::DonateStrategy::update(IClient *client, const Job &job)
{
setAlgo(job.algorithm());
setProxy(client->pool().proxy());
m_diff = job.diff();
m_height = job.height();
m_seed = job.seed();
}
int64_t xmrig::DonateStrategy::submit(const JobResult &result)
{
return m_proxy ? m_proxy->submit(result) : m_strategy->submit(result);
@@ -199,13 +210,13 @@ void xmrig::DonateStrategy::onLogin(IClient *, rapidjson::Document &doc, rapidjs
params.AddMember("url", m_pools[0].url().toJSON(), allocator);
# endif
setAlgorithms(doc, params);
setParams(doc, params);
}
void xmrig::DonateStrategy::onLogin(IStrategy *, IClient *, rapidjson::Document &doc, rapidjson::Value &params)
{
setAlgorithms(doc, params);
setParams(doc, params);
}
@@ -270,12 +281,20 @@ void xmrig::DonateStrategy::idle(double min, double max)
}
void xmrig::DonateStrategy::setAlgorithms(rapidjson::Document &doc, rapidjson::Value &params)
void xmrig::DonateStrategy::setJob(IClient *client, const Job &job, const rapidjson::Value &params)
{
if (isActive()) {
m_listener->onJob(this, client, job, params);
}
}
void xmrig::DonateStrategy::setParams(rapidjson::Document &doc, rapidjson::Value &params)
{
using namespace rapidjson;
auto &allocator = doc.GetAllocator();
auto algorithms = m_controller->miner()->algorithms();
Algorithms algorithms = m_controller->miner()->algorithms();
const size_t index = static_cast<size_t>(std::distance(algorithms.begin(), std::find(algorithms.begin(), algorithms.end(), m_algorithm)));
if (index > 0 && index < algorithms.size()) {
std::swap(algorithms[0], algorithms[index]);
@@ -287,14 +306,12 @@ void xmrig::DonateStrategy::setAlgorithms(rapidjson::Document &doc, rapidjson::V
algo.PushBack(StringRef(a.name()), allocator);
}
params.AddMember("algo", algo, allocator);
}
params.AddMember("algo", algo, allocator);
params.AddMember("diff", m_diff, allocator);
params.AddMember("height", m_height, allocator);
void xmrig::DonateStrategy::setJob(IClient *client, const Job &job, const rapidjson::Value &params)
{
if (isActive()) {
m_listener->onJob(this, client, job, params);
if (!m_seed.empty()) {
params.AddMember("seed_hash", Cvt::toHex(m_seed, doc), allocator);
}
}
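
In effect, setParams() extends the donation-round login with the fields shown in the hunk above. A minimal rapidjson fragment (not a full program; the algorithm name and numeric values are illustrative assumptions, the field names come from the diff):

#include "rapidjson/document.h"

void sketch_params(rapidjson::Document &doc, rapidjson::Value &params) {
    auto &allocator = doc.GetAllocator();

    rapidjson::Value algo(rapidjson::kArrayType);
    algo.PushBack(rapidjson::StringRef("rx/0"), allocator); // preferred algo first

    params.AddMember("algo", algo, allocator);      // supported algorithms
    params.AddMember("diff", 350000u, allocator);   // job.diff() (assumed value)
    params.AddMember("height", 2870000u, allocator);// job.height() (assumed value)
    params.AddMember("seed_hash",                   // hex of job.seed()
                     rapidjson::StringRef("<seed hex>"), allocator);
}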

View File

@@ -1,6 +1,6 @@
/* XMRig
* Copyright (c) 2018-2022 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2022 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,15 +20,12 @@
#define XMRIG_DONATESTRATEGY_H
#include <vector>
#include "base/kernel/interfaces/IClientListener.h"
#include "base/kernel/interfaces/IStrategy.h"
#include "base/kernel/interfaces/IStrategyListener.h"
#include "base/kernel/interfaces/ITimerListener.h"
#include "base/net/stratum/Pool.h"
#include "base/tools/Object.h"
#include "base/tools/Buffer.h"
namespace xmrig {
@@ -36,7 +33,6 @@ namespace xmrig {
class Client;
class Controller;
class IStrategyListener;
class DonateStrategy : public IStrategy, public IStrategyListener, public ITimerListener, public IClientListener
@@ -47,6 +43,8 @@ public:
DonateStrategy(Controller *controller, IStrategyListener *listener);
~DonateStrategy() override;
void update(IClient *client, const Job &job);
protected:
inline bool isActive() const override { return state() == STATE_ACTIVE; }
inline IClient *client() const override { return m_proxy ? m_proxy : m_strategy->client(); }
@@ -88,13 +86,14 @@ private:
IClient *createProxy();
void idle(double min, double max);
void setAlgorithms(rapidjson::Document &doc, rapidjson::Value &params);
void setJob(IClient *client, const Job &job, const rapidjson::Value &params);
void setParams(rapidjson::Document &doc, rapidjson::Value &params);
void setResult(IClient *client, const SubmitResult &result, const char *error);
void setState(State state);
Algorithm m_algorithm;
bool m_tls = false;
Buffer m_seed;
char m_userId[65] = { 0 };
const uint64_t m_donateTime;
const uint64_t m_idleTime;
@@ -105,12 +104,14 @@ private:
State m_state = STATE_NEW;
std::vector<Pool> m_pools;
Timer *m_timer = nullptr;
uint64_t m_diff = 0;
uint64_t m_height = 0;
uint64_t m_now = 0;
uint64_t m_timestamp = 0;
};
} /* namespace xmrig */
} // namespace xmrig
#endif /* XMRIG_DONATESTRATEGY_H */
#endif // XMRIG_DONATESTRATEGY_H

View File

@@ -22,7 +22,7 @@
#define APP_ID "xmrig"
#define APP_NAME "XMRig"
#define APP_DESC "XMRig miner"
#define APP_VERSION "6.19.2"
#define APP_VERSION "6.19.3-dev"
#define APP_DOMAIN "xmrig.com"
#define APP_SITE "www.xmrig.com"
#define APP_COPYRIGHT "Copyright (C) 2016-2023 xmrig.com"
@@ -30,7 +30,7 @@
#define APP_VER_MAJOR 6
#define APP_VER_MINOR 19
#define APP_VER_PATCH 2
#define APP_VER_PATCH 3
#ifdef _MSC_VER
# if (_MSC_VER >= 1930)