Mirror of https://github.com/xmrig/xmrig.git (synced 2025-12-07 07:55:04 -05:00)

Compare commits: v4.6.0-beta...v4.6.2-beta (16 commits)
Commits in this range (SHA1):

- 7c4b76f3f7
- 835228d9f7
- 2847c814d2
- f77c4c67f2
- 13d256e737
- ed4cfd55ac
- f965fd5a8c
- 472ec1a0e6
- 74d62c92cd
- 7e47b04676
- d19c152d9a
- 2abea46a87
- e450e62b40
- e6afd12d4f
- 2ba19c9827
- 426bc8f0c4
@@ -1,3 +1,11 @@

# v4.6.2-beta
- [#1274](https://github.com/xmrig/xmrig/issues/1274) Added `--cuda-devices` command line option.
- [#1277](https://github.com/xmrig/xmrig/pull/1277) Fixed function names for clang on Apple.

# v4.6.1-beta
- [#1272](https://github.com/xmrig/xmrig/pull/1272) Optimized hashrate calculation.
- [#1273](https://github.com/xmrig/xmrig/issues/1273) Fixed crash when using the `GET /2/backends` API endpoint with CUDA disabled.

# v4.6.0-beta
- [#1263](https://github.com/xmrig/xmrig/pull/1263) Added new option `dataset_host` for NVIDIA GPUs with less than 4 GB of memory (RandomX only).
```text
@@ -74,7 +74,7 @@ API:

OpenCL backend:
      --opencl                  enable OpenCL mining backend
      --opencl-devices=N        list of OpenCL devices to use
      --opencl-devices=N        comma separated list of OpenCL devices to use
      --opencl-platform=N       OpenCL platform index or name
      --opencl-loader=PATH      path to OpenCL-ICD-Loader (OpenCL.dll or libOpenCL.so)
      --opencl-no-cache         disable OpenCL cache

@@ -83,6 +83,7 @@ OpenCL backend:

CUDA backend:
      --cuda                    enable CUDA mining backend
      --cuda-loader=PATH        path to CUDA plugin (xmrig-cuda.dll or libxmrig-cuda.so)
      --cuda-devices=N          comma separated list of CUDA devices to use
      --no-nvml                 disable NVML (NVIDIA Management Library) support

Logging:
```
```cpp
@@ -47,7 +47,6 @@ inline static const char *format(double h, char *buf, size_t size)


xmrig::Hashrate::Hashrate(size_t threads) :
    m_highest(0.0),
    m_threads(threads)
{
    m_counts = new uint64_t*[threads];

@@ -100,30 +99,30 @@ double xmrig::Hashrate::calc(size_t threadId, size_t ms) const

    uint64_t earliestHashCount = 0;
    uint64_t earliestStamp = 0;
    uint64_t lastestStamp = 0;
    uint64_t lastestHashCnt = 0;
    bool haveFullSet = false;

    for (size_t i = 1; i < kBucketSize; i++) {
        const size_t idx = (m_top[threadId] - i) & kBucketMask;
    const uint64_t timeStampLimit = xmrig::Chrono::highResolutionMSecs() - ms;
    uint64_t* timestamps = m_timestamps[threadId];
    uint64_t* counts = m_counts[threadId];

        if (m_timestamps[threadId][idx] == 0) {
    const size_t idx_start = (m_top[threadId] - 1) & kBucketMask;
    size_t idx = idx_start;

    uint64_t lastestStamp = timestamps[idx];
    uint64_t lastestHashCnt = counts[idx];

    do {
        if (timestamps[idx] < timeStampLimit) {
            haveFullSet = (timestamps[idx] != 0);
            if (idx != idx_start) {
                idx = (idx + 1) & kBucketMask;
                earliestStamp = timestamps[idx];
                earliestHashCount = counts[idx];
            }
            break;
        }

        if (lastestStamp == 0) {
            lastestStamp = m_timestamps[threadId][idx];
            lastestHashCnt = m_counts[threadId][idx];
        }

        if (xmrig::Chrono::highResolutionMSecs() - m_timestamps[threadId][idx] > ms) {
            haveFullSet = true;
            break;
        }

        earliestStamp = m_timestamps[threadId][idx];
        earliestHashCount = m_counts[threadId][idx];
    }
        idx = (idx - 1) & kBucketMask;
    } while (idx != idx_start);

    if (!haveFullSet || earliestStamp == 0 || lastestStamp == 0) {
        return nan("");
```
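For readers skimming the hunk above: both the old and the new implementation share one idea. Each thread keeps a ring buffer of (timestamp, cumulative hash count) samples, and the hashrate over a window of `ms` milliseconds is the difference in counts divided by the difference in timestamps between the newest sample and the oldest sample still inside the window. The following is a minimal, self-contained sketch of that calculation, with names and structure simplified; it is not the xmrig `Hashrate` class itself:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified single-thread ring buffer of (timestamp, cumulative hash count) samples.
struct HashrateWindow {
    static constexpr size_t kBucketSize = 2 << 11;   // same bucket size as in Hashrate.h
    static constexpr size_t kBucketMask = kBucketSize - 1;

    std::vector<uint64_t> timestamps = std::vector<uint64_t>(kBucketSize, 0);
    std::vector<uint64_t> counts     = std::vector<uint64_t>(kBucketSize, 0);
    uint32_t top = 0;

    void add(uint64_t count, uint64_t timestamp) {
        counts[top]     = count;
        timestamps[top] = timestamp;
        top = (top + 1) & kBucketMask;
    }

    // Hashes per second over the last `ms` milliseconds, or NaN if there are not enough samples.
    double calc(uint64_t now, uint64_t ms) const {
        const uint64_t limit  = now - ms;
        const size_t   newest = (top - 1) & kBucketMask;

        if (timestamps[newest] == 0) {
            return std::nan("");                      // no samples recorded yet
        }

        // Walk backwards until we leave the window or hit an empty slot.
        size_t oldest = newest;
        for (size_t i = 1; i < kBucketSize; ++i) {
            const size_t idx = (top - 1 - i) & kBucketMask;
            if (timestamps[idx] == 0 || timestamps[idx] < limit) {
                break;
            }
            oldest = idx;
        }

        if (oldest == newest || timestamps[newest] == timestamps[oldest]) {
            return std::nan("");                      // need at least two samples in the window
        }

        const double hashes  = static_cast<double>(counts[newest] - counts[oldest]);
        const double seconds = static_cast<double>(timestamps[newest] - timestamps[oldest]) / 1000.0;
        return hashes / seconds;
    }
};
```

The optimized version in the diff avoids rescanning from scratch by starting at the newest slot and stopping as soon as a sample falls outside the time limit, which is what the `timeStampLimit` / `idx_start` logic above implements.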
```cpp
@@ -150,15 +149,6 @@ void xmrig::Hashrate::add(size_t threadId, uint64_t count, uint64_t timestamp)
}


void xmrig::Hashrate::updateHighest()
{
    double highest = calc(ShortInterval);
    if (std::isnormal(highest) && highest > m_highest) {
        m_highest = highest;
    }
}


const char *xmrig::Hashrate::format(double h, char *buf, size_t size)
{
    return ::format(h, buf, size);
```
```cpp
@@ -53,9 +53,7 @@ public:
    double calc(size_t ms) const;
    double calc(size_t threadId, size_t ms) const;
    void add(size_t threadId, uint64_t count, uint64_t timestamp);
    void updateHighest();

    inline double highest() const { return m_highest; }
    inline size_t threads() const { return m_threads; }

    static const char *format(double h, char *buf, size_t size);

@@ -70,7 +68,6 @@ private:
    constexpr static size_t kBucketSize = 2 << 11;
    constexpr static size_t kBucketMask = kBucketSize - 1;

    double m_highest;
    size_t m_threads;
    uint32_t* m_top;
    uint64_t** m_counts;
```
```cpp
@@ -144,8 +144,6 @@ void xmrig::Workers<T>::tick(uint64_t)

        d_ptr->hashrate->add(handle->id(), handle->worker()->hashCount(), handle->worker()->timestamp());
    }

    d_ptr->hashrate->updateHighest();
}
```
```cpp
@@ -155,11 +155,14 @@ public:
            return;
        }

        devices = CudaLib::devices(cuda.bfactor(), cuda.bsleep(), cuda.devicesHint());
        if (devices.empty()) {
            return printDisabled(kLabel, RED_S " (no devices)");
        }

        Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s") "/" WHITE_BOLD("%s") BLACK_BOLD("/%s"), kLabel,
                   CudaLib::version(runtimeVersion).c_str(), CudaLib::version(driverVersion).c_str(), CudaLib::pluginVersion());

        devices = CudaLib::devices(cuda.bfactor(), cuda.bsleep());

#   ifdef XMRIG_FEATURE_NVML
        if (cuda.isNvmlEnabled()) {
            if (NvmlLib::init(cuda.nvmlLoader())) {

@@ -172,7 +175,7 @@ public:
                );
            }
            else {
                printDisabled(kLabel, RED_S " (failed to load NVML)");
                printDisabled(kNvmlLabel, RED_S " (failed to load NVML)");
            }
        }
        else {
```
```cpp
@@ -474,19 +477,21 @@ rapidjson::Value xmrig::CudaBackend::toJSON(rapidjson::Document &doc) const
    out.AddMember("algo", d_ptr->algo.toJSON(), allocator);
    out.AddMember("profile", profileName().toJSON(), allocator);

    Value versions(kObjectType);
    versions.AddMember("cuda-runtime", Value(CudaLib::version(d_ptr->runtimeVersion).c_str(), allocator), allocator);
    versions.AddMember("cuda-driver", Value(CudaLib::version(d_ptr->driverVersion).c_str(), allocator), allocator);
    versions.AddMember("plugin", String(CudaLib::pluginVersion()).toJSON(doc), allocator);
    if (CudaLib::isReady()) {
        Value versions(kObjectType);
        versions.AddMember("cuda-runtime", Value(CudaLib::version(d_ptr->runtimeVersion).c_str(), allocator), allocator);
        versions.AddMember("cuda-driver", Value(CudaLib::version(d_ptr->driverVersion).c_str(), allocator), allocator);
        versions.AddMember("plugin", String(CudaLib::pluginVersion()).toJSON(doc), allocator);

#   ifdef XMRIG_FEATURE_NVML
    if (NvmlLib::isReady()) {
        versions.AddMember("nvml", StringRef(NvmlLib::version()), allocator);
        versions.AddMember("driver", StringRef(NvmlLib::driverVersion()), allocator);
#       ifdef XMRIG_FEATURE_NVML
        if (NvmlLib::isReady()) {
            versions.AddMember("nvml", StringRef(NvmlLib::version()), allocator);
            versions.AddMember("driver", StringRef(NvmlLib::driverVersion()), allocator);
        }
#       endif

        out.AddMember("versions", versions, allocator);
    }
#   endif

    out.AddMember("versions", versions, allocator);

    if (d_ptr->threads.empty() || !hashrate()) {
        return out;
```
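Because the `versions` object is now emitted only when `CudaLib::isReady()`, consumers of the `GET /2/backends` response should treat it as optional. A hedged sketch of reading it defensively with rapidjson (the member names come from the code above; the surrounding JSON shape is an assumption, not a documented contract):

```cpp
#include <cstdio>
#include "rapidjson/document.h"

// Inspect one backend entry from GET /2/backends and print CUDA version info if present.
// The "versions" member may be absent when the CUDA backend is not ready.
void printCudaVersions(const rapidjson::Value &backend)
{
    if (!backend.IsObject()) {
        return;
    }

    const auto it = backend.FindMember("versions");
    if (it == backend.MemberEnd() || !it->value.IsObject()) {
        printf("CUDA backend present, but no version info (backend not ready)\n");
        return;
    }

    const auto &versions = it->value;
    if (versions.HasMember("plugin") && versions["plugin"].IsString()) {
        printf("xmrig-cuda plugin: %s\n", versions["plugin"].GetString());
    }
    if (versions.HasMember("cuda-runtime") && versions["cuda-runtime"].IsString()) {
        printf("CUDA runtime:      %s\n", versions["cuda-runtime"].GetString());
    }
}
```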
```cpp
@@ -78,6 +78,16 @@ rapidjson::Value xmrig::CudaConfig::toJSON(rapidjson::Document &doc) const

std::vector<xmrig::CudaLaunchData> xmrig::CudaConfig::get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const
{
    auto deviceIndex = [&devices](uint32_t index) -> int {
        for (uint32_t i = 0; i < devices.size(); ++i) {
            if (devices[i].index() == index) {
                return i;
            }
        }

        return -1;
    };

    std::vector<CudaLaunchData> out;
    const auto &threads = m_threads.get(algorithm);

@@ -85,15 +95,16 @@ std::vector<xmrig::CudaLaunchData> xmrig::CudaConfig::get(const Miner *miner, co
        return out;
    }

    out.reserve(threads.count() * 2);
    out.reserve(threads.count());

    for (const auto &thread : threads.data()) {
        if (thread.index() >= devices.size()) {
        const int index = deviceIndex(thread.index());
        if (index == -1) {
            LOG_INFO("%s" YELLOW(" skip non-existing device with index ") YELLOW_BOLD("%u"), cuda_tag(), thread.index());
            continue;
        }

        out.emplace_back(miner, algorithm, thread, devices[thread.index()]);
        out.emplace_back(miner, algorithm, thread, devices[static_cast<size_t>(index)]);
    }

    return out;
```
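The reason for the `deviceIndex()` lambda: once `--cuda-devices` filters the device list, a thread's configured physical device index no longer matches its position in `devices`, so indexing the vector with `thread.index()` would pick the wrong device or run off the end. A small stand-alone illustration of the mapping (plain types, not xmrig's classes):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Suppose --cuda-devices=0,2,3 left only these physical device indices in the list.
    const std::vector<uint32_t> deviceIndices = { 0, 2, 3 };

    // A thread configured for physical device 2 must use position 1, not position 2.
    const uint32_t wanted = 2;

    int position = -1;
    for (uint32_t i = 0; i < deviceIndices.size(); ++i) {
        if (deviceIndices[i] == wanted) {
            position = static_cast<int>(i);
            break;
        }
    }

    if (position == -1) {
        printf("skip non-existing device with index %u\n", wanted);   // same idea as the LOG_INFO above
    }
    else {
        printf("device %u is at position %d in the filtered list\n", wanted, position);
    }

    return 0;
}
```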
```cpp
@@ -153,7 +164,7 @@ void xmrig::CudaConfig::generate()
        return;
    }

    const auto devices = CudaLib::devices(bfactor(), bsleep());
    const auto devices = CudaLib::devices(bfactor(), bsleep(), m_devicesHint);
    if (devices.empty()) {
        return;
    }
```
```cpp
@@ -43,16 +43,17 @@ public:
    std::vector<CudaLaunchData> get(const Miner *miner, const Algorithm &algorithm, const std::vector<CudaDevice> &devices) const;
    void read(const rapidjson::Value &value);

    inline bool isEnabled() const { return m_enabled; }
    inline bool isShouldSave() const { return m_shouldSave; }
    inline const String &loader() const { return m_loader; }
    inline const Threads<CudaThreads> &threads() const { return m_threads; }
    inline int32_t bfactor() const { return m_bfactor; }
    inline int32_t bsleep() const { return m_bsleep; }
    inline bool isEnabled() const { return m_enabled; }
    inline bool isShouldSave() const { return m_shouldSave; }
    inline const std::vector<uint32_t> &devicesHint() const { return m_devicesHint; }
    inline const String &loader() const { return m_loader; }
    inline const Threads<CudaThreads> &threads() const { return m_threads; }
    inline int32_t bfactor() const { return m_bfactor; }
    inline int32_t bsleep() const { return m_bsleep; }

#   ifdef XMRIG_FEATURE_NVML
    inline bool isNvmlEnabled() const { return m_nvml; }
    inline const String &nvmlLoader() const { return m_nvmlLoader; }
    inline bool isNvmlEnabled() const { return m_nvml; }
    inline const String &nvmlLoader() const { return m_nvmlLoader; }
#   endif

private:
```
```cpp
@@ -209,7 +209,7 @@ std::string xmrig::CudaLib::version(uint32_t version)
}


std::vector<xmrig::CudaDevice> xmrig::CudaLib::devices(int32_t bfactor, int32_t bsleep) noexcept
std::vector<xmrig::CudaDevice> xmrig::CudaLib::devices(int32_t bfactor, int32_t bsleep, const std::vector<uint32_t> &hints) noexcept
{
    const uint32_t count = deviceCount();
    if (!count) {
```
```cpp
@@ -219,10 +219,24 @@ std::vector<xmrig::CudaDevice> xmrig::CudaLib::devices(int32_t bfactor, int32_t
    std::vector<CudaDevice> out;
    out.reserve(count);

    for (uint32_t i = 0; i < count; ++i) {
        CudaDevice device(i, bfactor, bsleep);
        if (device.isValid()) {
            out.emplace_back(std::move(device));
    if (hints.empty()) {
        for (uint32_t i = 0; i < count; ++i) {
            CudaDevice device(i, bfactor, bsleep);
            if (device.isValid()) {
                out.emplace_back(std::move(device));
            }
        }
    }
    else {
        for (const uint32_t i : hints) {
            if (i >= count) {
                continue;
            }

            CudaDevice device(i, bfactor, bsleep);
            if (device.isValid()) {
                out.emplace_back(std::move(device));
            }
        }
    }
```
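The `hints` vector ultimately comes from the comma separated `--cuda-devices` value. How xmrig turns that string into `std::vector<uint32_t>` is not shown in this diff; a hypothetical stand-alone parser with the same effect might look like this (helper name and error handling are assumptions):

```cpp
#include <cstdint>
#include <cstdlib>
#include <string>
#include <vector>

// Hypothetical helper: turn "0,2,3" into {0, 2, 3}. Empty or non-numeric fields are skipped.
std::vector<uint32_t> parseDeviceHints(const std::string &arg)
{
    std::vector<uint32_t> hints;

    size_t start = 0;
    while (start <= arg.size()) {
        const size_t end = arg.find(',', start);
        const std::string field = arg.substr(start, end == std::string::npos ? std::string::npos : end - start);

        if (!field.empty()) {
            char *endptr = nullptr;
            const unsigned long value = std::strtoul(field.c_str(), &endptr, 10);
            if (endptr != field.c_str() && *endptr == '\0') {
                hints.push_back(static_cast<uint32_t>(value));
            }
        }

        if (end == std::string::npos) {
            break;
        }
        start = end + 1;
    }

    return hints;
}
```

Note that `CudaLib::devices()` above silently drops hint values that are out of range (`i >= count`), so a bad entry in `--cuda-devices` simply results in fewer devices being used.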
```cpp
@@ -69,8 +69,9 @@ public:
    static const char *lastError() noexcept;
    static void close();

    static inline bool isInitialized() { return m_initialized; }
    static inline const String &loader() { return m_loader; }
    static inline bool isInitialized() { return m_initialized; }
    static inline bool isReady() noexcept { return m_ready; }
    static inline const String &loader() { return m_loader; }

    static bool cnHash(nvid_ctx *ctx, uint32_t startNonce, uint64_t height, uint64_t target, uint32_t *rescount, uint32_t *resnonce);
    static bool deviceInit(nvid_ctx *ctx) noexcept;

@@ -84,7 +85,7 @@ public:
    static int32_t deviceInt(nvid_ctx *ctx, DeviceProperty property) noexcept;
    static nvid_ctx *alloc(uint32_t id, int32_t bfactor, int32_t bsleep) noexcept;
    static std::string version(uint32_t version);
    static std::vector<CudaDevice> devices(int32_t bfactor, int32_t bsleep) noexcept;
    static std::vector<CudaDevice> devices(int32_t bfactor, int32_t bsleep, const std::vector<uint32_t> &hints) noexcept;
    static uint32_t deviceCount() noexcept;
    static uint32_t deviceUint(nvid_ctx *ctx, DeviceProperty property) noexcept;
    static uint32_t driverVersion() noexcept;
```
```cpp
@@ -50,7 +50,7 @@ bool xmrig::Json::getBool(const rapidjson::Value &obj, const char *key, bool def
}


const char *xmrig::Json::getString(const rapidjson::Value &obj, const char *key, const char *defaultValue)
const char *xmrig::Json::getString(const rapidjson::Value &obj, const char *key, const char *defaultValue)
{
    assert(obj.IsObject());
```
```cpp
@@ -187,10 +187,14 @@ void xmrig::ConfigTransform::transform(rapidjson::Document &doc, int key, const

#   ifdef XMRIG_FEATURE_CUDA
    case IConfig::CudaKey: /* --cuda */
        return set(doc, kCuda, "enabled", true);
        return set(doc, kCuda, kEnabled, true);

    case IConfig::CudaLoaderKey: /* --cuda-loader */
        return set(doc, kCuda, "loader", arg);

    case IConfig::CudaDevicesKey: /* --cuda-devices */
        set(doc, kCuda, kEnabled, true);
        return set(doc, kCuda, "devices-hint", arg);
#   endif

#   ifdef XMRIG_FEATURE_NVML
```
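The net effect of the `--cuda-devices` branch above is that the generated config gains a `devices-hint` entry under the `cuda` object, stored as the raw option string, and the backend is switched on at the same time. A hedged sketch of the resulting shape, built directly with rapidjson rather than through xmrig's `set()` helper (the exact key layout is inferred from the code above, not taken from documentation):

```cpp
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

int main()
{
    using namespace rapidjson;

    Document doc(kObjectType);
    auto &allocator = doc.GetAllocator();

    // Roughly what `--cuda --cuda-devices=0,2` is transformed into.
    Value cuda(kObjectType);
    cuda.AddMember("enabled", true, allocator);
    cuda.AddMember("devices-hint", "0,2", allocator);
    doc.AddMember("cuda", cuda, allocator);

    StringBuffer buffer;
    Writer<StringBuffer> writer(buffer);
    doc.Accept(writer);

    printf("%s\n", buffer.GetString());   // {"cuda":{"enabled":true,"devices-hint":"0,2"}}
    return 0;
}
```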
```cpp
@@ -107,6 +107,7 @@ static const option options[] = {
#   ifdef XMRIG_FEATURE_CUDA
    { "cuda",         0, nullptr, IConfig::CudaKey        },
    { "cuda-loader",  1, nullptr, IConfig::CudaLoaderKey  },
    { "cuda-devices", 1, nullptr, IConfig::CudaDevicesKey },
#   endif
#   ifdef XMRIG_FEATURE_NVML
    { "no-nvml",      0, nullptr, IConfig::NvmlKey        },
```
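The second field in each entry is the standard `getopt_long` `has_arg` flag, so `cuda-devices` (value `1`) requires an argument while `cuda` and `no-nvml` (value `0`) do not. A stripped-down, self-contained illustration of the same table layout; the option keys here are arbitrary placeholders, not xmrig's `IConfig` values:

```cpp
#include <cstdio>
#include <getopt.h>

enum { CudaKey = 1000, CudaDevicesKey = 1001 };

int main(int argc, char **argv)
{
    static const option options[] = {
        { "cuda",         no_argument,       nullptr, CudaKey        },
        { "cuda-devices", required_argument, nullptr, CudaDevicesKey },
        { nullptr,        0,                 nullptr, 0              }
    };

    int key = 0;
    while ((key = getopt_long(argc, argv, "", options, nullptr)) != -1) {
        switch (key) {
        case CudaKey:
            printf("CUDA backend enabled\n");
            break;

        case CudaDevicesKey:
            printf("devices hint: %s\n", optarg);   // e.g. ./a.out --cuda --cuda-devices=0,2
            break;

        default:
            break;
        }
    }

    return 0;
}
```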
```cpp
@@ -101,7 +101,7 @@ static inline const std::string &usage()
#   ifdef XMRIG_FEATURE_OPENCL
    u += "\nOpenCL backend:\n";
    u += "      --opencl                  enable OpenCL mining backend\n";
    u += "      --opencl-devices=N        list of OpenCL devices to use\n";
    u += "      --opencl-devices=N        comma separated list of OpenCL devices to use\n";
    u += "      --opencl-platform=N       OpenCL platform index or name\n";
    u += "      --opencl-loader=PATH      path to OpenCL-ICD-Loader (OpenCL.dll or libOpenCL.so)\n";
    u += "      --opencl-no-cache         disable OpenCL cache\n";

@@ -112,6 +112,7 @@ static inline const std::string &usage()
    u += "\nCUDA backend:\n";
    u += "      --cuda                    enable CUDA mining backend\n";
    u += "      --cuda-loader=PATH        path to CUDA plugin (xmrig-cuda.dll or libxmrig-cuda.so)\n";
    u += "      --cuda-devices=N          comma separated list of CUDA devices to use\n";
#   endif
#   ifdef XMRIG_FEATURE_NVML
    u += "      --no-nvml                 disable NVML (NVIDIA Management Library) support\n";
```
```asm
@@ -25,26 +25,32 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if defined(__APPLE__)
#define DECL(x) _##x
#else
#define DECL(x) x
#endif

.arch armv8-a
.text

.global randomx_program_aarch64
.global randomx_program_aarch64_main_loop
.global randomx_program_aarch64_vm_instructions
.global randomx_program_aarch64_imul_rcp_literals_end
.global randomx_program_aarch64_vm_instructions_end
.global randomx_program_aarch64_cacheline_align_mask1
.global randomx_program_aarch64_cacheline_align_mask2
.global randomx_program_aarch64_update_spMix1
.global randomx_program_aarch64_vm_instructions_end_light
.global randomx_program_aarch64_light_cacheline_align_mask
.global randomx_program_aarch64_light_dataset_offset
.global randomx_init_dataset_aarch64
.global randomx_init_dataset_aarch64_end
.global randomx_calc_dataset_item_aarch64
.global randomx_calc_dataset_item_aarch64_prefetch
.global randomx_calc_dataset_item_aarch64_mix
.global randomx_calc_dataset_item_aarch64_store_result
.global randomx_calc_dataset_item_aarch64_end
.global DECL(randomx_program_aarch64)
.global DECL(randomx_program_aarch64_main_loop)
.global DECL(randomx_program_aarch64_vm_instructions)
.global DECL(randomx_program_aarch64_imul_rcp_literals_end)
.global DECL(randomx_program_aarch64_vm_instructions_end)
.global DECL(randomx_program_aarch64_cacheline_align_mask1)
.global DECL(randomx_program_aarch64_cacheline_align_mask2)
.global DECL(randomx_program_aarch64_update_spMix1)
.global DECL(randomx_program_aarch64_vm_instructions_end_light)
.global DECL(randomx_program_aarch64_light_cacheline_align_mask)
.global DECL(randomx_program_aarch64_light_dataset_offset)
.global DECL(randomx_init_dataset_aarch64)
.global DECL(randomx_init_dataset_aarch64_end)
.global DECL(randomx_calc_dataset_item_aarch64)
.global DECL(randomx_calc_dataset_item_aarch64_prefetch)
.global DECL(randomx_calc_dataset_item_aarch64_mix)
.global DECL(randomx_calc_dataset_item_aarch64_store_result)
.global DECL(randomx_calc_dataset_item_aarch64_end)

# Register allocation
```
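This is the change behind changelog entry #1277: on Apple platforms the toolchain prefixes C-level symbols with an underscore, so a label that C++ code references as `randomx_program_aarch64` must be emitted as `_randomx_program_aarch64` in the assembly, which is exactly what `DECL()` does. A hedged illustration of the consumer side; the declarations are simplified and the actual xmrig/RandomX headers may differ:

```cpp
#include <cstdint>

// The assembly labels are used as addresses to copy or patch JIT code from, so the
// exact signature is irrelevant; plain extern "C" declarations are enough to link.
extern "C" {
    void randomx_program_aarch64();
    void randomx_program_aarch64_main_loop();
    void randomx_program_aarch64_vm_instructions();
}

// On Apple targets the compiler resolves randomx_program_aarch64 to the mangled
// symbol _randomx_program_aarch64, matching the DECL()-prefixed label in the .S file.
static inline const uint8_t *programStart()
{
    return reinterpret_cast<const uint8_t *>(&randomx_program_aarch64);
}
```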
```asm
@@ -99,7 +105,7 @@
# v31 -> scale mask = 0x81f000000000000081f0000000000000

.balign 4
randomx_program_aarch64:
DECL(randomx_program_aarch64):
# Save callee-saved registers
sub sp, sp, 192
stp x16, x17, [sp]

@@ -187,7 +193,7 @@ randomx_program_aarch64:
ldr q14, literal_v14
ldr q15, literal_v15

randomx_program_aarch64_main_loop:
DECL(randomx_program_aarch64_main_loop):
# spAddr0 = spMix1 & ScratchpadL3Mask64;
# spAddr1 = (spMix1 >> 32) & ScratchpadL3Mask64;
lsr x18, x10, 32

@@ -260,7 +266,7 @@ randomx_program_aarch64_main_loop:
orr v23.16b, v23.16b, v30.16b

# Execute VM instructions
randomx_program_aarch64_vm_instructions:
DECL(randomx_program_aarch64_vm_instructions):

# 16 KB buffer for generated instructions
.fill 4096,4,0

@@ -278,7 +284,7 @@ literal_x27: .fill 1,8,0
literal_x28: .fill 1,8,0
literal_x29: .fill 1,8,0
literal_x30: .fill 1,8,0
randomx_program_aarch64_imul_rcp_literals_end:
DECL(randomx_program_aarch64_imul_rcp_literals_end):

literal_v0: .fill 2,8,0
literal_v1: .fill 2,8,0

@@ -297,14 +303,14 @@ literal_v13: .fill 2,8,0
literal_v14: .fill 2,8,0
literal_v15: .fill 2,8,0

randomx_program_aarch64_vm_instructions_end:
DECL(randomx_program_aarch64_vm_instructions_end):

# mx ^= r[readReg2] ^ r[readReg3];
eor x9, x9, x18

# Calculate dataset pointer for dataset prefetch
mov w18, w9
randomx_program_aarch64_cacheline_align_mask1:
DECL(randomx_program_aarch64_cacheline_align_mask1):
# Actual mask will be inserted by JIT compiler
and x18, x18, 1
add x18, x18, x1
```
```asm
@@ -317,12 +323,12 @@ randomx_program_aarch64_cacheline_align_mask1:

# Calculate dataset pointer for dataset read
mov w10, w9
randomx_program_aarch64_cacheline_align_mask2:
DECL(randomx_program_aarch64_cacheline_align_mask2):
# Actual mask will be inserted by JIT compiler
and x10, x10, 1
add x10, x10, x1

randomx_program_aarch64_xor_with_dataset_line:
DECL(randomx_program_aarch64_xor_with_dataset_line):
# xor integer registers with dataset data
ldp x18, x19, [x10]
eor x4, x4, x18

@@ -337,7 +343,7 @@ randomx_program_aarch64_xor_with_dataset_line:
eor x14, x14, x18
eor x15, x15, x19

randomx_program_aarch64_update_spMix1:
DECL(randomx_program_aarch64_update_spMix1):
# JIT compiler will replace it with "eor x10, config.readReg0, config.readReg1"
eor x10, x0, x0

@@ -358,7 +364,7 @@ randomx_program_aarch64_update_spMix1:
stp q18, q19, [x16, 32]

subs x3, x3, 1
bne randomx_program_aarch64_main_loop
bne DECL(randomx_program_aarch64_main_loop)

# Restore x0
ldr x0, [sp], 16

@@ -392,7 +398,7 @@ randomx_program_aarch64_update_spMix1:

ret

randomx_program_aarch64_vm_instructions_end_light:
DECL(randomx_program_aarch64_vm_instructions_end_light):
sub sp, sp, 96
stp x0, x1, [sp, 64]
stp x2, x30, [sp, 80]

@@ -409,26 +415,26 @@ randomx_program_aarch64_vm_instructions_end_light:
# x1 -> pointer to output
mov x1, sp

randomx_program_aarch64_light_cacheline_align_mask:
DECL(randomx_program_aarch64_light_cacheline_align_mask):
# Actual mask will be inserted by JIT compiler
and w2, w9, 1

# x2 -> item number
lsr x2, x2, 6

randomx_program_aarch64_light_dataset_offset:
DECL(randomx_program_aarch64_light_dataset_offset):
# Apply dataset offset (filled in by JIT compiler)
add x2, x2, 0
add x2, x2, 0

bl randomx_calc_dataset_item_aarch64
bl DECL(randomx_calc_dataset_item_aarch64)

mov x10, sp
ldp x0, x1, [sp, 64]
ldp x2, x30, [sp, 80]
add sp, sp, 96

b randomx_program_aarch64_xor_with_dataset_line
b DECL(randomx_program_aarch64_xor_with_dataset_line)
```
```asm
@@ -439,26 +445,26 @@ randomx_program_aarch64_light_dataset_offset:
# x2 -> start item
# x3 -> end item

randomx_init_dataset_aarch64:
DECL(randomx_init_dataset_aarch64):
# Save x30 (return address)
str x30, [sp, -16]!

# Load pointer to cache memory
ldr x0, [x0]

randomx_init_dataset_aarch64_main_loop:
bl randomx_calc_dataset_item_aarch64
DECL(randomx_init_dataset_aarch64_main_loop):
bl DECL(randomx_calc_dataset_item_aarch64)
add x1, x1, 64
add x2, x2, 1
cmp x2, x3
bne randomx_init_dataset_aarch64_main_loop
bne DECL(randomx_init_dataset_aarch64_main_loop)

# Restore x30 (return address)
ldr x30, [sp], 16

ret

randomx_init_dataset_aarch64_end:
DECL(randomx_init_dataset_aarch64_end):

# Input parameters
#

@@ -476,7 +482,7 @@ randomx_init_dataset_aarch64_end:
# x12 -> temporary
# x13 -> temporary

randomx_calc_dataset_item_aarch64:
DECL(randomx_calc_dataset_item_aarch64):
sub sp, sp, 112
stp x0, x1, [sp]
stp x2, x3, [sp, 16]

@@ -523,7 +529,7 @@ randomx_calc_dataset_item_aarch64:
ldr x12, superscalarAdd7
eor x7, x0, x12

b randomx_calc_dataset_item_aarch64_prefetch
b DECL(randomx_calc_dataset_item_aarch64_prefetch)

superscalarMul0: .quad 6364136223846793005
superscalarAdd1: .quad 9298411001130361340

@@ -536,7 +542,7 @@ superscalarAdd7: .quad 9549104520008361294

# Prefetch -> SuperScalar hash -> Mix will be repeated N times

randomx_calc_dataset_item_aarch64_prefetch:
DECL(randomx_calc_dataset_item_aarch64_prefetch):
# Actual mask will be inserted by JIT compiler
and x11, x10, 1
add x11, x8, x11, lsl 6

@@ -544,7 +550,7 @@ randomx_calc_dataset_item_aarch64_prefetch:

# Generated SuperScalar hash program goes here

randomx_calc_dataset_item_aarch64_mix:
DECL(randomx_calc_dataset_item_aarch64_mix):
ldp x12, x13, [x11]
eor x0, x0, x12
eor x1, x1, x13

@@ -558,7 +564,7 @@ randomx_calc_dataset_item_aarch64_mix:
eor x6, x6, x12
eor x7, x7, x13

randomx_calc_dataset_item_aarch64_store_result:
DECL(randomx_calc_dataset_item_aarch64_store_result):
stp x0, x1, [x9]
stp x2, x3, [x9, 16]
stp x4, x5, [x9, 32]

@@ -575,4 +581,4 @@ randomx_calc_dataset_item_aarch64_store_result:

ret

randomx_calc_dataset_item_aarch64_end:
DECL(randomx_calc_dataset_item_aarch64_end):
```
```cpp
@@ -28,7 +28,7 @@
#define APP_ID        "xmrig"
#define APP_NAME      "XMRig"
#define APP_DESC      "XMRig miner"
#define APP_VERSION   "4.6.0-beta"
#define APP_VERSION   "4.6.2-beta"
#define APP_DOMAIN    "xmrig.com"
#define APP_SITE      "www.xmrig.com"
#define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com"

@@ -36,7 +36,7 @@

#define APP_VER_MAJOR  4
#define APP_VER_MINOR  6
#define APP_VER_PATCH  0
#define APP_VER_PATCH  2

#ifdef _MSC_VER
#   if (_MSC_VER >= 1920)
```