Mirror of https://github.com/xmrig/xmrig.git (synced 2025-12-07 07:55:04 -05:00)

Compare commits: 4c57b60e59 ... v6.19.0 (52 commits)
Commits in this comparison (SHA1):

c0143b90ce, c3cdffe86d, 8a4da33bea, 1c7a339527, 490acd6e55, 6ecf57959b,
e2c58126e9, 0ed4b35cd3, afe2aa4402, 3f7533a645, 6ef0409086, 64b0d9562e,
770b71c69a, 44642643f8, 273bb84df8, 4d0b8c9daf, 7d4d48e83b, 2ea37cdf37,
a02afe6d4f, 6e86dddc65, 0171faffe7, 25decd1b7f, 354b9ddb34, 3ad6ab56a5,
1aa0e37b54, 807c64ddb1, 5bf90704a6, 912d1e362b, eeb459506c, f4ec0287c4,
483d6ada3d, 28e81bd7c0, 54e75bc7c4, c388113a30, 36afeec225, 4b5e56416d,
0d314d0469, 7fc45dfb2d, 2ba40edee0, bc4dd11761, 7b52a41459, b5de214ff9,
8bd3b393ef, 9223c2f027, 6346d36d1b, 93c07e1d34, 0ba3000982, f0e7de8c71,
1c4eb6c5fe, 63e21dfe63, b2d9dab2e3, 7e49fc828d
CHANGELOG.md (21 lines added)

@@ -1,3 +1,24 @@
+# v6.19.0
+- [#3144](https://github.com/xmrig/xmrig/pull/3144) Update to latest `sse2neon.h`.
+- [#3161](https://github.com/xmrig/xmrig/pull/3161) MSVC build: enabled parallel compilation.
+- [#3163](https://github.com/xmrig/xmrig/pull/3163) Improved Zen 3 MSR mod.
+- [#3176](https://github.com/xmrig/xmrig/pull/3176) Update cmake required version to 3.1.
+- [#3182](https://github.com/xmrig/xmrig/pull/3182) DragonflyBSD compilation fixes.
+- [#3196](https://github.com/xmrig/xmrig/pull/3196) Show IP address for failed connections.
+- [#3185](https://github.com/xmrig/xmrig/issues/3185) Fixed macOS DMI reader.
+- [#3198](https://github.com/xmrig/xmrig/pull/3198) Fixed broken RandomX light mode mining.
+- [#3202](https://github.com/xmrig/xmrig/pull/3202) Solo mining: added job timeout (default is 15 seconds).
+
+# v6.18.1
+- [#3129](https://github.com/xmrig/xmrig/pull/3129) Fix: protectRX flushed CPU cache only on MacOS/iOS.
+- [#3126](https://github.com/xmrig/xmrig/pull/3126) Don't reset when pool sends the same job blob.
+- [#3120](https://github.com/xmrig/xmrig/pull/3120) RandomX: optimized `CFROUND` elimination.
+- [#3109](https://github.com/xmrig/xmrig/pull/3109) RandomX: added Blake2 AVX2 version.
+- [#3082](https://github.com/xmrig/xmrig/pull/3082) Fixed GCC 12 warnings.
+- [#3075](https://github.com/xmrig/xmrig/pull/3075) Recognize `armv7ve` as valid ARMv7 target.
+- [#3132](https://github.com/xmrig/xmrig/pull/3132) RandomX: added MSR mod for Zen 4.
+- [#3134](https://github.com/xmrig/xmrig/pull/3134) Added Zen4 to `randomx_boost.sh`.
+
 # v6.18.0
 - [#3067](https://github.com/xmrig/xmrig/pull/3067) Monero v15 network upgrade support and more house keeping.
 - Removed deprecated AstroBWTv1 and v2.
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.1)
 project(xmrig)

 option(WITH_HWLOC "Enable hwloc support" ON)
@@ -27,6 +27,7 @@ option(WITH_STRICT_CACHE "Enable strict checks for OpenCL cache" ON)
 option(WITH_INTERLEAVE_DEBUG_LOG "Enable debug log for threads interleave" OFF)
 option(WITH_PROFILING "Enable profiling for developers" OFF)
 option(WITH_SSE4_1 "Enable SSE 4.1 for Blake2" ON)
+option(WITH_AVX2 "Enable AVX2 for Blake2" ON)
 option(WITH_VAES "Enable VAES instructions for Cryptonight" ON)
 option(WITH_BENCHMARK "Enable builtin RandomX benchmark and stress test" ON)
 option(WITH_SECURE_JIT "Enable secure access to JIT memory" OFF)
@@ -25,13 +25,14 @@ if (XMRIG_64_BIT AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64)$")
     add_definitions(-DRAPIDJSON_SSE2)
 else()
     set(WITH_SSE4_1 OFF)
+    set(WITH_AVX2 OFF)
     set(WITH_VAES OFF)
 endif()

 if (NOT ARM_TARGET)
     if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64|armv8-a)$")
         set(ARM_TARGET 8)
-    elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7|armv7f|armv7s|armv7k|armv7-a|armv7l)$")
+    elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7|armv7f|armv7s|armv7k|armv7-a|armv7l|armv7ve)$")
         set(ARM_TARGET 7)
     endif()
 endif()
@@ -57,3 +58,7 @@ endif()
 if (WITH_SSE4_1)
     add_definitions(-DXMRIG_FEATURE_SSE4_1)
 endif()
+
+if (WITH_AVX2)
+    add_definitions(-DXMRIG_FEATURE_AVX2)
+endif()
@@ -61,11 +61,11 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
     add_definitions(/DHAVE_BUILTIN_CLEAR_CACHE)

 elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
-    set(CMAKE_C_FLAGS_RELEASE "/MT /O2 /Oi /DNDEBUG /GL")
-    set(CMAKE_CXX_FLAGS_RELEASE "/MT /O2 /Oi /DNDEBUG /GL")
+    set(CMAKE_C_FLAGS_RELEASE "/MP /MT /O2 /Oi /DNDEBUG /GL")
+    set(CMAKE_CXX_FLAGS_RELEASE "/MP /MT /O2 /Oi /DNDEBUG /GL")

-    set(CMAKE_C_FLAGS_RELWITHDEBINFO "/Ob1 /Zi /DRELWITHDEBINFO")
-    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Ob1 /Zi /DRELWITHDEBINFO")
+    set(CMAKE_C_FLAGS_RELWITHDEBINFO "/MP /Ob1 /Zi /DRELWITHDEBINFO")
+    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/MP /Ob1 /Zi /DRELWITHDEBINFO")

     add_definitions(/D_CRT_SECURE_NO_WARNINGS)
     add_definitions(/D_CRT_NONSTDC_NO_WARNINGS)
@@ -15,7 +15,7 @@ else()
         set(XMRIG_OS_ANDROID ON)
     elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
         set(XMRIG_OS_LINUX ON)
-    elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
+    elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR CMAKE_SYSTEM_NAME STREQUAL DragonFly)
         set(XMRIG_OS_FREEBSD ON)
     endif()
 endif()
@@ -76,7 +76,15 @@ if (WITH_RANDOMX)
         list(APPEND SOURCES_CRYPTO src/crypto/randomx/blake2/blake2b_sse41.c)

         if (CMAKE_C_COMPILER_ID MATCHES GNU OR CMAKE_C_COMPILER_ID MATCHES Clang)
-            set_source_files_properties(src/crypto/randomx/blake2/blake2b_sse41.c PROPERTIES COMPILE_FLAGS -msse4.1)
+            set_source_files_properties(src/crypto/randomx/blake2/blake2b_sse41.c PROPERTIES COMPILE_FLAGS "-Ofast -msse4.1")
         endif()
     endif()

+    if (WITH_AVX2)
+        list(APPEND SOURCES_CRYPTO src/crypto/randomx/blake2/avx2/blake2b_avx2.c)
+
+        if (CMAKE_C_COMPILER_ID MATCHES GNU OR CMAKE_C_COMPILER_ID MATCHES Clang)
+            set_source_files_properties(src/crypto/randomx/blake2/avx2/blake2b_avx2.c PROPERTIES COMPILE_FLAGS "-Ofast -mavx2")
+        endif()
+    endif()
+
@@ -1,8 +1,8 @@
 #!/bin/bash -e

 HWLOC_VERSION_MAJOR="2"
-HWLOC_VERSION_MINOR="7"
-HWLOC_VERSION_PATCH="1"
+HWLOC_VERSION_MINOR="9"
+HWLOC_VERSION_PATCH="0"

 HWLOC_VERSION="${HWLOC_VERSION_MAJOR}.${HWLOC_VERSION_MINOR}.${HWLOC_VERSION_PATCH}"
@@ -1,6 +1,6 @@
 #!/bin/bash -e

-OPENSSL_VERSION="1.1.1o"
+OPENSSL_VERSION="1.1.1s"

 mkdir -p deps
 mkdir -p deps/include
@@ -1,6 +1,6 @@
 #!/bin/bash -e

-OPENSSL_VERSION="3.0.3"
+OPENSSL_VERSION="3.0.7"

 mkdir -p deps
 mkdir -p deps/include
@@ -1,6 +1,6 @@
 #!/bin/bash -e

-UV_VERSION="1.44.1"
+UV_VERSION="1.44.2"

 mkdir -p deps
 mkdir -p deps/include
@@ -10,14 +10,24 @@ fi

 if grep -E 'AMD Ryzen|AMD EPYC' /proc/cpuinfo > /dev/null;
 then
-if grep "cpu family[[:space:]]:[[:space:]]25" /proc/cpuinfo > /dev/null;
+if grep "cpu family[[:space:]]\{1,\}:[[:space:]]25" /proc/cpuinfo > /dev/null;
 then
-echo "Detected Zen3 CPU"
-wrmsr -a 0xc0011020 0x4480000000000
-wrmsr -a 0xc0011021 0x1c000200000040
-wrmsr -a 0xc0011022 0xc000000401500000
-wrmsr -a 0xc001102b 0x2000cc14
-echo "MSR register values for Zen3 applied"
+if grep "model[[:space:]]\{1,\}:[[:space:]]97" /proc/cpuinfo > /dev/null;
+then
+echo "Detected Zen4 CPU"
+wrmsr -a 0xc0011020 0x4400000000000
+wrmsr -a 0xc0011021 0x4000000000040
+wrmsr -a 0xc0011022 0x8680000401570000
+wrmsr -a 0xc001102b 0x2040cc10
+echo "MSR register values for Zen4 applied"
+else
+echo "Detected Zen3 CPU"
+wrmsr -a 0xc0011020 0x4480000000000
+wrmsr -a 0xc0011021 0x1c000200000040
+wrmsr -a 0xc0011022 0xc000000401570000
+wrmsr -a 0xc001102b 0x2000cc10
+echo "MSR register values for Zen3 applied"
+fi
 else
 echo "Detected Zen1/Zen2 CPU"
 wrmsr -a 0xc0011020 0
src/3rdparty/argon2/CMakeLists.txt (vendored; 2 lines changed)
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.1)

 project(argon2 C)
 set(CMAKE_C_STANDARD 99)
src/3rdparty/hwloc/CMakeLists.txt (vendored; 2 lines changed)
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.1)
 project (hwloc C)

 include_directories(include)
src/3rdparty/hwloc/NEWS (vendored; 44 lines changed)
@@ -17,6 +17,50 @@ bug fixes (and other actions) for each version of hwloc since version
0.9.


Version 2.9.0
-------------
* Backends
  + Expose the memory size of CXL memory devices (Type 3) on Linux.
  + The LevelZero backend now reports the "XeLinkBandwidth" distance
    matrix between L0 devices (and subdevices) when available.
  + Add support for CUDA compute capability up to 9.0.
* Tools
  + lstopo now switches to console mode when its output is redirected.
    Graphical window mode may be forced back with --of window.
  + hwloc-calc now accepts "numa" in -H, and I/O subtypes such as "gpu"
    in -I and -N.


Version 2.8.0
-------------
* API
  + Add HWLOC_TOPOLOGY_FLAG_NO_DISTANCES, _NO_MEMATTRS and _NO_CPUKINDS
    to reduce the overhead when unneeded.
  + Add separate Read/Write Bandwidth/Latency memory attributes and
    implement them on Linux.
* Backends
  + NUMA nodes may now have a subtype such as DRAM, HBM, SPM, or NVM
    on heterogeneous memory platforms on Linux.
    - Add DAXType and DAXParent attributes on Linux to tell where a
      DAX device or its corresponding NUMA node come from (SPM for
      Specific-Purpose or NVM for Non-Volatile Memory).
  + Detect heterogeneous caches in hybrid CPUs on MacOS X,
    thanks to Paul Bone for the help.
  + Max frequencies are not ignored in Linux cpukinds anymore (they were
    ignored in hwloc 2.7.0), but they may be slightly adjusted to avoid
    reporting hybrid CPUs because Intel Turbo Boost Max 3.0.
    - See the documentation of environment variable HWLOC_CPUKINDS_MAXFREQ.
  + Hardwire the PCI locality of HPE Cray EX235a nodes.
* Tools
  + lstopo and other tools may now load Linux and x86 cpuid topology files
    from a tarball.
  + lstopo may now replace the P# and L# index prefixes with custom strings
    thanks to --os-index-prefix and --logical-index-prefix options.
* Misc
  + Add --disable-readme to avoid regenerating the top-level hwloc README
    file from the documentation.


Version 2.7.1
-------------
* Workaround crashes when virtual machines report incoherent x86 CPUID
src/3rdparty/hwloc/README (vendored; 2 lines changed)
@@ -78,7 +78,7 @@ debug and report issues.
 Questions may be sent to the users or developers mailing lists (https://
 www.open-mpi.org/community/lists/hwloc.php).

-There is also a #hwloc IRC channel on Freenode (irc.freenode.net).
+There is also a #hwloc IRC channel on Libera Chat (irc.libera.chat).
src/3rdparty/hwloc/VERSION (vendored; 8 lines changed)
@@ -8,8 +8,8 @@
 # Please update HWLOC_VERSION* in contrib/windows/hwloc_config.h too.

 major=2
-minor=7
-release=1
+minor=9
+release=0

 # greek is used for alpha or beta release tags. If it is non-empty,
 # it will be appended to the version number. It does not have to be
@@ -22,7 +22,7 @@ greek=

 # The date when this release was created

-date="Mar 20, 2022"
+date="Dec 14, 2022"

 # If snapshot=1, then use the value from snapshot_version as the
 # entire hwloc version (i.e., ignore major, minor, release, and
@@ -41,7 +41,7 @@ snapshot_version=${major}.${minor}.${release}${greek}-git
 # 2. Version numbers are described in the Libtool current:revision:age
 # format.

-libhwloc_so_version=20:3:5
+libhwloc_so_version=21:1:6
 libnetloc_so_version=0:0:0

 # Please also update the <TargetName> lines in contrib/windows/libhwloc.vcxproj
src/3rdparty/hwloc/include/hwloc.h (vendored; 29 lines changed)
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2009 CNRS
- * Copyright © 2009-2021 Inria. All rights reserved.
+ * Copyright © 2009-2022 Inria. All rights reserved.
  * Copyright © 2009-2012 Université Bordeaux
  * Copyright © 2009-2020 Cisco Systems, Inc. All rights reserved.
  * See COPYING in top-level directory.
@@ -93,7 +93,7 @@ extern "C" {
  * Two stable releases of the same series usually have the same ::HWLOC_API_VERSION
  * even if their HWLOC_VERSION are different.
  */
-#define HWLOC_API_VERSION 0x00020500
+#define HWLOC_API_VERSION 0x00020800

 /** \brief Indicate at runtime which hwloc API version was used at build time.
  *
@@ -971,7 +971,7 @@ HWLOC_DECLSPEC const char * hwloc_obj_type_string (hwloc_obj_type_t type) __hwlo
  *
  * If \p size is 0, \p string may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 HWLOC_DECLSPEC int hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_t size,
@@ -986,7 +986,7 @@ HWLOC_DECLSPEC int hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_
  *
  * If \p size is 0, \p string may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 HWLOC_DECLSPEC int hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size,
@@ -2060,7 +2060,26 @@ enum hwloc_topology_flags_e {
    * not change to due thread binding changes on Windows
    * (see ::HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING).
    */
-  HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING = (1UL<<6)
+  HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING = (1UL<<6),
+
+  /** \brief Ignore distances.
+   *
+   * Ignore distance information from the operating systems (and from XML)
+   * and hence do not use distances for grouping.
+   */
+  HWLOC_TOPOLOGY_FLAG_NO_DISTANCES = (1UL<<7),
+
+  /** \brief Ignore memory attributes.
+   *
+   * Ignore memory attribues from the operating systems (and from XML).
+   */
+  HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS = (1UL<<8),
+
+  /** \brief Ignore CPU Kinds.
+   *
+   * Ignore CPU kind information from the operating systems (and from XML).
+   */
+  HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS = (1UL<<9)
 };

 /** \brief Set OR'ed flags to non-yet-loaded topology.
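The three flags added above are meant to be OR'ed together and passed to hwloc_topology_set_flags() before hwloc_topology_load(). A minimal usage sketch (illustration only, not part of the vendored diff; error checking omitted):

    #include <hwloc.h>

    int main(void)
    {
        hwloc_topology_t topology;

        hwloc_topology_init(&topology);
        /* Skip distances, memory attributes and CPU kinds to reduce
         * discovery overhead when only the object hierarchy is needed. */
        hwloc_topology_set_flags(topology,
                                 HWLOC_TOPOLOGY_FLAG_NO_DISTANCES |
                                 HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS |
                                 HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS);
        hwloc_topology_load(topology);

        /* ... walk the topology ... */

        hwloc_topology_destroy(topology);
        return 0;
    }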
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2009 CNRS
- * Copyright © 2009-2021 Inria. All rights reserved.
+ * Copyright © 2009-2022 Inria. All rights reserved.
  * Copyright © 2009-2012 Université Bordeaux
  * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
  * See COPYING in top-level directory.
@@ -11,10 +11,10 @@
 #ifndef HWLOC_CONFIG_H
 #define HWLOC_CONFIG_H

-#define HWLOC_VERSION "2.7.1"
+#define HWLOC_VERSION "2.9.0"
 #define HWLOC_VERSION_MAJOR 2
-#define HWLOC_VERSION_MINOR 7
-#define HWLOC_VERSION_RELEASE 1
+#define HWLOC_VERSION_MINOR 9
+#define HWLOC_VERSION_RELEASE 0
 #define HWLOC_VERSION_GREEK ""

 #define __hwloc_restrict
src/3rdparty/hwloc/include/hwloc/bitmap.h (vendored; 14 lines changed)
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2009 CNRS
- * Copyright © 2009-2020 Inria. All rights reserved.
+ * Copyright © 2009-2022 Inria. All rights reserved.
  * Copyright © 2009-2012 Université Bordeaux
  * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
  * See COPYING in top-level directory.
@@ -112,7 +112,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_copy(hwloc_bitmap_t dst, hwloc_const_bitmap_t sr
  *
  * If \p buflen is 0, \p buf may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 HWLOC_DECLSPEC int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
@@ -137,7 +137,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_sscanf(hwloc_bitmap_t bitmap, const char * __hwl
  *
  * If \p buflen is 0, \p buf may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 HWLOC_DECLSPEC int hwloc_bitmap_list_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
@@ -161,7 +161,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_list_sscanf(hwloc_bitmap_t bitmap, const char *
  *
  * If \p buflen is 0, \p buf may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 HWLOC_DECLSPEC int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
@@ -357,11 +357,11 @@ HWLOC_DECLSPEC int hwloc_bitmap_last_unset(hwloc_const_bitmap_t bitmap) __hwloc_
  * The loop must start with hwloc_bitmap_foreach_begin() and end
  * with hwloc_bitmap_foreach_end() followed by a terminating ';'.
  *
- * \p index is the loop variable; it should be an unsigned int. The
- * first iteration will set \p index to the lowest index in the bitmap.
+ * \p id is the loop variable; it should be an unsigned int. The
+ * first iteration will set \p id to the lowest index in the bitmap.
  * Successive iterations will iterate through, in order, all remaining
  * indexes set in the bitmap. To be specific: each iteration will return a
- * value for \p index such that hwloc_bitmap_isset(bitmap, index) is true.
+ * value for \p id such that hwloc_bitmap_isset(bitmap, id) is true.
  *
  * The assert prevents the loop from being infinite if the bitmap is infinitely set.
  *
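For context, the documented foreach macros are used like this; a minimal sketch (illustration only, not part of the vendored diff):

    #include <hwloc.h>
    #include <stdio.h>

    /* Print every index set in a bitmap, e.g. the PUs a thread is bound to. */
    static void print_set_bits(hwloc_const_bitmap_t bitmap)
    {
        unsigned id;
        hwloc_bitmap_foreach_begin(id, bitmap)
            printf("bit %u is set\n", id);
        hwloc_bitmap_foreach_end();
    }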
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2009 CNRS
- * Copyright © 2009-2021 Inria. All rights reserved.
+ * Copyright © 2009-2022 Inria. All rights reserved.
  * Copyright © 2009-2012 Université Bordeaux
  * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
  * See COPYING in top-level directory.
@@ -55,7 +55,7 @@ hwloc_topology_insert_misc_object_by_parent(hwloc_topology_t topology, hwloc_obj
  *
  * If \p size is 0, \p string may safely be \c NULL.
  *
- * \return the number of character that were actually written if not truncating,
+ * \return the number of characters that were actually written if not truncating,
  * or that would have been written (not including the ending \\0).
  */
 static __hwloc_inline int
src/3rdparty/hwloc/include/hwloc/distances.h (vendored; 9 lines changed)
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2010-2021 Inria. All rights reserved.
+ * Copyright © 2010-2022 Inria. All rights reserved.
  * See COPYING in top-level directory.
  */

@@ -35,8 +35,8 @@ extern "C" {
  * from a core in another node.
  * The corresponding kind is ::HWLOC_DISTANCES_KIND_FROM_OS | ::HWLOC_DISTANCES_KIND_FROM_USER.
  * The name of this distances structure is "NUMALatency".
- * Others distance structures include and "XGMIBandwidth", "XGMIHops"
- * and "NVLinkBandwidth".
+ * Others distance structures include and "XGMIBandwidth", "XGMIHops",
+ * "XeLinkBandwidth" and "NVLinkBandwidth".
  *
  * The matrix may also contain bandwidths between random sets of objects,
  * possibly provided by the user, as specified in the \p kind attribute.
@@ -160,7 +160,8 @@ hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
  * Usually only one distances structure may match a given name.
  *
  * The name of the most common structure is "NUMALatency".
- * Others include "XGMIBandwidth", "XGMIHops" and "NVLinkBandwidth".
+ * Others include "XGMIBandwidth", "XGMIHops", "XeLinkBandwidth",
+ * and "NVLinkBandwidth".
  */
 HWLOC_DECLSPEC int
 hwloc_distances_get_by_name(hwloc_topology_t topology, const char *name,
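A minimal sketch of querying a distance matrix by one of the names listed above (illustration only, not part of the vendored diff; assumes the topology is already loaded):

    #include <hwloc.h>
    #include <stdio.h>

    /* Look up the "NUMALatency" matrix by name and print its size. */
    static void print_numa_latency_size(hwloc_topology_t topology)
    {
        struct hwloc_distances_s *dist[1];
        unsigned nr = 1;

        if (hwloc_distances_get_by_name(topology, "NUMALatency", &nr, dist, 0) < 0 || nr == 0)
            return; /* no such matrix on this machine */

        printf("NUMALatency matrix covers %u objects\n", dist[0]->nbobjs);
        hwloc_distances_release(topology, dist[0]);
    }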
src/3rdparty/hwloc/include/hwloc/helper.h (vendored; 5 lines changed)
@@ -1,6 +1,6 @@
 /*
  * Copyright © 2009 CNRS
- * Copyright © 2009-2021 Inria. All rights reserved.
+ * Copyright © 2009-2022 Inria. All rights reserved.
  * Copyright © 2009-2012 Université Bordeaux
  * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
  * See COPYING in top-level directory.
@@ -886,9 +886,6 @@ enum hwloc_distrib_flags_e {
  * \p flags should be 0 or a OR'ed set of ::hwloc_distrib_flags_e.
  *
  * \note This function requires the \p roots objects to have a CPU set.
- *
- * \note This function replaces the now deprecated hwloc_distribute()
- * and hwloc_distributev() functions.
  */
 static __hwloc_inline int
 hwloc_distrib(hwloc_topology_t topology,
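For context, a minimal hwloc_distrib() usage sketch (illustration only, not part of the vendored diff; the helper fills an array of caller-allocated cpusets with roughly balanced locations under the given roots):

    #include <hwloc.h>
    #include <limits.h>

    /* Spread 4 work items over the whole machine. */
    static void distribute_four(hwloc_topology_t topology)
    {
        hwloc_obj_t root = hwloc_get_root_obj(topology);
        hwloc_cpuset_t sets[4];
        unsigned i;

        for (i = 0; i < 4; i++)
            sets[i] = hwloc_bitmap_alloc();

        /* Fill sets[0..3] with balanced CPU sets below the root object. */
        hwloc_distrib(topology, &root, 1, sets, 4, INT_MAX, 0);

        /* ... bind one thread or process per set ... */

        for (i = 0; i < 4; i++)
            hwloc_bitmap_free(sets[i]);
    }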
src/3rdparty/hwloc/include/hwloc/intel-mic.h (vendored; 136 lines changed)
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
* Copyright © 2013-2016 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
/** \file
|
||||
* \brief Macros to help interaction between hwloc and Intel Xeon Phi (MIC).
|
||||
*
|
||||
* Applications that use both hwloc and Intel Xeon Phi (MIC) may want to
|
||||
* include this file so as to get topology information for MIC devices.
|
||||
*/
|
||||
|
||||
#ifndef HWLOC_INTEL_MIC_H
|
||||
#define HWLOC_INTEL_MIC_H
|
||||
|
||||
#include "hwloc.h"
|
||||
#include "hwloc/autogen/config.h"
|
||||
#include "hwloc/helper.h"
|
||||
|
||||
#ifdef HWLOC_LINUX_SYS
|
||||
#include "hwloc/linux.h"
|
||||
|
||||
#include <dirent.h>
|
||||
#include <string.h>
|
||||
#endif
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/** \defgroup hwlocality_intel_mic Interoperability with Intel Xeon Phi (MIC)
|
||||
*
|
||||
* This interface offers ways to retrieve topology information about
|
||||
* Intel Xeon Phi (MIC) devices.
|
||||
*
|
||||
* @{
|
||||
*/
|
||||
|
||||
/** \brief Get the CPU set of logical processors that are physically
|
||||
* close to MIC device whose index is \p idx.
|
||||
*
|
||||
* Return the CPU set describing the locality of the MIC device whose index is \p idx.
|
||||
*
|
||||
* Topology \p topology and device index \p idx must match the local machine.
|
||||
* I/O devices detection is not needed in the topology.
|
||||
*
|
||||
* The function only returns the locality of the device.
|
||||
* If more information about the device is needed, OS objects should
|
||||
* be used instead, see hwloc_intel_mic_get_device_osdev_by_index().
|
||||
*
|
||||
* This function is currently only implemented in a meaningful way for
|
||||
* Linux; other systems will simply get a full cpuset.
|
||||
*/
|
||||
static __hwloc_inline int
|
||||
hwloc_intel_mic_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
|
||||
int idx __hwloc_attribute_unused,
|
||||
hwloc_cpuset_t set)
|
||||
{
|
||||
#ifdef HWLOC_LINUX_SYS
|
||||
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
|
||||
#define HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX 128
|
||||
char path[HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX];
|
||||
DIR *sysdir = NULL;
|
||||
struct dirent *dirent;
|
||||
unsigned pcibus, pcidev, pcifunc;
|
||||
|
||||
if (!hwloc_topology_is_thissystem(topology)) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
sprintf(path, "/sys/class/mic/mic%d", idx);
|
||||
sysdir = opendir(path);
|
||||
if (!sysdir)
|
||||
return -1;
|
||||
|
||||
while ((dirent = readdir(sysdir)) != NULL) {
|
||||
if (sscanf(dirent->d_name, "pci_%02x:%02x.%02x", &pcibus, &pcidev, &pcifunc) == 3) {
|
||||
sprintf(path, "/sys/class/mic/mic%d/pci_%02x:%02x.%02x/local_cpus", idx, pcibus, pcidev, pcifunc);
|
||||
if (hwloc_linux_read_path_as_cpumask(path, set) < 0
|
||||
|| hwloc_bitmap_iszero(set))
|
||||
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
closedir(sysdir);
|
||||
#else
|
||||
/* Non-Linux systems simply get a full cpuset */
|
||||
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** \brief Get the hwloc OS device object corresponding to the
|
||||
* MIC device for the given index.
|
||||
*
|
||||
* Return the OS device object describing the MIC device whose index is \p idx.
|
||||
* Return NULL if there is none.
|
||||
*
|
||||
* The topology \p topology does not necessarily have to match the current
|
||||
* machine. For instance the topology may be an XML import of a remote host.
|
||||
* I/O devices detection must be enabled in the topology.
|
||||
*
|
||||
* \note The corresponding PCI device object can be obtained by looking
|
||||
* at the OS device parent object.
|
||||
*/
|
||||
static __hwloc_inline hwloc_obj_t
|
||||
hwloc_intel_mic_get_device_osdev_by_index(hwloc_topology_t topology,
|
||||
unsigned idx)
|
||||
{
|
||||
hwloc_obj_t osdev = NULL;
|
||||
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
|
||||
if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type
|
||||
&& osdev->name
|
||||
&& !strncmp("mic", osdev->name, 3)
|
||||
&& atoi(osdev->name + 3) == (int) idx)
|
||||
return osdev;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/** @} */
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* HWLOC_INTEL_MIC_H */
|
||||
src/3rdparty/hwloc/include/hwloc/memattrs.h (vendored; 85 lines changed)
@@ -54,6 +54,8 @@ extern "C" {
|
||||
* Attribute values for these nodes, if any, may then be obtained with
|
||||
* hwloc_memattr_get_value() and manually compared with the desired criteria.
|
||||
*
|
||||
* \sa An example is available in doc/examples/memory-attributes.c in the source tree.
|
||||
*
|
||||
* \note The API also supports specific objects as initiator,
|
||||
* but it is currently not used internally by hwloc.
|
||||
* Users may for instance use it to provide custom performance
|
||||
@@ -65,19 +67,19 @@ extern "C" {
|
||||
|
||||
/** \brief Memory node attributes. */
|
||||
enum hwloc_memattr_id_e {
|
||||
/** \brief "Capacity".
|
||||
* The capacity is returned in bytes
|
||||
* (local_memory attribute in objects).
|
||||
/** \brief
|
||||
* The \"Capacity\" is returned in bytes (local_memory attribute in objects).
|
||||
*
|
||||
* Best capacity nodes are nodes with <b>higher capacity</b>.
|
||||
*
|
||||
* No initiator is involved when looking at this attribute.
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_CAPACITY = 0,
|
||||
|
||||
/** \brief "Locality".
|
||||
* The locality is returned as the number of PUs in that locality
|
||||
/** \brief
|
||||
* The \"Locality\" is returned as the number of PUs in that locality
|
||||
* (e.g. the weight of its cpuset).
|
||||
*
|
||||
* Best locality nodes are nodes with <b>smaller locality</b>
|
||||
@@ -87,26 +89,87 @@ enum hwloc_memattr_id_e {
|
||||
*
|
||||
* No initiator is involved when looking at this attribute.
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_LOCALITY = 1,
|
||||
|
||||
/** \brief "Bandwidth".
|
||||
* The bandwidth is returned in MiB/s, as seen from the given initiator location.
|
||||
/** \brief
|
||||
* The \"Bandwidth\" is returned in MiB/s, as seen from the given initiator location.
|
||||
*
|
||||
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
*
|
||||
* This is the average bandwidth for read and write accesses. If the platform
|
||||
* provides individual read and write bandwidths but no explicit average value,
|
||||
* hwloc computes and returns the average.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_BANDWIDTH = 2,
|
||||
|
||||
/** \brief "Latency".
|
||||
* The latency is returned as nanoseconds, as seen from the given initiator location.
|
||||
/** \brief
|
||||
* The \"ReadBandwidth\" is returned in MiB/s, as seen from the given initiator location.
|
||||
*
|
||||
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_READ_BANDWIDTH = 4,
|
||||
|
||||
/** \brief
|
||||
* The \"WriteBandwidth\" is returned in MiB/s, as seen from the given initiator location.
|
||||
*
|
||||
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_WRITE_BANDWIDTH = 5,
|
||||
|
||||
/** \brief
|
||||
* The \"Latency\" is returned as nanoseconds, as seen from the given initiator location.
|
||||
*
|
||||
* Best latency nodes are nodes with <b>smaller latency</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
*
|
||||
* This is the average latency for read and write accesses. If the platform
|
||||
* provides individual read and write latencies but no explicit average value,
|
||||
* hwloc computes and returns the average.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_LATENCY = 3
|
||||
HWLOC_MEMATTR_ID_LATENCY = 3,
|
||||
|
||||
/* TODO read vs write, persistence? */
|
||||
/** \brief
|
||||
* The \"ReadLatency\" is returned as nanoseconds, as seen from the given initiator location.
|
||||
*
|
||||
* Best latency nodes are nodes with <b>smaller latency</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_READ_LATENCY = 6,
|
||||
|
||||
/** \brief
|
||||
* The \"WriteLatency\" is returned as nanoseconds, as seen from the given initiator location.
|
||||
*
|
||||
* Best latency nodes are nodes with <b>smaller latency</b>.
|
||||
*
|
||||
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
|
||||
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
|
||||
* \hideinitializer
|
||||
*/
|
||||
HWLOC_MEMATTR_ID_WRITE_LATENCY = 7,
|
||||
|
||||
/* TODO persistence? */
|
||||
|
||||
HWLOC_MEMATTR_ID_MAX /**< \private Sentinel value */
|
||||
};
|
||||
|
||||
/** \brief A memory attribute identifier.
|
||||
|
||||
src/3rdparty/hwloc/include/hwloc/plugins.h (vendored; 9 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2013-2021 Inria. All rights reserved.
|
||||
* Copyright © 2013-2022 Inria. All rights reserved.
|
||||
* Copyright © 2016 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
@@ -338,9 +338,15 @@ struct hwloc_component {
|
||||
* This function return 1 by default (show critical only),
|
||||
* 0 in lstopo (show all),
|
||||
* or anything set in HWLOC_HIDE_ERRORS in the environment.
|
||||
*
|
||||
* Use macros HWLOC_SHOW_CRITICAL_ERRORS() and HWLOC_SHOW_ALL_ERRORS()
|
||||
* for clarity.
|
||||
*/
|
||||
HWLOC_DECLSPEC int hwloc_hide_errors(void);
|
||||
|
||||
#define HWLOC_SHOW_CRITICAL_ERRORS() (hwloc_hide_errors() < 2)
|
||||
#define HWLOC_SHOW_ALL_ERRORS() (hwloc_hide_errors() == 0)
|
||||
|
||||
/** \brief Add an object to the topology.
|
||||
*
|
||||
* Insert new object \p obj in the topology starting under existing object \p root
|
||||
@@ -501,6 +507,7 @@ hwloc_filter_check_pcidev_subtype_important(unsigned classid)
|
||||
|| baseclass == 0x0b /* PCI_BASE_CLASS_PROCESSOR */
|
||||
|| classid == 0x0c04 /* PCI_CLASS_SERIAL_FIBER */
|
||||
|| classid == 0x0c06 /* PCI_CLASS_SERIAL_INFINIBAND */
|
||||
|| classid == 0x0502 /* PCI_CLASS_MEMORY_CXL */
|
||||
|| baseclass == 0x06 /* PCI_BASE_CLASS_BRIDGE with non-PCI downstream. the core will drop the useless ones later */
|
||||
|| baseclass == 0x12 /* Processing Accelerators */);
|
||||
}
|
||||
|
||||
src/3rdparty/hwloc/include/hwloc/rename.h (vendored; 11 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright © 2010-2021 Inria. All rights reserved.
|
||||
* Copyright © 2010-2022 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
@@ -123,6 +123,9 @@ extern "C" {
|
||||
#define HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING)
|
||||
#define HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING)
|
||||
#define HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_DONT_CHANGE_BINDING)
|
||||
#define HWLOC_TOPOLOGY_FLAG_NO_DISTANCES HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_DISTANCES)
|
||||
#define HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_MEMATTRS)
|
||||
#define HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_CPUKINDS)
|
||||
|
||||
#define hwloc_topology_set_pid HWLOC_NAME(topology_set_pid)
|
||||
#define hwloc_topology_set_synthetic HWLOC_NAME(topology_set_synthetic)
|
||||
@@ -381,6 +384,11 @@ extern "C" {
|
||||
#define HWLOC_MEMATTR_ID_LOCALITY HWLOC_NAME_CAPS(MEMATTR_ID_LOCALITY)
|
||||
#define HWLOC_MEMATTR_ID_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_BANDWIDTH)
|
||||
#define HWLOC_MEMATTR_ID_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_LATENCY)
|
||||
#define HWLOC_MEMATTR_ID_READ_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_READ_BANDWIDTH)
|
||||
#define HWLOC_MEMATTR_ID_WRITE_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_WRITE_BANDWIDTH)
|
||||
#define HWLOC_MEMATTR_ID_READ_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_READ_LATENCY)
|
||||
#define HWLOC_MEMATTR_ID_WRITE_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_WRITE_LATENCY)
|
||||
#define HWLOC_MEMATTR_ID_MAX HWLOC_NAME_CAPS(MEMATTR_ID_MAX)
|
||||
|
||||
#define hwloc_memattr_id_t HWLOC_NAME(memattr_id_t)
|
||||
#define hwloc_memattr_get_by_name HWLOC_NAME(memattr_get_by_name)
|
||||
@@ -862,6 +870,7 @@ extern "C" {
|
||||
#define hwloc_internal_memattrs_destroy HWLOC_NAME(internal_memattrs_destroy)
|
||||
#define hwloc_internal_memattrs_need_refresh HWLOC_NAME(internal_memattrs_need_refresh)
|
||||
#define hwloc_internal_memattrs_refresh HWLOC_NAME(internal_memattrs_refresh)
|
||||
#define hwloc_internal_memattrs_guess_memory_tiers HWLOC_NAME(internal_memattrs_guess_memory_tiers)
|
||||
|
||||
#define hwloc_internal_cpukind_s HWLOC_NAME(internal_cpukind_s)
|
||||
#define hwloc_internal_cpukinds_init HWLOC_NAME(internal_cpukinds_init)
|
||||
|
||||
src/3rdparty/hwloc/include/private/private.h (vendored; 4 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 CNRS
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2009-2012, 2020 Université Bordeaux
|
||||
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
|
||||
*
|
||||
@@ -259,6 +259,7 @@ struct hwloc_topology {
|
||||
unsigned bus_first, bus_last;
|
||||
hwloc_bitmap_t cpuset;
|
||||
} * pci_forced_locality;
|
||||
hwloc_uint64_t pci_locality_quirks;
|
||||
|
||||
/* component blacklisting */
|
||||
unsigned nr_blacklisted_components;
|
||||
@@ -419,6 +420,7 @@ extern void hwloc_internal_memattrs_need_refresh(hwloc_topology_t topology);
|
||||
extern void hwloc_internal_memattrs_refresh(hwloc_topology_t topology);
|
||||
extern int hwloc_internal_memattrs_dup(hwloc_topology_t new, hwloc_topology_t old);
|
||||
extern int hwloc_internal_memattr_set_value(hwloc_topology_t topology, hwloc_memattr_id_t id, hwloc_obj_type_t target_type, hwloc_uint64_t target_gp_index, unsigned target_os_index, struct hwloc_internal_location_s *initiator, hwloc_uint64_t value);
|
||||
extern int hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology);
|
||||
|
||||
extern void hwloc_internal_cpukinds_init(hwloc_topology_t topology);
|
||||
extern int hwloc_internal_cpukinds_rank(hwloc_topology_t topology);
|
||||
|
||||
src/3rdparty/hwloc/include/private/windows.h (vendored; 13 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 Université Bordeaux
|
||||
* Copyright © 2020 Inria. All rights reserved.
|
||||
* Copyright © 2020-2022 Inria. All rights reserved.
|
||||
*
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
@@ -8,13 +8,22 @@
|
||||
#ifndef HWLOC_PRIVATE_WINDOWS_H
|
||||
#define HWLOC_PRIVATE_WINDOWS_H
|
||||
|
||||
#ifndef _ANONYMOUS_UNION
|
||||
#ifdef __GNUC__
|
||||
#define _ANONYMOUS_UNION __extension__
|
||||
#define _ANONYMOUS_STRUCT __extension__
|
||||
#else
|
||||
#define _ANONYMOUS_UNION
|
||||
#endif /* __GNUC__ */
|
||||
#endif /* _ANONYMOUS_UNION */
|
||||
|
||||
#ifndef _ANONYMOUS_STRUCT
|
||||
#ifdef __GNUC__
|
||||
#define _ANONYMOUS_STRUCT __extension__
|
||||
#else
|
||||
#define _ANONYMOUS_STRUCT
|
||||
#endif /* __GNUC__ */
|
||||
#endif /* _ANONYMOUS_STRUCT */
|
||||
|
||||
#define DUMMYUNIONNAME
|
||||
#define DUMMYSTRUCTNAME
|
||||
|
||||
|
||||
src/3rdparty/hwloc/src/components.c (vendored; 14 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2012 Université Bordeaux
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
@@ -386,7 +386,7 @@ hwloc_disc_component_register(struct hwloc_disc_component *component,
|
||||
|HWLOC_DISC_PHASE_MISC
|
||||
|HWLOC_DISC_PHASE_ANNOTATE
|
||||
|HWLOC_DISC_PHASE_TWEAK))) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Cannot register discovery component `%s' with invalid phases 0x%x\n",
|
||||
component->name, component->phases);
|
||||
return -1;
|
||||
@@ -476,7 +476,7 @@ hwloc_components_init(void)
|
||||
/* hwloc_static_components is created by configure in static-components.h */
|
||||
for(i=0; NULL != hwloc_static_components[i]; i++) {
|
||||
if (hwloc_static_components[i]->flags) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Ignoring static component with invalid flags %lx\n",
|
||||
hwloc_static_components[i]->flags);
|
||||
continue;
|
||||
@@ -505,7 +505,7 @@ hwloc_components_init(void)
|
||||
#ifdef HWLOC_HAVE_PLUGINS
|
||||
for(desc = hwloc_plugins; NULL != desc; desc = desc->next) {
|
||||
if (desc->component->flags) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Ignoring plugin `%s' component with invalid flags %lx\n",
|
||||
desc->name, desc->component->flags);
|
||||
continue;
|
||||
@@ -738,7 +738,7 @@ hwloc_disc_component_try_enable(struct hwloc_topology *topology,
|
||||
backend = comp->instantiate(topology, comp, topology->backend_excluded_phases | blacklisted_phases,
|
||||
NULL, NULL, NULL);
|
||||
if (!backend) {
|
||||
if (hwloc_components_verbose || (envvar_forced && hwloc_hide_errors() < 2))
|
||||
if (hwloc_components_verbose || (envvar_forced && HWLOC_SHOW_CRITICAL_ERRORS()))
|
||||
fprintf(stderr, "hwloc: Failed to instantiate discovery component `%s'\n", comp->name);
|
||||
return -1;
|
||||
}
|
||||
@@ -835,7 +835,7 @@ hwloc_disc_components_enable_others(struct hwloc_topology *topology)
|
||||
if (comp->phases & ~blacklisted_phases)
|
||||
hwloc_disc_component_try_enable(topology, comp, 1 /* envvar forced */, blacklisted_phases);
|
||||
} else {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Cannot find discovery component `%s'\n", name);
|
||||
}
|
||||
|
||||
@@ -967,7 +967,7 @@ hwloc_backend_enable(struct hwloc_backend *backend)
|
||||
|
||||
/* check backend flags */
|
||||
if (backend->flags) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Cannot enable discovery component `%s' phases 0x%x with unknown flags %lx\n",
|
||||
backend->component->name, backend->component->phases, backend->flags);
|
||||
return -1;
|
||||
|
||||
src/3rdparty/hwloc/src/cpukinds.c (vendored; 4 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2020-2021 Inria. All rights reserved.
|
||||
* Copyright © 2020-2022 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
@@ -504,7 +504,7 @@ hwloc_internal_cpukinds_rank(struct hwloc_topology *topology)
|
||||
heuristics = HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY;
|
||||
else if (!strcmp(env, "no_forced_efficiency"))
|
||||
heuristics = HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY;
|
||||
else if (hwloc_hide_errors() < 2)
|
||||
else if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Failed to recognize HWLOC_CPUKINDS_RANKING value %s\n", env);
|
||||
}
|
||||
|
||||
|
||||
src/3rdparty/hwloc/src/diff.c (vendored; 4 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2013-2020 Inria. All rights reserved.
|
||||
* Copyright © 2013-2022 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
@@ -218,7 +218,7 @@ hwloc_diff_trees(hwloc_topology_t topo1, hwloc_obj_t obj1,
|
||||
struct hwloc_info_s *info1 = &obj1->infos[i], *info2 = &obj2->infos[i];
|
||||
if (strcmp(info1->name, info2->name))
|
||||
goto out_too_complex;
|
||||
if (strcmp(obj1->infos[i].value, obj2->infos[i].value)) {
|
||||
if (strcmp(info1->value, info2->value)) {
|
||||
err = hwloc_append_diff_obj_attr_string(obj1,
|
||||
HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO,
|
||||
info1->name,
|
||||
|
||||
src/3rdparty/hwloc/src/distances.c (vendored; 4 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2010-2021 Inria. All rights reserved.
|
||||
* Copyright © 2010-2022 Inria. All rights reserved.
|
||||
* Copyright © 2011-2012 Université Bordeaux
|
||||
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
@@ -860,7 +860,7 @@ struct hwloc_distances_container_s {
|
||||
struct hwloc_distances_s distances;
|
||||
};
|
||||
|
||||
#define HWLOC_DISTANCES_CONTAINER_OFFSET ((char*)&((struct hwloc_distances_container_s*)NULL)->distances - (char*)NULL)
|
||||
#define HWLOC_DISTANCES_CONTAINER_OFFSET ((uintptr_t)(&((struct hwloc_distances_container_s*)NULL)->distances) - (uintptr_t)NULL)
|
||||
#define HWLOC_DISTANCES_CONTAINER(_d) (struct hwloc_distances_container_s *) ( ((char*)_d) - HWLOC_DISTANCES_CONTAINER_OFFSET )
|
||||
|
||||
static struct hwloc_internal_distances_s *
|
||||
|
||||
src/3rdparty/hwloc/src/memattrs.c (vendored; 243 lines changed)
@@ -1,11 +1,12 @@
|
||||
/*
|
||||
* Copyright © 2020-2021 Inria. All rights reserved.
|
||||
* Copyright © 2020-2022 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
#include "private/autogen/config.h"
|
||||
#include "hwloc.h"
|
||||
#include "private/private.h"
|
||||
#include "private/debug.h"
|
||||
|
||||
|
||||
/*****************************
|
||||
@@ -49,36 +50,51 @@ hwloc__setup_memattr(struct hwloc_internal_memattr_s *imattr,
|
||||
void
|
||||
hwloc_internal_memattrs_prepare(struct hwloc_topology *topology)
|
||||
{
|
||||
#define NR_DEFAULT_MEMATTRS 4
|
||||
topology->memattrs = malloc(NR_DEFAULT_MEMATTRS * sizeof(*topology->memattrs));
|
||||
topology->memattrs = malloc(HWLOC_MEMATTR_ID_MAX * sizeof(*topology->memattrs));
|
||||
if (!topology->memattrs)
|
||||
return;
|
||||
|
||||
assert(HWLOC_MEMATTR_ID_CAPACITY < NR_DEFAULT_MEMATTRS);
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_CAPACITY],
|
||||
(char *) "Capacity",
|
||||
HWLOC_MEMATTR_FLAG_HIGHER_FIRST,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);
|
||||
|
||||
assert(HWLOC_MEMATTR_ID_LOCALITY < NR_DEFAULT_MEMATTRS);
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LOCALITY],
|
||||
(char *) "Locality",
|
||||
HWLOC_MEMATTR_FLAG_LOWER_FIRST,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);
|
||||
|
||||
assert(HWLOC_MEMATTR_ID_BANDWIDTH < NR_DEFAULT_MEMATTRS);
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_BANDWIDTH],
|
||||
(char *) "Bandwidth",
|
||||
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
assert(HWLOC_MEMATTR_ID_LATENCY < NR_DEFAULT_MEMATTRS);
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_READ_BANDWIDTH],
|
||||
(char *) "ReadBandwidth",
|
||||
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_WRITE_BANDWIDTH],
|
||||
(char *) "WriteBandwidth",
|
||||
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LATENCY],
|
||||
(char *) "Latency",
|
||||
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
topology->nr_memattrs = NR_DEFAULT_MEMATTRS;
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_READ_LATENCY],
|
||||
(char *) "ReadLatency",
|
||||
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_WRITE_LATENCY],
|
||||
(char *) "WriteLatency",
|
||||
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
|
||||
HWLOC_IMATTR_FLAG_STATIC_NAME);
|
||||
|
||||
topology->nr_memattrs = HWLOC_MEMATTR_ID_MAX;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1197,3 +1213,214 @@ hwloc_get_local_numanode_objs(hwloc_topology_t topology,
|
||||
*nrp = i;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**************************************
|
||||
* Using memattrs to identify HBM/DRAM
|
||||
*/
|
||||
|
||||
struct hwloc_memory_tier_s {
|
||||
hwloc_obj_t node;
|
||||
uint64_t local_bw;
|
||||
enum hwloc_memory_tier_type_e {
|
||||
/* warning the order is important for guess_memory_tiers() after qsort() */
|
||||
HWLOC_MEMORY_TIER_UNKNOWN,
|
||||
HWLOC_MEMORY_TIER_DRAM,
|
||||
HWLOC_MEMORY_TIER_HBM,
|
||||
HWLOC_MEMORY_TIER_SPM, /* Specific-Purpose Memory is usually HBM, we'll use BW to confirm */
|
||||
HWLOC_MEMORY_TIER_NVM,
|
||||
HWLOC_MEMORY_TIER_GPU,
|
||||
} type;
|
||||
};
|
||||
|
||||
static int compare_tiers(const void *_a, const void *_b)
|
||||
{
|
||||
const struct hwloc_memory_tier_s *a = _a, *b = _b;
|
||||
/* sort by type of tier first */
|
||||
if (a->type != b->type)
|
||||
return a->type - b->type;
|
||||
/* then by bandwidth */
|
||||
if (a->local_bw > b->local_bw)
|
||||
return -1;
|
||||
else if (a->local_bw < b->local_bw)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology)
|
||||
{
|
||||
struct hwloc_internal_memattr_s *imattr;
|
||||
struct hwloc_memory_tier_s *tiers;
|
||||
unsigned i, j, n;
|
||||
const char *env;
|
||||
int spm_is_hbm = -1; /* -1 will guess from BW, 0 no, 1 forced */
|
||||
int mark_dram = 1;
|
||||
unsigned first_spm, first_nvm;
|
||||
hwloc_uint64_t max_unknown_bw, min_spm_bw;
|
||||
|
||||
env = getenv("HWLOC_MEMTIERS_GUESS");
|
||||
if (env) {
|
||||
if (!strcmp(env, "none")) {
|
||||
return 0;
|
||||
} else if (!strcmp(env, "default")) {
|
||||
/* nothing */
|
||||
} else if (!strcmp(env, "spm_is_hbm")) {
|
||||
hwloc_debug("Assuming SPM-tier is HBM, ignore bandwidth\n");
|
||||
spm_is_hbm = 1;
|
||||
} else if (HWLOC_SHOW_CRITICAL_ERRORS()) {
|
||||
fprintf(stderr, "hwloc: Failed to recognize HWLOC_MEMTIERS_GUESS value %s\n", env);
|
||||
}
|
||||
}
|
||||
|
||||
imattr = &topology->memattrs[HWLOC_MEMATTR_ID_BANDWIDTH];
|
||||
|
||||
if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
|
||||
hwloc__imattr_refresh(topology, imattr);
|
||||
|
||||
n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
|
||||
assert(n);
|
||||
|
||||
tiers = malloc(n * sizeof(*tiers));
|
||||
if (!tiers)
|
||||
return -1;
|
||||
|
||||
for(i=0; i<n; i++) {
|
||||
hwloc_obj_t node;
|
||||
const char *daxtype;
|
||||
struct hwloc_internal_location_s iloc;
|
||||
struct hwloc_internal_memattr_target_s *imtg = NULL;
|
||||
struct hwloc_internal_memattr_initiator_s *imi;
|
||||
|
||||
node = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
|
||||
assert(node);
|
||||
tiers[i].node = node;
|
||||
|
||||
/* defaults */
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_UNKNOWN;
|
||||
tiers[i].local_bw = 0; /* unknown */
|
||||
|
||||
daxtype = hwloc_obj_get_info_by_name(node, "DAXType");
|
||||
/* mark NVM, SPM and GPU nodes */
|
||||
if (daxtype && !strcmp(daxtype, "NVM"))
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_NVM;
|
||||
if (daxtype && !strcmp(daxtype, "SPM"))
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_SPM;
|
||||
if (node->subtype && !strcmp(node->subtype, "GPUMemory"))
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_GPU;
|
||||
|
||||
if (spm_is_hbm == -1) {
|
||||
for(j=0; j<imattr->nr_targets; j++)
|
||||
if (imattr->targets[j].obj == node) {
|
||||
imtg = &imattr->targets[j];
|
||||
break;
|
||||
}
|
||||
if (imtg && !hwloc_bitmap_iszero(node->cpuset)) {
|
||||
iloc.type = HWLOC_LOCATION_TYPE_CPUSET;
|
||||
iloc.location.cpuset = node->cpuset;
|
||||
imi = hwloc__memattr_target_get_initiator(imtg, &iloc, 0);
|
||||
if (imi)
|
||||
tiers[i].local_bw = imi->value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* sort tiers */
|
||||
qsort(tiers, n, sizeof(*tiers), compare_tiers);
|
||||
hwloc_debug("Sorting memory tiers...\n");
|
||||
for(i=0; i<n; i++)
|
||||
hwloc_debug(" tier %u = node L#%u P#%u with tier type %d and local BW #%llu\n",
|
||||
i,
|
||||
tiers[i].node->logical_index, tiers[i].node->os_index,
|
||||
tiers[i].type, (unsigned long long) tiers[i].local_bw);
|
||||
|
||||
/* now we have UNKNOWN tiers (sorted by BW), then SPM tiers (sorted by BW), then NVM, then GPU */
|
||||
|
||||
/* iterate over UNKNOWN tiers, and find their BW */
|
||||
for(i=0; i<n; i++) {
|
||||
if (tiers[i].type > HWLOC_MEMORY_TIER_UNKNOWN)
|
||||
break;
|
||||
}
|
||||
first_spm = i;
|
||||
/* get max BW from first */
|
||||
if (first_spm > 0)
|
||||
max_unknown_bw = tiers[0].local_bw;
|
||||
else
|
||||
max_unknown_bw = 0;
|
||||
|
||||
/* there are no DRAM or HBM tiers yet */
|
||||
|
||||
/* iterate over SPM tiers, and find their BW */
|
||||
for(i=first_spm; i<n; i++) {
|
||||
if (tiers[i].type > HWLOC_MEMORY_TIER_SPM)
|
||||
break;
|
||||
}
|
||||
first_nvm = i;
|
||||
/* get min BW from last */
|
||||
if (first_nvm > first_spm)
|
||||
min_spm_bw = tiers[first_nvm-1].local_bw;
|
||||
else
|
||||
min_spm_bw = 0;
|
||||
|
||||
/* FIXME: if there's more than 10% between some sets of nodes inside a tier, split it? */
|
||||
/* FIXME: if there are cpuset-intersecting nodes in same tier, abort? */
|
||||
|
||||
if (spm_is_hbm == -1) {
|
||||
/* if we have BW for all SPM and UNKNOWN
|
||||
* and all SPM BW are 2x superior to all UNKNOWN BW
|
||||
*/
|
||||
hwloc_debug("UNKNOWN-memory-tier max bandwidth %llu\n", (unsigned long long) max_unknown_bw);
|
||||
hwloc_debug("SPM-memory-tier min bandwidth %llu\n", (unsigned long long) min_spm_bw);
|
||||
if (max_unknown_bw > 0 && min_spm_bw > 0 && max_unknown_bw*2 < min_spm_bw) {
|
||||
hwloc_debug("assuming SPM means HBM and !SPM means DRAM since bandwidths are very different\n");
|
||||
spm_is_hbm = 1;
|
||||
} else {
|
||||
hwloc_debug("cannot assume SPM means HBM\n");
|
||||
spm_is_hbm = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (spm_is_hbm) {
|
||||
for(i=0; i<first_spm; i++)
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_DRAM;
|
||||
for(i=first_spm; i<first_nvm; i++)
|
||||
tiers[i].type = HWLOC_MEMORY_TIER_HBM;
|
||||
}
|
||||
|
||||
if (first_spm == n)
|
||||
mark_dram = 0;
|
||||
|
||||
/* now apply subtypes */
|
||||
for(i=0; i<n; i++) {
|
||||
const char *type = NULL;
|
||||
if (tiers[i].node->subtype) /* don't overwrite the existing subtype */
|
||||
continue;
|
||||
switch (tiers[i].type) {
|
||||
case HWLOC_MEMORY_TIER_DRAM:
|
||||
if (mark_dram)
|
||||
type = "DRAM";
|
||||
break;
|
||||
case HWLOC_MEMORY_TIER_HBM:
|
||||
type = "HBM";
|
||||
break;
|
||||
case HWLOC_MEMORY_TIER_SPM:
|
||||
type = "SPM";
|
||||
break;
|
||||
case HWLOC_MEMORY_TIER_NVM:
|
||||
type = "NVM";
|
||||
break;
|
||||
default:
|
||||
/* GPU memory is already marked with subtype="GPUMemory",
|
||||
* UNKNOWN doesn't deserve any subtype
|
||||
*/
|
||||
break;
|
||||
}
|
||||
if (type) {
|
||||
hwloc_debug("Marking node L#%u P#%u as %s\n", tiers[i].node->logical_index, tiers[i].node->os_index, type);
|
||||
tiers[i].node->subtype = strdup(type);
|
||||
}
|
||||
}
|
||||
|
||||
free(tiers);
|
||||
return 0;
|
||||
}
|
||||
|
||||
src/3rdparty/hwloc/src/pci-common.c (vendored; 144 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
*/
|
||||
|
||||
@@ -119,6 +119,13 @@ hwloc_pci_discovery_init(struct hwloc_topology *topology)
|
||||
topology->pci_forced_locality = NULL;
|
||||
|
||||
topology->first_pci_locality = topology->last_pci_locality = NULL;
|
||||
|
||||
#define HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A (1ULL<<0)
|
||||
#define HWLOC_PCI_LOCALITY_QUIRK_FAKE (1ULL<<62)
|
||||
topology->pci_locality_quirks = (uint64_t) -1;
|
||||
/* -1 is unknown, 0 is disabled, >0 is bitmask of enabled quirks.
|
||||
* bit 63 should remain unused so that -1 is inaccessible as a bitmask.
|
||||
*/
|
||||
}
|
||||
|
||||
void
|
||||
@@ -146,7 +153,7 @@ hwloc_pci_discovery_prepare(struct hwloc_topology *topology)
|
||||
}
|
||||
free(buffer);
|
||||
} else {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc/pci: Ignoring HWLOC_PCI_LOCALITY file `%s' too large (%lu bytes)\n",
|
||||
env, (unsigned long) st.st_size);
|
||||
}
|
||||
@@ -333,7 +340,7 @@ hwloc_pci_add_object(struct hwloc_obj *parent, struct hwloc_obj **parent_io_firs
|
||||
}
|
||||
case HWLOC_PCI_BUSID_EQUAL: {
|
||||
static int reported = 0;
|
||||
if (!reported && hwloc_hide_errors() < 2) {
|
||||
if (!reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
|
||||
fprintf(stderr, "*********************************************************\n");
|
||||
fprintf(stderr, "* hwloc %s received invalid PCI information.\n", HWLOC_VERSION);
|
||||
fprintf(stderr, "*\n");
|
||||
@@ -442,13 +449,90 @@ hwloc_pcidisc_add_hostbridges(struct hwloc_topology *topology,
|
||||
return new;
|
||||
}
|
||||
|
||||
static struct hwloc_obj *
|
||||
hwloc_pci_fixup_busid_parent(struct hwloc_topology *topology __hwloc_attribute_unused,
|
||||
struct hwloc_pcidev_attr_s *busid __hwloc_attribute_unused,
|
||||
struct hwloc_obj *parent __hwloc_attribute_unused)
|
||||
/* return 1 if a quirk was applied */
|
||||
static int
|
||||
hwloc__pci_find_busid_parent_quirk(struct hwloc_topology *topology,
|
||||
struct hwloc_pcidev_attr_s *busid,
|
||||
hwloc_cpuset_t cpuset)
|
||||
{
|
||||
/* no quirk for now */
|
||||
return parent;
|
||||
if (topology->pci_locality_quirks == (uint64_t)-1 /* unknown */) {
|
||||
const char *dmi_board_name, *env;
|
||||
|
||||
/* first invocation, detect which quirks are needed */
|
||||
topology->pci_locality_quirks = 0; /* no quirk yet */
|
||||
|
||||
dmi_board_name = hwloc_obj_get_info_by_name(hwloc_get_root_obj(topology), "DMIBoardName");
|
||||
if (dmi_board_name && !strcmp(dmi_board_name, "HPE CRAY EX235A")) {
|
||||
hwloc_debug("enabling for PCI locality quirk for HPE Cray EX235A\n");
|
||||
topology->pci_locality_quirks |= HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A;
|
||||
}
|
||||
|
||||
env = getenv("HWLOC_PCI_LOCALITY_QUIRK_FAKE");
|
||||
if (env && atoi(env)) {
|
||||
hwloc_debug("enabling for PCI locality fake quirk (attaching everything to last PU)\n");
|
||||
topology->pci_locality_quirks |= HWLOC_PCI_LOCALITY_QUIRK_FAKE;
|
||||
}
|
||||
}
|
||||
|
||||
if (topology->pci_locality_quirks & HWLOC_PCI_LOCALITY_QUIRK_FAKE) {
|
||||
unsigned last = hwloc_bitmap_last(hwloc_topology_get_topology_cpuset(topology));
|
||||
hwloc_bitmap_set(cpuset, last);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (topology->pci_locality_quirks & HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A) {
|
||||
/* AMD Trento has xGMI ports connected to individual CCDs (8 cores + L3)
|
||||
* instead of NUMA nodes (pairs of CCDs within Trento) as is usual in AMD EPYC CPUs.
|
||||
* This is not described by the ACPI tables, hence we need to manually hardwire
|
||||
* the xGMI locality for the (currently single) server that currently uses that CPU.
|
||||
* It's not clear if ACPI tables can/will ever be fixed (would require one initiator
|
||||
* proximity domain per CCD), or if Linux can/will work around the issue.
|
||||
*/
|
||||
if (busid->domain == 0) {
|
||||
if (busid->bus >= 0xd0 && busid->bus <= 0xd1) {
|
||||
hwloc_bitmap_set_range(cpuset, 0, 7);
|
||||
hwloc_bitmap_set_range(cpuset, 64, 71);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xd4 && busid->bus <= 0xd6) {
|
||||
hwloc_bitmap_set_range(cpuset, 8, 15);
|
||||
hwloc_bitmap_set_range(cpuset, 72, 79);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xc8 && busid->bus <= 0xc9) {
|
||||
hwloc_bitmap_set_range(cpuset, 16, 23);
|
||||
hwloc_bitmap_set_range(cpuset, 80, 87);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xcc && busid->bus <= 0xce) {
|
||||
hwloc_bitmap_set_range(cpuset, 24, 31);
|
||||
hwloc_bitmap_set_range(cpuset, 88, 95);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xd8 && busid->bus <= 0xd9) {
|
||||
hwloc_bitmap_set_range(cpuset, 32, 39);
|
||||
hwloc_bitmap_set_range(cpuset, 96, 103);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xdc && busid->bus <= 0xde) {
|
||||
hwloc_bitmap_set_range(cpuset, 40, 47);
|
||||
hwloc_bitmap_set_range(cpuset, 104, 111);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xc0 && busid->bus <= 0xc1) {
|
||||
hwloc_bitmap_set_range(cpuset, 48, 55);
|
||||
hwloc_bitmap_set_range(cpuset, 112, 119);
|
||||
return 1;
|
||||
}
|
||||
if (busid->bus >= 0xc4 && busid->bus <= 0xc6) {
|
||||
hwloc_bitmap_set_range(cpuset, 56, 63);
|
||||
hwloc_bitmap_set_range(cpuset, 120, 127);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct hwloc_obj *
|
||||
@@ -457,7 +541,7 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
|
||||
hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
|
||||
hwloc_obj_t parent;
|
||||
int forced = 0;
|
||||
int noquirks = 0;
|
||||
int noquirks = 0, got_quirked = 0;
|
||||
unsigned i;
|
||||
int err;
|
||||
|
||||
@@ -490,7 +574,7 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
|
||||
if (env) {
|
||||
static int reported = 0;
|
||||
if (!topology->pci_has_forced_locality && !reported) {
|
||||
if (!hwloc_hide_errors())
|
||||
if (HWLOC_SHOW_ALL_ERRORS())
|
||||
fprintf(stderr, "hwloc/pci: Environment variable %s is deprecated, please use HWLOC_PCI_LOCALITY instead.\n", env);
|
||||
reported = 1;
|
||||
}
|
||||
@@ -505,7 +589,13 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
|
||||
}
|
||||
}
|
||||
|
||||
if (!forced) {
|
||||
if (!forced && !noquirks && topology->pci_locality_quirks /* either quirks are unknown yet, or some are enabled */) {
|
||||
err = hwloc__pci_find_busid_parent_quirk(topology, busid, cpuset);
|
||||
if (err > 0)
|
||||
got_quirked = 1;
|
||||
}
|
||||
|
||||
if (!forced && !got_quirked) {
|
||||
/* get the cpuset by asking the backend that provides the relevant hook, if any. */
|
||||
struct hwloc_backend *backend = topology->get_pci_busid_cpuset_backend;
|
||||
if (backend)
|
||||
@@ -520,11 +610,7 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
|
||||
hwloc_debug_bitmap(" will attach PCI bus to cpuset %s\n", cpuset);
|
||||
|
||||
parent = hwloc_find_insert_io_parent_by_complete_cpuset(topology, cpuset);
|
||||
if (parent) {
|
||||
if (!noquirks)
|
||||
/* We found a valid parent. Check that the OS didn't report invalid locality */
|
||||
parent = hwloc_pci_fixup_busid_parent(topology, busid, parent);
|
||||
} else {
|
||||
if (!parent) {
|
||||
/* Fallback to root */
|
||||
parent = hwloc_get_root_obj(topology);
|
||||
}
|
||||
@@ -805,19 +891,28 @@ hwloc_pcidisc_find_linkspeed(const unsigned char *config,
|
||||
memcpy(&linksta, &config[offset + HWLOC_PCI_EXP_LNKSTA], 4);
|
||||
speed = linksta & HWLOC_PCI_EXP_LNKSTA_SPEED; /* PCIe generation */
|
||||
width = (linksta & HWLOC_PCI_EXP_LNKSTA_WIDTH) >> 4; /* how many lanes */
|
||||
/* PCIe Gen1 = 2.5GT/s signal-rate per lane with 8/10 encoding = 0.25GB/s data-rate per lane
|
||||
* PCIe Gen2 = 5 GT/s signal-rate per lane with 8/10 encoding = 0.5 GB/s data-rate per lane
|
||||
* PCIe Gen3 = 8 GT/s signal-rate per lane with 128/130 encoding = 1 GB/s data-rate per lane
|
||||
* PCIe Gen4 = 16 GT/s signal-rate per lane with 128/130 encoding = 2 GB/s data-rate per lane
|
||||
* PCIe Gen5 = 32 GT/s signal-rate per lane with 128/130 encoding = 4 GB/s data-rate per lane
|
||||
* PCIe Gen6 = 64 GT/s signal-rate per lane with 128/130 encoding = 8 GB/s data-rate per lane
|
||||
/*
|
||||
* These are single-direction bandwidths only.
|
||||
*
|
||||
* Gen1 used NRZ with 8/10 encoding.
|
||||
* PCIe Gen1 = 2.5GT/s signal-rate per lane x 8/10 = 0.25GB/s data-rate per lane
|
||||
* PCIe Gen2 = 5 GT/s signal-rate per lane x 8/10 = 0.5 GB/s data-rate per lane
|
||||
* Gen3 switched to NRZ with 128/130 encoding.
|
||||
* PCIe Gen3 = 8 GT/s signal-rate per lane x 128/130 = 1 GB/s data-rate per lane
|
||||
* PCIe Gen4 = 16 GT/s signal-rate per lane x 128/130 = 2 GB/s data-rate per lane
|
||||
* PCIe Gen5 = 32 GT/s signal-rate per lane x 128/130 = 4 GB/s data-rate per lane
|
||||
* Gen6 switched to PAM with 242/256 FLIT (242B payload protected by 8B CRC + 6B FEC).
|
||||
* PCIe Gen6 = 64 GT/s signal-rate per lane x 242/256 = 8 GB/s data-rate per lane
|
||||
* PCIe Gen7 = 128GT/s signal-rate per lane x 242/256 = 16 GB/s data-rate per lane
|
||||
*/
|
||||
|
||||
/* lanespeed in Gbit/s */
|
||||
if (speed <= 2)
|
||||
lanespeed = 2.5f * speed * 0.8f;
|
||||
else if (speed <= 5)
|
||||
lanespeed = 8.0f * (1<<(speed-3)) * 128/130;
|
||||
else
|
||||
lanespeed = 8.0f * (1<<(speed-3)) * 128/130; /* assume Gen7 will be 128 GT/s and so on */
|
||||
lanespeed = 8.0f * (1<<(speed-3)) * 242/256; /* assume Gen8 will be 256 GT/s and so on */
|
||||
|
||||
/* linkspeed in GB/s */
|
||||
*linkspeed = lanespeed * width / 8;
|
||||
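As a worked example of the calculation above (a standalone sketch, not part of hwloc): a Gen4 x16 link reports link-speed code 4 and width 16, giving a per-lane rate of 8 * 2 * 128/130, roughly 15.75 Gbit/s, and a whole-link rate of about 31.5 GB/s in each direction.

#include <cstdio>

// Standalone re-implementation of the calculation above, for illustration only;
// 'speed' is the PCIe link-speed code from LNKSTA (1 = Gen1, 4 = Gen4, ...).
static float pcie_link_GBps(unsigned speed, unsigned width)
{
    float lanespeed; // Gbit/s per lane
    if (speed <= 2)
        lanespeed = 2.5f * speed * 0.8f;                    // Gen1/Gen2, 8b/10b encoding
    else if (speed <= 5)
        lanespeed = 8.0f * (1 << (speed - 3)) * 128 / 130;  // Gen3..Gen5, 128b/130b
    else
        lanespeed = 8.0f * (1 << (speed - 3)) * 242 / 256;  // Gen6+, FLIT encoding
    return lanespeed * width / 8;                           // GB/s for the whole link
}

int main()
{
    std::printf("PCIe Gen4 x16: %.1f GB/s per direction\n", pcie_link_GBps(4, 16)); // ~31.5
}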
@@ -944,6 +1039,7 @@ hwloc_pci_class_string(unsigned short class_id)
|
||||
switch (class_id) {
|
||||
case 0x0500: return "RAM";
|
||||
case 0x0501: return "Flash";
|
||||
case 0x0502: return "CXLMem";
|
||||
}
|
||||
return "Memory";
|
||||
case 0x06:
|
||||
|
||||
src/3rdparty/hwloc/src/topology-synthetic.c (vendored, 28 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 CNRS
|
||||
* Copyright © 2009-2020 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2009-2010 Université Bordeaux
|
||||
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
@@ -323,17 +323,29 @@ hwloc_synthetic_parse_memory_attr(const char *attr, const char **endp)
|
||||
hwloc_uint64_t size;
|
||||
size = strtoull(attr, (char **) &endptr, 0);
|
||||
if (!hwloc_strncasecmp(endptr, "TB", 2)) {
|
||||
size *= 1000ULL*1000ULL*1000ULL*1000ULL;
|
||||
endptr += 2;
|
||||
} else if (!hwloc_strncasecmp(endptr, "TiB", 3)) {
|
||||
size <<= 40;
|
||||
endptr += 2;
|
||||
endptr += 3;
|
||||
} else if (!hwloc_strncasecmp(endptr, "GB", 2)) {
|
||||
size *= 1000ULL*1000ULL*1000ULL;
|
||||
endptr += 2;
|
||||
} else if (!hwloc_strncasecmp(endptr, "GiB", 3)) {
|
||||
size <<= 30;
|
||||
endptr += 2;
|
||||
endptr += 3;
|
||||
} else if (!hwloc_strncasecmp(endptr, "MB", 2)) {
|
||||
size *= 1000ULL*1000ULL;
|
||||
endptr += 2;
|
||||
} else if (!hwloc_strncasecmp(endptr, "MiB", 3)) {
|
||||
size <<= 20;
|
||||
endptr += 2;
|
||||
endptr += 3;
|
||||
} else if (!hwloc_strncasecmp(endptr, "kB", 2)) {
|
||||
size <<= 10;
|
||||
size *= 1000ULL;
|
||||
endptr += 2;
|
||||
} else if (!hwloc_strncasecmp(endptr, "kiB", 3)) {
|
||||
size <<= 10;
|
||||
endptr += 3;
|
||||
}
|
||||
*endp = endptr;
|
||||
return size;
|
||||
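The hunk above fixes two suffix-parsing bugs: the binary suffixes ("TiB", "GiB", "MiB") previously advanced endptr by only two characters, and "kB" was treated as 1024 bytes instead of 1000. A small sketch of the corrected decimal-versus-binary handling follows; parse_mem_suffix is a hypothetical helper, not hwloc API, and it matches suffixes exactly while the real parser is case-insensitive.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Decimal suffixes multiply by powers of 1000, binary ("iB") suffixes shift by
// powers of 1024, and endp advances past the full suffix length.
static uint64_t parse_mem_suffix(uint64_t value, const char *suffix, const char **endp)
{
    struct { const char *s; uint64_t mul; unsigned shift; } table[] = {
        { "TiB", 0, 40 }, { "GiB", 0, 30 }, { "MiB", 0, 20 }, { "kiB", 0, 10 },
        { "TB", 1000000000000ULL, 0 }, { "GB", 1000000000ULL, 0 },
        { "MB", 1000000ULL, 0 },       { "kB", 1000ULL, 0 },
    };
    for (auto &e : table) {
        size_t len = std::strlen(e.s);
        if (!std::strncmp(suffix, e.s, len)) {
            *endp = suffix + len;            // advance past the whole suffix
            return e.shift ? value << e.shift : value * e.mul;
        }
    }
    *endp = suffix;
    return value;
}

int main()
{
    const char *end;
    std::printf("%llu\n", (unsigned long long) parse_mem_suffix(2, "GiB", &end)); // 2147483648
    std::printf("%llu\n", (unsigned long long) parse_mem_suffix(2, "GB",  &end)); // 2000000000
}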
@@ -802,15 +814,15 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
|
||||
} else if (hwloc__obj_type_is_cache(type)) {
|
||||
if (!curlevel->attr.memorysize) {
|
||||
if (1 == curlevel->attr.depth)
|
||||
/* 32Kb in L1 */
|
||||
/* 32KiB in L1 */
|
||||
curlevel->attr.memorysize = 32*1024;
|
||||
else
|
||||
/* *4 at each level, starting from 1MB for L2, unified */
|
||||
/* *4 at each level, starting from 1MiB for L2, unified */
|
||||
curlevel->attr.memorysize = 256ULL*1024 << (2*curlevel->attr.depth);
|
||||
}
|
||||
|
||||
} else if (type == HWLOC_OBJ_NUMANODE && !curlevel->attr.memorysize) {
|
||||
/* 1GB in memory nodes. */
|
||||
/* 1GiB in memory nodes. */
|
||||
curlevel->attr.memorysize = 1024*1024*1024;
|
||||
}
|
||||
|
||||
|
||||
src/3rdparty/hwloc/src/topology-windows.c (vendored, 4 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 CNRS
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2009-2012, 2020 Université Bordeaux
|
||||
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
@@ -366,7 +366,7 @@ hwloc_win_get_processor_groups(void)
|
||||
hwloc_debug("found %lu windows processor groups\n", nr_processor_groups);
|
||||
|
||||
if (nr_processor_groups > 1 && SIZEOF_VOID_P == 4) {
|
||||
if (!hwloc_hide_errors())
|
||||
if (HWLOC_SHOW_ALL_ERRORS())
|
||||
fprintf(stderr, "hwloc: multiple processor groups found on 32bits Windows, topology may be invalid/incomplete.\n");
|
||||
}
|
||||
|
||||
|
||||
src/3rdparty/hwloc/src/topology-x86.c (vendored, 4 lines changed)
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2010-2021 Inria. All rights reserved.
|
||||
* Copyright © 2010-2022 Inria. All rights reserved.
|
||||
* Copyright © 2010-2013 Université Bordeaux
|
||||
* Copyright © 2010-2011 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
@@ -1349,7 +1349,7 @@ look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long
|
||||
if (data->apicid_unique) {
|
||||
summarize(backend, infos, flags);
|
||||
|
||||
if (has_hybrid(features)) {
|
||||
if (has_hybrid(features) && !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
|
||||
/* use hybrid info for cpukinds */
|
||||
hwloc_bitmap_t atomset = hwloc_bitmap_alloc();
|
||||
hwloc_bitmap_t coreset = hwloc_bitmap_alloc();
|
||||
|
||||
src/3rdparty/hwloc/src/topology-xml.c (vendored, 46 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 CNRS
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2009-2011, 2020 Université Bordeaux
|
||||
* Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved.
|
||||
* See COPYING in top-level directory.
|
||||
@@ -123,6 +123,17 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
|
||||
fprintf(stderr, "%s: unexpected zero gp_index, topology may be invalid\n", state->global->msgprefix);
|
||||
if (obj->gp_index >= topology->next_gp_index)
|
||||
topology->next_gp_index = obj->gp_index + 1;
|
||||
} else if (!strcmp(name, "id")) { /* forward compat */
|
||||
if (!strncmp(value, "obj", 3)) {
|
||||
obj->gp_index = strtoull(value+3, NULL, 10);
|
||||
if (!obj->gp_index && hwloc__xml_verbose())
|
||||
fprintf(stderr, "%s: unexpected zero id, topology may be invalid\n", state->global->msgprefix);
|
||||
if (obj->gp_index >= topology->next_gp_index)
|
||||
topology->next_gp_index = obj->gp_index + 1;
|
||||
} else {
|
||||
if (hwloc__xml_verbose())
|
||||
fprintf(stderr, "%s: unexpected id `%s' not-starting with `obj', ignoring\n", state->global->msgprefix, value);
|
||||
}
|
||||
} else if (!strcmp(name, "cpuset")) {
|
||||
if (!obj->cpuset)
|
||||
obj->cpuset = hwloc_bitmap_alloc();
|
||||
@@ -263,7 +274,7 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
|
||||
#ifndef HWLOC_HAVE_32BITS_PCI_DOMAIN
|
||||
} else if (domain > 0xffff) {
|
||||
static int warned = 0;
|
||||
if (!warned && hwloc_hide_errors() < 2)
|
||||
if (!warned && HWLOC_SHOW_ALL_ERRORS())
|
||||
fprintf(stderr, "hwloc/xml: Ignoring PCI device with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
|
||||
warned = 1;
|
||||
*ignore = 1;
|
||||
@@ -363,7 +374,7 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
|
||||
#ifndef HWLOC_HAVE_32BITS_PCI_DOMAIN
|
||||
} else if (domain > 0xffff) {
|
||||
static int warned = 0;
|
||||
if (!warned && hwloc_hide_errors() < 2)
|
||||
if (!warned && HWLOC_SHOW_ALL_ERRORS())
|
||||
fprintf(stderr, "hwloc/xml: Ignoring bridge to PCI with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
|
||||
warned = 1;
|
||||
*ignore = 1;
|
||||
@@ -1235,7 +1246,7 @@ hwloc__xml_import_object(hwloc_topology_t topology,
|
||||
/* next should be before cur */
|
||||
if (!childrengotignored) {
|
||||
static int reported = 0;
|
||||
if (!reported && hwloc_hide_errors() < 2) {
|
||||
if (!reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
|
||||
hwloc__xml_import_report_outoforder(topology, next, cur);
|
||||
reported = 1;
|
||||
}
|
||||
@@ -1568,6 +1579,9 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
|
||||
}
|
||||
}
|
||||
|
||||
if (topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES)
|
||||
goto out_ignore;
|
||||
|
||||
hwloc_internal_distances_add_by_index(topology, name, unique_type, different_types, nbobjs, indexes, u64values, kind, 0 /* assume grouping was applied when this matrix was discovered before exporting to XML */);
|
||||
|
||||
/* prevent freeing below */
|
||||
@@ -1722,7 +1736,8 @@ hwloc__xml_import_memattr(hwloc_topology_t topology,
|
||||
}
|
||||
}
|
||||
|
||||
if (name && flags != (unsigned long) -1) {
|
||||
if (name && flags != (unsigned long) -1
|
||||
&& !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS)) {
|
||||
hwloc_memattr_id_t _id;
|
||||
|
||||
ret = hwloc_memattr_get_by_name(topology, name, &_id);
|
||||
@@ -1833,7 +1848,13 @@ hwloc__xml_import_cpukind(hwloc_topology_t topology,
|
||||
goto error;
|
||||
}
|
||||
|
||||
hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
|
||||
if (topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS) {
|
||||
hwloc__free_infos(infos, nr_infos);
|
||||
hwloc_bitmap_free(cpuset);
|
||||
} else {
|
||||
hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
|
||||
hwloc__free_infos(infos, nr_infos);
|
||||
}
|
||||
|
||||
return state->global->close_tag(state);
|
||||
|
||||
@@ -2168,7 +2189,8 @@ done:
|
||||
* but it would require to have those objects in the original XML order (like the first_numanode cousin-list).
|
||||
* because the topology order can be different if some parents are ignored during load.
|
||||
*/
|
||||
if (nbobjs == data->nbnumanodes) {
|
||||
if (nbobjs == data->nbnumanodes
|
||||
&& !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES)) {
|
||||
hwloc_obj_t *objs = malloc(nbobjs*sizeof(hwloc_obj_t));
|
||||
uint64_t *values = malloc(nbobjs*nbobjs*sizeof(*values));
|
||||
assert(data->nbnumanodes > 0); /* v1dist->nbobjs is >0 after import */
|
||||
@@ -2650,7 +2672,7 @@ hwloc__xml_export_object_contents (hwloc__xml_export_state_t state, hwloc_topolo
|
||||
|
||||
logical_to_v2array = malloc(nbobjs * sizeof(*logical_to_v2array));
|
||||
if (!logical_to_v2array) {
|
||||
if (!hwloc_hide_errors())
|
||||
if (HWLOC_SHOW_ALL_ERRORS())
|
||||
fprintf(stderr, "hwloc/xml/export/v1: failed to allocated logical_to_v2array\n");
|
||||
continue;
|
||||
}
|
||||
@@ -3124,9 +3146,11 @@ hwloc__xml_export_memattrs(hwloc__xml_export_state_t state, hwloc_topology_t top
|
||||
continue;
|
||||
|
||||
imattr = &topology->memattrs[id];
|
||||
if ((id == HWLOC_MEMATTR_ID_LATENCY || id == HWLOC_MEMATTR_ID_BANDWIDTH)
|
||||
&& !imattr->nr_targets)
|
||||
/* no need to export target-less attributes for initial attributes, no release support attributes without those definitions */
|
||||
if (id < HWLOC_MEMATTR_ID_MAX && !imattr->nr_targets)
|
||||
/* no need to export standard attributes without any target,
|
||||
* their definition is now standardized,
|
||||
* the old hwloc importing this XML may recreate these attributes just like it would for a non-imported topology.
|
||||
*/
|
||||
continue;
|
||||
|
||||
state->new_child(state, &mstate, "memattr");
|
||||
|
||||
src/3rdparty/hwloc/src/topology.c (vendored, 45 lines changed)
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright © 2009 CNRS
|
||||
* Copyright © 2009-2021 Inria. All rights reserved.
|
||||
* Copyright © 2009-2022 Inria. All rights reserved.
|
||||
* Copyright © 2009-2012, 2020 Université Bordeaux
|
||||
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright © 2022 IBM Corporation. All rights reserved.
|
||||
@@ -114,14 +114,25 @@ int hwloc_topology_abi_check(hwloc_topology_t topology)
|
||||
return topology->topology_abi != HWLOC_TOPOLOGY_ABI ? -1 : 0;
|
||||
}
|
||||
|
||||
/* callers should rather use wrappers HWLOC_SHOW_ALL_ERRORS() and HWLOC_SHOW_CRITICAL_ERRORS() for clarity */
|
||||
int hwloc_hide_errors(void)
|
||||
{
|
||||
static int hide = 1; /* only show critical errors by default. lstopo will show others */
|
||||
static int checked = 0;
|
||||
if (!checked) {
|
||||
const char *envvar = getenv("HWLOC_HIDE_ERRORS");
|
||||
if (envvar)
|
||||
if (envvar) {
|
||||
hide = atoi(envvar);
|
||||
#ifdef HWLOC_DEBUG
|
||||
} else {
|
||||
/* if debug is enabled and HWLOC_DEBUG_VERBOSE isn't forced to 0,
|
||||
* show all errors just like we show all debug messages.
|
||||
*/
|
||||
envvar = getenv("HWLOC_DEBUG_VERBOSE");
|
||||
if (!envvar || atoi(envvar))
|
||||
hide = 0;
|
||||
#endif
|
||||
}
|
||||
checked = 1;
|
||||
}
|
||||
return hide;
|
||||
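In other words, the wrappers introduced in this release map the HWLOC_HIDE_ERRORS levels roughly as sketched below: 0 shows everything, 1 (the default) shows only critical errors, and 2 silences all. This is an illustration of the convention, not the hwloc implementation itself.

#include <cstdlib>

// Sketch of the severity convention assumed by HWLOC_SHOW_ALL_ERRORS() and
// HWLOC_SHOW_CRITICAL_ERRORS(); the real code caches the env lookup as shown above.
static int hide_level()
{
    const char *env = std::getenv("HWLOC_HIDE_ERRORS");
    return env ? std::atoi(env) : 1; // default: show only critical errors
}

static bool show_all_errors()      { return hide_level() == 0; }
static bool show_critical_errors() { return hide_level() < 2;  }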
@@ -158,7 +169,7 @@ static void report_insert_error(hwloc_obj_t new, hwloc_obj_t old, const char *ms
|
||||
{
|
||||
static int reported = 0;
|
||||
|
||||
if (reason && !reported && hwloc_hide_errors() < 2) {
|
||||
if (reason && !reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
|
||||
char newstr[512];
|
||||
char oldstr[512];
|
||||
report_insert_error_format_obj(newstr, sizeof(newstr), new);
|
||||
@@ -3178,7 +3189,7 @@ hwloc_connect_levels(hwloc_topology_t topology)
|
||||
tmpnbobjs = realloc(topology->level_nbobjects,
|
||||
2 * topology->nb_levels_allocated * sizeof(*topology->level_nbobjects));
|
||||
if (!tmplevels || !tmpnbobjs) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: failed to realloc level arrays to %u\n", topology->nb_levels_allocated * 2);
|
||||
|
||||
/* if one realloc succeeded, make sure the caller will free the new buffer */
|
||||
@@ -3536,6 +3547,8 @@ hwloc_discover(struct hwloc_topology *topology,
|
||||
/*
|
||||
* Additional discovery
|
||||
*/
|
||||
hwloc_pci_discovery_prepare(topology);
|
||||
|
||||
if (topology->backend_phases & HWLOC_DISC_PHASE_PCI) {
|
||||
dstatus->phase = HWLOC_DISC_PHASE_PCI;
|
||||
hwloc_discover_by_phase(topology, dstatus, "PCI");
|
||||
@@ -3553,6 +3566,8 @@ hwloc_discover(struct hwloc_topology *topology,
|
||||
hwloc_discover_by_phase(topology, dstatus, "ANNOTATE");
|
||||
}
|
||||
|
||||
hwloc_pci_discovery_exit(topology); /* pci needed up to annotate */
|
||||
|
||||
if (getenv("HWLOC_DEBUG_SORT_CHILDREN"))
|
||||
hwloc_debug_sort_children(topology->levels[0][0]);
|
||||
|
||||
@@ -3565,17 +3580,17 @@ hwloc_discover(struct hwloc_topology *topology,
|
||||
hwloc_debug("%s", "\nRemoving empty objects\n");
|
||||
remove_empty(topology, &topology->levels[0][0]);
|
||||
if (!topology->levels[0][0]) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Topology became empty, aborting!\n");
|
||||
return -1;
|
||||
}
|
||||
if (hwloc_bitmap_iszero(topology->levels[0][0]->cpuset)) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Topology does not contain any PU, aborting!\n");
|
||||
return -1;
|
||||
}
|
||||
if (hwloc_bitmap_iszero(topology->levels[0][0]->nodeset)) {
|
||||
if (hwloc_hide_errors() < 2)
|
||||
if (HWLOC_SHOW_CRITICAL_ERRORS())
|
||||
fprintf(stderr, "hwloc: Topology does not contain any NUMA node, aborting!\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -3811,7 +3826,16 @@ hwloc_topology_set_flags (struct hwloc_topology *topology, unsigned long flags)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES|HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING|HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING)) {
|
||||
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED
|
||||
|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM
|
||||
|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES
|
||||
|HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT
|
||||
|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING
|
||||
|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING
|
||||
|HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING
|
||||
|HWLOC_TOPOLOGY_FLAG_NO_DISTANCES
|
||||
|HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS
|
||||
|HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
@@ -4076,15 +4100,11 @@ hwloc_topology_load (struct hwloc_topology *topology)
|
||||
*/
|
||||
hwloc_set_binding_hooks(topology);
|
||||
|
||||
hwloc_pci_discovery_prepare(topology);
|
||||
|
||||
/* actual topology discovery */
|
||||
err = hwloc_discover(topology, &dstatus);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
hwloc_pci_discovery_exit(topology);
|
||||
|
||||
#ifndef HWLOC_DEBUG
|
||||
if (getenv("HWLOC_DEBUG_CHECK"))
|
||||
#endif
|
||||
@@ -4106,6 +4126,7 @@ hwloc_topology_load (struct hwloc_topology *topology)
|
||||
/* Same for memattrs */
|
||||
hwloc_internal_memattrs_need_refresh(topology);
|
||||
hwloc_internal_memattrs_refresh(topology);
|
||||
hwloc_internal_memattrs_guess_memory_tiers(topology);
|
||||
|
||||
topology->is_loaded = 1;
|
||||
|
||||
|
||||
src/3rdparty/libethash/CMakeLists.txt (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
|
||||
cmake_minimum_required (VERSION 2.8.12)
|
||||
cmake_minimum_required(VERSION 3.1)
|
||||
project (ethash C)
|
||||
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os")
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include "crypto/common/Nonce.h"
|
||||
#include "crypto/common/VirtualMemory.h"
|
||||
#include "crypto/rx/Rx.h"
|
||||
#include "crypto/rx/RxCache.h"
|
||||
#include "crypto/rx/RxDataset.h"
|
||||
#include "crypto/rx/RxVm.h"
|
||||
#include "crypto/ghostrider/ghostrider.h"
|
||||
@@ -77,8 +78,11 @@ xmrig::CpuWorker<N>::CpuWorker(size_t id, const CpuLaunchData &data) :
|
||||
{
|
||||
# ifdef XMRIG_ALGO_CN_HEAVY
|
||||
// cn-heavy optimization for Zen3 CPUs
|
||||
const bool is_vermeer = (Cpu::info()->arch() == ICpuInfo::ARCH_ZEN3) && (Cpu::info()->model() == 0x21);
|
||||
if ((N == 1) && (m_av == CnHash::AV_SINGLE) && (m_algorithm.family() == Algorithm::CN_HEAVY) && (m_assembly != Assembly::NONE) && is_vermeer) {
|
||||
const auto arch = Cpu::info()->arch();
|
||||
const uint32_t model = Cpu::info()->model();
|
||||
const bool is_vermeer = (arch == ICpuInfo::ARCH_ZEN3) && (model == 0x21);
|
||||
const bool is_raphael = (arch == ICpuInfo::ARCH_ZEN4) && (model == 0x61);
|
||||
if ((N == 1) && (m_av == CnHash::AV_SINGLE) && (m_algorithm.family() == Algorithm::CN_HEAVY) && (m_assembly != Assembly::NONE) && (is_vermeer || is_raphael)) {
|
||||
std::lock_guard<std::mutex> lock(cn_heavyZen3MemoryMutex);
|
||||
if (!cn_heavyZen3Memory) {
|
||||
// Round up number of threads to the multiple of 8
|
||||
@@ -142,6 +146,11 @@ void xmrig::CpuWorker<N>::allocateRandomX_VM()
|
||||
uint8_t* scratchpad = m_memory->isHugePages() ? m_memory->scratchpad() : dataset->tryAllocateScrathpad();
|
||||
m_vm = RxVm::create(dataset, scratchpad ? scratchpad : m_memory->scratchpad(), !m_hwAES, m_assembly, node());
|
||||
}
|
||||
else if (!dataset->get() && (m_job.currentJob().seed() != m_seed)) {
|
||||
// Update RandomX light VM with the new seed
|
||||
randomx_vm_set_cache(m_vm, dataset->cache()->get());
|
||||
}
|
||||
m_seed = m_job.currentJob().seed();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -97,6 +97,7 @@ private:
|
||||
|
||||
# ifdef XMRIG_ALGO_RANDOMX
|
||||
randomx_vm *m_vm = nullptr;
|
||||
Buffer m_seed;
|
||||
# endif
|
||||
|
||||
# ifdef XMRIG_ALGO_GHOSTRIDER
|
||||
|
||||
@@ -45,19 +45,21 @@ public:
|
||||
ARCH_ZEN,
|
||||
ARCH_ZEN_PLUS,
|
||||
ARCH_ZEN2,
|
||||
ARCH_ZEN3
|
||||
ARCH_ZEN3,
|
||||
ARCH_ZEN4
|
||||
};
|
||||
|
||||
enum MsrMod : uint32_t {
|
||||
MSR_MOD_NONE,
|
||||
MSR_MOD_RYZEN_17H,
|
||||
MSR_MOD_RYZEN_19H,
|
||||
MSR_MOD_RYZEN_19H_ZEN4,
|
||||
MSR_MOD_INTEL,
|
||||
MSR_MOD_CUSTOM,
|
||||
MSR_MOD_MAX
|
||||
};
|
||||
|
||||
# define MSR_NAMES_LIST "none", "ryzen_17h", "ryzen_19h", "intel", "custom"
|
||||
# define MSR_NAMES_LIST "none", "ryzen_17h", "ryzen_19h", "ryzen_19h_zen4", "intel", "custom"
|
||||
|
||||
enum Flag : uint32_t {
|
||||
FLAG_AES,
|
||||
|
||||
@@ -64,7 +64,7 @@ static_assert(kCpuFlagsSize == ICpuInfo::FLAG_MAX, "kCpuFlagsSize and FLAG_MAX m
|
||||
|
||||
|
||||
#ifdef XMRIG_FEATURE_MSR
|
||||
constexpr size_t kMsrArraySize = 5;
|
||||
constexpr size_t kMsrArraySize = 6;
|
||||
static const std::array<const char *, kMsrArraySize> msrNames = { MSR_NAMES_LIST };
|
||||
static_assert(kMsrArraySize == ICpuInfo::MSR_MOD_MAX, "kMsrArraySize and MSR_MOD_MAX mismatch");
|
||||
#endif
|
||||
@@ -250,8 +250,14 @@ xmrig::BasicCpuInfo::BasicCpuInfo() :
|
||||
break;
|
||||
|
||||
case 0x19:
|
||||
m_arch = ARCH_ZEN3;
|
||||
m_msrMod = MSR_MOD_RYZEN_19H;
|
||||
if (m_model == 0x61) {
|
||||
m_arch = ARCH_ZEN4;
|
||||
m_msrMod = MSR_MOD_RYZEN_19H_ZEN4;
|
||||
}
|
||||
else {
|
||||
m_arch = ARCH_ZEN3;
|
||||
m_msrMod = MSR_MOD_RYZEN_19H;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
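The detection above keys Zen 4 off CPUID family 0x19 with model 0x61 (Raphael) and keeps every other family 0x19 part on the Zen 3 path with the existing MSR mod. A tiny illustrative sketch of that mapping; real detection also looks at the vendor and extended family fields, which are omitted here.

#include <cstdint>
#include <cstdio>

enum class Arch { Zen3, Zen4, Other };

// Illustrative only: family 0x19 covers both Zen 3 and Zen 4 parts, and this
// build distinguishes Zen 4 by model 0x61; other models fall back to Zen 3.
static Arch classify_family_19h(uint32_t family, uint32_t model)
{
    if (family != 0x19)
        return Arch::Other;
    return model == 0x61 ? Arch::Zen4 : Arch::Zen3;
}

int main()
{
    std::printf("%d\n", static_cast<int>(classify_family_19h(0x19, 0x61))); // 1 == Zen4 (Raphael)
    std::printf("%d\n", static_cast<int>(classify_family_19h(0x19, 0x21))); // 0 == Zen3 (Vermeer)
}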
@@ -27,7 +27,7 @@
|
||||
|
||||
#if __ARM_FEATURE_CRYPTO && !defined(__APPLE__)
|
||||
# include <sys/auxv.h>
|
||||
# ifndef __FreeBSD__
|
||||
# if !defined(XMRIG_OS_FREEBSD)
|
||||
# include <asm/hwcap.h>
|
||||
# else
|
||||
# include <stdint.h>
|
||||
@@ -71,7 +71,7 @@ xmrig::BasicCpuInfo::BasicCpuInfo() :
|
||||
# if __ARM_FEATURE_CRYPTO
|
||||
# if defined(__APPLE__)
|
||||
m_flags.set(FLAG_AES, true);
|
||||
# elif defined(__FreeBSD__)
|
||||
# elif defined(XMRIG_OS_FREEBSD)
|
||||
uint64_t isar0 = READ_SPECIALREG(id_aa64isar0_el1);
|
||||
m_flags.set(FLAG_AES, ID_AA64ISAR0_AES_VAL(isar0) >= ID_AA64ISAR0_AES_BASE);
|
||||
# else
|
||||
|
||||
@@ -16,10 +16,12 @@
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#ifdef XMRIG_OS_FREEBSD
|
||||
# include <sys/types.h>
|
||||
# include <sys/param.h>
|
||||
# include <sys/cpuset.h>
|
||||
# ifndef __DragonFly__
|
||||
# include <sys/cpuset.h>
|
||||
# endif
|
||||
# include <pthread_np.h>
|
||||
#endif
|
||||
|
||||
@@ -41,11 +43,6 @@
|
||||
#include "version.h"
|
||||
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
typedef cpuset_t cpu_set_t;
|
||||
#endif
|
||||
|
||||
|
||||
char *xmrig::Platform::createUserAgent()
|
||||
{
|
||||
constexpr const size_t max = 256;
|
||||
@@ -74,6 +71,19 @@ char *xmrig::Platform::createUserAgent()
|
||||
|
||||
|
||||
#ifndef XMRIG_FEATURE_HWLOC
|
||||
#ifdef __DragonFly__
|
||||
|
||||
bool xmrig::Platform::setThreadAffinity(uint64_t cpu_id)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#ifdef XMRIG_OS_FREEBSD
|
||||
typedef cpuset_t cpu_set_t;
|
||||
#endif
|
||||
|
||||
bool xmrig::Platform::setThreadAffinity(uint64_t cpu_id)
|
||||
{
|
||||
cpu_set_t mn;
|
||||
@@ -89,7 +99,9 @@ bool xmrig::Platform::setThreadAffinity(uint64_t cpu_id)
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // __DragonFly__
|
||||
#endif // XMRIG_FEATURE_HWLOC
|
||||
|
||||
|
||||
void xmrig::Platform::setProcessPriority(int)
|
||||
|
||||
@@ -247,6 +247,7 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch
|
||||
case IConfig::HttpPort: /* --http-port */
|
||||
case IConfig::DonateLevelKey: /* --donate-level */
|
||||
case IConfig::DaemonPollKey: /* --daemon-poll-interval */
|
||||
case IConfig::DaemonJobTimeoutKey: /* --daemon-job-timeout */
|
||||
case IConfig::DnsTtlKey: /* --dns-ttl */
|
||||
case IConfig::DaemonZMQPortKey: /* --daemon-zmq-port */
|
||||
return transformUint64(doc, key, static_cast<uint64_t>(strtol(arg, nullptr, 10)));
|
||||
@@ -360,6 +361,9 @@ void xmrig::BaseTransform::transformUint64(rapidjson::Document &doc, int key, ui
|
||||
case IConfig::DaemonPollKey: /* --daemon-poll-interval */
|
||||
return add(doc, Pools::kPools, Pool::kDaemonPollInterval, arg);
|
||||
|
||||
case IConfig::DaemonJobTimeoutKey: /* --daemon-job-timeout */
|
||||
return add(doc, Pools::kPools, Pool::kDaemonJobTimeout, arg);
|
||||
|
||||
case IConfig::DaemonZMQPortKey: /* --daemon-zmq-port */
|
||||
return add(doc, Pools::kPools, Pool::kDaemonZMQPort, arg);
|
||||
# endif
|
||||
|
||||
@@ -88,6 +88,7 @@ public:
|
||||
DaemonZMQPortKey = 1056,
|
||||
HugePagesJitKey = 1057,
|
||||
RotationKey = 1058,
|
||||
DaemonJobTimeoutKey = 1059,
|
||||
|
||||
// xmrig common
|
||||
CPUPriorityKey = 1021,
|
||||
|
||||
@@ -1018,7 +1018,7 @@ void xmrig::Client::onConnect(uv_connect_t *req, int status)
|
||||
|
||||
if (status < 0) {
|
||||
if (!client->isQuiet()) {
|
||||
LOG_ERR("%s " RED("connect error: ") RED_BOLD("\"%s\""), client->tag(), uv_strerror(status));
|
||||
LOG_ERR("%s %s " RED("connect error: ") RED_BOLD("\"%s\""), client->tag(), client->ip().data(), uv_strerror(status));
|
||||
}
|
||||
|
||||
if (client->state() == ReconnectingState || client->state() == ClosingState) {
|
||||
|
||||
@@ -303,6 +303,18 @@ void xmrig::DaemonClient::onHttpData(const HttpData &data)
|
||||
|
||||
void xmrig::DaemonClient::onTimer(const Timer *)
|
||||
{
|
||||
if (m_pool.zmq_port() >= 0) {
|
||||
m_prevHash = nullptr;
|
||||
m_blocktemplateRequestHash = nullptr;
|
||||
send(kGetHeight);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Chrono::steadyMSecs() >= m_jobSteadyMs + m_pool.jobTimeout()) {
|
||||
m_prevHash = nullptr;
|
||||
m_blocktemplateRequestHash = nullptr;
|
||||
}
|
||||
|
||||
if (m_state == ConnectingState) {
|
||||
connect();
|
||||
}
|
||||
@@ -352,7 +364,7 @@ void xmrig::DaemonClient::onResolved(const DnsRecords &records, int status, cons
|
||||
|
||||
bool xmrig::DaemonClient::isOutdated(uint64_t height, const char *hash) const
|
||||
{
|
||||
return m_job.height() != height || m_prevHash != hash;
|
||||
return m_job.height() != height || m_prevHash != hash || Chrono::steadyMSecs() >= m_jobSteadyMs + m_pool.jobTimeout();
|
||||
}
|
||||
|
||||
|
||||
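For solo mining, the new job timeout works by stamping each accepted job with a monotonic timestamp and treating the job as outdated once jobTimeout() (15000 ms by default) has elapsed, which drops the cached prev_hash and forces a fresh block template request on the next poll. A minimal sketch of that pattern, using std::chrono in place of xmrig's Chrono helper:

#include <chrono>
#include <cstdint>
#include <cstdio>

// Minimal sketch assuming a steady (monotonic) millisecond clock, mirroring
// Chrono::steadyMSecs() and Pool::jobTimeout() in the hunks above.
struct JobTimeoutTracker {
    uint64_t jobSteadyMs = 0;
    uint64_t timeoutMs   = 15000; // default daemon-job-timeout

    static uint64_t nowMs() {
        using namespace std::chrono;
        return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    }

    void onNewJob()         { jobSteadyMs = nowMs(); }
    bool isOutdated() const { return nowMs() >= jobSteadyMs + timeoutMs; }
};

int main()
{
    JobTimeoutTracker t;
    t.onNewJob();
    std::printf("outdated right away: %d\n", t.isOutdated()); // 0
}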
@@ -468,6 +480,7 @@ bool xmrig::DaemonClient::parseJob(const rapidjson::Value ¶ms, int *code)
|
||||
m_job = std::move(job);
|
||||
m_blocktemplateStr = std::move(blocktemplate);
|
||||
m_prevHash = Json::getString(params, "prev_hash");
|
||||
m_jobSteadyMs = Chrono::steadyMSecs();
|
||||
|
||||
if (m_state == ConnectingState) {
|
||||
setState(ConnectedState);
|
||||
@@ -596,6 +609,10 @@ void xmrig::DaemonClient::setState(SocketState state)
|
||||
const uint64_t interval = std::max<uint64_t>(20, m_pool.pollInterval());
|
||||
m_timer->start(interval, interval);
|
||||
}
|
||||
else {
|
||||
const uint64_t t = m_pool.jobTimeout();
|
||||
m_timer->start(t, t);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -865,7 +882,12 @@ void xmrig::DaemonClient::ZMQParse()
|
||||
// Clear previous hash and check daemon height to guarantee that xmrig will call get_block_template RPC later
|
||||
// We can't call get_block_template directly because daemon is not ready yet
|
||||
m_prevHash = nullptr;
|
||||
m_blocktemplateRequestHash = nullptr;
|
||||
send(kGetHeight);
|
||||
|
||||
const uint64_t t = m_pool.jobTimeout();
|
||||
m_timer->stop();
|
||||
m_timer->start(t, t);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -104,6 +104,7 @@ private:
|
||||
String m_blocktemplateStr;
|
||||
String m_currentJobId;
|
||||
String m_prevHash;
|
||||
uint64_t m_jobSteadyMs = 0;
|
||||
String m_tlsFingerprint;
|
||||
String m_tlsVersion;
|
||||
Timer *m_timer;
|
||||
|
||||
@@ -48,7 +48,13 @@ xmrig::Job::Job(bool nicehash, const Algorithm &algorithm, const String &clientI
|
||||
|
||||
bool xmrig::Job::isEqual(const Job &other) const
|
||||
{
|
||||
return m_id == other.m_id && m_clientId == other.m_clientId && memcmp(m_blob, other.m_blob, sizeof(m_blob)) == 0 && m_target == other.m_target;
|
||||
return m_id == other.m_id && m_clientId == other.m_clientId && isEqualBlob(other) && m_target == other.m_target;
|
||||
}
|
||||
|
||||
|
||||
bool xmrig::Job::isEqualBlob(const Job &other) const
|
||||
{
|
||||
return (m_size == other.m_size) && (memcmp(m_blob, other.m_blob, m_size) == 0);
|
||||
}
|
||||
|
||||
|
||||
@@ -58,19 +64,19 @@ bool xmrig::Job::setBlob(const char *blob)
|
||||
return false;
|
||||
}
|
||||
|
||||
m_size = strlen(blob);
|
||||
if (m_size % 2 != 0) {
|
||||
size_t size = strlen(blob);
|
||||
if (size % 2 != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
m_size /= 2;
|
||||
size /= 2;
|
||||
|
||||
const size_t minSize = nonceOffset() + nonceSize();
|
||||
if (m_size < minSize || m_size >= sizeof(m_blob)) {
|
||||
if (size < minSize || size >= sizeof(m_blob)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!Cvt::fromHex(m_blob, sizeof(m_blob), blob, m_size * 2)) {
|
||||
if (!Cvt::fromHex(m_blob, sizeof(m_blob), blob, size * 2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -80,9 +86,10 @@ bool xmrig::Job::setBlob(const char *blob)
|
||||
|
||||
# ifdef XMRIG_PROXY_PROJECT
|
||||
memset(m_rawBlob, 0, sizeof(m_rawBlob));
|
||||
memcpy(m_rawBlob, blob, m_size * 2);
|
||||
memcpy(m_rawBlob, blob, size * 2);
|
||||
# endif
|
||||
|
||||
m_size = size;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -59,6 +59,7 @@ public:
|
||||
~Job() = default;
|
||||
|
||||
bool isEqual(const Job &other) const;
|
||||
bool isEqualBlob(const Job &other) const;
|
||||
bool setBlob(const char *blob);
|
||||
bool setSeedHash(const char *hash);
|
||||
bool setTarget(const char *target);
|
||||
|
||||
@@ -65,6 +65,7 @@ const char *Pool::kAlgo = "algo";
|
||||
const char *Pool::kCoin = "coin";
|
||||
const char *Pool::kDaemon = "daemon";
|
||||
const char *Pool::kDaemonPollInterval = "daemon-poll-interval";
|
||||
const char *Pool::kDaemonJobTimeout = "daemon-job-timeout";
|
||||
const char *Pool::kDaemonZMQPort = "daemon-zmq-port";
|
||||
const char *Pool::kEnabled = "enabled";
|
||||
const char *Pool::kFingerprint = "tls-fingerprint";
|
||||
@@ -88,6 +89,7 @@ const char *Pool::kNicehashHost = "nicehash.com";
|
||||
xmrig::Pool::Pool(const char *url) :
|
||||
m_flags(1 << FLAG_ENABLED),
|
||||
m_pollInterval(kDefaultPollInterval),
|
||||
m_jobTimeout(kDefaultJobTimeout),
|
||||
m_url(url)
|
||||
{
|
||||
}
|
||||
@@ -101,6 +103,7 @@ xmrig::Pool::Pool(const char *host, uint16_t port, const char *user, const char
|
||||
m_user(user),
|
||||
m_spendSecretKey(spendSecretKey),
|
||||
m_pollInterval(kDefaultPollInterval),
|
||||
m_jobTimeout(kDefaultJobTimeout),
|
||||
m_url(host, port, tls)
|
||||
{
|
||||
m_flags.set(FLAG_NICEHASH, nicehash || strstr(host, kNicehashHost));
|
||||
@@ -111,6 +114,7 @@ xmrig::Pool::Pool(const char *host, uint16_t port, const char *user, const char
|
||||
xmrig::Pool::Pool(const rapidjson::Value &object) :
|
||||
m_flags(1 << FLAG_ENABLED),
|
||||
m_pollInterval(kDefaultPollInterval),
|
||||
m_jobTimeout(kDefaultJobTimeout),
|
||||
m_url(Json::getString(object, kUrl))
|
||||
{
|
||||
if (!m_url.isValid()) {
|
||||
@@ -123,6 +127,7 @@ xmrig::Pool::Pool(const rapidjson::Value &object) :
|
||||
m_rigId = Json::getString(object, kRigId);
|
||||
m_fingerprint = Json::getString(object, kFingerprint);
|
||||
m_pollInterval = Json::getUint64(object, kDaemonPollInterval, kDefaultPollInterval);
|
||||
m_jobTimeout = Json::getUint64(object, kDaemonJobTimeout, kDefaultJobTimeout);
|
||||
m_algorithm = Json::getString(object, kAlgo);
|
||||
m_coin = Json::getString(object, kCoin);
|
||||
m_daemon = Json::getString(object, kSelfSelect);
|
||||
@@ -207,6 +212,7 @@ bool xmrig::Pool::isEqual(const Pool &other) const
|
||||
&& m_url == other.m_url
|
||||
&& m_user == other.m_user
|
||||
&& m_pollInterval == other.m_pollInterval
|
||||
&& m_jobTimeout == other.m_jobTimeout
|
||||
&& m_daemon == other.m_daemon
|
||||
&& m_proxy == other.m_proxy
|
||||
);
|
||||
@@ -299,6 +305,7 @@ rapidjson::Value xmrig::Pool::toJSON(rapidjson::Document &doc) const
|
||||
|
||||
if (m_mode == MODE_DAEMON) {
|
||||
obj.AddMember(StringRef(kDaemonPollInterval), m_pollInterval, allocator);
|
||||
obj.AddMember(StringRef(kDaemonJobTimeout), m_jobTimeout, allocator);
|
||||
obj.AddMember(StringRef(kDaemonZMQPort), m_zmqPort, allocator);
|
||||
}
|
||||
else {
|
||||
|
||||
@@ -59,6 +59,7 @@ public:
|
||||
static const char *kCoin;
|
||||
static const char *kDaemon;
|
||||
static const char *kDaemonPollInterval;
|
||||
static const char* kDaemonJobTimeout;
|
||||
static const char *kEnabled;
|
||||
static const char *kFingerprint;
|
||||
static const char *kKeepalive;
|
||||
@@ -78,6 +79,7 @@ public:
|
||||
constexpr static int kKeepAliveTimeout = 60;
|
||||
constexpr static uint16_t kDefaultPort = 3333;
|
||||
constexpr static uint64_t kDefaultPollInterval = 1000;
|
||||
constexpr static uint64_t kDefaultJobTimeout = 15000;
|
||||
|
||||
Pool() = default;
|
||||
Pool(const char *host, uint16_t port, const char *user, const char *password, const char* spendSecretKey, int keepAlive, bool nicehash, bool tls, Mode mode);
|
||||
@@ -110,6 +112,7 @@ public:
|
||||
inline uint16_t port() const { return m_url.port(); }
|
||||
inline int zmq_port() const { return m_zmqPort; }
|
||||
inline uint64_t pollInterval() const { return m_pollInterval; }
|
||||
inline uint64_t jobTimeout() const { return m_jobTimeout; }
|
||||
inline void setAlgo(const Algorithm &algorithm) { m_algorithm = algorithm; }
|
||||
inline void setUrl(const char *url) { m_url = Url(url); }
|
||||
inline void setPassword(const String &password) { m_password = password; }
|
||||
@@ -156,6 +159,7 @@ private:
|
||||
String m_user;
|
||||
String m_spendSecretKey;
|
||||
uint64_t m_pollInterval = kDefaultPollInterval;
|
||||
uint64_t m_jobTimeout = kDefaultJobTimeout;
|
||||
Url m_daemon;
|
||||
Url m_url;
|
||||
int m_zmqPort = -1;
|
||||
|
||||
@@ -561,6 +561,12 @@ void xmrig::Miner::setJob(const Job &job, bool donate)
|
||||
const uint8_t index = donate ? 1 : 0;
|
||||
|
||||
d_ptr->reset = !(d_ptr->job.index() == 1 && index == 0 && d_ptr->userJobId == job.id());
|
||||
|
||||
// Don't reset nonce if pool sends the same hashing blob again, but with different difficulty (for example)
|
||||
if (d_ptr->job.isEqualBlob(job)) {
|
||||
d_ptr->reset = false;
|
||||
}
|
||||
|
||||
d_ptr->job = job;
|
||||
d_ptr->job.setIndex(index);
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ static const option options[] = {
|
||||
{ "http-no-restricted", 0, nullptr, IConfig::HttpRestrictedKey },
|
||||
{ "daemon", 0, nullptr, IConfig::DaemonKey },
|
||||
{ "daemon-poll-interval", 1, nullptr, IConfig::DaemonPollKey },
|
||||
{ "daemon-job-timeout", 1, nullptr, IConfig::DaemonJobTimeoutKey },
|
||||
{ "self-select", 1, nullptr, IConfig::SelfSelectKey },
|
||||
{ "submit-to-origin", 0, nullptr, IConfig::SubmitToOriginKey },
|
||||
{ "daemon-zmq-port", 1, nullptr, IConfig::DaemonZMQPortKey },
|
||||
|
||||
@@ -64,7 +64,9 @@ static inline const std::string &usage()
|
||||
|
||||
# ifdef XMRIG_FEATURE_HTTP
|
||||
u += " --daemon use daemon RPC instead of pool for solo mining\n";
|
||||
u += " --daemon-zmq-port daemon's zmq-pub port number (only use it if daemon has it enabled)\n";
|
||||
u += " --daemon-poll-interval=N daemon poll interval in milliseconds (default: 1000)\n";
|
||||
u += " --daemon-job-timeout=N daemon job timeout in milliseconds (default: 15000)\n";
|
||||
u += " --self-select=URL self-select block templates from URL\n";
|
||||
u += " --submit-to-origin also submit solution back to self-select URL\n";
|
||||
# endif
|
||||
|
||||
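Putting the new option together with the existing daemon flags, a solo-mining invocation might look like the line below; the daemon address and wallet are placeholders, and 18081 is simply monerod's usual RPC port:

xmrig --daemon -o 127.0.0.1:18081 -u <WALLET_ADDRESS> --coin monero --daemon-poll-interval=1000 --daemon-job-timeout=15000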
@@ -407,8 +407,12 @@ xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av,
|
||||
}
|
||||
|
||||
# ifdef XMRIG_ALGO_CN_HEAVY
|
||||
// cn-heavy optimization for Zen3 CPUs
|
||||
if ((av == AV_SINGLE) && (assembly != Assembly::NONE) && (Cpu::info()->arch() == ICpuInfo::ARCH_ZEN3) && (Cpu::info()->model() == 0x21)) {
|
||||
// cn-heavy optimization for Zen3/Zen4 CPUs
|
||||
const auto arch = Cpu::info()->arch();
|
||||
const uint32_t model = Cpu::info()->model();
|
||||
const bool is_vermeer = (arch == ICpuInfo::ARCH_ZEN3) && (model == 0x21);
|
||||
const bool is_raphael = (arch == ICpuInfo::ARCH_ZEN4) && (model == 0x61);
|
||||
if ((av == AV_SINGLE) && (assembly != Assembly::NONE) && (is_vermeer || is_raphael)) {
|
||||
switch (algorithm.id()) {
|
||||
case Algorithm::CN_HEAVY_0:
|
||||
return cryptonight_single_hash<Algorithm::CN_HEAVY_0, false, 3>;
|
||||
|
||||
src/crypto/cn/sse2neon.h (15026 lines changed; diff suppressed because it is too large)
@@ -57,6 +57,15 @@
|
||||
# define MAP_HUGE_MASK 0x3f
|
||||
#endif
|
||||
|
||||
#ifdef XMRIG_OS_FREEBSD
|
||||
# ifndef MAP_ALIGNED_SUPER
|
||||
# define MAP_ALIGNED_SUPER 0
|
||||
# endif
|
||||
# ifndef MAP_PREFAULT_READ
|
||||
# define MAP_PREFAULT_READ 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef XMRIG_SECURE_JIT
|
||||
# define SECURE_PROT_EXEC 0
|
||||
@@ -65,7 +74,7 @@
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(XMRIG_OS_LINUX) || (!defined(XMRIG_OS_APPLE) && !defined(__FreeBSD__))
|
||||
#if defined(XMRIG_OS_LINUX) || (!defined(XMRIG_OS_APPLE) && !defined(XMRIG_OS_FREEBSD))
|
||||
static inline int hugePagesFlag(size_t size)
|
||||
{
|
||||
return (static_cast<int>(log2(size)) & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
|
||||
@@ -112,13 +121,19 @@ bool xmrig::VirtualMemory::protectRWX(void *p, size_t size)
|
||||
|
||||
bool xmrig::VirtualMemory::protectRX(void *p, size_t size)
|
||||
{
|
||||
bool result = true;
|
||||
|
||||
# if defined(XMRIG_OS_APPLE) && defined(XMRIG_ARM)
|
||||
pthread_jit_write_protect_np(true);
|
||||
flushInstructionCache(p, size);
|
||||
return true;
|
||||
# else
|
||||
return mprotect(p, size, PROT_READ | PROT_EXEC) == 0;
|
||||
result = (mprotect(p, size, PROT_READ | PROT_EXEC) == 0);
|
||||
# endif
|
||||
|
||||
# if defined(XMRIG_ARM)
|
||||
flushInstructionCache(p, size);
|
||||
# endif
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@@ -129,7 +144,7 @@ void *xmrig::VirtualMemory::allocateExecutableMemory(size_t size, bool hugePages
|
||||
# ifdef XMRIG_ARM
|
||||
pthread_jit_write_protect_np(false);
|
||||
# endif
|
||||
# elif defined(__FreeBSD__)
|
||||
# elif defined(XMRIG_OS_FREEBSD)
|
||||
void *mem = nullptr;
|
||||
|
||||
if (hugePages) {
|
||||
@@ -162,7 +177,7 @@ void *xmrig::VirtualMemory::allocateLargePagesMemory(size_t size)
|
||||
{
|
||||
# if defined(XMRIG_OS_APPLE)
|
||||
void *mem = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0);
|
||||
# elif defined(__FreeBSD__)
|
||||
# elif defined(XMRIG_OS_FREEBSD)
|
||||
void *mem = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0);
|
||||
# else
|
||||
void *mem = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE | hugePagesFlag(hugePageSize()), 0, 0);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
cmake_minimum_required(VERSION 2.8.12)
|
||||
cmake_minimum_required(VERSION 3.1)
|
||||
project(GhostRider)
|
||||
|
||||
set(HEADERS
|
||||
|
||||
src/crypto/randomx/blake2/avx2/LICENSE (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
Creative Commons Legal Code
|
||||
|
||||
CC0 1.0 Universal
|
||||
|
||||
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
|
||||
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
|
||||
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
|
||||
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
|
||||
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
|
||||
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
|
||||
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
|
||||
HEREUNDER.
|
||||
|
||||
Statement of Purpose
|
||||
|
||||
The laws of most jurisdictions throughout the world automatically confer
|
||||
exclusive Copyright and Related Rights (defined below) upon the creator
|
||||
and subsequent owner(s) (each and all, an "owner") of an original work of
|
||||
authorship and/or a database (each, a "Work").
|
||||
|
||||
Certain owners wish to permanently relinquish those rights to a Work for
|
||||
the purpose of contributing to a commons of creative, cultural and
|
||||
scientific works ("Commons") that the public can reliably and without fear
|
||||
of later claims of infringement build upon, modify, incorporate in other
|
||||
works, reuse and redistribute as freely as possible in any form whatsoever
|
||||
and for any purposes, including without limitation commercial purposes.
|
||||
These owners may contribute to the Commons to promote the ideal of a free
|
||||
culture and the further production of creative, cultural and scientific
|
||||
works, or to gain reputation or greater distribution for their Work in
|
||||
part through the use and efforts of others.
|
||||
|
||||
For these and/or other purposes and motivations, and without any
|
||||
expectation of additional consideration or compensation, the person
|
||||
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
|
||||
is an owner of Copyright and Related Rights in the Work, voluntarily
|
||||
elects to apply CC0 to the Work and publicly distribute the Work under its
|
||||
terms, with knowledge of his or her Copyright and Related Rights in the
|
||||
Work and the meaning and intended legal effect of CC0 on those rights.
|
||||
|
||||
1. Copyright and Related Rights. A Work made available under CC0 may be
|
||||
protected by copyright and related or neighboring rights ("Copyright and
|
||||
Related Rights"). Copyright and Related Rights include, but are not
|
||||
limited to, the following:
|
||||
|
||||
i. the right to reproduce, adapt, distribute, perform, display,
|
||||
communicate, and translate a Work;
|
||||
ii. moral rights retained by the original author(s) and/or performer(s);
|
||||
iii. publicity and privacy rights pertaining to a person's image or
|
||||
likeness depicted in a Work;
|
||||
iv. rights protecting against unfair competition in regards to a Work,
|
||||
subject to the limitations in paragraph 4(a), below;
|
||||
v. rights protecting the extraction, dissemination, use and reuse of data
|
||||
in a Work;
|
||||
vi. database rights (such as those arising under Directive 96/9/EC of the
|
||||
European Parliament and of the Council of 11 March 1996 on the legal
|
||||
protection of databases, and under any national implementation
|
||||
thereof, including any amended or successor version of such
|
||||
directive); and
|
||||
vii. other similar, equivalent or corresponding rights throughout the
|
||||
world based on applicable law or treaty, and any national
|
||||
implementations thereof.
|
||||
|
||||
2. Waiver. To the greatest extent permitted by, but not in contravention
|
||||
of, applicable law, Affirmer hereby overtly, fully, permanently,
|
||||
irrevocably and unconditionally waives, abandons, and surrenders all of
|
||||
Affirmer's Copyright and Related Rights and associated claims and causes
|
||||
of action, whether now known or unknown (including existing as well as
|
||||
future claims and causes of action), in the Work (i) in all territories
|
||||
worldwide, (ii) for the maximum duration provided by applicable law or
|
||||
treaty (including future time extensions), (iii) in any current or future
|
||||
medium and for any number of copies, and (iv) for any purpose whatsoever,
|
||||
including without limitation commercial, advertising or promotional
|
||||
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
|
||||
member of the public at large and to the detriment of Affirmer's heirs and
|
||||
successors, fully intending that such Waiver shall not be subject to
|
||||
revocation, rescission, cancellation, termination, or any other legal or
|
||||
equitable action to disrupt the quiet enjoyment of the Work by the public
|
||||
as contemplated by Affirmer's express Statement of Purpose.
|
||||
|
||||
3. Public License Fallback. Should any part of the Waiver for any reason
|
||||
be judged legally invalid or ineffective under applicable law, then the
|
||||
Waiver shall be preserved to the maximum extent permitted taking into
|
||||
account Affirmer's express Statement of Purpose. In addition, to the
|
||||
extent the Waiver is so judged Affirmer hereby grants to each affected
|
||||
person a royalty-free, non transferable, non sublicensable, non exclusive,
|
||||
irrevocable and unconditional license to exercise Affirmer's Copyright and
|
||||
Related Rights in the Work (i) in all territories worldwide, (ii) for the
|
||||
maximum duration provided by applicable law or treaty (including future
|
||||
time extensions), (iii) in any current or future medium and for any number
|
||||
of copies, and (iv) for any purpose whatsoever, including without
|
||||
limitation commercial, advertising or promotional purposes (the
|
||||
"License"). The License shall be deemed effective as of the date CC0 was
|
||||
applied by Affirmer to the Work. Should any part of the License for any
|
||||
reason be judged legally invalid or ineffective under applicable law, such
|
||||
partial invalidity or ineffectiveness shall not invalidate the remainder
|
||||
of the License, and in such case Affirmer hereby affirms that he or she
|
||||
will not (i) exercise any of his or her remaining Copyright and Related
|
||||
Rights in the Work or (ii) assert any associated claims and causes of
|
||||
action with respect to the Work, in either case contrary to Affirmer's
|
||||
express Statement of Purpose.
|
||||
|
||||
4. Limitations and Disclaimers.
|
||||
|
||||
a. No trademark or patent rights held by Affirmer are waived, abandoned,
|
||||
surrendered, licensed or otherwise affected by this document.
|
||||
b. Affirmer offers the Work as-is and makes no representations or
|
||||
warranties of any kind concerning the Work, express, implied,
|
||||
statutory or otherwise, including without limitation warranties of
|
||||
title, merchantability, fitness for a particular purpose, non
|
||||
infringement, or the absence of latent or other defects, accuracy, or
|
||||
the present or absence of errors, whether or not discoverable, all to
|
||||
the greatest extent permissible under applicable law.
|
||||
c. Affirmer disclaims responsibility for clearing rights of other persons
|
||||
that may apply to the Work or any use thereof, including without
|
||||
limitation any person's Copyright and Related Rights in the Work.
|
||||
Further, Affirmer disclaims responsibility for obtaining any necessary
|
||||
consents, permissions or other rights required for any use of the
|
||||
Work.
|
||||
d. Affirmer understands and acknowledges that Creative Commons is not a
|
||||
party to this document and has no duty or obligation with respect to
|
||||
this CC0 or use of the Work.
|
||||
src/crypto/randomx/blake2/avx2/blake2.h (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
#ifndef BLAKE2_AVX2_BLAKE2_H
|
||||
#define BLAKE2_AVX2_BLAKE2_H
|
||||
|
||||
#if !defined(__cplusplus) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L)
|
||||
#if defined(_MSC_VER)
|
||||
#define INLINE __inline
|
||||
#elif defined(__GNUC__)
|
||||
#define INLINE __inline__
|
||||
#else
|
||||
#define INLINE
|
||||
#endif
|
||||
#else
|
||||
#define INLINE inline
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#define ALIGN(x) __declspec(align(x))
|
||||
#else
|
||||
#define ALIGN(x) __attribute__((aligned(x)))
|
||||
#endif
|
||||
|
||||
enum blake2s_constant {
|
||||
BLAKE2S_BLOCKBYTES = 64,
|
||||
BLAKE2S_OUTBYTES = 32,
|
||||
BLAKE2S_KEYBYTES = 32,
|
||||
BLAKE2S_SALTBYTES = 8,
|
||||
BLAKE2S_PERSONALBYTES = 8
|
||||
};
|
||||
|
||||
enum blake2b_constant {
|
||||
BLAKE2B_BLOCKBYTES = 128,
|
||||
BLAKE2B_OUTBYTES = 64,
|
||||
BLAKE2B_KEYBYTES = 64,
|
||||
BLAKE2B_SALTBYTES = 16,
|
||||
BLAKE2B_PERSONALBYTES = 16
|
||||
};
|
||||
|
||||
#endif
|
||||
src/crypto/randomx/blake2/avx2/blake2b-common.h (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
#ifndef BLAKE2_AVX2_BLAKE2B_COMMON_H
|
||||
#define BLAKE2_AVX2_BLAKE2B_COMMON_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
#include "blake2.h"
|
||||
|
||||
#define LOAD128(p) _mm_load_si128( (__m128i *)(p) )
|
||||
#define STORE128(p,r) _mm_store_si128((__m128i *)(p), r)
|
||||
|
||||
#define LOADU128(p) _mm_loadu_si128( (__m128i *)(p) )
|
||||
#define STOREU128(p,r) _mm_storeu_si128((__m128i *)(p), r)
|
||||
|
||||
#define LOAD(p) _mm256_load_si256( (__m256i *)(p) )
|
||||
#define STORE(p,r) _mm256_store_si256((__m256i *)(p), r)
|
||||
|
||||
#define LOADU(p) _mm256_loadu_si256( (__m256i *)(p) )
|
||||
#define STOREU(p,r) _mm256_storeu_si256((__m256i *)(p), r)
|
||||
|
||||
static INLINE uint64_t LOADU64(void const * p) {
|
||||
uint64_t v;
|
||||
memcpy(&v, p, sizeof v);
|
||||
return v;
|
||||
}
|
||||
|
||||
#define ROTATE16 _mm256_setr_epi8( 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, \
|
||||
2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9 )
|
||||
|
||||
#define ROTATE24 _mm256_setr_epi8( 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, \
|
||||
3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10 )
|
||||
|
||||
#define ADD(a, b) _mm256_add_epi64(a, b)
|
||||
#define SUB(a, b) _mm256_sub_epi64(a, b)
|
||||
|
||||
#define XOR(a, b) _mm256_xor_si256(a, b)
|
||||
#define AND(a, b) _mm256_and_si256(a, b)
|
||||
#define OR(a, b) _mm256_or_si256(a, b)
|
||||
|
||||
#define ROT32(x) _mm256_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1))
|
||||
#define ROT24(x) _mm256_shuffle_epi8((x), ROTATE24)
|
||||
#define ROT16(x) _mm256_shuffle_epi8((x), ROTATE16)
|
||||
#define ROT63(x) _mm256_or_si256(_mm256_srli_epi64((x), 63), ADD((x), (x)))
|
||||
|
||||
#endif
|
||||
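A side note on the helpers above: ROT63 builds a 64-bit rotate without a shift-left, since `x + x` equals `x << 1` for unsigned 64-bit lanes. A scalar model of that identity (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the ROT63 macro above: (x >> 63) | (x + x) is a rotate-left by 1,
 * i.e. a rotate-right by 63, because x + x == x << 1 for uint64_t. */
static uint64_t rot63(uint64_t x)
{
    return (x >> 63) | (x + x);
}

int main(void)
{
    const uint64_t x = 0x8000000000000001ULL;
    /* prints 8000000000000001 -> 0000000000000003 */
    printf("%016llx -> %016llx\n", (unsigned long long)x, (unsigned long long)rot63(x));
    return 0;
}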
340
src/crypto/randomx/blake2/avx2/blake2b-load-avx2.h
Normal file
@@ -0,0 +1,340 @@
|
||||
#ifndef BLAKE2_AVX2_BLAKE2B_LOAD_AVX2_H
|
||||
#define BLAKE2_AVX2_BLAKE2B_LOAD_AVX2_H
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_0_1(b0) do { \
|
||||
t0 = _mm256_unpacklo_epi64(m0, m1); \
|
||||
t1 = _mm256_unpacklo_epi64(m2, m3); \
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_0_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m0, m1);\
|
||||
t1 = _mm256_unpackhi_epi64(m2, m3);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_0_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m7, m4);\
|
||||
t1 = _mm256_unpacklo_epi64(m5, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_0_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m7, m4);\
|
||||
t1 = _mm256_unpackhi_epi64(m5, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_1_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m7, m2);\
|
||||
t1 = _mm256_unpackhi_epi64(m4, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_1_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m5, m4);\
|
||||
t1 = _mm256_alignr_epi8(m3, m7, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_1_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m2, m0);\
|
||||
t1 = _mm256_blend_epi32(m5, m0, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_1_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m6, m1, 8);\
|
||||
t1 = _mm256_blend_epi32(m3, m1, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_2_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m6, m5, 8);\
|
||||
t1 = _mm256_unpackhi_epi64(m2, m7);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_2_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m4, m0);\
|
||||
t1 = _mm256_blend_epi32(m6, m1, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_2_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m5, m4, 8);\
|
||||
t1 = _mm256_unpackhi_epi64(m1, m3);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_2_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m2, m7);\
|
||||
t1 = _mm256_blend_epi32(m0, m3, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_3_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m3, m1);\
|
||||
t1 = _mm256_unpackhi_epi64(m6, m5);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_3_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m4, m0);\
|
||||
t1 = _mm256_unpacklo_epi64(m6, m7);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_3_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m1, m7, 8);\
|
||||
t1 = _mm256_shuffle_epi32(m2, _MM_SHUFFLE(1,0,3,2));\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_3_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m4, m3);\
|
||||
t1 = _mm256_unpacklo_epi64(m5, m0);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_4_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m4, m2);\
|
||||
t1 = _mm256_unpacklo_epi64(m1, m5);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_4_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_blend_epi32(m3, m0, 0x33);\
|
||||
t1 = _mm256_blend_epi32(m7, m2, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_4_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m7, m1, 8);\
|
||||
t1 = _mm256_alignr_epi8(m3, m5, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_4_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m6, m0);\
|
||||
t1 = _mm256_unpacklo_epi64(m6, m4);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_5_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m1, m3);\
|
||||
t1 = _mm256_unpacklo_epi64(m0, m4);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_5_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m6, m5);\
|
||||
t1 = _mm256_unpackhi_epi64(m5, m1);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_5_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m2, m0, 8);\
|
||||
t1 = _mm256_unpackhi_epi64(m3, m7);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_5_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m4, m6);\
|
||||
t1 = _mm256_alignr_epi8(m7, m2, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_6_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_blend_epi32(m0, m6, 0x33);\
|
||||
t1 = _mm256_unpacklo_epi64(m7, m2);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_6_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m2, m7);\
|
||||
t1 = _mm256_alignr_epi8(m5, m6, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_6_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m4, m0);\
|
||||
t1 = _mm256_blend_epi32(m4, m3, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_6_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m5, m3);\
|
||||
t1 = _mm256_shuffle_epi32(m1, _MM_SHUFFLE(1,0,3,2));\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_7_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m6, m3);\
|
||||
t1 = _mm256_blend_epi32(m1, m6, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_7_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m7, m5, 8);\
|
||||
t1 = _mm256_unpackhi_epi64(m0, m4);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_7_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_blend_epi32(m2, m1, 0x33);\
|
||||
t1 = _mm256_alignr_epi8(m4, m7, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_7_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m5, m0);\
|
||||
t1 = _mm256_unpacklo_epi64(m2, m3);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_8_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m3, m7);\
|
||||
t1 = _mm256_alignr_epi8(m0, m5, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_8_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m7, m4);\
|
||||
t1 = _mm256_alignr_epi8(m4, m1, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_8_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m5, m6);\
|
||||
t1 = _mm256_unpackhi_epi64(m6, m0);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_8_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m1, m2, 8);\
|
||||
t1 = _mm256_alignr_epi8(m2, m3, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_9_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m5, m4);\
|
||||
t1 = _mm256_unpackhi_epi64(m3, m0);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_9_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m1, m2);\
|
||||
t1 = _mm256_blend_epi32(m2, m3, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_9_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m6, m7);\
|
||||
t1 = _mm256_unpackhi_epi64(m4, m1);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_9_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_blend_epi32(m5, m0, 0x33);\
|
||||
t1 = _mm256_unpacklo_epi64(m7, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_10_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m0, m1);\
|
||||
t1 = _mm256_unpacklo_epi64(m2, m3);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_10_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m0, m1);\
|
||||
t1 = _mm256_unpackhi_epi64(m2, m3);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_10_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m7, m4);\
|
||||
t1 = _mm256_unpacklo_epi64(m5, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_10_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m7, m4);\
|
||||
t1 = _mm256_unpackhi_epi64(m5, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_11_1(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m7, m2);\
|
||||
t1 = _mm256_unpackhi_epi64(m4, m6);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_11_2(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpacklo_epi64(m5, m4);\
|
||||
t1 = _mm256_alignr_epi8(m3, m7, 8);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_11_3(b0) \
|
||||
do { \
|
||||
t0 = _mm256_unpackhi_epi64(m2, m0);\
|
||||
t1 = _mm256_blend_epi32(m5, m0, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_LOAD_MSG_11_4(b0) \
|
||||
do { \
|
||||
t0 = _mm256_alignr_epi8(m6, m1, 8);\
|
||||
t1 = _mm256_blend_epi32(m3, m1, 0x33);\
|
||||
b0 = _mm256_blend_epi32(t0, t1, 0xF0);\
|
||||
} while(0)
|
||||
|
||||
#endif
|
||||
|
||||
16
src/crypto/randomx/blake2/avx2/blake2b.h
Normal file
@@ -0,0 +1,16 @@
#ifndef BLAKE2_AVX2_BLAKE2B_H
#define BLAKE2_AVX2_BLAKE2B_H

#include <stddef.h>

#if defined(__cplusplus)
extern "C" {
#endif

int blake2b_avx2(void* out, size_t outlen, const void* in, size_t inlen);

#if defined(__cplusplus)
}
#endif

#endif
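For readers who want to try the new one-shot API declared above in isolation, a hypothetical standalone caller could look like this (the build setup is an assumption: blake2b_avx2.c must be compiled with AVX2 enabled and run on an AVX2-capable CPU):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include "crypto/randomx/blake2/avx2/blake2b.h"

int main(void)
{
    const char msg[] = "abc";
    uint8_t hash[64];   /* BLAKE2b-512 digest */

    if (blake2b_avx2(hash, sizeof(hash), msg, sizeof(msg) - 1) != 0) {
        return 1;
    }

    for (size_t i = 0; i < sizeof(hash); ++i) {
        printf("%02x", hash[i]);
    }
    printf("\n");
    return 0;
}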
141
src/crypto/randomx/blake2/avx2/blake2b_avx2.c
Normal file
@@ -0,0 +1,141 @@
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "blake2.h"
|
||||
#include "blake2b.h"
|
||||
#include "blake2b-common.h"
|
||||
|
||||
ALIGN(64) static const uint64_t blake2b_IV[8] = {
|
||||
UINT64_C(0x6A09E667F3BCC908), UINT64_C(0xBB67AE8584CAA73B),
|
||||
UINT64_C(0x3C6EF372FE94F82B), UINT64_C(0xA54FF53A5F1D36F1),
|
||||
UINT64_C(0x510E527FADE682D1), UINT64_C(0x9B05688C2B3E6C1F),
|
||||
UINT64_C(0x1F83D9ABFB41BD6B), UINT64_C(0x5BE0CD19137E2179),
|
||||
};
|
||||
|
||||
#define BLAKE2B_G1_V1(a, b, c, d, m) do { \
|
||||
a = ADD(a, m); \
|
||||
a = ADD(a, b); d = XOR(d, a); d = ROT32(d); \
|
||||
c = ADD(c, d); b = XOR(b, c); b = ROT24(b); \
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_G2_V1(a, b, c, d, m) do { \
|
||||
a = ADD(a, m); \
|
||||
a = ADD(a, b); d = XOR(d, a); d = ROT16(d); \
|
||||
c = ADD(c, d); b = XOR(b, c); b = ROT63(b); \
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_DIAG_V1(a, b, c, d) do { \
|
||||
a = _mm256_permute4x64_epi64(a, _MM_SHUFFLE(2,1,0,3)); \
|
||||
d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(1,0,3,2)); \
|
||||
c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(0,3,2,1)); \
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_UNDIAG_V1(a, b, c, d) do { \
|
||||
a = _mm256_permute4x64_epi64(a, _MM_SHUFFLE(0,3,2,1)); \
|
||||
d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(1,0,3,2)); \
|
||||
c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(2,1,0,3)); \
|
||||
} while(0)
|
||||
|
||||
#include "blake2b-load-avx2.h"
|
||||
|
||||
#define BLAKE2B_ROUND_V1(a, b, c, d, r, m) do { \
|
||||
__m256i b0; \
|
||||
BLAKE2B_LOAD_MSG_ ##r ##_1(b0); \
|
||||
BLAKE2B_G1_V1(a, b, c, d, b0); \
|
||||
BLAKE2B_LOAD_MSG_ ##r ##_2(b0); \
|
||||
BLAKE2B_G2_V1(a, b, c, d, b0); \
|
||||
BLAKE2B_DIAG_V1(a, b, c, d); \
|
||||
BLAKE2B_LOAD_MSG_ ##r ##_3(b0); \
|
||||
BLAKE2B_G1_V1(a, b, c, d, b0); \
|
||||
BLAKE2B_LOAD_MSG_ ##r ##_4(b0); \
|
||||
BLAKE2B_G2_V1(a, b, c, d, b0); \
|
||||
BLAKE2B_UNDIAG_V1(a, b, c, d); \
|
||||
} while(0)
|
||||
|
||||
#define BLAKE2B_ROUNDS_V1(a, b, c, d, m) do { \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 0, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 1, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 2, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 3, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 4, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 5, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 6, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 7, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 8, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 9, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 10, (m)); \
|
||||
BLAKE2B_ROUND_V1(a, b, c, d, 11, (m)); \
|
||||
} while(0)
|
||||
|
||||
#define DECLARE_MESSAGE_WORDS(m) \
|
||||
const __m256i m0 = _mm256_broadcastsi128_si256(LOADU128((m) + 0)); \
|
||||
const __m256i m1 = _mm256_broadcastsi128_si256(LOADU128((m) + 16)); \
|
||||
const __m256i m2 = _mm256_broadcastsi128_si256(LOADU128((m) + 32)); \
|
||||
const __m256i m3 = _mm256_broadcastsi128_si256(LOADU128((m) + 48)); \
|
||||
const __m256i m4 = _mm256_broadcastsi128_si256(LOADU128((m) + 64)); \
|
||||
const __m256i m5 = _mm256_broadcastsi128_si256(LOADU128((m) + 80)); \
|
||||
const __m256i m6 = _mm256_broadcastsi128_si256(LOADU128((m) + 96)); \
|
||||
const __m256i m7 = _mm256_broadcastsi128_si256(LOADU128((m) + 112)); \
|
||||
__m256i t0, t1;
|
||||
|
||||
#define BLAKE2B_COMPRESS_V1(a, b, m, t0, t1, f0, f1) do { \
|
||||
DECLARE_MESSAGE_WORDS(m) \
|
||||
const __m256i iv0 = a; \
|
||||
const __m256i iv1 = b; \
|
||||
__m256i c = LOAD(&blake2b_IV[0]); \
|
||||
__m256i d = XOR( \
|
||||
LOAD(&blake2b_IV[4]), \
|
||||
_mm256_set_epi64x(f1, f0, t1, t0) \
|
||||
); \
|
||||
BLAKE2B_ROUNDS_V1(a, b, c, d, m); \
|
||||
a = XOR(a, c); \
|
||||
b = XOR(b, d); \
|
||||
a = XOR(a, iv0); \
|
||||
b = XOR(b, iv1); \
|
||||
} while(0)
|
||||
|
||||
int blake2b_avx2(void* out_ptr, size_t outlen, const void* in_ptr, size_t inlen) {
|
||||
const __m256i parameter_block = _mm256_set_epi64x(0, 0, 0, 0x01010000UL | (uint32_t)outlen);
|
||||
ALIGN(64) uint8_t buffer[BLAKE2B_BLOCKBYTES];
|
||||
__m256i a = XOR(LOAD(&blake2b_IV[0]), parameter_block);
|
||||
__m256i b = LOAD(&blake2b_IV[4]);
|
||||
uint64_t counter = 0;
|
||||
const uint8_t* in = (const uint8_t*)in_ptr;
|
||||
do {
|
||||
const uint64_t flag = (inlen <= BLAKE2B_BLOCKBYTES) ? -1 : 0;
|
||||
size_t block_size = BLAKE2B_BLOCKBYTES;
|
||||
if(inlen < BLAKE2B_BLOCKBYTES) {
|
||||
memcpy(buffer, in, inlen);
|
||||
memset(buffer + inlen, 0, BLAKE2B_BLOCKBYTES - inlen);
|
||||
block_size = inlen;
|
||||
in = buffer;
|
||||
}
|
||||
counter += block_size;
|
||||
BLAKE2B_COMPRESS_V1(a, b, in, counter, 0, flag, 0);
|
||||
inlen -= block_size;
|
||||
in += block_size;
|
||||
} while(inlen > 0);
|
||||
|
||||
uint8_t* out = (uint8_t*)out_ptr;
|
||||
|
||||
switch (outlen) {
|
||||
case 64:
|
||||
STOREU(out + 32, b);
|
||||
// Fall through
|
||||
|
||||
case 32:
|
||||
STOREU(out, a);
|
||||
break;
|
||||
|
||||
default:
|
||||
STOREU(buffer, a);
|
||||
STOREU(buffer + 32, b);
|
||||
memcpy(out, buffer, outlen);
|
||||
break;
|
||||
}
|
||||
|
||||
_mm256_zeroupper();
|
||||
return 0;
|
||||
}
|
||||
@@ -92,7 +92,12 @@ extern "C" {
int rx_blake2b_final(blake2b_state *S, void *out, size_t outlen);

/* Simple API */
int rx_blake2b(void *out, size_t outlen, const void *in, size_t inlen);
void rx_blake2b_compress_integer(blake2b_state * S, const uint8_t * block);
void rx_blake2b_compress_sse41(blake2b_state * S, const uint8_t * block);
int rx_blake2b_default(void* out, size_t outlen, const void* in, size_t inlen);

extern void (*rx_blake2b_compress)(blake2b_state * S, const uint8_t * block);
extern int (*rx_blake2b)(void* out, size_t outlen, const void* in, size_t inlen);

/* Argon2 Team - Begin Code */
int rxa2_blake2b_long(void *out, size_t outlen, const void *in, size_t inlen);
@@ -179,7 +179,7 @@ int rx_blake2b_init_key(blake2b_state *S, size_t outlen, const void *key, size_t
    return 0;
}

static void rx_blake2b_compress_integer(blake2b_state *S, const uint8_t *block) {
void rx_blake2b_compress_integer(blake2b_state *S, const uint8_t *block) {
    uint64_t m[16];
    uint64_t v[16];
    unsigned int i, r;
@@ -237,21 +237,6 @@ static void rx_blake2b_compress_integer(blake2b_state *S, const uint8_t *block)
#undef ROUND
}

#if defined(XMRIG_FEATURE_SSE4_1)

uint32_t rx_blake2b_use_sse41 = 0;
void rx_blake2b_compress_sse41(blake2b_state* S, const uint8_t* block);

#define rx_blake2b_compress(S, block) \
    if (rx_blake2b_use_sse41) \
        rx_blake2b_compress_sse41(S, block); \
    else \
        rx_blake2b_compress_integer(S, block);

#else
#define rx_blake2b_compress(S, block) rx_blake2b_compress_integer(S, block);
#endif

int rx_blake2b_update(blake2b_state *S, const void *in, size_t inlen) {
    const uint8_t *pin = (const uint8_t *)in;

@@ -322,7 +307,7 @@ int rx_blake2b_final(blake2b_state *S, void *out, size_t outlen) {
    return 0;
}

int rx_blake2b(void *out, size_t outlen, const void *in, size_t inlen) {
int rx_blake2b_default(void *out, size_t outlen, const void *in, size_t inlen) {
    blake2b_state S;
    int ret = -1;

@@ -167,6 +167,11 @@ namespace randomx {

    static const uint8_t* NOPX[] = { NOP1, NOP2, NOP3, NOP4, NOP5, NOP6, NOP7, NOP8, NOP9 };

    static const uint8_t NOP13[] = { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0x44, 0x00, 0x00 };
    static const uint8_t NOP14[] = { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 };
    static const uint8_t NOP25[] = { 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
    static const uint8_t NOP26[] = { 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };

    static const uint8_t JMP_ALIGN_PREFIX[14][16] = {
        {},
        {0x2E},
@@ -257,6 +262,10 @@ namespace randomx {
            // AVX2 init is faster on Zen3
            initDatasetAVX2 = true;
            break;
        case xmrig::ICpuInfo::ARCH_ZEN4:
            // AVX2 init is slower on Zen4
            initDatasetAVX2 = false;
            break;
        }
    }
}
@@ -407,7 +416,7 @@ namespace randomx {
        *(uint32_t*)(code + codePos + 14) = RandomX_CurrentConfig.ScratchpadL3Mask64_Calculated;
        if (hasAVX) {
            uint32_t* p = (uint32_t*)(code + codePos + 61);
            *p = (*p & 0xFF000000U) | 0x0077F8C5U;
            *p = (*p & 0xFF000000U) | 0x0077F8C5U; // vzeroupper
        }

#   ifdef XMRIG_FIX_RYZEN
@@ -419,7 +428,8 @@ namespace randomx {

        memcpy(imul_rcp_storage - 34, &pcfg.eMask, sizeof(pcfg.eMask));
        codePos = codePosFirst;
        prevCFROUND = 0;
        prevCFROUND = -1;
        prevFPOperation = -1;

        //mark all registers as used
        uint64_t* r = (uint64_t*)registerUsage;
@@ -1155,7 +1165,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint64_t dst = instr.dst % RegisterCountFlt;
        const uint64_t src = instr.src % RegisterCountFlt;
@@ -1170,7 +1180,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint32_t src = instr.src % RegistersCount;
        const uint32_t dst = instr.dst % RegisterCountFlt;
@@ -1187,7 +1197,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint64_t dst = instr.dst % RegisterCountFlt;
        const uint64_t src = instr.src % RegisterCountFlt;
@@ -1202,7 +1212,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint32_t src = instr.src % RegistersCount;
        const uint32_t dst = instr.dst % RegisterCountFlt;
@@ -1230,7 +1240,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint64_t dst = instr.dst % RegisterCountFlt;
        const uint64_t src = instr.src % RegisterCountFlt;
@@ -1245,7 +1255,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint32_t src = instr.src % RegistersCount;
        const uint64_t dst = instr.dst % RegisterCountFlt;
@@ -1272,7 +1282,7 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;
        prevFPOperation = pos;

        const uint32_t dst = instr.dst % RegisterCountFlt;

@@ -1283,21 +1293,18 @@ namespace randomx {

    void JitCompilerX86::h_CFROUND(const Instruction& instr) {
        uint8_t* const p = code;
        uint32_t pos = prevCFROUND;
        int32_t t = prevCFROUND;

        if (pos) {
        if (t > prevFPOperation) {
            if (vm_flags & RANDOMX_FLAG_AMD) {
                memcpy(p + pos + 0, NOP9, 9);
                memcpy(p + pos + 9, NOP9, 9);
                memcpy(p + pos + 18, NOP8, 8);
                memcpy(p + t, NOP26, 26);
            }
            else {
                memcpy(p + pos + 0, NOP8, 8);
                memcpy(p + pos + 8, NOP6, 6);
                memcpy(p + t, NOP14, 14);
            }
        }

        pos = codePos;
        uint32_t pos = codePos;
        prevCFROUND = pos;

        const uint32_t src = instr.src % RegistersCount;
@@ -1322,21 +1329,18 @@ namespace randomx {

    void JitCompilerX86::h_CFROUND_BMI2(const Instruction& instr) {
        uint8_t* const p = code;
        uint32_t pos = prevCFROUND;
        int32_t t = prevCFROUND;

        if (pos) {
        if (t > prevFPOperation) {
            if (vm_flags & RANDOMX_FLAG_AMD) {
                memcpy(p + pos + 0, NOP9, 9);
                memcpy(p + pos + 9, NOP9, 9);
                memcpy(p + pos + 18, NOP7, 7);
                memcpy(p + t, NOP25, 25);
            }
            else {
                memcpy(p + pos + 0, NOP8, 8);
                memcpy(p + pos + 8, NOP5, 5);
                memcpy(p + t, NOP13, 13);
            }
        }

        pos = codePos;
        uint32_t pos = codePos;
        prevCFROUND = pos;

        const uint64_t src = instr.src % RegistersCount;
@@ -1363,10 +1367,15 @@ namespace randomx {
        uint8_t* const p = code;
        uint32_t pos = codePos;

        prevCFROUND = 0;

        const int reg = instr.dst % RegistersCount;
        int32_t jmp_offset = registerUsage[reg] - (pos + 16);
        int32_t jmp_offset = registerUsage[reg];

        // if it jumps over the previous FP instruction that uses rounding, treat it as if FP instruction happened now
        if (jmp_offset <= prevFPOperation) {
            prevFPOperation = pos;
        }

        jmp_offset -= pos + 16;

        if (jccErratum) {
            const uint32_t branch_begin = static_cast<uint32_t>(pos + 7);
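The CFROUND hunks above implement the optimization referenced by #3120: a CFROUND only matters if a floating-point instruction actually executes after it, so when the JIT emits a newer CFROUND and no FP operation has been seen since the previous one, the previous, now-dead CFROUND sequence is overwritten with NOPs. A simplified sketch of that bookkeeping (illustration only; the real JIT patches the exact multi-byte NOP sequences NOP13/NOP14/NOP25/NOP26 shown above):

#include <stdint.h>
#include <string.h>

#define CFROUND_LEN 14                 /* assumed fixed encoding length for this sketch */

static int32_t prevCFROUND     = -1;   /* code offset of the last emitted CFROUND, -1 = none */
static int32_t prevFPOperation = -1;   /* code offset of the last FP instruction             */

/* An FP instruction consumes the current rounding mode. */
static void note_fp_operation(int32_t pos)
{
    prevFPOperation = pos;
}

/* Emit a CFROUND at offset 'pos'; if no FP instruction ran since the previous
 * CFROUND, that earlier rounding-mode change was dead and is padded with NOPs. */
static void emit_cfround(uint8_t *code, int32_t pos)
{
    if (prevCFROUND > prevFPOperation) {
        memset(code + prevCFROUND, 0x90, CFROUND_LEN);   /* single-byte NOPs in the sketch */
    }
    prevCFROUND = pos;
    /* ... write the real CFROUND instruction sequence at code + pos ... */
}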
@@ -89,7 +89,8 @@ namespace randomx {
        uint32_t codePos = 0;
        uint32_t codePosFirst = 0;
        uint32_t vm_flags = 0;
        uint32_t prevCFROUND = 0;
        int32_t prevCFROUND = -1;
        int32_t prevFPOperation = -1;

#   ifdef XMRIG_FIX_RYZEN
        std::pair<const void*, const void*> mainLoopBounds;
@@ -18,6 +18,7 @@
 */

#include "crypto/rx/Rx.h"
#include "backend/cpu/Cpu.h"
#include "backend/cpu/CpuConfig.h"
#include "backend/cpu/CpuThreads.h"
#include "crypto/rx/RxConfig.h"
@@ -84,6 +85,16 @@ void xmrig::Rx::init(IRxListener *listener)
}


#include "crypto/randomx/blake2/blake2.h"
#if defined(XMRIG_FEATURE_AVX2)
#include "crypto/randomx/blake2/avx2/blake2b.h"
#endif


void (*rx_blake2b_compress)(blake2b_state* S, const uint8_t * block) = rx_blake2b_compress_integer;
int (*rx_blake2b)(void* out, size_t outlen, const void* in, size_t inlen) = rx_blake2b_default;


template<typename T>
bool xmrig::Rx::init(const T &seed, const RxConfig &config, const CpuConfig &cpu)
{
@@ -133,6 +144,19 @@ bool xmrig::Rx::init(const T &seed, const RxConfig &config, const CpuConfig &cpu
    if (!cpu.isHwAES()) {
        SelectSoftAESImpl(cpu.threads().get(seed.algorithm()).count());
    }

#   if defined(XMRIG_FEATURE_SSE4_1)
    if (Cpu::info()->has(ICpuInfo::FLAG_SSE41)) {
        rx_blake2b_compress = rx_blake2b_compress_sse41;
    }
#   endif

#   if defined(XMRIG_FEATURE_AVX2)
    if (Cpu::info()->has(ICpuInfo::FLAG_AVX2)) {
        rx_blake2b = blake2b_avx2;
    }
#   endif

    osInitialized = true;
}

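Taken together with the blake2.h hunk earlier, the Rx.cpp change binds rx_blake2b and rx_blake2b_compress to the fastest available implementation once at initialization and calls through function pointers afterwards. A minimal, self-contained sketch of that dispatch pattern (simplified names; the feature probe here uses a GCC/Clang builtin instead of xmrig's Cpu::info()->has(...)):

#include <stddef.h>
#include <stdint.h>

/* Two interchangeable implementations sharing one signature. */
static int blake2b_portable(void *out, size_t outlen, const void *in, size_t inlen) { /* ... */ return 0; }
static int blake2b_avx2_impl(void *out, size_t outlen, const void *in, size_t inlen) { /* ... */ return 0; }

/* Global function pointer, defaulting to the portable path (like rx_blake2b_default). */
static int (*blake2b_fn)(void *, size_t, const void *, size_t) = blake2b_portable;

static int cpu_has_avx2(void)
{
#if defined(__GNUC__)
    return __builtin_cpu_supports("avx2");   /* runtime CPUID probe in GCC/Clang */
#else
    return 0;                                /* conservative fallback */
#endif
}

/* Bind once at startup; every later hash call goes through blake2b_fn. */
static void select_blake2b_impl(void)
{
    if (cpu_has_avx2()) {
        blake2b_fn = blake2b_avx2_impl;
    }
}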
@@ -58,12 +58,13 @@ static const std::array<const char *, RxConfig::ModeMax> modeNames = { "auto", "


#ifdef XMRIG_FEATURE_MSR
constexpr size_t kMsrArraySize = 5;
constexpr size_t kMsrArraySize = 6;

static const std::array<MsrItems, kMsrArraySize> msrPresets = {
    MsrItems(),
    MsrItems{{ 0xC0011020, 0ULL }, { 0xC0011021, 0x40ULL, ~0x20ULL }, { 0xC0011022, 0x1510000ULL }, { 0xC001102b, 0x2000cc16ULL }},
    MsrItems{{ 0xC0011020, 0x0004480000000000ULL }, { 0xC0011021, 0x001c000200000040ULL, ~0x20ULL }, { 0xC0011022, 0xc000000401500000ULL }, { 0xC001102b, 0x2000cc14ULL }},
    MsrItems{{ 0xC0011020, 0x0004480000000000ULL }, { 0xC0011021, 0x001c000200000040ULL, ~0x20ULL }, { 0xC0011022, 0xc000000401570000ULL }, { 0xC001102b, 0x2000cc10ULL }},
    MsrItems{{ 0xC0011020, 0x0004400000000000ULL }, { 0xC0011021, 0x0004000000000040ULL, ~0x20ULL }, { 0xC0011022, 0x8680000401570000ULL }, { 0xC001102b, 0x2040cc10ULL }},
    MsrItems{{ 0x1a4, 0xf }},
    MsrItems()
};

@@ -25,11 +25,6 @@
#include "crypto/rx/RxVm.h"


#if defined(XMRIG_FEATURE_SSE4_1)
extern "C" uint32_t rx_blake2b_use_sse41;
#endif


randomx_vm *xmrig::RxVm::create(RxDataset *dataset, uint8_t *scratchpad, bool softAes, const Assembly &assembly, uint32_t node)
{
    int flags = 0;
@@ -51,10 +46,6 @@ randomx_vm *xmrig::RxVm::create(RxDataset *dataset, uint8_t *scratchpad, bool so
        flags |= RANDOMX_FLAG_AMD;
    }

#   if defined(XMRIG_FEATURE_SSE4_1)
    rx_blake2b_use_sse41 = Cpu::info()->has(ICpuInfo::FLAG_SSE41) ? 1 : 0;
#   endif

    return randomx_create_vm(static_cast<randomx_flags>(flags), !dataset->get() ? dataset->cache()->get() : nullptr, dataset->get(), scratchpad, node);
}

18
src/donate.h
@@ -1,6 +1,6 @@
/* XMRig
 * Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 * Copyright (c) 2018-2022 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2022 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -23,22 +23,22 @@
/*
 * Dev donation.
 *
 * Percentage of your hashing power that you want to donate to the developer, can be 0 if you don't want to do that.
 * Percentage of your hashing power that you want to donate to the developer can be 0% but supports XMRig Development.
 *
 * Example of how it works for the setting of 1%:
 * You miner will mine into your usual pool for random time (in range from 49.5 to 148.5 minutes),
 * Your miner will mine into your usual pool for a random time (in a range from 49.5 to 148.5 minutes),
 * then switch to the developer's pool for 1 minute, then switch again to your pool for 99 minutes
 * and then switch again to developer's pool for 1 minute, these rounds will continue until miner working.
 * and then switch again to developer's pool for 1 minute; these rounds will continue until the miner stops.
 *
 * Randomised only first round, to prevent waves on the donation pool.
 * Randomised only on the first round to prevent waves on the donation pool.
 *
 * Switching is instant, and only happens after a successful connection, so you never loose any hashes.
 * Switching is instant and only happens after a successful connection, so you never lose any hashes.
 *
 * If you plan on changing this setting to 0 please consider making a one off donation to my wallet:
 * If you plan on changing donations to 0%, please consider making a one-off donation to my wallet:
 * XMR: 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD
 */
constexpr const int kDefaultDonateLevel = 1;
constexpr const int kMinimumDonateLevel = 1;


#endif /* XMRIG_DONATE_H */
#endif // XMRIG_DONATE_H
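The 1% example in the comment above works out to 1 donated minute per 100 minutes of mining, with only the first user round randomised (49.5-148.5 minutes is 0.5x-1.5x of the 99-minute slice). A quick back-of-the-envelope check (illustration only, not xmrig's scheduler):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const double donate_level = 1.0;                                 /* percent, kDefaultDonateLevel */
    const double cycle_min    = 100.0;                               /* one donation round, minutes  */
    const double donate_min   = cycle_min * donate_level / 100.0;    /* 1 minute                     */
    const double user_min     = cycle_min - donate_min;              /* 99 minutes                   */

    /* Only the first user round is randomised: 0.5x to 1.5x of the 99-minute slice. */
    const double first_round  = user_min * (0.5 + (double)rand() / RAND_MAX);

    printf("first user round : %.1f min (49.5 .. 148.5)\n", first_round);
    printf("steady state     : %.1f min user / %.1f min donated (%.2f%%)\n",
           user_min, donate_min, 100.0 * donate_min / cycle_min);
    return 0;
}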
@@ -1,7 +1,7 @@
/* XMRig
 * Copyright (c) 2002-2006 Hugo Weber <address@hidden>
 * Copyright (c) 2018-2021 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 * Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -17,7 +17,6 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include "hw/dmi/DmiReader.h"
#include "hw/dmi/DmiTools.h"

@@ -91,12 +90,12 @@ bool xmrig::DmiReader::read()
    }

    CFDataRef data = reinterpret_cast<CFDataRef>(IORegistryEntryCreateCFProperty(service, CFSTR("SMBIOS-EPS"), kCFAllocatorDefault, kNilOptions));
    if (!data) {
    if (!data || CFDataGetLength(data) < 0x1f) {
        return false;
    }

    uint8_t buf[0x20]{};
    CFDataGetBytes(data, CFRangeMake(0, sizeof(buf)), buf);
    CFDataGetBytes(data, CFRangeMake(0, sizeof(buf) - 1), buf);
    CFRelease(data);

    auto smb = smbios_decode(buf, m_size, m_version, service);

@@ -31,7 +31,7 @@
#include <unistd.h>
#include <cstdio>

#ifdef __FreeBSD__
#ifdef XMRIG_OS_FREEBSD
#   include <kenv.h>
#endif

@@ -288,7 +288,7 @@ static off_t address_from_efi()
    const char *filename;
    char linebuf[64];
    off_t address = 0;
#   elif defined(__FreeBSD__)
#   elif defined(XMRIG_OS_FREEBSD)
    char addrstr[KENV_MVALLEN + 1];
#   endif

@@ -310,7 +310,7 @@ static off_t address_from_efi()
    fclose(efi_systab);

    return address;
#   elif defined(__FreeBSD__)
#   elif defined(XMRIG_OS_FREEBSD)
    if (kenv(KENV_GET, "hint.smbios.0.mem", addrstr, sizeof(addrstr)) == -1) {
        return EFI_NOT_FOUND;
    }

@@ -1,6 +1,6 @@
/* XMRig
 * Copyright (c) 2018-2022 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2022 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 * Copyright (c) 2018-2023 SChernykh <https://github.com/SChernykh>
 * Copyright (c) 2016-2023 XMRig <https://github.com/xmrig>, <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -22,15 +22,15 @@
#define APP_ID        "xmrig"
#define APP_NAME      "XMRig"
#define APP_DESC      "XMRig miner"
#define APP_VERSION   "6.18.1-dev"
#define APP_VERSION   "6.19.0"
#define APP_DOMAIN    "xmrig.com"
#define APP_SITE      "www.xmrig.com"
#define APP_COPYRIGHT "Copyright (C) 2016-2022 xmrig.com"
#define APP_COPYRIGHT "Copyright (C) 2016-2023 xmrig.com"
#define APP_KIND      "miner"

#define APP_VER_MAJOR  6
#define APP_VER_MINOR  18
#define APP_VER_PATCH  1
#define APP_VER_MINOR  19
#define APP_VER_PATCH  0

#ifdef _MSC_VER
#   if (_MSC_VER >= 1930)
@@ -52,4 +52,4 @@
#   endif
#endif

#endif /* XMRIG_VERSION_H */
#endif // XMRIG_VERSION_H