mirror of https://github.com/xmrig/xmrig.git synced 2025-12-06 23:52:38 -05:00

Compare commits


378 Commits

Author SHA1 Message Date
XMRig
80ae339343 v6.12.2 2021-05-31 12:58:30 +07:00
XMRig
4d87555398 Merge branch 'dev' 2021-05-31 12:57:55 +07:00
xmrig
bef82c5de6 Update CHANGELOG.md 2021-05-30 21:28:28 +07:00
xmrig
b069ad5dd1 Merge pull request #2358 from zzjzxq33/patch-1
Update openssl version to 1.1.1k
2021-05-30 17:53:53 +07:00
xmrig
f6a0646271 Merge pull request #2401 from SChernykh/dev
RandomX: fix broken light mode mining
2021-05-22 18:54:29 +07:00
SChernykh
b5f1a1feae RandomX: fix broken light mode mining
It broke after #2395
2021-05-22 13:49:22 +02:00
XMRig
1ce059da1c Add "argon2/ninja" algorithm alias. 2021-05-22 15:10:50 +07:00
xmrig
2929451ee1 Merge pull request #2398 from SChernykh/dev
RandomX ARMv8: optimized dataset read
2021-05-21 09:58:54 +07:00
SChernykh
94fecb5e92 RandomX ARMv8: optimized dataset read
Break the dependency on readReg2 and readReg3. It should run faster on superscalar and out-of-order CPUs, e.g. the Apple M1.
2021-05-20 21:24:28 +02:00
xmrig
3bfa5ea038 Merge pull request #2395 from SChernykh/dev
RandomX: rewrote dataset read code
2021-05-20 18:58:48 +07:00
SChernykh
ff82ca57f2 RandomX: rewrote dataset read code
Unified code for AMD and Intel
1% faster on Intel
0.15% faster on AMD Ryzen
2021-05-20 12:45:42 +02:00
xmrig
7f7b1fb073 Merge pull request #2393 from SChernykh/dev
RandomX: added BMI2 version for scratchpad prefetch
2021-05-19 22:54:58 +07:00
SChernykh
d443dd86f1 RandomX: added BMI2 version for scratchpad prefetch
Saves 1 instruction and 1 byte in the main loop.
2021-05-19 17:52:16 +02:00
xmrig
3ac8f6b23a Merge pull request #2386 from SChernykh/dev
Enabled IMUL_RCP optimization for light mode mining
2021-05-17 16:36:23 +07:00
SChernykh
9b1f020a8b Enabled IMUL_RCP optimization for light mode mining
Better fix for #2377
2021-05-17 11:26:40 +02:00
XMRig
8bf88a4e74 Merge branch 'Spudz76-dev-fixCLKawPowPlatformHandling' into dev 2021-05-16 10:10:33 +07:00
XMRig
08a2c143f5 Regenerate OpenCL headers. 2021-05-16 10:09:29 +07:00
Tony Butler
4eb9a1aad5 Fix CL code for KawPow where it assumes everything is AMD 2021-05-15 20:34:57 -06:00
xmrig
c8c40586a1 Merge pull request #2378 from SChernykh/dev
Fixed broken light mode mining on x86
2021-05-16 07:03:57 +07:00
SChernykh
29cb416107 Fixed broken light mode mining on x86 2021-05-15 21:41:39 +02:00
xmrig
465169ff12 Merge pull request #2375 from Spudz76/dev-fixMacOSCudaLoader
Fixup MacOS CUDA backend default loader name
2021-05-14 18:48:09 +07:00
Tony Butler
df2bcd8192 Fixup MacOS CUDA backend default loader name 2021-05-14 05:28:31 -06:00
zzjzxq33
d89bb56964 Update openssl version to 1.1.1k 2021-05-09 11:11:46 +08:00
XMRig
87a0864e3b ...and --cpu-affinity. 2021-05-08 04:36:09 +07:00
XMRig
ecf5579f36 #2351 Fixed help output for --cpu-priority option. 2021-05-08 04:34:22 +07:00
xmrig
d5523d819f Merge pull request #2341 from SChernykh/dev
Update sse2neon.h
2021-05-03 23:17:12 +07:00
SChernykh
dbda2e9ccd Update sse2neon.h 2021-05-03 18:08:59 +02:00
xmrig
8babd7bc0a Merge pull request #2340 from SChernykh/dev
Fix AES detection on FreeBSD on ARM
2021-05-03 19:06:49 +07:00
SChernykh
27ced139a6 Fix AES detection on FreeBSD on ARM 2021-05-03 09:57:43 +02:00
xmrig
b46849e813 Merge pull request #2322 from SChernykh/dev
Update randomx_boost.sh
2021-04-28 19:12:37 +07:00
SChernykh
a96a6108ff Update randomx_boost.sh
- Support builtin MSR, see #2283
- Added detection of AMD EPYC CPUs
2021-04-28 14:10:30 +02:00
xmrig
c50c78b700 Merge pull request #2312 from SChernykh/dev
Add missing allow_writes=on to randomx_boost.sh
2021-04-25 20:46:07 +07:00
SChernykh
cd7ab2c79f Add missing allow_writes=on to randomx_boost.sh 2021-04-25 15:31:30 +02:00
XMRig
695fbc013b #2280 Disable GPU backends in benchmark mode. 2021-04-25 15:28:45 +07:00
XMRig
a403c53543 Merge branch 'jsonboss-patch-1' into dev 2021-04-24 23:22:56 +07:00
XMRig
e26fbc96e9 Removed unnecessary system call. 2021-04-24 23:22:10 +07:00
XMRig
259c165e60 Merge branch 'patch-1' of https://github.com/jsonboss/xmrig into jsonboss-patch-1 2021-04-24 22:14:59 +07:00
XMRig
7897bf02dc v6.12.2-dev 2021-04-24 01:53:07 +07:00
XMRig
05f62c5ccc Merge branch 'master' into dev 2021-04-24 01:52:37 +07:00
XMRig
d82e100e30 v6.12.1 2021-04-23 19:43:12 +07:00
XMRig
5f869a414c Merge branch 'dev' 2021-04-23 19:42:29 +07:00
xmrig
7fd6be7d83 Update CHANGELOG.md 2021-04-23 18:54:42 +07:00
xmrig
ae6c536e98 Merge pull request #2296 from SChernykh/dev
Fixed Zen3 asm for cn/upx2
2021-04-21 19:52:52 +07:00
XMRig
c66c593123 v6.12.1-dev 2021-04-21 19:51:03 +07:00
XMRig
b3788b2ba3 Merge branch 'master' into dev 2021-04-21 19:49:54 +07:00
SChernykh
b7adb34c37 Fixed Zen3 asm for cn/upx2
- Invalid rounding mode was used which caused rejected shares sometimes
- Also optimized CN implode/explode functions a bit.
2021-04-21 13:22:25 +02:00
XMRig
ace8409a56 v6.12.0 2021-04-20 20:55:58 +07:00
XMRig
e2c757d9dd Merge branch 'dev' 2021-04-20 20:55:35 +07:00
xmrig
da35de993f Update CHANGELOG.md 2021-04-19 23:20:10 +07:00
xmrig
854b7618ef Merge pull request #2289 from SChernykh/dev
RandomX: optimized IMUL_RCP instruction
2021-04-19 22:54:02 +07:00
SChernykh
3477f9fbc1 RandomX: optimized IMUL_RCP instruction
+0.4% on AMD Zen2
+0.3% on AMD Zen3
+0.1% on Intel SandyBridge
+0.3% on rx/wow on Intel SandyBridge
2021-04-19 17:43:58 +02:00
xmrig
5799744f2f Update CHANGELOG.md 2021-04-19 20:56:45 +07:00
xmrig
61d165a314 Merge pull request #2287 from SChernykh/dev
Fixed rounding mode after running cn/upx
2021-04-19 18:06:16 +07:00
SChernykh
69186f2470 Optimized cn/upx for Zen3
0.9% faster
2021-04-19 12:29:44 +02:00
SChernykh
730d4a6cee Fix division by zero check in percent() 2021-04-19 12:05:07 +02:00
SChernykh
54bc91d5e3 Fixed rounding mode after running cn/upx 2021-04-19 12:02:57 +02:00
jsonboss
2012ffb231 support builtin msr 2021-04-19 10:38:27 +08:00
XMRig
5f9e0ebc6c v6.12.0-dev 2021-04-18 20:12:03 +07:00
xmrig
f314c69a70 Merge pull request #2278 from SChernykh/dev
Optimized cn/upx2
2021-04-17 23:41:26 +07:00
SChernykh
16fe462cad Optimized cn/upx2 for Ryzen CPUs 2021-04-17 18:18:26 +02:00
xmrig
e6e2987ddf Merge pull request #2276 from SChernykh/dev
Added support for Uplexa (cn/upx2 algorithm)
2021-04-17 20:10:54 +07:00
SChernykh
ed456b02cf Update CnHash.cpp 2021-04-17 15:06:31 +02:00
SChernykh
da7f5826cb Added support for Uplexa (cn/upx2 algorithm) 2021-04-17 14:53:42 +02:00
XMRig
6cb398bb42 Merge branch 'dev' of github.com:xmrig/xmrig into dev 2021-04-14 23:44:42 +07:00
XMRig
748be760e8 Added support for --user command line option for the benchmark. 2021-04-14 23:43:31 +07:00
xmrig
4a4118bb8e Merge pull request #2261 from SChernykh/dev
Show total hashrate if compiled without OpenCL
2021-04-13 19:06:42 +07:00
SChernykh
77f1bf0861 Show total hashrate if compiled without OpenCL 2021-04-13 14:02:29 +02:00
XMRig
6bb29b3e7b v6.11.3-dev 2021-04-11 21:13:39 +07:00
XMRig
f720772338 Merge branch 'master' into dev 2021-04-11 21:13:08 +07:00
XMRig
e53e48b88c v6.11.2 2021-04-11 17:24:46 +07:00
XMRig
ecf36ee891 Merge branch 'dev' 2021-04-11 17:24:23 +07:00
xmrig
23ef949dd3 Update CHANGELOG.md 2021-04-11 11:45:07 +07:00
XMRig
92e708c6e7 Update llhttp to v5.1.0 2021-04-10 21:23:32 +07:00
XMRig
30cfcc27db #2207 Fixed regression in HTTP parser. 2021-04-10 21:02:59 +07:00
XMRig
3c6077fb02 v6.11.2-dev 2021-04-08 00:33:01 +07:00
XMRig
63883b4fa7 Merge branch 'master' into dev 2021-04-08 00:32:21 +07:00
XMRig
0f83b5e06c v6.11.1 2021-04-07 10:34:37 +07:00
XMRig
637a333197 Merge branch 'dev' 2021-04-07 10:33:48 +07:00
xmrig
3171b06048 Update CHANGELOG.md 2021-04-07 10:32:17 +07:00
xmrig
2a66a0fa2f Merge pull request #2239 from SChernykh/dev
Fixed broken "coin" setting functionality
2021-04-07 10:30:28 +07:00
SChernykh
c080d5b962 Fixed broken "coin" setting functionality 2021-04-06 23:02:10 +02:00
XMRig
0133107f14 v6.11.0 2021-04-06 21:11:44 +07:00
XMRig
253e349ef9 Merge branch 'dev' 2021-04-06 21:11:13 +07:00
xmrig
5126cc1414 Update CHANGELOG.md 2021-04-06 15:48:18 +07:00
XMRig
ea1245026d #2234 Use const_cast. 2021-04-06 12:07:06 +07:00
xmrig
2158adb711 Merge pull request #2234 from esrrhs/dev
fix build error on gcc 4.8.5
2021-04-06 12:00:36 +07:00
xmrig
8554bb4d9c Merge pull request #2235 from SChernykh/dev
Fixed cn-heavy for GCC-8
2021-04-04 18:09:09 +07:00
SChernykh
1741354498 Fixed cn-heavy for GCC-8 2021-04-04 10:18:27 +02:00
esrrhs
866e97efcf fix build error on gcc 9.3.0
FileLogWriter.h:34:41: error: array used as initializer
2021-04-04 12:42:14 +08:00
xmrig
277352d072 Merge pull request #2233 from SChernykh/dev
Fixed compilation for ARM
2021-04-03 23:03:05 +07:00
SChernykh
8cae605e1f Update randomx.cmake 2021-04-03 17:59:28 +02:00
SChernykh
59c85eaf6a Fixed compilation for ARM 2021-04-03 17:50:52 +02:00
xmrig
864233c110 Merge pull request #2228 from esrrhs/dev
remove useless v4_random_math_init if algo is not cn/r
2021-04-02 15:49:53 +07:00
xmrig
e9b32b3009 Merge pull request #2229 from SChernykh/dev
Don't use RandomX JIT if WITH_ASM=OFF
2021-04-02 15:47:51 +07:00
SChernykh
ec608bbd05 Don't use RandomX JIT if WITH_ASM=OFF
Because the RandomX JIT uses asm code
2021-04-02 10:05:46 +02:00
esrrhs
ec2793bcc9 remove useless v4_random_math_init if algo is not cn/r 2021-04-02 14:59:09 +08:00
xmrig
eb40f07552 Merge pull request #2225 from gentoo-monero/fix-2224
Add missing include
2021-04-01 17:27:53 +07:00
Matthew Smith
28f268aeba Add missing include
memory header ends up not being included when built without OpenCL
support.

Closes: https://github.com/xmrig/xmrig/issues/2224
2021-04-01 11:01:55 +01:00
XMRig
bad5458d40 Merge branch 'pr2217' into dev 2021-03-29 18:17:33 +07:00
XMRig
b72e21fc3c Merge branch 'master' of https://github.com/esrrhs/xmrig into pr2217 2021-03-29 18:16:45 +07:00
esrrhs
d578a3828f setBlob should run after setAlgorithm 2021-03-29 12:11:03 +08:00
xmrig
6c417eb9af Merge pull request #2216 from SChernykh/dev
Optimize cn-heavy in GCC builds
2021-03-28 21:13:45 +07:00
SChernykh
dc70893e6b Optimize cn-heavy in GCC builds
+0.7% in GCC builds, but GCC is still slower than MSVC on cn-heavy.
2021-03-28 16:12:09 +02:00
xmrig
c5c958743e Merge pull request #2214 from SChernykh/cn-heavy-opt
Optimized cn-heavy
2021-03-28 09:56:22 +07:00
xmrig
89f2fa6818 Merge pull request #2213 from SChernykh/dev
Fixed use-after-free bug when exiting
2021-03-28 09:55:50 +07:00
SChernykh
bcfd9edaa5 Optimized cn-heavy
- Remove unnecessary type conversion when doing `idx0 = d ^ q;`
- Saves 1 CPU cycle in the main loop
- 0.2% speedup on Ryzen 5 5600X, results on other CPUs may vary
2021-03-27 22:21:01 +01:00
SChernykh
e0f774d6dd Fixed use-after-free bug when exiting 2021-03-27 21:53:40 +01:00
XMRig
955cc366d1 v6.11.0-dev 2021-03-20 13:42:46 +07:00
xmrig
bc4f6249be Merge pull request #2196 from xmrig/feature-dns2
Improved DNS subsystem
2021-03-20 12:50:53 +07:00
XMRig
0d45600b0e Added command line options --dns-ipv6 and --dns-ttl. 2021-03-20 11:12:09 +07:00
XMRig
2c8f7f692c Added DNS config. 2021-03-20 00:09:59 +07:00
XMRig
3e41bdc552 New DNS implementation. 2021-03-16 22:24:37 +07:00
XMRig
5b189696d7 Added DnsRecords class. 2021-03-14 09:44:56 +07:00
XMRig
c6bcea3811 Improved DnsRecord class. 2021-03-13 20:30:52 +07:00
xmrig
900dd13c45 Merge pull request #2177 from SChernykh/dev
Fix `vld1q_u8_x4` compilation error with GCC 10.2
2021-03-13 08:30:44 +07:00
SChernykh
2876f17f65 Fix vld1q_u8_x4 compilation error with GCC 10.2 2021-03-12 16:26:02 +01:00
xmrig
b2563ca8a6 Merge pull request #2172 from bisand/patch-1
Added reference to limits.h in AdlLib_linux.cpp
2021-03-11 18:07:23 +07:00
André Biseth
7c0d60ac68 Added reference to limits.h in AdlLib_linux.cpp
Suggested solution to bug https://github.com/xmrig/xmrig/issues/2171
2021-03-11 11:50:05 +01:00
xmrig
813a1885cb Merge pull request #2169 from SChernykh/dev
Fix wrong type in Handle::deleteLater()
2021-03-11 06:26:27 +07:00
SChernykh
54bcf05b1d Fix wrong type in Handle::deleteLater()
Bug found by Address Sanitizer
2021-03-10 14:55:06 +01:00
XMRig
bbea8810a7 v6.10.1-dev 2021-03-08 06:04:59 +07:00
XMRig
b6514957f1 Merge branch 'master' into dev 2021-03-08 06:04:32 +07:00
XMRig
69590f9777 v6.10.0 2021-03-08 04:05:27 +07:00
xmrig
576ff120e5 Merge pull request #2128 from ianmaddox/patch-1
Minor verbiage tweak
2021-03-08 04:02:02 +07:00
xmrig
2d52118c1b Merge pull request #2161 from coolhaircut/patch-1
Added Userspace MSR permissions clarification in CPU.md
2021-03-08 04:01:15 +07:00
xmrig
28ad59d828 Merge pull request #2129 from felixonmars/patch-1
Correct a typo in doc/CPU.md
2021-03-08 04:00:38 +07:00
XMRig
e0c630f34f Merge branch 'dev' 2021-03-08 03:59:09 +07:00
XMRig
b8f9a326aa 6.10.0-dev 2021-03-07 01:44:38 +07:00
Cool Dude (with a cool haircut)
542617b6db Update CPU.md 2021-03-05 22:54:03 +00:00
XMRig
f5db50c9d7 Sync with the proxy. 2021-03-06 05:32:54 +07:00
XMRig
856c8e6bcd Fixed build without TLS support. 2021-03-06 02:07:10 +07:00
XMRig
b3dbf6e23f http-parser replaced with llhttp. 2021-03-06 01:46:49 +07:00
xmrig
a11c57226b Merge pull request #2158 from SChernykh/dev
Fix GCC compilation
2021-03-04 16:48:05 +07:00
SChernykh
94d2cac775 Fix GCC compilation 2021-03-04 10:45:39 +01:00
XMRig
548a7d46e1 Add note about CPU affinity. 2021-03-04 16:19:06 +07:00
xmrig
bebc163e25 Merge pull request #2157 from SChernykh/dev
Fix crash in cn-heavy on Zen3 with manual thread count
2021-03-04 16:03:54 +07:00
SChernykh
70cddc06ba Fix crash in cn-heavy on Zen3 with manual thread count 2021-03-04 10:02:35 +01:00
XMRig
1f9cdc0564 Update hwloc for MSVC. 2021-03-04 03:23:26 +07:00
XMRig
a5a7ee716d Update build scripts. 2021-03-03 19:38:54 +07:00
xmrig
d2f24d94b9 Merge pull request #2150 from TheGreatMcPain/dev
Update sse2neon.h to the latest master. Fixes build on armv7.
2021-03-02 19:41:11 +07:00
TheGreatMcPain
ba3299b61b Update sse2neon.h to the latest master. Fixes build on armv7.
A few days after this header was introduced, upstream updated it with
armv7 versions of `_mm_aesenc_si128`, which allows xmrig to build
on armv7.
2021-03-02 01:33:25 -06:00
xmrig
ca5dfe7c12 Merge pull request #2147 from SChernykh/dev
Fixed many "new job" messages when solo mining
2021-03-01 23:49:03 +07:00
SChernykh
91ad6fcf3d Fixed many "new job" messages when solo mining
Fix for https://github.com/xmrig/xmrig/issues/2127
2021-03-01 17:46:05 +01:00
XMRig
0b7dfaabe0 Code cleanup. 2021-03-01 19:04:03 +07:00
XMRig
6f8ffb7660 Fixed possible out of order write to log file. 2021-03-01 18:54:20 +07:00
XMRig
4a8e7510e1 #2123 Ignore regex exception. 2021-02-27 15:29:14 +07:00
Felix Yan
32876dd01d Correct a typo in doc/CPU.md 2021-02-24 04:36:27 +08:00
Ian Maddox
37df513b32 Minor verbiage tweak
Fixing mixed phrasing in error message
2021-02-23 11:34:10 -08:00
xmrig
31a5d05dc1 Merge pull request #2122 from SChernykh/dev
Fixed pause logic when both pause on battery and user activity are en…
2021-02-21 22:36:32 +07:00
SChernykh
d478d737c4 Fixed pause logic when both pause on battery and user activity are enabled 2021-02-21 16:33:57 +01:00
XMRig
e20daff4eb v6.9.1-dev 2021-02-21 22:28:15 +07:00
XMRig
1ccdcb1645 Merge branch 'master' into dev 2021-02-21 22:27:36 +07:00
XMRig
072881e1a1 v6.9.0 2021-02-21 21:23:48 +07:00
XMRig
0c4a3cfc30 Merge branch 'dev' 2021-02-21 21:23:15 +07:00
xmrig
cffd0f50a4 Update CPU.md 2021-02-21 20:22:06 +07:00
XMRig
4b1857114e v6.9.0-dev 2021-02-20 14:28:20 +07:00
XMRig
b49fb27e84 Added idle time detection for macOS. 2021-02-20 13:18:31 +07:00
XMRig
ee341118ce #2104 Added user configurable idle time. 2021-02-19 23:35:30 +07:00
XMRig
f599807bbb Simplified code, fixed broken pause. 2021-02-19 16:26:31 +07:00
xmrig
a2ad626012 Merge pull request #2117 from SChernykh/dev
Fixed crash when GPU mining cn-heavy on Zen3 system
2021-02-18 21:08:44 +07:00
SChernykh
e8a99809b6 Fixed crash when GPU mining cn-heavy on Zen3 system 2021-02-18 14:49:37 +01:00
XMRig
0fe20fe88c Merge remote-tracking branch 'remotes/origin/pr2112' into dev 2021-02-18 15:35:59 +07:00
XMRig
d1d1517b4f Fixed macOS build. 2021-02-18 15:22:39 +07:00
XMRig
5980675876 Code and copyright cleanup. 2021-02-18 12:56:39 +07:00
Hansie Odendaal
3b87cd97ce Allow result submission to origin daemon with self-select
With `self-select` mode enabled, the `submit-to-origin` config option
will let the `SelfSelectClient` submit the solution to both
the daemon where it got the template from as well as to
the connected pool, for miners that want to do pool mining
with Monero and solo mining with an altcoin (merged mining variant).

Thank you and special credit to @StriderDM (https://github.com/StriderDM)!
2021-02-17 18:05:13 +02:00
xmrig
d2f01cfa86 Merge pull request #2104 from SChernykh/dev
Added `pause-on-active` option
2021-02-15 11:04:14 +07:00
SChernykh
82830e359a Added pause-on-active option
Windows only for now. When set to true, pauses mining when user touches mouse or keyboard.
2021-02-14 15:32:18 +01:00
XMRig
8e3fec5768 v6.8.3 2021-02-12 22:51:26 +07:00
XMRig
4fd23a1bf4 Merge branch 'master' into dev 2021-02-12 22:50:52 +07:00
XMRig
8bfaddd3fc v6.8.2 2021-02-12 18:47:16 +07:00
XMRig
dabafaaadb Merge branch 'dev' 2021-02-12 18:46:41 +07:00
xmrig
5cda714254 Update CHANGELOG.md 2021-02-12 18:35:43 +07:00
xmrig
91151ce4a1 Merge pull request #2089 from SChernykh/dev
Optimized cn-heavy for Zen3
2021-02-08 16:24:16 +07:00
SChernykh
dc1443f3b8 Cryptonight: add prefetching to interleaved mode 2021-02-07 23:29:54 +01:00
SChernykh
8af8df25aa Optimized cn-heavy for Zen3
- Uses scratchpad interleaving to access only the closest L3 slice from each CPU core.
- Also activates MSR mod for cn-heavy because CPU prefetchers get confused with interleaving
- 7-8% speedup on Zen3
2021-02-07 22:05:11 +01:00
XMRig
b1e14dc1d3 Always disable kawpow for CPU backend. 2021-02-07 18:49:54 +07:00
XMRig
f460d76f8d Add missing option to config example. 2021-02-06 16:17:53 +07:00
xmrig
1c63e9efba Merge pull request #2080 from SChernykh/dev 2021-02-04 04:29:59 +07:00
SChernykh
21abbe4e84 Fix compile error in Termux 2021-02-03 19:05:05 +01:00
XMRig
3080f47cd6 v6.8.2-dev 2021-02-03 18:01:14 +07:00
XMRig
f4ebdaa8e5 Merge branch 'master' into dev 2021-02-03 18:00:42 +07:00
XMRig
1bcfd0cdea v6.8.1 2021-02-03 07:00:39 +07:00
XMRig
9396ecf93d Merge branch 'dev' 2021-02-03 06:57:11 +07:00
xmrig
a4af964696 Update CHANGELOG.md 2021-02-03 06:04:30 +07:00
XMRig
2c8d8ee2ab Fixed macOS build and compile warning. 2021-02-02 13:53:45 +07:00
xmrig
631a8ca802 Merge pull request #2077 from SChernykh/dev
Fix for illegal instruction crash on ARM
2021-02-02 04:57:36 +07:00
SChernykh
346892e170 Update jit_compiler_a64.cpp 2021-02-01 22:52:02 +01:00
SChernykh
db03573804 ARM JIT: added missing cache flush 2021-02-01 22:42:35 +01:00
SChernykh
e74573f81f Fixed code allocation for ARM 2021-02-01 22:36:11 +01:00
xmrig
0e70974d7d Merge pull request #2076 from xmrig/feature-flexible-hugepages
Added support for flexible huge page sizes on Linux.
2021-02-02 04:07:41 +07:00
xmrig
3a3ee91324 Merge pull request #2075 from SChernykh/dev
Fixed crashes on ARM
2021-02-02 03:06:58 +07:00
SChernykh
4108428872 Fixed crashes on ARM 2021-02-01 17:07:45 +01:00
XMRig
4c3425a958 Added "--hugepage-size" command line option. 2021-02-01 05:06:24 +07:00
XMRig
09624c4f9b Added support for flexible huge page sizes on Linux. 2021-01-31 23:38:57 +07:00
XMRig
8faef28e7d Detect Apple M1 on Linux. 2021-01-31 05:41:32 +07:00
XMRig
62450f4ed8 Update ARM CPUs names. 2021-01-31 03:53:22 +07:00
XMRig
2c52a5a352 #2066 Fixed AMD GPUs health data readings. 2021-01-30 02:42:59 +07:00
XMRig
7d52bd7454 Extend normalization rules. 2021-01-29 18:22:24 +07:00
XMRig
f68b105bd9 Normalize DMI memory slot name. 2021-01-29 04:23:50 +07:00
XMRig
9ca1a6129b #2066 Quick fix for AMD GPUs health data. 2021-01-29 01:23:35 +07:00
xmrig
7a3df1c0bb Merge pull request #2067 from SChernykh/dev
Fix compilation error when RandomX and Argon2 are disabled
2021-01-28 20:44:03 +07:00
SChernykh
22a1b8d82d Fix compilation error when RandomX and Argon2 are disabled 2021-01-28 14:38:28 +01:00
xmrig
0a462fbef5 Merge pull request #2064 from SChernykh/dev
Added documentation for config.json CPU options
2021-01-28 19:41:15 +07:00
SChernykh
f302b4b0ef Added documentation for config.json CPU options 2021-01-28 13:37:27 +01:00
XMRig
65fe26dc6c Don't print empty memory slots if the total count is above 8. 2021-01-28 00:00:00 +07:00
XMRig
e6d4921e21 v6.8.1-dev 2021-01-26 16:40:10 +07:00
XMRig
f82d67e76e Merge branch 'master' into dev 2021-01-26 16:38:37 +07:00
XMRig
4e671a945d v6.8.0 2021-01-26 15:26:16 +07:00
XMRig
e38d277143 Merge branch 'dev' 2021-01-26 15:25:20 +07:00
XMRig
8eb9b4d37a Update default config example. 2021-01-26 15:15:08 +07:00
xmrig
2d45cc64c1 Update CHANGELOG.md 2021-01-26 15:08:05 +07:00
XMRig
b9081e992b Code cleanup 2021-01-25 22:00:42 +07:00
XMRig
1424b2975f Fixed DMI memory speed. 2021-01-24 15:56:02 +07:00
XMRig
0fa5db8fa3 Code cleanup. 2021-01-24 15:02:22 +07:00
xmrig
5999dccd57 Merge pull request #2058 from SChernykh/dev
RandomX JIT x86: remove unnecessary instructions
2021-01-24 13:59:56 +07:00
SChernykh
78922a0772 RandomX JIT x86: remove unnecessary instructions
Adopted from https://github.com/tevador/RandomX/pull/201
2021-01-23 22:28:50 +01:00
XMRig
bc3914883a Merge branch 'alvv-z-patch-1' into dev 2021-01-24 02:30:22 +07:00
XMRig
86dae9e149 Merge branch 'patch-1' of https://github.com/alvv-z/xmrig into alvv-z-patch-1 2021-01-24 02:30:05 +07:00
xmrig
05b2260393 Merge pull request #2057 from xmrig/feature-msr2
Improved MSR subsystem code quality
2021-01-24 02:28:54 +07:00
XMRig
672f6df6c1 Fixed Cache QoS restore on exit where it is not supported. 2021-01-24 02:23:27 +07:00
XMRig
9dae559b73 Added RxMsr class. 2021-01-23 23:23:39 +07:00
XMRig
b9d813c403 Move Ryzen related fixes to RxFix class. 2021-01-23 00:27:56 +07:00
XMRig
c48e2e6af8 Added new class Msr. 2021-01-22 23:50:25 +07:00
xmrig
76fba819fe Merge pull request #2055 from GoDzM4TT3O/patch-1
Add missing "cstdio" library
2021-01-22 22:19:41 +07:00
GoDzM4TT3O
6bab624885 Add missing "cstdio" library
Compilation fails if the above library is missing. This fixes a compilation error.
2021-01-22 14:18:28 +01:00
XMRig
3730bcd434 Merge branch 'master' into feature-msr2 2021-01-22 16:55:57 +07:00
XMRig
3b7d30a91d v6.8.0-dev 2021-01-22 00:27:38 +07:00
XMRig
c8588903e3 Enable DMI reader by default. 2021-01-22 00:12:34 +07:00
xmrig
0b4fec15dd Merge pull request #2052 from xmrig/feature-dmi
Added DMI/SMBIOS reader
2021-01-22 00:09:10 +07:00
XMRig
ef8cc28f3f Added DMI data to online benchmark. 2021-01-21 23:22:01 +07:00
XMRig
8471f7fad3 Added "GET /2/dmi" API endpoint. 2021-01-20 22:54:02 +07:00
alvv-z
b99dc440af Spelling Check
agaiin -> again
2021-01-20 12:36:47 +01:00
XMRig
9a02007900 Added config option "dmi" and command line option "--no-dmi". 2021-01-20 16:02:48 +07:00
XMRig
efc5e5d811 Fix summary. 2021-01-20 00:45:36 +07:00
XMRig
dea5be0a57 Added basic system reader. 2021-01-20 00:43:01 +07:00
XMRig
24c290963a Added DMI reader for macOS. 2021-01-19 14:16:03 +07:00
XMRig
9dffcdaddd Enable FreeBSD support. 2021-01-19 01:45:17 +07:00
XMRig
3df47052ed Added legacy DMI readers for Linux. 2021-01-19 01:23:09 +07:00
XMRig
3b8d081c8c Add support for older DMI formats on Linux. 2021-01-18 22:56:57 +07:00
XMRig
05e6f66169 Added basic Linux support. 2021-01-18 16:53:42 +07:00
XMRig
11e0d3de3a Added DMI reader (Windows only). 2021-01-18 11:23:29 +07:00
XMRig
ea367da064 #2043 Fix compile warning. 2021-01-17 17:48:35 +07:00
xmrig
a999a56775 Merge pull request #2041 from coldiron/typo-fixes
fixed grammar in a couple of awkward error messages
2021-01-16 10:15:29 +07:00
Richard Mitsuk Lavitt
590252bd5e fixed grammar in a couple of awkward error messages 2021-01-15 14:33:38 -06:00
XMRig
cc2de4f768 v6.7.3-dev 2021-01-15 20:11:28 +07:00
XMRig
aeea0e0a6c Merge branch 'master' into dev 2021-01-15 20:09:26 +07:00
XMRig
82d698a1e5 v6.7.2 2021-01-15 19:31:41 +07:00
XMRig
95b2b5e028 Merge branch 'dev' 2021-01-15 19:31:09 +07:00
xmrig
eae84d47e7 Update CHANGELOG.md 2021-01-15 19:30:22 +07:00
XMRig
45d12314f4 Sync changes. 2021-01-15 19:18:52 +07:00
xmrig
fa11cb623d Merge pull request #2039 from SChernykh/dev
Fixed solo mining
2021-01-15 18:49:04 +07:00
SChernykh
7da04c6a2c Always use cvt_bin2hex 2021-01-15 12:46:27 +01:00
SChernykh
5c449913af Fixed solo mining
It was broken since 6.7.0
2021-01-15 11:18:36 +01:00
XMRig
af019fed8e v6.7.2-dev 2021-01-11 18:29:56 +07:00
XMRig
8872630c46 Merge branch 'master' into dev 2021-01-11 18:29:06 +07:00
XMRig
d3ec21cbf5 v6.7.1 2021-01-11 16:13:29 +07:00
XMRig
395dd4086b Merge branch 'dev' 2021-01-11 16:12:14 +07:00
XMRig
a7f9808621 Fixed HOSTNAME environment variable. 2021-01-11 11:42:32 +07:00
xmrig
88862b617f Update CHANGELOG.md 2021-01-10 07:53:44 +07:00
xmrig
39bfa0c420 Merge pull request #2028 from SChernykh/dev
RandomX x86 JIT: remove redundant CFROUND
2021-01-08 04:58:25 +07:00
SChernykh
f62f4e6108 RandomX x86 JIT: remove redundant CFROUND 2021-01-07 16:20:00 +01:00
xmrig
9f128d1182 Merge pull request #2009 from SChernykh/dev
AstroBWT OpenCL fixes
2020-12-27 22:56:58 +07:00
SChernykh
2f2b33c82b AstroBWT OpenCL fixes
- Rewrote main BWT kernel to work properly on Navi
- Fixed nonce iterations in OclWorker
- Fixed memory allocation for AstroBWT
2020-12-27 16:44:35 +01:00
xmrig
56280cb1d5 Merge pull request #2007 from Frago9876543210/dev
Added scripts/{build, deps} into .gitignore
2020-12-26 00:13:19 +07:00
Frago9876543210
07127c6e87 Added scripts/{build, deps} into .gitignore 2020-12-25 20:05:18 +03:00
xmrig
3dabc77a09 Merge pull request #1998 from SChernykh/dev
Show hashrate in the benchmark finished message
2020-12-23 21:04:11 +07:00
SChernykh
66349e3d23 Show hashrate in the benchmark finished message 2020-12-23 14:31:38 +01:00
XMRig
85a78ce537 #1995 Fixed log initialization. 2020-12-22 21:41:39 +07:00
XMRig
0d9f17670e v6.7.1-dev 2020-12-21 20:59:00 +07:00
XMRig
deb561a410 Merge branch 'master' into dev 2020-12-21 20:57:49 +07:00
XMRig
9d256a1e9b v6.7.0 2020-12-21 17:55:17 +07:00
XMRig
3c985eef25 Merge branch 'dev' 2020-12-21 17:54:18 +07:00
xmrig
6224887967 Update CHANGELOG.md 2020-12-21 17:26:29 +07:00
xmrig
09361bf3a5 Update CHANGELOG.md 2020-12-21 11:30:12 +07:00
xmrig
8a1311f015 Merge pull request #1989 from SChernykh/dev
Fixed broken Dero solo mining
2020-12-21 09:01:10 +07:00
SChernykh
cde7cddcaa Fixed broken Dero solo mining 2020-12-20 22:17:08 +01:00
xmrig
aa53ba073d Merge pull request #1987 from SChernykh/dev
Another dataset AVX2 init speedup (+3.8% faster on Zen3)
2020-12-20 01:57:12 +07:00
SChernykh
ac46d6f8de Fix GCC warning 2020-12-19 19:50:52 +01:00
SChernykh
5efd00abec Another dataset AVX2 init speedup (+3.8% faster on Zen3) 2020-12-19 19:46:31 +01:00
xmrig
e79e3370f8 Merge pull request #1986 from SChernykh/dev
Dataset initialization with AVX2 (faster startup)
2020-12-20 00:16:20 +07:00
SChernykh
633aaccd9c Added config option for AVX2 dataset init
-1 = Auto detect
0 = Always disabled
1 = Enabled if AVX2 is supported
2020-12-19 16:18:49 +01:00
SChernykh
410313d933 Auto-detect the fastest code for dataset init 2020-12-19 13:59:28 +01:00
SChernykh
7aba194d3b Fixed Windows scripts
Change back to script's folder when started as administrator
2020-12-18 16:51:28 +01:00
SChernykh
515a85e66c Dataset initialization with AVX2 (WIP) 2020-12-18 14:53:54 +01:00
XMRig
6b21a51a2f Huge pages not supported by macOS ARM. 2020-12-16 01:59:20 +07:00
XMRig
a934ba3079 Fixed Xcode generator. 2020-12-15 22:47:22 +07:00
XMRig
633a92bff0 Merge branch 'gcc11' of https://github.com/voidanix/xmrig into dev 2020-12-15 17:32:24 +07:00
XMRig
5a846ebd58 Merge branch '64bit-is-not-x64' of https://github.com/rivoreo/xmrig into dev 2020-12-15 17:28:36 +07:00
voidanix
e4c2ccba9d Fix build on GCC 11 2020-12-15 09:39:26 +01:00
WHR
15168950e5 Don't use term 'x64' for '64-bit' 2020-12-15 14:56:06 +08:00
XMRig
6b331b6945 Reduce JIT memory for ARM. 2020-12-15 02:52:38 +07:00
xmrig
4c7d20c8e6 Merge pull request #1977 from SChernykh/dev
Fix: secure JIT and huge pages are incompatible on Windows
2020-12-15 00:41:55 +07:00
SChernykh
414588d701 Fix alignment for Linux 2020-12-14 18:32:25 +01:00
SChernykh
f89f6a8abf Fix: secure JIT and huge pages are incompatible on Windows 2020-12-14 18:22:58 +01:00
XMRig
ca3695a754 Update hwloc for MSVC to 2.4.0. 2020-12-14 02:55:50 +07:00
XMRig
7c682ec91a Update build scripts. 2020-12-13 22:56:00 +07:00
XMRig
cc5c2c41be Hardcode L2 cache size for Apple M1. 2020-12-13 20:09:29 +07:00
XMRig
643142dc30 Update user agent for macOS and fix compile warnings 2020-12-13 13:57:12 +07:00
XMRig
a36fb7e728 More correct CPU affinity support for macOS. 2020-12-13 01:20:31 +07:00
XMRig
87fafcf91b Fixed JIT on macOS. 2020-12-12 22:40:48 +07:00
XMRig
2966b80ba1 Fixed macOS build. 2020-12-12 22:15:15 +07:00
XMRig
179f09081f Alternative secure JIT for macOS. 2020-12-12 21:32:36 +07:00
XMRig
775867fc3e Fixed secure JIT on Linux and code cleanup. 2020-12-12 19:18:47 +07:00
XMRig
497863441a Remove duplicated code. 2020-12-12 12:39:11 +07:00
XMRig
ec62ded279 Added generic secure JIT support for RandomX. 2020-12-11 23:17:54 +07:00
xmrig
f9c0933f05 Merge pull request #1970 from SChernykh/dev
More static analysis fixes
2020-12-08 22:34:55 +07:00
SChernykh
0da3390d09 More static analysis fixes 2020-12-08 16:05:58 +01:00
xmrig
9a025fdb75 Merge pull request #1969 from SChernykh/dev
Fixed errors found by static analysis
2020-12-08 18:24:41 +07:00
SChernykh
cafd868773 Fixed errors found by static analysis 2020-12-08 12:16:59 +01:00
xmrig
1c9e959cc4 Merge pull request #1968 from SChernykh/dev
Added virtual machine detection
2020-12-06 23:45:53 +07:00
SChernykh
41a9bddd59 Added virtual machine detection 2020-12-06 17:34:01 +01:00
xmrig
7a09f5fe47 Merge pull request #1966 from xmrig/remove-libcpuid
Removed libcpuid support
2020-12-06 21:59:12 +07:00
XMRig
ab45794b7c Removed libcpuid support. 2020-12-06 21:51:24 +07:00
xmrig
1d5592f303 Merge pull request #1964 from xmrig/update-base
Cleanup and refactoring
2020-12-06 00:13:05 +07:00
XMRig
2bf8887cab v6.7.0-dev 2020-12-06 00:08:34 +07:00
XMRig
acf7ec8355 Restore Hashrate class interface. 2020-12-05 11:09:25 +07:00
XMRig
bd82b3c852 Added GpuWorker class. 2020-12-04 22:25:28 +07:00
XMRig
daf08fcf9a Cleanup 2020-12-04 19:52:53 +07:00
XMRig
c8ee6f7db8 Move Profiler and more cleanup. 2020-12-04 09:23:40 +07:00
XMRig
662a957106 Fixed Linux build. 2020-12-03 19:55:49 +07:00
XMRig
3055e03b7e Cleanup 2020-12-03 19:45:16 +07:00
XMRig
11da7a3155 Update Json. 2020-12-03 15:39:33 +07:00
XMRig
0a27c6d6af Update Signals and Console. 2020-12-03 12:06:18 +07:00
XMRig
86795aa5b7 Update HTTP 2020-12-03 10:48:57 +07:00
XMRig
63bd45c397 Added Cvt class. 2020-12-02 16:31:45 +07:00
XMRig
469b1f08de Update net 2020-12-02 11:32:11 +07:00
XMRig
121c515a07 Update log. 2020-12-01 23:28:07 +07:00
XMRig
2715bc20d9 v6.6.3-dev 2020-12-01 21:28:31 +07:00
XMRig
c156cdfe7a Merge branch 'master' into dev 2020-12-01 21:27:56 +07:00
XMRig
a9965c5580 v6.6.2 2020-12-01 20:51:37 +07:00
XMRig
dca6d3f1ff Merge branch 'dev' 2020-12-01 20:49:59 +07:00
xmrig
91979dc4dd Update CHANGELOG.md 2020-12-01 20:12:27 +07:00
xmrig
87195ed237 Merge pull request #1960 from SChernykh/dev
Fix RandomX init when switching to other algo and back
2020-11-30 09:20:14 +07:00
SChernykh
d557fe7f39 Fix RandomX init when switching to other algo and back 2020-11-29 22:02:48 +01:00
xmrig
13ee9d09a8 Merge pull request #1959 from SChernykh/dev
Optimized JIT compiler
2020-11-29 20:08:40 +07:00
SChernykh
f16d1837f8 Optimized JIT compiler
More branch-free code
2020-11-29 14:05:50 +01:00
XMRig
096b09bf4d Update base 2020-11-29 18:45:52 +07:00
xmrig
bbcf8e2be3 Merge pull request #1958 from SChernykh/dev
Example mining scripts to help new miners
2020-11-27 14:51:53 +07:00
SChernykh
fb9d2b9e7c Example mining scripts to help new miners 2020-11-26 19:06:46 +01:00
XMRig
58711aa666 v6.6.2-dev 2020-11-25 21:12:45 +07:00
XMRig
c7236d2cf0 Merge branch 'master' into dev 2020-11-25 21:06:58 +07:00
XMRig
25da0cba57 v6.6.1 2020-11-25 17:46:39 +07:00
XMRig
fb721edc20 Merge branch 'dev' 2020-11-25 17:46:00 +07:00
xmrig
d2a4fa367a Update CHANGELOG.md 2020-11-25 16:44:06 +07:00
XMRig
8686e08336 Fixed: benchmark validation on NUMA hardware produced incorrect results in some conditions. 2020-11-25 09:35:11 +07:00
XMRig
09b68f3cdb Added BenchStatePrivate class. 2020-11-24 17:59:40 +07:00
XMRig
05a2054057 v6.6.1-dev 2020-11-24 10:58:12 +07:00
XMRig
4e59f90495 Merge branch 'master' into dev 2020-11-24 10:57:35 +07:00
XMRig
19f0476efb Merge branch 'dev' 2020-11-24 09:23:35 +07:00
XMRig
edf7885172 Fixed benchmark progress logging. 2020-11-23 21:41:51 +07:00
XMRig
6cd7f3e053 v6.6.0 2020-11-23 17:42:42 +07:00
XMRig
f1ae81c6ae Merge branch 'dev' 2020-11-23 17:41:33 +07:00
xmrig
8cbf90d35b Update CHANGELOG.md 2020-11-22 18:42:10 +07:00
XMRig
48eaf11026 v6.6.0-dev 2020-11-20 18:55:38 +07:00
XMRig
75f18c9b31 Use static RandomX seed for benchmark. 2020-11-20 08:15:04 +07:00
XMRig
302fe70f6b Fixed online validation with token. 2020-11-18 07:28:46 +07:00
XMRig
a2a0defeef Submit top benchmark diff. 2020-11-17 07:33:20 +07:00
XMRig
e2ea11ffeb Remove benchmark size from Job class. 2020-11-17 05:28:42 +07:00
XMRig
d8f9501ac8 Added DNS cache for online benchmark. 2020-11-17 04:59:26 +07:00
XMRig
12a1365b5d Fixed --token option. 2020-11-16 18:53:24 +07:00
xmrig
8f3a2a63ba Merge pull request #1951 from xmrig/feature-1t-bench
New single thread benchmark and online benchmark refactoring
2020-11-16 17:12:23 +07:00
XMRig
f7f07ce42c Fixed build. 2020-11-16 16:37:57 +07:00
XMRig
c1d99bfa09 Benchmark refactoring, zero delay submit and unified HTTP layer. 2020-11-16 16:22:34 +07:00
XMRig
be8245fc92 Unlock benchmark size for debug builds. 2020-11-16 08:56:35 +07:00
XMRig
926871cbe1 Removed non thread safe access to config. 2020-11-16 07:58:28 +07:00
SChernykh
ee677ef5c9 Added reference hashes for 1T offline benchmark 2020-11-16 00:57:00 +01:00
SChernykh
c10ec90b60 Make single thread bench cheat-resistant
Each hash is dependent on the previous hash to make multi-threaded cheating impossible.
2020-11-15 20:38:27 +01:00
XMRig
0d3c2752c9 v6.5.4-dev 2020-11-15 08:23:18 +07:00
XMRig
eaa44a1547 Merge branch 'master' into dev 2020-11-15 08:22:50 +07:00
XMRig
89454c6d30 v6.5.3 2020-11-15 07:33:52 +07:00
XMRig
d3f2184fcc Merge branch 'dev' 2020-11-15 07:15:32 +07:00
xmrig
19da03c9b7 Update CHANGELOG.md 2020-11-15 07:14:46 +07:00
XMRig
aa284c6a3a Added warning about libcpuid deprecation. 2020-11-15 04:23:59 +07:00
XMRig
6379d1f90e Add static_assert 2020-11-15 04:13:40 +07:00
xmrig
8737af0f6f Merge pull request #1946 from SChernykh/dev
Fixed MSR mod names in JSON API
2020-11-15 02:41:22 +07:00
SChernykh
9a1e867da2 Fixed MSR mod names in JSON API 2020-11-14 19:55:43 +01:00
XMRig
be979d35c7 v6.5.3-dev 2020-11-14 02:46:32 +07:00
XMRig
971abe536c Merge branch 'master' into dev 2020-11-14 02:45:50 +07:00
429 changed files with 40595 additions and 19054 deletions

.gitignore

@@ -1,4 +1,6 @@
/build
scripts/build
scripts/deps
/CMakeLists.txt.user
/.idea
/src/backend/opencl/cl/cn/cryptonight_gen.cl

CHANGELOG.md

@@ -1,3 +1,118 @@
# v6.12.2
- [#2280](https://github.com/xmrig/xmrig/issues/2280) GPU backends are now disabled in benchmark mode.
- [#2322](https://github.com/xmrig/xmrig/pull/2322) Improved MSR compatibility with recent Linux kernels and updated `randomx_boost.sh`.
- [#2340](https://github.com/xmrig/xmrig/pull/2340) Fixed AES detection on FreeBSD on ARM.
- [#2341](https://github.com/xmrig/xmrig/pull/2341) `sse2neon` updated to the latest version.
- [#2351](https://github.com/xmrig/xmrig/issues/2351) Fixed help output for `--cpu-priority` and `--cpu-affinity` options.
- [#2375](https://github.com/xmrig/xmrig/pull/2375) Fixed macOS CUDA backend default loader name.
- [#2378](https://github.com/xmrig/xmrig/pull/2378) Fixed broken light mode mining on x86.
- [#2379](https://github.com/xmrig/xmrig/pull/2379) Fixed CL code for KawPow where it assumes everything is AMD.
- [#2386](https://github.com/xmrig/xmrig/pull/2386) RandomX: enabled `IMUL_RCP` optimization for light mode mining.
- [#2393](https://github.com/xmrig/xmrig/pull/2393) RandomX: added BMI2 version for scratchpad prefetch.
- [#2395](https://github.com/xmrig/xmrig/pull/2395) RandomX: rewrote dataset read code.
- [#2398](https://github.com/xmrig/xmrig/pull/2398) RandomX: optimized ARMv8 dataset read.
- Added `argon2/ninja` alias for `argon2/wrkz` algorithm.
# v6.12.1
- [#2296](https://github.com/xmrig/xmrig/pull/2296) Fixed Zen3 assembly code for `cn/upx2` algorithm.
# v6.12.0
- [#2276](https://github.com/xmrig/xmrig/pull/2276) Added support for Uplexa (`cn/upx2` algorithm).
- [#2261](https://github.com/xmrig/xmrig/pull/2261) Show total hashrate if compiled without OpenCL.
- [#2289](https://github.com/xmrig/xmrig/pull/2289) RandomX: optimized `IMUL_RCP` instruction.
- Added support for `--user` command line option for online benchmark.
# v6.11.2
- [#2207](https://github.com/xmrig/xmrig/issues/2207) Fixed regression in HTTP parser and llhttp updated to v5.1.0.
# v6.11.1
- [#2239](https://github.com/xmrig/xmrig/pull/2239) Fixed broken `coin` setting functionality.
# v6.11.0
- [#2196](https://github.com/xmrig/xmrig/pull/2196) Improved DNS subsystem and added new DNS specific options.
- [#2172](https://github.com/xmrig/xmrig/pull/2172) Fixed build on Alpine 3.13.
- [#2177](https://github.com/xmrig/xmrig/pull/2177) Fixed ARM specific compilation error with GCC 10.2.
- [#2214](https://github.com/xmrig/xmrig/pull/2214) [#2216](https://github.com/xmrig/xmrig/pull/2216) [#2235](https://github.com/xmrig/xmrig/pull/2235) Optimized `cn-heavy` algorithm.
- [#2217](https://github.com/xmrig/xmrig/pull/2217) Fixed mining job creation sequence.
- [#2225](https://github.com/xmrig/xmrig/pull/2225) Fixed build without OpenCL support on some systems.
- [#2229](https://github.com/xmrig/xmrig/pull/2229) Don't use RandomX JIT if `WITH_ASM=OFF`.
- [#2228](https://github.com/xmrig/xmrig/pull/2228) Removed useless code for cryptonight algorithms.
- [#2234](https://github.com/xmrig/xmrig/pull/2234) Fixed build error on gcc 4.8.
# v6.10.0
- [#2122](https://github.com/xmrig/xmrig/pull/2122) Fixed pause logic when both pause on battery and user activity are enabled.
- [#2123](https://github.com/xmrig/xmrig/issues/2123) Fixed compatibility with gcc 4.8.
- [#2147](https://github.com/xmrig/xmrig/pull/2147) Fixed many `new job` messages when solo mining.
- [#2150](https://github.com/xmrig/xmrig/pull/2150) Updated `sse2neon.h` to the latest master, fixes build on ARMv7.
- [#2157](https://github.com/xmrig/xmrig/pull/2157) Fixed crash in `cn-heavy` on Zen3 with manual thread count.
- Fixed possible out of order write to log file.
- [http-parser](https://github.com/nodejs/http-parser) replaced with [llhttp](https://github.com/nodejs/llhttp).
- For official builds: libuv, hwloc and OpenSSL updated to latest versions.
# v6.9.0
- [#2104](https://github.com/xmrig/xmrig/pull/2104) Added [pause-on-active](https://xmrig.com/docs/miner/config/misc#pause-on-active) config option and `--pause-on-active=N` command line option.
- [#2112](https://github.com/xmrig/xmrig/pull/2112) Added support for [Tari merge mining](https://github.com/tari-project/tari/blob/development/README.md#tari-merge-mining).
- [#2117](https://github.com/xmrig/xmrig/pull/2117) Fixed crash when GPU mining `cn-heavy` on Zen3 system.
# v6.8.2
- [#2080](https://github.com/xmrig/xmrig/pull/2080) Fixed compile error in Termux.
- [#2089](https://github.com/xmrig/xmrig/pull/2089) Optimized CryptoNight-Heavy for Zen3, 7-8% speedup.
# v6.8.1
- [#2064](https://github.com/xmrig/xmrig/pull/2064) Added documentation for config.json CPU options.
- [#2066](https://github.com/xmrig/xmrig/issues/2066) Fixed AMD GPUs health data readings on Linux.
- [#2067](https://github.com/xmrig/xmrig/pull/2067) Fixed compilation error when RandomX and Argon2 are disabled.
- [#2076](https://github.com/xmrig/xmrig/pull/2076) Added support for flexible huge page sizes on Linux.
- [#2077](https://github.com/xmrig/xmrig/pull/2077) Fixed `illegal instruction` crash on ARM.
# v6.8.0
- [#2052](https://github.com/xmrig/xmrig/pull/2052) Added DMI/SMBIOS reader.
- Added information about memory modules at miner startup and for the online benchmark.
- Added new HTTP API endpoint: `GET /2/dmi`.
- Added new command line option `--no-dmi` or config option `"dmi"`.
- Added new CMake option `-DWITH_DMI=OFF`.
- [#2057](https://github.com/xmrig/xmrig/pull/2057) Improved MSR subsystem code quality.
- [#2058](https://github.com/xmrig/xmrig/pull/2058) RandomX JIT x86: removed unnecessary instructions.
# v6.7.2
- [#2039](https://github.com/xmrig/xmrig/pull/2039) Fixed solo mining.
# v6.7.1
- [#1995](https://github.com/xmrig/xmrig/issues/1995) Fixed log initialization.
- [#1998](https://github.com/xmrig/xmrig/pull/1998) Added hashrate in the benchmark finished message.
- [#2009](https://github.com/xmrig/xmrig/pull/2009) AstroBWT OpenCL fixes.
- [#2028](https://github.com/xmrig/xmrig/pull/2028) RandomX x86 JIT: removed redundant `CFROUND`.
# v6.7.0
- **[#1991](https://github.com/xmrig/xmrig/issues/1991) Added Apple M1 processor support.**
- **[#1986](https://github.com/xmrig/xmrig/pull/1986) Up to 20-30% faster RandomX dataset initialization with AVX2 on some CPUs.**
- [#1964](https://github.com/xmrig/xmrig/pull/1964) Cleanup and refactoring.
- [#1966](https://github.com/xmrig/xmrig/pull/1966) Removed libcpuid support.
- [#1968](https://github.com/xmrig/xmrig/pull/1968) Added virtual machine detection.
- [#1969](https://github.com/xmrig/xmrig/pull/1969) [#1970](https://github.com/xmrig/xmrig/pull/1970) Fixed errors found by static analysis.
- [#1977](https://github.com/xmrig/xmrig/pull/1977) Fixed: secure JIT and huge pages are incompatible on Windows.
- [#1979](https://github.com/xmrig/xmrig/pull/1979) Term `x64` replaced with `64-bit`.
- [#1980](https://github.com/xmrig/xmrig/pull/1980) Fixed build on gcc 11.
- [#1989](https://github.com/xmrig/xmrig/pull/1989) Fixed broken Dero solo mining.
# v6.6.2
- [#1958](https://github.com/xmrig/xmrig/pull/1958) Added example mining scripts to help new miners.
- [#1959](https://github.com/xmrig/xmrig/pull/1959) Optimized JIT compiler.
- [#1960](https://github.com/xmrig/xmrig/pull/1960) Fixed RandomX init when switching to other algo and back.
# v6.6.1
- Fixed benchmark validation on NUMA hardware, which produced incorrect results in some conditions.
# v6.6.0
- Online benchmark protocol upgraded to v2; validation is not compatible with previous versions.
- Single thread benchmark is now cheat-resistant: it can't be sped up by using multiple threads.
- RandomX dataset is now always initialized with a static seed, to prevent time cheating by reporting slow dataset initialization.
- Zero delay online submission, to make time validation much more precise and strict.
- Added a DNS cache for the online benchmark to prevent unexpected delays.
# v6.5.3
- [#1946](https://github.com/xmrig/xmrig/pull/1946) Fixed MSR mod names in JSON API (v6.5.2 affected).
# v6.5.2
- [#1935](https://github.com/xmrig/xmrig/pull/1935) Separate MSR mod for Zen/Zen2 and Zen3.
- [#1937](https://github.com/xmrig/xmrig/issues/1937) Print path to existing WinRing0 service without verbose option.

CMakeLists.txt

@@ -1,11 +1,11 @@
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 2.8.12)
project(xmrig)
option(WITH_LIBCPUID "Enable libcpuid support" ON)
option(WITH_HWLOC "Enable hwloc support" ON)
option(WITH_CN_LITE "Enable CryptoNight-Lite algorithms family" ON)
option(WITH_CN_HEAVY "Enable CryptoNight-Heavy algorithms family" ON)
option(WITH_CN_PICO "Enable CryptoNight-Pico algorithm" ON)
option(WITH_CN_FEMTO "Enable CryptoNight-UPX2 algorithm" ON)
option(WITH_RANDOMX "Enable RandomX algorithms family" ON)
option(WITH_ARGON2 "Enable Argon2 algorithms family" ON)
option(WITH_ASTROBWT "Enable AstroBWT algorithms family" ON)
@@ -26,6 +26,8 @@ option(WITH_INTERLEAVE_DEBUG_LOG "Enable debug log for threads interleave" OFF)
option(WITH_PROFILING "Enable profiling for developers" OFF)
option(WITH_SSE4_1 "Enable SSE 4.1 for Blake2" ON)
option(WITH_BENCHMARK "Enable builtin RandomX benchmark and stress test" ON)
option(WITH_SECURE_JIT "Enable secure access to JIT memory" OFF)
option(WITH_DMI "Enable DMI/SMBIOS reader" ON)
option(BUILD_STATIC "Build static binary" OFF)
option(ARM_TARGET "Force use specific ARM target 8 or 7" 0)
@@ -146,8 +148,10 @@ elseif (XMRIG_OS_APPLE)
src/App_unix.cpp
src/crypto/common/VirtualMemory_unix.cpp
)
find_library(IOKIT_LIBRARY IOKit)
set(EXTRA_LIBS ${IOKIT_LIBRARY})
find_library(CORESERVICES_LIBRARY CoreServices)
set(EXTRA_LIBS ${IOKIT_LIBRARY} ${CORESERVICES_LIBRARY})
else()
list(APPEND SOURCES_OS
src/App_unix.cpp
@@ -168,8 +172,8 @@ else()
endif()
endif()
add_definitions(-DXMRIG_MINER_PROJECT)
add_definitions(-D__STDC_FORMAT_MACROS -DUNICODE)
add_definitions(-DXMRIG_MINER_PROJECT -DXMRIG_JSON_SINGLE_LINE_ARRAY)
add_definitions(-D__STDC_FORMAT_MACROS -DUNICODE -D_FILE_OFFSET_BITS=64)
find_package(UV REQUIRED)
@@ -193,10 +197,17 @@ if (WITH_CN_PICO)
add_definitions(/DXMRIG_ALGO_CN_PICO)
endif()
if (WITH_CN_FEMTO)
add_definitions(/DXMRIG_ALGO_CN_FEMTO)
endif()
if (WITH_EMBEDDED_CONFIG)
add_definitions(/DXMRIG_FEATURE_EMBEDDED_CONFIG)
endif()
include(src/hw/api/api.cmake)
include(src/hw/dmi/dmi.cmake)
include_directories(src)
include_directories(src/3rdparty)
include_directories(${UV_INCLUDE_DIR})
@@ -205,15 +216,17 @@ if (WITH_DEBUG_LOG)
add_definitions(/DAPP_DEBUG)
endif()
add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES})
add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES})
target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${ARGON2_LIBRARY} ${ETHASH_LIBRARY})
if (WIN32)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/bin/WinRing0/WinRing0x64.sys" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/benchmark_1M.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/benchmark_10M.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/pool_mine_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/solo_mine_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_BUILD_TYPE STREQUAL Release)
if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_BUILD_TYPE STREQUAL Release AND NOT CMAKE_GENERATOR STREQUAL Xcode)
add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_STRIP} ${CMAKE_PROJECT_NAME})
endif()

README.md

@@ -19,7 +19,7 @@ XMRig is a high performance, open source, cross platform RandomX, KawPow, Crypto
* **[Build from source](https://xmrig.com/docs/miner/build)**
## Usage
The preferred way to configure the miner is the [JSON config file](src/config.json) as it is more flexible and human friendly. The [command line interface](https://xmrig.com/docs/miner/command-line-options) does not cover all features, such as mining profiles for different algorithms. Important options can be changed during runtime without miner restart by editing the config file or executing API calls.
The preferred way to configure the miner is the [JSON config file](https://xmrig.com/docs/miner/config) as it is more flexible and human friendly. The [command line interface](https://xmrig.com/docs/miner/command-line-options) does not cover all features, such as mining profiles for different algorithms. Important options can be changed during runtime without miner restart by editing the config file or executing [API](https://xmrig.com/docs/miner/api) calls.
* **[Wizard](https://xmrig.com/wizard)** helps you create initial configuration for the miner.
* **[Workers](http://workers.xmrig.info)** helps manage your miners via HTTP API.
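
For orientation, a minimal `config.json` along these lines is enough for pool mining. This is a sketch rather than the full default config; the pool URL matches the example script further down in this diff, and the wallet address is a placeholder you must replace with your own:

```json
{
  "autosave": true,
  "cpu": {
    "enabled": true,
    "huge-pages": true
  },
  "pools": [
    {
      "url": "pool.hashvault.pro:3333",
      "user": "YOUR_WALLET_ADDRESS",
      "pass": "x",
      "keepalive": true
    }
  ]
}
```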

View File

@@ -18,7 +18,6 @@ endif()
if (ARM_TARGET AND ARM_TARGET GREATER 6)
set(XMRIG_ARM ON)
set(WITH_LIBCPUID OFF)
add_definitions(/DXMRIG_ARM)
message(STATUS "Use ARM_TARGET=${ARM_TARGET} (${CMAKE_SYSTEM_PROCESSOR})")

View File

@@ -32,6 +32,10 @@ elseif(XMRIG_OS_APPLE)
else()
add_definitions(/DXMRIG_OS_MACOS)
endif()
if (XMRIG_ARM)
set(WITH_SECURE_JIT ON)
endif()
elseif(XMRIG_OS_UNIX)
add_definitions(/DXMRIG_OS_UNIX)
@@ -43,3 +47,7 @@ elseif(XMRIG_OS_UNIX)
add_definitions(/DXMRIG_OS_FREEBSD)
endif()
endif()
if (WITH_SECURE_JIT)
add_definitions(/DXMRIG_SECURE_JIT)
endif()

cmake/randomx.cmake

@@ -42,13 +42,13 @@ if (WITH_RANDOMX)
src/crypto/rx/RxVm.cpp
)
if (CMAKE_C_COMPILER_ID MATCHES MSVC)
if (WITH_ASM AND CMAKE_C_COMPILER_ID MATCHES MSVC)
enable_language(ASM_MASM)
list(APPEND SOURCES_CRYPTO
src/crypto/randomx/jit_compiler_x86_static.asm
src/crypto/randomx/jit_compiler_x86.cpp
)
elseif (NOT XMRIG_ARM AND CMAKE_SIZEOF_VOID_P EQUAL 8)
elseif (WITH_ASM AND NOT XMRIG_ARM AND CMAKE_SIZEOF_VOID_P EQUAL 8)
list(APPEND SOURCES_CRYPTO
src/crypto/randomx/jit_compiler_x86_static.S
src/crypto/randomx/jit_compiler_x86.cpp
@@ -61,7 +61,11 @@ if (WITH_RANDOMX)
src/crypto/randomx/jit_compiler_a64.cpp
)
# cheat because cmake and ccache hate each other
set_property(SOURCE src/crypto/randomx/jit_compiler_a64_static.S PROPERTY LANGUAGE C)
if (CMAKE_GENERATOR STREQUAL Xcode)
set_property(SOURCE src/crypto/randomx/jit_compiler_a64_static.S PROPERTY LANGUAGE ASM)
else()
set_property(SOURCE src/crypto/randomx/jit_compiler_a64_static.S PROPERTY LANGUAGE C)
endif()
else()
list(APPEND SOURCES_CRYPTO
src/crypto/randomx/jit_compiler_fallback.cpp
@@ -96,18 +100,41 @@ if (WITH_RANDOMX)
message("-- WITH_MSR=ON")
if (XMRIG_OS_WIN)
list(APPEND SOURCES_CRYPTO src/crypto/rx/Rx_win.cpp)
list(APPEND SOURCES_CRYPTO
src/crypto/rx/RxFix_win.cpp
src/hw/msr/Msr_win.cpp
)
elseif (XMRIG_OS_LINUX)
list(APPEND SOURCES_CRYPTO src/crypto/rx/Rx_linux.cpp)
list(APPEND SOURCES_CRYPTO
src/crypto/rx/RxFix_linux.cpp
src/hw/msr/Msr_linux.cpp
)
endif()
list(APPEND HEADERS_CRYPTO src/crypto/rx/msr/MsrItem.h)
list(APPEND SOURCES_CRYPTO src/crypto/rx/msr/MsrItem.cpp)
list(APPEND HEADERS_CRYPTO
src/crypto/rx/RxFix.h
src/crypto/rx/RxMsr.h
src/hw/msr/Msr.h
src/hw/msr/MsrItem.h
)
list(APPEND SOURCES_CRYPTO
src/crypto/rx/RxMsr.cpp
src/hw/msr/Msr.cpp
src/hw/msr/MsrItem.cpp
)
else()
remove_definitions(/DXMRIG_FEATURE_MSR)
remove_definitions(/DXMRIG_FIX_RYZEN)
message("-- WITH_MSR=OFF")
endif()
if (WITH_PROFILING)
add_definitions(/DXMRIG_FEATURE_PROFILING)
list(APPEND HEADERS_CRYPTO src/crypto/rx/Profiler.h)
list(APPEND SOURCES_CRYPTO src/crypto/rx/Profiler.cpp)
endif()
else()
remove_definitions(/DXMRIG_ALGO_RANDOMX)
endif()

doc/CPU.md

@@ -1,3 +1,5 @@
**:warning: Recent version of this page https://xmrig.com/docs/miner/config/cpu.**
# CPU backend
All CPU related settings are contained in one `cpu` object in the config file. The CPU backend allows specifying multiple profiles and switching between them without restrictions, by pool request or config change. The default auto-configuration creates a reasonable minimum of profiles that covers all supported algorithms.
@@ -75,6 +77,35 @@ Each number represent one thread and means CPU affinity, this is default format
```
Internal format, but can be user defined.
## RandomX options
#### `init`
Thread count to initialize RandomX dataset. Auto-detect (`-1`) or any number greater than 0 to use that many threads.
#### `init-avx2`
Use AVX2 for dataset initialization. Faster on some CPUs. Auto-detect (`-1`), disabled (`0`), always enabled on CPUs that support AVX2 (`1`).
#### `mode`
RandomX mining mode: `auto`, `fast` (2 GB memory), `light` (256 MB memory).
#### `1gb-pages`
Use 1 GB huge pages for the RandomX dataset (Linux only). Enabled (`true`) or disabled (`false`). It gives a 1-3% speedup.
#### `wrmsr`
[MSR mod](https://xmrig.com/docs/miner/randomx-optimization-guide/msr). Enabled (`true`) or disabled (`false`). It gives up to a 15% speedup depending on your system. _(**Note**: Userspace MSR writes are no longer enabled by default; the flag `msr.allow_writes=on` must be set for Linux kernels 5.9 and later.)_
#### `rdmsr`
Restore MSR registers to their original values on exit. Used together with `wrmsr`. Enabled (`true`) or disabled (`false`).
#### `cache_qos`
[Cache QoS](https://xmrig.com/docs/miner/randomx-optimization-guide/qos). Enabled (`true`) or disabled (`false`). It's useful for keeping the mining hashrate more stable when you can't or don't want to mine on all CPU cores.
#### `numa`
NUMA support (better hashrate on multi-CPU servers and Ryzen Threadripper 1xxx/2xxx). Enabled (`true`) or disabled (`false`).
#### `scratchpad_prefetch_mode`
Which instruction to use in RandomX loop to prefetch data from scratchpad. `1` is default and fastest in most cases. Can be off (`0`), `prefetcht0` instruction (`1`), `prefetchnta` instruction (`2`, a bit faster on Coffee Lake and a few other CPUs), `mov` instruction (`3`).
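
Put together, these RandomX settings live in the `randomx` object of `config.json`. A minimal sketch with typical default values, matching the option descriptions above (verify against the config file your build generates):

```json
"randomx": {
    "init": -1,
    "init-avx2": -1,
    "mode": "auto",
    "1gb-pages": false,
    "wrmsr": true,
    "rdmsr": true,
    "cache_qos": false,
    "numa": true,
    "scratchpad_prefetch_mode": 1
}
```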
## Shared options
#### `enabled`
@@ -83,23 +114,32 @@ Enable (`true`) or disable (`false`) CPU backend, by default `true`.
#### `huge-pages`
Enable (`true`) or disable (`false`) huge pages support, by default `true`.
#### `huge-pages-jit`
Enable (`true`) or disable (`false`) huge pages support for RandomX JIT code, by default `false`. It gives a very small boost on Ryzen CPUs, but hashrate is unstable between launches. Use with caution.
#### `hw-aes`
Force enable (`true`) or disable (`false`) hardware AES support. The default value `null` means the miner autodetects this feature. You usually don't need to change this option; it is useful for rare cases when the miner can't detect hardware AES even though it is available. If you force enable this option but your hardware doesn't support it, the miner will crash.
#### `priority`
Mining threads priority, a value from `1` (lowest priority) to `5` (highest possible priority). The default value `null` means the miner doesn't change thread priority at all.
Mining threads priority, a value from `1` (lowest priority) to `5` (highest possible priority). The default value `null` means the miner doesn't change thread priority at all. Setting priority higher than `2` can make your PC unresponsive.
#### `memory-pool` (since v4.3.0)
Use a continuous, persistent memory block for mining threads, useful for preserving huge pages allocation while switching algorithms. Possible values: `false` (feature disabled, the default), `true`, or a specific count of 2 MB huge pages. It helps to avoid losing huge pages for scratchpads when the RandomX dataset is updated and mining threads restart after 2-3 days of mining.
#### `yield` (since v5.1.1)
Prefer better system response/stability (`true`, the default value) or maximum hashrate (`false`).
#### `asm`
Enable/configure or disable ASM optimizations. Possible values: `true`, `false`, `"intel"`, `"ryzen"`, `"bulldozer"`.
#### `argon2-impl` (since v3.1.0)
Allows overriding the automatically detected Argon2 implementation; this option was added mostly for debug purposes. The default value `null` means autodetect. Other possible values: `"x86_64"`, `"SSE2"`, `"SSSE3"`, `"XOP"`, `"AVX2"`, `"AVX-512F"`. Manual selection has no safeguards; if your CPU doesn't support the required instructions, the miner will crash.
Allows overriding the automatically detected Argon2 implementation; this option was added mostly for debug purposes. The default value `null` means autodetect. This is used in RandomX dataset initialization and also in some other mining algorithms. Other possible values: `"x86_64"`, `"SSE2"`, `"SSSE3"`, `"XOP"`, `"AVX2"`, `"AVX-512F"`. Manual selection has no safeguards; if your CPU doesn't support the required instructions, the miner will crash.
#### `astrobwt-max-size`
AstroBWT algorithm: skip hashes with a large stage 2 size; default: `550`, min: `400`, max: `1200`. The optimal value depends on your CPU/GPU.
#### `astrobwt-avx2`
AstroBWT algorithm: use AVX2 code. It's faster on some CPUs and slower on others.
#### `max-threads-hint` (since v4.2.0)
Maximum CPU threads count (in percentage) hint for autoconfig. [CPU_MAX_USAGE.md](CPU_MAX_USAGE.md)
#### `memory-pool` (since v4.3.0)
Use a continuous, persistent memory block for mining threads, useful for preserving huge pages allocation while switching algorithms. Possible values: `false` (feature disabled, the default), `true`, or a specific count of 2 MB huge pages.
#### `yield` (since v5.1.1)
Prefer better system response/stability (`true`, the default value) or maximum hashrate (`false`).
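
For reference, a sketch of the `cpu` object combining the shared options above. The values follow the defaults stated in the descriptions; anything not documented here (such as the `astrobwt-avx2` default) is an assumption, so verify against your generated `config.json`:

```json
"cpu": {
    "enabled": true,
    "huge-pages": true,
    "huge-pages-jit": false,
    "hw-aes": null,
    "priority": null,
    "asm": true,
    "argon2-impl": null,
    "astrobwt-max-size": 550,
    "astrobwt-avx2": false,
    "max-threads-hint": 100,
    "memory-pool": false,
    "yield": true
}
```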

View File

@@ -1,5 +0,0 @@
6bb1a2e3a0fbca5195be6022f2a9fbff8a353c37c7542e7ab89420cb45b64505 xmrig-5.0.1-gcc-win32.zip
24dba9ec281acfb2ea2c401ebd0e4e2d1f1ee5fd557da5ff3c7049020c1f78b6 xmrig-5.0.1-gcc-win64.zip
86d65c6693ec9e35cd7547329580638b85c9eb0cf8383892a1c15199de5b556f xmrig-5.0.1-msvc-cuda10_1-win64.zip
0fbfe518b1c4b6993b0f66ff01302626375b15620ccf8f64d6fb97845068ffca xmrig-5.0.1-msvc-win64.zip
aa34890738a3494de2fa0e44db346937fea7339852f5f10b5d4655f95e2d8f1f xmrig-5.0.1-xenial-x64.tar.gz

View File

@@ -1,11 +0,0 @@
-----BEGIN PGP SIGNATURE-----
iQEzBAABCgAdFiEEmsTOqOZuNaXHzdwbRGpTY4vpRAkFAl3VcsoACgkQRGpTY4vp
RAm9vQgA1MyTUU2jley2TCYLUzQy2Fffc8fbXYv64r44jbWOjC/6qo2iIlRgPhIc
oVyPKr5TYS3QjDzCEm8IvozS0YudS6soESbPzqDonboK8pd0K4bsML9TQY2feV7A
NL5vln0rfVHp1wxLLrQpfBqAgvJUXEyaHece6gFQN79JOGhEo2bHL2NyrOl+FViS
b2BaMtXq410Fh+XT6ShnOaG/2EuO8ZqSGdCO6A/2LHQw1UY+mZiCvue6P6B06HmB
WD/urOv38V389v+V+Sp4UlEW6VpBOOjvtChoVWtLt+tKzydrnt2EmoWWWg475pka
4G6whHuMWS8CTt5/PDhJpvVXNQTIOw==
=C764
-----END PGP SIGNATURE-----

View File

@@ -1,3 +1,4 @@
@echo off
cd %~dp0
xmrig.exe --bench=10M --submit
pause

View File

@@ -1,3 +1,4 @@
@echo off
cd %~dp0
xmrig.exe --bench=1M --submit
pause

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
HWLOC_VERSION="2.2.0"
HWLOC_VERSION="2.4.1"
mkdir -p deps
mkdir -p deps/include
@@ -8,7 +8,7 @@ mkdir -p deps/lib
mkdir -p build && cd build
wget https://download.open-mpi.org/release/hwloc/v2.2/hwloc-${HWLOC_VERSION}.tar.gz -O hwloc-${HWLOC_VERSION}.tar.gz
wget https://download.open-mpi.org/release/hwloc/v2.4/hwloc-${HWLOC_VERSION}.tar.gz -O hwloc-${HWLOC_VERSION}.tar.gz
tar -xzf hwloc-${HWLOC_VERSION}.tar.gz
cd hwloc-${HWLOC_VERSION}

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
OPENSSL_VERSION="1.1.1h"
OPENSSL_VERSION="1.1.1k"
mkdir -p deps
mkdir -p deps/include
@@ -17,4 +17,4 @@ make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)
cp -fr include ../../deps
cp libcrypto.a ../../deps/lib
cp libssl.a ../../deps/lib
cd ..
cd ..

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
UV_VERSION="1.40.0"
UV_VERSION="1.41.0"
mkdir -p deps
mkdir -p deps/include

View File

@@ -0,0 +1,20 @@
:: Example batch file for mining Monero at a pool
::
:: Format:
:: xmrig.exe -o <pool address>:<pool port> -u <pool username/wallet> -p <pool password>
::
:: Fields:
:: pool address The host name of the pool stratum or its IP address, for example pool.hashvault.pro
:: pool port The port of the pool's stratum to connect to, for example 3333. Check your pool's getting started page.
:: pool username/wallet For most pools, this is the wallet address you want to mine to. Some pools require a username
:: pool password For most pools this can be just 'x'. For pools using usernames, you may need to provide a password as configured on the pool.
::
:: List of Monero mining pools:
:: https://miningpoolstats.stream/monero
::
:: Choose pools outside of the top 5 to help the Monero network be more decentralized!
:: Smaller pools also often have smaller fees/payout limits.
cd %~dp0
xmrig.exe -o pool.hashvault.pro:3333 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD -p x
pause

View File

@@ -1,28 +1,34 @@
#!/bin/bash
modprobe msr
MSR_FILE=/sys/module/msr/parameters/allow_writes
if cat /proc/cpuinfo | grep "AMD Ryzen" > /dev/null;
if test -e "$MSR_FILE"; then
echo on > $MSR_FILE
else
modprobe msr allow_writes=on
fi
if cat /proc/cpuinfo | grep -E 'AMD Ryzen|AMD EPYC' > /dev/null;
then
if cat /proc/cpuinfo | grep "cpu family[[:space:]]:[[:space:]]25" > /dev/null;
then
echo "Detected Ryzen (Zen3)"
echo "Detected Zen3 CPU"
wrmsr -a 0xc0011020 0x4480000000000
wrmsr -a 0xc0011021 0x1c000200000040
wrmsr -a 0xc0011022 0xc000000401500000
wrmsr -a 0xc001102b 0x2000cc14
echo "MSR register values for Ryzen (Zen3) applied"
echo "MSR register values for Zen3 applied"
else
echo "Detected Ryzen (Zen1/Zen2)"
echo "Detected Zen1/Zen2 CPU"
wrmsr -a 0xc0011020 0
wrmsr -a 0xc0011021 0x40
wrmsr -a 0xc0011022 0x1510000
wrmsr -a 0xc001102b 0x2000cc16
echo "MSR register values for Ryzen (Zen1/Zen2) applied"
echo "MSR register values for Zen1/Zen2 applied"
fi
elif cat /proc/cpuinfo | grep "Intel" > /dev/null;
then
echo "Detected Intel"
echo "Detected Intel CPU"
wrmsr -a 0x1a4 0xf
echo "MSR register values for Intel applied"
else

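As a quick sanity check after running the script, the written registers can be read back with `rdmsr` (a sketch; it assumes the `msr-tools` package is installed and uses the Intel register address from the branch above):

```bash
# Read the prefetcher control MSR back on every CPU; 0xf means
# all four hardware prefetchers are disabled, as the script intends.
rdmsr -a 0x1a4
```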
View File

@@ -0,0 +1,16 @@
:: Example batch file for mining Monero solo
::
:: Format:
:: xmrig.exe -o <node address>:<node port> -a rx/0 -u <wallet address> --daemon
::
:: Fields:
:: node address The host name of your monerod node or its IP address. It can also be a public node with RPC enabled, for example node.xmr.to
:: node port The RPC port of your monerod node to connect to, usually 18081.
:: wallet address Check your Monero CLI or GUI wallet to see your wallet's address.
::
:: Mining solo is the best way to help the Monero network be more decentralized!
:: But you will only get a payout when you find a block, which can take more than a year for a single low-end PC.
cd %~dp0
xmrig.exe -o node.xmr.to:18081 -a rx/0 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD --daemon
pause

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 2.8.12)
project(argon2 C)
set(CMAKE_C_STANDARD 99)

View File

@@ -1754,7 +1754,7 @@ inline bool divisible_by_power_of_2(uint64_t x, int exp) FMT_NOEXCEPT {
#ifdef FMT_BUILTIN_CTZLL
return FMT_BUILTIN_CTZLL(x) >= exp;
#else
return exp < num_bits<uint64_t>()) && x == ((x >> exp) << exp);
return (exp < num_bits<uint64_t>()) && x == ((x >> exp) << exp);
#endif
}

View File

@@ -1,68 +0,0 @@
# Authors ordered by first contribution.
Ryan Dahl <ry@tinyclouds.org>
Jeremy Hinegardner <jeremy@hinegardner.org>
Sergey Shepelev <temotor@gmail.com>
Joe Damato <ice799@gmail.com>
tomika <tomika_nospam@freemail.hu>
Phoenix Sol <phoenix@burninglabs.com>
Cliff Frey <cliff@meraki.com>
Ewen Cheslack-Postava <ewencp@cs.stanford.edu>
Santiago Gala <sgala@apache.org>
Tim Becker <tim.becker@syngenio.de>
Jeff Terrace <jterrace@gmail.com>
Ben Noordhuis <info@bnoordhuis.nl>
Nathan Rajlich <nathan@tootallnate.net>
Mark Nottingham <mnot@mnot.net>
Aman Gupta <aman@tmm1.net>
Tim Becker <tim.becker@kuriositaet.de>
Sean Cunningham <sean.cunningham@mandiant.com>
Peter Griess <pg@std.in>
Salman Haq <salman.haq@asti-usa.com>
Cliff Frey <clifffrey@gmail.com>
Jon Kolb <jon@b0g.us>
Fouad Mardini <f.mardini@gmail.com>
Paul Querna <pquerna@apache.org>
Felix Geisendörfer <felix@debuggable.com>
koichik <koichik@improvement.jp>
Andre Caron <andre.l.caron@gmail.com>
Ivo Raisr <ivosh@ivosh.net>
James McLaughlin <jamie@lacewing-project.org>
David Gwynne <loki@animata.net>
Thomas LE ROUX <thomas@november-eleven.fr>
Randy Rizun <rrizun@ortivawireless.com>
Andre Louis Caron <andre.louis.caron@usherbrooke.ca>
Simon Zimmermann <simonz05@gmail.com>
Erik Dubbelboer <erik@dubbelboer.com>
Martell Malone <martellmalone@gmail.com>
Bertrand Paquet <bpaquet@octo.com>
BogDan Vatra <bogdan@kde.org>
Peter Faiman <peter@thepicard.org>
Corey Richardson <corey@octayn.net>
Tóth Tamás <tomika_nospam@freemail.hu>
Cam Swords <cam.swords@gmail.com>
Chris Dickinson <christopher.s.dickinson@gmail.com>
Uli Köhler <ukoehler@btronik.de>
Charlie Somerville <charlie@charliesomerville.com>
Patrik Stutz <patrik.stutz@gmail.com>
Fedor Indutny <fedor.indutny@gmail.com>
runner <runner.mei@gmail.com>
Alexis Campailla <alexis@janeasystems.com>
David Wragg <david@wragg.org>
Vinnie Falco <vinnie.falco@gmail.com>
Alex Butum <alexbutum@linux.com>
Rex Feng <rexfeng@gmail.com>
Alex Kocharin <alex@kocharin.ru>
Mark Koopman <markmontymark@yahoo.com>
Helge Heß <me@helgehess.eu>
Alexis La Goutte <alexis.lagoutte@gmail.com>
George Miroshnykov <george.miroshnykov@gmail.com>
Maciej Małecki <me@mmalecki.com>
Marc O'Morain <github.com@marcomorain.com>
Jeff Pinner <jpinner@twitter.com>
Timothy J Fontaine <tjfontaine@gmail.com>
Akagi201 <akagi201@gmail.com>
Romain Giraud <giraud.romain@gmail.com>
Jay Satiro <raysatiro@yahoo.com>
Arne Steen <Arne.Steen@gmx.de>
Kjell Schubert <kjell.schubert@gmail.com>
Olivier Mengué <dolmen@cpan.org>

View File

@@ -1,19 +0,0 @@
Copyright Joyent, Inc. and other Node contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

View File

@@ -1,246 +0,0 @@
HTTP Parser
===========
[![Build Status](https://api.travis-ci.org/nodejs/http-parser.svg?branch=master)](https://travis-ci.org/nodejs/http-parser)
This is a parser for HTTP messages written in C. It parses both requests and
responses. The parser is designed to be used in performance HTTP
applications. It does not make any syscalls nor allocations, it does not
buffer data, it can be interrupted at anytime. Depending on your
architecture, it only requires about 40 bytes of data per message
stream (in a web server that is per connection).
Features:
* No dependencies
* Handles persistent streams (keep-alive).
* Decodes chunked encoding.
* Upgrade support
* Defends against buffer overflow attacks.
The parser extracts the following information from HTTP messages:
* Header fields and values
* Content-Length
* Request method
* Response status code
* Transfer-Encoding
* HTTP version
* Request URL
* Message body
Usage
-----
One `http_parser` object is used per TCP connection. Initialize the struct
using `http_parser_init()` and set the callbacks. That might look something
like this for a request parser:
```c
http_parser_settings settings;
settings.on_url = my_url_callback;
settings.on_header_field = my_header_field_callback;
/* ... */
http_parser *parser = malloc(sizeof(http_parser));
http_parser_init(parser, HTTP_REQUEST);
parser->data = my_socket;
```
When data is received on the socket execute the parser and check for errors.
```c
size_t len = 80*1024, nparsed;
char buf[len];
ssize_t recved;
recved = recv(fd, buf, len, 0);
if (recved < 0) {
/* Handle error. */
}
/* Start up / continue the parser.
* Note we pass recved==0 to signal that EOF has been received.
*/
nparsed = http_parser_execute(parser, &settings, buf, recved);
if (parser->upgrade) {
/* handle new protocol */
} else if (nparsed != recved) {
/* Handle error. Usually just close the connection. */
}
```
`http_parser` needs to know where the end of the stream is. For example, sometimes
servers send responses without Content-Length and expect the client to
consume input (for the body) until EOF. To tell `http_parser` about EOF, give
`0` as the fourth parameter to `http_parser_execute()`. Callbacks and errors
can still be encountered during an EOF, so one must still be prepared
to receive them.
Scalar valued message information such as `status_code`, `method`, and the
HTTP version are stored in the parser structure. This data is only
temporally stored in `http_parser` and gets reset on each new message. If
this information is needed later, copy it out of the structure during the
`headers_complete` callback.
The parser decodes the transfer-encoding for both requests and responses
transparently. That is, a chunked encoding is decoded before being sent to
the on_body callback.
The Special Problem of Upgrade
------------------------------
`http_parser` supports upgrading the connection to a different protocol. An
increasingly common example of this is the WebSocket protocol which sends
a request like
GET /demo HTTP/1.1
Upgrade: WebSocket
Connection: Upgrade
Host: example.com
Origin: http://example.com
WebSocket-Protocol: sample
followed by non-HTTP data.
(See [RFC6455](https://tools.ietf.org/html/rfc6455) for more information the
WebSocket protocol.)
To support this, the parser will treat this as a normal HTTP message without a
body, issuing both on_headers_complete and on_message_complete callbacks. However
http_parser_execute() will stop parsing at the end of the headers and return.
The user is expected to check if `parser->upgrade` has been set to 1 after
`http_parser_execute()` returns. Non-HTTP data begins at the buffer supplied
offset by the return value of `http_parser_execute()`.
Callbacks
---------
During the `http_parser_execute()` call, the callbacks set in
`http_parser_settings` will be executed. The parser maintains state and
never looks behind, so buffering the data is not necessary. If you need to
save certain data for later usage, you can do that from the callbacks.
There are two types of callbacks:
* notification `typedef int (*http_cb) (http_parser*);`
Callbacks: on_message_begin, on_headers_complete, on_message_complete.
* data `typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);`
Callbacks: (requests only) on_url,
(common) on_header_field, on_header_value, on_body;
Callbacks must return 0 on success. Returning a non-zero value indicates
error to the parser, making it exit immediately.
For cases where it is necessary to pass local information to/from a callback,
the `http_parser` object's `data` field can be used.
An example of such a case is when using threads to handle a socket connection,
parse a request, and then give a response over that socket. By instantiation
of a thread-local struct containing relevant data (e.g. accepted socket,
allocated memory for callbacks to write into, etc), a parser's callbacks are
able to communicate data between the scope of the thread and the scope of the
callback in a threadsafe manner. This allows `http_parser` to be used in
multi-threaded contexts.
Example:
```c
typedef struct {
socket_t sock;
void* buffer;
int buf_len;
} custom_data_t;
int my_url_callback(http_parser* parser, const char *at, size_t length) {
/* access to thread local custom_data_t struct.
Use this access save parsed data for later use into thread local
buffer, or communicate over socket
*/
parser->data;
...
return 0;
}
...
void http_parser_thread(socket_t sock) {
int nparsed = 0;
/* allocate memory for user data */
custom_data_t *my_data = malloc(sizeof(custom_data_t));
/* some information for use by callbacks.
* achieves thread -> callback information flow */
my_data->sock = sock;
/* instantiate a thread-local parser */
http_parser *parser = malloc(sizeof(http_parser));
http_parser_init(parser, HTTP_REQUEST); /* initialise parser */
/* this custom data reference is accessible through the reference to the
parser supplied to callback functions */
parser->data = my_data;
http_parser_settings settings; /* set up callbacks */
settings.on_url = my_url_callback;
/* execute parser */
nparsed = http_parser_execute(parser, &settings, buf, recved);
...
/* parsed information copied from callback.
can now perform action on data copied into thread-local memory from callbacks.
achieves callback -> thread information flow */
my_data->buffer;
...
}
```
In case you parse HTTP message in chunks (i.e. `read()` request line
from socket, parse, read half headers, parse, etc) your data callbacks
may be called more than once. `http_parser` guarantees that data pointer is only
valid for the lifetime of callback. You can also `read()` into a heap allocated
buffer to avoid copying memory around if this fits your application.
Reading headers may be a tricky task if you read/parse headers partially.
Basically, you need to remember whether last header callback was field or value
and apply the following logic:
(on_header_field and on_header_value shortened to on_h_*)
------------------------ ------------ --------------------------------------------
| State (prev. callback) | Callback | Description/action |
------------------------ ------------ --------------------------------------------
| nothing (first call) | on_h_field | Allocate new buffer and copy callback data |
| | | into it |
------------------------ ------------ --------------------------------------------
| value | on_h_field | New header started. |
| | | Copy current name,value buffers to headers |
| | | list and allocate new buffer for new name |
------------------------ ------------ --------------------------------------------
| field | on_h_field | Previous name continues. Reallocate name |
| | | buffer and append callback data to it |
------------------------ ------------ --------------------------------------------
| field | on_h_value | Value for current header started. Allocate |
| | | new buffer and copy callback data to it |
------------------------ ------------ --------------------------------------------
| value | on_h_value | Value continues. Reallocate value buffer |
| | | and append callback data to it |
------------------------ ------------ --------------------------------------------
Parsing URLs
------------
A simplistic zero-copy URL parser is provided as `http_parser_parse_url()`.
Users of this library may wish to use it to parse URLs constructed from
consecutive `on_url` callbacks.
See examples of reading in headers:
* [partial example](http://gist.github.com/155877) in C
* [from http-parser tests](http://github.com/joyent/http-parser/blob/37a0ff8/test.c#L403) in C
* [from Node library](http://github.com/joyent/node/blob/842eaf4/src/http.js#L284) in Javascript

File diff suppressed because it is too large

View File

@@ -1,442 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef http_parser_h
#define http_parser_h
#ifdef __cplusplus
extern "C" {
#endif
/* Also update SONAME in the Makefile whenever you change these. */
#define HTTP_PARSER_VERSION_MAJOR 2
#define HTTP_PARSER_VERSION_MINOR 9
#define HTTP_PARSER_VERSION_PATCH 3
#include <stddef.h>
#if defined(_WIN32) && !defined(__MINGW32__) && \
(!defined(_MSC_VER) || _MSC_VER<1600) && !defined(__WINE__)
#include <BaseTsd.h>
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
/* Compile with -DHTTP_PARSER_STRICT=0 to make less checks, but run
* faster
*/
#ifndef HTTP_PARSER_STRICT
# define HTTP_PARSER_STRICT 1
#endif
/* Maximium header size allowed. If the macro is not defined
* before including this header then the default is used. To
* change the maximum header size, define the macro in the build
* environment (e.g. -DHTTP_MAX_HEADER_SIZE=<value>). To remove
* the effective limit on the size of the header, define the macro
* to a very large number (e.g. -DHTTP_MAX_HEADER_SIZE=0x7fffffff)
*/
#ifndef HTTP_MAX_HEADER_SIZE
# define HTTP_MAX_HEADER_SIZE (80*1024)
#endif
typedef struct http_parser http_parser;
typedef struct http_parser_settings http_parser_settings;
/* Callbacks should return non-zero to indicate an error. The parser will
* then halt execution.
*
* The one exception is on_headers_complete. In a HTTP_RESPONSE parser
* returning '1' from on_headers_complete will tell the parser that it
* should not expect a body. This is used when receiving a response to a
* HEAD request which may contain 'Content-Length' or 'Transfer-Encoding:
* chunked' headers that indicate the presence of a body.
*
* Returning `2` from on_headers_complete will tell parser that it should not
* expect neither a body nor any futher responses on this connection. This is
* useful for handling responses to a CONNECT request which may not contain
* `Upgrade` or `Connection: upgrade` headers.
*
* http_data_cb does not return data chunks. It will be called arbitrarily
* many times for each string. E.G. you might get 10 callbacks for "on_url"
* each providing just a few characters more data.
*/
typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);
typedef int (*http_cb) (http_parser*);
/* Status Codes */
#define HTTP_STATUS_MAP(XX) \
XX(100, CONTINUE, Continue) \
XX(101, SWITCHING_PROTOCOLS, Switching Protocols) \
XX(102, PROCESSING, Processing) \
XX(200, OK, OK) \
XX(201, CREATED, Created) \
XX(202, ACCEPTED, Accepted) \
XX(203, NON_AUTHORITATIVE_INFORMATION, Non-Authoritative Information) \
XX(204, NO_CONTENT, No Content) \
XX(205, RESET_CONTENT, Reset Content) \
XX(206, PARTIAL_CONTENT, Partial Content) \
XX(207, MULTI_STATUS, Multi-Status) \
XX(208, ALREADY_REPORTED, Already Reported) \
XX(226, IM_USED, IM Used) \
XX(300, MULTIPLE_CHOICES, Multiple Choices) \
XX(301, MOVED_PERMANENTLY, Moved Permanently) \
XX(302, FOUND, Found) \
XX(303, SEE_OTHER, See Other) \
XX(304, NOT_MODIFIED, Not Modified) \
XX(305, USE_PROXY, Use Proxy) \
XX(307, TEMPORARY_REDIRECT, Temporary Redirect) \
XX(308, PERMANENT_REDIRECT, Permanent Redirect) \
XX(400, BAD_REQUEST, Bad Request) \
XX(401, UNAUTHORIZED, Unauthorized) \
XX(402, PAYMENT_REQUIRED, Payment Required) \
XX(403, FORBIDDEN, Forbidden) \
XX(404, NOT_FOUND, Not Found) \
XX(405, METHOD_NOT_ALLOWED, Method Not Allowed) \
XX(406, NOT_ACCEPTABLE, Not Acceptable) \
XX(407, PROXY_AUTHENTICATION_REQUIRED, Proxy Authentication Required) \
XX(408, REQUEST_TIMEOUT, Request Timeout) \
XX(409, CONFLICT, Conflict) \
XX(410, GONE, Gone) \
XX(411, LENGTH_REQUIRED, Length Required) \
XX(412, PRECONDITION_FAILED, Precondition Failed) \
XX(413, PAYLOAD_TOO_LARGE, Payload Too Large) \
XX(414, URI_TOO_LONG, URI Too Long) \
XX(415, UNSUPPORTED_MEDIA_TYPE, Unsupported Media Type) \
XX(416, RANGE_NOT_SATISFIABLE, Range Not Satisfiable) \
XX(417, EXPECTATION_FAILED, Expectation Failed) \
XX(421, MISDIRECTED_REQUEST, Misdirected Request) \
XX(422, UNPROCESSABLE_ENTITY, Unprocessable Entity) \
XX(423, LOCKED, Locked) \
XX(424, FAILED_DEPENDENCY, Failed Dependency) \
XX(426, UPGRADE_REQUIRED, Upgrade Required) \
XX(428, PRECONDITION_REQUIRED, Precondition Required) \
XX(429, TOO_MANY_REQUESTS, Too Many Requests) \
XX(431, REQUEST_HEADER_FIELDS_TOO_LARGE, Request Header Fields Too Large) \
XX(451, UNAVAILABLE_FOR_LEGAL_REASONS, Unavailable For Legal Reasons) \
XX(500, INTERNAL_SERVER_ERROR, Internal Server Error) \
XX(501, NOT_IMPLEMENTED, Not Implemented) \
XX(502, BAD_GATEWAY, Bad Gateway) \
XX(503, SERVICE_UNAVAILABLE, Service Unavailable) \
XX(504, GATEWAY_TIMEOUT, Gateway Timeout) \
XX(505, HTTP_VERSION_NOT_SUPPORTED, HTTP Version Not Supported) \
XX(506, VARIANT_ALSO_NEGOTIATES, Variant Also Negotiates) \
XX(507, INSUFFICIENT_STORAGE, Insufficient Storage) \
XX(508, LOOP_DETECTED, Loop Detected) \
XX(510, NOT_EXTENDED, Not Extended) \
XX(511, NETWORK_AUTHENTICATION_REQUIRED, Network Authentication Required) \
enum http_status
{
#define XX(num, name, string) HTTP_STATUS_##name = num,
HTTP_STATUS_MAP(XX)
#undef XX
};
/* Request Methods */
#define HTTP_METHOD_MAP(XX) \
XX(0, DELETE, DELETE) \
XX(1, GET, GET) \
XX(2, HEAD, HEAD) \
XX(3, POST, POST) \
XX(4, PUT, PUT) \
/* pathological */ \
XX(5, CONNECT, CONNECT) \
XX(6, OPTIONS, OPTIONS) \
XX(7, TRACE, TRACE) \
/* WebDAV */ \
XX(8, COPY, COPY) \
XX(9, LOCK, LOCK) \
XX(10, MKCOL, MKCOL) \
XX(11, MOVE, MOVE) \
XX(12, PROPFIND, PROPFIND) \
XX(13, PROPPATCH, PROPPATCH) \
XX(14, SEARCH, SEARCH) \
XX(15, UNLOCK, UNLOCK) \
XX(16, BIND, BIND) \
XX(17, REBIND, REBIND) \
XX(18, UNBIND, UNBIND) \
XX(19, ACL, ACL) \
/* subversion */ \
XX(20, REPORT, REPORT) \
XX(21, MKACTIVITY, MKACTIVITY) \
XX(22, CHECKOUT, CHECKOUT) \
XX(23, MERGE, MERGE) \
/* upnp */ \
XX(24, MSEARCH, M-SEARCH) \
XX(25, NOTIFY, NOTIFY) \
XX(26, SUBSCRIBE, SUBSCRIBE) \
XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \
/* RFC-5789 */ \
XX(28, PATCH, PATCH) \
XX(29, PURGE, PURGE) \
/* CalDAV */ \
XX(30, MKCALENDAR, MKCALENDAR) \
/* RFC-2068, section 19.6.1.2 */ \
XX(31, LINK, LINK) \
XX(32, UNLINK, UNLINK) \
/* icecast */ \
XX(33, SOURCE, SOURCE) \
enum http_method
{
#define XX(num, name, string) HTTP_##name = num,
HTTP_METHOD_MAP(XX)
#undef XX
};
enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH };
/* Flag values for http_parser.flags field */
enum flags
{ F_CHUNKED = 1 << 0
, F_CONNECTION_KEEP_ALIVE = 1 << 1
, F_CONNECTION_CLOSE = 1 << 2
, F_CONNECTION_UPGRADE = 1 << 3
, F_TRAILING = 1 << 4
, F_UPGRADE = 1 << 5
, F_SKIPBODY = 1 << 6
, F_CONTENTLENGTH = 1 << 7
, F_TRANSFER_ENCODING = 1 << 8
};
/* Map for errno-related constants
*
* The provided argument should be a macro that takes 2 arguments.
*/
#define HTTP_ERRNO_MAP(XX) \
/* No error */ \
XX(OK, "success") \
\
/* Callback-related errors */ \
XX(CB_message_begin, "the on_message_begin callback failed") \
XX(CB_url, "the on_url callback failed") \
XX(CB_header_field, "the on_header_field callback failed") \
XX(CB_header_value, "the on_header_value callback failed") \
XX(CB_headers_complete, "the on_headers_complete callback failed") \
XX(CB_body, "the on_body callback failed") \
XX(CB_message_complete, "the on_message_complete callback failed") \
XX(CB_status, "the on_status callback failed") \
XX(CB_chunk_header, "the on_chunk_header callback failed") \
XX(CB_chunk_complete, "the on_chunk_complete callback failed") \
\
/* Parsing-related errors */ \
XX(INVALID_EOF_STATE, "stream ended at an unexpected time") \
XX(HEADER_OVERFLOW, \
"too many header bytes seen; overflow detected") \
XX(CLOSED_CONNECTION, \
"data received after completed connection: close message") \
XX(INVALID_VERSION, "invalid HTTP version") \
XX(INVALID_STATUS, "invalid HTTP status code") \
XX(INVALID_METHOD, "invalid HTTP method") \
XX(INVALID_URL, "invalid URL") \
XX(INVALID_HOST, "invalid host") \
XX(INVALID_PORT, "invalid port") \
XX(INVALID_PATH, "invalid path") \
XX(INVALID_QUERY_STRING, "invalid query string") \
XX(INVALID_FRAGMENT, "invalid fragment") \
XX(LF_EXPECTED, "LF character expected") \
XX(INVALID_HEADER_TOKEN, "invalid character in header") \
XX(INVALID_CONTENT_LENGTH, \
"invalid character in content-length header") \
XX(UNEXPECTED_CONTENT_LENGTH, \
"unexpected content-length header") \
XX(INVALID_CHUNK_SIZE, \
"invalid character in chunk size header") \
XX(INVALID_TRANSFER_ENCODING, \
"request has invalid transfer-encoding") \
XX(INVALID_CONSTANT, "invalid constant string") \
XX(INVALID_INTERNAL_STATE, "encountered unexpected internal state")\
XX(STRICT, "strict mode assertion failed") \
XX(PAUSED, "parser is paused") \
XX(UNKNOWN, "an unknown error occurred")
/* Define HPE_* values for each errno value above */
#define HTTP_ERRNO_GEN(n, s) HPE_##n,
enum http_errno {
HTTP_ERRNO_MAP(HTTP_ERRNO_GEN)
};
#undef HTTP_ERRNO_GEN
/* Get an http_errno value from an http_parser */
#define HTTP_PARSER_ERRNO(p) ((enum http_errno) (p)->http_errno)
struct http_parser {
/** PRIVATE **/
unsigned int type : 2; /* enum http_parser_type */
unsigned int state : 7; /* enum state from http_parser.c */
unsigned int header_state : 7; /* enum header_state from http_parser.c */
unsigned int index : 7; /* index into current matcher */
unsigned int lenient_http_headers : 1;
unsigned int flags : 16; /* F_* values from 'flags' enum; semi-public */
uint32_t nread; /* # bytes read in various scenarios */
uint64_t content_length; /* # bytes in body (0 if no Content-Length header) */
/** READ-ONLY **/
unsigned short http_major;
unsigned short http_minor;
unsigned int status_code : 16; /* responses only */
unsigned int method : 8; /* requests only */
unsigned int http_errno : 7;
/* 1 = Upgrade header was present and the parser has exited because of that.
* 0 = No upgrade header present.
* Should be checked when http_parser_execute() returns in addition to
* error checking.
*/
unsigned int upgrade : 1;
/** PUBLIC **/
void *data; /* A pointer to get hook to the "connection" or "socket" object */
};
struct http_parser_settings {
http_cb on_message_begin;
http_data_cb on_url;
http_data_cb on_status;
http_data_cb on_header_field;
http_data_cb on_header_value;
http_cb on_headers_complete;
http_data_cb on_body;
http_cb on_message_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
*/
http_cb on_chunk_header;
http_cb on_chunk_complete;
};
enum http_parser_url_fields
{ UF_SCHEMA = 0
, UF_HOST = 1
, UF_PORT = 2
, UF_PATH = 3
, UF_QUERY = 4
, UF_FRAGMENT = 5
, UF_USERINFO = 6
, UF_MAX = 7
};
/* Result structure for http_parser_parse_url().
*
* Callers should index into field_data[] with UF_* values iff field_set
* has the relevant (1 << UF_*) bit set. As a courtesy to clients (and
* because we probably have padding left over), we convert any port to
* a uint16_t.
*/
struct http_parser_url {
uint16_t field_set; /* Bitmask of (1 << UF_*) values */
uint16_t port; /* Converted UF_PORT string */
struct {
uint16_t off; /* Offset into buffer in which field starts */
uint16_t len; /* Length of run in buffer */
} field_data[UF_MAX];
};
/* Returns the library version. Bits 16-23 contain the major version number,
* bits 8-15 the minor version number and bits 0-7 the patch level.
* Usage example:
*
* unsigned long version = http_parser_version();
* unsigned major = (version >> 16) & 255;
* unsigned minor = (version >> 8) & 255;
* unsigned patch = version & 255;
* printf("http_parser v%u.%u.%u\n", major, minor, patch);
*/
unsigned long http_parser_version(void);
void http_parser_init(http_parser *parser, enum http_parser_type type);
/* Initialize http_parser_settings members to 0
*/
void http_parser_settings_init(http_parser_settings *settings);
/* Executes the parser. Returns number of parsed bytes. Sets
* `parser->http_errno` on error. */
size_t http_parser_execute(http_parser *parser,
const http_parser_settings *settings,
const char *data,
size_t len);
/* If http_should_keep_alive() in the on_headers_complete or
* on_message_complete callback returns 0, then this should be
* the last message on the connection.
* If you are the server, respond with the "Connection: close" header.
* If you are the client, close the connection.
*/
int http_should_keep_alive(const http_parser *parser);
/* Returns a string version of the HTTP method. */
const char *http_method_str(enum http_method m);
/* Returns a string version of the HTTP status code. */
const char *http_status_str(enum http_status s);
/* Return a string name of the given error */
const char *http_errno_name(enum http_errno err);
/* Return a string description of the given error */
const char *http_errno_description(enum http_errno err);
/* Initialize all http_parser_url members to 0 */
void http_parser_url_init(struct http_parser_url *u);
/* Parse a URL; return nonzero on failure */
int http_parser_parse_url(const char *buf, size_t buflen,
int is_connect,
struct http_parser_url *u);
/* Pause or un-pause the parser; a nonzero value pauses */
void http_parser_pause(http_parser *parser, int paused);
/* Checks if this is the final chunk of the body. */
int http_body_is_final(const http_parser *parser);
/* Change the maximum header size provided at compile time. */
void http_parser_set_max_header_size(uint32_t size);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 2.8)
cmake_minimum_required (VERSION 2.8.12)
project (hwloc C)
include_directories(include)
@@ -13,23 +13,25 @@ set(HEADERS
)
set(SOURCES
src/base64.c
src/bind.c
src/bitmap.c
src/components.c
src/diff.c
src/distances.c
src/misc.c
src/pci-common.c
src/shmem.c
src/topology.c
src/topology-noos.c
src/topology-synthetic.c
src/topology-windows.c
src/topology-x86.c
src/topology-xml.c
src/topology-xml-nolibxml.c
src/base64.c
src/bind.c
src/bitmap.c
src/components.c
src/diff.c
src/distances.c
src/misc.c
src/pci-common.c
src/shmem.c
src/topology.c
src/topology-noos.c
src/topology-synthetic.c
src/topology-windows.c
src/topology-x86.c
src/topology-xml.c
src/topology-xml-nolibxml.c
src/traversal.c
src/memattrs.c
src/cpukinds.c
)
add_library(hwloc STATIC

View File

@@ -2,6 +2,7 @@ Copyright © 2009 CNRS
Copyright © 2009-2020 Inria. All rights reserved.
Copyright © 2009-2013 Université Bordeaux
Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
Copyright © 2020 Hewlett Packard Enterprise. All rights reserved.
$COPYRIGHT$
@@ -16,6 +17,76 @@ bug fixes (and other actions) for each version of hwloc since version
0.9.
Version 2.4.0
-------------
* API
+ Add hwloc/cpukinds.h for reporting information about hybrid CPUs.
- Use Linux cpufreq frequencies to rank cores by efficiency.
- Use x86 CPUID hybrid leaf and future Linux kernels sysfs CPU type
files to identify Intel Atom and Core cores.
- Use the Windows native EfficiencyClass to separate kinds.
* Backends
+ Properly handle Linux kernel 5.10+ exposing ACPI HMAT information
with knowledge of Generic Initiators.
* Tools
+ lstopo has new --cpukinds and --no-cpukinds options for showing
CPU kinds or not in textual and graphical modes respectively.
+ hwloc-calc has a new --cpukind option for filtering PUs by kind.
+ hwloc-annotate has a new cpukind command for modifying CPU kinds.
* Misc
+ Fix hwloc_bitmap_nr_ulongs(), thanks to Norbert Eicker.
+ Add a documentation section about
"Topology Attributes: Distances, Memory Attributes and CPU Kinds".
+ Silence some spurious warnings in the OpenCL backend and when showing
process binding with lstopo --ps.
Version 2.3.0
-------------
* API
+ Add hwloc/memattrs.h for exposing latency/bandwidth information
between initiators (CPU sets for now) and target NUMA nodes,
typically on heterogeneous platforms.
- When available, bandwidths and latencies are read from the ACPI HMAT
table exposed by Linux kernel 5.2+.
- Attributes may also be customized to expose user-defined performance
information.
+ Add hwloc_get_local_numanode_objs() for listing NUMA nodes that are
local to some locality.
+ The new topology flag HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT causes
support arrays to be loaded from XML exported with hwloc 2.3+.
- hwloc_topology_get_support() now returns an additional "misc"
array with feature "imported_support" set when support was imported.
+ Add hwloc_topology_refresh() to refresh internal caches after modifying
the topology and before consulting the topology in a multithread context.
* Backends
+ Add a ROCm SMI backend and a hwloc/rsmi.h helper file for getting
the locality of AMD GPUs, now exposed as "rsmi" OS devices.
Thanks to Mike Li.
+ Remove POWER device-tree-based topology on Linux,
(it was disabled by default since 2.1).
* Tools
+ Command-line options for specifying flags now understand comma-separated
lists of flag names (substrings).
+ hwloc-info and hwloc-calc have new --local-memory --local-memory-flags
and --best-memattr options for reporting local memory nodes and filtering
by memory attributes.
+ hwloc-bind has a new --best-memattr option for filtering by memory attributes
among the memory binding set.
+ Tools that have a --restrict option may now receive a nodeset or
some custom flags for restricting the topology.
+ lstopo now has a --thickness option for changing line thickness in the
graphical output.
+ Fix lstopo drawing when autoresizing on Windows 10.
+ Pressing the F5 key in lstopo X11 and Windows graphical/interactive outputs
now refreshes the display according to the current topology and binding.
+ Add a tikz lstopo graphical backend to generate picture easily included into
LaTeX documents. Thanks to Clement Foyer.
* Misc
+ The default installation path of the Bash completion file has changed to
${datadir}/bash-completion/completions/hwloc. Thanks to Tomasz Kłoczko.
Version 2.2.0
-------------
* API

View File

@@ -23,9 +23,9 @@ APIs are documented after these sections.
Installation
hwloc (http://www.open-mpi.org/projects/hwloc/) is available under the BSD
license. It is hosted as a sub-project of the overall Open MPI project (http://
www.open-mpi.org/). Note that hwloc does not require any functionality from
hwloc (https://www.open-mpi.org/projects/hwloc/) is available under the BSD
license. It is hosted as a sub-project of the overall Open MPI project (https:/
/www.open-mpi.org/). Note that hwloc does not require any functionality from
Open MPI -- it is a wholly separate (and much smaller!) project and code base.
It just happens to be hosted as part of the overall Open MPI project.
@@ -75,7 +75,7 @@ Bugs should be reported in the tracker (https://github.com/open-mpi/hwloc/
issues). Opening a new issue automatically displays lots of hints about how to
debug and report issues.
Questions may be sent to the users or developers mailing lists (http://
Questions may be sent to the users or developers mailing lists (https://
www.open-mpi.org/community/lists/hwloc.php).
There is also a #hwloc IRC channel on Freenode (irc.freenode.net).

View File

@@ -8,7 +8,7 @@
# Please update HWLOC_VERSION* in contrib/windows/hwloc_config.h too.
major=2
minor=2
minor=4
release=0
# greek is used for alpha or beta release tags. If it is non-empty,
@@ -22,7 +22,7 @@ greek=
# The date when this release was created
date="Mar 30, 2020"
date="Nov 26, 2020"
# If snapshot=1, then use the value from snapshot_version as the
# entire hwloc version (i.e., ignore major, minor, release, and
@@ -41,7 +41,7 @@ snapshot_version=${major}.${minor}.${release}${greek}-git
# 2. Version numbers are described in the Libtool current:revision:age
# format.
libhwloc_so_version=17:0:2
libhwloc_so_version=19:0:4
libnetloc_so_version=0:0:0
# Please also update the <TargetName> lines in contrib/windows/libhwloc.vcxproj

View File

@@ -1,8 +1,8 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2009-2020 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -11,7 +11,7 @@
* ------------------------------------------------
* $tarball_directory/doc/doxygen-doc/
* or
* http://www.open-mpi.org/projects/hwloc/doc/
* https://www.open-mpi.org/projects/hwloc/doc/
*=====================================================================
*
* FAIR WARNING: Do NOT expect to be able to figure out all the
@@ -93,7 +93,7 @@ extern "C" {
* Two stable releases of the same series usually have the same ::HWLOC_API_VERSION
* even if their HWLOC_VERSION are different.
*/
#define HWLOC_API_VERSION 0x00020100
#define HWLOC_API_VERSION 0x00020400
/** \brief Indicate at runtime which hwloc API version was used at build time.
*
@@ -102,7 +102,7 @@ extern "C" {
HWLOC_DECLSPEC unsigned hwloc_get_api_version(void);
/** \brief Current component and plugin ABI version (see hwloc/plugins.h) */
#define HWLOC_COMPONENT_ABI 6
#define HWLOC_COMPONENT_ABI 7
/** @} */
@@ -196,7 +196,7 @@ typedef enum {
*/
HWLOC_OBJ_CORE, /**< \brief Core.
* A computation unit (may be shared by several
* logical processors).
* PUs, aka logical processors).
*/
HWLOC_OBJ_PU, /**< \brief Processing Unit, or (Logical) Processor.
* An execution unit (may share a core with some
@@ -257,22 +257,31 @@ typedef enum {
HWLOC_OBJ_BRIDGE, /**< \brief Bridge (filtered out by default).
* Any bridge (or PCI switch) that connects the host or an I/O bus,
* to another I/O bus.
* They are not added to the topology unless I/O discovery
* is enabled with hwloc_topology_set_flags().
*
* Bridges are not added to the topology unless their
* filtering is changed (see hwloc_topology_set_type_filter()
* and hwloc_topology_set_io_types_filter()).
*
* I/O objects are not listed in the main children list,
* but rather in the dedicated io children list.
* I/O objects have NULL CPU and node sets.
*/
HWLOC_OBJ_PCI_DEVICE, /**< \brief PCI device (filtered out by default).
* They are not added to the topology unless I/O discovery
* is enabled with hwloc_topology_set_flags().
*
* PCI devices are not added to the topology unless their
* filtering is changed (see hwloc_topology_set_type_filter()
* and hwloc_topology_set_io_types_filter()).
*
* I/O objects are not listed in the main children list,
* but rather in the dedicated io children list.
* I/O objects have NULL CPU and node sets.
*/
HWLOC_OBJ_OS_DEVICE, /**< \brief Operating system device (filtered out by default).
* They are not added to the topology unless I/O discovery
* is enabled with hwloc_topology_set_flags().
*
* OS devices are not added to the topology unless their
* filtering is changed (see hwloc_topology_set_type_filter()
* and hwloc_topology_set_io_types_filter()).
*
* I/O objects are not listed in the main children list,
* but rather in the dedicated io children list.
* I/O objects have NULL CPU and node sets.
@@ -282,6 +291,10 @@ typedef enum {
* Objects without particular meaning, that can e.g. be
* added by the application for its own use, or by hwloc
* for miscellaneous objects such as MemoryModule (DIMMs).
*
* They are not added to the topology unless their filtering
* is changed (see hwloc_topology_set_type_filter()).
*
* These objects are not listed in the main children list,
* but rather in the dedicated misc children list.
* Misc objects may only have Misc objects as children,
@@ -304,7 +317,6 @@ typedef enum {
HWLOC_OBJ_DIE, /**< \brief Die within a physical package.
* A subpart of the physical package, that contains multiple cores.
* \hideinitializer
*/
HWLOC_OBJ_TYPE_MAX /**< \private Sentinel value */
@@ -338,8 +350,7 @@ typedef enum hwloc_obj_osdev_type_e {
HWLOC_OBJ_OSDEV_DMA, /**< \brief Operating system dma engine device.
* For instance the "dma0chan0" DMA channel on Linux. */
HWLOC_OBJ_OSDEV_COPROC /**< \brief Operating system co-processor device.
* For instance "mic0" for a Xeon Phi (MIC) on Linux,
* "opencl0d0" for a OpenCL device,
* For instance "opencl0d0" for a OpenCL device,
* "cuda0" for a CUDA device. */
} hwloc_obj_osdev_type_t;
@@ -512,7 +523,7 @@ struct hwloc_obj {
*
* \note Its value must not be changed, hwloc_bitmap_dup() must be used instead.
*/
hwloc_cpuset_t complete_cpuset; /**< \brief The complete CPU set of logical processors of this object,
hwloc_cpuset_t complete_cpuset; /**< \brief The complete CPU set of processors of this object,
*
* This may include not only the same as the cpuset field, but also some CPUs for
* which topology information is unknown or incomplete, some offlines CPUs, and
@@ -533,6 +544,8 @@ struct hwloc_obj {
* between this object and the NUMA node objects).
*
* In the end, these nodes are those that are close to the current object.
* Function hwloc_get_local_numanode_objs() may be used to list those NUMA
* nodes more precisely.
*
* If the ::HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED configuration flag is set,
* some of these nodes may not be allowed for allocation,
@@ -1929,7 +1942,31 @@ enum hwloc_topology_flags_e {
* would result in the same behavior.
* \hideinitializer
*/
HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES = (1UL<<2)
HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES = (1UL<<2),
/** \brief Import support from the imported topology.
*
* When importing a XML topology from a remote machine, binding is
* disabled by default (see ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM).
* This disabling is also marked by putting zeroes in the corresponding
* supported feature bits reported by hwloc_topology_get_support().
*
* The flag ::HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT actually imports
* support bits from the remote machine. It also sets the flag
* \p imported_support in the struct hwloc_topology_misc_support array.
* If the imported XML did not contain any support information
* (exporter hwloc is too old), this flag is not set.
*
* Note that these supported features are only relevant for the hwloc
* installation that actually exported the XML topology
* (it may vary with the operating system, or with how hwloc was compiled).
*
* Note that setting this flag however does not enable binding for the
* locally imported hwloc topology, it only reports what the remote
* hwloc and machine support.
*
*/
HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT = (1UL<<3)
};
/** \brief Set OR'ed flags to non-yet-loaded topology.
@@ -1972,6 +2009,8 @@ struct hwloc_topology_discovery_support {
unsigned char disallowed_pu;
/** \brief Detecting and identifying NUMA nodes that are not available to the current process is supported. */
unsigned char disallowed_numa;
/** \brief Detecting the efficiency of CPU kinds is supported, see \ref hwlocality_cpukinds. */
unsigned char cpukind_efficiency;
};
/** \brief Flags describing actual PU binding support for this topology.
@@ -2042,6 +2081,13 @@ struct hwloc_topology_membind_support {
unsigned char get_area_memlocation;
};
/** \brief Flags describing miscellaneous features.
*/
struct hwloc_topology_misc_support {
/** Support was imported when importing another topology, see ::HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT. */
unsigned char imported_support;
};
/** \brief Set of flags describing actual support for this topology.
*
* This is retrieved with hwloc_topology_get_support() and will be valid until
@@ -2052,6 +2098,7 @@ struct hwloc_topology_support {
struct hwloc_topology_discovery_support *discovery;
struct hwloc_topology_cpubind_support *cpubind;
struct hwloc_topology_membind_support *membind;
struct hwloc_topology_misc_support *misc;
};
/** \brief Retrieve the topology support.
@@ -2062,6 +2109,18 @@ struct hwloc_topology_support {
* call may still fail in some corner cases.
*
* These features are also listed by hwloc-info \--support
*
* The reported features are what the current topology supports
* on the current machine. If the topology was exported to XML
* from another machine and later imported here, support still
* describes what is supported for this imported topology after
* import. By default, binding will be reported as unsupported
* in this case (see ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM).
*
* Topology flag ::HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT may be used
* to report the supported features of the original remote machine
* instead. If it was successfully imported, \p imported_support
* will be set in the struct hwloc_topology_misc_support array.
*/
HWLOC_DECLSPEC const struct hwloc_topology_support *hwloc_topology_get_support(hwloc_topology_t __hwloc_restrict topology);
@@ -2108,8 +2167,8 @@ enum hwloc_type_filter_e {
*
* It is only useful for I/O object types.
* For ::HWLOC_OBJ_PCI_DEVICE and ::HWLOC_OBJ_OS_DEVICE, it means that only objects
* of major/common kinds are kept (storage, network, OpenFabrics, Intel MICs, CUDA,
* OpenCL, NVML, and displays).
* of major/common kinds are kept (storage, network, OpenFabrics, CUDA,
* OpenCL, RSMI, NVML, and displays).
* Also, only OS devices directly attached on PCI (e.g. no USB) are reported.
* For ::HWLOC_OBJ_BRIDGE, it means that bridges are kept only if they have children.
*
@@ -2303,22 +2362,9 @@ HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_insert_misc_object(hwloc_topology_t to
/** \brief Allocate a Group object to insert later with hwloc_topology_insert_group_object().
*
* This function returns a new Group object.
* The caller should (at least) initialize its sets before inserting the object.
* See hwloc_topology_insert_group_object().
*
* The \p subtype object attribute may be set to display something else
* than "Group" as the type name for this object in lstopo.
* Custom name/value info pairs may be added with hwloc_obj_add_info() after
* insertion.
*
* The \p kind group attribute should be 0. The \p subkind group attribute may
* be set to identify multiple Groups of the same level.
*
* It is recommended not to set any other object attribute before insertion,
* since the Group may get discarded during insertion.
*
* The object will be destroyed if passed to hwloc_topology_insert_group_object()
* without any set defined.
* The caller should (at least) initialize its sets before inserting
* the object in the topology. See hwloc_topology_insert_group_object().
*/
HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_alloc_group_object(hwloc_topology_t topology);
@@ -2329,34 +2375,44 @@ HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_alloc_group_object(hwloc_topology_t to
* the final location of the Group in the topology.
* Then the object can be passed to this function for actual insertion in the topology.
*
* The group \p dont_merge attribute may be set to prevent the core from
* ever merging this object with another object hierarchically-identical.
*
* Either the cpuset or nodeset field (or both, if compatible) must be set
* to a non-empty bitmap. The complete_cpuset or complete_nodeset may be set
* instead if inserting with respect to the complete topology
* (including disallowed, offline or unknown objects).
*
* It grouping several objects, hwloc_obj_add_other_obj_sets() is an easy way
* If grouping several objects, hwloc_obj_add_other_obj_sets() is an easy way
* to build the Group sets iteratively.
*
* These sets cannot be larger than the current topology, or they would get
* restricted silently.
*
* The core will setup the other sets after actual insertion.
*
* The \p subtype object attribute may be defined (to a dynamically
* allocated string) to display something else than "Group" as the
* type name for this object in lstopo.
* Custom name/value info pairs may be added with hwloc_obj_add_info() after
* insertion.
*
* The group \p dont_merge attribute may be set to \c 1 to prevent
* the hwloc core from ever merging this object with another
* hierarchically-identical object.
* This is useful when the Group itself describes an important feature
* that cannot be exposed anywhere else in the hierarchy.
*
* The group \p kind attribute may be set to a high value such
* as \c 0xffffffff to tell hwloc that this new Group should always
* be discarded in favor of any existing Group with the same locality.
*
* \return The inserted object if it was properly inserted.
*
* \return An existing object if the Group was discarded because the topology already
* contained an object at the same location (the Group did not add any locality information).
* Any name/info key pair set before inserting is appended to the existing object.
* \return An existing object if the Group was merged or discarded
* because the topology already contained an object at the same
* location (the Group did not add any hierarchy information).
*
* \return \c NULL if the insertion failed because of conflicting sets in topology tree.
*
* \return \c NULL if Group objects are filtered-out of the topology (::HWLOC_TYPE_FILTER_KEEP_NONE).
*
* \return \c NULL if the object was discarded because no set was initialized in the Group
* before insert, or all of them were empty.
* \return \c NULL if the object was discarded because no set was
* initialized in the Group before insert, or all of them were empty.
*/
HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_insert_group_object(hwloc_topology_t topology, hwloc_obj_t group);
@@ -2371,6 +2427,22 @@ HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_insert_group_object(hwloc_topology_t t
*/
HWLOC_DECLSPEC int hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src);
/** \brief Refresh internal structures after topology modification.
*
* Modifying the topology (by restricting, adding objects, modifying structures
* such as distances or memory attributes, etc.) may cause some internal caches
* to become invalid. These caches are automatically refreshed when accessed
* but this refreshing is not thread-safe.
*
* This function is not thread-safe either, but it is a good way to end a
* non-thread-safe phase of topology modification. Once this refresh is done,
* multiple threads may concurrently consult the topology, objects, distances,
* attributes, etc.
*
* See also \ref threadsafety
*/
HWLOC_DECLSPEC int hwloc_topology_refresh(hwloc_topology_t topology);
/** @} */
@@ -2386,6 +2458,12 @@ HWLOC_DECLSPEC int hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src
/* inline code of some functions above */
#include "hwloc/inlines.h"
/* memory attributes */
#include "hwloc/memattrs.h"
/* kinds of CPU cores */
#include "hwloc/cpukinds.h"
/* exporting to XML or synthetic */
#include "hwloc/export.h"

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -11,10 +11,10 @@
#ifndef HWLOC_CONFIG_H
#define HWLOC_CONFIG_H
#define HWLOC_VERSION "2.2.0"
#define HWLOC_VERSION "2.4.1"
#define HWLOC_VERSION_MAJOR 2
#define HWLOC_VERSION_MINOR 2
#define HWLOC_VERSION_RELEASE 0
#define HWLOC_VERSION_MINOR 4
#define HWLOC_VERSION_RELEASE 1
#define HWLOC_VERSION_GREEK ""
#define __hwloc_restrict

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2018 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -231,7 +231,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_clr_range(hwloc_bitmap_t bitmap, unsigned begin,
/** \brief Keep a single index among those set in bitmap \p bitmap
*
* May be useful before binding so that the process does not
* have a chance of migrating between multiple logical CPUs
* have a chance of migrating between multiple processors
* in the original mask.
* Instead of running the task on any PU inside the given CPU set,
* the operating system scheduler will be forced to run it on a single

View File

@@ -0,0 +1,188 @@
/*
* Copyright © 2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
/** \file
* \brief Kinds of CPU cores.
*/
#ifndef HWLOC_CPUKINDS_H
#define HWLOC_CPUKINDS_H
#include "hwloc.h"
#ifdef __cplusplus
extern "C" {
#elif 0
}
#endif
/** \defgroup hwlocality_cpukinds Kinds of CPU cores
*
* Platforms with heterogeneous CPUs may have some cores with
* different features or frequencies.
* This API exposes identical PUs in sets called CPU kinds.
* Each PU of the topology may only be in a single kind.
*
* The number of kinds may be obtained with hwloc_cpukinds_get_nr().
* If the platform is homogeneous, there may be a single kind
* with all PUs.
* If the platform or operating system does not expose any
* information about CPU cores, there may be no kind at all.
*
* The index of the kind that describes a given CPU set
* (if any, and not partially)
* may be obtained with hwloc_cpukinds_get_by_cpuset().
*
* From the index of a kind, it is possible to retrieve information
* with hwloc_cpukinds_get_info():
* an abstracted efficiency value,
* and an array of info attributes
* (for instance the "CoreType" and "FrequencyMaxMHz",
* see \ref topoattrs_cpukinds).
*
* A higher efficiency value means intrinsic greater performance
* (and possibly less performance/power efficiency).
* Kinds with lower efficiency are ranked first:
* Passing 0 as \p kind_index to hwloc_cpukinds_get_info() will
* return information about the less efficient CPU kind.
*
* When available, efficiency values are gathered from the operating
* system (when \p cpukind_efficiency is set in the
* struct hwloc_topology_discovery_support array, only on Windows 10 for now).
* Otherwise hwloc tries to compute efficiencies
* by comparing CPU kinds using frequencies (on ARM),
* or core types and frequencies (on other architectures).
* The environment variable HWLOC_CPUKINDS_RANKING may be used
* to change this heuristics, see \ref envvar.
*
* If hwloc fails to rank any kind, for instance because the operating
* system does not expose efficiencies and core frequencies,
* all kinds will have an unknown efficiency (\c -1),
* and they are not indexed/ordered in any specific way.
*
* @{
*/
/** \brief Get the number of different kinds of CPU cores in the topology.
*
* \p flags must be \c 0 for now.
*
* \return The number of CPU kinds (positive integer) on success.
* \return \c 0 if no information about kinds was found.
* \return \c -1 with \p errno set to \c EINVAL if \p flags is invalid.
*/
HWLOC_DECLSPEC int
hwloc_cpukinds_get_nr(hwloc_topology_t topology,
unsigned long flags);
/** \brief Get the index of the CPU kind that contains CPUs listed in \p cpuset.
*
* \p flags must be \c 0 for now.
*
* \return The index of the CPU kind (positive integer or 0) on success.
* \return \c -1 with \p errno set to \c EXDEV if \p cpuset is
* only partially included in some kind.
* \return \c -1 with \p errno set to \c ENOENT if \p cpuset is
* not included in any kind, even partially.
* \return \c -1 with \p errno set to \c EINVAL if parameters are invalid.
*/
HWLOC_DECLSPEC int
hwloc_cpukinds_get_by_cpuset(hwloc_topology_t topology,
hwloc_const_bitmap_t cpuset,
unsigned long flags);
/** \brief Get the CPU set and infos about a CPU kind in the topology.
*
* \p kind_index identifies one kind of CPU between 0 and the number
* of kinds returned by hwloc_cpukinds_get_nr() minus 1.
*
* If not \c NULL, the bitmap \p cpuset will be filled with
* the set of PUs of this kind.
*
* The integer pointed to by \p efficiency, if not \c NULL, will be filled
* with the ranking of this kind of CPU in terms of efficiency (see above).
* It ranges from \c 0 to the number of kinds
* (as reported by hwloc_cpukinds_get_nr()) minus 1.
*
* Kinds with lower efficiency are reported first.
*
* If there is a single kind in the topology, its efficiency is \c 0.
* If the efficiency of some kinds of cores is unknown,
* the efficiency of all kinds is set to \c -1,
* and kinds are reported in no specific order.
*
* The array of info attributes (for instance the "CoreType",
* "FrequencyMaxMHz" or "FrequencyBaseMHz", see \ref topoattrs_cpukinds)
* and its length are returned in \p infos or \p nr_infos.
* The array belongs to the topology, it should not be freed or modified.
*
* If \p nr_infos or \p infos is \c NULL, no info is returned.
*
* \p flags must be \c 0 for now.
*
* \return \c 0 on success.
* \return \c -1 with \p errno set to \c ENOENT if \p kind_index does not match any CPU kind.
* \return \c -1 with \p errno set to \c EINVAL if parameters are invalid.
*/
HWLOC_DECLSPEC int
hwloc_cpukinds_get_info(hwloc_topology_t topology,
unsigned kind_index,
hwloc_bitmap_t cpuset,
int *efficiency,
unsigned *nr_infos, struct hwloc_info_s **infos,
unsigned long flags);
/** \brief Register a kind of CPU in the topology.
*
* Mark the PUs listed in \p cpuset as being of the same kind
* with respect to the given attributes.
*
* \p forced_efficiency should be \c -1 if unknown.
* Otherwise it is an abstracted efficiency value to enforce
* the ranking of all kinds if all of them have valid (and
* different) efficiencies.
*
* The array \p infos of size \p nr_infos may be used to provide
* info names and values describing this kind of PUs.
*
* \p flags must be \c 0 for now.
*
* Parameters \p cpuset and \p infos will be duplicated internally,
* the caller is responsible for freeing them.
*
* If \p cpuset overlaps with some existing kinds, those might get
* modified or split. For instance if existing kind A contains
* PUs 0 and 1, and one registers another kind for PU 1 and 2,
* there will be 3 resulting kinds:
* existing kind A is restricted to only PU 0;
* new kind B contains only PU 1 and combines information from A
* and from the newly-registered kind;
* new kind C contains only PU 2 and only gets information from
* the newly-registered kind.
*
* \note The efficiency \p forced_efficiency provided to this function
* may be different from the one reported later by hwloc_cpukinds_get_info()
* because hwloc will scale efficiency values down to
* between 0 and the number of kinds minus 1.
*
* \return \c 0 on success.
* \return \c -1 with \p errno set to \c EINVAL if some parameters are invalid,
* for instance if \p cpuset is \c NULL or empty.
*/
HWLOC_DECLSPEC int
hwloc_cpukinds_register(hwloc_topology_t topology,
hwloc_bitmap_t cpuset,
int forced_efficiency,
unsigned nr_infos, struct hwloc_info_s *infos,
unsigned long flags);
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_CPUKINDS_H */
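A minimal sketch of how this API fits together, using only the functions declared above (error handling reduced to the essentials):

#include <stdio.h>
#include <stdlib.h>
#include "hwloc.h"
#include "hwloc/cpukinds.h"

int main(void)
{
    hwloc_topology_t topology;
    hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
    int i, nr;

    hwloc_topology_init(&topology);
    hwloc_topology_load(topology);

    nr = hwloc_cpukinds_get_nr(topology, 0);
    for (i = 0; i < nr; i++) {
        int efficiency;
        unsigned nr_infos, j;
        struct hwloc_info_s *infos;
        char *s;
        if (hwloc_cpukinds_get_info(topology, (unsigned) i, cpuset,
                                    &efficiency, &nr_infos, &infos, 0) < 0)
            continue;
        hwloc_bitmap_asprintf(&s, cpuset);
        printf("kind #%d: efficiency %d, cpuset %s\n", i, efficiency, s);
        free(s);
        for (j = 0; j < nr_infos; j++)  /* e.g. CoreType, FrequencyMaxMHz */
            printf("  %s = %s\n", infos[j].name, infos[j].value);
    }

    hwloc_bitmap_free(cpuset);
    hwloc_topology_destroy(topology);
    return 0;
}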

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2017 Inria. All rights reserved.
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -72,7 +72,7 @@ hwloc_cuda_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused
return 0;
}
/** \brief Get the CPU set of logical processors that are physically
/** \brief Get the CPU set of processors that are physically
* close to device \p cudevice.
*
* Return the CPU set describing the locality of the CUDA device \p cudevice.

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2017 Inria. All rights reserved.
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -69,7 +69,7 @@ hwloc_cudart_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unus
return 0;
}
/** \brief Get the CPU set of logical processors that are physically
/** \brief Get the CPU set of processors that are physically
* close to device \p idx.
*
* Return the CPU set describing the locality of the CUDA device

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2013-2018 Inria. All rights reserved.
* Copyright © 2013-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -110,7 +110,7 @@ union hwloc_topology_diff_obj_attr_u {
*/
typedef enum hwloc_topology_diff_type_e {
/** \brief An object attribute was changed.
* The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_obj_attr_s.
* The union is a hwloc_topology_diff_u::hwloc_topology_diff_obj_attr_s.
*/
HWLOC_TOPOLOGY_DIFF_OBJ_ATTR,
@@ -119,7 +119,7 @@ typedef enum hwloc_topology_diff_type_e {
* this object has not been checked.
* hwloc_topology_diff_build() will return 1.
*
* The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_too_complex_s.
* The union is a hwloc_topology_diff_u::hwloc_topology_diff_too_complex_s.
*/
HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX
} hwloc_topology_diff_type_t;

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2019 Inria. All rights reserved.
* Copyright © 2010-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -34,6 +34,7 @@ extern "C" {
* It corresponds to the latency for accessing the memory of one node
* from a core in another node.
* The corresponding kind is ::HWLOC_DISTANCES_KIND_FROM_OS | ::HWLOC_DISTANCES_KIND_FROM_USER.
* The name of this distances structure is "NUMALatency".
*
* The matrix may also contain bandwidths between random sets of objects,
* possibly provided by the user, as specified in the \p kind attribute.
@@ -144,6 +145,8 @@ hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
/** \brief Retrieve a distance matrix with the given name.
*
* Usually only one distances structure may match a given name.
*
* The name of the most common structure is "NUMALatency".
*/
HWLOC_DECLSPEC int
hwloc_distances_get_by_name(hwloc_topology_t topology, const char *name,

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2013 inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -22,7 +22,7 @@
#include <assert.h>
#if !defined _GNU_SOURCE || !defined _SCHED_H || (!defined CPU_SETSIZE && !defined sched_priority)
#if !defined _GNU_SOURCE || (!defined _SCHED_H && !defined _SCHED_H_) || (!defined CPU_SETSIZE && !defined sched_priority)
#error Please make sure to include sched.h before including glibc-sched.h, and define _GNU_SOURCE before any inclusion of sched.h
#endif
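The required include order, spelled out:

#define _GNU_SOURCE              /* must come before any inclusion of sched.h */
#include <sched.h>               /* provides cpu_set_t, CPU_SETSIZE */
#include <hwloc/glibc-sched.h>   /* now safe: the check above passes */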

View File

@@ -872,8 +872,8 @@ hwloc_distrib(hwloc_topology_t topology,
unsigned chunk, weight;
hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i];
hwloc_cpuset_t cpuset = root->cpuset;
if (root->type == HWLOC_OBJ_NUMANODE)
/* NUMANodes have same cpuset as their parent, but we need normal objects below */
while (!hwloc_obj_type_is_normal(root->type))
/* If memory/io/misc, walk up to normal parent */
root = root->parent;
weight = (unsigned) hwloc_bitmap_weight(cpuset);
if (!weight)
@@ -919,7 +919,7 @@ hwloc_distrib(hwloc_topology_t topology,
/** \brief Get complete CPU set
*
* \return the complete CPU set of logical processors of the system.
* \return the complete CPU set of processors of the system.
*
* \note The returned cpuset is not newly allocated and should thus not be
* changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy.
@@ -931,7 +931,7 @@ hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_
/** \brief Get topology CPU set
*
* \return the CPU set of logical processors of the system for which hwloc
* \return the CPU set of processors of the system for which hwloc
* provides topology information. This is equivalent to the cpuset of the
* system object.
*
@@ -945,7 +945,7 @@ hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_
/** \brief Get allowed CPU set
*
* \return the CPU set of allowed logical processors of the system.
* \return the CPU set of allowed processors of the system.
*
* \note If the topology flag ::HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED was not set,
* this is identical to hwloc_topology_get_topology_cpuset(), which means
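For instance, the number of processors hwloc actually has topology information for is simply:

int npus = hwloc_bitmap_weight(hwloc_topology_get_topology_cpuset(topology));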

View File

@@ -0,0 +1,455 @@
/*
* Copyright © 2019-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
/** \file
* \brief Memory node attributes.
*/
#ifndef HWLOC_MEMATTR_H
#define HWLOC_MEMATTR_H
#include "hwloc.h"
#ifdef __cplusplus
extern "C" {
#elif 0
}
#endif
/** \defgroup hwlocality_memattrs Comparing memory node attributes for finding where to allocate on
*
* Platforms with heterogeneous memory require ways to decide whether
* a buffer should be allocated on "fast" memory (such as HBM),
* "normal" memory (DDR) or even "slow" but large-capacity memory
* (non-volatile memory).
* These memory nodes are called "Targets" while the CPU accessing them
* is called the "Initiator". Access performance depends on their
* locality (NUMA platforms) as well as the intrinsic performance
* of the targets (heterogeneous platforms).
*
* The following attributes describe the performance of memory accesses
* from an Initiator to a memory Target, for instance their latency
* or bandwidth.
* Initiators performing these memory accesses are usually some PUs or Cores
* (described as a CPU set).
* Hence a Core may choose where to allocate a memory buffer by comparing
* the attributes of different target memory nodes nearby.
*
* There are also some attributes that are system-wide.
* Their value does not depend on a specific initiator performing
* an access.
* The memory node Capacity is an example of such attribute without
* initiator.
*
* One way to use this API is to start with a cpuset describing the Cores where
* a program is bound. The best target NUMA node for allocating memory in this
* program on these Cores may be obtained by passing this cpuset as an initiator
* to hwloc_memattr_get_best_target() with the relevant memory attribute.
* For instance, if the code is latency limited, use the Latency attribute.
*
* A more flexible approach consists in getting the list of local NUMA nodes
* by passing this cpuset to hwloc_get_local_numanode_objs().
* Attribute values for these nodes, if any, may then be obtained with
* hwloc_memattr_get_value() and manually compared with the desired criteria.
*
* \note The API also supports specific objects as initiator,
* but it is currently not used internally by hwloc.
* Users may for instance use it to provide custom performance
* values for host memory accesses performed by GPUs.
*
* \note The interface actually also accepts targets that are not NUMA nodes.
* @{
*/
/** \brief Memory node attributes. */
enum hwloc_memattr_id_e {
/** \brief "Capacity".
* The capacity is returned in bytes
* (local_memory attribute in objects).
*
* Best capacity nodes are nodes with <b>higher capacity</b>.
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
*/
HWLOC_MEMATTR_ID_CAPACITY = 0,
/** \brief "Locality".
* The locality is returned as the number of PUs in that locality
* (e.g. the weight of its cpuset).
*
* Best locality nodes are nodes with <b>smaller locality</b>
* (nodes that are local to very few PUs).
* Poor locality nodes are nodes with larger locality
* (nodes that are local to the entire machine).
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
*/
HWLOC_MEMATTR_ID_LOCALITY = 1,
/** \brief "Bandwidth".
* The bandwidth is returned in MiB/s, as seen from the given initiator location.
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
*/
HWLOC_MEMATTR_ID_BANDWIDTH = 2,
/** \brief "Latency".
* The latency is returned as nanoseconds, as seen from the given initiator location.
* Best latency nodes are nodes with <b>smaller latency</b>.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
*/
HWLOC_MEMATTR_ID_LATENCY = 3
/* TODO read vs write, persistence? */
};
/** \brief A memory attribute identifier.
* May be either one of ::hwloc_memattr_id_e or a new id returned by hwloc_memattr_register().
*/
typedef unsigned hwloc_memattr_id_t;
/** \brief Return the identifier of the memory attribute with the given name.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_by_name(hwloc_topology_t topology,
const char *name,
hwloc_memattr_id_t *id);
/** \brief Type of location. */
enum hwloc_location_type_e {
/** \brief Location is given as a cpuset, in the location cpuset union field. \hideinitializer */
HWLOC_LOCATION_TYPE_CPUSET = 1,
/** \brief Location is given as an object, in the location object union field. \hideinitializer */
HWLOC_LOCATION_TYPE_OBJECT = 0
};
/** \brief Where to measure attributes from. */
struct hwloc_location {
/** \brief Type of location. */
enum hwloc_location_type_e type;
/** \brief Actual location. */
union hwloc_location_u {
/** \brief Location as a cpuset, when the location type is ::HWLOC_LOCATION_TYPE_CPUSET. */
hwloc_cpuset_t cpuset;
/** \brief Location as an object, when the location type is ::HWLOC_LOCATION_TYPE_OBJECT. */
hwloc_obj_t object;
} location;
};
/** \brief Flags for selecting target NUMA nodes. */
enum hwloc_local_numanode_flag_e {
/** \brief Select NUMA nodes whose locality is larger than the given cpuset.
* For instance, if a single PU (or its cpuset) is given in \p initiator,
* select all nodes close to the package that contains this PU.
* \hideinitializer
*/
HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY = (1UL<<0),
/** \brief Select NUMA nodes whose locality is smaller than the given cpuset.
* For instance, if a package (or its cpuset) is given in \p initiator,
* also select nodes that are attached to only half of that package.
* \hideinitializer
*/
HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY = (1UL<<1),
/** \brief Select all NUMA nodes in the topology.
* The initiator \p initiator is ignored.
* \hideinitializer
*/
HWLOC_LOCAL_NUMANODE_FLAG_ALL = (1UL<<2)
};
/** \brief Return an array of local NUMA nodes.
*
* By default only select the NUMA nodes whose locality is exactly
* the given \p location. More nodes may be selected if additional flags
* are given as an OR'ed set of ::hwloc_local_numanode_flag_e.
*
* If \p location is given as an explicit object, its CPU set is used
* to find NUMA nodes with the corresponding locality.
* If the object does not have a CPU set (e.g. I/O object), the CPU
* parent (where the I/O object is attached) is used.
*
* On input, \p nr points to the number of nodes that may be stored
* in the \p nodes array.
* On output, \p nr will be changed to the number of stored nodes,
* or the number of nodes that would have been stored if there were
* enough room.
*
* \note Some of these NUMA nodes may not have any memory attribute
* values and hence not be reported as actual targets in other functions.
*
* \note The number of NUMA nodes in the topology (obtained by
* hwloc_bitmap_weight() on the root object nodeset) may be used
* to allocate the \p nodes array.
*
* \note When an object CPU set is given as locality, for instance a Package,
* and when flags contain both ::HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY
* and ::HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY,
* the returned array corresponds to the nodeset of that object.
*/
HWLOC_DECLSPEC int
hwloc_get_local_numanode_objs(hwloc_topology_t topology,
struct hwloc_location *location,
unsigned *nr,
hwloc_obj_t *nodes,
unsigned long flags);
/** \brief Return an attribute value for a specific target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* \p flags must be \c 0 for now.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_value(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target_node,
struct hwloc_location *initiator,
unsigned long flags,
hwloc_uint64_t *value);
/** \brief Return the best target NUMA node for the given attribute and initiator.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* If \p value is non \c NULL, the corresponding value is returned there.
*
* If multiple targets have the same attribute values, only one is
* returned (and there is no way to clarify how that one is chosen).
* Applications that want to detect targets with identical/similar
* values, or that want to look at values for multiple attributes,
* should rather get all values using hwloc_memattr_get_value()
* and manually select the target they consider the best.
*
* \p flags must be \c 0 for now.
*
* If there are no matching targets, \c -1 is returned with \p errno set to \c ENOENT.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_best_target(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
struct hwloc_location *initiator,
unsigned long flags,
hwloc_obj_t *best_target, hwloc_uint64_t *value);
/** \brief Return the best initiator for the given attribute and target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* \c -1 is returned and \p errno is set to \c EINVAL.
*
* If \p value is non \c NULL, the corresponding value is returned there.
*
* If multiple initiators have the same attribute values, only one is
* returned (and there is no way to clarify how that one is chosen).
* Applications that want to detect initiators with identical/similar
* values, or that want to look at values for multiple attributes,
* should rather get all values using hwloc_memattr_get_value()
* and manually select the initiator they consider the best.
*
* The returned initiator should not be modified or freed,
* it belongs to the topology.
*
* \p flags must be \c 0 for now.
*
* If there are no matching initiators, \c -1 is returned with \p errno set to \c ENOENT.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_best_initiator(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target,
unsigned long flags,
struct hwloc_location *best_initiator, hwloc_uint64_t *value);
/** @} */
/** \defgroup hwlocality_memattrs_manage Managing memory attributes
* @{
*/
/** \brief Return the name of a memory attribute.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_name(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
const char **name);
/** \brief Return the flags of the given attribute.
*
* Flags are an OR'ed set of ::hwloc_memattr_flag_e.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_flags(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
unsigned long *flags);
/** \brief Memory attribute flags.
* Given to hwloc_memattr_register() and returned by hwloc_memattr_get_flags().
*/
enum hwloc_memattr_flag_e {
/** \brief The best nodes for this memory attribute are those with the higher values.
* For instance Bandwidth.
*/
HWLOC_MEMATTR_FLAG_HIGHER_FIRST = (1UL<<0),
/** \brief The best nodes for this memory attribute are those with the lower values.
* For instance Latency.
*/
HWLOC_MEMATTR_FLAG_LOWER_FIRST = (1UL<<1),
/** \brief The value returned for this memory attribute depends on the given initiator.
* For instance Bandwidth and Latency, but not Capacity.
*/
HWLOC_MEMATTR_FLAG_NEED_INITIATOR = (1UL<<2)
};
/** \brief Register a new memory attribute.
*
* Add a specific memory attribute that is not defined in ::hwloc_memattr_id_e.
* Flags are an OR'ed set of ::hwloc_memattr_flag_e. It must contain at least
* one of ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST or ::HWLOC_MEMATTR_FLAG_LOWER_FIRST.
*/
HWLOC_DECLSPEC int
hwloc_memattr_register(hwloc_topology_t topology,
const char *name,
unsigned long flags,
hwloc_memattr_id_t *id);
/** \brief Set an attribute value for a specific target NUMA node.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* location \p initiator is ignored and may be \c NULL.
*
* The initiator will be copied into the topology,
* the caller should free anything allocated to store the initiator,
* for instance the cpuset.
*
* \p flags must be \c 0 for now.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*/
HWLOC_DECLSPEC int
hwloc_memattr_set_value(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target_node,
struct hwloc_location *initiator,
unsigned long flags,
hwloc_uint64_t value);
/** \brief Return the target NUMA nodes that have some values for a given attribute.
*
* Return targets for the given attribute in the \p targets array
* (for the given initiator if any).
* If \p values is not \c NULL, the corresponding attribute values
* are stored in the array it points to.
*
* On input, \p nr points to the number of targets that may be stored
* in the array \p targets (and \p values).
* On output, \p nr points to the number of targets (and values) that
* were actually found, even if some of them couldn't be stored in the array.
* Targets that couldn't be stored are ignored, but the function still
* returns success (\c 0). The caller may find out by comparing the value pointed
* to by \p nr before and after the function call.
*
* The returned targets should not be modified or freed,
* they belong to the topology.
*
* Argument \p initiator is ignored if the attribute does not relate to a specific
* initiator (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR).
* Otherwise \p initiator may be non \c NULL to report only targets
* that have a value for that initiator.
*
* \p flags must be \c 0 for now.
*
* \note This function is meant for tools and debugging (listing internal information)
* rather than for application queries. Applications should rather select useful
* NUMA nodes with hwloc_get_local_numanode_objs() and then look at their attribute
* values.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_targets(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
struct hwloc_location *initiator,
unsigned long flags,
unsigned *nrp, hwloc_obj_t *targets, hwloc_uint64_t *values);
/** \brief Return the initiators that have values for a given attribute for a specific target NUMA node.
*
* Return initiators for the given attribute and target node in the
* \p initiators array.
* If \p values is not \c NULL, the corresponding attribute values
* are stored in the array it points to.
*
* On input, \p nr points to the number of initiators that may be stored
* in the array \p initiators (and \p values).
* On output, \p nr points to the number of initiators (and values) that
* were actually found, even if some of them couldn't be stored in the array.
* Initiators that couldn't be stored are ignored, but the function still
* returns success (\c 0). The caller may find out by comparing the value pointed
* to by \p nr before and after the function call.
*
* The returned initiators should not be modified or freed,
* they belong to the topology.
*
* \p flags must be \c 0 for now.
*
* If the attribute does not relate to a specific initiator
* (it does not have the flag ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR),
* no initiator is returned.
*
* \note This function is meant for tools and debugging (listing internal information)
* rather than for application queries. Applications should rather select useful
* NUMA nodes with hwloc_get_local_numanode_objs() and then look at their attribute
* values for some relevant initiators.
*/
HWLOC_DECLSPEC int
hwloc_memattr_get_initiators(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
hwloc_obj_t target_node,
unsigned long flags,
unsigned *nr, struct hwloc_location *initiators, hwloc_uint64_t *values);
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_MEMATTR_H */
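Tying the pieces above together, a minimal sketch that asks for the highest-bandwidth target near a given cpuset (the include path hwloc/memattrs.h is the upstream header name and an assumption for this vendored copy):

#include "hwloc.h"
#include "hwloc/memattrs.h"  /* assumption: upstream header name */

/* Return the best-bandwidth NUMA node for CPUs in 'bound', or NULL. */
static hwloc_obj_t best_bandwidth_node(hwloc_topology_t topology,
                                       hwloc_cpuset_t bound)
{
    struct hwloc_location initiator;
    hwloc_obj_t best = NULL;
    hwloc_uint64_t value;

    initiator.type = HWLOC_LOCATION_TYPE_CPUSET;
    initiator.location.cpuset = bound;

    if (hwloc_memattr_get_best_target(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
                                      &initiator, 0, &best, &value) < 0)
        return NULL;  /* errno == ENOENT: no bandwidth values known */
    return best;
}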

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2012-2016 Inria. All rights reserved.
* Copyright © 2012-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -36,7 +36,7 @@ extern "C" {
* @{
*/
/** \brief Get the CPU set of logical processors that are physically
/** \brief Get the CPU set of processors that are physically
* close to NVML device \p device.
*
* Return the CPU set describing the locality of the NVML device \p device.

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2012-2019 Inria. All rights reserved.
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright © 2013, 2018 Université Bordeaux. All right reserved.
* See COPYING in top-level directory.
*/
@@ -82,9 +82,10 @@ hwloc_opencl_get_device_pci_busid(cl_device_id device,
if (CL_SUCCESS == clret
&& HWLOC_CL_DEVICE_TOPOLOGY_TYPE_PCIE_AMD == amdtopo.raw.type) {
*domain = 0; /* can't do anything better */
*bus = (unsigned) amdtopo.pcie.bus;
*dev = (unsigned) amdtopo.pcie.device;
*func = (unsigned) amdtopo.pcie.function;
/* cl_device_topology_amd stores bus IDs in cl_char, don't convert those signed chars directly to unsigned int */
*bus = (unsigned) (unsigned char) amdtopo.pcie.bus;
*dev = (unsigned) (unsigned char) amdtopo.pcie.device;
*func = (unsigned) (unsigned char) amdtopo.pcie.function;
return 0;
}
@@ -109,7 +110,7 @@ hwloc_opencl_get_device_pci_busid(cl_device_id device,
return -1;
}
/** \brief Get the CPU set of logical processors that are physically
/** \brief Get the CPU set of processors that are physically
* close to OpenCL device \p device.
*
* Return the CPU set describing the locality of the OpenCL device \p device.
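The double cast in the fix above matters because cl_char is signed; converting it straight to unsigned int sign-extends. A standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
    signed char bus = (signed char) 0x85;            /* e.g. PCI bus 0x85 */
    unsigned wrong = (unsigned) bus;                 /* sign-extended */
    unsigned right = (unsigned) (unsigned char) bus; /* zero-extended */
    printf("%x vs %x\n", wrong, right);              /* ffffff85 vs 85 */
    return 0;
}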

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2016 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -41,7 +41,7 @@ extern "C" {
* @{
*/
/** \brief Get the CPU set of logical processors that are physically
/** \brief Get the CPU set of processors that are physically
* close to device \p ibdev.
*
* Return the CPU set describing the locality of the OpenFabrics

View File

@@ -313,7 +313,13 @@ struct hwloc_component {
* @{
*/
/** \brief Check whether insertion errors are hidden */
HWLOC_DECLSPEC int hwloc_hide_errors(void);
/** \brief Add an object to the topology.
*
* Insert new object \p obj in the topology starting under existing object \p root
* (if \c NULL, the topology root object is used).
*
* It is sorted along the tree of other objects according to the inclusion of
* cpusets, to eventually be added as a child of the smallest object including
@@ -327,32 +333,20 @@ struct hwloc_component {
*
* This shall only be called before levels are built.
*
* In case of error, hwloc_report_os_error() is called.
*
* The caller should check whether the object type is filtered-out before calling this function.
*
* The topology cpuset/nodesets will be enlarged to include the object sets.
*
* \p reason is a unique string identifying where and why this insertion call was performed
* (it will be displayed in case of internal insertion error).
*
* Returns the object on success.
* Returns NULL and frees obj on error.
* Returns another object and frees obj if it was merged with an identical pre-existing object.
*/
HWLOC_DECLSPEC struct hwloc_obj *hwloc_insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t obj);
/** \brief Type of error callbacks during object insertion */
typedef void (*hwloc_report_error_t)(const char * msg, int line);
/** \brief Report an insertion error from a backend */
HWLOC_DECLSPEC void hwloc_report_os_error(const char * msg, int line);
/** \brief Check whether insertion errors are hidden */
HWLOC_DECLSPEC int hwloc_hide_errors(void);
/** \brief Add an object to the topology and specify which error callback to use.
*
* This function is similar to hwloc_insert_object_by_cpuset() but it allows specifying
* where to start insertion from (if \p root is NULL, the topology root object is used),
* and specifying the error callback.
*/
HWLOC_DECLSPEC struct hwloc_obj *hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root, hwloc_obj_t obj, hwloc_report_error_t report_error);
HWLOC_DECLSPEC hwloc_obj_t
hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root,
hwloc_obj_t obj, const char *reason);
/** \brief Insert an object somewhere in the topology.
*

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2010-2019 Inria. All rights reserved.
* Copyright © 2010-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -119,6 +119,7 @@ extern "C" {
#define HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED HWLOC_NAME_CAPS(TOPOLOGY_FLAG_WITH_DISALLOWED)
#define HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM HWLOC_NAME_CAPS(TOPOLOGY_FLAG_IS_THISSYSTEM)
#define HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES HWLOC_NAME_CAPS(TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)
#define HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT HWLOC_NAME_CAPS(TOPOLOGY_FLAG_IMPORT_SUPPORT)
#define hwloc_topology_set_pid HWLOC_NAME(topology_set_pid)
#define hwloc_topology_set_synthetic HWLOC_NAME(topology_set_synthetic)
@@ -134,6 +135,7 @@ extern "C" {
#define hwloc_topology_discovery_support HWLOC_NAME(topology_discovery_support)
#define hwloc_topology_cpubind_support HWLOC_NAME(topology_cpubind_support)
#define hwloc_topology_membind_support HWLOC_NAME(topology_membind_support)
#define hwloc_topology_misc_support HWLOC_NAME(topology_misc_support)
#define hwloc_topology_support HWLOC_NAME(topology_support)
#define hwloc_topology_get_support HWLOC_NAME(topology_get_support)
@@ -170,6 +172,7 @@ extern "C" {
#define hwloc_topology_alloc_group_object HWLOC_NAME(topology_alloc_group_object)
#define hwloc_topology_insert_group_object HWLOC_NAME(topology_insert_group_object)
#define hwloc_obj_add_other_obj_sets HWLOC_NAME(obj_add_other_obj_sets)
#define hwloc_topology_refresh HWLOC_NAME(topology_refresh)
#define hwloc_topology_get_depth HWLOC_NAME(topology_get_depth)
#define hwloc_get_type_depth HWLOC_NAME(get_type_depth)
@@ -367,6 +370,51 @@ extern "C" {
#define hwloc_cpuset_to_nodeset HWLOC_NAME(cpuset_to_nodeset)
#define hwloc_cpuset_from_nodeset HWLOC_NAME(cpuset_from_nodeset)
/* memattrs.h */
#define hwloc_memattr_id_e HWLOC_NAME(memattr_id_e)
#define HWLOC_MEMATTR_ID_CAPACITY HWLOC_NAME_CAPS(MEMATTR_ID_CAPACITY)
#define HWLOC_MEMATTR_ID_LOCALITY HWLOC_NAME_CAPS(MEMATTR_ID_LOCALITY)
#define HWLOC_MEMATTR_ID_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_BANDWIDTH)
#define HWLOC_MEMATTR_ID_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_LATENCY)
#define hwloc_memattr_id_t HWLOC_NAME(memattr_id_t)
#define hwloc_memattr_get_by_name HWLOC_NAME(memattr_get_by_name)
#define hwloc_location HWLOC_NAME(location)
#define hwloc_location_type_e HWLOC_NAME(location_type_e)
#define HWLOC_LOCATION_TYPE_OBJECT HWLOC_NAME_CAPS(LOCATION_TYPE_OBJECT)
#define HWLOC_LOCATION_TYPE_CPUSET HWLOC_NAME_CAPS(LOCATION_TYPE_CPUSET)
#define hwloc_location_u HWLOC_NAME(location_u)
#define hwloc_memattr_get_value HWLOC_NAME(memattr_get_value)
#define hwloc_memattr_get_best_target HWLOC_NAME(memattr_get_best_target)
#define hwloc_memattr_get_best_initiator HWLOC_NAME(memattr_get_best_initiator)
#define hwloc_local_numanode_flag_e HWLOC_NAME(local_numanode_flag_e)
#define HWLOC_LOCAL_NUMANODE_FLAG_LARGER_LOCALITY HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_LARGER_LOCALITY)
#define HWLOC_LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_SMALLER_LOCALITY)
#define HWLOC_LOCAL_NUMANODE_FLAG_ALL HWLOC_NAME_CAPS(LOCAL_NUMANODE_FLAG_ALL)
#define hwloc_get_local_numanode_objs HWLOC_NAME(get_local_numanode_objs)
#define hwloc_memattr_get_name HWLOC_NAME(memattr_get_name)
#define hwloc_memattr_get_flags HWLOC_NAME(memattr_get_flags)
#define hwloc_memattr_flag_e HWLOC_NAME(memattr_flag_e)
#define HWLOC_MEMATTR_FLAG_HIGHER_FIRST HWLOC_NAME_CAPS(MEMATTR_FLAG_HIGHER_FIRST)
#define HWLOC_MEMATTR_FLAG_LOWER_FIRST HWLOC_NAME_CAPS(MEMATTR_FLAG_LOWER_FIRST)
#define HWLOC_MEMATTR_FLAG_NEED_INITIATOR HWLOC_NAME_CAPS(MEMATTR_FLAG_NEED_INITIATOR)
#define hwloc_memattr_register HWLOC_NAME(memattr_register)
#define hwloc_memattr_set_value HWLOC_NAME(memattr_set_value)
#define hwloc_memattr_get_targets HWLOC_NAME(memattr_get_targets)
#define hwloc_memattr_get_initiators HWLOC_NAME(memattr_get_initiators)
/* cpukinds.h */
#define hwloc_cpukinds_get_nr HWLOC_NAME(cpukinds_get_nr)
#define hwloc_cpukinds_get_by_cpuset HWLOC_NAME(cpukinds_get_by_cpuset)
#define hwloc_cpukinds_get_info HWLOC_NAME(cpukinds_get_info)
#define hwloc_cpukinds_register HWLOC_NAME(cpukinds_register)
/* export.h */
#define hwloc_topology_export_xml_flags_e HWLOC_NAME(topology_export_xml_flags_e)
@@ -510,6 +558,12 @@ extern "C" {
#define hwloc_nvml_get_device_osdev HWLOC_NAME(nvml_get_device_osdev)
#define hwloc_nvml_get_device_osdev_by_index HWLOC_NAME(nvml_get_device_osdev_by_index)
/* rsmi.h */
#define hwloc_rsmi_get_device_cpuset HWLOC_NAME(rsmi_get_device_cpuset)
#define hwloc_rsmi_get_device_osdev HWLOC_NAME(rsmi_get_device_osdev)
#define hwloc_rsmi_get_device_osdev_by_index HWLOC_NAME(rsmi_get_device_osdev_by_index)
/* gl.h */
#define hwloc_gl_get_display_osdev_by_port_device HWLOC_NAME(gl_get_display_osdev_by_port_device)
@@ -547,9 +601,6 @@ extern "C" {
#define hwloc_plugin_check_namespace HWLOC_NAME(plugin_check_namespace)
#define hwloc_insert_object_by_cpuset HWLOC_NAME(insert_object_by_cpuset)
#define hwloc_report_error_t HWLOC_NAME(report_error_t)
#define hwloc_report_os_error HWLOC_NAME(report_os_error)
#define hwloc_hide_errors HWLOC_NAME(hide_errors)
#define hwloc__insert_object_by_cpuset HWLOC_NAME(_insert_object_by_cpuset)
#define hwloc_insert_object_by_parent HWLOC_NAME(insert_object_by_parent)
@@ -683,6 +734,7 @@ extern "C" {
#define hwloc_cuda_component HWLOC_NAME(cuda_component)
#define hwloc_gl_component HWLOC_NAME(gl_component)
#define hwloc_nvml_component HWLOC_NAME(nvml_component)
#define hwloc_rsmi_component HWLOC_NAME(rsmi_component)
#define hwloc_opencl_component HWLOC_NAME(opencl_component)
#define hwloc_pci_component HWLOC_NAME(pci_component)
@@ -691,6 +743,8 @@ extern "C" {
/* private/private.h */
#define hwloc_internal_location_s HWLOC_NAME(internal_location_s)
#define hwloc_special_level_s HWLOC_NAME(special_level_s)
#define hwloc_pci_forced_locality_s HWLOC_NAME(pci_forced_locality_s)
@@ -713,6 +767,8 @@ extern "C" {
#define hwloc__attach_memory_object HWLOC_NAME(insert_memory_object)
#define hwloc_get_obj_by_type_and_gp_index HWLOC_NAME(get_obj_by_type_and_gp_index)
#define hwloc_pci_discovery_init HWLOC_NAME(pci_discovery_init)
#define hwloc_pci_discovery_prepare HWLOC_NAME(pci_discovery_prepare)
#define hwloc_pci_discovery_exit HWLOC_NAME(pci_discovery_exit)
@@ -723,6 +779,7 @@ extern "C" {
#define hwloc__add_info_nodup HWLOC_NAME(_add_info_nodup)
#define hwloc__move_infos HWLOC_NAME(_move_infos)
#define hwloc__free_infos HWLOC_NAME(_free_infos)
#define hwloc__tma_dup_infos HWLOC_NAME(_tma_dup_infos)
#define hwloc_binding_hooks HWLOC_NAME(binding_hooks)
#define hwloc_set_native_binding_hooks HWLOC_NAME(set_native_binding_hooks)
@@ -764,6 +821,24 @@ extern "C" {
#define hwloc_internal_distances_add_by_index HWLOC_NAME(internal_distances_add_by_index)
#define hwloc_internal_distances_invalidate_cached_objs HWLOC_NAME(hwloc_internal_distances_invalidate_cached_objs)
#define hwloc_internal_memattr_s HWLOC_NAME(internal_memattr_s)
#define hwloc_internal_memattr_target_s HWLOC_NAME(internal_memattr_target_s)
#define hwloc_internal_memattr_initiator_s HWLOC_NAME(internal_memattr_initiator_s)
#define hwloc_internal_memattrs_init HWLOC_NAME(internal_memattrs_init)
#define hwloc_internal_memattrs_prepare HWLOC_NAME(internal_memattrs_prepare)
#define hwloc_internal_memattrs_dup HWLOC_NAME(internal_memattrs_dup)
#define hwloc_internal_memattrs_destroy HWLOC_NAME(internal_memattrs_destroy)
#define hwloc_internal_memattrs_need_refresh HWLOC_NAME(internal_memattrs_need_refresh)
#define hwloc_internal_memattrs_refresh HWLOC_NAME(internal_memattrs_refresh)
#define hwloc_internal_cpukind_s HWLOC_NAME(internal_cpukind_s)
#define hwloc_internal_cpukinds_init HWLOC_NAME(internal_cpukinds_init)
#define hwloc_internal_cpukinds_destroy HWLOC_NAME(internal_cpukinds_destroy)
#define hwloc_internal_cpukinds_dup HWLOC_NAME(internal_cpukinds_dup)
#define hwloc_internal_cpukinds_register HWLOC_NAME(internal_cpukinds_register)
#define hwloc_internal_cpukinds_rank HWLOC_NAME(internal_cpukinds_rank)
#define hwloc_internal_cpukinds_restrict HWLOC_NAME(internal_cpukinds_restrict)
#define hwloc_encode_to_base64 HWLOC_NAME(encode_to_base64)
#define hwloc_decode_from_base64 HWLOC_NAME(decode_from_base64)

src/3rdparty/hwloc/include/hwloc/rsmi.h (vendored new file, 201 lines)
View File

@@ -0,0 +1,201 @@
/*
* Copyright © 2012-2020 Inria. All rights reserved.
* Copyright (c) 2020, Advanced Micro Devices, Inc. All rights reserved.
* Written by Advanced Micro Devices,
* See COPYING in top-level directory.
*/
/** \file
* \brief Macros to help interaction between hwloc and the ROCm SMI Management Library.
*
* Applications that use both hwloc and the ROCm SMI Management Library may want to
* include this file so as to get topology information for AMD GPU devices.
*/
#ifndef HWLOC_RSMI_H
#define HWLOC_RSMI_H
#include "hwloc.h"
#include "hwloc/autogen/config.h"
#include "hwloc/helper.h"
#ifdef HWLOC_LINUX_SYS
#include "hwloc/linux.h"
#endif
#include <rocm_smi/rocm_smi.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup hwlocality_rsmi Interoperability with the ROCm SMI Management Library
*
* This interface offers ways to retrieve topology information about
* devices managed by the ROCm SMI Management Library.
*
* @{
*/
/** \brief Get the CPU set of logical processors that are physically
* close to AMD GPU device whose index is \p dv_ind.
*
* Return the CPU set describing the locality of the AMD GPU device
* whose index is \p dv_ind.
*
* Topology \p topology and device \p dv_ind must match the local machine.
* I/O devices detection and the ROCm SMI component are not needed in the
* topology.
*
* The function only returns the locality of the device.
* If more information about the device is needed, OS objects should
* be used instead, see hwloc_rsmi_get_device_osdev()
* and hwloc_rsmi_get_device_osdev_by_index().
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*/
static __hwloc_inline int
hwloc_rsmi_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
uint32_t dv_ind, hwloc_cpuset_t set)
{
#ifdef HWLOC_LINUX_SYS
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
#define HWLOC_RSMI_DEVICE_SYSFS_PATH_MAX 128
char path[HWLOC_RSMI_DEVICE_SYSFS_PATH_MAX];
rsmi_status_t ret;
uint64_t bdfid = 0;
unsigned domain, device, bus;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return -1;
}
ret = rsmi_dev_pci_id_get(dv_ind, &bdfid);
if (RSMI_STATUS_SUCCESS != ret) {
errno = EINVAL;
return -1;
}
domain = (bdfid>>32) & 0xffffffff;
bus = ((bdfid & 0xffff)>>8) & 0xff;
device = ((bdfid & 0xff)>>3) & 0x1f;
sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.0/local_cpus", domain, bus, device);
if (hwloc_linux_read_path_as_cpumask(path, set) < 0
|| hwloc_bitmap_iszero(set))
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#else
/* Non-Linux systems simply get a full cpuset */
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#endif
return 0;
}
/** \brief Get the hwloc OS device object corresponding to the
* AMD GPU device whose index is \p dv_ind.
*
* Return the OS device object describing the AMD GPU device whose
* index is \p dv_ind. Returns NULL if there is none.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
* I/O devices detection and the ROCm SMI component must be enabled in the
* topology.
*
* \note The corresponding PCI device object can be obtained by looking
* at the OS device parent object (unless PCI devices are filtered out).
*/
static __hwloc_inline hwloc_obj_t
hwloc_rsmi_get_device_osdev_by_index(hwloc_topology_t topology, uint32_t dv_ind)
{
hwloc_obj_t osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
if (HWLOC_OBJ_OSDEV_GPU == osdev->attr->osdev.type
&& osdev->name
&& !strncmp("rsmi", osdev->name, 4)
&& atoi(osdev->name + 4) == (int) dv_ind)
return osdev;
}
return NULL;
}
/** \brief Get the hwloc OS device object corresponding to the AMD GPU device
* whose index is \p dv_ind.
*
* Return the hwloc OS device object that describes the given
* AMD GPU whose index is \p dv_ind. Returns NULL if there is none.
*
* Topology \p topology and device \p dv_ind must match the local machine.
* I/O devices detection and the ROCm SMI component must be enabled in the
* topology. If not, the locality of the object may still be found using
* hwloc_rsmi_get_device_cpuset().
*
* \note The corresponding hwloc PCI device may be found by looking
* at the result parent pointer (unless PCI devices are filtered out).
*/
static __hwloc_inline hwloc_obj_t
hwloc_rsmi_get_device_osdev(hwloc_topology_t topology, uint32_t dv_ind)
{
hwloc_obj_t osdev;
rsmi_status_t ret;
uint64_t bdfid = 0;
unsigned domain, device, bus, func;
uint64_t id;
char uuid[64];
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return NULL;
}
ret = rsmi_dev_pci_id_get(dv_ind, &bdfid);
if (RSMI_STATUS_SUCCESS != ret) {
errno = EINVAL;
return NULL;
}
domain = (bdfid>>32) & 0xffffffff;
bus = ((bdfid & 0xffff)>>8) & 0xff;
device = ((bdfid & 0xff)>>3) & 0x1f;
func = bdfid & 0x7;
ret = rsmi_dev_unique_id_get(dv_ind, &id);
if (RSMI_STATUS_SUCCESS != ret)
uuid[0] = '\0';
else
sprintf(uuid, "%lx", id);
osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
hwloc_obj_t pcidev = osdev->parent;
const char *info;
if (strncmp(osdev->name, "rsmi", 4))
continue;
if (pcidev
&& pcidev->type == HWLOC_OBJ_PCI_DEVICE
&& pcidev->attr->pcidev.domain == domain
&& pcidev->attr->pcidev.bus == bus
&& pcidev->attr->pcidev.dev == device
&& pcidev->attr->pcidev.func == func)
return osdev;
info = hwloc_obj_get_info_by_name(osdev, "AMDUUID");
if (info && !strcmp(info, uuid))
return osdev;
}
return NULL;
}
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_RSMI_H */
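A minimal sketch of using these helpers (building it requires the ROCm SMI headers pulled in above; device index 0 is an arbitrary example):

#include <stdio.h>
#include "hwloc.h"
#include "hwloc/rsmi.h"

int main(void)
{
    hwloc_topology_t topology;
    hwloc_obj_t osdev;

    hwloc_topology_init(&topology);
    /* OS devices such as "rsmi0" only appear if I/O discovery is enabled */
    hwloc_topology_set_io_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
    hwloc_topology_load(topology);

    osdev = hwloc_rsmi_get_device_osdev_by_index(topology, 0);
    if (osdev)
        printf("found %s (parent type %s)\n", osdev->name,
               osdev->parent ? hwloc_obj_type_string(osdev->parent->type) : "none");

    hwloc_topology_destroy(topology);
    return 0;
}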

View File

@@ -1,8 +1,8 @@
/*
* Copyright © 2009, 2011, 2012 CNRS. All rights reserved.
* Copyright © 2009-2018 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009, 2011, 2012, 2015 Université Bordeaux. All rights reserved.
* Copyright © 2009 Cisco Systems, Inc. All rights reserved.
* Copyright © 2009-2020 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -575,7 +575,7 @@
#define PACKAGE "hwloc"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "http://www.open-mpi.org/projects/hwloc/"
#define PACKAGE_BUGREPORT "https://www.open-mpi.org/projects/hwloc/"
/* Define to the full name of this package. */
#define PACKAGE_NAME "hwloc"
@@ -668,5 +668,9 @@
/* Define this to the thread ID type */
#define hwloc_thread_t HANDLE
/* Define to 1 if you have the declaration of `GetModuleFileName', and to 0 if
you don't. */
#define HAVE_DECL_GETMODULEFILENAME 1
#endif /* HWLOC_CONFIGURE_H */

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2017 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009, 2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -19,6 +19,10 @@
#include <stdio.h>
#endif
#ifdef ANDROID
extern void JNIDebug(char *text);
#endif
/* Compile-time assertion */
#define HWLOC_BUILD_ASSERT(condition) ((void)sizeof(char[1 - 2*!(condition)]))
@@ -44,9 +48,17 @@ static __hwloc_inline void hwloc_debug(const char *s __hwloc_attribute_unused, ...)
{
#ifdef HWLOC_DEBUG
if (hwloc_debug_enabled()) {
#ifdef ANDROID
char buffer[256];
#endif
va_list ap;
va_start(ap, s);
#ifdef ANDROID
vsprintf(buffer, s, ap);
JNIDebug(buffer);
#else
vfprintf(stderr, s, ap);
#endif
va_end(ap);
}
#endif
@@ -57,21 +69,21 @@ static __hwloc_inline void hwloc_debug(const char *s __hwloc_attribute_unused, ...)
if (hwloc_debug_enabled()) { \
char *s; \
hwloc_bitmap_asprintf(&s, bitmap); \
fprintf(stderr, fmt, s); \
hwloc_debug(fmt, s); \
free(s); \
} } while (0)
#define hwloc_debug_1arg_bitmap(fmt, arg1, bitmap) do { \
if (hwloc_debug_enabled()) { \
char *s; \
hwloc_bitmap_asprintf(&s, bitmap); \
fprintf(stderr, fmt, arg1, s); \
hwloc_debug(fmt, arg1, s); \
free(s); \
} } while (0)
#define hwloc_debug_2args_bitmap(fmt, arg1, arg2, bitmap) do { \
if (hwloc_debug_enabled()) { \
char *s; \
hwloc_bitmap_asprintf(&s, bitmap); \
fprintf(stderr, fmt, arg1, arg2, s); \
hwloc_debug(fmt, arg1, arg2, s); \
free(s); \
} } while (0)
#else

View File

@@ -30,6 +30,7 @@ HWLOC_DECLSPEC extern const struct hwloc_component hwloc_x86_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_cuda_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_gl_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_nvml_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_rsmi_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_opencl_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_pci_component;

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
*
@@ -40,7 +40,19 @@
#endif
#include <string.h>
#define HWLOC_TOPOLOGY_ABI 0x20100 /* version of the layout of struct topology */
#define HWLOC_TOPOLOGY_ABI 0x20400 /* version of the layout of struct topology */
struct hwloc_internal_location_s {
enum hwloc_location_type_e type;
union {
struct {
hwloc_obj_t obj; /* cached between refreshes */
uint64_t gp_index;
hwloc_obj_type_t type;
} object; /* if type == HWLOC_LOCATION_TYPE_OBJECT */
hwloc_cpuset_t cpuset; /* if type == HWLOC_LOCATION_TYPE_CPUSET */
} location;
};
/*****************************************************
* WARNING:
@@ -163,6 +175,50 @@ struct hwloc_topology {
} *first_dist, *last_dist;
unsigned next_dist_id;
/* memory attributes */
unsigned nr_memattrs;
struct hwloc_internal_memattr_s {
/* memattr info */
char *name; /* TODO unit is implicit, in the documentation of standard attributes, or in the name? */
unsigned long flags;
#define HWLOC_IMATTR_FLAG_STATIC_NAME (1U<<0) /* no need to free name */
#define HWLOC_IMATTR_FLAG_CACHE_VALID (1U<<1) /* target and initiator are valid */
#define HWLOC_IMATTR_FLAG_CONVENIENCE (1U<<2) /* convenience attribute reporting values from non-memattr attributes (R/O and no actual targets stored) */
unsigned iflags;
/* array of values */
unsigned nr_targets;
struct hwloc_internal_memattr_target_s {
/* target object */
hwloc_obj_t obj; /* cached between refreshes */
hwloc_obj_type_t type;
unsigned os_index; /* only used temporarily during discovery when there's no obj/gp_index yet */
hwloc_uint64_t gp_index;
/* value if there is no initiator for this attr */
hwloc_uint64_t noinitiator_value;
/* initiators otherwise */
unsigned nr_initiators;
struct hwloc_internal_memattr_initiator_s {
struct hwloc_internal_location_s initiator;
hwloc_uint64_t value;
} *initiators;
} *targets;
} *memattrs;
/* hybridcpus */
unsigned nr_cpukinds;
unsigned nr_cpukinds_allocated;
struct hwloc_internal_cpukind_s {
hwloc_cpuset_t cpuset;
#define HWLOC_CPUKIND_EFFICIENCY_UNKNOWN -1
int efficiency;
int forced_efficiency; /* returned by the hardware or OS if any */
hwloc_uint64_t ranking_value; /* internal value for ranking */
unsigned nr_infos;
struct hwloc_info_s *infos;
} *cpukinds;
int grouping;
int grouping_verbose;
unsigned grouping_nbaccuracies;
@@ -240,8 +296,9 @@ extern void hwloc_topology_clear(struct hwloc_topology *topology);
/* insert memory object as memory child of normal parent */
extern struct hwloc_obj * hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
hwloc_obj_t obj,
hwloc_report_error_t report_error);
hwloc_obj_t obj, const char *reason);
extern hwloc_obj_t hwloc_get_obj_by_type_and_gp_index(hwloc_topology_t topology, hwloc_obj_type_t type, uint64_t gp_index);
extern void hwloc_pci_discovery_init(struct hwloc_topology *topology);
extern void hwloc_pci_discovery_prepare(struct hwloc_topology *topology);
@@ -261,6 +318,7 @@ extern hwloc_obj_t hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_t
extern int hwloc__add_info(struct hwloc_info_s **infosp, unsigned *countp, const char *name, const char *value);
extern int hwloc__add_info_nodup(struct hwloc_info_s **infosp, unsigned *countp, const char *name, const char *value, int replace);
extern int hwloc__move_infos(struct hwloc_info_s **dst_infosp, unsigned *dst_countp, struct hwloc_info_s **src_infosp, unsigned *src_countp);
extern int hwloc__tma_dup_infos(struct hwloc_tma *tma, struct hwloc_info_s **dst_infosp, unsigned *dst_countp, struct hwloc_info_s *src_infos, unsigned src_count);
extern void hwloc__free_infos(struct hwloc_info_s *infos, unsigned count);
/* set native OS binding hooks */
@@ -354,6 +412,22 @@ extern int hwloc_internal_distances_add(hwloc_topology_t topology, const char *n
extern int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name, hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values, unsigned long kind, unsigned long flags);
extern void hwloc_internal_distances_invalidate_cached_objs(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_init(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_prepare(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_destroy(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_need_refresh(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_refresh(hwloc_topology_t topology);
extern int hwloc_internal_memattrs_dup(hwloc_topology_t new, hwloc_topology_t old);
extern int hwloc_internal_memattr_set_value(hwloc_topology_t topology, hwloc_memattr_id_t id, hwloc_obj_type_t target_type, hwloc_uint64_t target_gp_index, unsigned target_os_index, struct hwloc_internal_location_s *initiator, hwloc_uint64_t value);
extern void hwloc_internal_cpukinds_init(hwloc_topology_t topology);
extern int hwloc_internal_cpukinds_rank(hwloc_topology_t topology);
extern void hwloc_internal_cpukinds_destroy(hwloc_topology_t topology);
extern int hwloc_internal_cpukinds_dup(hwloc_topology_t new, hwloc_topology_t old);
#define HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY (1<<0)
extern int hwloc_internal_cpukinds_register(hwloc_topology_t topology, hwloc_cpuset_t cpuset, int forced_efficiency, const struct hwloc_info_s *infos, unsigned nr_infos, unsigned long flags);
extern void hwloc_internal_cpukinds_restrict(hwloc_topology_t topology);
/* encode src buffer into target buffer.
* targsize must be at least 4*((srclength+2)/3)+1.
* target will be 0-terminated.

View File

@@ -46,7 +46,7 @@ struct hwloc_xml_backend_data_s {
int (*find_child)(struct hwloc__xml_import_state_s * state, struct hwloc__xml_import_state_s * childstate, char **tagp);
int (*close_tag)(struct hwloc__xml_import_state_s * state); /* look for an explicit closing tag </name> */
void (*close_child)(struct hwloc__xml_import_state_s * state);
int (*get_content)(struct hwloc__xml_import_state_s * state, char **beginp, size_t expected_length); /* return 0 on empty content (and sets beginp to empty string), 1 on actual content, -1 on error or unexpected content length */
int (*get_content)(struct hwloc__xml_import_state_s * state, const char **beginp, size_t expected_length); /* return 0 on empty content (and sets beginp to empty string), 1 on actual content, -1 on error or unexpected content length */
void (*close_content)(struct hwloc__xml_import_state_s * state);
char * msgprefix;
void *data; /* libxml2 doc, or nolibxml buffer */

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2010, 2012 Université Bordeaux
* Copyright © 2011-2015 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -921,5 +921,6 @@ hwloc_set_binding_hooks(struct hwloc_topology *topology)
DO(mem,get_area_membind);
DO(mem,get_area_memlocation);
DO(mem,alloc_membind);
#undef DO
}
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2018 Inria. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -818,7 +818,7 @@ int hwloc_bitmap_nr_ulongs(const struct hwloc_bitmap_s *set)
return -1;
last = hwloc_bitmap_last(set);
return (last + HWLOC_BITS_PER_LONG-1)/HWLOC_BITS_PER_LONG;
return (last + HWLOC_BITS_PER_LONG)/HWLOC_BITS_PER_LONG;
}
int hwloc_bitmap_only(struct hwloc_bitmap_s * set, unsigned cpu)
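Worked through with 64-bit unsigned longs: a bitmap whose last set bit has index 64 needs two ulongs, but the old expression gave (64 + 63)/64 = 1, one short; the corrected (64 + 64)/64 = 2. For last = 63 both yield 1, so only the boundary case changes.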

src/3rdparty/hwloc/src/cpukinds.c (vendored new file, 649 lines)
View File

@@ -0,0 +1,649 @@
/*
* Copyright © 2020-2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
#include "private/autogen/config.h"
#include "hwloc.h"
#include "private/private.h"
#include "private/debug.h"
/*****************
* Basics
*/
void
hwloc_internal_cpukinds_init(struct hwloc_topology *topology)
{
topology->cpukinds = NULL;
topology->nr_cpukinds = 0;
topology->nr_cpukinds_allocated = 0;
}
void
hwloc_internal_cpukinds_destroy(struct hwloc_topology *topology)
{
unsigned i;
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
hwloc_bitmap_free(kind->cpuset);
hwloc__free_infos(kind->infos, kind->nr_infos);
}
free(topology->cpukinds);
topology->cpukinds = NULL;
topology->nr_cpukinds = 0;
}
int
hwloc_internal_cpukinds_dup(hwloc_topology_t new, hwloc_topology_t old)
{
struct hwloc_tma *tma = new->tma;
struct hwloc_internal_cpukind_s *kinds;
unsigned i;
kinds = hwloc_tma_malloc(tma, old->nr_cpukinds * sizeof(*kinds));
if (!kinds)
return -1;
new->cpukinds = kinds;
new->nr_cpukinds = old->nr_cpukinds;
memcpy(kinds, old->cpukinds, old->nr_cpukinds * sizeof(*kinds));
for(i=0;i<old->nr_cpukinds; i++) {
kinds[i].cpuset = hwloc_bitmap_tma_dup(tma, old->cpukinds[i].cpuset);
if (!kinds[i].cpuset) {
new->nr_cpukinds = i;
goto failed;
}
if (hwloc__tma_dup_infos(tma,
&kinds[i].infos, &kinds[i].nr_infos,
old->cpukinds[i].infos, old->cpukinds[i].nr_infos) < 0) {
assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */
hwloc_bitmap_free(kinds[i].cpuset);
new->nr_cpukinds = i;
goto failed;
}
}
return 0;
failed:
hwloc_internal_cpukinds_destroy(new);
return -1;
}
void
hwloc_internal_cpukinds_restrict(hwloc_topology_t topology)
{
unsigned i;
int removed = 0;
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
hwloc_bitmap_and(kind->cpuset, kind->cpuset, hwloc_get_root_obj(topology)->cpuset);
if (hwloc_bitmap_iszero(kind->cpuset)) {
hwloc_bitmap_free(kind->cpuset);
hwloc__free_infos(kind->infos, kind->nr_infos);
memmove(kind, kind+1, (topology->nr_cpukinds - i - 1)*sizeof(*kind));
i--;
topology->nr_cpukinds--;
removed = 1;
}
}
if (removed)
hwloc_internal_cpukinds_rank(topology);
}
/********************
* Registering
*/
static __hwloc_inline int
hwloc__cpukind_check_duplicate_info(struct hwloc_internal_cpukind_s *kind,
const char *name, const char *value)
{
unsigned i;
for(i=0; i<kind->nr_infos; i++)
if (!strcmp(kind->infos[i].name, name)
&& !strcmp(kind->infos[i].value, value))
return 1;
return 0;
}
static __hwloc_inline void
hwloc__cpukind_add_infos(struct hwloc_internal_cpukind_s *kind,
const struct hwloc_info_s *infos, unsigned nr_infos)
{
unsigned i;
for(i=0; i<nr_infos; i++) {
if (hwloc__cpukind_check_duplicate_info(kind, infos[i].name, infos[i].value))
continue;
hwloc__add_info(&kind->infos, &kind->nr_infos, infos[i].name, infos[i].value);
}
}
int
hwloc_internal_cpukinds_register(hwloc_topology_t topology, hwloc_cpuset_t cpuset,
int forced_efficiency,
const struct hwloc_info_s *infos, unsigned nr_infos,
unsigned long flags)
{
struct hwloc_internal_cpukind_s *kinds;
unsigned i, max, bits, oldnr, newnr;
if (hwloc_bitmap_iszero(cpuset)) {
hwloc_bitmap_free(cpuset);
errno = EINVAL;
return -1;
}
if (flags & ~HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY) {
errno = EINVAL;
return -1;
}
/* TODO: for now, only windows provides a forced efficiency.
* if another backend ever provides a conflicting value, the first backend value will be kept.
* (user-provided values are not an issue, they are meant to overwrite)
*/
/* If we have N kinds currently, we may need 2N+1 kinds after inserting the new one:
* - each existing kind may get split into which PUs are in the new kind and which aren't.
* - some PUs might not have been in any kind yet.
*/
max = 2 * topology->nr_cpukinds + 1;
/* Allocate the power-of-two above 2N+1. */
bits = hwloc_flsl(max-1) + 1;
max = 1U<<bits;
/* Allocate 8 minimum to avoid multiple reallocs */
if (max < 8)
max = 8;
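/* Worked example (illustrative, not part of the diff): with N = 3 existing
 * kinds, max = 2*3+1 = 7, bits = hwloc_flsl(6)+1 = 3+1 = 4, hence
 * max = 1<<4 = 16 slots; with N = 0 the computed 2 is raised to the
 * minimum of 8. */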
/* Create or enlarge the array of kinds if needed */
kinds = topology->cpukinds;
if (max > topology->nr_cpukinds_allocated) {
kinds = realloc(kinds, max * sizeof(*kinds));
if (!kinds) {
hwloc_bitmap_free(cpuset);
return -1;
}
memset(&kinds[topology->nr_cpukinds_allocated], 0, (max - topology->nr_cpukinds_allocated) * sizeof(*kinds));
topology->nr_cpukinds_allocated = max;
topology->cpukinds = kinds;
}
newnr = oldnr = topology->nr_cpukinds;
for(i=0; i<oldnr; i++) {
int res = hwloc_bitmap_compare_inclusion(cpuset, kinds[i].cpuset);
if (res == HWLOC_BITMAP_INTERSECTS || res == HWLOC_BITMAP_INCLUDED) {
/* new kind with intersection of cpusets and union of infos */
kinds[newnr].cpuset = hwloc_bitmap_alloc();
kinds[newnr].efficiency = HWLOC_CPUKIND_EFFICIENCY_UNKNOWN;
kinds[newnr].forced_efficiency = forced_efficiency;
hwloc_bitmap_and(kinds[newnr].cpuset, cpuset, kinds[i].cpuset);
hwloc__cpukind_add_infos(&kinds[newnr], kinds[i].infos, kinds[i].nr_infos);
hwloc__cpukind_add_infos(&kinds[newnr], infos, nr_infos);
/* remove cpuset PUs from the existing kind that we just split */
hwloc_bitmap_andnot(kinds[i].cpuset, kinds[i].cpuset, kinds[newnr].cpuset);
/* clear cpuset PUs that were taken care of */
hwloc_bitmap_andnot(cpuset, cpuset, kinds[newnr].cpuset);
newnr++;
} else if (res == HWLOC_BITMAP_CONTAINS
|| res == HWLOC_BITMAP_EQUAL) {
/* append new info to existing smaller (or equal) kind */
hwloc__cpukind_add_infos(&kinds[i], infos, nr_infos);
if ((flags & HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY)
|| kinds[i].forced_efficiency == HWLOC_CPUKIND_EFFICIENCY_UNKNOWN)
kinds[i].forced_efficiency = forced_efficiency;
/* clear cpuset PUs that were taken care of */
hwloc_bitmap_andnot(cpuset, cpuset, kinds[i].cpuset);
} else {
assert(res == HWLOC_BITMAP_DIFFERENT);
/* nothing to do */
}
/* don't compare with anything else if already empty */
if (hwloc_bitmap_iszero(cpuset))
break;
}
/* add a final kind with remaining PUs if any */
if (!hwloc_bitmap_iszero(cpuset)) {
kinds[newnr].cpuset = cpuset;
kinds[newnr].efficiency = HWLOC_CPUKIND_EFFICIENCY_UNKNOWN;
kinds[newnr].forced_efficiency = forced_efficiency;
hwloc__cpukind_add_infos(&kinds[newnr], infos, nr_infos);
newnr++;
} else {
hwloc_bitmap_free(cpuset);
}
topology->nr_cpukinds = newnr;
return 0;
}
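/* Illustrative walk-through of the splitting above (not part of the diff):
 * with one existing kind {0-7} and a new cpuset {4-11}, the loop creates a
 * kind {4-7} carrying the union of both info lists, shrinks the old kind to
 * {0-3}, and the trailing block appends the leftover {8-11} as a third kind,
 * giving 3 kinds out of the original 1 (within the 2N+1 bound). */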
int
hwloc_cpukinds_register(hwloc_topology_t topology, hwloc_cpuset_t _cpuset,
int forced_efficiency,
unsigned nr_infos, struct hwloc_info_s *infos,
unsigned long flags)
{
hwloc_bitmap_t cpuset;
int err;
if (flags) {
errno = EINVAL;
return -1;
}
if (!_cpuset || hwloc_bitmap_iszero(_cpuset)) {
errno = EINVAL;
return -1;
}
cpuset = hwloc_bitmap_dup(_cpuset);
if (!cpuset)
return -1;
if (forced_efficiency < 0)
forced_efficiency = HWLOC_CPUKIND_EFFICIENCY_UNKNOWN;
err = hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
if (err < 0)
return err;
hwloc_internal_cpukinds_rank(topology);
return 0;
}
/*********************
* Ranking
*/
static int
hwloc__cpukinds_check_duplicate_rankings(struct hwloc_topology *topology)
{
unsigned i,j;
for(i=0; i<topology->nr_cpukinds; i++)
for(j=i+1; j<topology->nr_cpukinds; j++)
if (topology->cpukinds[i].ranking_value == topology->cpukinds[j].ranking_value)
/* if any duplicate, fail */
return -1;
return 0;
}
static int
hwloc__cpukinds_try_rank_by_forced_efficiency(struct hwloc_topology *topology)
{
unsigned i;
hwloc_debug("Trying to rank cpukinds by forced efficiency...\n");
for(i=0; i<topology->nr_cpukinds; i++) {
if (topology->cpukinds[i].forced_efficiency == HWLOC_CPUKIND_EFFICIENCY_UNKNOWN)
/* if any unknown, fail */
return -1;
topology->cpukinds[i].ranking_value = topology->cpukinds[i].forced_efficiency;
}
return hwloc__cpukinds_check_duplicate_rankings(topology);
}
struct hwloc_cpukinds_info_summary {
int have_max_freq;
int have_base_freq;
int have_intel_core_type;
struct hwloc_cpukind_info_summary {
unsigned intel_core_type; /* 1 for atom, 2 for core */
unsigned max_freq, base_freq; /* MHz, hence < 100000 */
} * summaries;
};
static void
hwloc__cpukinds_summarize_info(struct hwloc_topology *topology,
struct hwloc_cpukinds_info_summary *summary)
{
unsigned i, j;
summary->have_max_freq = 1;
summary->have_base_freq = 1;
summary->have_intel_core_type = 1;
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
for(j=0; j<kind->nr_infos; j++) {
struct hwloc_info_s *info = &kind->infos[j];
if (!strcmp(info->name, "FrequencyMaxMHz")) {
summary->summaries[i].max_freq = atoi(info->value);
} else if (!strcmp(info->name, "FrequencyBaseMHz")) {
summary->summaries[i].base_freq = atoi(info->value);
} else if (!strcmp(info->name, "CoreType")) {
if (!strcmp(info->value, "IntelAtom"))
summary->summaries[i].intel_core_type = 1;
else if (!strcmp(info->value, "IntelCore"))
summary->summaries[i].intel_core_type = 2;
}
}
hwloc_debug("cpukind #%u has intel_core_type %u max_freq %u base_freq %u\n",
i, summary->summaries[i].intel_core_type,
summary->summaries[i].max_freq, summary->summaries[i].base_freq);
if (!summary->summaries[i].base_freq)
summary->have_base_freq = 0;
if (!summary->summaries[i].max_freq)
summary->have_max_freq = 0;
if (!summary->summaries[i].intel_core_type)
summary->have_intel_core_type = 0;
}
}
enum hwloc_cpukinds_ranking {
HWLOC_CPUKINDS_RANKING_DEFAULT, /* forced + frequency on ARM, forced + coretype_frequency otherwise */
HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY, /* default without forced */
HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY,
HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY,
HWLOC_CPUKINDS_RANKING_CORETYPE,
HWLOC_CPUKINDS_RANKING_FREQUENCY,
HWLOC_CPUKINDS_RANKING_FREQUENCY_MAX,
HWLOC_CPUKINDS_RANKING_FREQUENCY_BASE,
HWLOC_CPUKINDS_RANKING_NONE
};
static int
hwloc__cpukinds_try_rank_by_info(struct hwloc_topology *topology,
enum hwloc_cpukinds_ranking heuristics,
struct hwloc_cpukinds_info_summary *summary)
{
unsigned i;
if (HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype+frequency...\n");
/* we need intel_core_type + (base or max freq) for all kinds */
if (!summary->have_intel_core_type
|| (!summary->have_max_freq && !summary->have_base_freq))
return -1;
/* rank first by coretype (Core>>Atom) then by frequency, base if available, max otherwise */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
if (summary->have_base_freq)
kind->ranking_value = (summary->summaries[i].intel_core_type << 20) + summary->summaries[i].base_freq;
else
kind->ranking_value = (summary->summaries[i].intel_core_type << 20) + summary->summaries[i].max_freq;
}
} else if (HWLOC_CPUKINDS_RANKING_CORETYPE == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype...\n");
/* we need intel_core_type */
if (!summary->have_intel_core_type)
return -1;
/* rank by coretype (Core>>Atom) */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
kind->ranking_value = (summary->summaries[i].intel_core_type << 20);
}
} else if (HWLOC_CPUKINDS_RANKING_FREQUENCY == heuristics) {
hwloc_debug("Trying to rank cpukinds by frequency...\n");
/* we need base or max freq for all kinds */
if (!summary->have_max_freq && !summary->have_base_freq)
return -1;
/* rank first by frequency, base if available, max otherwise */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
if (summary->have_base_freq)
kind->ranking_value = summary->summaries[i].base_freq;
else
kind->ranking_value = summary->summaries[i].max_freq;
}
} else if (HWLOC_CPUKINDS_RANKING_FREQUENCY_MAX == heuristics) {
hwloc_debug("Trying to rank cpukinds by frequency max...\n");
/* we need max freq for all kinds */
if (!summary->have_max_freq)
return -1;
/* rank first by frequency, base if available, max otherwise */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
kind->ranking_value = summary->summaries[i].max_freq;
}
} else if (HWLOC_CPUKINDS_RANKING_FREQUENCY_BASE == heuristics) {
hwloc_debug("Trying to rank cpukinds by frequency base...\n");
/* we need max freq for all kinds */
if (!summary->have_base_freq)
return -1;
/* rank first by frequency, base if available, max otherwise */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
kind->ranking_value = summary->summaries[i].base_freq;
}
} else assert(0);
return hwloc__cpukinds_check_duplicate_rankings(topology);
}
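/* Numeric illustration of the coretype+frequency ranking value (assumed
 * figures): an IntelCore kind (type 2) at base 2000 MHz gets
 * (2<<20)+2000 = 2099152, an IntelAtom kind (type 1) at 3000 MHz gets
 * (1<<20)+3000 = 1051576, so Core always outranks Atom because the summary
 * stores MHz values < 100000, well below 1<<20. */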
static int hwloc__cpukinds_compare_ranking_values(const void *_a, const void *_b)
{
const struct hwloc_internal_cpukind_s *a = _a;
const struct hwloc_internal_cpukind_s *b = _b;
return a->ranking_value - b->ranking_value;
}
/* this function requires ranking values to be unique */
static void
hwloc__cpukinds_finalize_ranking(struct hwloc_topology *topology)
{
unsigned i;
/* sort */
qsort(topology->cpukinds, topology->nr_cpukinds, sizeof(*topology->cpukinds), hwloc__cpukinds_compare_ranking_values);
/* define our own efficiency between 0 and N-1 */
for(i=0; i<topology->nr_cpukinds; i++)
topology->cpukinds[i].efficiency = i;
}
int
hwloc_internal_cpukinds_rank(struct hwloc_topology *topology)
{
enum hwloc_cpukinds_ranking heuristics;
char *env;
unsigned i;
int err;
if (!topology->nr_cpukinds)
return 0;
if (topology->nr_cpukinds == 1) {
topology->cpukinds[0].efficiency = 0;
return 0;
}
heuristics = HWLOC_CPUKINDS_RANKING_DEFAULT;
env = getenv("HWLOC_CPUKINDS_RANKING");
if (env) {
if (!strcmp(env, "default"))
heuristics = HWLOC_CPUKINDS_RANKING_DEFAULT;
else if (!strcmp(env, "none"))
heuristics = HWLOC_CPUKINDS_RANKING_NONE;
else if (!strcmp(env, "coretype+frequency"))
heuristics = HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY;
else if (!strcmp(env, "coretype"))
heuristics = HWLOC_CPUKINDS_RANKING_CORETYPE;
else if (!strcmp(env, "frequency"))
heuristics = HWLOC_CPUKINDS_RANKING_FREQUENCY;
else if (!strcmp(env, "frequency_max"))
heuristics = HWLOC_CPUKINDS_RANKING_FREQUENCY_MAX;
else if (!strcmp(env, "frequency_base"))
heuristics = HWLOC_CPUKINDS_RANKING_FREQUENCY_BASE;
else if (!strcmp(env, "forced_efficiency"))
heuristics = HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY;
else if (!strcmp(env, "no_forced_efficiency"))
heuristics = HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY;
else if (!hwloc_hide_errors())
fprintf(stderr, "Failed to recognize HWLOC_CPUKINDS_RANKING value %s\n", env);
}
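/* Usage note (illustrative): running with HWLOC_CPUKINDS_RANKING=frequency_max
 * in the environment skips the forced-efficiency and coretype heuristics and
 * ranks kinds purely by their FrequencyMaxMHz info. */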
if (heuristics == HWLOC_CPUKINDS_RANKING_DEFAULT
|| heuristics == HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY) {
/* default is forced_efficiency first */
struct hwloc_cpukinds_info_summary summary;
enum hwloc_cpukinds_ranking subheuristics;
const char *arch;
if (heuristics == HWLOC_CPUKINDS_RANKING_DEFAULT)
hwloc_debug("Using default ranking strategy...\n");
else
hwloc_debug("Using custom ranking strategy from HWLOC_CPUKINDS_RANKING=%s\n", env);
if (heuristics != HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY) {
err = hwloc__cpukinds_try_rank_by_forced_efficiency(topology);
if (!err)
goto ready;
}
summary.summaries = calloc(topology->nr_cpukinds, sizeof(*summary.summaries));
if (!summary.summaries)
goto failed;
hwloc__cpukinds_summarize_info(topology, &summary);
arch = hwloc_obj_get_info_by_name(topology->levels[0][0], "Architecture");
/* TODO: rather coretype_frequency only on x86/Intel? */
if (arch && (!strncmp(arch, "arm", 3) || !strncmp(arch, "aarch", 5)))
/* then frequency on ARM */
subheuristics = HWLOC_CPUKINDS_RANKING_FREQUENCY;
else
/* or coretype+frequency otherwise */
subheuristics = HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY;
err = hwloc__cpukinds_try_rank_by_info(topology, subheuristics, &summary);
free(summary.summaries);
if (!err)
goto ready;
} else if (heuristics == HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY) {
hwloc_debug("Using custom ranking strategy from HWLOC_CPUKINDS_RANKING=%s\n", env);
err = hwloc__cpukinds_try_rank_by_forced_efficiency(topology);
if (!err)
goto ready;
} else if (heuristics != HWLOC_CPUKINDS_RANKING_NONE) {
/* custom heuristics */
struct hwloc_cpukinds_info_summary summary;
hwloc_debug("Using custom ranking strategy from HWLOC_CPUKINDS_RANKING=%s\n", env);
summary.summaries = calloc(topology->nr_cpukinds, sizeof(*summary.summaries));
if (!summary.summaries)
goto failed;
hwloc__cpukinds_summarize_info(topology, &summary);
err = hwloc__cpukinds_try_rank_by_info(topology, heuristics, &summary);
free(summary.summaries);
if (!err)
goto ready;
}
failed:
/* failed to rank, clear efficiencies */
for(i=0; i<topology->nr_cpukinds; i++)
topology->cpukinds[i].efficiency = HWLOC_CPUKIND_EFFICIENCY_UNKNOWN;
hwloc_debug("Failed to rank cpukinds.\n\n");
return 0;
ready:
for(i=0; i<topology->nr_cpukinds; i++)
hwloc_debug("cpukind #%u got ranking value %llu\n", i, (unsigned long long) topology->cpukinds[i].ranking_value);
hwloc__cpukinds_finalize_ranking(topology);
#ifdef HWLOC_DEBUG
for(i=0; i<topology->nr_cpukinds; i++)
assert(topology->cpukinds[i].efficiency == (int) i);
#endif
hwloc_debug("\n");
return 0;
}
/*****************
* Consulting
*/
int
hwloc_cpukinds_get_nr(hwloc_topology_t topology, unsigned long flags)
{
if (flags) {
errno = EINVAL;
return -1;
}
return topology->nr_cpukinds;
}
int
hwloc_cpukinds_get_info(hwloc_topology_t topology,
unsigned id,
hwloc_bitmap_t cpuset,
int *efficiencyp,
unsigned *nr_infosp, struct hwloc_info_s **infosp,
unsigned long flags)
{
struct hwloc_internal_cpukind_s *kind;
if (flags) {
errno = EINVAL;
return -1;
}
if (id >= topology->nr_cpukinds) {
errno = ENOENT;
return -1;
}
kind = &topology->cpukinds[id];
if (cpuset)
hwloc_bitmap_copy(cpuset, kind->cpuset);
if (efficiencyp)
*efficiencyp = kind->efficiency;
if (nr_infosp && infosp) {
*nr_infosp = kind->nr_infos;
*infosp = kind->infos;
}
return 0;
}
int
hwloc_cpukinds_get_by_cpuset(hwloc_topology_t topology,
hwloc_const_bitmap_t cpuset,
unsigned long flags)
{
unsigned id;
if (flags) {
errno = EINVAL;
return -1;
}
if (!cpuset || hwloc_bitmap_iszero(cpuset)) {
errno = EINVAL;
return -1;
}
for(id=0; id<topology->nr_cpukinds; id++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[id];
int res = hwloc_bitmap_compare_inclusion(cpuset, kind->cpuset);
if (res == HWLOC_BITMAP_EQUAL || res == HWLOC_BITMAP_INCLUDED) {
return (int) id;
} else if (res == HWLOC_BITMAP_INTERSECTS || res == HWLOC_BITMAP_CONTAINS) {
errno = EXDEV;
return -1;
}
}
errno = ENOENT;
return -1;
}
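/* Example outcomes (illustrative): with kinds {0-3} and {4-7}, querying
 * cpuset {4-5} returns 1 (included in the second kind), querying {2-5}
 * fails with EXDEV (it straddles both kinds), and querying {8} fails
 * with ENOENT (no kind contains it). */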


@@ -1,5 +1,5 @@
/*
-* Copyright © 2013-2019 Inria. All rights reserved.
+* Copyright © 2013-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -333,10 +333,8 @@ int hwloc_topology_diff_build(hwloc_topology_t topo1,
if (!err) {
if (SETS_DIFFERENT(allowed_cpuset, topo1, topo2)
-|| SETS_DIFFERENT(allowed_nodeset, topo1, topo2)) {
-hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff);
-err = 1;
-}
+|| SETS_DIFFERENT(allowed_nodeset, topo1, topo2))
+goto roottoocomplex;
}
if (!err) {
@@ -346,33 +344,78 @@ int hwloc_topology_diff_build(hwloc_topology_t topo1,
dist1 = topo1->first_dist;
dist2 = topo2->first_dist;
while (dist1 || dist2) {
-if (!!dist1 != !!dist2) {
-hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff);
-err = 1;
-break;
-}
+if (!!dist1 != !!dist2)
+goto roottoocomplex;
if (dist1->unique_type != dist2->unique_type
|| dist1->different_types || dist2->different_types /* too lazy to support this case */
|| dist1->nbobjs != dist2->nbobjs
|| dist1->kind != dist2->kind
-|| memcmp(dist1->values, dist2->values, dist1->nbobjs * dist1->nbobjs * sizeof(*dist1->values))) {
-hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff);
-err = 1;
-break;
-}
+|| memcmp(dist1->values, dist2->values, dist1->nbobjs * dist1->nbobjs * sizeof(*dist1->values)))
+goto roottoocomplex;
for(i=0; i<dist1->nbobjs; i++)
/* gp_index isn't enforced above. so compare logical_index instead, which is enforced. requires distances refresh() above */
-if (dist1->objs[i]->logical_index != dist2->objs[i]->logical_index) {
-hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff);
-err = 1;
-break;
-}
+if (dist1->objs[i]->logical_index != dist2->objs[i]->logical_index)
+goto roottoocomplex;
dist1 = dist1->next;
dist2 = dist2->next;
}
}
if (!err) {
/* memattrs */
hwloc_internal_memattrs_refresh(topo1);
hwloc_internal_memattrs_refresh(topo2);
if (topo1->nr_memattrs != topo2->nr_memattrs)
goto roottoocomplex;
for(i=0; i<topo1->nr_memattrs; i++) {
struct hwloc_internal_memattr_s *imattr1 = &topo1->memattrs[i], *imattr2 = &topo2->memattrs[i];
unsigned j;
if (strcmp(imattr1->name, imattr2->name)
|| imattr1->flags != imattr2->flags
|| imattr1->nr_targets != imattr2->nr_targets)
goto roottoocomplex;
if (i == HWLOC_MEMATTR_ID_CAPACITY
|| i == HWLOC_MEMATTR_ID_LOCALITY)
/* no need to check virtual attributes, they were refreshed from other topology attributes, checked above */
continue;
for(j=0; j<imattr1->nr_targets; j++) {
struct hwloc_internal_memattr_target_s *imtg1 = &imattr1->targets[j], *imtg2 = &imattr2->targets[j];
if (imtg1->type != imtg2->type)
goto roottoocomplex;
if (imtg1->obj->logical_index != imtg2->obj->logical_index)
goto roottoocomplex;
if (imattr1->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
unsigned k;
for(k=0; k<imtg1->nr_initiators; k++) {
struct hwloc_internal_memattr_initiator_s *imi1 = &imtg1->initiators[k], *imi2 = &imtg2->initiators[k];
if (imi1->value != imi2->value
|| imi1->initiator.type != imi2->initiator.type)
goto roottoocomplex;
if (imi1->initiator.type == HWLOC_LOCATION_TYPE_CPUSET) {
if (!hwloc_bitmap_isequal(imi1->initiator.location.cpuset, imi2->initiator.location.cpuset))
goto roottoocomplex;
} else if (imi1->initiator.type == HWLOC_LOCATION_TYPE_OBJECT) {
if (imi1->initiator.location.object.type != imi2->initiator.location.object.type)
goto roottoocomplex;
if (imi1->initiator.location.object.obj->logical_index != imi2->initiator.location.object.obj->logical_index)
goto roottoocomplex;
} else {
assert(0);
}
}
} else {
if (imtg1->noinitiator_value != imtg2->noinitiator_value)
goto roottoocomplex;
}
}
}
}
return err;
roottoocomplex:
hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff);
return 1;
}
/********************


@@ -1,5 +1,5 @@
/*
-* Copyright © 2010-2019 Inria. All rights reserved.
+* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2011-2012 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -526,36 +526,6 @@ int hwloc_distances_add(hwloc_topology_t topology,
* Refresh objects in distances
*/
static hwloc_obj_t hwloc_find_obj_by_depth_and_gp_index(hwloc_topology_t topology, unsigned depth, uint64_t gp_index)
{
hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
while (obj) {
if (obj->gp_index == gp_index)
return obj;
obj = obj->next_cousin;
}
return NULL;
}
static hwloc_obj_t hwloc_find_obj_by_type_and_gp_index(hwloc_topology_t topology, hwloc_obj_type_t type, uint64_t gp_index)
{
int depth = hwloc_get_type_depth(topology, type);
if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
return NULL;
if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
int topodepth = hwloc_topology_get_depth(topology);
for(depth=0; depth<topodepth; depth++) {
if (hwloc_get_depth_type(topology, depth) == type) {
hwloc_obj_t obj = hwloc_find_obj_by_depth_and_gp_index(topology, depth, gp_index);
if (obj)
return obj;
}
}
return NULL;
}
return hwloc_find_obj_by_depth_and_gp_index(topology, depth, gp_index);
}
static void
hwloc_internal_distances_restrict(hwloc_obj_t *objs,
uint64_t *indexes,
@@ -612,7 +582,7 @@ hwloc_internal_distances_refresh_one(hwloc_topology_t topology,
else
abort();
} else {
-obj = hwloc_find_obj_by_type_and_gp_index(topology, different_types ? different_types[i] : unique_type, indexes[i]);
+obj = hwloc_get_obj_by_type_and_gp_index(topology, different_types ? different_types[i] : unique_type, indexes[i]);
}
objs[i] = obj;
if (!obj)
@@ -874,26 +844,6 @@ hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
* Grouping objects according to distances
*/
static void hwloc_report_user_distance_error(const char *msg, int line)
{
static int reported = 0;
if (!reported && !hwloc_hide_errors()) {
fprintf(stderr, "****************************************************************************\n");
fprintf(stderr, "* hwloc %s was given invalid distances by the user.\n", HWLOC_VERSION);
fprintf(stderr, "*\n");
fprintf(stderr, "* %s\n", msg);
fprintf(stderr, "* Error occurred in topology.c line %d\n", line);
fprintf(stderr, "*\n");
fprintf(stderr, "* Please make sure that distances given through the programming API\n");
fprintf(stderr, "* do not contradict any other topology information.\n");
fprintf(stderr, "* \n");
fprintf(stderr, "* hwloc will now ignore this invalid topology information and continue.\n");
fprintf(stderr, "****************************************************************************\n");
reported = 1;
}
}
static int hwloc_compare_values(uint64_t a, uint64_t b, float accuracy)
{
if (accuracy != 0.0f && fabsf((float)a-(float)b) < (float)a * accuracy)
@@ -1086,7 +1036,7 @@ hwloc__groups_by_distances(struct hwloc_topology *topology,
hwloc_debug_1arg_bitmap("adding Group object with %u objects and cpuset %s\n",
groupsizes[i], group_obj->cpuset);
res_obj = hwloc__insert_object_by_cpuset(topology, NULL, group_obj,
-(kind & HWLOC_DISTANCES_KIND_FROM_USER) ? hwloc_report_user_distance_error : hwloc_report_os_error);
+(kind & HWLOC_DISTANCES_KIND_FROM_USER) ? "distances:fromuser:group" : "distances:group");
/* res_obj may be NULL on failure to insert. */
if (!res_obj)
failed++;

src/3rdparty/hwloc/src/memattrs.c (new file, 1197 lines; diff suppressed because it is too large)


@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
-* Copyright © 2009-2018 Inria. All rights reserved.
+* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -114,7 +114,7 @@ void hwloc_add_uname_info(struct hwloc_topology *topology __hwloc_attribute_unus
char *
hwloc_progname(struct hwloc_topology *topology __hwloc_attribute_unused)
{
-#if HAVE_DECL_GETMODULEFILENAME
+#if (defined HAVE_DECL_GETMODULEFILENAME) && HAVE_DECL_GETMODULEFILENAME
char name[256], *local_basename;
unsigned res = GetModuleFileName(NULL, name, sizeof(name));
if (res == sizeof(name) || !res)


@@ -232,7 +232,8 @@ enum hwloc_pci_busid_comparison_e {
HWLOC_PCI_BUSID_LOWER,
HWLOC_PCI_BUSID_HIGHER,
HWLOC_PCI_BUSID_INCLUDED,
-HWLOC_PCI_BUSID_SUPERSET
+HWLOC_PCI_BUSID_SUPERSET,
+HWLOC_PCI_BUSID_EQUAL
};
static enum hwloc_pci_busid_comparison_e
@@ -274,11 +275,8 @@ hwloc_pci_compare_busids(struct hwloc_obj *a, struct hwloc_obj *b)
if (a->attr->pcidev.func > b->attr->pcidev.func)
return HWLOC_PCI_BUSID_HIGHER;
-/* Should never reach here. Abort on both debug builds and
-non-debug builds */
-assert(0);
-fprintf(stderr, "Bad assertion in hwloc %s:%d (aborting)\n", __FILE__, __LINE__);
-exit(1);
+/* Should never reach here. */
+return HWLOC_PCI_BUSID_EQUAL;
}
static void
@@ -329,6 +327,23 @@ hwloc_pci_add_object(struct hwloc_obj *parent, struct hwloc_obj **parent_io_firs
}
return;
}
case HWLOC_PCI_BUSID_EQUAL: {
static int reported = 0;
if (!reported && !hwloc_hide_errors()) {
fprintf(stderr, "*********************************************************\n");
fprintf(stderr, "* hwloc %s received invalid PCI information.\n", HWLOC_VERSION);
fprintf(stderr, "*\n");
fprintf(stderr, "* Trying to insert PCI object %04x:%02x:%02x.%01x at %04x:%02x:%02x.%01x\n",
new->attr->pcidev.domain, new->attr->pcidev.bus, new->attr->pcidev.dev, new->attr->pcidev.func,
(*curp)->attr->pcidev.domain, (*curp)->attr->pcidev.bus, (*curp)->attr->pcidev.dev, (*curp)->attr->pcidev.func);
fprintf(stderr, "*\n");
fprintf(stderr, "* hwloc will now ignore this object and continue.\n");
fprintf(stderr, "*********************************************************\n");
reported = 1;
}
hwloc_free_unlinked_object(new);
return;
}
}
}
/* add to the end of the list if higher than everybody */
@@ -425,39 +440,10 @@ hwloc_pcidisc_add_hostbridges(struct hwloc_topology *topology,
static struct hwloc_obj *
hwloc_pci_fixup_busid_parent(struct hwloc_topology *topology __hwloc_attribute_unused,
-struct hwloc_pcidev_attr_s *busid,
-struct hwloc_obj *parent)
+struct hwloc_pcidev_attr_s *busid __hwloc_attribute_unused,
+struct hwloc_obj *parent __hwloc_attribute_unused)
{
/* Xeon E5v3 in cluster-on-die mode only have PCI on the first NUMA node of each package.
* but many dual-processor host report the second PCI hierarchy on 2nd NUMA of first package.
*/
if (parent->depth >= 2
&& parent->type == HWLOC_OBJ_NUMANODE
&& parent->sibling_rank == 1 && parent->parent->arity == 2
&& parent->parent->type == HWLOC_OBJ_PACKAGE
&& parent->parent->sibling_rank == 0 && parent->parent->parent->arity == 2) {
const char *cpumodel = hwloc_obj_get_info_by_name(parent->parent, "CPUModel");
if (cpumodel && strstr(cpumodel, "Xeon")) {
if (!hwloc_hide_errors()) {
fprintf(stderr, "****************************************************************************\n");
fprintf(stderr, "* hwloc %s has encountered an incorrect PCI locality information.\n", HWLOC_VERSION);
fprintf(stderr, "* PCI bus %04x:%02x is supposedly close to 2nd NUMA node of 1st package,\n",
busid->domain, busid->bus);
fprintf(stderr, "* however hwloc believes this is impossible on this architecture.\n");
fprintf(stderr, "* Therefore the PCI bus will be moved to 1st NUMA node of 2nd package.\n");
fprintf(stderr, "*\n");
fprintf(stderr, "* If you feel this fixup is wrong, disable it by setting in your environment\n");
fprintf(stderr, "* HWLOC_PCI_%04x_%02x_LOCALCPUS= (empty value), and report the problem\n",
busid->domain, busid->bus);
fprintf(stderr, "* to the hwloc's user mailing list together with the XML output of lstopo.\n");
fprintf(stderr, "*\n");
fprintf(stderr, "* You may silence this message by setting HWLOC_HIDE_ERRORS=1 in your environment.\n");
fprintf(stderr, "****************************************************************************\n");
}
return parent->parent->next_sibling->first_child;
}
}
/* no quirk for now */
return parent;
}


@@ -1,5 +1,5 @@
/*
-* Copyright © 2017-2019 Inria. All rights reserved.
+* Copyright © 2017-2020 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -97,6 +97,7 @@ hwloc_shmem_topology_write(hwloc_topology_t topology,
* without being able to free() them.
*/
hwloc_internal_distances_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
header.header_version = HWLOC_SHMEM_HEADER_VERSION;
header.header_length = sizeof(header);
@@ -134,8 +135,9 @@ hwloc_shmem_topology_write(hwloc_topology_t topology,
assert((char *)mmap_res <= (char *)mmap_address + length);
-/* now refresh the new distances so that adopters can use them without refreshing the R/O shmem mapping */
+/* now refresh the new distances/memattrs so that adopters can use them without refreshing the R/O shmem mapping */
hwloc_internal_distances_refresh(new);
hwloc_internal_memattrs_refresh(topology);
/* topology is saved, release resources now */
munmap(mmap_address, length);
@@ -214,11 +216,13 @@ hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,
new->support.discovery = malloc(sizeof(*new->support.discovery));
new->support.cpubind = malloc(sizeof(*new->support.cpubind));
new->support.membind = malloc(sizeof(*new->support.membind));
-if (!new->support.discovery || !new->support.cpubind || !new->support.membind)
+new->support.misc = malloc(sizeof(*new->support.misc));
+if (!new->support.discovery || !new->support.cpubind || !new->support.membind || !new->support.misc)
goto out_with_support;
memcpy(new->support.discovery, old->support.discovery, sizeof(*new->support.discovery));
memcpy(new->support.cpubind, old->support.cpubind, sizeof(*new->support.cpubind));
memcpy(new->support.membind, old->support.membind, sizeof(*new->support.membind));
memcpy(new->support.misc, old->support.misc, sizeof(*new->support.misc));
hwloc_set_binding_hooks(new);
/* clear userdata callbacks pointing to the writer process' functions */
new->userdata_export_cb = NULL;
@@ -236,6 +240,7 @@ hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp,
free(new->support.discovery);
free(new->support.cpubind);
free(new->support.membind);
free(new->support.misc);
free(new);
out_with_components:
hwloc_components_fini();
@@ -252,6 +257,7 @@ hwloc__topology_disadopt(hwloc_topology_t topology)
free(topology->support.discovery);
free(topology->support.cpubind);
free(topology->support.membind);
free(topology->support.misc);
free(topology);
}


@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
-* Copyright © 2009-2019 Inria. All rights reserved.
+* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -471,7 +471,7 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
/* initialize parent arity to 0 so that the levels are not infinite */
data->level[count-1].arity = 0;
-while (*pos == ' ')
+while (*pos == ' ' || *pos == '\n')
pos++;
if (!*pos)
@@ -912,7 +912,7 @@ hwloc_synthetic_insert_attached(struct hwloc_topology *topology,
hwloc_synthetic_set_attr(&attached->attr, child);
-hwloc_insert_object_by_cpuset(topology, child);
+hwloc__insert_object_by_cpuset(topology, NULL, child, "synthetic:attached");
hwloc_synthetic_insert_attached(topology, data, attached->next, set);
}
@@ -964,7 +964,7 @@ hwloc__look_synthetic(struct hwloc_topology *topology,
hwloc_synthetic_set_attr(&curlevel->attr, obj);
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "synthetic");
}
hwloc_synthetic_insert_attached(topology, data, curlevel->attached, set);


@@ -93,9 +93,10 @@ typedef struct _GROUP_AFFINITY {
#endif
#ifndef HAVE_PROCESSOR_RELATIONSHIP
-typedef struct _PROCESSOR_RELATIONSHIP {
+typedef struct HWLOC_PROCESSOR_RELATIONSHIP {
BYTE Flags;
-BYTE Reserved[21];
+BYTE EfficiencyClass; /* for RelationProcessorCore, higher means greater performance but less efficiency, only available in Win10+ */
+BYTE Reserved[20];
WORD GroupCount;
GROUP_AFFINITY GroupMask[ANYSIZE_ARRAY];
} PROCESSOR_RELATIONSHIP, *PPROCESSOR_RELATIONSHIP;
@@ -228,9 +229,12 @@ static PFN_VIRTUALFREEEX VirtualFreeExProc;
typedef BOOL (WINAPI *PFN_QUERYWORKINGSETEX)(HANDLE hProcess, PVOID pv, DWORD cb);
static PFN_QUERYWORKINGSETEX QueryWorkingSetExProc;
typedef NTSTATUS (WINAPI *PFN_RTLGETVERSION)(OSVERSIONINFOEX*);
PFN_RTLGETVERSION RtlGetVersionProc;
static void hwloc_win_get_function_ptrs(void)
{
-HMODULE kernel32;
+HMODULE kernel32, ntdll;
#if HWLOC_HAVE_GCC_W_CAST_FUNCTION_TYPE
#pragma GCC diagnostic ignored "-Wcast-function-type"
@@ -275,6 +279,9 @@ static void hwloc_win_get_function_ptrs(void)
QueryWorkingSetExProc = (PFN_QUERYWORKINGSETEX) GetProcAddress(psapi, "QueryWorkingSetEx");
}
ntdll = GetModuleHandle("ntdll");
RtlGetVersionProc = (PFN_RTLGETVERSION) GetProcAddress(ntdll, "RtlGetVersion");
#if HWLOC_HAVE_GCC_W_CAST_FUNCTION_TYPE
#pragma GCC diagnostic warning "-Wcast-function-type"
#endif
@@ -734,6 +741,88 @@ hwloc_win_get_area_memlocation(hwloc_topology_t topology __hwloc_attribute_unuse
}
/*************************
* Efficiency classes
*/
struct hwloc_win_efficiency_classes {
unsigned nr_classes;
unsigned nr_classes_allocated;
struct hwloc_win_efficiency_class {
unsigned value;
hwloc_bitmap_t cpuset;
} *classes;
};
static void
hwloc_win_efficiency_classes_init(struct hwloc_win_efficiency_classes *classes)
{
classes->classes = NULL;
classes->nr_classes_allocated = 0;
classes->nr_classes = 0;
}
static int
hwloc_win_efficiency_classes_add(struct hwloc_win_efficiency_classes *classes,
hwloc_const_bitmap_t cpuset,
unsigned value)
{
unsigned i;
/* look for existing class with that efficiency value */
for(i=0; i<classes->nr_classes; i++) {
if (classes->classes[i].value == value) {
hwloc_bitmap_or(classes->classes[i].cpuset, classes->classes[i].cpuset, cpuset);
return 0;
}
}
/* extend the array if needed */
if (classes->nr_classes == classes->nr_classes_allocated) {
struct hwloc_win_efficiency_class *tmp;
unsigned new_nr_allocated = 2*classes->nr_classes_allocated;
if (!new_nr_allocated) {
#define HWLOC_WIN_EFFICIENCY_CLASSES_DEFAULT_MAX 4 /* 2 should be enough in most cases */
new_nr_allocated = HWLOC_WIN_EFFICIENCY_CLASSES_DEFAULT_MAX;
}
tmp = realloc(classes->classes, new_nr_allocated * sizeof(*classes->classes));
if (!tmp)
return -1;
classes->classes = tmp;
classes->nr_classes_allocated = new_nr_allocated;
}
/* add new class */
classes->classes[classes->nr_classes].cpuset = hwloc_bitmap_alloc();
if (!classes->classes[classes->nr_classes].cpuset)
return -1;
classes->classes[classes->nr_classes].value = value;
hwloc_bitmap_copy(classes->classes[classes->nr_classes].cpuset, cpuset);
classes->nr_classes++;
return 0;
}
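/* Growth illustration (not part of the diff): the first class triggers an
 * allocation of HWLOC_WIN_EFFICIENCY_CLASSES_DEFAULT_MAX = 4 slots, the 5th
 * distinct efficiency value doubles that to 8, and a repeated value never
 * grows the array since it only ORs into the matching class' cpuset. */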
static void
hwloc_win_efficiency_classes_register(hwloc_topology_t topology,
struct hwloc_win_efficiency_classes *classes)
{
unsigned i;
for(i=0; i<classes->nr_classes; i++) {
hwloc_internal_cpukinds_register(topology, classes->classes[i].cpuset, classes->classes[i].value, NULL, 0, 0);
classes->classes[i].cpuset = NULL; /* given to cpukinds */
}
}
static void
hwloc_win_efficiency_classes_destroy(struct hwloc_win_efficiency_classes *classes)
{
unsigned i;
for(i=0; i<classes->nr_classes; i++)
hwloc_bitmap_free(classes->classes[i].cpuset);
free(classes->classes);
}
/*************************
* discovery
*/
@@ -753,6 +842,12 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
DWORD length;
int gotnuma = 0;
int gotnumamemory = 0;
OSVERSIONINFOEX osvi;
char versionstr[20];
char hostname[122] = "";
unsigned hostname_size = sizeof(hostname);
int has_efficiencyclass = 0;
struct hwloc_win_efficiency_classes eclasses;
assert(dstatus->phase == HWLOC_DISC_PHASE_CPU);
@@ -760,6 +855,25 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
/* somebody discovered things */
return -1;
ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
if (RtlGetVersionProc) {
/* RtlGetVersion() returns the currently-running Windows version */
RtlGetVersionProc(&osvi);
} else {
/* GetVersionEx() and IsWindows10OrGreater() depend on what the manifest says
* (manifest of the program, not of libhwloc.dll), they may return old versions
* if the currently-running Windows is not listed in the manifest.
*/
GetVersionEx((LPOSVERSIONINFO)&osvi);
}
if (osvi.dwMajorVersion >= 10) {
has_efficiencyclass = 1;
hwloc_win_efficiency_classes_init(&eclasses);
}
hwloc_alloc_root_sets(topology->levels[0][0]);
GetSystemInfo(&SystemInfo);
@@ -887,7 +1001,7 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
default:
break;
}
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformation");
}
free(procInfo);
@@ -919,6 +1033,7 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
(void*) procInfo < (void*) ((uintptr_t) procInfoTotal + length);
procInfo = (void*) ((uintptr_t) procInfo + procInfo->Size)) {
unsigned num, i;
unsigned efficiency_class = 0;
GROUP_AFFINITY *GroupMask;
/* Ignore unknown caches */
@@ -953,6 +1068,11 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
type = HWLOC_OBJ_CORE;
num = procInfo->Processor.GroupCount;
GroupMask = procInfo->Processor.GroupMask;
if (has_efficiencyclass)
/* the EfficiencyClass field didn't exist before Windows10 and recent MSVC headers,
* so just access it manually instead of trying to detect it.
*/
efficiency_class = * ((&procInfo->Processor.Flags) + 1);
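/* Illustration (not part of the diff): PROCESSOR_RELATIONSHIP starts with
 * BYTE Flags followed immediately by BYTE EfficiencyClass (see the struct
 * definition earlier in this diff), so (&Flags)+1 addresses the
 * EfficiencyClass byte even when building against pre-Win10 SDK headers
 * that still declare Reserved[21] there. */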
break;
case RelationGroup:
/* So strange an interface... */
@@ -981,7 +1101,7 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, id);
obj->cpuset = set;
obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP;
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformation:ProcessorGroup");
} else
hwloc_bitmap_free(set);
}
@@ -1005,6 +1125,11 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
}
hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_obj_type_string(type), id, obj->cpuset);
switch (type) {
case HWLOC_OBJ_CORE: {
if (has_efficiencyclass)
hwloc_win_efficiency_classes_add(&eclasses, obj->cpuset, efficiency_class);
break;
}
case HWLOC_OBJ_NUMANODE:
{
ULONGLONG avail;
@@ -1055,7 +1180,7 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
default:
break;
}
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformationEx");
}
free(procInfoTotal);
}
@@ -1076,29 +1201,88 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
hwloc_bitmap_only(obj->cpuset, idx);
hwloc_debug_1arg_bitmap("cpu %u has cpuset %s\n",
idx, obj->cpuset);
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:ProcessorGroup:pu");
} hwloc_bitmap_foreach_end();
hwloc_bitmap_free(groups_pu_set);
} else {
/* no processor groups */
-SYSTEM_INFO sysinfo;
hwloc_obj_t obj;
unsigned idx;
-GetSystemInfo(&sysinfo);
for(idx=0; idx<32; idx++)
-if (sysinfo.dwActiveProcessorMask & (((DWORD_PTR)1)<<idx)) {
+if (SystemInfo.dwActiveProcessorMask & (((DWORD_PTR)1)<<idx)) {
obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, idx);
obj->cpuset = hwloc_bitmap_alloc();
hwloc_bitmap_only(obj->cpuset, idx);
hwloc_debug_1arg_bitmap("cpu %u has cpuset %s\n",
idx, obj->cpuset);
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:pu");
}
}
if (has_efficiencyclass) {
topology->support.discovery->cpukind_efficiency = 1;
hwloc_win_efficiency_classes_register(topology, &eclasses);
}
out:
if (has_efficiencyclass)
hwloc_win_efficiency_classes_destroy(&eclasses);
/* emulate uname instead of calling hwloc_add_uname_info() */
hwloc_obj_add_info(topology->levels[0][0], "Backend", "Windows");
-hwloc_add_uname_info(topology, NULL);
hwloc_obj_add_info(topology->levels[0][0], "OSName", "Windows");
#if defined(__CYGWIN__)
hwloc_obj_add_info(topology->levels[0][0], "WindowsBuildEnvironment", "Cygwin");
#elif defined(__MINGW32__)
hwloc_obj_add_info(topology->levels[0][0], "WindowsBuildEnvironment", "MinGW");
#endif
/* see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa */
if (osvi.dwMajorVersion == 10) {
if (osvi.dwMinorVersion == 0)
hwloc_obj_add_info(topology->levels[0][0], "OSRelease", "10");
} else if (osvi.dwMajorVersion == 6) {
if (osvi.dwMinorVersion == 3)
hwloc_obj_add_info(topology->levels[0][0], "OSRelease", "8.1"); /* or "Server 2012 R2" */
else if (osvi.dwMinorVersion == 2)
hwloc_obj_add_info(topology->levels[0][0], "OSRelease", "8"); /* or "Server 2012" */
else if (osvi.dwMinorVersion == 1)
hwloc_obj_add_info(topology->levels[0][0], "OSRelease", "7"); /* or "Server 2008 R2" */
else if (osvi.dwMinorVersion == 0)
hwloc_obj_add_info(topology->levels[0][0], "OSRelease", "Vista"); /* or "Server 2008" */
} /* earlier versions are ignored */
snprintf(versionstr, sizeof(versionstr), "%u.%u.%u", osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.dwBuildNumber);
hwloc_obj_add_info(topology->levels[0][0], "OSVersion", versionstr);
#if !defined(__CYGWIN__)
GetComputerName(hostname, &hostname_size);
#else
gethostname(hostname, hostname_size);
#endif
if (*hostname)
hwloc_obj_add_info(topology->levels[0][0], "Hostname", hostname);
/* convert to unix-like architecture strings */
switch (SystemInfo.wProcessorArchitecture) {
case 0:
hwloc_obj_add_info(topology->levels[0][0], "Architecture", "i686");
break;
case 9:
hwloc_obj_add_info(topology->levels[0][0], "Architecture", "x86_64");
break;
case 5:
hwloc_obj_add_info(topology->levels[0][0], "Architecture", "arm");
break;
case 12:
hwloc_obj_add_info(topology->levels[0][0], "Architecture", "arm64");
break;
case 6:
hwloc_obj_add_info(topology->levels[0][0], "Architecture", "ia64");
break;
}
return 0;
}


@@ -1,5 +1,5 @@
/*
-* Copyright © 2010-2019 Inria. All rights reserved.
+* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2013 Université Bordeaux
* Copyright © 2010-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -181,6 +181,7 @@ enum hwloc_x86_disc_flags {
#define has_topoext(features) ((features)[6] & (1 << 22))
#define has_x2apic(features) ((features)[4] & (1 << 21))
#define has_hybrid(features) ((features)[18] & (1 << 15))
struct cacheinfo {
hwloc_obj_cache_type_t type;
@@ -217,6 +218,9 @@ struct procinfo {
unsigned cpustepping;
unsigned cpumodelnumber;
unsigned cpufamilynumber;
unsigned hybridcoretype;
unsigned hybridnativemodel;
};
enum cpuid_type {
@@ -681,6 +685,15 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
}
}
if (highest_cpuid >= 0x1a && has_hybrid(features)) {
/* Get hybrid cpu information from cpuid 0x1a */
eax = 0x1a;
ecx = 0;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
infos->hybridcoretype = eax >> 24;
infos->hybridnativemodel = eax & 0xffffff;
}
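/* Decoding example (hypothetical register value): if leaf 0x1a returned
 * eax = 0x40000097, the core type would be eax>>24 = 0x40 (Core, vs 0x20
 * for Atom, matching the checks in the cpukinds registration below) and
 * the native model id would be eax & 0xffffff = 0x97. */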
/*********************************************************************************
* Get the hierarchy of thread, core, die, package, etc. from CPU-specific leaves
*/
@@ -751,7 +764,13 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
/* default cacheid value */
cache->cacheid = infos->apicid / cache->nbthreads_sharing;
-if (cpuid_type == amd) {
+if (cpuid_type == intel) {
+/* round nbthreads_sharing to nearest power of two to build a mask (for clearing lower bits) */
+unsigned bits = hwloc_flsl(cache->nbthreads_sharing-1);
+unsigned mask = ~((1U<<bits) - 1);
+cache->cacheid = infos->apicid & mask;
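/* Worked example (illustrative): with nbthreads_sharing = 12,
 * bits = hwloc_flsl(11) = 4 and mask = ~0xF, so APIC ids 0-15 all get
 * cacheid 0 and ids 16-31 get cacheid 16, i.e. one id per cache even
 * though only 12 of every 16 APIC ids are populated. */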
} else if (cpuid_type == amd) {
/* AMD quirks */
if (infos->cpufamilynumber == 0x17
&& cache->level == 3 && cache->nbthreads_sharing == 6) {
@@ -872,7 +891,7 @@ hwloc_x86_add_groups(hwloc_topology_t topology,
obj->attr->group.dont_merge = dont_merge;
hwloc_debug_2args_bitmap("os %s %u has cpuset %s\n",
subtype, id, obj_cpuset);
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "x86:group");
}
}
@@ -889,6 +908,16 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
int gotnuma = 0;
int fulldiscovery = (flags & HWLOC_X86_DISC_FLAG_FULL);
#ifdef HWLOC_DEBUG
hwloc_debug("\nSummary of x86 CPUID topology:\n");
for(i=0; i<nbprocs; i++) {
hwloc_debug("PU %u present=%u apicid=%u on PKG %d CORE %d DIE %d NODE %d\n",
i, infos[i].present, infos[i].apicid,
infos[i].ids[PKG], infos[i].ids[CORE], infos[i].ids[DIE], infos[i].ids[NODE]);
}
hwloc_debug("\n");
#endif
for (i = 0; i < nbprocs; i++)
if (infos[i].present) {
hwloc_bitmap_set(complete_cpuset, i);
@@ -930,7 +959,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
hwloc_debug_1arg_bitmap("os package %u has cpuset %s\n",
packageid, package_cpuset);
-hwloc_insert_object_by_cpuset(topology, package);
+hwloc__insert_object_by_cpuset(topology, NULL, package, "x86:package");
} else {
/* Annotate previously-existing packages */
@@ -986,7 +1015,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
hwloc_bitmap_set(node->nodeset, nodeid);
hwloc_debug_1arg_bitmap("os node %u has cpuset %s\n",
nodeid, node_cpuset);
-hwloc_insert_object_by_cpuset(topology, node);
+hwloc__insert_object_by_cpuset(topology, NULL, node, "x86:numa");
gotnuma++;
}
}
@@ -1033,7 +1062,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
unknown_obj->attr->group.subkind = level;
hwloc_debug_2args_bitmap("os unknown%u %u has cpuset %s\n",
level, unknownid, unknown_cpuset);
-hwloc_insert_object_by_cpuset(topology, unknown_obj);
+hwloc__insert_object_by_cpuset(topology, NULL, unknown_obj, "x86:group:unknown");
}
}
}
@@ -1073,7 +1102,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
die->cpuset = die_cpuset;
hwloc_debug_1arg_bitmap("os die %u has cpuset %s\n",
dieid, die_cpuset);
-hwloc_insert_object_by_cpuset(topology, die);
+hwloc__insert_object_by_cpuset(topology, NULL, die, "x86:die");
}
}
}
@@ -1111,7 +1140,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
core->cpuset = core_cpuset;
hwloc_debug_1arg_bitmap("os core %u has cpuset %s\n",
coreid, core_cpuset);
-hwloc_insert_object_by_cpuset(topology, core);
+hwloc__insert_object_by_cpuset(topology, NULL, core, "x86:core");
}
}
}
@@ -1125,7 +1154,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
obj->cpuset = hwloc_bitmap_alloc();
hwloc_bitmap_only(obj->cpuset, i);
hwloc_debug_1arg_bitmap("PU %u has cpuset %s\n", i, obj->cpuset);
-hwloc_insert_object_by_cpuset(topology, obj);
+hwloc__insert_object_by_cpuset(topology, NULL, obj, "x86:pu");
}
}
@@ -1208,7 +1237,7 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
hwloc_obj_add_info(cache, "Inclusive", infos[i].cache[l].inclusive ? "1" : "0");
hwloc_debug_2args_bitmap("os L%u cache %u has cpuset %s\n",
level, cacheid, cache_cpuset);
-hwloc_insert_object_by_cpuset(topology, cache);
+hwloc__insert_object_by_cpuset(topology, NULL, cache, "x86:cache");
}
}
}
@@ -1274,8 +1303,41 @@ look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long
hwloc_bitmap_free(orig_cpuset);
}
-if (data->apicid_unique)
+if (data->apicid_unique) {
summarize(backend, infos, flags);
if (has_hybrid(features)) {
/* use hybrid info for cpukinds */
hwloc_bitmap_t atomset = hwloc_bitmap_alloc();
hwloc_bitmap_t coreset = hwloc_bitmap_alloc();
for(i=0; i<nbprocs; i++) {
if (infos[i].hybridcoretype == 0x20)
hwloc_bitmap_set(atomset, i);
else if (infos[i].hybridcoretype == 0x40)
hwloc_bitmap_set(coreset, i);
}
/* register IntelAtom set if any */
if (!hwloc_bitmap_iszero(atomset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelAtom";
hwloc_internal_cpukinds_register(topology, atomset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(atomset);
}
/* register IntelCore set if any */
if (!hwloc_bitmap_iszero(coreset)) {
struct hwloc_info_s infoattr;
infoattr.name = (char *) "CoreType";
infoattr.value = (char *) "IntelCore";
hwloc_internal_cpukinds_register(topology, coreset, HWLOC_CPUKIND_EFFICIENCY_UNKNOWN, &infoattr, 1, 0);
/* the cpuset is given to the callee */
} else {
hwloc_bitmap_free(coreset);
}
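/* Illustrative outcome (hypothetical CPU): on a hybrid part where PUs 0-7
 * report hybridcoretype 0x40 and PUs 8-15 report 0x20, this registers
 * cpukind {0-7} with CoreType=IntelCore and {8-15} with CoreType=IntelAtom,
 * both with unknown efficiency until the ranking pass sorts them. */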
}
}
/* if !data->apicid_unique, do nothing and return success, so that the caller does nothing either */
return 0;
@@ -1354,7 +1416,7 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
unsigned highest_cpuid;
unsigned highest_ext_cpuid;
/* This stores cpuid features with the same indexing as Linux */
-unsigned features[10] = { 0 };
+unsigned features[19] = { 0 };
struct procinfo *infos = NULL;
enum cpuid_type cpuid_type = unknown;
hwloc_x86_os_state_t os_state;
@@ -1381,6 +1443,9 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
/* check if binding works */
memset(&hooks, 0, sizeof(hooks));
support.membind = &memsupport;
/* We could just copy the main hooks (except in some corner cases),
* but the current overhead is negligible, so just always reget them.
*/
hwloc_set_native_binding_hooks(&hooks, &support);
if (hooks.get_thisthread_cpubind && hooks.set_thisthread_cpubind) {
get_cpubind = hooks.get_thisthread_cpubind;
@@ -1451,6 +1516,7 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
ecx = 0;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
features[9] = ebx;
features[18] = edx;
}
if (cpuid_type != intel && highest_ext_cpuid >= 0x80000001) {
@@ -1531,7 +1597,8 @@ hwloc_x86_discover(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
}
if (topology->levels[0][0]->cpuset) {
-/* somebody else discovered things */
+/* somebody else discovered things, reconnect levels so that we can look at them */
+hwloc_topology_reconnect(topology, 0);
if (topology->nb_levels == 2 && topology->level_nbobjects[1] == data->nbprocs) {
/* only PUs were discovered, as much as we would, complete the topology with everything else */
alreadypus = 1;
@@ -1539,7 +1606,6 @@ hwloc_x86_discover(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
}
/* several object types were added, we can't easily complete, just do partial discovery */
-hwloc_topology_reconnect(topology, 0);
ret = hwloc_look_x86(backend, flags);
if (ret)
hwloc_obj_add_info(topology->levels[0][0], "Backend", "x86");


@@ -213,7 +213,7 @@ hwloc__nolibxml_import_close_child(hwloc__xml_import_state_t state)
static int
hwloc__nolibxml_import_get_content(hwloc__xml_import_state_t state,
-char **beginp, size_t expected_length)
+const char **beginp, size_t expected_length)
{
hwloc__nolibxml_import_state_data_t nstate = (void*) state->data;
char *buffer = nstate->tagbuffer;
@@ -224,7 +224,7 @@ hwloc__nolibxml_import_get_content(hwloc__xml_import_state_t state,
if (nstate->closed) {
if (expected_length)
return -1;
*beginp = (char *) "";
*beginp = "";
return 0;
}


@@ -1,7 +1,7 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
-* Copyright © 2009-2011 Université Bordeaux
+* Copyright © 2009-2011, 2020 Université Bordeaux
* Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -481,11 +481,9 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
}
}
static int
-hwloc__xml_import_info(struct hwloc_xml_backend_data_s *data,
-hwloc_obj_t obj,
-hwloc__xml_import_state_t state)
+hwloc___xml_import_info(char **infonamep, char **infovaluep,
+hwloc__xml_import_state_t state)
{
char *infoname = NULL;
char *infovalue = NULL;
@@ -502,6 +500,25 @@ hwloc__xml_import_info(struct hwloc_xml_backend_data_s *data,
return -1;
}
*infonamep = infoname;
*infovaluep = infovalue;
return state->global->close_tag(state);
}
static int
hwloc__xml_import_obj_info(struct hwloc_xml_backend_data_s *data,
hwloc_obj_t obj,
hwloc__xml_import_state_t state)
{
char *infoname = NULL;
char *infovalue = NULL;
int err;
err = hwloc___xml_import_info(&infoname, &infovalue, state);
if (err < 0)
return err;
if (infoname) {
/* empty strings are ignored by libxml */
if (data->version_major < 2 &&
@@ -518,7 +535,7 @@ hwloc__xml_import_info(struct hwloc_xml_backend_data_s *data,
}
}
-return state->global->close_tag(state);
+return err;
}
static int
@@ -694,14 +711,15 @@ hwloc__xml_import_userdata(hwloc_topology_t topology __hwloc_attribute_unused, h
}
if (!topology->userdata_import_cb) {
-char *buffer;
+const char *buffer;
size_t reallength = encoded ? BASE64_ENCODED_LENGTH(length) : length;
ret = state->global->get_content(state, &buffer, reallength);
if (ret < 0)
return -1;
} else if (topology->userdata_not_decoded) {
-char *buffer, *fakename;
+const char *buffer;
+char *fakename;
size_t reallength = encoded ? BASE64_ENCODED_LENGTH(length) : length;
ret = state->global->get_content(state, &buffer, reallength);
if (ret < 0)
@@ -714,7 +732,7 @@ hwloc__xml_import_userdata(hwloc_topology_t topology __hwloc_attribute_unused, h
free(fakename);
} else if (encoded && length) {
-char *encoded_buffer;
+const char *encoded_buffer;
size_t encoded_length = BASE64_ENCODED_LENGTH(length);
ret = state->global->get_content(state, &encoded_buffer, encoded_length);
if (ret < 0)
@@ -734,7 +752,7 @@ hwloc__xml_import_userdata(hwloc_topology_t topology __hwloc_attribute_unused, h
}
} else { /* always handle length==0 in the non-encoded case */
char *buffer = (char *) "";
const char *buffer = "";
if (length) {
ret = state->global->get_content(state, &buffer, length);
if (ret < 0)
@@ -888,7 +906,7 @@ hwloc__xml_import_object(hwloc_topology_t topology,
}
} else if (!strcmp(tag, "info")) {
-ret = hwloc__xml_import_info(data, obj, &childstate);
+ret = hwloc__xml_import_obj_info(data, obj, &childstate);
} else if (data->version_major < 2 && !strcmp(tag, "distances")) {
ret = hwloc__xml_v1import_distances(data, obj, &childstate);
} else if (!strcmp(tag, "userdata")) {
@@ -1238,6 +1256,80 @@ hwloc__xml_import_object(hwloc_topology_t topology,
return -1;
}
static int
hwloc__xml_v2import_support(hwloc_topology_t topology,
hwloc__xml_import_state_t state)
{
char *name = NULL;
int value = 1; /* value is optional */
while (1) {
char *attrname, *attrvalue;
if (state->global->next_attr(state, &attrname, &attrvalue) < 0)
break;
if (!strcmp(attrname, "name"))
name = attrvalue;
else if (!strcmp(attrname, "value"))
value = atoi(attrvalue);
else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown support attribute %s\n",
state->global->msgprefix, attrname);
}
}
if (name && topology->flags & HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT) {
#ifdef HWLOC_DEBUG
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_support) == 4*sizeof(void*));
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_discovery_support) == 6);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_cpubind_support) == 11);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 15);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_misc_support) == 1);
#endif
#define DO(_cat,_name) if (!strcmp(#_cat "." #_name, name)) topology->support._cat->_name = value
DO(discovery,pu);
else DO(discovery,numa);
else DO(discovery,numa_memory);
else DO(discovery,disallowed_pu);
else DO(discovery,disallowed_numa);
else DO(discovery,cpukind_efficiency);
else DO(cpubind,set_thisproc_cpubind);
else DO(cpubind,get_thisproc_cpubind);
else DO(cpubind,set_proc_cpubind);
else DO(cpubind,get_proc_cpubind);
else DO(cpubind,set_thisthread_cpubind);
else DO(cpubind,get_thisthread_cpubind);
else DO(cpubind,set_thread_cpubind);
else DO(cpubind,get_thread_cpubind);
else DO(cpubind,get_thisproc_last_cpu_location);
else DO(cpubind,get_proc_last_cpu_location);
else DO(cpubind,get_thisthread_last_cpu_location);
else DO(membind,set_thisproc_membind);
else DO(membind,get_thisproc_membind);
else DO(membind,set_proc_membind);
else DO(membind,get_proc_membind);
else DO(membind,set_thisthread_membind);
else DO(membind,get_thisthread_membind);
else DO(membind,set_area_membind);
else DO(membind,get_area_membind);
else DO(membind,alloc_membind);
else DO(membind,firsttouch_membind);
else DO(membind,bind_membind);
else DO(membind,interleave_membind);
else DO(membind,nexttouch_membind);
else DO(membind,migrate_membind);
else DO(membind,get_area_memlocation);
else if (!strcmp("custom.exported_support", name))
/* support was exported in a custom/fake field, mark it as imported here */
topology->support.misc->imported_support = 1;
#undef DO
}
return 0;
}
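For context, a minimal sketch of the consumer side, assuming the standard hwloc public API (the XML path is invented): loading with HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT is what routes the <support> tags through hwloc__xml_v2import_support() above.
#include <hwloc.h>
#include <stdio.h>
int main(void)
{
  hwloc_topology_t topology;
  const struct hwloc_topology_support *support;
  hwloc_topology_init(&topology);
  hwloc_topology_set_xml(topology, "exported.xml");   /* hypothetical path */
  hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT);
  hwloc_topology_load(topology);
  support = hwloc_topology_get_support(topology);
  /* misc->imported_support is set via the "custom.exported_support" marker */
  printf("imported=%u set_thisthread_cpubind=%u\n",
         support->misc->imported_support,
         support->cpubind->set_thisthread_cpubind);
  hwloc_topology_destroy(topology);
  return 0;
}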
static int
hwloc__xml_v2import_distances(hwloc_topology_t topology,
hwloc__xml_import_state_t state,
@@ -1317,7 +1409,8 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
nr_u64values = 0;
while (1) {
struct hwloc__xml_import_state_s childstate;
char *attrname, *attrvalue, *tag, *buffer;
char *attrname, *attrvalue, *tag;
const char *buffer;
int length;
int is_index = 0;
int is_u64values = 0;
@@ -1356,7 +1449,7 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
if (is_index) {
/* get indexes */
char *tmp, *tmp2;
const char *tmp, *tmp2;
if (nr_indexes >= nbobjs) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: %s with more than %u indexes\n",
@@ -1369,6 +1462,9 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
unsigned long long u;
if (heterotypes) {
hwloc_obj_type_t t = HWLOC_OBJ_TYPE_NONE;
if (!*tmp)
/* reached the end of this indexes attribute */
break;
if (hwloc_type_sscanf(tmp, &t, NULL, 0) < 0) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: %s with unrecognized heterogeneous type %s\n",
@@ -1398,7 +1494,7 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
} else if (is_u64values) {
/* get uint64_t values */
char *tmp;
const char *tmp;
if (nr_u64values >= nbobjs*nbobjs) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: %s with more than %u u64values\n",
@@ -1491,6 +1587,259 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
#undef _TAG_NAME
}
static int
hwloc__xml_import_memattr_value(hwloc_topology_t topology,
hwloc_memattr_id_t id,
unsigned long flags,
hwloc__xml_import_state_t state)
{
char *target_obj_gp_index_s = NULL;
char *target_obj_type_s = NULL;
hwloc_uint64_t target_obj_gp_index;
char *value_s = NULL;
hwloc_uint64_t value;
char *initiator_cpuset_s = NULL;
char *initiator_obj_gp_index_s = NULL;
char *initiator_obj_type_s = NULL;
hwloc_obj_type_t target_obj_type = HWLOC_OBJ_TYPE_NONE;
while (1) {
char *attrname, *attrvalue;
if (state->global->next_attr(state, &attrname, &attrvalue) < 0)
break;
if (!strcmp(attrname, "target_obj_gp_index"))
target_obj_gp_index_s = attrvalue;
else if (!strcmp(attrname, "target_obj_type"))
target_obj_type_s = attrvalue;
else if (!strcmp(attrname, "value"))
value_s = attrvalue;
else if (!strcmp(attrname, "initiator_cpuset"))
initiator_cpuset_s = attrvalue;
else if (!strcmp(attrname, "initiator_obj_gp_index"))
initiator_obj_gp_index_s = attrvalue;
else if (!strcmp(attrname, "initiator_obj_type"))
initiator_obj_type_s = attrvalue;
else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown memattr_value attribute %s\n",
state->global->msgprefix, attrname);
return -1;
}
}
if (!target_obj_type_s) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring memattr_value without target_obj_type.\n",
state->global->msgprefix);
return -1;
}
if (hwloc_type_sscanf(target_obj_type_s, &target_obj_type, NULL, 0) < 0) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: failed to identify memattr_value target object type %s\n",
state->global->msgprefix, target_obj_type_s);
return -1;
}
if (!value_s || !target_obj_gp_index_s) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring memattr_value without value and target_obj_gp_index\n",
state->global->msgprefix);
return -1;
}
target_obj_gp_index = strtoull(target_obj_gp_index_s, NULL, 10);
value = strtoull(value_s, NULL, 10);
if (flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
/* add a value with initiator */
struct hwloc_internal_location_s loc;
if (!initiator_cpuset_s && (!initiator_obj_gp_index_s || !initiator_obj_type_s)) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring memattr_value without initiator attributes\n",
state->global->msgprefix);
return -1;
}
/* setup the initiator */
if (initiator_cpuset_s) {
loc.type = HWLOC_LOCATION_TYPE_CPUSET;
loc.location.cpuset = hwloc_bitmap_alloc();
if (!loc.location.cpuset) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: failed to allocated memattr_value initiator cpuset\n",
state->global->msgprefix);
return -1;
}
hwloc_bitmap_sscanf(loc.location.cpuset, initiator_cpuset_s);
} else {
loc.type = HWLOC_LOCATION_TYPE_OBJECT;
loc.location.object.gp_index = strtoull(initiator_obj_gp_index_s, NULL, 10);
if (hwloc_type_sscanf(initiator_obj_type_s, &loc.location.object.type, NULL, 0) < 0) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: failed to identify memattr_value initiator object type %s\n",
state->global->msgprefix, initiator_obj_type_s);
return -1;
}
}
hwloc_internal_memattr_set_value(topology, id, target_obj_type, target_obj_gp_index, (unsigned)-1, &loc, value);
if (loc.type == HWLOC_LOCATION_TYPE_CPUSET)
hwloc_bitmap_free(loc.location.cpuset);
} else {
/* add a value without initiator */
hwloc_internal_memattr_set_value(topology, id, target_obj_type, target_obj_gp_index, (unsigned)-1, NULL, value);
}
return 0;
}
static int
hwloc__xml_import_memattr(hwloc_topology_t topology,
hwloc__xml_import_state_t state)
{
char *name = NULL;
unsigned long flags = (unsigned long) -1;
hwloc_memattr_id_t id = (hwloc_memattr_id_t) -1;
int ret;
while (1) {
char *attrname, *attrvalue;
if (state->global->next_attr(state, &attrname, &attrvalue) < 0)
break;
if (!strcmp(attrname, "name"))
name = attrvalue;
else if (!strcmp(attrname, "flags"))
flags = strtoul(attrvalue, NULL, 10);
else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown memattr attribute %s\n",
state->global->msgprefix, attrname);
return -1;
}
}
if (name && flags != (unsigned long) -1) {
hwloc_memattr_id_t _id;
ret = hwloc_memattr_get_by_name(topology, name, &_id);
if (ret < 0) {
/* register a new attribute */
ret = hwloc_memattr_register(topology, name, flags, &_id);
if (!ret)
id = _id;
} else {
/* check the flags of the existing attribute */
unsigned long mflags;
ret = hwloc_memattr_get_flags(topology, _id, &mflags);
if (!ret && mflags == flags)
id = _id;
}
/* if there's no matching attribute, id is -1 and values will be ignored below */
}
while (1) {
struct hwloc__xml_import_state_s childstate;
char *tag;
ret = state->global->find_child(state, &childstate, &tag);
if (ret <= 0)
break;
if (!strcmp(tag, "memattr_value")) {
ret = hwloc__xml_import_memattr_value(topology, id, flags, &childstate);
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: memattr with unrecognized child %s\n",
state->global->msgprefix, tag);
ret = -1;
}
if (ret < 0)
goto error;
state->global->close_child(&childstate);
}
return state->global->close_tag(state);
error:
return -1;
}
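The element shape this parser accepts can be read off the attribute names above; an illustrative fragment (all values invented, flags assumed to combine HWLOC_MEMATTR_FLAG_HIGHER_FIRST and HWLOC_MEMATTR_FLAG_NEED_INITIATOR) would be:
<memattr name="Bandwidth" flags="5">
  <memattr_value target_obj_type="NUMANode" target_obj_gp_index="4"
                 value="23000" initiator_cpuset="0x000000ff"/>
</memattr>
An initiator (cpuset or object) is only required when the NEED_INITIATOR flag is set, matching the branch in hwloc__xml_import_memattr_value() above.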
static int
hwloc__xml_import_cpukind(hwloc_topology_t topology,
hwloc__xml_import_state_t state)
{
hwloc_bitmap_t cpuset = NULL;
int forced_efficiency = HWLOC_CPUKIND_EFFICIENCY_UNKNOWN;
unsigned nr_infos = 0;
struct hwloc_info_s *infos = NULL;
int ret;
while (1) {
char *attrname, *attrvalue;
if (state->global->next_attr(state, &attrname, &attrvalue) < 0)
break;
if (!strcmp(attrname, "cpuset")) {
if (!cpuset)
cpuset = hwloc_bitmap_alloc();
hwloc_bitmap_sscanf(cpuset, attrvalue);
} else if (!strcmp(attrname, "forced_efficiency")) {
forced_efficiency = atoi(attrvalue);
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown cpukind attribute %s\n",
state->global->msgprefix, attrname);
hwloc_bitmap_free(cpuset);
return -1;
}
}
while (1) {
struct hwloc__xml_import_state_s childstate;
char *tag;
ret = state->global->find_child(state, &childstate, &tag);
if (ret <= 0)
break;
if (!strcmp(tag, "info")) {
char *infoname = NULL;
char *infovalue = NULL;
ret = hwloc___xml_import_info(&infoname, &infovalue, &childstate);
if (!ret && infoname && infovalue)
hwloc__add_info(&infos, &nr_infos, infoname, infovalue);
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: cpukind with unrecognized child %s\n",
state->global->msgprefix, tag);
ret = -1;
}
if (ret < 0)
goto error;
state->global->close_child(&childstate);
}
if (!cpuset) {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring cpukind without cpuset\n",
state->global->msgprefix);
goto error;
}
hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
return state->global->close_tag(state);
error:
hwloc__free_infos(infos, nr_infos);
hwloc_bitmap_free(cpuset);
return -1;
}
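Likewise for cpukind, an illustrative fragment matching the attributes and the reused <info> child parsed above (cpuset, efficiency and info values invented):
<cpukind cpuset="0x000000f0" forced_efficiency="1">
  <info name="CoreType" value="IntelAtom"/>
</cpukind>
cpuset is the only mandatory attribute; the import above rejects a cpukind without one.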
static int
hwloc__xml_import_diff_one(hwloc__xml_import_state_t state,
hwloc_topology_diff_t *firstdiffp,
@@ -1759,6 +2108,18 @@ hwloc_look_xml(struct hwloc_backend *backend, struct hwloc_disc_status *dstatus)
ret = hwloc__xml_v2import_distances(topology, &childstate, 1);
if (ret < 0)
goto failed;
} else if (!strcmp(tag, "support")) {
ret = hwloc__xml_v2import_support(topology, &childstate);
if (ret < 0)
goto failed;
} else if (!strcmp(tag, "memattr")) {
ret = hwloc__xml_import_memattr(topology, &childstate);
if (ret < 0)
goto failed;
} else if (!strcmp(tag, "cpukind")) {
ret = hwloc__xml_import_cpukind(topology, &childstate);
if (ret < 0)
goto failed;
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring unknown tag `%s' after root object.\n",
@@ -1864,12 +2225,14 @@ done:
/* keep the "Backend" information intact */
/* we could add "BackendSource=XML" to notify that XML was used between the actual backend and here */
topology->support.discovery->pu = 1;
topology->support.discovery->disallowed_pu = 1;
if (data->nbnumanodes) {
topology->support.discovery->numa = 1;
topology->support.discovery->numa_memory = 1; // FIXME
topology->support.discovery->disallowed_numa = 1;
if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT)) {
topology->support.discovery->pu = 1;
topology->support.discovery->disallowed_pu = 1;
if (data->nbnumanodes) {
topology->support.discovery->numa = 1;
topology->support.discovery->numa_memory = 1; // FIXME
topology->support.discovery->disallowed_numa = 1;
}
}
if (data->look_done)
@@ -2620,9 +2983,199 @@ hwloc__xml_v2export_distances(hwloc__xml_export_state_t parentstate, hwloc_topol
hwloc___xml_v2export_distances(parentstate, dist);
}
static void
hwloc__xml_v2export_support(hwloc__xml_export_state_t parentstate, hwloc_topology_t topology)
{
struct hwloc__xml_export_state_s state;
char tmp[11];
#ifdef HWLOC_DEBUG
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_support) == 4*sizeof(void*));
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_discovery_support) == 6);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_cpubind_support) == 11);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_membind_support) == 15);
HWLOC_BUILD_ASSERT(sizeof(struct hwloc_topology_misc_support) == 1);
#endif
#define DO(_cat,_name) do { \
if (topology->support._cat->_name) { \
parentstate->new_child(parentstate, &state, "support"); \
state.new_prop(&state, "name", #_cat "." #_name); \
if (topology->support._cat->_name != 1) { \
sprintf(tmp, "%u", topology->support._cat->_name); \
state.new_prop(&state, "value", tmp); \
} \
state.end_object(&state, "support"); \
} \
} while (0)
DO(discovery,pu);
DO(discovery,numa);
DO(discovery,numa_memory);
DO(discovery,disallowed_pu);
DO(discovery,disallowed_numa);
DO(discovery,cpukind_efficiency);
DO(cpubind,set_thisproc_cpubind);
DO(cpubind,get_thisproc_cpubind);
DO(cpubind,set_proc_cpubind);
DO(cpubind,get_proc_cpubind);
DO(cpubind,set_thisthread_cpubind);
DO(cpubind,get_thisthread_cpubind);
DO(cpubind,set_thread_cpubind);
DO(cpubind,get_thread_cpubind);
DO(cpubind,get_thisproc_last_cpu_location);
DO(cpubind,get_proc_last_cpu_location);
DO(cpubind,get_thisthread_last_cpu_location);
DO(membind,set_thisproc_membind);
DO(membind,get_thisproc_membind);
DO(membind,set_proc_membind);
DO(membind,get_proc_membind);
DO(membind,set_thisthread_membind);
DO(membind,get_thisthread_membind);
DO(membind,set_area_membind);
DO(membind,get_area_membind);
DO(membind,alloc_membind);
DO(membind,firsttouch_membind);
DO(membind,bind_membind);
DO(membind,interleave_membind);
DO(membind,nexttouch_membind);
DO(membind,migrate_membind);
DO(membind,get_area_memlocation);
/* misc.imported_support would be meaningless in the remote importer,
* but the importer needs to know whether we exported support or not
* (in case there are no support bits set at all),
* use a custom/fake field to do so.
*/
parentstate->new_child(parentstate, &state, "support");
state.new_prop(&state, "name", "custom.exported_support");
state.end_object(&state, "support");
#undef DO
}
static void
hwloc__xml_export_memattr_target(hwloc__xml_export_state_t state,
struct hwloc_internal_memattr_s *imattr,
struct hwloc_internal_memattr_target_s *imtg)
{
struct hwloc__xml_export_state_s vstate;
char tmp[255];
if (imattr->flags & HWLOC_MEMATTR_FLAG_NEED_INITIATOR) {
/* export all initiators */
unsigned k;
for(k=0; k<imtg->nr_initiators; k++) {
struct hwloc_internal_memattr_initiator_s *imi = &imtg->initiators[k];
state->new_child(state, &vstate, "memattr_value");
vstate.new_prop(&vstate, "target_obj_type", hwloc_obj_type_string(imtg->type));
snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long) imtg->gp_index);
vstate.new_prop(&vstate, "target_obj_gp_index", tmp);
snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long) imi->value);
vstate.new_prop(&vstate, "value", tmp);
switch (imi->initiator.type) {
case HWLOC_LOCATION_TYPE_OBJECT:
snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long) imi->initiator.location.object.gp_index);
vstate.new_prop(&vstate, "initiator_obj_gp_index", tmp);
vstate.new_prop(&vstate, "initiator_obj_type", hwloc_obj_type_string(imi->initiator.location.object.type));
break;
case HWLOC_LOCATION_TYPE_CPUSET: {
char *setstring;
hwloc_bitmap_asprintf(&setstring, imi->initiator.location.cpuset);
if (setstring)
vstate.new_prop(&vstate, "initiator_cpuset", setstring);
free(setstring);
break;
}
default:
assert(0);
}
vstate.end_object(&vstate, "memattr_value");
}
} else {
/* just export the global value */
state->new_child(state, &vstate, "memattr_value");
vstate.new_prop(&vstate, "target_obj_type", hwloc_obj_type_string(imtg->type));
snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long) imtg->gp_index);
vstate.new_prop(&vstate, "target_obj_gp_index", tmp);
snprintf(tmp, sizeof(tmp), "%llu", (unsigned long long) imtg->noinitiator_value);
vstate.new_prop(&vstate, "value", tmp);
vstate.end_object(&vstate, "memattr_value");
}
}
static void
hwloc__xml_export_memattrs(hwloc__xml_export_state_t state, hwloc_topology_t topology)
{
unsigned id;
for(id=0; id<topology->nr_memattrs; id++) {
struct hwloc_internal_memattr_s *imattr;
struct hwloc__xml_export_state_s mstate;
char tmp[255];
unsigned j;
if (id == HWLOC_MEMATTR_ID_CAPACITY || id == HWLOC_MEMATTR_ID_LOCALITY)
/* no need to export virtual memattrs */
continue;
imattr = &topology->memattrs[id];
if ((id == HWLOC_MEMATTR_ID_LATENCY || id == HWLOC_MEMATTR_ID_BANDWIDTH)
&& !imattr->nr_targets)
/* no need to export target-less initial attributes; no release supports importing attributes without those definitions */
continue;
state->new_child(state, &mstate, "memattr");
mstate.new_prop(&mstate, "name", imattr->name);
snprintf(tmp, sizeof(tmp), "%lu", imattr->flags);
mstate.new_prop(&mstate, "flags", tmp);
for(j=0; j<imattr->nr_targets; j++)
hwloc__xml_export_memattr_target(&mstate, imattr, &imattr->targets[j]);
mstate.end_object(&mstate, "memattr");
}
}
static void
hwloc__xml_export_cpukinds(hwloc__xml_export_state_t state, hwloc_topology_t topology)
{
unsigned i;
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
struct hwloc__xml_export_state_s cstate;
char *setstring;
unsigned j;
state->new_child(state, &cstate, "cpukind");
hwloc_bitmap_asprintf(&setstring, kind->cpuset);
cstate.new_prop(&cstate, "cpuset", setstring);
free(setstring);
if (kind->forced_efficiency != HWLOC_CPUKIND_EFFICIENCY_UNKNOWN) {
char tmp[11];
snprintf(tmp, sizeof(tmp), "%d", kind->forced_efficiency);
cstate.new_prop(&cstate, "forced_efficiency", tmp);
}
for(j=0; j<kind->nr_infos; j++) {
char *name = hwloc__xml_export_safestrdup(kind->infos[j].name);
char *value = hwloc__xml_export_safestrdup(kind->infos[j].value);
struct hwloc__xml_export_state_s istate;
cstate.new_child(&cstate, &istate, "info");
istate.new_prop(&istate, "name", name);
istate.new_prop(&istate, "value", value);
istate.end_object(&istate, "info");
free(name);
free(value);
}
cstate.end_object(&cstate, "cpukind");
}
}
void
hwloc__xml_export_topology(hwloc__xml_export_state_t state, hwloc_topology_t topology, unsigned long flags)
{
char *env;
hwloc_obj_t root = hwloc_get_root_obj(topology);
if (flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) {
@@ -2665,6 +3218,11 @@ hwloc__xml_export_topology(hwloc__xml_export_state_t state, hwloc_topology_t top
} else {
hwloc__xml_v2export_object (state, topology, root, flags);
hwloc__xml_v2export_distances (state, topology);
env = getenv("HWLOC_XML_EXPORT_SUPPORT");
if (!env || atoi(env))
hwloc__xml_v2export_support(state, topology);
hwloc__xml_export_memattrs(state, topology);
hwloc__xml_export_cpukinds(state, topology);
}
}
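A sketch of the export side under the same assumptions (standard hwloc API plus POSIX setenv(); the output path is invented): support sections are emitted by default for v2 XML, and setting HWLOC_XML_EXPORT_SUPPORT=0 skips hwloc__xml_v2export_support().
#include <hwloc.h>
#include <stdlib.h>
int main(void)
{
  hwloc_topology_t topology;
  hwloc_topology_init(&topology);
  hwloc_topology_load(topology);
  setenv("HWLOC_XML_EXPORT_SUPPORT", "0", 1);   /* opt out of <support> tags */
  hwloc_topology_export_xml(topology, "no-support.xml", 0);
  hwloc_topology_destroy(topology);
  return 0;
}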


@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -75,16 +75,49 @@ int hwloc_hide_errors(void)
return hide;
}
void hwloc_report_os_error(const char *msg, int line)
/* format the obj info to print in error messages */
static void
report_insert_error_format_obj(char *buf, size_t buflen, hwloc_obj_t obj)
{
char typestr[64];
char *cpusetstr;
char *nodesetstr = NULL;
hwloc_obj_type_snprintf(typestr, sizeof(typestr), obj, 0);
hwloc_bitmap_asprintf(&cpusetstr, obj->cpuset);
if (obj->nodeset) /* may be missing during insert */
hwloc_bitmap_asprintf(&nodesetstr, obj->nodeset);
if (obj->os_index != HWLOC_UNKNOWN_INDEX)
snprintf(buf, buflen, "%s (P#%u cpuset %s%s%s)",
typestr, obj->os_index, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
else
snprintf(buf, buflen, "%s (cpuset %s%s%s)",
typestr, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
free(cpusetstr);
free(nodesetstr);
}
static void report_insert_error(hwloc_obj_t new, hwloc_obj_t old, const char *msg, const char *reason)
{
static int reported = 0;
if (!reported && !hwloc_hide_errors()) {
if (reason && !reported && !hwloc_hide_errors()) {
char newstr[512];
char oldstr[512];
report_insert_error_format_obj(newstr, sizeof(newstr), new);
report_insert_error_format_obj(oldstr, sizeof(oldstr), old);
fprintf(stderr, "****************************************************************************\n");
fprintf(stderr, "* hwloc %s received invalid information from the operating system.\n", HWLOC_VERSION);
fprintf(stderr, "*\n");
fprintf(stderr, "* %s\n", msg);
fprintf(stderr, "* Error occurred in topology.c line %d\n", line);
fprintf(stderr, "* Failed with: %s\n", msg);
fprintf(stderr, "* while inserting %s at %s\n", newstr, oldstr);
fprintf(stderr, "* coming from: %s\n", reason);
fprintf(stderr, "*\n");
fprintf(stderr, "* The following FAQ entry in the hwloc documentation may help:\n");
fprintf(stderr, "* What should I do when hwloc reports \"operating system\" warnings?\n");
@@ -264,7 +297,7 @@ hwloc_setup_pu_level(struct hwloc_topology *topology,
hwloc_debug_2args_bitmap("cpu %u (os %u) has cpuset %s\n",
cpu, oscpu, obj->cpuset);
hwloc_insert_object_by_cpuset(topology, obj);
hwloc__insert_object_by_cpuset(topology, NULL, obj, "core:pulevel");
cpu++;
}
@@ -347,16 +380,18 @@ hwloc_debug_print_object(int indent __hwloc_attribute_unused, hwloc_obj_t obj)
static void
hwloc_debug_print_objects(int indent __hwloc_attribute_unused, hwloc_obj_t obj)
{
hwloc_obj_t child;
hwloc_debug_print_object(indent, obj);
for_each_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_memory_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_io_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_misc_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
if (hwloc_debug_enabled() >= 2) {
hwloc_obj_t child;
hwloc_debug_print_object(indent, obj);
for_each_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_memory_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_io_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
for_each_misc_child (child, obj)
hwloc_debug_print_objects(indent + 1, child);
}
}
#else /* !HWLOC_DEBUG */
#define hwloc_debug_print_object(indent, obj) do { /* nothing */ } while (0)
@@ -472,29 +507,33 @@ int hwloc_obj_add_info(hwloc_obj_t obj, const char *name, const char *value)
}
/* This function may be called with topology->tma set, it cannot free() or realloc() */
static int hwloc__tma_dup_infos(struct hwloc_tma *tma, hwloc_obj_t new, hwloc_obj_t src)
int hwloc__tma_dup_infos(struct hwloc_tma *tma,
struct hwloc_info_s **newip, unsigned *newcp,
struct hwloc_info_s *oldi, unsigned oldc)
{
struct hwloc_info_s *newi;
unsigned i, j;
new->infos = hwloc_tma_calloc(tma, src->infos_count * sizeof(*src->infos));
if (!new->infos)
newi = hwloc_tma_calloc(tma, oldc * sizeof(*newi));
if (!newi)
return -1;
for(i=0; i<src->infos_count; i++) {
new->infos[i].name = hwloc_tma_strdup(tma, src->infos[i].name);
new->infos[i].value = hwloc_tma_strdup(tma, src->infos[i].value);
if (!new->infos[i].name || !new->infos[i].value)
for(i=0; i<oldc; i++) {
newi[i].name = hwloc_tma_strdup(tma, oldi[i].name);
newi[i].value = hwloc_tma_strdup(tma, oldi[i].value);
if (!newi[i].name || !newi[i].value)
goto failed;
}
new->infos_count = src->infos_count;
*newip = newi;
*newcp = oldc;
return 0;
failed:
assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */
for(j=0; j<=i; j++) {
free(new->infos[i].name);
free(new->infos[i].value);
free(newi[j].name);
free(newi[j].value);
}
free(new->infos);
new->infos = NULL;
free(newi);
*newip = NULL;
return -1;
}
@@ -528,8 +567,9 @@ hwloc_free_unlinked_object(hwloc_obj_t obj)
}
/* Replace old with contents of new object, and make new freeable by the caller.
* Only updates next_sibling/first_child pointers,
* so may only be used during early discovery.
* Requires reconnect (for sibling pointers and group depth),
* fixup of sets (only the main cpuset was likely compared before merging),
* and update of total_memory and group depth.
*/
static void
hwloc_replace_linked_object(hwloc_obj_t old, hwloc_obj_t new)
@@ -812,7 +852,7 @@ hwloc__duplicate_object(struct hwloc_topology *newtopology,
newobj->nodeset = hwloc_bitmap_tma_dup(tma, src->nodeset);
newobj->complete_nodeset = hwloc_bitmap_tma_dup(tma, src->complete_nodeset);
hwloc__tma_dup_infos(tma, newobj, src);
hwloc__tma_dup_infos(tma, &newobj->infos, &newobj->infos_count, src->infos, src->infos_count);
/* find our level */
if (src->depth < 0) {
@@ -970,6 +1010,7 @@ hwloc__topology_dup(hwloc_topology_t *newp,
memcpy(new->support.discovery, old->support.discovery, sizeof(*old->support.discovery));
memcpy(new->support.cpubind, old->support.cpubind, sizeof(*old->support.cpubind));
memcpy(new->support.membind, old->support.membind, sizeof(*old->support.membind));
memcpy(new->support.misc, old->support.misc, sizeof(*old->support.misc));
new->allowed_cpuset = hwloc_bitmap_tma_dup(tma, old->allowed_cpuset);
new->allowed_nodeset = hwloc_bitmap_tma_dup(tma, old->allowed_nodeset);
@@ -1008,6 +1049,14 @@ hwloc__topology_dup(hwloc_topology_t *newp,
if (err < 0)
goto out_with_topology;
err = hwloc_internal_memattrs_dup(new, old);
if (err < 0)
goto out_with_topology;
err = hwloc_internal_cpukinds_dup(new, old);
if (err < 0)
goto out_with_topology;
/* we connected everything during duplication */
new->modified = 0;
@@ -1229,31 +1278,6 @@ hwloc__object_cpusets_compare_first(hwloc_obj_t obj1, hwloc_obj_t obj2)
return 0;
}
/* format the obj info to print in error messages */
static void
hwloc__report_error_format_obj(char *buf, size_t buflen, hwloc_obj_t obj)
{
char typestr[64];
char *cpusetstr;
char *nodesetstr = NULL;
hwloc_obj_type_snprintf(typestr, sizeof(typestr), obj, 0);
hwloc_bitmap_asprintf(&cpusetstr, obj->cpuset);
if (obj->nodeset) /* may be missing during insert */
hwloc_bitmap_asprintf(&nodesetstr, obj->nodeset);
if (obj->os_index != HWLOC_UNKNOWN_INDEX)
snprintf(buf, buflen, "%s (P#%u cpuset %s%s%s)",
typestr, obj->os_index, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
else
snprintf(buf, buflen, "%s (cpuset %s%s%s)",
typestr, cpusetstr,
nodesetstr ? " nodeset " : "",
nodesetstr ? nodesetstr : "");
free(cpusetstr);
free(nodesetstr);
}
/*
* How to insert objects into the topology.
*
@@ -1325,7 +1349,7 @@ merge_insert_equal(hwloc_obj_t new, hwloc_obj_t old)
/* returns the result of merge, or NULL if not merged */
static __hwloc_inline hwloc_obj_t
hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
hwloc__insert_try_merge_group(hwloc_topology_t topology, hwloc_obj_t old, hwloc_obj_t new)
{
if (new->type == HWLOC_OBJ_GROUP && old->type == HWLOC_OBJ_GROUP) {
/* which group do we keep? */
@@ -1336,6 +1360,7 @@ hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
/* keep the new one, it doesn't want to be merged */
hwloc_replace_linked_object(old, new);
topology->modified = 1;
return new;
} else {
@@ -1343,9 +1368,12 @@ hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
/* keep the old one, it doesn't want to be merged */
return old;
/* compare subkinds to decice who to keep */
if (new->attr->group.kind < old->attr->group.kind)
/* compare subkinds to decide which group to keep */
if (new->attr->group.kind < old->attr->group.kind) {
/* keep smaller kind */
hwloc_replace_linked_object(old, new);
topology->modified = 1;
}
return old;
}
}
@@ -1371,6 +1399,7 @@ hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
* and let the caller free the new object
*/
hwloc_replace_linked_object(old, new);
topology->modified = 1;
return old;
} else {
@@ -1390,9 +1419,9 @@ hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
*/
static struct hwloc_obj *
hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur, hwloc_obj_t obj,
hwloc_report_error_t report_error)
const char *reason)
{
hwloc_obj_t child, next_child = NULL;
hwloc_obj_t child, next_child = NULL, tmp;
/* These will always point to the pointer to their next last child. */
hwloc_obj_t *cur_children = &cur->first_child;
hwloc_obj_t *obj_children = &obj->first_child;
@@ -1412,7 +1441,7 @@ hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur
int setres = res;
if (res == HWLOC_OBJ_EQUAL) {
hwloc_obj_t merged = hwloc__insert_try_merge_group(child, obj);
hwloc_obj_t merged = hwloc__insert_try_merge_group(topology, child, obj);
if (merged)
return merged;
/* otherwise compare actual types to decide of the inclusion */
@@ -1430,18 +1459,10 @@ hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur
case HWLOC_OBJ_INCLUDED:
/* OBJ is strictly contained is some child of CUR, go deeper. */
return hwloc___insert_object_by_cpuset(topology, child, obj, report_error);
return hwloc___insert_object_by_cpuset(topology, child, obj, reason);
case HWLOC_OBJ_INTERSECTS:
if (report_error) {
char childstr[512];
char objstr[512];
char msg[1100];
hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
hwloc__report_error_format_obj(childstr, sizeof(childstr), child);
snprintf(msg, sizeof(msg), "%s intersects with %s without inclusion!", objstr, childstr);
report_error(msg, __LINE__);
}
report_insert_error(obj, child, "intersection without inclusion", reason);
goto putback;
case HWLOC_OBJ_DIFFERENT:
@@ -1464,6 +1485,8 @@ hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur
if (setres == HWLOC_OBJ_EQUAL) {
obj->memory_first_child = child->memory_first_child;
child->memory_first_child = NULL;
for(tmp=obj->memory_first_child; tmp; tmp = tmp->next_sibling)
tmp->parent = obj;
}
break;
}
@@ -1483,7 +1506,9 @@ hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur
return obj;
putback:
/* Put-back OBJ children in CUR and return an error. */
/* OBJ cannot be inserted.
* Put OBJ's children back in CUR and return an error.
*/
if (putp)
cur_children = putp; /* No need to try to insert before where OBJ was supposed to go */
else
@@ -1492,12 +1517,12 @@ hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur
while ((child = obj->first_child) != NULL) {
/* Remove from OBJ */
obj->first_child = child->next_sibling;
obj->parent = cur;
/* Find child position in CUR, and insert. */
/* Find child position in CUR, and reinsert it. */
while (*cur_children && hwloc__object_cpusets_compare_first(*cur_children, child) < 0)
cur_children = &(*cur_children)->next_sibling;
child->next_sibling = *cur_children;
*cur_children = child;
child->parent = cur;
}
return NULL;
}
@@ -1521,7 +1546,7 @@ hwloc__find_obj_covering_memory_cpuset(struct hwloc_topology *topology, hwloc_ob
static struct hwloc_obj *
hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t obj,
hwloc_report_error_t report_error)
const char *reason)
{
hwloc_obj_t parent, group, result;
@@ -1573,7 +1598,7 @@ hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t ob
return parent;
}
result = hwloc__insert_object_by_cpuset(topology, parent, group, report_error);
result = hwloc__insert_object_by_cpuset(topology, parent, group, reason);
if (!result) {
/* failed to insert, fallback to larger parent */
return parent;
@@ -1586,8 +1611,7 @@ hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t ob
/* only works for MEMCACHE and NUMAnode with a single bit in nodeset */
static hwloc_obj_t
hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_obj_t parent,
hwloc_obj_t obj,
hwloc_report_error_t report_error)
hwloc_obj_t obj, const char *reason)
{
hwloc_obj_t *curp = &parent->memory_first_child;
unsigned first = hwloc_bitmap_first(obj->nodeset);
@@ -1611,20 +1635,12 @@ hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_o
if (obj->type == HWLOC_OBJ_NUMANODE) {
if (cur->type == HWLOC_OBJ_NUMANODE) {
/* identical NUMA nodes? ignore the new one */
if (report_error) {
char curstr[512];
char objstr[512];
char msg[1100];
hwloc__report_error_format_obj(curstr, sizeof(curstr), cur);
hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
snprintf(msg, sizeof(msg), "%s and %s have identical nodesets!", objstr, curstr);
report_error(msg, __LINE__);
}
report_insert_error(obj, cur, "NUMAnodes with identical nodesets", reason);
return NULL;
}
assert(cur->type == HWLOC_OBJ_MEMCACHE);
/* insert the new NUMA node below that existing memcache */
return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, reason);
} else {
assert(obj->type == HWLOC_OBJ_MEMCACHE);
@@ -1637,7 +1653,7 @@ hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_o
* (depth starts from the NUMA node).
* insert the new memcache below the existing one
*/
return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, reason);
}
/* insert the memcache above the existing memcache or numa node */
obj->next_sibling = cur->next_sibling;
@@ -1673,8 +1689,7 @@ hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_o
*/
struct hwloc_obj *
hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
hwloc_obj_t obj,
hwloc_report_error_t report_error)
hwloc_obj_t obj, const char *reason)
{
hwloc_obj_t result;
@@ -1704,7 +1719,7 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
hwloc_bitmap_copy(obj->complete_cpuset, parent->complete_cpuset);
#endif
result = hwloc___attach_memory_object_by_nodeset(topology, parent, obj, report_error);
result = hwloc___attach_memory_object_by_nodeset(topology, parent, obj, reason);
if (result == obj) {
/* Add the bit to the top sets, and to the parent CPU-side object */
if (obj->type == HWLOC_OBJ_NUMANODE) {
@@ -1722,8 +1737,7 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
/* insertion routine that lets you change the error reporting callback */
struct hwloc_obj *
hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root,
hwloc_obj_t obj,
hwloc_report_error_t report_error)
hwloc_obj_t obj, const char *reason)
{
struct hwloc_obj *result;
@@ -1740,20 +1754,20 @@ hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root
if (hwloc__obj_type_is_memory(obj->type)) {
if (!root) {
root = hwloc__find_insert_memory_parent(topology, obj, report_error);
root = hwloc__find_insert_memory_parent(topology, obj, reason);
if (!root) {
hwloc_free_unlinked_object(obj);
return NULL;
}
}
return hwloc__attach_memory_object(topology, root, obj, report_error);
return hwloc__attach_memory_object(topology, root, obj, reason);
}
if (!root)
/* Start at the top. */
root = topology->levels[0][0];
result = hwloc___insert_object_by_cpuset(topology, root, obj, report_error);
result = hwloc___insert_object_by_cpuset(topology, root, obj, reason);
if (result && result->type == HWLOC_OBJ_PU) {
/* Add the bit to the top sets */
if (hwloc_bitmap_isset(result->cpuset, result->os_index))
@@ -1769,12 +1783,6 @@ hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root
/* the default insertion routine warns in case of error.
* it's used by most backends */
struct hwloc_obj *
hwloc_insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t obj)
{
return hwloc__insert_object_by_cpuset(topology, NULL, obj, hwloc_report_os_error);
}
void
hwloc_insert_object_by_parent(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_obj_t obj)
{
@@ -1917,6 +1925,7 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
if (hwloc_bitmap_isset(nodeset, numa->os_index))
hwloc_bitmap_or(obj->cpuset, obj->cpuset, numa->cpuset);
}
/* FIXME insert by nodeset to group NUMAs even if CPUless? */
cmp = hwloc_obj_cmp_sets(obj, root);
if (cmp == HWLOC_OBJ_INCLUDED) {
@@ -1928,12 +1937,24 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
if (!res)
return NULL;
if (res != obj)
/* merged */
if (res != obj && res->type != HWLOC_OBJ_GROUP)
/* merged, not into a Group, nothing to update */
return res;
/* res == obj means that the object was inserted.
* We need to reconnect levels, fill all its cpu/node sets,
* compute its total memory, group depth, etc.
*
* res != obj usually means that our new group was merged into an
* existing object, no need to recompute anything.
* However, if merging with an existing group, depending on their kinds,
* the contents of obj may overwrite the contents of the old group.
* This requires reconnecting levels, filling sets, recomputing total memory, etc.
*/
/* properly inserted */
hwloc_obj_add_children_sets(obj);
hwloc_obj_add_children_sets(res);
if (hwloc_topology_reconnect(topology, 0) < 0)
return NULL;
@@ -1945,7 +1966,7 @@ hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t
#endif
hwloc_topology_check(topology);
return obj;
return res;
}
hwloc_obj_t
@@ -2047,7 +2068,7 @@ hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_topology *topology,
hwloc_bitmap_and(cpuset, cpuset, hwloc_topology_get_topology_cpuset(topology));
group_obj->cpuset = hwloc_bitmap_dup(cpuset);
group_obj->attr->group.kind = HWLOC_GROUP_KIND_IO;
parent = hwloc__insert_object_by_cpuset(topology, largeparent, group_obj, hwloc_report_os_error);
parent = hwloc__insert_object_by_cpuset(topology, largeparent, group_obj, "topology:io_parent");
if (!parent)
/* Failed to insert the Group, maybe a conflicting cpuset */
return largeparent;
@@ -3251,7 +3272,7 @@ hwloc_discover(struct hwloc_topology *topology,
* produced by hwloc_setup_pu_level()
*/
/* To be able to just use hwloc_insert_object_by_cpuset to insert the object
/* To be able to just use hwloc__insert_object_by_cpuset to insert the object
* in the topology according to the cpuset, the cpuset field must be
* initialized.
*/
@@ -3356,7 +3377,7 @@ hwloc_discover(struct hwloc_topology *topology,
hwloc_bitmap_set(node->nodeset, 0);
memcpy(&node->attr->numanode, &topology->machine_memory, sizeof(topology->machine_memory));
memset(&topology->machine_memory, 0, sizeof(topology->machine_memory));
hwloc_insert_object_by_cpuset(topology, node);
hwloc__insert_object_by_cpuset(topology, NULL, node, "core:defaultnumanode");
} else {
/* if we're sure we found all NUMA nodes without their sizes (x86 backend?),
* we could split topology->total_memory in all of them.
@@ -3514,6 +3535,7 @@ hwloc_topology_setup_defaults(struct hwloc_topology *topology)
memset(topology->support.discovery, 0, sizeof(*topology->support.discovery));
memset(topology->support.cpubind, 0, sizeof(*topology->support.cpubind));
memset(topology->support.membind, 0, sizeof(*topology->support.membind));
memset(topology->support.misc, 0, sizeof(*topology->support.misc));
/* Only the System object on top by default */
topology->next_gp_index = 1; /* keep 0 as an invalid value */
@@ -3590,6 +3612,7 @@ hwloc__topology_init (struct hwloc_topology **topologyp,
topology->support.discovery = hwloc_tma_malloc(tma, sizeof(*topology->support.discovery));
topology->support.cpubind = hwloc_tma_malloc(tma, sizeof(*topology->support.cpubind));
topology->support.membind = hwloc_tma_malloc(tma, sizeof(*topology->support.membind));
topology->support.misc = hwloc_tma_malloc(tma, sizeof(*topology->support.misc));
topology->nb_levels_allocated = nblevels; /* enough for default 10 levels = Mach+Pack+Die+NUMA+L3+L2+L1d+L1i+Co+PU */
topology->levels = hwloc_tma_calloc(tma, topology->nb_levels_allocated * sizeof(*topology->levels));
@@ -3598,6 +3621,8 @@ hwloc__topology_init (struct hwloc_topology **topologyp,
hwloc__topology_filter_init(topology);
hwloc_internal_distances_init(topology);
hwloc_internal_memattrs_init(topology);
hwloc_internal_cpukinds_init(topology);
topology->userdata_export_cb = NULL;
topology->userdata_import_cb = NULL;
@@ -3691,7 +3716,7 @@ hwloc_topology_set_flags (struct hwloc_topology *topology, unsigned long flags)
return -1;
}
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)) {
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES|HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT)) {
errno = EINVAL;
return -1;
}
@@ -3827,7 +3852,9 @@ hwloc_topology_clear (struct hwloc_topology *topology)
{
/* no need to set to NULL after free() since callers will call setup_defaults() or just destroy the rest of the topology */
unsigned l;
hwloc_internal_cpukinds_destroy(topology);
hwloc_internal_distances_destroy(topology);
hwloc_internal_memattrs_destroy(topology);
hwloc_free_object_and_children(topology->levels[0][0]);
hwloc_bitmap_free(topology->allowed_cpuset);
hwloc_bitmap_free(topology->allowed_nodeset);
@@ -3858,6 +3885,7 @@ hwloc_topology_destroy (struct hwloc_topology *topology)
free(topology->support.discovery);
free(topology->support.cpubind);
free(topology->support.membind);
free(topology->support.misc);
free(topology);
}
@@ -3873,7 +3901,9 @@ hwloc_topology_load (struct hwloc_topology *topology)
return -1;
}
/* initialize envvar-related things */
hwloc_internal_distances_prepare(topology);
hwloc_internal_memattrs_prepare(topology);
if (getenv("HWLOC_XML_USERDATA_NOT_DECODED"))
topology->userdata_not_decoded = 1;
@@ -3954,6 +3984,9 @@ hwloc_topology_load (struct hwloc_topology *topology)
#endif
hwloc_topology_check(topology);
/* Rank cpukinds */
hwloc_internal_cpukinds_rank(topology);
/* Mark distances objs arrays as invalid since we may have removed objects
* from the topology after adding the distances (remove_empty, etc).
* It would be hard to actually verify whether it's needed.
@@ -3964,6 +3997,10 @@ hwloc_topology_load (struct hwloc_topology *topology)
*/
hwloc_internal_distances_refresh(topology);
/* Same for memattrs */
hwloc_internal_memattrs_need_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
topology->is_loaded = 1;
if (topology->backend_phases & HWLOC_DISC_PHASE_TWEAK) {
@@ -4246,10 +4283,12 @@ hwloc_topology_restrict(struct hwloc_topology *topology, hwloc_const_bitmap_t se
/* some objects may have disappeared, we need to update distances objs arrays */
hwloc_internal_distances_invalidate_cached_objs(topology);
hwloc_internal_memattrs_need_refresh(topology);
hwloc_filter_levels_keep_structure(topology);
hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
propagate_total_memory(topology->levels[0][0]);
hwloc_internal_cpukinds_restrict(topology);
#ifndef HWLOC_DEBUG
if (getenv("HWLOC_DEBUG_CHECK"))
@@ -4334,6 +4373,15 @@ hwloc_topology_allow(struct hwloc_topology *topology,
return -1;
}
int
hwloc_topology_refresh(struct hwloc_topology *topology)
{
hwloc_internal_cpukinds_rank(topology);
hwloc_internal_distances_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
return 0;
}
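As a usage sketch (restriction set invented), the new entry point bundles the refresh steps a caller needs after modifying a loaded topology:
hwloc_bitmap_t set = hwloc_bitmap_alloc();
hwloc_bitmap_set_range(set, 0, 3);          /* keep PUs 0-3, illustrative */
hwloc_topology_restrict(topology, set, 0);
hwloc_topology_refresh(topology);           /* re-rank cpukinds, refresh distances/memattrs */
hwloc_bitmap_free(set);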
int
hwloc_topology_is_thissystem(struct hwloc_topology *topology)
{
@@ -4628,6 +4676,9 @@ hwloc__check_misc_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes,
static void
hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t obj)
{
hwloc_uint64_t total_memory;
hwloc_obj_t child;
assert(!hwloc_bitmap_isset(gp_indexes, obj->gp_index));
hwloc_bitmap_set(gp_indexes, obj->gp_index);
@@ -4685,6 +4736,18 @@ hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_
assert(hwloc_cache_type_by_depth_type(obj->attr->cache.depth, obj->attr->cache.type) == obj->type);
}
/* check total memory */
total_memory = 0;
if (obj->type == HWLOC_OBJ_NUMANODE)
total_memory += obj->attr->numanode.local_memory;
for_each_child(child, obj) {
total_memory += child->total_memory;
}
for_each_memory_child(child, obj) {
total_memory += child->total_memory;
}
assert(total_memory == obj->total_memory);
/* check children */
hwloc__check_normal_children(topology, gp_indexes, obj);
hwloc__check_memory_children(topology, gp_indexes, obj);


@@ -1,7 +1,7 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2019 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2010, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -138,6 +138,37 @@ hwloc_obj_type_is_icache(hwloc_obj_type_t type)
return hwloc__obj_type_is_icache(type);
}
static hwloc_obj_t hwloc_get_obj_by_depth_and_gp_index(hwloc_topology_t topology, unsigned depth, uint64_t gp_index)
{
hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
while (obj) {
if (obj->gp_index == gp_index)
return obj;
obj = obj->next_cousin;
}
return NULL;
}
hwloc_obj_t hwloc_get_obj_by_type_and_gp_index(hwloc_topology_t topology, hwloc_obj_type_t type, uint64_t gp_index)
{
int depth = hwloc_get_type_depth(topology, type);
if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
return NULL;
if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
for(depth=1 /* no multiple machine levels */;
(unsigned) depth < topology->nb_levels-1 /* no multiple PU levels */;
depth++) {
if (hwloc_get_depth_type(topology, depth) == type) {
hwloc_obj_t obj = hwloc_get_obj_by_depth_and_gp_index(topology, depth, gp_index);
if (obj)
return obj;
}
}
return NULL;
}
return hwloc_get_obj_by_depth_and_gp_index(topology, depth, gp_index);
}
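A sketch of how a caller, e.g. the memattr machinery, can resolve a (type, gp_index) pair recorded in XML back to an object once levels are connected (gp_index value invented, stdio assumed):
hwloc_obj_t target = hwloc_get_obj_by_type_and_gp_index(topology, HWLOC_OBJ_NUMANODE, 4);
if (target)
  printf("resolved NUMANode L#%u\n", target->logical_index);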
unsigned hwloc_get_closest_objs (struct hwloc_topology *topology, struct hwloc_obj *src, struct hwloc_obj **objs, unsigned max)
{
struct hwloc_obj *parent, *nextparent, **src_objs;
@@ -654,7 +685,11 @@ hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t
unsigned i;
for(i=0; i<obj->infos_count; i++) {
struct hwloc_info_s *info = &obj->infos[i];
const char *quote = strchr(info->value, ' ') ? "\"" : "";
const char *quote;
if (strchr(info->value, ' '))
quote = "\"";
else
quote = "";
res = hwloc_snprintf(tmp, tmplen, "%s%s=%s%s%s",
prefix,
info->name,
@@ -673,3 +708,31 @@ hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t
return ret;
}
int hwloc_bitmap_singlify_per_core(hwloc_topology_t topology, hwloc_bitmap_t cpuset, unsigned which)
{
hwloc_obj_t core = NULL;
while ((core = hwloc_get_next_obj_covering_cpuset_by_type(topology, cpuset, HWLOC_OBJ_CORE, core)) != NULL) {
/* this core has some PUs in the cpuset, find the index-th one */
unsigned i = 0;
int pu = -1;
do {
pu = hwloc_bitmap_next(core->cpuset, pu);
if (pu == -1) {
/* no which-th PU in cpuset and core, remove the entire core */
hwloc_bitmap_andnot(cpuset, cpuset, core->cpuset);
break;
}
if (hwloc_bitmap_isset(cpuset, pu)) {
if (i == which) {
/* remove the entire core except that exact pu */
hwloc_bitmap_andnot(cpuset, cpuset, core->cpuset);
hwloc_bitmap_set(cpuset, pu);
break;
}
i++;
}
} while (1);
}
return 0;
}
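A usage sketch, assuming the standard hwloc bitmap API: keep only the first PU of each core, e.g. to place one thread per physical core and drop SMT siblings.
hwloc_bitmap_t set = hwloc_bitmap_dup(hwloc_topology_get_allowed_cpuset(topology));
hwloc_bitmap_singlify_per_core(topology, set, 0);   /* which==0: first PU per core */
/* ... bind one thread per remaining PU ... */
hwloc_bitmap_free(set);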


@@ -1,38 +0,0 @@
cmake_minimum_required (VERSION 2.8)
project (cpuid C)
add_definitions(/DVERSION="0.4.0")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os")
set(HEADERS
libcpuid.h
libcpuid_types.h
libcpuid_constants.h
libcpuid_internal.h
amd_code_t.h
intel_code_t.h
recog_amd.h
recog_intel.h
asm-bits.h
libcpuid_util.h
)
set(SOURCES
cpuid_main.c
asm-bits.c
recog_amd.c
recog_intel.c
libcpuid_util.c
)
if (CMAKE_CL_64)
enable_language(ASM_MASM)
set(SOURCES_ASM masm-x64.asm)
endif()
add_library(cpuid STATIC
${HEADERS}
${SOURCES}
${SOURCES_ASM}
)


@@ -1,39 +0,0 @@
/*
* Copyright 2016 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains a list of internal codes we use in detection. It is
* of no external use and isn't a complete list of AMD products.
*/
CODE2(OPTERON_800, 1000),
CODE(PHENOM),
CODE(PHENOM2),
CODE(FUSION_C),
CODE(FUSION_E),
CODE(FUSION_EA),
CODE(FUSION_Z),
CODE(FUSION_A),


@@ -1,836 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libcpuid.h"
#include "asm-bits.h"
int cpuid_exists_by_eflags(void)
{
#if defined(PLATFORM_X64)
return 1; /* CPUID is always present on the x86_64 */
#elif defined(PLATFORM_X86)
# if defined(COMPILER_GCC) || defined(COMPILER_CLANG)
int result;
__asm __volatile(
" pushfl\n"
" pop %%eax\n"
" mov %%eax, %%ecx\n"
" xor $0x200000, %%eax\n"
" push %%eax\n"
" popfl\n"
" pushfl\n"
" pop %%eax\n"
" xor %%ecx, %%eax\n"
" mov %%eax, %0\n"
" push %%ecx\n"
" popfl\n"
: "=m"(result)
: :"eax", "ecx", "memory");
return (result != 0);
# elif defined(COMPILER_MICROSOFT)
int result;
__asm {
pushfd
pop eax
mov ecx, eax
xor eax, 0x200000
push eax
popfd
pushfd
pop eax
xor eax, ecx
mov result, eax
push ecx
popfd
};
return (result != 0);
# else
return 0;
# endif /* COMPILER_MICROSOFT */
#elif defined(PLATFORM_ARM)
return 0;
#else
return 0;
#endif /* PLATFORM_X86 */
}
#ifdef INLINE_ASM_SUPPORTED
/*
* with MSVC/AMD64, the exec_cpuid() and cpu_rdtsc() functions
* are implemented in separate .asm files. Otherwise, use inline assembly
*/
void exec_cpuid(uint32_t *regs)
{
# if defined(COMPILER_GCC) || defined(COMPILER_CLANG)
# ifdef PLATFORM_X64
__asm __volatile(
" mov %0, %%rdi\n"
" push %%rbx\n"
" push %%rcx\n"
" push %%rdx\n"
" mov (%%rdi), %%eax\n"
" mov 4(%%rdi), %%ebx\n"
" mov 8(%%rdi), %%ecx\n"
" mov 12(%%rdi), %%edx\n"
" cpuid\n"
" movl %%eax, (%%rdi)\n"
" movl %%ebx, 4(%%rdi)\n"
" movl %%ecx, 8(%%rdi)\n"
" movl %%edx, 12(%%rdi)\n"
" pop %%rdx\n"
" pop %%rcx\n"
" pop %%rbx\n"
:
:"m"(regs)
:"memory", "eax", "rdi"
);
# elif defined(PLATFORM_X86)
__asm __volatile(
" mov %0, %%edi\n"
" push %%ebx\n"
" push %%ecx\n"
" push %%edx\n"
" mov (%%edi), %%eax\n"
" mov 4(%%edi), %%ebx\n"
" mov 8(%%edi), %%ecx\n"
" mov 12(%%edi), %%edx\n"
" cpuid\n"
" mov %%eax, (%%edi)\n"
" mov %%ebx, 4(%%edi)\n"
" mov %%ecx, 8(%%edi)\n"
" mov %%edx, 12(%%edi)\n"
" pop %%edx\n"
" pop %%ecx\n"
" pop %%ebx\n"
:
:"m"(regs)
:"memory", "eax", "edi"
);
# elif defined(PLATFORM_ARM)
# endif /* COMPILER_GCC */
#else
# ifdef COMPILER_MICROSOFT
__asm {
push ebx
push ecx
push edx
push edi
mov edi, regs
mov eax, [edi]
mov ebx, [edi+4]
mov ecx, [edi+8]
mov edx, [edi+12]
cpuid
mov [edi], eax
mov [edi+4], ebx
mov [edi+8], ecx
mov [edi+12], edx
pop edi
pop edx
pop ecx
pop ebx
}
# else
# error "Unsupported compiler"
# endif /* COMPILER_MICROSOFT */
#endif
}
#endif /* INLINE_ASSEMBLY_SUPPORTED */
#ifdef INLINE_ASM_SUPPORTED
void cpu_rdtsc(uint64_t* result)
{
uint32_t low_part, hi_part;
#if defined(COMPILER_GCC) || defined(COMPILER_CLANG)
#ifdef PLATFORM_ARM
low_part = 0;
hi_part = 0;
#else
__asm __volatile (
" rdtsc\n"
" mov %%eax, %0\n"
" mov %%edx, %1\n"
:"=m"(low_part), "=m"(hi_part)::"memory", "eax", "edx"
);
#endif
#else
# ifdef COMPILER_MICROSOFT
__asm {
rdtsc
mov low_part, eax
mov hi_part, edx
};
# else
# error "Unsupported compiler"
# endif /* COMPILER_MICROSOFT */
#endif /* COMPILER_GCC */
*result = (uint64_t)low_part + (((uint64_t) hi_part) << 32);
}
#endif /* INLINE_ASM_SUPPORTED */
#ifdef INLINE_ASM_SUPPORTED
void busy_sse_loop(int cycles)
{
# if defined(COMPILER_GCC) || defined(COMPILER_CLANG)
#ifndef __APPLE__
# define XALIGN ".balign 16\n"
#else
# define XALIGN ".align 4\n"
#endif
#ifdef PLATFORM_ARM
#else
__asm __volatile (
" xorps %%xmm0, %%xmm0\n"
" xorps %%xmm1, %%xmm1\n"
" xorps %%xmm2, %%xmm2\n"
" xorps %%xmm3, %%xmm3\n"
" xorps %%xmm4, %%xmm4\n"
" xorps %%xmm5, %%xmm5\n"
" xorps %%xmm6, %%xmm6\n"
" xorps %%xmm7, %%xmm7\n"
XALIGN
/* ".bsLoop:\n" */
"1:\n"
// 0:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 1:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 2:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 3:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 4:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 5:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 6:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 7:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 8:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
// 9:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//10:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//11:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//12:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//13:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//14:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//15:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//16:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//17:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//18:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//19:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//20:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//21:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//22:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//23:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//24:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//25:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//26:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//27:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//28:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//29:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//30:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
//31:
" addps %%xmm1, %%xmm0\n"
" addps %%xmm2, %%xmm1\n"
" addps %%xmm3, %%xmm2\n"
" addps %%xmm4, %%xmm3\n"
" addps %%xmm5, %%xmm4\n"
" addps %%xmm6, %%xmm5\n"
" addps %%xmm7, %%xmm6\n"
" addps %%xmm0, %%xmm7\n"
" dec %%eax\n"
/* "jnz .bsLoop\n" */
" jnz 1b\n"
::"a"(cycles)
);
#endif
#else
# ifdef COMPILER_MICROSOFT
__asm {
mov eax, cycles
xorps xmm0, xmm0
xorps xmm1, xmm1
xorps xmm2, xmm2
xorps xmm3, xmm3
xorps xmm4, xmm4
xorps xmm5, xmm5
xorps xmm6, xmm6
xorps xmm7, xmm7
//--
align 16
bsLoop:
// 0:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 1:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 2:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 3:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 4:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 5:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 6:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 7:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 8:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 9:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 10:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 11:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 12:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 13:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 14:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 15:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 16:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 17:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 18:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 19:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 20:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 21:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 22:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 23:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 24:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 25:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 26:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 27:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 28:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 29:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 30:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
// 31:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
//----------------------
dec eax
jnz bsLoop
}
# else
# error "Unsupported compiler"
# endif /* COMPILER_MICROSOFT */
#endif /* COMPILER_GCC || COMPILER_CLANG */
}
#endif /* INLINE_ASM_SUPPORTED */
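
The unrolled loop above retires 256 dependent addps instructions per iteration, which makes busy_sse_loop a predictable busy-wait for clock measurement. A minimal sketch of that idea (cpu_tsc_mark, cpu_tsc_unmark and cpu_clock_by_mark are the public API declared in libcpuid.h; busy_sse_loop itself is internal, and the iteration count is an arbitrary illustration):

#include <stdio.h>
#include "libcpuid.h"
#include "asm-bits.h"

int main(void)
{
    struct cpu_mark_t mark;
    cpu_tsc_mark(&mark);    /* snapshot TSC and the system clock */
    busy_sse_loop(100000);  /* 100000 * 256 dependent addps instructions */
    cpu_tsc_unmark(&mark);  /* turn the snapshots into deltas */
    printf("Estimated core clock: %d MHz\n", cpu_clock_by_mark(&mark));
    return 0;
}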


@@ -1,71 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ASM_BITS_H__
#define __ASM_BITS_H__
#include "libcpuid.h"
/* Determine Compiler: */
#if defined(_MSC_VER)
#if !defined(COMPILER_MICROSOFT)
# define COMPILER_MICROSOFT
#endif
#elif defined(__clang__)
/* check __clang__ before __GNUC__: clang defines both */
#if !defined(COMPILER_CLANG)
# define COMPILER_CLANG
#endif
#elif defined(__GNUC__)
#if !defined(COMPILER_GCC)
# define COMPILER_GCC
#endif
#endif
/* Determine Platform */
#if defined(__x86_64__) || defined(_M_AMD64)
#if !defined(PLATFORM_X64)
# define PLATFORM_X64
#endif
#elif defined(__i386__) || defined(_M_IX86)
#if !defined(PLATFORM_X86)
# define PLATFORM_X86
#endif
#elif defined(__ARMEL__)
#if !defined(PLATFORM_ARM)
# define PLATFORM_ARM
#endif
#endif
/* Under Windows/AMD64 with MSVC, inline assembly isn't supported */
#if (((defined(COMPILER_GCC) || defined(COMPILER_CLANG))) && \
(defined(PLATFORM_X64) || defined(PLATFORM_X86) || defined(PLATFORM_ARM))) || \
(defined(COMPILER_MICROSOFT) && defined(PLATFORM_X86))
# define INLINE_ASM_SUPPORTED
#endif
int cpuid_exists_by_eflags(void);
void exec_cpuid(uint32_t *regs);
void busy_sse_loop(int cycles);
#endif /* __ASM_BITS_H__ */


@@ -1,389 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libcpuid.h"
#include "libcpuid_internal.h"
#include "recog_intel.h"
#include "recog_amd.h"
#include "asm-bits.h"
#include "libcpuid_util.h"
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
/* Implementation: */
static int _libcpuid_errno = ERR_OK;
int set_error(cpu_error_t err)
{
_libcpuid_errno = (int) err;
return (int) err;
}
static void cpu_id_t_constructor(struct cpu_id_t* id)
{
memset(id, 0, sizeof(struct cpu_id_t));
id->l1_data_cache = id->l1_instruction_cache = id->l2_cache = id->l3_cache = id->l4_cache = -1;
id->l1_assoc = id->l2_assoc = id->l3_assoc = id->l4_assoc = -1;
id->l1_cacheline = id->l2_cacheline = id->l3_cacheline = id->l4_cacheline = -1;
id->sse_size = -1;
}
/* get_total_cpus() system specific code: uses OS routines to determine total number of CPUs */
#ifdef __APPLE__
#include <unistd.h>
#include <mach/clock_types.h>
#include <mach/clock.h>
#include <mach/mach.h>
static int get_total_cpus(void)
{
kern_return_t kr;
host_basic_info_data_t basic_info;
host_info_t info = (host_info_t)&basic_info;
host_flavor_t flavor = HOST_BASIC_INFO;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
kr = host_info(mach_host_self(), flavor, info, &count);
if (kr != KERN_SUCCESS) return 1;
return basic_info.avail_cpus;
}
#define GET_TOTAL_CPUS_DEFINED
#endif
#ifdef _WIN32
#include <windows.h>
static int get_total_cpus(void)
{
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return system_info.dwNumberOfProcessors;
}
#define GET_TOTAL_CPUS_DEFINED
#endif
#if defined linux || defined __linux__ || defined __sun
#include <sys/sysinfo.h>
#include <unistd.h>
static int get_total_cpus(void)
{
return sysconf(_SC_NPROCESSORS_ONLN);
}
#define GET_TOTAL_CPUS_DEFINED
#endif
#if defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__ || defined __bsdi__ || defined __QNX__
#include <sys/types.h>
#include <sys/sysctl.h>
static int get_total_cpus(void)
{
int mib[2] = { CTL_HW, HW_NCPU };
int ncpus;
size_t len = sizeof(ncpus);
if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) != 0) return 1;
return ncpus;
}
#define GET_TOTAL_CPUS_DEFINED
#endif
#ifndef GET_TOTAL_CPUS_DEFINED
static int get_total_cpus(void)
{
static int warning_printed = 0;
if (!warning_printed) {
warning_printed = 1;
warnf("Your system is not supported by libcpuid -- don't know how to detect the\n");
warnf("total number of CPUs on your system. It will be reported as 1.\n");
printf("Please use cpu_id_t.logical_cpus field instead.\n");
}
return 1;
}
#endif /* GET_TOTAL_CPUS_DEFINED */
static void load_features_common(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
const struct feature_map_t matchtable_edx1[] = {
{ 0, CPU_FEATURE_FPU },
{ 1, CPU_FEATURE_VME },
{ 2, CPU_FEATURE_DE },
{ 3, CPU_FEATURE_PSE },
{ 4, CPU_FEATURE_TSC },
{ 5, CPU_FEATURE_MSR },
{ 6, CPU_FEATURE_PAE },
{ 7, CPU_FEATURE_MCE },
{ 8, CPU_FEATURE_CX8 },
{ 9, CPU_FEATURE_APIC },
{ 11, CPU_FEATURE_SEP },
{ 12, CPU_FEATURE_MTRR },
{ 13, CPU_FEATURE_PGE },
{ 14, CPU_FEATURE_MCA },
{ 15, CPU_FEATURE_CMOV },
{ 16, CPU_FEATURE_PAT },
{ 17, CPU_FEATURE_PSE36 },
{ 19, CPU_FEATURE_CLFLUSH },
{ 23, CPU_FEATURE_MMX },
{ 24, CPU_FEATURE_FXSR },
{ 25, CPU_FEATURE_SSE },
{ 26, CPU_FEATURE_SSE2 },
{ 28, CPU_FEATURE_HT },
};
const struct feature_map_t matchtable_ecx1[] = {
{ 0, CPU_FEATURE_PNI },
{ 1, CPU_FEATURE_PCLMUL },
{ 3, CPU_FEATURE_MONITOR },
{ 9, CPU_FEATURE_SSSE3 },
{ 12, CPU_FEATURE_FMA3 },
{ 13, CPU_FEATURE_CX16 },
{ 19, CPU_FEATURE_SSE4_1 },
{ 20, CPU_FEATURE_SSE4_2 },
{ 22, CPU_FEATURE_MOVBE },
{ 23, CPU_FEATURE_POPCNT },
{ 25, CPU_FEATURE_AES },
{ 26, CPU_FEATURE_XSAVE },
{ 27, CPU_FEATURE_OSXSAVE },
{ 28, CPU_FEATURE_AVX },
{ 29, CPU_FEATURE_F16C },
{ 30, CPU_FEATURE_RDRAND },
};
const struct feature_map_t matchtable_ebx7[] = {
{ 3, CPU_FEATURE_BMI1 },
{ 5, CPU_FEATURE_AVX2 },
{ 8, CPU_FEATURE_BMI2 },
};
const struct feature_map_t matchtable_edx81[] = {
{ 11, CPU_FEATURE_SYSCALL },
{ 27, CPU_FEATURE_RDTSCP },
{ 29, CPU_FEATURE_LM },
};
const struct feature_map_t matchtable_ecx81[] = {
{ 0, CPU_FEATURE_LAHF_LM },
};
const struct feature_map_t matchtable_edx87[] = {
{ 8, CPU_FEATURE_CONSTANT_TSC },
};
if (raw->basic_cpuid[0][0] >= 1) {
match_features(matchtable_edx1, COUNT_OF(matchtable_edx1), raw->basic_cpuid[1][3], data);
match_features(matchtable_ecx1, COUNT_OF(matchtable_ecx1), raw->basic_cpuid[1][2], data);
}
if (raw->basic_cpuid[0][0] >= 7) {
match_features(matchtable_ebx7, COUNT_OF(matchtable_ebx7), raw->basic_cpuid[7][1], data);
}
if (raw->ext_cpuid[0][0] >= 0x80000001) {
match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data);
match_features(matchtable_ecx81, COUNT_OF(matchtable_ecx81), raw->ext_cpuid[1][2], data);
}
if (raw->ext_cpuid[0][0] >= 0x80000007) {
match_features(matchtable_edx87, COUNT_OF(matchtable_edx87), raw->ext_cpuid[7][3], data);
}
if (data->flags[CPU_FEATURE_SSE]) {
/* apply guesswork to check if the SSE unit width is 128 bit */
switch (data->vendor) {
case VENDOR_AMD:
data->sse_size = (data->ext_family >= 16 && data->ext_family != 17) ? 128 : 64;
break;
case VENDOR_INTEL:
data->sse_size = (data->family == 6 && data->ext_model >= 15) ? 128 : 64;
break;
default:
break;
}
/* leave the CPU_HINT_SSE_SIZE_AUTH hint at 0; the advanced per-vendor detection routines
* will set it accordingly if they detect the needed bit */
}
}
static cpu_vendor_t cpuid_vendor_identify(const uint32_t *raw_vendor, char *vendor_str)
{
int i;
cpu_vendor_t vendor = VENDOR_UNKNOWN;
const struct { cpu_vendor_t vendor; char match[16]; }
matchtable[NUM_CPU_VENDORS] = {
/* source: http://www.sandpile.org/ia32/cpuid.htm */
{ VENDOR_INTEL , "GenuineIntel" },
{ VENDOR_AMD , "AuthenticAMD" },
{ VENDOR_CYRIX , "CyrixInstead" },
{ VENDOR_NEXGEN , "NexGenDriven" },
{ VENDOR_TRANSMETA , "GenuineTMx86" },
{ VENDOR_UMC , "UMC UMC UMC " },
{ VENDOR_CENTAUR , "CentaurHauls" },
{ VENDOR_RISE , "RiseRiseRise" },
{ VENDOR_SIS , "SiS SiS SiS " },
{ VENDOR_NSC , "Geode by NSC" },
};
memcpy(vendor_str + 0, &raw_vendor[1], 4);
memcpy(vendor_str + 4, &raw_vendor[3], 4);
memcpy(vendor_str + 8, &raw_vendor[2], 4);
vendor_str[12] = 0;
/* Determine vendor: */
for (i = 0; i < NUM_CPU_VENDORS; i++)
if (!strcmp(vendor_str, matchtable[i].match)) {
vendor = matchtable[i].vendor;
break;
}
return vendor;
}
static int cpuid_basic_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
int i, j, basic, xmodel, xfamily, ext;
char brandstr[64] = {0};
data->vendor = cpuid_vendor_identify(raw->basic_cpuid[0], data->vendor_str);
if (data->vendor == VENDOR_UNKNOWN)
return set_error(ERR_CPU_UNKN);
basic = raw->basic_cpuid[0][0];
if (basic >= 1) {
data->family = (raw->basic_cpuid[1][0] >> 8) & 0xf;
data->model = (raw->basic_cpuid[1][0] >> 4) & 0xf;
data->stepping = raw->basic_cpuid[1][0] & 0xf;
xmodel = (raw->basic_cpuid[1][0] >> 16) & 0xf;
xfamily = (raw->basic_cpuid[1][0] >> 20) & 0xff;
if (data->vendor == VENDOR_AMD && data->family < 0xf)
data->ext_family = data->family;
else
data->ext_family = data->family + xfamily;
data->ext_model = data->model + (xmodel << 4);
}
ext = raw->ext_cpuid[0][0] - 0x8000000;
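/* Worked example of the field decoding above (illustrative): for an Intel CPU
* with EAX(1) = 0x000306A9: family = 6, model = 0xA, stepping = 9, xmodel = 3,
* xfamily = 0, hence ext_family = 6 and ext_model = 0x3A (an Ivy Bridge part). */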
/* obtain the brand string, if present: */
if (ext >= 4) {
for (i = 0; i < 3; i++)
for (j = 0; j < 4; j++)
memcpy(brandstr + i * 16 + j * 4,
&raw->ext_cpuid[2 + i][j], 4);
brandstr[48] = 0;
i = 0;
while (brandstr[i] == ' ') i++;
strncpy(data->brand_str, brandstr + i, sizeof(data->brand_str));
data->brand_str[48] = 0;
}
load_features_common(raw, data);
data->total_logical_cpus = get_total_cpus();
return set_error(ERR_OK);
}
/* Interface: */
int cpuid_get_total_cpus(void)
{
return get_total_cpus();
}
int cpuid_present(void)
{
return cpuid_exists_by_eflags();
}
void cpu_exec_cpuid(uint32_t eax, uint32_t* regs)
{
regs[0] = eax;
regs[1] = regs[2] = regs[3] = 0;
exec_cpuid(regs);
}
void cpu_exec_cpuid_ext(uint32_t* regs)
{
exec_cpuid(regs);
}
int cpuid_get_raw_data(struct cpu_raw_data_t* data)
{
unsigned i;
if (!cpuid_present())
return set_error(ERR_NO_CPUID);
for (i = 0; i < 32; i++)
cpu_exec_cpuid(i, data->basic_cpuid[i]);
for (i = 0; i < 32; i++)
cpu_exec_cpuid(0x80000000 + i, data->ext_cpuid[i]);
for (i = 0; i < MAX_INTELFN4_LEVEL; i++) {
memset(data->intel_fn4[i], 0, sizeof(data->intel_fn4[i]));
data->intel_fn4[i][0] = 4;
data->intel_fn4[i][2] = i;
cpu_exec_cpuid_ext(data->intel_fn4[i]);
}
for (i = 0; i < MAX_INTELFN11_LEVEL; i++) {
memset(data->intel_fn11[i], 0, sizeof(data->intel_fn11[i]));
data->intel_fn11[i][0] = 11;
data->intel_fn11[i][2] = i;
cpu_exec_cpuid_ext(data->intel_fn11[i]);
}
for (i = 0; i < MAX_INTELFN12H_LEVEL; i++) {
memset(data->intel_fn12h[i], 0, sizeof(data->intel_fn12h[i]));
data->intel_fn12h[i][0] = 0x12;
data->intel_fn12h[i][2] = i;
cpu_exec_cpuid_ext(data->intel_fn12h[i]);
}
for (i = 0; i < MAX_INTELFN14H_LEVEL; i++) {
memset(data->intel_fn14h[i], 0, sizeof(data->intel_fn14h[i]));
data->intel_fn14h[i][0] = 0x14;
data->intel_fn14h[i][2] = i;
cpu_exec_cpuid_ext(data->intel_fn14h[i]);
}
return set_error(ERR_OK);
}
int cpu_ident_internal(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal)
{
int r;
struct cpu_raw_data_t myraw;
if (!raw) {
if ((r = cpuid_get_raw_data(&myraw)) < 0)
return set_error(r);
raw = &myraw;
}
cpu_id_t_constructor(data);
if ((r = cpuid_basic_identify(raw, data)) < 0)
return set_error(r);
switch (data->vendor) {
case VENDOR_INTEL:
r = cpuid_identify_intel(raw, data, internal);
break;
case VENDOR_AMD:
r = cpuid_identify_amd(raw, data, internal);
break;
default:
break;
}
return set_error(r);
}
int cpu_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
struct internal_id_info_t throwaway;
return cpu_ident_internal(raw, data, &throwaway);
}
const char* cpuid_lib_version(void)
{
return VERSION;
}


@@ -1,58 +0,0 @@
/*
* Copyright 2016 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains a list of internal codes we use in detection. It is
* of no external use and isn't a complete list of Intel products.
*/
CODE2(PENTIUM, 2000),
CODE(IRWIN),
CODE(POTOMAC),
CODE(GAINESTOWN),
CODE(WESTMERE),
CODE(PENTIUM_M),
CODE(NOT_CELERON),
CODE(CORE_SOLO),
CODE(MOBILE_CORE_SOLO),
CODE(CORE_DUO),
CODE(MOBILE_CORE_DUO),
CODE(WOLFDALE),
CODE(MEROM),
CODE(PENRYN),
CODE(QUAD_CORE),
CODE(DUAL_CORE_HT),
CODE(QUAD_CORE_HT),
CODE(MORE_THAN_QUADCORE),
CODE(PENTIUM_D),
CODE(SILVERTHORNE),
CODE(DIAMONDVILLE),
CODE(PINEVIEW),
CODE(CEDARVIEW),


@@ -1,678 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __LIBCPUID_H__
#define __LIBCPUID_H__
/**
* \file libcpuid.h
* \author Veselin Georgiev
* \date Oct 2008
* \version 0.4.0
*
* Version history:
*
* * 0.1.0 (2008-10-15): initial adaptation from wxfractgui sources
* * 0.1.1 (2009-07-06): Added intel_fn11 fields to cpu_raw_data_t to handle
* new processor topology enumeration required on Core i7
* * 0.1.2 (2009-09-26): Added support for MSR reading through self-extracting
* kernel driver on Win32.
* * 0.1.3 (2010-04-20): Added support for greater, more accurate CPU clock
* measurements with cpu_clock_by_ic()
* * 0.2.0 (2011-10-11): Support for AMD Bulldozer CPUs, 128-bit SSE unit size
* checking. A backwards-incompatible change, since the
* sizeof cpu_id_t is now different.
* * 0.2.1 (2012-05-26): Support for Ivy Bridge, and detecting the presence of
* the RdRand instruction.
* * 0.2.2 (2015-11-04): Support for newer processors up to Haswell and Vishera.
* Fix clock detection in cpu_clock_by_ic() for Bulldozer.
* More entries supported in cpu_msrinfo().
* *BSD and Solaris support (unofficial).
* * 0.3.0 (2016-07-09): Support for Skylake; MSR ops in FreeBSD; INFO_VOLTAGE
* for AMD CPUs. Level 4 cache support for Crystalwell
* (a backwards-incompatible change since the sizeof
* cpu_raw_data_t is now different).
* * 0.4.0 (2016-09-30): Better detection of AMD clock multiplier with msrinfo.
* Support for Intel SGX detection
* (a backwards-incompatible change since the sizeof
* cpu_raw_data_t and cpu_id_t is now different).
*/
/** @mainpage A simple libcpuid introduction
*
* LibCPUID provides CPU identification and access to the CPUID and RDTSC
* instructions on the x86.
* <p>
* To execute CPUID, use \ref cpu_exec_cpuid <br>
* To execute RDTSC, use \ref cpu_rdtsc <br>
* To fetch the CPUID info needed for CPU identification, use
* \ref cpuid_get_raw_data <br>
* To make sense of that data (decode, extract features), use \ref cpu_identify <br>
* To detect the CPU speed, use either \ref cpu_clock, \ref cpu_clock_by_os,
* \ref cpu_tsc_mark + \ref cpu_tsc_unmark + \ref cpu_clock_by_mark,
* \ref cpu_clock_measure or \ref cpu_clock_by_ic.
* Read carefully for pros/cons of each method. <br>
*
* To read MSRs, use \ref cpu_msr_driver_open to get a handle, and then
* \ref cpu_rdmsr for querying abilities. Some MSR decoding is available on recent
* CPUs, and can be queried through \ref cpu_msrinfo; the various types of queries
* are described in \ref cpu_msrinfo_request_t.
* </p>
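* <p>
* A minimal end-to-end sketch (illustrative; error reporting omitted):
* @code
* struct cpu_raw_data_t raw;
* struct cpu_id_t id;
* if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0)
*     printf("%s (%s), %d cores\n", id.brand_str, id.cpu_codename, id.num_cores);
* @endcode
* </p>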
*/
/** @defgroup libcpuid LibCPUID
* @brief LibCPUID provides CPU identification
@{ */
/* Include some integer type specifications: */
#include "libcpuid_types.h"
/* Some limits and other constants */
#include "libcpuid_constants.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CPU vendor, as guessed from the Vendor String.
*/
typedef enum {
VENDOR_INTEL = 0, /*!< Intel CPU */
VENDOR_AMD, /*!< AMD CPU */
VENDOR_CYRIX, /*!< Cyrix CPU */
VENDOR_NEXGEN, /*!< NexGen CPU */
VENDOR_TRANSMETA, /*!< Transmeta CPU */
VENDOR_UMC, /*!< x86 CPU by UMC */
VENDOR_CENTAUR, /*!< x86 CPU by IDT */
VENDOR_RISE, /*!< x86 CPU by Rise Technology */
VENDOR_SIS, /*!< x86 CPU by SiS */
VENDOR_NSC, /*!< x86 CPU by National Semiconductor */
NUM_CPU_VENDORS, /*!< Valid CPU vendor ids: 0..NUM_CPU_VENDORS - 1 */
VENDOR_UNKNOWN = -1,
} cpu_vendor_t;
#define NUM_CPU_VENDORS NUM_CPU_VENDORS
/**
* @brief Contains just the raw CPUID data.
*
* This contains only the most basic CPU data, required to do identification
* and feature recognition. Every processor should be identifiable using this
* data only.
*/
struct cpu_raw_data_t {
/** contains results of CPUID for eax = 0, 1, ...*/
uint32_t basic_cpuid[MAX_CPUID_LEVEL][4];
/** contains results of CPUID for eax = 0x80000000, 0x80000001, ...*/
uint32_t ext_cpuid[MAX_EXT_CPUID_LEVEL][4];
/** when the CPU is intel and it supports deterministic cache
information: this contains the results of CPUID for eax = 4
and ecx = 0, 1, ... */
uint32_t intel_fn4[MAX_INTELFN4_LEVEL][4];
/** when the CPU is intel and it supports leaf 0Bh (Extended Topology
enumeration leaf), this stores the result of CPUID with
eax = 11 and ecx = 0, 1, 2... */
uint32_t intel_fn11[MAX_INTELFN11_LEVEL][4];
/** when the CPU is intel and supports leaf 12h (SGX enumeration leaf),
* this stores the result of CPUID with eax = 0x12 and
* ecx = 0, 1, 2... */
uint32_t intel_fn12h[MAX_INTELFN12H_LEVEL][4];
/** when the CPU is intel and supports leaf 14h (Intel Processor Trace
* capabilities leaf); this stores the result of CPUID with eax = 0x14 and
* ecx = 0, 1, 2... */
uint32_t intel_fn14h[MAX_INTELFN14H_LEVEL][4];
};
/**
* @brief This contains information about SGX features of the processor
* Example usage:
* @code
* ...
* struct cpu_raw_data_t raw;
* struct cpu_id_t id;
*
* if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0 && id.sgx.present) {
* printf("SGX is present.\n");
* printf("SGX1 instructions: %s.\n", id.sgx.flags[INTEL_SGX1] ? "present" : "absent");
* printf("SGX2 instructions: %s.\n", id.sgx.flags[INTEL_SGX2] ? "present" : "absent");
* printf("Max 32-bit enclave size: 2^%d bytes.\n", id.sgx.max_enclave_32bit);
* printf("Max 64-bit enclave size: 2^%d bytes.\n", id.sgx.max_enclave_64bit);
* for (int i = 0; i < id.sgx.num_epc_sections; i++) {
* struct cpu_epc_t epc = cpuid_get_epc(i, NULL);
* printf("EPC section #%d: address = %x, size = %d bytes.\n", epc.address, epc.size);
* }
* } else {
* printf("SGX is not present.\n");
* }
* @endcode
*/
struct cpu_sgx_t {
/** Whether SGX is present (boolean) */
uint32_t present;
/** Max enclave size in 32-bit mode. This is a power-of-two value:
* if it is "31", then the max enclave size is 2^31 bytes (2 GiB).
*/
uint8_t max_enclave_32bit;
/** Max enclave size in 64-bit mode. This is a power-of-two value:
* if it is "36", then the max enclave size is 2^36 bytes (64 GiB).
*/
uint8_t max_enclave_64bit;
/**
* contains SGX feature flags. See the \ref cpu_sgx_feature_t
* "INTEL_SGX*" macros below.
*/
uint8_t flags[SGX_FLAGS_MAX];
/** number of Enclave Page Cache (EPC) sections. Info for each
* section is available through the \ref cpuid_get_epc() function
*/
int num_epc_sections;
/** bit vector of the supported extended features that can be written
* to the MISC region of the SSA (Save State Area)
*/
uint32_t misc_select;
/** a bit vector of the attributes that can be set to SECS.ATTRIBUTES
* via ECREATE. Corresponds to bits 0-63 (incl.) of SECS.ATTRIBUTES.
*/
uint64_t secs_attributes;
/** a bit vector of the bits that can be set in the XSAVE feature
* request mask; Corresponds to bits 64-127 of SECS.ATTRIBUTES.
*/
uint64_t secs_xfrm;
};
/**
* @brief This contains the recognized CPU features/info
*/
struct cpu_id_t {
/** contains the CPU vendor string, e.g. "GenuineIntel" */
char vendor_str[VENDOR_STR_MAX];
/** contains the brand string, e.g. "Intel(R) Xeon(TM) CPU 2.40GHz" */
char brand_str[BRAND_STR_MAX];
/** contains the recognized CPU vendor */
cpu_vendor_t vendor;
/**
* contain CPU flags. Used to test for features. See
* the \ref cpu_feature_t "CPU_FEATURE_*" macros below.
* @see Features
*/
uint8_t flags[CPU_FLAGS_MAX];
/** CPU family */
int32_t family;
/** CPU model */
int32_t model;
/** CPU stepping */
int32_t stepping;
/** CPU extended family */
int32_t ext_family;
/** CPU extended model */
int32_t ext_model;
/** Number of CPU cores on the current processor */
int32_t num_cores;
/**
* Number of logical processors on the current processor.
* Could be more than the number of physical cores,
* e.g. when the processor has HyperThreading.
*/
int32_t num_logical_cpus;
/**
* The total number of logical processors.
* The same value is available through \ref cpuid_get_total_cpus.
*
* This is num_logical_cpus * {total physical processors in the system}
* (but only on a real system, under a VM this number may be lower).
*
* If you're writing a multithreaded program and you want to run it on
* all CPUs, this is the number of threads you need.
*
* @note in a VM, this will exactly match the number of CPUs set in
* the VM's configuration.
*
*/
int32_t total_logical_cpus;
/**
* L1 data cache size in KB. Could be zero, if the CPU lacks cache.
* If the size cannot be determined, it will be -1.
*/
int32_t l1_data_cache;
/**
* L1 instruction cache size in KB. Could be zero, if the CPU lacks
* cache. If the size cannot be determined, it will be -1.
* @note On some Intel CPUs, whose instruction cache is in fact
* a trace cache, the size will be expressed in K uOps.
*/
int32_t l1_instruction_cache;
/**
* L2 cache size in KB. Could be zero, if the CPU lacks L2 cache.
* If the size of the cache could not be determined, it will be -1
*/
int32_t l2_cache;
/** L3 cache size in KB. Zero on most systems */
int32_t l3_cache;
/** L4 cache size in KB. Zero on most systems */
int32_t l4_cache;
/** Cache associativity for the L1 data cache. -1 if undetermined */
int32_t l1_assoc;
/** Cache associativity for the L2 cache. -1 if undetermined */
int32_t l2_assoc;
/** Cache associativity for the L3 cache. -1 if undetermined */
int32_t l3_assoc;
/** Cache associativity for the L4 cache. -1 if undetermined */
int32_t l4_assoc;
/** Cache-line size for L1 data cache. -1 if undetermined */
int32_t l1_cacheline;
/** Cache-line size for L2 cache. -1 if undetermined */
int32_t l2_cacheline;
/** Cache-line size for L3 cache. -1 if undetermined */
int32_t l3_cacheline;
/** Cache-line size for L4 cache. -1 if undetermined */
int32_t l4_cacheline;
/**
* The brief and human-friendly CPU codename, which was recognized.<br>
* Examples:
* @code
* +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+
* | Vendor | Family | Model | Step. | Cache | Brand String | cpu_id_t.cpu_codename |
* +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+
* | AMD | 6 | 8 | 0 | 256 | (not available - will be ignored) | "K6-2" |
* | Intel | 15 | 2 | 5 | 512 | "Intel(R) Xeon(TM) CPU 2.40GHz" | "Xeon (Prestonia)" |
* | Intel | 6 | 15 | 11 | 4096 | "Intel(R) Core(TM)2 Duo CPU E6550..." | "Conroe (Core 2 Duo)" |
* | AMD | 15 | 35 | 2 | 1024 | "Dual Core AMD Opteron(tm) Proces..." | "Opteron (Dual Core)" |
* +--------+--------+-------+-------+-------+---------------------------------------+-----------------------+
* @endcode
*/
char cpu_codename[64];
/** SSE execution unit size (64 or 128; -1 if N/A) */
int32_t sse_size;
/**
* contain miscellaneous detection information. Used to test about specifics of
* certain detected features. See \ref cpu_hint_t "CPU_HINT_*" macros below.
* @see Hints
*/
uint8_t detection_hints[CPU_HINTS_MAX];
/** contains information about SGX features of the processor, if present */
struct cpu_sgx_t sgx;
};
/**
* @brief CPU feature identifiers
*
* Usage:
* @code
* ...
* struct cpu_raw_data_t raw;
* struct cpu_id_t id;
* if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0) {
* if (id.flags[CPU_FEATURE_SSE2]) {
* // The CPU has SSE2...
* ...
* } else {
* // no SSE2
* }
* } else {
* // processor cannot be determined.
* }
* @endcode
*/
typedef enum {
CPU_FEATURE_FPU = 0, /*!< Floating point unit */
CPU_FEATURE_VME, /*!< Virtual mode extension */
CPU_FEATURE_DE, /*!< Debugging extension */
CPU_FEATURE_PSE, /*!< Page size extension */
CPU_FEATURE_TSC, /*!< Time-stamp counter */
CPU_FEATURE_MSR, /*!< Model-specific registers, RDMSR/WRMSR supported */
CPU_FEATURE_PAE, /*!< Physical address extension */
CPU_FEATURE_MCE, /*!< Machine check exception */
CPU_FEATURE_CX8, /*!< CMPXCHG8B instruction supported */
CPU_FEATURE_APIC, /*!< APIC support */
CPU_FEATURE_MTRR, /*!< Memory type range registers */
CPU_FEATURE_SEP, /*!< SYSENTER / SYSEXIT instructions supported */
CPU_FEATURE_PGE, /*!< Page global enable */
CPU_FEATURE_MCA, /*!< Machine check architecture */
CPU_FEATURE_CMOV, /*!< CMOVxx instructions supported */
CPU_FEATURE_PAT, /*!< Page attribute table */
CPU_FEATURE_PSE36, /*!< 36-bit page address extension */
CPU_FEATURE_PN, /*!< Processor serial # implemented (Intel P3 only) */
CPU_FEATURE_CLFLUSH, /*!< CLFLUSH instruction supported */
CPU_FEATURE_DTS, /*!< Debug store supported */
CPU_FEATURE_ACPI, /*!< ACPI support (power states) */
CPU_FEATURE_MMX, /*!< MMX instruction set supported */
CPU_FEATURE_FXSR, /*!< FXSAVE / FXRSTOR supported */
CPU_FEATURE_SSE, /*!< Streaming-SIMD Extensions (SSE) supported */
CPU_FEATURE_SSE2, /*!< SSE2 instructions supported */
CPU_FEATURE_SS, /*!< Self-snoop */
CPU_FEATURE_HT, /*!< Hyper-threading supported (but might be disabled) */
CPU_FEATURE_TM, /*!< Thermal monitor */
CPU_FEATURE_IA64, /*!< IA64 supported (Itanium only) */
CPU_FEATURE_PBE, /*!< Pending-break enable */
CPU_FEATURE_PNI, /*!< PNI (SSE3) instructions supported */
CPU_FEATURE_PCLMUL, /*!< PCLMULQDQ instruction supported */
CPU_FEATURE_DTS64, /*!< 64-bit Debug store supported */
CPU_FEATURE_MONITOR, /*!< MONITOR / MWAIT supported */
CPU_FEATURE_DS_CPL, /*!< CPL Qualified Debug Store */
CPU_FEATURE_VMX, /*!< Virtualization technology supported */
CPU_FEATURE_SMX, /*!< Safer mode exceptions */
CPU_FEATURE_EST, /*!< Enhanced SpeedStep */
CPU_FEATURE_TM2, /*!< Thermal monitor 2 */
CPU_FEATURE_SSSE3, /*!< SSSE3 instructions supported (this is different from SSE3!) */
CPU_FEATURE_CID, /*!< Context ID supported */
CPU_FEATURE_CX16, /*!< CMPXCHG16B instruction supported */
CPU_FEATURE_XTPR, /*!< Send Task Priority Messages disable */
CPU_FEATURE_PDCM, /*!< Performance capabilities MSR supported */
CPU_FEATURE_DCA, /*!< Direct cache access supported */
CPU_FEATURE_SSE4_1, /*!< SSE 4.1 instructions supported */
CPU_FEATURE_SSE4_2, /*!< SSE 4.2 instructions supported */
CPU_FEATURE_SYSCALL, /*!< SYSCALL / SYSRET instructions supported */
CPU_FEATURE_XD, /*!< Execute disable bit supported */
CPU_FEATURE_MOVBE, /*!< MOVBE instruction supported */
CPU_FEATURE_POPCNT, /*!< POPCNT instruction supported */
CPU_FEATURE_AES, /*!< AES* instructions supported */
CPU_FEATURE_XSAVE, /*!< XSAVE/XRSTOR/etc instructions supported */
CPU_FEATURE_OSXSAVE, /*!< non-privileged copy of OSXSAVE supported */
CPU_FEATURE_AVX, /*!< Advanced vector extensions supported */
CPU_FEATURE_MMXEXT, /*!< AMD MMX-extended instructions supported */
CPU_FEATURE_3DNOW, /*!< AMD 3DNow! instructions supported */
CPU_FEATURE_3DNOWEXT, /*!< AMD 3DNow! extended instructions supported */
CPU_FEATURE_NX, /*!< No-execute bit supported */
CPU_FEATURE_FXSR_OPT, /*!< FFXSR: FXSAVE and FXRSTOR optimizations */
CPU_FEATURE_RDTSCP, /*!< RDTSCP instruction supported (AMD-only) */
CPU_FEATURE_LM, /*!< Long mode (x86_64/EM64T) supported */
CPU_FEATURE_LAHF_LM, /*!< LAHF/SAHF supported in 64-bit mode */
CPU_FEATURE_CMP_LEGACY, /*!< core multi-processing legacy mode */
CPU_FEATURE_SVM, /*!< AMD Secure virtual machine */
CPU_FEATURE_ABM, /*!< LZCNT instruction support */
CPU_FEATURE_MISALIGNSSE,/*!< Misaligned SSE supported */
CPU_FEATURE_SSE4A, /*!< SSE 4a from AMD */
CPU_FEATURE_3DNOWPREFETCH, /*!< PREFETCH/PREFETCHW support */
CPU_FEATURE_OSVW, /*!< OS Visible Workaround (AMD) */
CPU_FEATURE_IBS, /*!< Instruction-based sampling */
CPU_FEATURE_SSE5, /*!< SSE 5 instructions supported (deprecated, will never be 1) */
CPU_FEATURE_SKINIT, /*!< SKINIT / STGI supported */
CPU_FEATURE_WDT, /*!< Watchdog timer support */
CPU_FEATURE_TS, /*!< Temperature sensor */
CPU_FEATURE_FID, /*!< Frequency ID control */
CPU_FEATURE_VID, /*!< Voltage ID control */
CPU_FEATURE_TTP, /*!< THERMTRIP */
CPU_FEATURE_TM_AMD, /*!< AMD-specified hardware thermal control */
CPU_FEATURE_STC, /*!< Software thermal control */
CPU_FEATURE_100MHZSTEPS,/*!< 100 MHz multiplier control */
CPU_FEATURE_HWPSTATE, /*!< Hardware P-state control */
CPU_FEATURE_CONSTANT_TSC, /*!< TSC ticks at constant rate */
CPU_FEATURE_XOP, /*!< The XOP instruction set (same as the old CPU_FEATURE_SSE5) */
CPU_FEATURE_FMA3, /*!< The FMA3 instruction set */
CPU_FEATURE_FMA4, /*!< The FMA4 instruction set */
CPU_FEATURE_TBM, /*!< Trailing bit manipulation instruction support */
CPU_FEATURE_F16C, /*!< 16-bit FP convert instruction support */
CPU_FEATURE_RDRAND, /*!< RdRand instruction */
CPU_FEATURE_X2APIC, /*!< x2APIC, APIC_BASE.EXTD, MSRs 0000_0800h...0000_0BFFh 64-bit ICR (+030h but not +031h), no DFR (+00Eh), SELF_IPI (+040h) also see standard level 0000_000Bh */
CPU_FEATURE_CPB, /*!< Core performance boost */
CPU_FEATURE_APERFMPERF, /*!< MPERF/APERF MSRs support */
CPU_FEATURE_PFI, /*!< Processor Feedback Interface support */
CPU_FEATURE_PA, /*!< Processor accumulator */
CPU_FEATURE_AVX2, /*!< AVX2 instructions */
CPU_FEATURE_BMI1, /*!< BMI1 instructions */
CPU_FEATURE_BMI2, /*!< BMI2 instructions */
CPU_FEATURE_HLE, /*!< Hardware Lock Elision prefixes */
CPU_FEATURE_RTM, /*!< Restricted Transactional Memory instructions */
CPU_FEATURE_AVX512F, /*!< AVX-512 Foundation */
CPU_FEATURE_AVX512DQ, /*!< AVX-512 Double/Quad granular insns */
CPU_FEATURE_AVX512PF, /*!< AVX-512 Prefetch */
CPU_FEATURE_AVX512ER, /*!< AVX-512 Exponential/Reciprocal */
CPU_FEATURE_AVX512CD, /*!< AVX-512 Conflict detection */
CPU_FEATURE_SHA_NI, /*!< SHA-1/SHA-256 instructions */
CPU_FEATURE_AVX512BW, /*!< AVX-512 Byte/Word granular insns */
CPU_FEATURE_AVX512VL, /*!< AVX-512 128/256 vector length extensions */
CPU_FEATURE_SGX, /*!< SGX extensions. Non-authoritative, check cpu_id_t::sgx::present to verify presence */
CPU_FEATURE_RDSEED, /*!< RDSEED instruction */
CPU_FEATURE_ADX, /*!< ADX extensions (arbitrary precision) */
/* termination: */
NUM_CPU_FEATURES,
} cpu_feature_t;
/**
* @brief CPU detection hints identifiers
*
* Usage: similar to the flags usage
*/
typedef enum {
CPU_HINT_SSE_SIZE_AUTH = 0, /*!< SSE unit size is authoritative (not only a Family/Model guesswork, but based on an actual CPUID bit) */
/* termination */
NUM_CPU_HINTS,
} cpu_hint_t;
/**
* @brief SGX features flags
* \see cpu_sgx_t
*
* Usage:
* @code
* ...
* struct cpu_raw_data_t raw;
* struct cpu_id_t id;
* if (cpuid_get_raw_data(&raw) == 0 && cpu_identify(&raw, &id) == 0 && id.sgx.present) {
* if (id.sgx.flags[INTEL_SGX1]) {
* // The CPU has SGX1 instructions support...
* ...
* } else {
* // no SGX
* }
* } else {
* // processor cannot be determined.
* }
* @endcode
*/
typedef enum {
INTEL_SGX1, /*!< SGX1 instructions support */
INTEL_SGX2, /*!< SGX2 instructions support */
/* termination: */
NUM_SGX_FEATURES,
} cpu_sgx_feature_t;
/**
* @brief Describes common library error codes
*/
typedef enum {
ERR_OK = 0, /*!< No error */
ERR_NO_CPUID = -1, /*!< CPUID instruction is not supported */
ERR_NO_RDTSC = -2, /*!< RDTSC instruction is not supported */
ERR_NO_MEM = -3, /*!< Memory allocation failed */
ERR_OPEN = -4, /*!< File open operation failed */
ERR_BADFMT = -5, /*!< Bad file format */
ERR_NOT_IMP = -6, /*!< Not implemented */
ERR_CPU_UNKN = -7, /*!< Unsupported processor */
ERR_NO_RDMSR = -8, /*!< RDMSR instruction is not supported */
ERR_NO_DRIVER= -9, /*!< RDMSR driver error (generic) */
ERR_NO_PERMS = -10, /*!< No permissions to install RDMSR driver */
ERR_EXTRACT = -11, /*!< Cannot extract RDMSR driver (read only media?) */
ERR_HANDLE = -12, /*!< Bad handle */
ERR_INVMSR = -13, /*!< Invalid MSR */
ERR_INVCNB = -14, /*!< Invalid core number */
ERR_HANDLE_R = -15, /*!< Error on handle read */
ERR_INVRANGE = -16, /*!< Invalid given range */
} cpu_error_t;
/**
* @brief Internal structure, used in cpu_tsc_mark, cpu_tsc_unmark and
* cpu_clock_by_mark
*/
struct cpu_mark_t {
uint64_t tsc; /*!< Time-stamp from RDTSC */
uint64_t sys_clock; /*!< In microsecond resolution */
};
/**
* @brief Returns the total number of logical CPU threads (even if CPUID is not present).
*
* Under VM, this number (and total_logical_cpus, since they are fetched with the same code)
* may be nonsensical, i.e. might not equal NumPhysicalCPUs*NumCoresPerCPU*HyperThreading.
* This is because no matter how many logical threads the host machine has, you may limit them
* in the VM to any number you like. **This** is the number returned by cpuid_get_total_cpus().
*
* @returns Number of logical CPU threads available. Equals the \ref cpu_id_t::total_logical_cpus.
*/
int cpuid_get_total_cpus(void);
/**
* @brief Checks if the CPUID instruction is supported
* @retval 1 if CPUID is present
* @retval 0 the CPU doesn't have CPUID.
*/
int cpuid_present(void);
/**
* @brief Executes the CPUID instruction
* @param eax - the value of the EAX register when executing CPUID
* @param regs - the results will be stored here. regs[0] = EAX, regs[1] = EBX, ...
* @note CPUID will be executed with EAX set to the given value and EBX, ECX,
* EDX set to zero.
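*
* A minimal sketch: leaf 0 returns the maximum standard level in regs[0], and
* the vendor string is spread across regs[1], regs[3] and regs[2]:
* @code
* uint32_t regs[4];
* cpu_exec_cpuid(0, regs);
* @endcode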
*/
void cpu_exec_cpuid(uint32_t eax, uint32_t* regs);
/**
* @brief Executes the CPUID instruction with the given input registers
* @note This is just a bit more generic version of cpu_exec_cpuid - it allows
* you to control all the registers.
* @param regs - Input/output. Prior to executing CPUID, EAX, EBX, ECX and
* EDX will be set to regs[0], regs[1], regs[2] and regs[3].
* After CPUID, this array will contain the results.
*/
void cpu_exec_cpuid_ext(uint32_t* regs);
/**
* @brief Obtains the raw CPUID data from the current CPU
* @param data - a pointer to cpu_raw_data_t structure
* @returns zero if successful, and some negative number on error.
* The error message can be obtained by calling \ref cpuid_error.
* @see cpu_error_t
*/
int cpuid_get_raw_data(struct cpu_raw_data_t* data);
/**
* @brief Identifies the CPU
* @param raw - Input - a pointer to the raw CPUID data, which is obtained
* either by cpuid_get_raw_data or cpuid_deserialize_raw_data.
* Can also be NULL, in which case the functions calls
* cpuid_get_raw_data itself.
* @param data - Output - the decoded CPU features/info is written here.
* @note The function will not fail, even if some of the information
* cannot be obtained. Even when the CPU is new and thus unknown to
* libcpuid, some generic info, such as "AMD K9 family CPU" will be
* written to data.cpu_codename, and most other things, such as the
* CPU flags, cache sizes, etc. should be detected correctly anyway.
* However, the function CAN fail, if the CPU is completely alien to
* libcpuid.
* @note While cpu_identify() and cpuid_get_raw_data() are fast for most
* purposes, running them several thousand times per second can hamper
* performance significantly. Specifically, avoid writing "cpu feature
* checker" wrapping function, which calls cpu_identify and returns the
* value of some flag, if that function is going to be called frequently.
* @returns zero if successful, and some negative number on error.
* The error message can be obtained by calling \ref cpuid_error.
* @see cpu_error_t
*/
int cpu_identify(struct cpu_raw_data_t* raw, struct cpu_id_t* data);
/**
* @brief The return value of cpuid_get_epc().
* @details
* Describes an EPC (Enclave Page Cache) layout (physical address and size).
* A CPU may have one or more EPC areas, and information about each is
* fetched via \ref cpuid_get_epc.
*/
struct cpu_epc_t {
uint64_t start_addr;
uint64_t length;
};
/**
* @brief Fetches information about an EPC (Enclave Page Cache) area.
* @param index - zero-based index, valid range [0..cpu_id_t.sgx.num_epc_sections)
* @param raw - a pointer to fetched raw CPUID data. Needed only for testing,
* you can safely pass NULL here (if you pass a real structure,
* it will be used for fetching the leaf 12h data if index < 2;
* otherwise the real CPUID instruction will be used).
* @returns the requested data. If the CPU doesn't support SGX, or if
* index >= cpu_id_t.sgx.num_epc_sections, both fields of the returned
* structure will be zeros.
*/
struct cpu_epc_t cpuid_get_epc(int index, const struct cpu_raw_data_t* raw);
/**
* @brief Returns the libcpuid version
*
* @returns the string representation of the libcpuid version, like "0.1.1"
*/
const char* cpuid_lib_version(void);
#ifdef __cplusplus
} /* extern "C" */
#endif
/** @} */
#endif /* __LIBCPUID_H__ */


@@ -1,47 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @File libcpuid_constants.h
* @Author Veselin Georgiev
* @Brief Some limits and constants for libcpuid
*/
#ifndef __LIBCPUID_CONSTANTS_H__
#define __LIBCPUID_CONSTANTS_H__
#define VENDOR_STR_MAX 16
#define BRAND_STR_MAX 64
#define CPU_FLAGS_MAX 128
#define MAX_CPUID_LEVEL 32
#define MAX_EXT_CPUID_LEVEL 32
#define MAX_INTELFN4_LEVEL 8
#define MAX_INTELFN11_LEVEL 4
#define MAX_INTELFN12H_LEVEL 4
#define MAX_INTELFN14H_LEVEL 4
#define CPU_HINTS_MAX 16
#define SGX_FLAGS_MAX 14
#endif /* __LIBCPUID_CONSTANTS_H__ */


@@ -1,107 +0,0 @@
/*
* Copyright 2016 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __LIBCPUID_INTERNAL_H__
#define __LIBCPUID_INTERNAL_H__
/*
* This file contains internal undocumented declarations and function prototypes
* for the workings of the internal library infrastructure.
*/
enum _common_codes_t {
NA = 0,
NC, /* No code */
};
#define CODE(x) x
#define CODE2(x, y) x = y
enum _amd_code_t {
#include "amd_code_t.h"
};
typedef enum _amd_code_t amd_code_t;
enum _intel_code_t {
#include "intel_code_t.h"
};
typedef enum _intel_code_t intel_code_t;
#undef CODE
#undef CODE2
struct internal_id_info_t {
union {
amd_code_t amd;
intel_code_t intel;
} code;
uint64_t bits;
int score; // detection (matchtable) score
};
#define LBIT(x) (((long long) 1) << x)
enum _common_bits_t {
_M_ = LBIT( 0 ),
MOBILE_ = LBIT( 1 ),
_MP_ = LBIT( 2 ),
};
// additional detection bits for Intel CPUs:
enum _intel_bits_t {
PENTIUM_ = LBIT( 10 ),
CELERON_ = LBIT( 11 ),
CORE_ = LBIT( 12 ),
_I_ = LBIT( 13 ),
_3 = LBIT( 14 ),
_5 = LBIT( 15 ),
_7 = LBIT( 16 ),
_9 = LBIT( 17 ),
XEON_ = LBIT( 18 ),
ATOM_ = LBIT( 19 ),
};
typedef enum _intel_bits_t intel_bits_t;
enum _amd_bits_t {
ATHLON_ = LBIT( 10 ),
_XP_ = LBIT( 11 ),
DURON_ = LBIT( 12 ),
SEMPRON_ = LBIT( 13 ),
OPTERON_ = LBIT( 14 ),
TURION_ = LBIT( 15 ),
_LV_ = LBIT( 16 ),
_64_ = LBIT( 17 ),
_X2 = LBIT( 18 ),
_X3 = LBIT( 19 ),
_X4 = LBIT( 20 ),
_X6 = LBIT( 21 ),
_FX = LBIT( 22 ),
_APU_ = LBIT( 23 ),
};
typedef enum _amd_bits_t amd_bits_t;
int cpu_ident_internal(struct cpu_raw_data_t* raw, struct cpu_id_t* data,
struct internal_id_info_t* internal);
#endif /* __LIBCPUID_INTERNAL_H__ */


@@ -1,63 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @File libcpuid_types.h
* @Author Veselin Georgiev
* @Brief Type specifications for libcpuid.
*/
#ifndef __LIBCPUID_TYPES_H__
#define __LIBCPUID_TYPES_H__
#if !defined(_MSC_VER) || _MSC_VER >= 1600
# include <stdint.h>
#else
/* we have to provide our own: */
# if !defined(__int32_t_defined)
typedef int int32_t;
# endif
# if !defined(__uint32_t_defined)
typedef unsigned uint32_t;
# endif
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
#if (defined _MSC_VER) && (_MSC_VER <= 1300)
/* MSVC 6.0: no long longs ... */
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
/* all other sane compilers: */
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#endif
#endif
#endif /* __LIBCPUID_TYPES_H__ */


@@ -1,93 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include "libcpuid.h"
#include "libcpuid_util.h"
void match_features(const struct feature_map_t* matchtable, int count, uint32_t reg, struct cpu_id_t* data)
{
int i;
for (i = 0; i < count; i++)
if (reg & (1u << matchtable[i].bit))
data->flags[matchtable[i].feature] = 1;
}
static int xmatch_entry(char c, const char* p)
{
int i, j;
if (c == 0) return -1;
if (c == p[0]) return 1;
if (p[0] == '.') return 1;
if (p[0] == '#' && isdigit(c)) return 1;
if (p[0] == '[') {
j = 1;
while (p[j] && p[j] != ']') j++;
if (!p[j]) return -1;
for (i = 1; i < j; i++)
if (p[i] == c) return j + 1;
}
return -1;
}
int match_pattern(const char* s, const char* p)
{
int i, j, dj, k, n, m;
n = (int) strlen(s);
m = (int) strlen(p);
for (i = 0; i < n; i++) {
if (xmatch_entry(s[i], p) != -1) {
j = 0;
k = 0;
while (j < m && ((dj = xmatch_entry(s[i + k], p + j)) != -1)) {
k++;
j += dj;
}
if (j == m) return i + 1;
}
}
return 0;
}
struct cpu_id_t* get_cached_cpuid(void)
{
static int initialized = 0;
static struct cpu_id_t id;
if (initialized) return &id;
if (cpu_identify(NULL, &id))
memset(&id, 0, sizeof(id));
initialized = 1;
return &id;
}
int match_all(uint64_t bits, uint64_t mask)
{
return (bits & mask) == mask;
}


@@ -1,78 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __LIBCPUID_UTIL_H__
#define __LIBCPUID_UTIL_H__
#define COUNT_OF(array) (sizeof(array) / sizeof(array[0]))
struct feature_map_t {
unsigned bit;
cpu_feature_t feature;
};
void match_features(const struct feature_map_t* matchtable, int count,
uint32_t reg, struct cpu_id_t* data);
struct match_entry_t {
int family, model, stepping, ext_family, ext_model;
int ncores, l2cache, l3cache, brand_code;
uint64_t model_bits;
int model_code;
char name[32];
};
// returns the match score:
int match_cpu_codename(const struct match_entry_t* matchtable, int count,
struct cpu_id_t* data, int brand_code, uint64_t bits,
int model_code);
/*
* Seek for a pattern in `haystack'.
* Pattern may be a fixed string, or contain the special metacharacters
* '.' - match any single character
* '#' - match any digit
* '[<chars>]' - match any of the given chars (regex-like ranges are not
* supported)
* Return val: 0 if the pattern is not found. Nonzero if it is found (actually,
* x + 1 where x is the index where the match is found).
*/
int match_pattern(const char* haystack, const char* pattern);
/*
* Gets an initialized cpu_id_t. It is cached, so that internal libcpuid
* machinery doesn't need to issue cpu_identify more than once.
*/
struct cpu_id_t* get_cached_cpuid(void);
/* returns true if all bits of mask are present in `bits'. */
int match_all(uint64_t bits, uint64_t mask);
/*
* Sets the current errno
*/
int set_error(cpu_error_t err);
#endif /* __LIBCPUID_UTIL_H__ */
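As a quick, hedged illustration of the `match_pattern()` metacharacters documented above (the brand strings are invented; link against the implementation in the accompanying .c file):

```C
/* Demonstrates '#' and '[...]' matching; returns match index + 1, or 0. */
#include <stdio.h>

int match_pattern(const char* haystack, const char* pattern); /* declared above */

int main(void) {
    /* '[ELXW]' = one of E/L/X/W, '#' = any digit (cf. the Xeon match tables) */
    printf("%d\n", match_pattern("Intel(R) Xeon(R) CPU E5506", "[ELXW]55##") != 0); /* 1 */
    printf("%d\n", match_pattern("Intel(R) Core(TM) i7 CPU", "Celeron"));           /* 0 */
    return 0;
}
```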


@@ -1,359 +0,0 @@
.code
; procedure exec_cpuid
; Signature: void exec_cpuid(uint32_t *regs)
exec_cpuid Proc
push rbx
push rcx
push rdx
push rdi
mov rdi, rcx
mov eax, [rdi]
mov ebx, [rdi+4]
mov ecx, [rdi+8]
mov edx, [rdi+12]
cpuid
mov [rdi], eax
mov [rdi+4], ebx
mov [rdi+8], ecx
mov [rdi+12], edx
pop rdi
pop rdx
pop rcx
pop rbx
ret
exec_cpuid endp
; procedure cpu_rdtsc
; Signature: void cpu_rdtsc(uint64_t *result)
cpu_rdtsc Proc
push rdx
rdtsc
mov [rcx], eax
mov [rcx+4], edx
pop rdx
ret
cpu_rdtsc endp
; procedure busy_sse_loop
; Signature: void busy_sse_loop(int cycles)
busy_sse_loop Proc
; save xmm6 & xmm7 into the shadow area, as Visual C++ 2008
; expects that we don't touch them:
movups [rsp + 8], xmm6
movups [rsp + 24], xmm7
xorps xmm0, xmm0
xorps xmm1, xmm1
xorps xmm2, xmm2
xorps xmm3, xmm3
xorps xmm4, xmm4
xorps xmm5, xmm5
xorps xmm6, xmm6
xorps xmm7, xmm7
; --
align 16
bsLoop:
; 0:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 1:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 2:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 3:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 4:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 5:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 6:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 7:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 8:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 9:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 10:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 11:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 12:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 13:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 14:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 15:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 16:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 17:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 18:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 19:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 20:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 21:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 22:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 23:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 24:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 25:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 26:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 27:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 28:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 29:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 30:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; 31:
addps xmm0, xmm1
addps xmm1, xmm2
addps xmm2, xmm3
addps xmm3, xmm4
addps xmm4, xmm5
addps xmm5, xmm6
addps xmm6, xmm7
addps xmm7, xmm0
; ----------------------
dec ecx
jnz bsLoop
; restore xmm6 & xmm7:
movups xmm6, [rsp + 8]
movups xmm7, [rsp + 24]
ret
busy_sse_loop endp
END
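For reference, a hedged sketch of the C-side prototypes these MASM procedures implement (the xmm6/xmm7 save above is needed because xmm6–xmm15 are callee-saved in the Win64 ABI):

```C
/* Hypothetical caller-side declarations; link with the assembled object. */
#include <stdint.h>

extern void exec_cpuid(uint32_t* regs);  /* regs[0..3]: EAX,EBX,ECX,EDX in/out */
extern void cpu_rdtsc(uint64_t* result); /* stores the 64-bit TSC at *result   */
extern void busy_sse_loop(int cycles);   /* 256 packed-float adds per iteration */

int main(void) {
    uint32_t regs[4] = { 0, 0, 0, 0 };   /* leaf 0: highest basic leaf + vendor */
    exec_cpuid(regs);
    return (int) regs[0];                /* max supported basic CPUID leaf */
}
```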


@@ -1,173 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "libcpuid.h"
#include "libcpuid_util.h"
#include "libcpuid_internal.h"
#include "recog_amd.h"
const struct amd_code_str { amd_code_t code; char *str; } amd_code_str[] = {
#define CODE(x) { x, #x }
#define CODE2(x, y) CODE(x)
#include "amd_code_t.h"
#undef CODE
};
struct amd_code_and_bits_t {
int code;
uint64_t bits;
};
enum _amd_model_codes_t {
// Only for Ryzen CPUs:
_1400,
_1500,
_1600,
_1900,
_2400,
_2500,
_2700,
};
static void load_amd_features(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
const struct feature_map_t matchtable_edx81[] = {
{ 20, CPU_FEATURE_NX },
{ 22, CPU_FEATURE_MMXEXT },
{ 25, CPU_FEATURE_FXSR_OPT },
{ 30, CPU_FEATURE_3DNOWEXT },
{ 31, CPU_FEATURE_3DNOW },
};
const struct feature_map_t matchtable_ecx81[] = {
{ 1, CPU_FEATURE_CMP_LEGACY },
{ 2, CPU_FEATURE_SVM },
{ 5, CPU_FEATURE_ABM },
{ 6, CPU_FEATURE_SSE4A },
{ 7, CPU_FEATURE_MISALIGNSSE },
{ 8, CPU_FEATURE_3DNOWPREFETCH },
{ 9, CPU_FEATURE_OSVW },
{ 10, CPU_FEATURE_IBS },
{ 11, CPU_FEATURE_XOP },
{ 12, CPU_FEATURE_SKINIT },
{ 13, CPU_FEATURE_WDT },
{ 16, CPU_FEATURE_FMA4 },
{ 21, CPU_FEATURE_TBM },
};
const struct feature_map_t matchtable_edx87[] = {
{ 0, CPU_FEATURE_TS },
{ 1, CPU_FEATURE_FID },
{ 2, CPU_FEATURE_VID },
{ 3, CPU_FEATURE_TTP },
{ 4, CPU_FEATURE_TM_AMD },
{ 5, CPU_FEATURE_STC },
{ 6, CPU_FEATURE_100MHZSTEPS },
{ 7, CPU_FEATURE_HWPSTATE },
/* id 8 is handled in common */
{ 9, CPU_FEATURE_CPB },
{ 10, CPU_FEATURE_APERFMPERF },
{ 11, CPU_FEATURE_PFI },
{ 12, CPU_FEATURE_PA },
};
if (raw->ext_cpuid[0][0] >= 0x80000001) {
match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data);
match_features(matchtable_ecx81, COUNT_OF(matchtable_ecx81), raw->ext_cpuid[1][2], data);
}
if (raw->ext_cpuid[0][0] >= 0x80000007)
match_features(matchtable_edx87, COUNT_OF(matchtable_edx87), raw->ext_cpuid[7][3], data);
if (raw->ext_cpuid[0][0] >= 0x8000001a) {
/* We have the extended info about SSE unit size */
data->detection_hints[CPU_HINT_SSE_SIZE_AUTH] = 1;
data->sse_size = (raw->ext_cpuid[0x1a][0] & 1) ? 128 : 64;
}
}
static void decode_amd_cache_info(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
int l3_result;
const int assoc_table[16] = {
0, 1, 2, 0, 4, 0, 8, 0, 16, 0, 32, 48, 64, 96, 128, 255
};
unsigned n = raw->ext_cpuid[0][0];
if (n >= 0x80000005) {
data->l1_data_cache = (raw->ext_cpuid[5][2] >> 24) & 0xff;
data->l1_assoc = (raw->ext_cpuid[5][2] >> 16) & 0xff;
data->l1_cacheline = (raw->ext_cpuid[5][2]) & 0xff;
data->l1_instruction_cache = (raw->ext_cpuid[5][3] >> 24) & 0xff;
}
if (n >= 0x80000006) {
data->l2_cache = (raw->ext_cpuid[6][2] >> 16) & 0xffff;
data->l2_assoc = assoc_table[(raw->ext_cpuid[6][2] >> 12) & 0xf];
data->l2_cacheline = (raw->ext_cpuid[6][2]) & 0xff;
l3_result = (raw->ext_cpuid[6][3] >> 18);
if (l3_result > 0) {
l3_result = 512 * l3_result; /* AMD spec says it's a range,
but we take the lower bound */
data->l3_cache = l3_result;
data->l3_assoc = assoc_table[(raw->ext_cpuid[6][3] >> 12) & 0xf];
data->l3_cacheline = (raw->ext_cpuid[6][3]) & 0xff;
} else {
data->l3_cache = 0;
}
}
}
static void decode_amd_number_of_cores(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
int logical_cpus = -1, num_cores = -1;
if (raw->basic_cpuid[0][0] >= 1) {
logical_cpus = (raw->basic_cpuid[1][1] >> 16) & 0xff;
if (raw->ext_cpuid[0][0] >= 8) {
num_cores = 1 + (raw->ext_cpuid[8][2] & 0xff);
}
}
if (data->flags[CPU_FEATURE_HT]) {
if (num_cores > 1) {
if (data->ext_family >= 23)
num_cores /= 2; // e.g., Ryzen 7 reports 16 "real" cores, but they are really just 8.
data->num_cores = num_cores;
data->num_logical_cpus = logical_cpus;
} else {
data->num_cores = 1;
data->num_logical_cpus = (logical_cpus >= 2 ? logical_cpus : 2);
}
} else {
data->num_cores = data->num_logical_cpus = 1;
}
}
int cpuid_identify_amd(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal)
{
load_amd_features(raw, data);
decode_amd_cache_info(raw, data);
decode_amd_number_of_cores(raw, data);
return 0;
}
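To make the field layout used by `decode_amd_cache_info()` concrete, a small self-contained sketch with a fabricated CPUID 0x80000006 ECX value (L2 size in KB in bits 31:16, associativity code in bits 15:12, line size in bits 7:0):

```C
/* Worked example of the AMD L2 decoding above; the register value is made up. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    static const int assoc_table[16] = {
        0, 1, 2, 0, 4, 0, 8, 0, 16, 0, 32, 48, 64, 96, 128, 255
    };
    uint32_t ecx = (512u << 16) | (8u << 12) | 64u; /* 512 KB, code 8, 64 B */
    printf("L2: %u KB, %d-way, %u-byte lines\n",
           (ecx >> 16) & 0xffff,           /* 512 */
           assoc_table[(ecx >> 12) & 0xf], /* code 8 -> 16-way */
           ecx & 0xff);                    /* 64 */
    return 0;
}
```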


@@ -1,31 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RECOG_AMD_H__
#define __RECOG_AMD_H__
int cpuid_identify_amd(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal);
#endif /* __RECOG_AMD_H__ */


@@ -1,543 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <ctype.h>
#include "libcpuid.h"
#include "libcpuid_util.h"
#include "libcpuid_internal.h"
#include "recog_intel.h"
const struct intel_bcode_str { intel_code_t code; char *str; } intel_bcode_str[] = {
#define CODE(x) { x, #x }
#define CODE2(x, y) CODE(x)
#include "intel_code_t.h"
#undef CODE
};
typedef struct {
int code;
uint64_t bits;
} intel_code_and_bits_t;
enum _intel_model_t {
UNKNOWN = -1,
_3000 = 100,
_3100,
_3200,
X3200,
_3300,
X3300,
_5100,
_5200,
_5300,
_5400,
_2xxx, /* Core i[357] 2xxx */
_3xxx, /* Core i[357] 3xxx */
};
typedef enum _intel_model_t intel_model_t;
static void load_intel_features(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
const struct feature_map_t matchtable_edx1[] = {
{ 18, CPU_FEATURE_PN },
{ 21, CPU_FEATURE_DTS },
{ 22, CPU_FEATURE_ACPI },
{ 27, CPU_FEATURE_SS },
{ 29, CPU_FEATURE_TM },
{ 30, CPU_FEATURE_IA64 },
{ 31, CPU_FEATURE_PBE },
};
const struct feature_map_t matchtable_ecx1[] = {
{ 2, CPU_FEATURE_DTS64 },
{ 4, CPU_FEATURE_DS_CPL },
{ 5, CPU_FEATURE_VMX },
{ 6, CPU_FEATURE_SMX },
{ 7, CPU_FEATURE_EST },
{ 8, CPU_FEATURE_TM2 },
{ 10, CPU_FEATURE_CID },
{ 14, CPU_FEATURE_XTPR },
{ 15, CPU_FEATURE_PDCM },
{ 18, CPU_FEATURE_DCA },
{ 21, CPU_FEATURE_X2APIC },
};
const struct feature_map_t matchtable_edx81[] = {
{ 20, CPU_FEATURE_XD },
};
const struct feature_map_t matchtable_ebx7[] = {
{ 2, CPU_FEATURE_SGX },
{ 4, CPU_FEATURE_HLE },
{ 11, CPU_FEATURE_RTM },
{ 16, CPU_FEATURE_AVX512F },
{ 17, CPU_FEATURE_AVX512DQ },
{ 18, CPU_FEATURE_RDSEED },
{ 19, CPU_FEATURE_ADX },
{ 26, CPU_FEATURE_AVX512PF },
{ 27, CPU_FEATURE_AVX512ER },
{ 28, CPU_FEATURE_AVX512CD },
{ 29, CPU_FEATURE_SHA_NI },
{ 30, CPU_FEATURE_AVX512BW },
{ 31, CPU_FEATURE_AVX512VL },
};
if (raw->basic_cpuid[0][0] >= 1) {
match_features(matchtable_edx1, COUNT_OF(matchtable_edx1), raw->basic_cpuid[1][3], data);
match_features(matchtable_ecx1, COUNT_OF(matchtable_ecx1), raw->basic_cpuid[1][2], data);
}
if (raw->ext_cpuid[0][0] >= 1) {
match_features(matchtable_edx81, COUNT_OF(matchtable_edx81), raw->ext_cpuid[1][3], data);
}
// detect TSX/AVX512:
if (raw->basic_cpuid[0][0] >= 7) {
match_features(matchtable_ebx7, COUNT_OF(matchtable_ebx7), raw->basic_cpuid[7][1], data);
}
}
enum _cache_type_t {
L1I,
L1D,
L2,
L3,
L4
};
typedef enum _cache_type_t cache_type_t;
static void check_case(uint8_t on, cache_type_t cache, int size, int assoc, int linesize, struct cpu_id_t* data)
{
if (!on) return;
switch (cache) {
case L1I:
data->l1_instruction_cache = size;
break;
case L1D:
data->l1_data_cache = size;
data->l1_assoc = assoc;
data->l1_cacheline = linesize;
break;
case L2:
data->l2_cache = size;
data->l2_assoc = assoc;
data->l2_cacheline = linesize;
break;
case L3:
data->l3_cache = size;
data->l3_assoc = assoc;
data->l3_cacheline = linesize;
break;
case L4:
data->l4_cache = size;
data->l4_assoc = assoc;
data->l4_cacheline = linesize;
break;
default:
break;
}
}
static void decode_intel_oldstyle_cache_info(struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
uint8_t f[256] = {0};
int reg, off;
uint32_t x;
for (reg = 0; reg < 4; reg++) {
x = raw->basic_cpuid[2][reg];
if (x & 0x80000000) continue;
for (off = 0; off < 4; off++) {
f[x & 0xff] = 1;
x >>= 8;
}
}
check_case(f[0x06], L1I, 8, 4, 32, data);
check_case(f[0x08], L1I, 16, 4, 32, data);
check_case(f[0x0A], L1D, 8, 2, 32, data);
check_case(f[0x0C], L1D, 16, 4, 32, data);
check_case(f[0x22], L3, 512, 4, 64, data);
check_case(f[0x23], L3, 1024, 8, 64, data);
check_case(f[0x25], L3, 2048, 8, 64, data);
check_case(f[0x29], L3, 4096, 8, 64, data);
check_case(f[0x2C], L1D, 32, 8, 64, data);
check_case(f[0x30], L1I, 32, 8, 64, data);
check_case(f[0x39], L2, 128, 4, 64, data);
check_case(f[0x3A], L2, 192, 6, 64, data);
check_case(f[0x3B], L2, 128, 2, 64, data);
check_case(f[0x3C], L2, 256, 4, 64, data);
check_case(f[0x3D], L2, 384, 6, 64, data);
check_case(f[0x3E], L2, 512, 4, 64, data);
check_case(f[0x41], L2, 128, 4, 32, data);
check_case(f[0x42], L2, 256, 4, 32, data);
check_case(f[0x43], L2, 512, 4, 32, data);
check_case(f[0x44], L2, 1024, 4, 32, data);
check_case(f[0x45], L2, 2048, 4, 32, data);
check_case(f[0x46], L3, 4096, 4, 64, data);
check_case(f[0x47], L3, 8192, 8, 64, data);
check_case(f[0x4A], L3, 6144, 12, 64, data);
check_case(f[0x4B], L3, 8192, 16, 64, data);
check_case(f[0x4C], L3, 12288, 12, 64, data);
check_case(f[0x4D], L3, 16384, 16, 64, data);
check_case(f[0x4E], L2, 6144, 24, 64, data);
check_case(f[0x60], L1D, 16, 8, 64, data);
check_case(f[0x66], L1D, 8, 4, 64, data);
check_case(f[0x67], L1D, 16, 4, 64, data);
check_case(f[0x68], L1D, 32, 4, 64, data);
/* The following four entries are trace cache. Intel does not
* specify a cache-line size, so we use -1 instead
*/
check_case(f[0x70], L1I, 12, 8, -1, data);
check_case(f[0x71], L1I, 16, 8, -1, data);
check_case(f[0x72], L1I, 32, 8, -1, data);
check_case(f[0x73], L1I, 64, 8, -1, data);
check_case(f[0x78], L2, 1024, 4, 64, data);
check_case(f[0x79], L2, 128, 8, 64, data);
check_case(f[0x7A], L2, 256, 8, 64, data);
check_case(f[0x7B], L2, 512, 8, 64, data);
check_case(f[0x7C], L2, 1024, 8, 64, data);
check_case(f[0x7D], L2, 2048, 8, 64, data);
check_case(f[0x7F], L2, 512, 2, 64, data);
check_case(f[0x82], L2, 256, 8, 32, data);
check_case(f[0x83], L2, 512, 8, 32, data);
check_case(f[0x84], L2, 1024, 8, 32, data);
check_case(f[0x85], L2, 2048, 8, 32, data);
check_case(f[0x86], L2, 512, 4, 64, data);
check_case(f[0x87], L2, 1024, 8, 64, data);
if (f[0x49]) {
/* This flag is overloaded with two meanings. On Xeon MP
* (family 0xf, model 0x6) this means L3 cache. On all other
* CPUs (notably Conroe et al), this is L2 cache. In both cases
* it means 4MB, 16-way associative, 64-byte line size.
*/
if (data->family == 0xf && data->model == 0x6) {
data->l3_cache = 4096;
data->l3_assoc = 16;
data->l3_cacheline = 64;
} else {
data->l2_cache = 4096;
data->l2_assoc = 16;
data->l2_cacheline = 64;
}
}
if (f[0x40]) {
/* Again, a special flag. It means:
* 1) If no L2 is specified, then CPU is w/o L2 (0 KB)
* 2) If L2 is specified by other flags, then the CPU is w/o L3.
*/
if (data->l2_cache == -1) {
data->l2_cache = 0;
} else {
data->l3_cache = 0;
}
}
}
static void decode_intel_deterministic_cache_info(struct cpu_raw_data_t* raw,
struct cpu_id_t* data)
{
int ecx;
int ways, partitions, linesize, sets, size, level, typenumber;
cache_type_t type;
for (ecx = 0; ecx < MAX_INTELFN4_LEVEL; ecx++) {
typenumber = raw->intel_fn4[ecx][0] & 0x1f;
if (typenumber == 0) break;
level = (raw->intel_fn4[ecx][0] >> 5) & 0x7;
if (level == 1 && typenumber == 1)
type = L1D;
else if (level == 1 && typenumber == 2)
type = L1I;
else if (level == 2 && typenumber == 3)
type = L2;
else if (level == 3 && typenumber == 3)
type = L3;
else if (level == 4 && typenumber == 3)
type = L4;
else {
continue;
}
ways = ((raw->intel_fn4[ecx][1] >> 22) & 0x3ff) + 1;
partitions = ((raw->intel_fn4[ecx][1] >> 12) & 0x3ff) + 1;
linesize = (raw->intel_fn4[ecx][1] & 0xfff) + 1;
sets = raw->intel_fn4[ecx][2] + 1;
size = ways * partitions * linesize * sets / 1024;
check_case(1, type, size, ways, linesize, data);
}
}
static int decode_intel_extended_topology(struct cpu_raw_data_t* raw,
struct cpu_id_t* data)
{
int i, level_type, num_smt = -1, num_core = -1;
for (i = 0; i < MAX_INTELFN11_LEVEL; i++) {
level_type = (raw->intel_fn11[i][2] & 0xff00) >> 8;
switch (level_type) {
case 0x01:
num_smt = raw->intel_fn11[i][1] & 0xffff;
break;
case 0x02:
num_core = raw->intel_fn11[i][1] & 0xffff;
break;
default:
break;
}
}
if (num_smt == -1 || num_core == -1) return 0;
data->num_logical_cpus = num_core;
data->num_cores = num_core / num_smt;
// make sure num_cores is at least 1. In VMs, the CPUID instruction
// is rigged and may give nonsensical results, but we should at least
// avoid outputs like data->num_cores == 0.
if (data->num_cores <= 0) data->num_cores = 1;
return 1;
}
static void decode_intel_number_of_cores(struct cpu_raw_data_t* raw,
struct cpu_id_t* data)
{
int logical_cpus = -1, num_cores = -1;
if (raw->basic_cpuid[0][0] >= 11) {
if (decode_intel_extended_topology(raw, data)) return;
}
if (raw->basic_cpuid[0][0] >= 1) {
logical_cpus = (raw->basic_cpuid[1][1] >> 16) & 0xff;
if (raw->basic_cpuid[0][0] >= 4) {
num_cores = 1 + ((raw->basic_cpuid[4][0] >> 26) & 0x3f);
}
}
if (data->flags[CPU_FEATURE_HT]) {
if (num_cores > 1) {
data->num_cores = num_cores;
data->num_logical_cpus = logical_cpus;
} else {
data->num_cores = 1;
data->num_logical_cpus = (logical_cpus >= 1 ? logical_cpus : 1);
if (data->num_logical_cpus == 1)
data->flags[CPU_FEATURE_HT] = 0;
}
} else {
data->num_cores = data->num_logical_cpus = 1;
}
}
static intel_code_and_bits_t get_brand_code_and_bits(struct cpu_id_t* data)
{
intel_code_t code = (intel_code_t) NC;
intel_code_and_bits_t result;
uint64_t bits = 0;
int i = 0;
const char* bs = data->brand_str;
const char* s;
const struct { intel_code_t c; const char *search; } matchtable[] = {
{ PENTIUM_M, "Pentium(R) M" },
{ CORE_SOLO, "Pentium(R) Dual CPU" },
{ CORE_SOLO, "Pentium(R) Dual-Core" },
{ PENTIUM_D, "Pentium(R) D" },
{ CORE_SOLO, "Genuine Intel(R) CPU" },
{ CORE_SOLO, "Intel(R) Core(TM)" },
{ DIAMONDVILLE, "CPU [N ][23]## " },
{ SILVERTHORNE, "CPU Z" },
{ PINEVIEW, "CPU [ND][45]## " },
{ CEDARVIEW, "CPU [ND]#### " },
};
const struct { uint64_t bit; const char* search; } bit_matchtable[] = {
{ XEON_, "Xeon" },
{ _MP_, " MP" },
{ ATOM_, "Atom(TM) CPU" },
{ MOBILE_, "Mobile" },
{ CELERON_, "Celeron" },
{ PENTIUM_, "Pentium" },
};
for (i = 0; i < COUNT_OF(bit_matchtable); i++) {
if (match_pattern(bs, bit_matchtable[i].search))
bits |= bit_matchtable[i].bit;
}
if ((i = match_pattern(bs, "Core(TM) [im][3579]")) != 0) {
bits |= CORE_;
i--;
switch (bs[i + 9]) {
case 'i': bits |= _I_; break;
case 'm': bits |= _M_; break;
}
switch (bs[i + 10]) {
case '3': bits |= _3; break;
case '5': bits |= _5; break;
case '7': bits |= _7; break;
case '9': bits |= _9; break;
}
}
for (i = 0; i < COUNT_OF(matchtable); i++)
if (match_pattern(bs, matchtable[i].search)) {
code = matchtable[i].c;
break;
}
if (bits & XEON_) {
if (match_pattern(bs, "W35##") || match_pattern(bs, "[ELXW]75##"))
bits |= _7;
else if (match_pattern(bs, "[ELXW]55##"))
code = GAINESTOWN;
else if (match_pattern(bs, "[ELXW]56##"))
code = WESTMERE;
else if (data->l3_cache > 0 && data->family == 16)
/* restrict by family, since later Xeons also have L3 ... */
code = IRWIN;
}
if (match_all(bits, XEON_ + _MP_) && data->l3_cache > 0)
code = POTOMAC;
if (code == CORE_SOLO) {
s = strstr(bs, "CPU");
if (s) {
s += 3;
while (*s == ' ') s++;
if (*s == 'T')
bits |= MOBILE_;
}
}
if (code == CORE_SOLO) {
switch (data->num_cores) {
case 1: break;
case 2:
{
code = CORE_DUO;
if (data->num_logical_cpus > 2)
code = DUAL_CORE_HT;
break;
}
case 4:
{
code = QUAD_CORE;
if (data->num_logical_cpus > 4)
code = QUAD_CORE_HT;
break;
}
default:
code = MORE_THAN_QUADCORE; break;
}
}
if (code == CORE_DUO && (bits & MOBILE_) && data->model != 14) {
if (data->ext_model < 23) {
code = MEROM;
} else {
code = PENRYN;
}
}
if (data->ext_model == 23 &&
(code == CORE_DUO || code == PENTIUM_D || (bits & CELERON_))) {
code = WOLFDALE;
}
result.code = code;
result.bits = bits;
return result;
}
static void decode_intel_sgx_features(const struct cpu_raw_data_t* raw, struct cpu_id_t* data)
{
struct cpu_epc_t epc;
int i;
if (raw->basic_cpuid[0][0] < 0x12) return; // no 12h leaf
if (raw->basic_cpuid[0x12][0] == 0) return; // no sub-leafs available, probably it's disabled by BIOS
// decode sub-leaf 0:
if (raw->basic_cpuid[0x12][0] & 1) data->sgx.flags[INTEL_SGX1] = 1;
if (raw->basic_cpuid[0x12][0] & 2) data->sgx.flags[INTEL_SGX2] = 1;
if (data->sgx.flags[INTEL_SGX1] || data->sgx.flags[INTEL_SGX2])
data->sgx.present = 1;
data->sgx.misc_select = raw->basic_cpuid[0x12][1];
data->sgx.max_enclave_32bit = (raw->basic_cpuid[0x12][3] ) & 0xff;
data->sgx.max_enclave_64bit = (raw->basic_cpuid[0x12][3] >> 8) & 0xff;
// decode sub-leaf 1:
data->sgx.secs_attributes = raw->intel_fn12h[1][0] | (((uint64_t) raw->intel_fn12h[1][1]) << 32);
data->sgx.secs_xfrm = raw->intel_fn12h[1][2] | (((uint64_t) raw->intel_fn12h[1][3]) << 32);
// decode higher-order subleafs, whenever present:
data->sgx.num_epc_sections = -1;
for (i = 0; i < 1000000; i++) {
epc = cpuid_get_epc(i, raw);
if (epc.length == 0) {
data->sgx.num_epc_sections = i;
break;
}
}
if (data->sgx.num_epc_sections == -1) {
data->sgx.num_epc_sections = 1000000;
}
}
struct cpu_epc_t cpuid_get_epc(int index, const struct cpu_raw_data_t* raw)
{
uint32_t regs[4];
struct cpu_epc_t retval = {0, 0};
if (raw && index < MAX_INTELFN12H_LEVEL - 2) {
// this was queried already, use the data:
memcpy(regs, raw->intel_fn12h[2 + index], sizeof(regs));
} else {
// query this ourselves:
regs[0] = 0x12;
regs[2] = 2 + index;
regs[1] = regs[3] = 0;
cpu_exec_cpuid_ext(regs);
}
// decode values:
if ((regs[0] & 0xf) == 0x1) {
retval.start_addr |= (regs[0] & 0xfffff000); // bits [12, 32) -> bits [12, 32)
retval.start_addr |= ((uint64_t) (regs[1] & 0x000fffff)) << 32; // bits [0, 20) -> bits [32, 52)
retval.length |= (regs[2] & 0xfffff000); // bits [12, 32) -> bits [12, 32)
retval.length |= ((uint64_t) (regs[3] & 0x000fffff)) << 32; // bits [0, 20) -> bits [32, 52)
}
return retval;
}
int cpuid_identify_intel(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal)
{
intel_code_and_bits_t brand;
load_intel_features(raw, data);
if (raw->basic_cpuid[0][0] >= 4) {
/* Deterministic way is preferred, being more generic */
decode_intel_deterministic_cache_info(raw, data);
} else if (raw->basic_cpuid[0][0] >= 2) {
decode_intel_oldstyle_cache_info(raw, data);
}
decode_intel_number_of_cores(raw, data);
brand = get_brand_code_and_bits(data);
internal->code.intel = brand.code;
internal->bits = brand.bits;
if (data->flags[CPU_FEATURE_SGX]) {
// if SGX is indicated by the CPU, verify its presence:
decode_intel_sgx_features(raw, data);
}
return 0;
}
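The size arithmetic in `decode_intel_deterministic_cache_info()` is worth a worked example; the leaf-4 field values below are fabricated but typical for a 32 KB L1 data cache:

```C
/* size_KB = ways * partitions * line_size * sets / 1024
 * (all four fields are +1-biased in the raw CPUID output). */
#include <stdio.h>

int main(void) {
    int ways = 8, partitions = 1, linesize = 64, sets = 64;
    printf("%d KB\n", ways * partitions * linesize * sets / 1024); /* 32 KB */
    return 0;
}
```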


@@ -1,31 +0,0 @@
/*
* Copyright 2008 Veselin Georgiev,
* anrieffNOSPAM @ mgail_DOT.com (convert to gmail)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RECOG_INTEL_H__
#define __RECOG_INTEL_H__
int cpuid_identify_intel(struct cpu_raw_data_t* raw, struct cpu_id_t* data, struct internal_id_info_t* internal);
#endif /*__RECOG_INTEL_H__*/


@@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 2.8)
cmake_minimum_required (VERSION 2.8.12)
project (ethash C)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os")

22
src/3rdparty/llhttp/LICENSE-MIT vendored Normal file

@@ -0,0 +1,22 @@
This software is licensed under the MIT License.
Copyright Fedor Indutny, 2018.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

135
src/3rdparty/llhttp/README.md vendored Normal file

@@ -0,0 +1,135 @@
# llhttp
[![CI](https://github.com/nodejs/llhttp/workflows/CI/badge.svg)](https://github.com/nodejs/llhttp/actions?query=workflow%3ACI)
Port of [http_parser][0] to [llparse][1].
## Why?
Let's face it, [http_parser][0] is practically unmaintainable. Even
the introduction of a single new method results in significant code churn.
This project aims to:
* Make it maintainable
* Make it verifiable
* Improve benchmarks where possible
More details in [Fedor Indutny's talk at JSConf EU 2019](https://youtu.be/x3k_5Mi66sY)
## How?
Over time, different approaches for improving [http_parser][0]'s code base
were tried. However, all of them failed because they introduced significant
performance degradation.
This project is a port of [http_parser][0] to TypeScript. [llparse][1] is used
to generate the output C source file, which can then be compiled and
linked with the embedder's program (like [Node.js][7]).
## Performance
So far llhttp outperforms http_parser:
| | input size | bandwidth | reqs/sec | time |
|:----------------|-----------:|-------------:|-----------:|--------:|
| **llhttp** | 8192.00 mb | 1777.24 mb/s | 3583799.39 req/sec | 4.61 s |
| **http_parser** | 8192.00 mb | 694.66 mb/s | 1406180.33 req/sec | 11.79 s |
llhttp is faster by approximately **156%**.
## Maintenance
The llhttp project has about 1400 lines of TypeScript code describing the
parser itself and around 450 lines of C code and headers providing the helper
methods. The whole [http_parser][0] is implemented in approximately 2500 lines
of C, plus 436 lines of headers.
All optimizations and multi-character matching in llhttp are generated
automatically, and thus don't add any extra maintenance cost. On the contrary,
most of http_parser's code is hand-optimized and unrolled. Instead of
describing "how" it should parse HTTP requests/responses, a maintainer has to
implement new features in [http_parser][0] cautiously, considering possible
performance degradation and manually optimizing the new code.
## Verification
The state machine graph is encoded explicitly in llhttp. [llparse][1]
automatically checks the graph for the absence of loops and for correct
reporting of input ranges (spans) such as header names and values. In the
future, additional checks could be performed for even stricter verification of
llhttp.
## Usage
```C
#include "llhttp.h"
llhttp_t parser;
llhttp_settings_t settings;
/* Initialize user callbacks and settings */
llhttp_settings_init(&settings);
/* Set user callback */
settings.on_message_complete = handle_on_message_complete;
/* Initialize the parser in HTTP_BOTH mode, meaning that it will select between
* HTTP_REQUEST and HTTP_RESPONSE parsing automatically while reading the first
* input.
*/
llhttp_init(&parser, HTTP_BOTH, &settings);
/* Parse request! */
const char* request = "GET / HTTP/1.1\r\n\r\n";
int request_len = strlen(request);
enum llhttp_errno err = llhttp_execute(&parser, request, request_len);
if (err == HPE_OK) {
/* Successfully parsed! */
} else {
fprintf(stderr, "Parse error: %s %s\n", llhttp_errno_name(err),
parser.reason);
}
```
---
### Bindings to other languages
* Python: [pallas/pyllhttp][8]
* Ruby: [metabahn/llhttp][9]
#### LICENSE
This software is licensed under the MIT License.
Copyright Fedor Indutny, 2018.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
[0]: https://github.com/nodejs/http-parser
[1]: https://github.com/nodejs/llparse
[2]: https://en.wikipedia.org/wiki/Register_allocation#Spilling
[3]: https://en.wikipedia.org/wiki/Tail_call
[4]: https://llvm.org/docs/LangRef.html
[5]: https://llvm.org/docs/LangRef.html#call-instruction
[6]: https://clang.llvm.org/
[7]: https://github.com/nodejs/node
[8]: https://github.com/pallas/pyllhttp
[9]: https://github.com/metabahn/llhttp

348
src/3rdparty/llhttp/api.c vendored Normal file

@@ -0,0 +1,348 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "llhttp.h"
#define CALLBACK_MAYBE(PARSER, NAME, ...) \
do { \
const llhttp_settings_t* settings; \
settings = (const llhttp_settings_t*) (PARSER)->settings; \
if (settings == NULL || settings->NAME == NULL) { \
err = 0; \
break; \
} \
err = settings->NAME(__VA_ARGS__); \
} while (0)
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings) {
llhttp__internal_init(parser);
parser->type = type;
parser->settings = (void*) settings;
}
#if defined(__wasm__)
extern int wasm_on_message_begin(llhttp_t * p);
extern int wasm_on_url(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_status(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_field(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_value(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_headers_complete(llhttp_t * p);
extern int wasm_on_body(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_message_complete(llhttp_t * p);
const llhttp_settings_t wasm_settings = {
wasm_on_message_begin,
wasm_on_url,
wasm_on_status,
wasm_on_header_field,
wasm_on_header_value,
wasm_on_headers_complete,
wasm_on_body,
wasm_on_message_complete,
NULL,
NULL,
};
llhttp_t* llhttp_alloc(llhttp_type_t type) {
llhttp_t* parser = malloc(sizeof(llhttp_t));
llhttp_init(parser, type, &wasm_settings);
return parser;
}
void llhttp_free(llhttp_t* parser) {
free(parser);
}
/* Some getters required to get stuff from the parser */
uint8_t llhttp_get_type(llhttp_t* parser) {
return parser->type;
}
uint8_t llhttp_get_http_major(llhttp_t* parser) {
return parser->http_major;
}
uint8_t llhttp_get_http_minor(llhttp_t* parser) {
return parser->http_minor;
}
uint8_t llhttp_get_method(llhttp_t* parser) {
return parser->method;
}
int llhttp_get_status_code(llhttp_t* parser) {
return parser->status_code;
}
uint8_t llhttp_get_upgrade(llhttp_t* parser) {
return parser->upgrade;
}
#endif // defined(__wasm__)
void llhttp_reset(llhttp_t* parser) {
llhttp_type_t type = parser->type;
const llhttp_settings_t* settings = parser->settings;
void* data = parser->data;
uint8_t lenient_flags = parser->lenient_flags;
llhttp__internal_init(parser);
parser->type = type;
parser->settings = (void*) settings;
parser->data = data;
parser->lenient_flags = lenient_flags;
}
llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len) {
return llhttp__internal_execute(parser, data, data + len);
}
void llhttp_settings_init(llhttp_settings_t* settings) {
memset(settings, 0, sizeof(*settings));
}
llhttp_errno_t llhttp_finish(llhttp_t* parser) {
int err;
/* We're in an error state. Don't bother doing anything. */
if (parser->error != 0) {
return 0;
}
switch (parser->finish) {
case HTTP_FINISH_SAFE_WITH_CB:
CALLBACK_MAYBE(parser, on_message_complete, parser);
if (err != HPE_OK) return err;
/* FALLTHROUGH */
case HTTP_FINISH_SAFE:
return HPE_OK;
case HTTP_FINISH_UNSAFE:
parser->reason = "Invalid EOF state";
return HPE_INVALID_EOF_STATE;
default:
abort();
}
}
void llhttp_pause(llhttp_t* parser) {
if (parser->error != HPE_OK) {
return;
}
parser->error = HPE_PAUSED;
parser->reason = "Paused";
}
void llhttp_resume(llhttp_t* parser) {
if (parser->error != HPE_PAUSED) {
return;
}
parser->error = 0;
}
void llhttp_resume_after_upgrade(llhttp_t* parser) {
if (parser->error != HPE_PAUSED_UPGRADE) {
return;
}
parser->error = 0;
}
llhttp_errno_t llhttp_get_errno(const llhttp_t* parser) {
return parser->error;
}
const char* llhttp_get_error_reason(const llhttp_t* parser) {
return parser->reason;
}
void llhttp_set_error_reason(llhttp_t* parser, const char* reason) {
parser->reason = reason;
}
const char* llhttp_get_error_pos(const llhttp_t* parser) {
return parser->error_pos;
}
const char* llhttp_errno_name(llhttp_errno_t err) {
#define HTTP_ERRNO_GEN(CODE, NAME, _) case HPE_##NAME: return "HPE_" #NAME;
switch (err) {
HTTP_ERRNO_MAP(HTTP_ERRNO_GEN)
default: abort();
}
#undef HTTP_ERRNO_GEN
}
const char* llhttp_method_name(llhttp_method_t method) {
#define HTTP_METHOD_GEN(NUM, NAME, STRING) case HTTP_##NAME: return #STRING;
switch (method) {
HTTP_METHOD_MAP(HTTP_METHOD_GEN)
default: abort();
}
#undef HTTP_METHOD_GEN
}
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_HEADERS;
} else {
parser->lenient_flags &= ~LENIENT_HEADERS;
}
}
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_CHUNKED_LENGTH;
} else {
parser->lenient_flags &= ~LENIENT_CHUNKED_LENGTH;
}
}
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_KEEP_ALIVE;
} else {
parser->lenient_flags &= ~LENIENT_KEEP_ALIVE;
}
}
/* Callbacks */
int llhttp__on_message_begin(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_begin, s);
return err;
}
int llhttp__on_url(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_url, s, p, endp - p);
return err;
}
int llhttp__on_url_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_url_complete, s);
return err;
}
int llhttp__on_status(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_status, s, p, endp - p);
return err;
}
int llhttp__on_status_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_status_complete, s);
return err;
}
int llhttp__on_header_field(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_field, s, p, endp - p);
return err;
}
int llhttp__on_header_field_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_field_complete, s);
return err;
}
int llhttp__on_header_value(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_value, s, p, endp - p);
return err;
}
int llhttp__on_header_value_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_value_complete, s);
return err;
}
int llhttp__on_headers_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_headers_complete, s);
return err;
}
int llhttp__on_message_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_complete, s);
return err;
}
int llhttp__on_body(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_body, s, p, endp - p);
return err;
}
int llhttp__on_chunk_header(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_header, s);
return err;
}
int llhttp__on_chunk_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_complete, s);
return err;
}
/* Private */
void llhttp__debug(llhttp_t* s, const char* p, const char* endp,
const char* msg) {
if (p == endp) {
fprintf(stderr, "p=%p type=%d flags=%02x next=null debug=%s\n", s, s->type,
s->flags, msg);
} else {
fprintf(stderr, "p=%p type=%d flags=%02x next=%02x debug=%s\n", s,
s->type, s->flags, *p, msg);
}
}
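A minimal sketch (not part of upstream llhttp) of the pause/resume contract these functions implement: the callback pauses once the headers are in, and the caller resumes from the reported position:

```C
#include <stdio.h>
#include <string.h>
#include "llhttp.h"

static int on_headers_complete_cb(llhttp_t* p) {
    (void) p;
    return HPE_PAUSED; /* user callbacks request a pause by returning HPE_PAUSED */
}

int main(void) {
    llhttp_t parser;
    llhttp_settings_t settings;
    llhttp_settings_init(&settings);
    settings.on_headers_complete = on_headers_complete_cb;
    llhttp_init(&parser, HTTP_REQUEST, &settings);

    const char* req = "GET / HTTP/1.1\r\nHost: x\r\n\r\n";
    size_t len = strlen(req);
    llhttp_errno_t err = llhttp_execute(&parser, req, len);
    if (err == HPE_PAUSED) {
        const char* pos = llhttp_get_error_pos(&parser); /* where parsing stopped */
        llhttp_resume(&parser);
        err = llhttp_execute(&parser, pos, (size_t) (req + len - pos));
    }
    printf("%s\n", llhttp_errno_name(err)); /* HPE_OK once the message is done */
    return 0;
}
```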

253
src/3rdparty/llhttp/api.h vendored Normal file

@@ -0,0 +1,253 @@
#ifndef INCLUDE_LLHTTP_API_H_
#define INCLUDE_LLHTTP_API_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#if defined(__wasm__)
#define LLHTTP_EXPORT __attribute__((visibility("default")))
#else
#define LLHTTP_EXPORT
#endif
typedef llhttp__internal_t llhttp_t;
typedef struct llhttp_settings_s llhttp_settings_t;
typedef int (*llhttp_data_cb)(llhttp_t*, const char *at, size_t length);
typedef int (*llhttp_cb)(llhttp_t*);
struct llhttp_settings_s {
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_begin;
llhttp_data_cb on_url;
llhttp_data_cb on_status;
llhttp_data_cb on_header_field;
llhttp_data_cb on_header_value;
/* Possible return values:
* 0 - Proceed normally
* 1 - Assume that request/response has no body, and proceed to parsing the
* next message
* 2 - Assume absence of body (as above) and make `llhttp_execute()` return
* `HPE_PAUSED_UPGRADE`
* -1 - Error
* `HPE_PAUSED`
*/
llhttp_cb on_headers_complete;
llhttp_data_cb on_body;
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
* Possible return values 0, -1, `HPE_PAUSED`
*/
llhttp_cb on_chunk_header;
llhttp_cb on_chunk_complete;
llhttp_cb on_url_complete;
llhttp_cb on_status_complete;
llhttp_cb on_header_field_complete;
llhttp_cb on_header_value_complete;
};
/* Initialize the parser with specific type and user settings.
*
* NOTE: lifetime of `settings` has to be at least the same as the lifetime of
* the `parser` here. In practice, `settings` has to be either a static
* variable or be allocated with `malloc`, `new`, etc.
*/
LLHTTP_EXPORT
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings);
#if defined(__wasm__)
LLHTTP_EXPORT
llhttp_t* llhttp_alloc(llhttp_type_t type);
LLHTTP_EXPORT
void llhttp_free(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_type(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_major(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_minor(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_method(llhttp_t* parser);
LLHTTP_EXPORT
int llhttp_get_status_code(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_upgrade(llhttp_t* parser);
#endif // defined(__wasm__)
/* Reset an already initialized parser back to the start state, preserving the
* existing parser type, callback settings, user data, and lenient flags.
*/
LLHTTP_EXPORT
void llhttp_reset(llhttp_t* parser);
/* Initialize the settings object */
LLHTTP_EXPORT
void llhttp_settings_init(llhttp_settings_t* settings);
/* Parse full or partial request/response, invoking user callbacks along the
* way.
*
* If any of the `llhttp_data_cb` callbacks returns an errno not equal to
* `HPE_OK`, parsing is interrupted and that errno is returned from
* `llhttp_execute()`. If `HPE_PAUSED` was used as the errno, execution can be
* resumed with a `llhttp_resume()` call.
*
* In a special case of CONNECT/Upgrade request/response `HPE_PAUSED_UPGRADE`
* is returned after fully parsing the request/response. If the user wishes to
* continue parsing, they need to invoke `llhttp_resume_after_upgrade()`.
*
* NOTE: if this function ever returns a non-pause type error, it will continue
* to return the same error upon each successive call up until `llhttp_init()`
* is called.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len);
/* This method should be called when the other side has no further bytes to
* send (e.g. shutdown of readable side of the TCP connection.)
*
* Requests without `Content-Length` and other messages might require treating
* all incoming bytes as part of the body, up to the last byte of the
* connection. This method will invoke the `on_message_complete()` callback if
* the request was terminated safely. Otherwise an error code is returned.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_finish(llhttp_t* parser);
/* Returns `1` if the incoming message is parsed until the last byte, and has
* to be completed by calling `llhttp_finish()` on EOF
*/
LLHTTP_EXPORT
int llhttp_message_needs_eof(const llhttp_t* parser);
/* Returns `1` if there might be any other messages following the last that was
* successfully parsed.
*/
LLHTTP_EXPORT
int llhttp_should_keep_alive(const llhttp_t* parser);
/* Make further calls of `llhttp_execute()` return `HPE_PAUSED` and set
* appropriate error reason.
*
* Important: do not call this from user callbacks! User callbacks must return
* `HPE_PAUSED` if pausing is required.
*/
LLHTTP_EXPORT
void llhttp_pause(llhttp_t* parser);
/* Might be called to resume the execution after the pause in user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED`.
*/
LLHTTP_EXPORT
void llhttp_resume(llhttp_t* parser);
/* Might be called to resume the execution after the pause in user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED_UPGRADE`
*/
LLHTTP_EXPORT
void llhttp_resume_after_upgrade(llhttp_t* parser);
/* Returns the latest return error */
LLHTTP_EXPORT
llhttp_errno_t llhttp_get_errno(const llhttp_t* parser);
/* Returns the verbal explanation of the latest returned error.
*
* Note: User callback should set error reason when returning the error. See
* `llhttp_set_error_reason()` for details.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_reason(const llhttp_t* parser);
/* Assign verbal description to the returned error. Must be called in user
* callbacks right before returning the errno.
*
* Note: `HPE_USER` error code might be useful in user callbacks.
*/
LLHTTP_EXPORT
void llhttp_set_error_reason(llhttp_t* parser, const char* reason);
/* Returns the pointer to the last parsed byte before the returned error. The
* pointer is relative to the `data` argument of `llhttp_execute()`.
*
* Note: this method might be useful for counting the number of parsed bytes.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_pos(const llhttp_t* parser);
/* Returns textual name of error code */
LLHTTP_EXPORT
const char* llhttp_errno_name(llhttp_errno_t err);
/* Returns textual name of HTTP method */
LLHTTP_EXPORT
const char* llhttp_method_name(llhttp_method_t method);
/* Enables/disables lenient header value parsing (disabled by default).
*
* Lenient parsing disables header value token checks, extending llhttp's
* protocol support to highly non-compliant clients/servers. No
* `HPE_INVALID_HEADER_TOKEN` will be raised for incorrect header values when
* lenient parsing is "on".
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of conflicting `Transfer-Encoding` and
* `Content-Length` headers (disabled by default).
*
* Normally `llhttp` would error when `Transfer-Encoding` is present in
* conjunction with `Content-Length`. This error is important to prevent HTTP
* request smuggling, but may be less desirable for a small number of cases
* involving legacy servers.
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of `Connection: close` and HTTP/1.0
* requests/responses.
*
* Normally `llhttp` would error on (in strict mode) or discard (in loose mode)
* the HTTP request/response after the request/response with `Connection: close`
* and `Content-Length`. This is important to prevent cache poisoning attacks,
* but might interact badly with outdated and insecure clients. With this flag
* the extra request/response will be parsed normally.
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INCLUDE_LLHTTP_API_H_ */
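Following the `llhttp_finish()` notes above, a hedged end-to-end sketch for a response whose body is delimited by connection EOF (HTTP/1.0, no `Content-Length`):

```C
#include <stdio.h>
#include <string.h>
#include "llhttp.h"

int main(void) {
    llhttp_t parser;
    llhttp_settings_t settings;
    llhttp_settings_init(&settings); /* no callbacks needed for this sketch */
    llhttp_init(&parser, HTTP_RESPONSE, &settings);

    const char* resp = "HTTP/1.0 200 OK\r\n\r\npartial body";
    llhttp_execute(&parser, resp, strlen(resp));
    /* ...the peer closes the connection here (EOF on the socket)... */
    llhttp_errno_t err = llhttp_finish(&parser);
    printf("%s\n", llhttp_errno_name(err)); /* HPE_OK: message ended safely */
    return 0;
}
```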

149
src/3rdparty/llhttp/http.c vendored Normal file

@@ -0,0 +1,149 @@
#include <stdio.h>
#ifndef LLHTTP__TEST
# include "llhttp.h"
#else
# define llhttp_t llparse_t
#endif /* LLHTTP__TEST */
int llhttp_message_needs_eof(const llhttp_t* parser);
int llhttp_should_keep_alive(const llhttp_t* parser);
int llhttp__before_headers_complete(llhttp_t* parser, const char* p,
const char* endp) {
/* Set this here so that on_headers_complete() callbacks can see it */
if ((parser->flags & F_UPGRADE) &&
(parser->flags & F_CONNECTION_UPGRADE)) {
/* For responses, "Upgrade: foo" and "Connection: upgrade" are
* mandatory only when it is a 101 Switching Protocols response,
* otherwise it is purely informational, to announce support.
*/
parser->upgrade =
(parser->type == HTTP_REQUEST || parser->status_code == 101);
} else {
parser->upgrade = (parser->method == HTTP_CONNECT);
}
return 0;
}
/* Return values:
* 0 - No body, `restart`, message_complete
* 1 - CONNECT request, `restart`, message_complete, and pause
* 2 - chunk_size_start
* 3 - body_identity
* 4 - body_identity_eof
* 5 - invalid transfer-encoding for request
*/
int llhttp__after_headers_complete(llhttp_t* parser, const char* p,
const char* endp) {
int hasBody;
hasBody = parser->flags & F_CHUNKED || parser->content_length > 0;
if (parser->upgrade && (parser->method == HTTP_CONNECT ||
(parser->flags & F_SKIPBODY) || !hasBody)) {
/* Exit, the rest of the message is in a different protocol. */
return 1;
}
if (parser->flags & F_SKIPBODY) {
return 0;
} else if (parser->flags & F_CHUNKED) {
/* chunked encoding - ignore Content-Length header, prepare for a chunk */
return 2;
} else if (parser->flags & F_TRANSFER_ENCODING) {
if (parser->type == HTTP_REQUEST &&
(parser->lenient_flags & LENIENT_CHUNKED_LENGTH) == 0) {
/* RFC 7230 3.3.3 */
/* If a Transfer-Encoding header field
* is present in a request and the chunked transfer coding is not
* the final encoding, the message body length cannot be determined
* reliably; the server MUST respond with the 400 (Bad Request)
* status code and then close the connection.
*/
return 5;
} else {
/* RFC 7230 3.3.3 */
/* If a Transfer-Encoding header field is present in a response and
* the chunked transfer coding is not the final encoding, the
* message body length is determined by reading the connection until
* it is closed by the server.
*/
return 4;
}
} else {
if (!(parser->flags & F_CONTENT_LENGTH)) {
if (!llhttp_message_needs_eof(parser)) {
/* Assume content-length 0 - read the next */
return 0;
} else {
/* Read body until EOF */
return 4;
}
} else if (parser->content_length == 0) {
/* Content-Length header given but zero: Content-Length: 0\r\n */
return 0;
} else {
/* Content-Length header given and non-zero */
return 3;
}
}
}
int llhttp__after_message_complete(llhttp_t* parser, const char* p,
const char* endp) {
int should_keep_alive;
should_keep_alive = llhttp_should_keep_alive(parser);
parser->finish = HTTP_FINISH_SAFE;
parser->flags = 0;
/* NOTE: this is ignored in loose parsing mode */
return should_keep_alive;
}
int llhttp_message_needs_eof(const llhttp_t* parser) {
if (parser->type == HTTP_REQUEST) {
return 0;
}
/* See RFC 2616 section 4.4 */
if (parser->status_code / 100 == 1 || /* 1xx e.g. Continue */
parser->status_code == 204 || /* No Content */
parser->status_code == 304 || /* Not Modified */
(parser->flags & F_SKIPBODY)) { /* response to a HEAD request */
return 0;
}
/* RFC 7230 3.3.3, see `llhttp__after_headers_complete` */
if ((parser->flags & F_TRANSFER_ENCODING) &&
(parser->flags & F_CHUNKED) == 0) {
return 1;
}
if (parser->flags & (F_CHUNKED | F_CONTENT_LENGTH)) {
return 0;
}
return 1;
}
int llhttp_should_keep_alive(const llhttp_t* parser) {
if (parser->http_major > 0 && parser->http_minor > 0) {
/* HTTP/1.1 */
if (parser->flags & F_CONNECTION_CLOSE) {
return 0;
}
} else {
/* HTTP/1.0 or earlier */
if (!(parser->flags & F_CONNECTION_KEEP_ALIVE)) {
return 0;
}
}
return !llhttp_message_needs_eof(parser);
}
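llhttp_message_needs_eof() and llhttp_should_keep_alive() above are the two predicates that drive connection teardown. A sketch of the read-side logic they enable, assuming an external event loop delivers buffers (socket handling not shown):
#include <stddef.h>
#include "llhttp.h"

/* Invoked with len == 0 when the peer shuts down its writable side. */
void on_read(llhttp_t *parser, const char *buf, size_t len) {
    if (len == 0) {
        llhttp_finish(parser);    /* may still fire on_message_complete() */
        return;
    }
    llhttp_execute(parser, buf, len);
}

/* The connection is reusable only when no EOF is pending and keep-alive holds. */
int can_reuse_connection(const llhttp_t *parser) {
    return !llhttp_message_needs_eof(parser) && llhttp_should_keep_alive(parser);
}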

14926
src/3rdparty/llhttp/llhttp.c vendored Normal file

File diff suppressed because it is too large

508
src/3rdparty/llhttp/llhttp.h vendored Normal file

@@ -0,0 +1,508 @@
#ifndef INCLUDE_LLHTTP_H_
#define INCLUDE_LLHTTP_H_
#define LLHTTP_VERSION_MAJOR 5
#define LLHTTP_VERSION_MINOR 1
#define LLHTTP_VERSION_PATCH 0
#ifndef LLHTTP_STRICT_MODE
# define LLHTTP_STRICT_MODE 0
#endif
#ifndef INCLUDE_LLHTTP_ITSELF_H_
#define INCLUDE_LLHTTP_ITSELF_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
typedef struct llhttp__internal_s llhttp__internal_t;
struct llhttp__internal_s {
int32_t _index;
void* _span_pos0;
void* _span_cb0;
int32_t error;
const char* reason;
const char* error_pos;
void* data;
void* _current;
uint64_t content_length;
uint8_t type;
uint8_t method;
uint8_t http_major;
uint8_t http_minor;
uint8_t header_state;
uint8_t lenient_flags;
uint8_t upgrade;
uint8_t finish;
uint16_t flags;
uint16_t status_code;
void* settings;
};
int llhttp__internal_init(llhttp__internal_t* s);
int llhttp__internal_execute(llhttp__internal_t* s, const char* p, const char* endp);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INCLUDE_LLHTTP_ITSELF_H_ */
#ifndef LLLLHTTP_C_HEADERS_
#define LLLLHTTP_C_HEADERS_
#ifdef __cplusplus
extern "C" {
#endif
enum llhttp_errno {
HPE_OK = 0,
HPE_INTERNAL = 1,
HPE_STRICT = 2,
HPE_LF_EXPECTED = 3,
HPE_UNEXPECTED_CONTENT_LENGTH = 4,
HPE_CLOSED_CONNECTION = 5,
HPE_INVALID_METHOD = 6,
HPE_INVALID_URL = 7,
HPE_INVALID_CONSTANT = 8,
HPE_INVALID_VERSION = 9,
HPE_INVALID_HEADER_TOKEN = 10,
HPE_INVALID_CONTENT_LENGTH = 11,
HPE_INVALID_CHUNK_SIZE = 12,
HPE_INVALID_STATUS = 13,
HPE_INVALID_EOF_STATE = 14,
HPE_INVALID_TRANSFER_ENCODING = 15,
HPE_CB_MESSAGE_BEGIN = 16,
HPE_CB_HEADERS_COMPLETE = 17,
HPE_CB_MESSAGE_COMPLETE = 18,
HPE_CB_CHUNK_HEADER = 19,
HPE_CB_CHUNK_COMPLETE = 20,
HPE_PAUSED = 21,
HPE_PAUSED_UPGRADE = 22,
HPE_PAUSED_H2_UPGRADE = 23,
HPE_USER = 24
};
typedef enum llhttp_errno llhttp_errno_t;
enum llhttp_flags {
F_CONNECTION_KEEP_ALIVE = 0x1,
F_CONNECTION_CLOSE = 0x2,
F_CONNECTION_UPGRADE = 0x4,
F_CHUNKED = 0x8,
F_UPGRADE = 0x10,
F_CONTENT_LENGTH = 0x20,
F_SKIPBODY = 0x40,
F_TRAILING = 0x80,
F_TRANSFER_ENCODING = 0x200
};
typedef enum llhttp_flags llhttp_flags_t;
enum llhttp_lenient_flags {
LENIENT_HEADERS = 0x1,
LENIENT_CHUNKED_LENGTH = 0x2,
LENIENT_KEEP_ALIVE = 0x4
};
typedef enum llhttp_lenient_flags llhttp_lenient_flags_t;
enum llhttp_type {
HTTP_BOTH = 0,
HTTP_REQUEST = 1,
HTTP_RESPONSE = 2
};
typedef enum llhttp_type llhttp_type_t;
enum llhttp_finish {
HTTP_FINISH_SAFE = 0,
HTTP_FINISH_SAFE_WITH_CB = 1,
HTTP_FINISH_UNSAFE = 2
};
typedef enum llhttp_finish llhttp_finish_t;
enum llhttp_method {
HTTP_DELETE = 0,
HTTP_GET = 1,
HTTP_HEAD = 2,
HTTP_POST = 3,
HTTP_PUT = 4,
HTTP_CONNECT = 5,
HTTP_OPTIONS = 6,
HTTP_TRACE = 7,
HTTP_COPY = 8,
HTTP_LOCK = 9,
HTTP_MKCOL = 10,
HTTP_MOVE = 11,
HTTP_PROPFIND = 12,
HTTP_PROPPATCH = 13,
HTTP_SEARCH = 14,
HTTP_UNLOCK = 15,
HTTP_BIND = 16,
HTTP_REBIND = 17,
HTTP_UNBIND = 18,
HTTP_ACL = 19,
HTTP_REPORT = 20,
HTTP_MKACTIVITY = 21,
HTTP_CHECKOUT = 22,
HTTP_MERGE = 23,
HTTP_MSEARCH = 24,
HTTP_NOTIFY = 25,
HTTP_SUBSCRIBE = 26,
HTTP_UNSUBSCRIBE = 27,
HTTP_PATCH = 28,
HTTP_PURGE = 29,
HTTP_MKCALENDAR = 30,
HTTP_LINK = 31,
HTTP_UNLINK = 32,
HTTP_SOURCE = 33,
HTTP_PRI = 34,
HTTP_DESCRIBE = 35,
HTTP_ANNOUNCE = 36,
HTTP_SETUP = 37,
HTTP_PLAY = 38,
HTTP_PAUSE = 39,
HTTP_TEARDOWN = 40,
HTTP_GET_PARAMETER = 41,
HTTP_SET_PARAMETER = 42,
HTTP_REDIRECT = 43,
HTTP_RECORD = 44,
HTTP_FLUSH = 45
};
typedef enum llhttp_method llhttp_method_t;
#define HTTP_ERRNO_MAP(XX) \
XX(0, OK, OK) \
XX(1, INTERNAL, INTERNAL) \
XX(2, STRICT, STRICT) \
XX(3, LF_EXPECTED, LF_EXPECTED) \
XX(4, UNEXPECTED_CONTENT_LENGTH, UNEXPECTED_CONTENT_LENGTH) \
XX(5, CLOSED_CONNECTION, CLOSED_CONNECTION) \
XX(6, INVALID_METHOD, INVALID_METHOD) \
XX(7, INVALID_URL, INVALID_URL) \
XX(8, INVALID_CONSTANT, INVALID_CONSTANT) \
XX(9, INVALID_VERSION, INVALID_VERSION) \
XX(10, INVALID_HEADER_TOKEN, INVALID_HEADER_TOKEN) \
XX(11, INVALID_CONTENT_LENGTH, INVALID_CONTENT_LENGTH) \
XX(12, INVALID_CHUNK_SIZE, INVALID_CHUNK_SIZE) \
XX(13, INVALID_STATUS, INVALID_STATUS) \
XX(14, INVALID_EOF_STATE, INVALID_EOF_STATE) \
XX(15, INVALID_TRANSFER_ENCODING, INVALID_TRANSFER_ENCODING) \
XX(16, CB_MESSAGE_BEGIN, CB_MESSAGE_BEGIN) \
XX(17, CB_HEADERS_COMPLETE, CB_HEADERS_COMPLETE) \
XX(18, CB_MESSAGE_COMPLETE, CB_MESSAGE_COMPLETE) \
XX(19, CB_CHUNK_HEADER, CB_CHUNK_HEADER) \
XX(20, CB_CHUNK_COMPLETE, CB_CHUNK_COMPLETE) \
XX(21, PAUSED, PAUSED) \
XX(22, PAUSED_UPGRADE, PAUSED_UPGRADE) \
XX(23, PAUSED_H2_UPGRADE, PAUSED_H2_UPGRADE) \
XX(24, USER, USER) \

#define HTTP_METHOD_MAP(XX) \
XX(0, DELETE, DELETE) \
XX(1, GET, GET) \
XX(2, HEAD, HEAD) \
XX(3, POST, POST) \
XX(4, PUT, PUT) \
XX(5, CONNECT, CONNECT) \
XX(6, OPTIONS, OPTIONS) \
XX(7, TRACE, TRACE) \
XX(8, COPY, COPY) \
XX(9, LOCK, LOCK) \
XX(10, MKCOL, MKCOL) \
XX(11, MOVE, MOVE) \
XX(12, PROPFIND, PROPFIND) \
XX(13, PROPPATCH, PROPPATCH) \
XX(14, SEARCH, SEARCH) \
XX(15, UNLOCK, UNLOCK) \
XX(16, BIND, BIND) \
XX(17, REBIND, REBIND) \
XX(18, UNBIND, UNBIND) \
XX(19, ACL, ACL) \
XX(20, REPORT, REPORT) \
XX(21, MKACTIVITY, MKACTIVITY) \
XX(22, CHECKOUT, CHECKOUT) \
XX(23, MERGE, MERGE) \
XX(24, MSEARCH, M-SEARCH) \
XX(25, NOTIFY, NOTIFY) \
XX(26, SUBSCRIBE, SUBSCRIBE) \
XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \
XX(28, PATCH, PATCH) \
XX(29, PURGE, PURGE) \
XX(30, MKCALENDAR, MKCALENDAR) \
XX(31, LINK, LINK) \
XX(32, UNLINK, UNLINK) \
XX(33, SOURCE, SOURCE) \
XX(34, PRI, PRI) \
XX(35, DESCRIBE, DESCRIBE) \
XX(36, ANNOUNCE, ANNOUNCE) \
XX(37, SETUP, SETUP) \
XX(38, PLAY, PLAY) \
XX(39, PAUSE, PAUSE) \
XX(40, TEARDOWN, TEARDOWN) \
XX(41, GET_PARAMETER, GET_PARAMETER) \
XX(42, SET_PARAMETER, SET_PARAMETER) \
XX(43, REDIRECT, REDIRECT) \
XX(44, RECORD, RECORD) \
XX(45, FLUSH, FLUSH) \

#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* LLLLHTTP_C_HEADERS_ */
#ifndef INCLUDE_LLHTTP_API_H_
#define INCLUDE_LLHTTP_API_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#if defined(__wasm__)
#define LLHTTP_EXPORT __attribute__((visibility("default")))
#else
#define LLHTTP_EXPORT
#endif
typedef llhttp__internal_t llhttp_t;
typedef struct llhttp_settings_s llhttp_settings_t;
typedef int (*llhttp_data_cb)(llhttp_t*, const char *at, size_t length);
typedef int (*llhttp_cb)(llhttp_t*);
struct llhttp_settings_s {
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_begin;
llhttp_data_cb on_url;
llhttp_data_cb on_status;
llhttp_data_cb on_header_field;
llhttp_data_cb on_header_value;
/* Possible return values:
* 0 - Proceed normally
* 1 - Assume that request/response has no body, and proceed to parsing the
* next message
* 2 - Assume absence of body (as above) and make `llhttp_execute()` return
* `HPE_PAUSED_UPGRADE`
* -1 - Error
* `HPE_PAUSED`
*/
llhttp_cb on_headers_complete;
llhttp_data_cb on_body;
/* Possible return values 0, -1, `HPE_PAUSED` */
llhttp_cb on_message_complete;
/* When on_chunk_header is called, the current chunk length is stored
* in parser->content_length.
* Possible return values 0, -1, `HPE_PAUSED`
*/
llhttp_cb on_chunk_header;
llhttp_cb on_chunk_complete;
llhttp_cb on_url_complete;
llhttp_cb on_status_complete;
llhttp_cb on_header_field_complete;
llhttp_cb on_header_value_complete;
};
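Because the llhttp_data_cb span callbacks may fire several times for one logical value, the *_complete members above mark the span boundaries. A sketch of accumulating header names under that contract (the global std::string is an illustrative stand-in for per-parser state):
#include <string>
#include "llhttp.h"

static std::string current_field;     // assumption: one parser, one accumulator

static int on_header_field(llhttp_t *, const char *at, size_t length) {
    current_field.append(at, length); // partial spans are concatenated
    return 0;
}

static int on_header_field_complete(llhttp_t *) {
    // current_field now holds one complete header name; consume it, then reset
    current_field.clear();
    return 0;
}

// wiring: settings.on_header_field          = on_header_field;
//         settings.on_header_field_complete = on_header_field_complete;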
/* Initialize the parser with specific type and user settings.
*
* NOTE: lifetime of `settings` has to be at least the same as the lifetime of
* the `parser` here. In practice, `settings` has to be either a static
* variable or be allocated with `malloc`, `new`, etc.
*/
LLHTTP_EXPORT
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings);
#if defined(__wasm__)
LLHTTP_EXPORT
llhttp_t* llhttp_alloc(llhttp_type_t type);
LLHTTP_EXPORT
void llhttp_free(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_type(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_major(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_http_minor(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_method(llhttp_t* parser);
LLHTTP_EXPORT
int llhttp_get_status_code(llhttp_t* parser);
LLHTTP_EXPORT
uint8_t llhttp_get_upgrade(llhttp_t* parser);
#endif // defined(__wasm__)
/* Reset an already initialized parser back to the start state, preserving the
* existing parser type, callback settings, user data, and lenient flags.
*/
LLHTTP_EXPORT
void llhttp_reset(llhttp_t* parser);
/* Initialize the settings object */
LLHTTP_EXPORT
void llhttp_settings_init(llhttp_settings_t* settings);
/* Parse full or partial request/response, invoking user callbacks along the
* way.
*
 * If any of the `llhttp_data_cb` callbacks returns an errno not equal to `HPE_OK`,
 * parsing is interrupted and that errno is returned from `llhttp_execute()`. If
 * `HPE_PAUSED` was used as the errno, execution can be resumed with a
 * `llhttp_resume()` call.
*
* In a special case of CONNECT/Upgrade request/response `HPE_PAUSED_UPGRADE`
* is returned after fully parsing the request/response. If the user wishes to
* continue parsing, they need to invoke `llhttp_resume_after_upgrade()`.
*
* NOTE: if this function ever returns a non-pause type error, it will continue
* to return the same error upon each successive call up until `llhttp_init()`
* is called.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len);
/* This method should be called when the other side has no further bytes to
 * send (e.g. shutdown of the readable side of the TCP connection).
 *
 * Requests without `Content-Length` and other messages might require treating
 * all incoming bytes as part of the body, up to the last byte of the
 * connection. This method will invoke the `on_message_complete()` callback if
 * the request was terminated safely. Otherwise an error code is returned.
*/
LLHTTP_EXPORT
llhttp_errno_t llhttp_finish(llhttp_t* parser);
/* Returns `1` if the incoming message is parsed until the last byte, and has
* to be completed by calling `llhttp_finish()` on EOF
*/
LLHTTP_EXPORT
int llhttp_message_needs_eof(const llhttp_t* parser);
/* Returns `1` if there might be any other messages following the last that was
* successfully parsed.
*/
LLHTTP_EXPORT
int llhttp_should_keep_alive(const llhttp_t* parser);
/* Make further calls of `llhttp_execute()` return `HPE_PAUSED` and set
* appropriate error reason.
*
* Important: do not call this from user callbacks! User callbacks must return
* `HPE_PAUSED` if pausing is required.
*/
LLHTTP_EXPORT
void llhttp_pause(llhttp_t* parser);
/* Might be called to resume the execution after a pause in the user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED`.
*/
LLHTTP_EXPORT
void llhttp_resume(llhttp_t* parser);
/* Might be called to resume the execution after a pause in the user's callback.
* See `llhttp_execute()` above for details.
*
* Call this only if `llhttp_execute()` returns `HPE_PAUSED_UPGRADE`
*/
LLHTTP_EXPORT
void llhttp_resume_after_upgrade(llhttp_t* parser);
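The pause contract above composes as follows: a callback returns `HPE_PAUSED`, and the driver resumes from the position reported by `llhttp_get_error_pos()`. A sketch with buffer management simplified:
#include <stddef.h>
#include "llhttp.h"

/* Callbacks request a pause by returning HPE_PAUSED (never by calling llhttp_pause()). */
static int on_headers_complete_pause(llhttp_t *) {
    return HPE_PAUSED;
}

/* The driver resumes, then re-executes the unconsumed tail of the buffer. */
void drive(llhttp_t *parser, const char *data, size_t len) {
    llhttp_errno_t err = llhttp_execute(parser, data, len);
    if (err == HPE_PAUSED) {
        /* ... deferred work happens here ... */
        llhttp_resume(parser);
        const char *pos = llhttp_get_error_pos(parser);
        llhttp_execute(parser, pos, len - (size_t)(pos - data));
    }
}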
/* Returns the latest return error */
LLHTTP_EXPORT
llhttp_errno_t llhttp_get_errno(const llhttp_t* parser);
/* Returns the verbal explanation of the latest returned error.
*
 * Note: User callbacks should set the error reason when returning an error. See
* `llhttp_set_error_reason()` for details.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_reason(const llhttp_t* parser);
/* Assign verbal description to the returned error. Must be called in user
* callbacks right before returning the errno.
*
* Note: `HPE_USER` error code might be useful in user callbacks.
*/
LLHTTP_EXPORT
void llhttp_set_error_reason(llhttp_t* parser, const char* reason);
/* Returns the pointer to the last parsed byte before the returned error. The
* pointer is relative to the `data` argument of `llhttp_execute()`.
*
* Note: this method might be useful for counting the number of parsed bytes.
*/
LLHTTP_EXPORT
const char* llhttp_get_error_pos(const llhttp_t* parser);
/* Returns textual name of error code */
LLHTTP_EXPORT
const char* llhttp_errno_name(llhttp_errno_t err);
/* Returns textual name of HTTP method */
LLHTTP_EXPORT
const char* llhttp_method_name(llhttp_method_t method);
/* Enables/disables lenient header value parsing (disabled by default).
*
* Lenient parsing disables header value token checks, extending llhttp's
 * protocol support to highly non-compliant clients/servers. No
* `HPE_INVALID_HEADER_TOKEN` will be raised for incorrect header values when
* lenient parsing is "on".
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of conflicting `Transfer-Encoding` and
* `Content-Length` headers (disabled by default).
*
* Normally `llhttp` would error when `Transfer-Encoding` is present in
* conjunction with `Content-Length`. This error is important to prevent HTTP
 * request smuggling, but may be less desirable for a small number of cases
* involving legacy servers.
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled);
/* Enables/disables lenient handling of `Connection: close` and HTTP/1.0
 * requests/responses.
*
* Normally `llhttp` would error on (in strict mode) or discard (in loose mode)
* the HTTP request/response after the request/response with `Connection: close`
* and `Content-Length`. This is important to prevent cache poisoning attacks,
* but might interact badly with outdated and insecure clients. With this flag
* the extra request/response will be parsed normally.
*
* **(USE AT YOUR OWN RISK)**
*/
LLHTTP_EXPORT
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INCLUDE_LLHTTP_API_H_ */
#endif /* INCLUDE_LLHTTP_H_ */
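The three lenient switches documented above combine naturally when talking to a known-broken legacy peer; a sketch, with the warnings above still applying:
#include "llhttp.h"

static void make_lenient_parser(llhttp_t *parser, const llhttp_settings_t *settings) {
    llhttp_init(parser, HTTP_BOTH, settings);
    llhttp_set_lenient_headers(parser, 1);        /* skip header value token checks */
    llhttp_set_lenient_chunked_length(parser, 1); /* tolerate Transfer-Encoding with Content-Length */
    llhttp_set_lenient_keep_alive(parser, 1);     /* keep parsing after Connection: close */
}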


@@ -43,17 +43,13 @@
xmrig::App::App(Process *process)
{
m_controller = new Controller(process);
m_controller = std::make_shared<Controller>(process);
}
xmrig::App::~App()
{
Cpu::release();
delete m_signals;
delete m_console;
delete m_controller;
}
@@ -65,7 +61,7 @@ int xmrig::App::exec()
return 2;
}
m_signals = new Signals(this);
m_signals = std::make_shared<Signals>(this);
int rc = 0;
if (background(rc)) {
@@ -78,10 +74,10 @@ int xmrig::App::exec()
}
if (!m_controller->isBackground()) {
m_console = new Console(this);
m_console = std::make_shared<Console>(this);
}
Summary::print(m_controller);
Summary::print(m_controller.get());
if (m_controller->config()->isDryRun()) {
LOG_NOTICE("%s " WHITE_BOLD("OK"), Tags::config());
@@ -115,32 +111,20 @@ void xmrig::App::onSignal(int signum)
switch (signum)
{
case SIGHUP:
LOG_WARN("%s " YELLOW("SIGHUP received, exiting"), Tags::signal());
break;
case SIGTERM:
LOG_WARN("%s " YELLOW("SIGTERM received, exiting"), Tags::signal());
break;
case SIGINT:
LOG_WARN("%s " YELLOW("SIGINT received, exiting"), Tags::signal());
break;
return close();
default:
return;
break;
}
close();
}
void xmrig::App::close()
{
m_signals->stop();
if (m_console) {
m_console->stop();
}
m_signals.reset();
m_console.reset();
m_controller->stop();


@@ -32,6 +32,9 @@
#include "base/tools/Object.h"
#include <memory>
namespace xmrig {
@@ -60,9 +63,9 @@ private:
bool background(int &rc);
void close();
Console *m_console = nullptr;
Controller *m_controller = nullptr;
Signals *m_signals = nullptr;
std::shared_ptr<Console> m_console;
std::shared_ptr<Controller> m_controller;
std::shared_ptr<Signals> m_signals;
};
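The diff above swaps manually deleted raw pointers for std::shared_ptr members, which is why the destructor body disappears in App.cpp. The ownership pattern in isolation, as a reduced sketch (Controller is a stub here, not the real xmrig class):
#include <memory>

struct Controller { /* stub standing in for xmrig::Controller */ };

class App {
public:
    App() : m_controller(std::make_shared<Controller>()) {}
    // No user-defined destructor: the shared_ptr releases Controller automatically.

    void close() {
        m_controller.reset();   // explicit early teardown, as in App::close() above
    }

private:
    std::shared_ptr<Controller> m_controller;
};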


@@ -5,8 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,8 +36,6 @@
bool xmrig::App::background(int &rc)
{
signal(SIGPIPE, SIG_IGN);
if (!m_controller->isBackground()) {
return false;
}


@@ -5,8 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
* Copyright 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2021 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,6 +39,11 @@
#include "version.h"
#ifdef XMRIG_FEATURE_DMI
# include "hw/dmi/DmiReader.h"
#endif
#ifdef XMRIG_ALGO_RANDOMX
# include "crypto/rx/RxConfig.h"
#endif
@@ -47,6 +52,13 @@
namespace xmrig {
#ifdef XMRIG_OS_WIN
static constexpr const char *kHugepagesSupported = GREEN_BOLD("permission granted");
#else
static constexpr const char *kHugepagesSupported = GREEN_BOLD("supported");
#endif
#ifdef XMRIG_FEATURE_ASM
static const char *coloredAsmNames[] = {
RED_BOLD("none"),
@@ -64,19 +76,15 @@ inline static const char *asmName(Assembly::Id assembly)
#endif
static void print_memory(Config *config)
static void print_pages(const Config *config)
{
# ifdef XMRIG_OS_WIN
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s",
"HUGE PAGES", config->cpu().isHugePages() ? (VirtualMemory::isHugepagesAvailable() ? GREEN_BOLD("permission granted") : RED_BOLD("unavailable")) : RED_BOLD("disabled"));
# else
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", "HUGE PAGES", config->cpu().isHugePages() ? GREEN_BOLD("supported") : RED_BOLD("disabled"));
# endif
"HUGE PAGES", config->cpu().isHugePages() ? (VirtualMemory::isHugepagesAvailable() ? kHugepagesSupported : RED_BOLD("unavailable")) : RED_BOLD("disabled"));
# ifdef XMRIG_ALGO_RANDOMX
# ifdef XMRIG_OS_LINUX
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s",
"1GB PAGES", (VirtualMemory::isOneGbPagesAvailable() ? (config->rx().isOneGbPages() ? GREEN_BOLD("supported") : YELLOW_BOLD("disabled")) : YELLOW_BOLD("unavailable")));
"1GB PAGES", (VirtualMemory::isOneGbPagesAvailable() ? (config->rx().isOneGbPages() ? kHugepagesSupported : YELLOW_BOLD("disabled")) : YELLOW_BOLD("unavailable")));
# else
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", "1GB PAGES", YELLOW_BOLD("unavailable"));
# endif
@@ -84,42 +92,36 @@ static void print_memory(Config *config)
}
static void print_cpu(Config *)
static void print_cpu(const Config *)
{
const auto info = Cpu::info();
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu)") " %sx64 %sAES",
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu)") " %s %sAES%s",
"CPU",
info->brand(),
info->packages(),
info->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-",
info->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-"
ICpuInfo::is64bit() ? GREEN_BOLD("64-bit") : RED_BOLD("32-bit"),
info->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-",
info->isVM() ? RED_BOLD_S " VM" : ""
);
# if defined(XMRIG_FEATURE_LIBCPUID) || defined (XMRIG_FEATURE_HWLOC)
# if defined(XMRIG_FEATURE_HWLOC)
Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("L2:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" L3:") WHITE_BOLD("%.1f MB")
CYAN_BOLD(" %zu") "C" BLACK_BOLD("/") CYAN_BOLD("%zu") "T"
# ifdef XMRIG_FEATURE_HWLOC
BLACK_BOLD(" NUMA:") CYAN_BOLD("%zu")
# endif
, "",
BLACK_BOLD(" NUMA:") CYAN_BOLD("%zu"),
"",
info->L2() / 1048576.0,
info->L3() / 1048576.0,
info->cores(),
info->threads()
# ifdef XMRIG_FEATURE_HWLOC
, info->nodes()
# endif
info->threads(),
info->nodes()
);
# else
Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("threads:") CYAN_BOLD("%zu"),
"",
info->threads()
);
Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("threads:") CYAN_BOLD("%zu"), "", info->threads());
# endif
}
static void print_memory()
static void print_memory(const Config *config)
{
constexpr size_t oneGiB = 1024U * 1024U * 1024U;
const auto freeMem = static_cast<double>(uv_get_free_memory());
@@ -127,16 +129,49 @@ static void print_memory()
const double percent = freeMem > 0 ? ((totalMem - freeMem) / totalMem * 100.0) : 100.0;
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%.1f/%.1f GB") BLACK_BOLD(" (%.0f%%)"),
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%.1f/%.1f") CYAN(" GB") BLACK_BOLD(" (%.0f%%)"),
"MEMORY",
(totalMem - freeMem) / oneGiB,
totalMem / oneGiB,
percent
);
# ifdef XMRIG_FEATURE_DMI
if (!config->isDMI()) {
return;
}
DmiReader reader;
if (!reader.read()) {
return;
}
const bool printEmpty = reader.memory().size() <= 8;
for (const auto &memory : reader.memory()) {
if (!memory.isValid()) {
continue;
}
if (memory.size()) {
Log::print(WHITE_BOLD(" %-13s") "%s: " CYAN_BOLD("%" PRIu64) CYAN(" GB ") WHITE_BOLD("%s @ %" PRIu64 " MHz ") BLACK_BOLD("%s"),
"", memory.id().data(), memory.size() / oneGiB, memory.type(), memory.speed() / 1000000ULL, memory.product().data());
}
else if (printEmpty) {
Log::print(WHITE_BOLD(" %-13s") "%s: " BLACK_BOLD("<empty>"), "", memory.slot().data());
}
}
const auto &board = Cpu::info()->isVM() ? reader.system() : reader.board();
if (board.isValid()) {
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s") " - " WHITE_BOLD("%s"), "MOTHERBOARD", board.vendor().data(), board.product().data());
}
# endif
}
static void print_threads(Config *config)
static void print_threads(const Config *config)
{
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s%d%%"),
"DONATE",
@@ -160,11 +195,11 @@ static void print_threads(Config *config)
static void print_commands(Config *)
{
if (Log::isColors()) {
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BG(WHITE_BOLD_S "h") WHITE_BOLD("ashrate, ")
MAGENTA_BG(WHITE_BOLD_S "p") WHITE_BOLD("ause, ")
MAGENTA_BG(WHITE_BOLD_S "r") WHITE_BOLD("esume, ")
Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BG_BOLD("h") WHITE_BOLD("ashrate, ")
MAGENTA_BG_BOLD("p") WHITE_BOLD("ause, ")
MAGENTA_BG_BOLD("r") WHITE_BOLD("esume, ")
WHITE_BOLD("re") MAGENTA_BG(WHITE_BOLD_S "s") WHITE_BOLD("ults, ")
MAGENTA_BG(WHITE_BOLD_S "c") WHITE_BOLD("onnection")
MAGENTA_BG_BOLD("c") WHITE_BOLD("onnection")
);
}
else {
@@ -178,14 +213,16 @@ static void print_commands(Config *)
void xmrig::Summary::print(Controller *controller)
{
controller->config()->printVersions();
print_memory(controller->config());
print_cpu(controller->config());
print_memory();
print_threads(controller->config());
controller->config()->pools().print();
const auto config = controller->config();
print_commands(controller->config());
config->printVersions();
print_pages(config);
print_cpu(config);
print_memory(config);
print_threads(config);
config->pools().print();
print_commands(config);
}


@@ -5,8 +5,8 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2018-2021 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2021 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@@ -1,7 +1,7 @@
include (src/backend/cpu/cpu.cmake)
include (src/backend/opencl/opencl.cmake)
include (src/backend/cuda/cuda.cmake)
include (src/backend/common/common.cmake)
include(src/backend/cpu/cpu.cmake)
include(src/backend/opencl/opencl.cmake)
include(src/backend/cuda/cuda.cmake)
include(src/backend/common/common.cmake)
set(HEADERS_BACKEND


@@ -1,244 +0,0 @@
/* XMRig
* Copyright (c) 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/common/Benchmark.h"
#include "3rdparty/fmt/core.h"
#include "backend/common/interfaces/IBackend.h"
#include "backend/common/interfaces/IWorker.h"
#include "base/io/log/Log.h"
#include "base/io/log/Tags.h"
#include "base/net/http/Fetch.h"
#include "base/net/http/HttpData.h"
#include "base/net/http/HttpListener.h"
#include "base/net/stratum/benchmark/BenchConfig.h"
#include "base/net/stratum/Job.h"
#include "base/tools/Chrono.h"
#include <algorithm>
namespace xmrig {
static const std::map<int, std::map<uint32_t, uint64_t> > hashCheck = {
{ Algorithm::RX_0, {
{ 250000U, 0x7D6054757BB08A63ULL },
{ 500000U, 0x96607546DE1F5ECCULL },
{ 1000000U, 0x898B6E0431C28A6BULL },
{ 2000000U, 0xEE9468F8B40926BCULL },
{ 3000000U, 0xC2BC5D11724813C0ULL },
{ 4000000U, 0x3A2C7B285B87F941ULL },
{ 5000000U, 0x3B5BD2C3A16B450EULL },
{ 6000000U, 0x5CD0602F20C5C7C4ULL },
{ 7000000U, 0x101DE939474B6812ULL },
{ 8000000U, 0x52B765A1B156C6ECULL },
{ 9000000U, 0x323935102AB6B45CULL },
{ 10000000U, 0xB5231262E2792B26ULL }
}},
{ Algorithm::RX_WOW, {
{ 250000U, 0xC7F712C9603E2603ULL },
{ 500000U, 0x21A0E5AAE6DA7D8DULL },
{ 1000000U, 0x0F3E5400B39EA96AULL },
{ 2000000U, 0x85944CCFA2752D1FULL },
{ 3000000U, 0x64AFFCAE991811BAULL },
{ 4000000U, 0x3E4D0B836D3B13BAULL },
{ 5000000U, 0xEB7417D621271166ULL },
{ 6000000U, 0x97FFE10C0949FFA5ULL },
{ 7000000U, 0x84CAC0F8879A4BA1ULL },
{ 8000000U, 0xA1B79F031DA2459FULL },
{ 9000000U, 0x9B65226DA873E65DULL },
{ 10000000U, 0x0F9E00C5A511C200ULL }
}}
};
} // namespace xmrig
xmrig::Benchmark::Benchmark(const Job &job, size_t workers, const IBackend *backend) :
m_algo(job.algorithm()),
m_backend(backend),
m_workers(workers),
m_id(job.id()),
m_token(job.benchToken()),
m_end(job.benchSize()),
m_hash(job.benchHash())
{
# ifdef XMRIG_FEATURE_HTTP
if (!m_token.isEmpty()) {
m_httpListener = std::make_shared<HttpListener>(this, Tags::bench());
}
# endif
}
bool xmrig::Benchmark::finish(uint64_t totalHashCount)
{
m_reset = true;
m_current = totalHashCount;
if (m_done < m_workers) {
return false;
}
const double dt = static_cast<double>(m_doneTime - m_startTime) / 1000.0;
uint64_t checkData = referenceHash();
const char *color = checkData ? ((m_data == checkData) ? GREEN_BOLD_S : RED_BOLD_S) : BLACK_BOLD_S;
LOG_NOTICE("%s " WHITE_BOLD("benchmark finished in ") CYAN_BOLD("%.3f seconds") WHITE_BOLD_S " hash sum = " CLEAR "%s%016" PRIX64 CLEAR, Tags::bench(), dt, color, m_data);
# ifdef XMRIG_FEATURE_HTTP
if (!m_token.isEmpty()) {
using namespace rapidjson;
Document doc(kObjectType);
auto &allocator = doc.GetAllocator();
doc.AddMember("steady_done_ts", m_doneTime, allocator);
doc.AddMember(StringRef(BenchConfig::kHash), Value(fmt::format("{:016X}", m_data).c_str(), allocator), allocator);
doc.AddMember("backend", m_backend->toJSON(doc), allocator);
send(doc);
}
else
# endif
{
printExit();
}
return true;
}
void xmrig::Benchmark::start()
{
m_startTime = Chrono::steadyMSecs();
# ifdef XMRIG_FEATURE_HTTP
if (!m_token.isEmpty()) {
using namespace rapidjson;
Document doc(kObjectType);
doc.AddMember("steady_start_ts", m_startTime, doc.GetAllocator());
send(doc);
}
# endif
}
void xmrig::Benchmark::printProgress() const
{
if (!m_startTime || !m_current) {
return;
}
const double dt = static_cast<double>(Chrono::steadyMSecs() - m_startTime) / 1000.0;
const double percent = static_cast<double>(m_current) / m_end * 100.0;
LOG_NOTICE("%s " MAGENTA_BOLD("%5.2f%% ") CYAN_BOLD("%" PRIu64) CYAN("/%" PRIu64) BLACK_BOLD(" (%.3fs)"), Tags::bench(), percent, m_current, m_end, dt);
}
void xmrig::Benchmark::tick(IWorker *worker)
{
if (m_reset) {
m_data = 0;
m_done = 0;
m_reset = false;
}
const uint64_t doneTime = worker->benchDoneTime();
if (!doneTime) {
return;
}
++m_done;
m_data ^= worker->benchData();
m_doneTime = std::max(doneTime, m_doneTime);
}
void xmrig::Benchmark::onHttpData(const HttpData &data)
{
# ifdef XMRIG_FEATURE_HTTP
rapidjson::Document doc;
try {
doc = data.json();
} catch (const std::exception &ex) {
return setError(ex.what());
}
if (data.status != 200) {
return setError(data.statusName());
}
if (m_doneTime) {
LOG_NOTICE("%s " WHITE_BOLD("benchmark submitted ") CYAN_BOLD("https://xmrig.com/benchmark/%s"), Tags::bench(), m_id.data());
printExit();
}
# endif
}
uint64_t xmrig::Benchmark::referenceHash() const
{
if (m_hash) {
return m_hash;
}
# ifdef XMRIG_FEATURE_HTTP
if (!m_token.isEmpty()) {
return 0;
}
# endif
uint64_t hash = 0;
try {
hash = hashCheck.at(m_algo).at(m_end);
} catch (const std::exception &ex) {}
return hash;
}
void xmrig::Benchmark::printExit()
{
LOG_INFO("%s " WHITE_BOLD("press ") MAGENTA_BOLD("Ctrl+C") WHITE_BOLD(" to exit"), Tags::bench());
}
#ifdef XMRIG_FEATURE_HTTP
void xmrig::Benchmark::send(const rapidjson::Value &body)
{
FetchRequest req(HTTP_PATCH, BenchConfig::kApiHost, BenchConfig::kApiPort, fmt::format("/1/benchmark/{}", m_id).c_str(), body, BenchConfig::kApiTLS, true);
req.headers.insert({ "Authorization", fmt::format("Bearer {}", m_token)});
fetch(std::move(req), m_httpListener);
}
void xmrig::Benchmark::setError(const char *message)
{
LOG_ERR("%s " RED("benchmark failed ") RED_BOLD("\"%s\""), Tags::bench(), message);
}
#endif


@@ -0,0 +1,52 @@
/* XMRig
* Copyright (c) 2018-2020 SChernykh <https://github.com/SChernykh>
* Copyright (c) 2016-2020 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "backend/common/GpuWorker.h"
#include "base/tools/Chrono.h"
xmrig::GpuWorker::GpuWorker(size_t id, int64_t affinity, int priority, uint32_t deviceIndex) : Worker(id, affinity, priority),
m_deviceIndex(deviceIndex)
{
}
void xmrig::GpuWorker::storeStats()
{
// Get the index that is currently unused
const uint32_t index = m_index.load(std::memory_order_relaxed) ^ 1;
// Fill in the data for that index
m_hashCount[index] = m_count;
m_timestamp[index] = Chrono::steadyMSecs();
// Switch to that index
// All data will be in memory by the time it completes thanks to std::memory_order_seq_cst
m_index.fetch_xor(1, std::memory_order_seq_cst);
}
void xmrig::GpuWorker::hashrateData(uint64_t &hashCount, uint64_t &timeStamp, uint64_t &rawHashes) const
{
const uint32_t index = m_index.load(std::memory_order_relaxed);
rawHashes = m_hashrateData.interpolate(timeStamp);
hashCount = m_hashCount[index];
timeStamp = m_timestamp[index];
}
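storeStats() and hashrateData() above form a single-writer double buffer: the writer fills the slot readers are not using, then flips the index with one sequentially consistent XOR. The same idea as a self-contained sketch (assumes a single writer and infrequent flips, like the worker here):
#include <atomic>
#include <cstdint>

class StatsBuffer {
public:
    // Writer thread: publish a new (hashes, timestamp) snapshot.
    void store(uint64_t hashes, uint64_t ts) {
        const uint32_t idx = m_index.load(std::memory_order_relaxed) ^ 1; // unused slot
        m_hashes[idx] = hashes;
        m_ts[idx]     = ts;
        m_index.fetch_xor(1, std::memory_order_seq_cst); // flip: prior writes become visible
    }

    // Reader threads: see the most recently published, mutually consistent pair.
    void load(uint64_t &hashes, uint64_t &ts) const {
        const uint32_t idx = m_index.load(std::memory_order_relaxed);
        hashes = m_hashes[idx];
        ts     = m_ts[idx];
    }

private:
    uint64_t m_hashes[2] = {};
    uint64_t m_ts[2]     = {};
    std::atomic<uint32_t> m_index {0};
};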

Some files were not shown because too many files have changed in this diff.