mirror of https://github.com/xmrig/xmrig.git synced 2025-12-06 23:52:38 -05:00

Compare commits

435 Commits

Author SHA1 Message Date
XMRig
a5aa2c9042 v6.21.1 2024-02-25 22:26:52 +07:00
XMRig
fa35a32eee Merge branch 'dev' 2024-02-25 22:25:41 +07:00
XMRig
7b6ce59821 Update CHANGELOG.md. 2024-02-22 03:26:41 +07:00
XMRig
33315ba2ef Merge branch 'Daviey-HTTPRebindSegFault' into dev 2024-02-12 14:51:34 +07:00
XMRig
2c9c40d623 Merge branch 'HTTPRebindSegFault' of https://github.com/Daviey/xmrig into Daviey-HTTPRebindSegFault 2024-02-12 14:50:48 +07:00
Dave Walker (Daviey)
daa6328418 Fix segfault in HTTP API rebind
Previously, with the HTTP API enabled on a benchmarking run, it was possible
to cause a segfault due to an issue handling the m_httpd pointer when
rebinding.

  - Initialize m_httpd to nullptr to indicate when it's not in use.
  - Safely delete m_httpd in Api's destructor to prevent use-after-free
    issues.
  - Add checks to ensure m_httpd is not nullptr before usage in start,
    stop, and tick methods.
  - Log errors for HTTP server start failures to aid in debugging.

Fixes MoneroOcean/xmrig#120

Signed-off-by: Dave Walker (Daviey) <email@daviey.com>
2024-02-11 17:52:36 +00:00
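A minimal sketch of the guard pattern described in that commit, using a stripped-down `Api`/`Httpd` pair (the methods and logging below are illustrative placeholders, not the exact xmrig classes):

```
#include <cstdio>

class Httpd {                       // stand-in for the real HTTP server class
public:
    bool start() { return true; }
    void stop()  {}
    void tick()  {}
};

class Api {
public:
    Api() : m_httpd(nullptr) {}     // nullptr marks "HTTP API not in use"
    ~Api() { delete m_httpd; }      // deleting nullptr is a no-op, so this is always safe

    void start()
    {
        if (!m_httpd) {
            m_httpd = new Httpd();
        }
        if (!m_httpd->start()) {
            std::fprintf(stderr, "HTTP server failed to start\n");  // log, don't crash
        }
    }

    void stop() { if (m_httpd) { m_httpd->stop(); } }
    void tick() { if (m_httpd) { m_httpd->tick(); } }

private:
    Httpd *m_httpd;
};

int main()
{
    Api api;
    api.tick();     // harmless before start(): the nullptr check skips it
    api.start();
    api.stop();
    return 0;
}
```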
XMRig
8afd4d5f2f Cleanup. 2024-01-17 00:31:16 +07:00
xmrig
77e2f3a028 Merge pull request #3399 from SChernykh/dev
Fixed Zephyr mining (OpenCL)
2024-01-14 09:01:44 +07:00
SChernykh
206295c6cb Fixed Zephyr mining (OpenCL) 2024-01-13 20:14:08 +01:00
XMRig
07e1e77c4f Code style cleanup. 2023-12-29 21:17:19 +07:00
xmrig
50a98a4bb1 Merge pull request #3391 from moneromooo-monero/tf-dev
add support for townforge (monero fork using randomx)
2023-12-27 23:13:54 +07:00
moneromooo-monero
c50369d65d add support for townforge (monero fork using randomx) 2023-12-23 15:31:05 +00:00
XMRig
592b0c9c76 v6.21.1-dev 2023-11-23 21:19:36 +07:00
XMRig
89eab0eff2 Merge branch 'master' into dev 2023-11-23 21:18:21 +07:00
XMRig
8084ff37a5 v6.21.0 2023-11-23 20:40:58 +07:00
XMRig
7cf3db7750 Merge branch 'dev' 2023-11-23 20:40:34 +07:00
XMRig
4bda6e054d v6.21.0-dev 2023-11-23 19:51:41 +07:00
xmrig
64a0ed413b Merge pull request #3358 from SChernykh/dev
Zephyr solo mining: handle multiple outputs
2023-11-15 22:36:35 +07:00
SChernykh
0b59b7eb43 Zephyr solo mining: handle multiple outputs 2023-11-15 16:18:05 +01:00
xmrig
ae6b10b5a4 Merge pull request #3356 from SChernykh/dev
Updated pricing record size for Zephyr solo mining
2023-11-15 08:27:02 +07:00
SChernykh
705a7eac0c Updated pricing record size for Zephyr solo mining 2023-11-14 13:06:10 +01:00
xmrig
10bfffe033 Merge pull request #3348 from SChernykh/dev
Update to latest sse2neon.h
2023-10-31 11:52:38 +07:00
SChernykh
4131aa4754 Update sse2neon.h 2023-10-30 20:07:03 +01:00
xmrig
fee51b20fa Merge pull request #3346 from SChernykh/dev
ARM64 JIT: don't use `x18` register
2023-10-20 07:36:12 +07:00
SChernykh
5e66efabcf ARM64 JIT: don't use x18 register
From https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms
> The platforms reserve register x18. Don’t use this register.

This PR fixes invalid hashes when running on Apple silicon with the latest macOS SDK.
2023-10-19 17:45:15 +02:00
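For illustration, a hypothetical ARM64 JIT can enforce that rule simply by leaving x18 out of its scratch-register pool (the register list and helper below are made up, not the RandomX JIT's actual tables):

```
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// GPRs a hypothetical ARM64 JIT hands out for temporaries. x18 is deliberately
// absent: Apple platforms reserve it, and clobbering it corrupts state
// (the invalid hashes mentioned above).
constexpr std::array<uint8_t, 9> kScratchRegs = { 9, 10, 11, 12, 13, 14, 15, 16, 17 };

// Round-robin pick of the next scratch register; never returns x18.
inline uint8_t nextScratchReg(size_t &cursor)
{
    return kScratchRegs[cursor++ % kScratchRegs.size()];
}

int main()
{
    size_t cursor = 0;
    for (int i = 0; i < 12; ++i) {
        std::printf("x%u ", static_cast<unsigned>(nextScratchReg(cursor)));
    }
    std::printf("\n");
    return 0;
}
```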
XMRig
08901a9a4b Merge branch 'JacksonZ03-main' into dev 2023-10-09 15:15:32 +07:00
XMRig
a19f590ee6 Merge branch 'main' of https://github.com/JacksonZ03/xmrig into JacksonZ03-main 2023-10-09 15:14:50 +07:00
Jackson Zheng
2fa754825d Update cn_main_loop.asm
Found this line to be missing. I looked through the history and it seems the original author of the commit left it out.
2023-10-08 23:29:52 +01:00
Jackson Zheng
f3446c0a94 Update cn_main_loop.asm
I was scanning the code and found this line to be missing. Not sure if this was a mistake or if it was intentionally left out?
2023-10-08 23:12:58 +01:00
xmrig
71209d4cd7 Merge pull request #3339 from SChernykh/dev
Added SNI option for TLS connections
2023-09-29 19:15:29 +07:00
SChernykh
0a3313cb76 Added SNI option for TLS connections
Disabled by default, add `"sni": true,` to pool config to enable it.
2023-09-29 08:33:49 +02:00
xmrig
e855723cd9 Merge pull request #3320 from SChernykh/dev
Add "built for OS/architecture/bits" to "ABOUT"
2023-08-21 19:00:14 +07:00
SChernykh
6e294bd046 Add "built for OS/architecture/bits" to "ABOUT"
To make it clearer which binary is shown in an XMRig screenshot.
2023-08-21 13:49:21 +02:00
XMRig
dfe70d9ea7 Fixed huge pages availability info on Linux. 2023-08-08 17:48:44 +07:00
XMRig
2ecf10cdcb Make Platform::hasKeepalive() constexpr where always supported and code cleanup. 2023-08-06 20:26:07 +07:00
xmrig
b55ca8e547 Merge pull request #3312 from SChernykh/dev
Disable TCP keepalive before closing socket
2023-08-06 20:14:37 +07:00
SChernykh
12577df7ba Disable TCP keepalive before closing socket 2023-08-06 14:51:25 +02:00
xmrig
64f5bb467a Merge pull request #3302 from SChernykh/dev
Enabled keepalive for Windows (>= Vista)
2023-07-17 17:17:39 +07:00
SChernykh
5717e72367 Enabled keepalive for Windows (>= Vista) 2023-07-17 09:49:10 +02:00
XMRig
e7de104d88 v6.20.1-dev 2023-07-03 18:47:55 +07:00
XMRig
3b5e04b1b7 Merge branch 'master' into dev 2023-07-03 18:47:22 +07:00
XMRig
2e77faa80c v6.20.0 2023-07-03 12:42:00 +07:00
XMRig
6e63a246bf Merge branch 'dev' 2023-07-03 12:41:35 +07:00
XMRig
09abc81255 v6.20.0-dev 2023-07-03 12:37:36 +07:00
xmrig
fc698f7bcf Merge pull request #3291 from SChernykh/dev
Zephyr solo mining: fix for blocks with transactions
2023-06-24 20:22:53 +07:00
SChernykh
cb2f8fd453 Zephyr solo mining: fix for blocks with transactions 2023-06-24 15:15:37 +02:00
xmrig
59c6c42ceb Merge pull request #3290 from SChernykh/dev
Zephyr coin support
2023-06-24 19:53:54 +07:00
SChernykh
6c10cc5a4b Zephyr coin support
Solo mining will require `--coin Zephyr` in command line, or `"coin": "Zephyr",` in `pools` section of config.json
2023-06-24 14:37:20 +02:00
xmrig
d5a8f8a5ae Merge pull request #3288 from SChernykh/dev
KawPow: fixed data race when building programs
2023-06-19 17:40:24 +07:00
SChernykh
d94d052e6c KawPow: fixed data race when building programs
`uv_queue_work` can't be called from other threads, only `uv_async_send` is thread-safe.
2023-06-19 12:32:28 +02:00
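A minimal libuv sketch of the rule that commit states (illustrative, not xmrig's OclKawPow code): worker threads only call `uv_async_send()`, and the loop thread does the actual work inside the async callback.

```
#include <uv.h>
#include <cstdio>
#include <thread>

static uv_async_t g_async;

// Runs on the loop thread; the only safe place for loop-bound work.
static void on_async(uv_async_t *)
{
    std::printf("building program on the loop thread\n");
    uv_close(reinterpret_cast<uv_handle_t *>(&g_async), nullptr);  // let the loop exit
}

int main()
{
    uv_loop_t *loop = uv_default_loop();
    uv_async_init(loop, &g_async, on_async);

    // Any other thread may only poke the loop with uv_async_send();
    // calling uv_queue_work() from here would be a data race.
    std::thread worker([] { uv_async_send(&g_async); });

    uv_run(loop, UV_RUN_DEFAULT);
    worker.join();
    return 0;
}
```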
XMRig
ae2b7e3348 Merge branch 'Spudz76-dev-addApiRebind' into dev 2023-06-07 20:49:34 +07:00
XMRig
7d7f30701f Code cleanup. 2023-06-07 20:48:56 +07:00
XMRig
e80fc25789 Merge branch 'dev-addApiRebind' of https://github.com/Spudz76/xmrig into Spudz76-dev-addApiRebind 2023-06-07 20:12:58 +07:00
XMRig
ff53be5f3b Merge branch 'benthetechguy-readme' into dev 2023-06-07 00:52:37 +07:00
XMRig
6981e68ae3 Merge branch 'readme' of https://github.com/benthetechguy/xmrig into benthetechguy-readme 2023-06-07 00:52:03 +07:00
XMRig
c7e541d84f Disallow direct use of HwlocCpuInfo class. 2023-06-07 00:32:09 +07:00
XMRig
a2ae17b4c4 Code cleanup. 2023-06-06 23:15:58 +07:00
XMRig
554b60966b Fixed compatibility with hwloc 1.11. 2023-06-06 02:30:10 +07:00
xmrig
0378aa8df4 Merge pull request #3236 from MrFoxPro/dev
fix(cuda): receive CUDA loader error on linux too.
2023-06-05 23:07:38 +07:00
XMRig
6dbd46a891 Added new CMake options ARM_V8 and ARM_V7. 2023-06-04 20:32:05 +07:00
XMRig
055db83142 Added new ARM CPU names. 2023-06-04 19:36:53 +07:00
XMRig
cdd5dff337 v6.19.4-dev 2023-06-03 21:14:26 +07:00
XMRig
bc5fe8f456 Merge branch 'master' into dev 2023-06-03 21:13:51 +07:00
XMRig
0bc87345c4 v6.19.3 2023-06-03 19:59:18 +07:00
XMRig
f17d31e61a Merge branch 'dev' 2023-06-03 19:57:36 +07:00
xmrig
e6bf4c0077 Update CHANGELOG.md 2023-06-02 22:12:18 +07:00
xmrig
ff79b8fce4 Merge pull request #3280 from SChernykh/dev
Updated example scripts
2023-06-02 17:47:13 +07:00
SChernykh
af87369e4f Updated example scripts
- Hashvault is the #1 pool now, so changed the example to a smaller pool
- node.xmr.to doesn't exist anymore
2023-06-02 09:34:26 +02:00
xmrig
65fc16d5ac Merge pull request #3275 from SChernykh/dev
RandomX: fixed `jccErratum` list
2023-05-26 18:25:57 +07:00
SChernykh
826e23b4c4 Fixed jccErratum list 2023-05-26 12:46:59 +02:00
Tony Butler
548fbb9f71 Add API rebind polling 2023-05-23 16:49:43 -06:00
xmrig
02d45834e1 Merge pull request #3273 from SChernykh/dev
RandomX: fixed undefined behavior
2023-05-23 20:18:32 +07:00
SChernykh
1252a4710e RandomX: fixed undefined behavior
Using an inactive member of a `union` is undefined behavior in C++
2023-05-23 14:40:12 +02:00
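An illustration of the C++ rule the commit refers to, not the actual RandomX code: type-punning through a `union` reads an inactive member (undefined behavior), while copying the bytes with `memcpy` is well defined.

```
#include <cstdint>
#include <cstring>
#include <cstdio>

// Undefined behavior in C++: write one union member, read another.
static double punViaUnion(uint64_t bits)
{
    union { uint64_t u; double d; } x;
    x.u = bits;
    return x.d;      // reads the inactive member -> UB (often "works", never guaranteed)
}

// Well-defined alternative: copy the bytes.
static double punViaMemcpy(uint64_t bits)
{
    double d;
    static_assert(sizeof(d) == sizeof(bits), "size mismatch");
    std::memcpy(&d, &bits, sizeof(d));
    return d;
}

int main()
{
    const uint64_t bits = 0x3FF0000000000000ULL;   // IEEE-754 encoding of 1.0
    std::printf("union:  %f\n", punViaUnion(bits));
    std::printf("memcpy: %f\n", punViaMemcpy(bits));
    return 0;
}
```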
xmrig
5891f1f06b Merge pull request #3271 from SChernykh/opt_genprog
RandomX: optimized program generation
2023-05-22 05:25:32 +07:00
SChernykh
5dcbab7e3a RandomX: optimized program generation 2023-05-21 17:44:20 +02:00
xmrig
7b51e23aa0 Merge pull request #3254 from SChernykh/dev
Tweaked auto-tuning for Intel CPUs
2023-04-19 12:29:58 +07:00
SChernykh
7f7fc363e1 Tweaked auto-tuning for Intel CPUs
Alder Lake and newer CPUs have exclusive L3 cache and benefit from more threads until L3+L2 is filled.
2023-04-18 21:20:45 +02:00
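A back-of-the-envelope version of that reasoning, with made-up cache sizes and without xmrig's real heuristics: when the L3 is exclusive, extra threads keep paying off until the 2 MiB RandomX scratchpads fill L3 plus the per-core L2.

```
#include <cstdio>
#include <cstddef>

int main()
{
    // Illustrative cache sizes for a hypothetical Alder Lake-class part.
    const size_t l3          = 30u * 1024 * 1024;   // 30 MiB exclusive L3
    const size_t l2_per_core = 1280u * 1024;        // 1.25 MiB L2 per P-core
    const size_t cores       = 8;
    const size_t scratchpad  = 2u * 1024 * 1024;    // RandomX scratchpad, 2 MiB per thread

    const size_t inclusive_style = l3 / scratchpad;                          // old heuristic: L3 only
    const size_t exclusive_style = (l3 + cores * l2_per_core) / scratchpad;  // tweaked: L3 + L2

    std::printf("threads (L3 only): %zu\n", inclusive_style);
    std::printf("threads (L3 + L2): %zu\n", exclusive_style);
    return 0;
}
```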
XMRig
c4e1363148 #3245 Improved algorithm negotiation for donation rounds by sending extra information about current mining job. 2023-04-07 23:35:05 +07:00
XMRig
a2e9b3456d v6.19.3-dev 2023-04-04 00:34:54 +07:00
XMRig
4790318685 Merge branch 'master' into dev 2023-04-04 00:34:22 +07:00
XMRig
038c4fbe34 v6.19.2 2023-04-03 22:15:40 +07:00
XMRig
d65d34ef36 Merge branch 'dev' 2023-04-03 22:14:58 +07:00
xmrig
af6647f377 Update CHANGELOG.md 2023-04-03 20:34:35 +07:00
xmrig
8f9adc02c0 Merge pull request #3241 from SChernykh/dev
Sync with changes from proxy
2023-04-03 20:28:38 +07:00
SChernykh
5e0079f012 Sync with changes from proxy 2023-04-03 15:01:40 +02:00
xmrig
dc5e341778 Merge pull request #3240 from koitsu/dev-improve-cmd-files
Improve .cmd files when run by shortcuts on another drive
2023-04-01 12:30:33 +07:00
Jeremy Chadwick
0f81ab4c67 Improve .cmd files when run by shortcuts on another drive 2023-03-31 20:16:00 -07:00
Dmitriy Nikiforov
62a3a98e7d fix(cuda): receive CUDA loader error on linux too. 2023-03-27 18:48:13 +05:00
XMRig
d31b3b7c76 Code style cleanup. 2023-03-25 20:56:25 +07:00
xmrig
e352109431 Merge pull request #3232 from moneromooo-monero/xhd-dev
DaemonClient: new X-Hash-Difficulty HTTP header optimization
2023-03-25 20:51:09 +07:00
moneromooo-monero
88b0385bfe DaemonClient: new X-Hash-Difficulty HTTP header optimization
If the caller knows the difficulty of a PoW hash a given nonce
yields, it can tell the callee via the X-Hash-Difficulty, which
may allow the callee to skip some processing if the difficulty
does not meet some criterion.

In my case, a merge mining proxy can know it's pointless trying
to submit the nonce to a chain with higher difficulty when the
nonce only meets the difficulty for a lower difficulty chain.
2023-03-25 09:48:54 +00:00
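What the optimization looks like on the wire, sketched as a hand-built HTTP request (the endpoint, body and helper are illustrative; only the `X-Hash-Difficulty` header itself comes from the commit above):

```
#include <cstdint>
#include <cstdio>
#include <string>

// Build a simplified submit request that carries the known PoW difficulty.
std::string buildSubmitRequest(const std::string &body, uint64_t hashDifficulty)
{
    std::string req;
    req += "POST /json_rpc HTTP/1.1\r\n";
    req += "Content-Type: application/json\r\n";
    req += "X-Hash-Difficulty: " + std::to_string(hashDifficulty) + "\r\n";
    req += "Content-Length: " + std::to_string(body.size()) + "\r\n\r\n";
    req += body;
    return req;
}

int main()
{
    // The receiver can compare the advertised difficulty with its chain's
    // requirement and skip processing when it can't possibly be met.
    std::puts(buildSubmitRequest("{\"method\":\"submitblock\"}", 123456789).c_str());
    return 0;
}
```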
xmrig
9508332258 Merge pull request #3230 from SChernykh/dev
Fixed parsing of TX_EXTRA_MERGE_MINING_TAG
2023-03-25 12:39:04 +07:00
SChernykh
bc5c1f7e65 Fixed parsing of TX_EXTRA_MERGE_MINING_TAG 2023-03-24 22:42:26 +01:00
XMRig
22118330e3 v6.19.2-dev 2023-03-23 20:41:00 +07:00
XMRig
240f2450af Merge branch 'master' into dev 2023-03-23 20:40:23 +07:00
XMRig
6e856ca39c v6.19.1 2023-03-23 19:03:09 +07:00
XMRig
6047786f43 Merge branch 'dev' 2023-03-23 19:02:24 +07:00
xmrig
7b8ba9ac09 Update CHANGELOG.md 2023-03-23 18:10:43 +07:00
xmrig
02259fec05 Merge pull request #3228 from SChernykh/dev
Fix build with gcc 13
2023-03-23 18:02:47 +07:00
Matthew Smith
51728b2d55 Fix build with gcc 13
Some header files are no longer included transitively with the new
libstdc++.

Bug: https://bugs.gentoo.org/895226
2023-03-23 12:01:15 +01:00
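The usual shape of such a fix, shown on a made-up file rather than the actual patch: include the header explicitly instead of relying on libstdc++ to pull it in transitively.

```
// Before GCC 13 / newer libstdc++, <cstdint> often arrived transitively via
// other standard headers; now it must be included where uint64_t etc. are used.
#include <cstdint>   // explicit include: fixes "uint64_t was not declared" with gcc 13
#include <string>

struct JobStats {
    std::string id;
    uint64_t    diff = 0;   // fails to compile without <cstdint> on newer toolchains
};

int main()
{
    JobStats s{"job-1", 100000};
    return s.diff != 0 ? 0 : 1;
}
```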
XMRig
ebe818a5fb Resolved deprecated methods warnings with OpenSSL 3.0. 2023-03-07 23:51:03 +07:00
xmrig
790a71b030 Merge pull request #3218 from SChernykh/dev
Fix: `--randomx-wrmsr=-1` worked only on Intel
2023-02-27 11:17:01 +07:00
SChernykh
c62622b114 Fix: --randomx-wrmsr=-1 worked only on Intel 2023-02-26 22:31:55 +01:00
xmrig
fc643e2936 Merge pull request #3213 from SChernykh/dev
Fix for 32-bit clang 15
2023-02-19 15:47:28 +07:00
SChernykh
12b9b62ef7 Fix for 32-bit clang 15
Don't define `_mm_cvtsi128_si64` and `_mm_cvtsi64_si128` because clang 15 already has them in its headers.
2023-02-19 09:42:16 +01:00
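A hedged sketch of the guard pattern (not the actual sse2neon/xmrig code, and the exact version check is an assumption): provide the 32-bit fallback for `_mm_cvtsi128_si64` only when the compiler's headers don't already declare it.

```
// x86-only illustration; the version check is an assumption, not the real patch.
#if defined(__i386__) || defined(__x86_64__)
#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

// 32-bit builds historically lacked _mm_cvtsi128_si64, so a local fallback was
// provided. Clang 15 declares it even for 32-bit targets, so the fallback must
// be compiled out there to avoid a redefinition error.
#if defined(__i386__) && !(defined(__clang__) && __clang_major__ >= 15)
static inline int64_t _mm_cvtsi128_si64(__m128i v)
{
    int64_t out;
    _mm_storel_epi64(reinterpret_cast<__m128i *>(&out), v);  // copy the low 64 bits
    return out;
}
#endif

int main()
{
    const __m128i v = _mm_set_epi32(0, 0, 0x12345678, 0x0abcdef0);
    std::printf("%llx\n", static_cast<unsigned long long>(_mm_cvtsi128_si64(v)));
    return 0;
}
#else
int main() { return 0; }   // non-x86 builds: nothing to show
#endif
```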
XMRig
667f636c62 Fixed DnsUvBackend storage cleanup. 2023-02-09 21:45:50 +07:00
XMRig
81e87a6931 Revert changes to fix MSVC build. 2023-02-09 21:28:39 +07:00
XMRig
540b223eab Cleanup. 2023-02-09 13:55:11 +07:00
XMRig
75474be060 Fix warning. 2023-02-03 23:46:58 +07:00
XMRig
49f34e59a6 Partially resolved deprecated methods warnings in OpenSSL 3.0. 2023-02-03 23:08:54 +07:00
XMRig
223add4e22 v6.19.1-dev 2023-02-02 12:27:33 +07:00
XMRig
435fc86120 Merge branch 'master' into dev 2023-02-02 12:27:08 +07:00
XMRig
c0143b90ce v6.19.0 2023-02-02 11:51:11 +07:00
XMRig
c3cdffe86d Merge branch 'dev' 2023-02-02 11:50:29 +07:00
XMRig
8a4da33bea Update scripts/build.*.sh. 2023-01-30 00:19:55 +07:00
XMRig
1c7a339527 v6.19.0-dev (new config options added). 2023-01-29 11:16:37 +07:00
xmrig
490acd6e55 Update CHANGELOG.md 2023-01-29 11:13:28 +07:00
xmrig
6ecf57959b Merge pull request #3202 from SChernykh/dev
Solo mining: added job timeout (default is 15 seconds)
2023-01-29 11:12:22 +07:00
SChernykh
e2c58126e9 Solo mining: added job timeout (default is 15 seconds)
It's important to update jobs frequently to get new transactions into the block template. See https://rucknium.me/posts/monero-pool-transaction-delay/ for more details.
2023-01-28 19:42:02 +01:00
XMRig
0ed4b35cd3 Update hwloc for MSVC builds to 2.9.0. 2023-01-27 01:07:58 +07:00
xmrig
afe2aa4402 Update CHANGELOG.md 2023-01-23 20:54:46 +07:00
XMRig
3f7533a645 Update to latest sse2neon.h. 2023-01-23 20:45:02 +07:00
xmrig
6ef0409086 Merge pull request #3198 from SChernykh/dev
Fixed broken RandomX light mode mining
2023-01-21 22:05:25 +07:00
SChernykh
64b0d9562e Fixed broken RandomX light mode mining
RandomX VMs didn't get updated properly in light mode.
2023-01-21 16:02:47 +01:00
XMRig
770b71c69a #3185 Fixed macOS DMI reader. 2023-01-19 22:09:59 +07:00
xmrig
44642643f8 Merge pull request #3196 from SChernykh/dev
Show IP address for failed connections
2023-01-11 17:02:15 +07:00
SChernykh
273bb84df8 Show IP address for failed connections 2023-01-11 09:28:16 +01:00
xmrig
4d0b8c9daf Merge pull request #3182 from SChernykh/dev
DragonflyBSD compilation fixes
2022-12-17 20:24:01 +07:00
SChernykh
7d4d48e83b DragonflyBSD compilation fixes 2022-12-17 13:11:14 +01:00
xmrig
2ea37cdf37 Merge pull request #3180 from SChernykh/dev
Added ifdefs for DragonflyBSD
2022-12-16 21:29:24 +07:00
SChernykh
a02afe6d4f Added ifdefs for DragonflyBSD
Possible fix for #3179
2022-12-16 15:26:37 +01:00
XMRig
6e86dddc65 Bump the minimum CMake version in other places too. 2022-12-09 16:07:42 +07:00
xmrig
0171faffe7 Merge pull request #3176 from SChernykh/dev
Update cmake required version to 3.1
2022-12-09 15:24:54 +07:00
SChernykh
25decd1b7f Update cmake required version to 3.1
`set(CMAKE_CXX_STANDARD 11)` only works properly starting from cmake 3.1, see #3174
2022-12-09 09:21:40 +01:00
xmrig
354b9ddb34 Merge pull request #3163 from SChernykh/dev
Improved Zen 3 MSR mod
2022-11-18 11:38:45 +07:00
SChernykh
3ad6ab56a5 Improved Zen 3 MSR mod
+0.5% speedup on Ryzen 5 5600X
2022-11-17 23:32:36 +01:00
xmrig
1aa0e37b54 Merge pull request #3161 from SChernykh/dev
MSVC build: enabled parallel compilation
2022-11-15 13:20:41 +06:30
SChernykh
807c64ddb1 MSVC build: enabled parallel compilation 2022-11-15 07:45:54 +01:00
XMRig
5bf90704a6 #2869 2022-10-29 23:51:42 +07:00
xmrig
912d1e362b Merge pull request #3144 from Spudz76/dev-updateSSE2NEON
Update to latest sse2neon.h from github:DLTcollab/sse2neon
2022-10-24 13:50:54 +07:00
Tony Butler
eeb459506c Update to latest sse2neon.h from github:DLTcollab/sse2neon 2022-10-23 15:27:14 -06:00
XMRig
f4ec0287c4 v6.18.2-dev 2022-10-23 23:19:50 +07:00
XMRig
483d6ada3d Merge branch 'master' into dev 2022-10-23 23:19:06 +07:00
XMRig
28e81bd7c0 v6.18.1 2022-10-23 17:44:24 +07:00
XMRig
54e75bc7c4 Merge branch 'dev' 2022-10-23 17:43:38 +07:00
xmrig
c388113a30 Update CHANGELOG.md 2022-10-23 17:14:57 +07:00
xmrig
36afeec225 Merge pull request #3134 from SChernykh/dev
Added Zen4 to randomx_boost.sh
2022-10-10 03:37:06 +07:00
SChernykh
4b5e56416d Added Zen4 to randomx_boost.sh 2022-10-09 22:02:50 +02:00
xmrig
0d314d0469 Merge pull request #3132 from SChernykh/dev
RandomX: added MSR mod for Zen 4
2022-10-01 23:40:04 +07:00
SChernykh
7fc45dfb2d RandomX: added MSR mod for Zen 4
+0.8% faster on Ryzen 9 7950X
2022-10-01 18:33:04 +02:00
xmrig
2ba40edee0 Update CHANGELOG.md 2022-09-25 17:01:33 +07:00
xmrig
bc4dd11761 Merge pull request #3129 from SChernykh/dev
Fix: protectRX flushed CPU cache only on MacOS/iOS
2022-09-22 07:02:28 +07:00
SChernykh
7b52a41459 Fix: protectRX flushed CPU cache only on MacOS/iOS 2022-09-21 15:18:06 +02:00
xmrig
b5de214ff9 Merge pull request #3126 from SChernykh/dev
Don't reset when pool sends the same job blob
2022-09-19 19:03:17 +07:00
SChernykh
8bd3b393ef Update m_size only if blob was set successfully 2022-09-19 10:42:08 +02:00
SChernykh
9223c2f027 Don't reset when pool sends the same job blob 2022-09-19 10:35:36 +02:00
xmrig
6346d36d1b Merge pull request #3120 from SChernykh/dev
RandomX: optimized CFROUND elimination more
2022-09-16 22:50:51 +07:00
SChernykh
93c07e1d34 RandomX: optimized CFROUND elimination more 2022-09-16 14:11:27 +02:00
xmrig
0ba3000982 Merge pull request #3119 from SChernykh/dev
RandomX: optimized CFROUND elimination
2022-09-16 01:04:32 +07:00
SChernykh
f0e7de8c71 RandomX: optimized CFROUND elimination 2022-09-15 19:57:34 +02:00
xmrig
1c4eb6c5fe Merge pull request #3109 from SChernykh/dev
RandomX: added Blake2 AVX2 version
2022-08-26 01:52:22 +07:00
SChernykh
63e21dfe63 RandomX: added Blake2 AVX2 version
+0.1% speedup on AMD Zen2/Zen3 and Intel CPUs which support AVX2.
2022-08-25 20:39:54 +02:00
xmrig
b2d9dab2e3 Merge pull request #3075 from dev-0x7C6/master
Recognize armv7ve as valid ARMv7 target.
2022-08-19 02:30:27 +07:00
xmrig
4c57b60e59 Merge pull request #3082 from SChernykh/dev
Fixed GCC 12 warnings
2022-07-03 16:59:26 +07:00
SChernykh
e6c81d7166 Fixed GCC 12 warnings 2022-07-03 11:51:46 +02:00
xmrig
94840c70d8 Update README.md 2022-07-02 22:27:51 +07:00
XMRig
e1478bfa94 v6.18.1-dev 2022-06-26 18:32:12 +07:00
XMRig
6df6e15267 Merge branch 'master' into dev 2022-06-26 18:31:40 +07:00
XMRig
834ea44507 v6.18.0 2022-06-23 20:04:00 +07:00
XMRig
73dc0ffb7e Merge branch 'dev' 2022-06-23 20:03:29 +07:00
XMRig
e57641d6b1 v6.18.0-dev 2022-06-23 17:36:31 +07:00
XMRig
b324e34444 Update hwloc for msvc. 2022-06-23 16:45:54 +07:00
Bartłomiej Burdukiewicz
7e49fc828d Recognize armv7ve as valid ARMv7 target.
Docs: https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html

'armv7ve' - The extended version of the ARMv7-A architecture with support for virtualization.

Signed-off-by: Bartłomiej Burdukiewicz <bartlomiej.burdukiewicz@gmail.com>
2022-06-21 18:31:24 +02:00
XMRig
fdfbb60840 Update deps. 2022-06-19 13:39:32 +07:00
xmrig
ee51dec499 Merge pull request #3068 from SChernykh/dev
Better fix for daemon solo mining with ZMQ
2022-06-13 03:37:56 +07:00
SChernykh
575742078c Better fix for daemon solo mining with ZMQ 2022-06-12 22:32:50 +02:00
xmrig
6bab67bced Merge pull request #3067 from SChernykh/dev
Monero v15 network upgrade support and more house keeping
2022-06-13 01:16:34 +07:00
SChernykh
db9069897d Improved daemon ZMQ mining stability 2022-06-12 14:41:47 +02:00
SChernykh
30641b1bdf Fixed ZMQ debug log 2022-06-12 12:33:09 +02:00
SChernykh
45061f40d8 Monero v15 network upgrade support 2022-06-12 11:49:54 +02:00
SChernykh
9f70752090 Fixed debug GhostRider build 2022-06-12 11:47:56 +02:00
SChernykh
22d6a7525e Removed deprecated AstroBWTv1 and v2 2022-06-12 11:47:36 +02:00
benthetechguy
c0bce256e1 Add x86 to README 2022-05-31 21:15:37 -04:00
xmrig
09a7219651 Merge pull request #3055 from benthetechguy/patch-1
Add armv7 to README
2022-05-21 15:50:01 +07:00
benthetechguy
97869f3347 Add armv7 to supported architectures 2022-05-20 23:19:34 -04:00
xmrig
1bbbff7d17 Merge pull request #3054 from SChernykh/dev
Fixes for 32-bit ARM
2022-05-21 09:57:17 +07:00
SChernykh
97683e5719 Fixes for 32-bit ARM 2022-05-20 21:16:10 +02:00
xmrig
059d5d8421 Merge pull request #3051 from SChernykh/dev
Fixed unaligned memory read in DMI
2022-05-20 09:11:26 +07:00
SChernykh
285719cde4 Fixed unaligned memory read in DMI 2022-05-19 20:56:19 +02:00
xmrig
c877ba8145 Merge pull request #3042 from SChernykh/dev
Fixed being unable to resume from pause-on-battery
2022-05-06 02:14:52 +07:00
SChernykh
6793981066 Fixed being unable to resume from pause-on-battery
Fixes #3041
2022-05-05 21:13:02 +02:00
xmrig
1ae9a4e428 Merge pull request #3031 from SChernykh/dev
Fixed --cpu-priority not working sometimes
2022-04-20 07:44:33 +07:00
SChernykh
0e57053c5a Fixed --cpu-priority not working sometimes 2022-04-19 19:57:12 +02:00
xmrig
232d2d6dc5 Merge pull request #3020 from SChernykh/dev
Removed old AstroBWT algorithm
2022-04-15 16:03:26 +07:00
SChernykh
a3cb74f29b Removed old AstroBWT algorithm
It's not used anywhere now.
2022-04-15 10:59:31 +02:00
XMRig
56753d7c4a v6.17.1-dev 2022-04-06 01:58:24 +07:00
XMRig
f7b9e3ca67 Merge branch 'master' into dev 2022-04-06 01:58:03 +07:00
XMRig
56c95703a5 v6.17.0 2022-04-05 21:46:01 +07:00
XMRig
eadf272425 Merge branch 'dev' 2022-04-05 21:45:26 +07:00
XMRig
cb227a0a79 Merge branch 'dev' of github.com:xmrig/xmrig into dev 2022-04-05 15:15:47 +07:00
XMRig
4c171bea1e Disable donate for astrobwt/v2. 2022-04-05 15:15:03 +07:00
xmrig
e55a854314 Update CHANGELOG.md 2022-04-04 20:33:39 +07:00
XMRig
5bdfafd719 v6.17.0-dev 2022-04-04 20:17:40 +07:00
xmrig
15a2091837 Merge pull request #2991 from SChernykh/dev
Fixed compilation error
2022-03-24 22:46:33 +07:00
SChernykh
48bd09f730 Fixed compilation error 2022-03-24 16:38:47 +01:00
xmrig
21fb970949 Merge pull request #2990 from SChernykh/dev
Optimized keccak
2022-03-24 22:22:21 +07:00
SChernykh
23c12fc351 Optimized keccak
Big astrobwt/v2 speedup on non-AVX2 CPUs: **Core i7-2600 +64% (17 -> 28 kh/s)**
2022-03-24 13:10:03 +01:00
xmrig
71d193676a Merge pull request #2974 from SChernykh/dev
Fixed AstroBWT OpenCL config generation
2022-03-16 16:19:08 +07:00
SChernykh
baef34ba8c Fixed AstroBWT OpenCL config generation 2022-03-16 10:15:38 +01:00
xmrig
95a739d821 Merge pull request #2969 from SChernykh/dev
Dero HE (astrobwt/v2) OpenCL support
2022-03-15 08:51:46 +07:00
SChernykh
7b9135aadc Dero HE (astrobwt/v2) OpenCL support 2022-03-14 20:13:31 +01:00
xmrig
e6f694ca9e Merge pull request #2958 from SChernykh/dev
Fixed out of bounds access in astrobwt/v2
2022-03-10 06:40:34 +07:00
xmrig
afd79e7537 Merge pull request #2961 from SChernykh/derohe_cuda
Dero HE (astrobwt/v2) CUDA config generator
2022-03-10 06:37:30 +07:00
SChernykh
a2728af4f7 Dero HE (astrobwt/v2) CUDA config generator 2022-03-10 00:24:49 +01:00
SChernykh
65dbded9c4 Fixed out of bounds access in astrobwt/v2 2022-03-08 22:31:34 +01:00
XMRig
f25e65b5ac Update hwloc for MSVC builds. 2022-03-07 04:29:13 +07:00
XMRig
bbb19ea2f9 #2941 Update deps scripts. 2022-03-07 00:27:49 +07:00
xmrig
1c5b332add Merge pull request #2954 from SChernykh/dev
Dero HE fork support (astrobwt/v2 algorithm)
2022-03-06 04:18:43 +07:00
SChernykh
87fd0ea94a Added alternative algo names for Dero HE 2022-03-05 13:32:16 +01:00
SChernykh
4a42dca2cb Show block/miniblock counters more often 2022-03-05 10:00:39 +01:00
SChernykh
b674fafa0f DaemonClient: fixed broken coin setting 2022-03-05 09:56:30 +01:00
SChernykh
b5da73389f Dero HE fork support (astrobwt/v2 algorithm) 2022-03-05 00:31:18 +01:00
XMRig
bf5e38545c Fixed displayed DMI memory information for empty slots. 2022-03-01 02:50:30 +07:00
xmrig
f7543ada60 Merge pull request #2932 from SChernykh/dev
Fixed GhostRider with hwloc disabled
2022-02-16 19:23:26 +07:00
SChernykh
95e1705fc8 Fixed GhostRider with hwloc disabled 2022-02-16 08:13:48 +01:00
XMRig
2d0b07afbc v6.16.5-dev 2022-02-05 16:22:55 +07:00
XMRig
b33ccf0e0b Merge branch 'master' into dev 2022-02-05 16:21:19 +07:00
XMRig
4f5f9bdffb v6.16.4 2022-02-04 16:11:37 +07:00
XMRig
4d3e3daa6a Merge branch 'dev' 2022-02-04 16:10:58 +07:00
xmrig
802029e5f5 Update CHANGELOG.md 2022-02-04 15:14:46 +07:00
XMRig
14117e9658 #2910 Fixed donation for GhostRider/RTM. 2022-01-31 14:29:41 +07:00
xmrig
7ccb1d65f0 Merge pull request #2908 from Spudz76/dev-addMSVC2022
Add MSVC/2022 to version.h
2022-01-31 10:14:47 +07:00
Tony Butler
15de3cc16c Add MSVC/2022 to version.h 2022-01-28 21:09:24 -07:00
xmrig
124daa4afd Merge pull request #2898 from SChernykh/armv7
Fixed armv7 compilation
2022-01-26 23:38:45 +07:00
xmrig
5de1609b7d Merge pull request #2904 from SChernykh/dev
Fixed unaligned memory accesses
2022-01-26 23:26:03 +07:00
SChernykh
644f4cc017 Fixed unaligned memory accesses 2022-01-26 17:18:18 +01:00
XMRig
41a3f97060 v6.16.4-dev 2022-01-25 23:21:54 +07:00
XMRig
452080cfbd Merge branch 'master' into dev 2022-01-25 23:21:21 +07:00
XMRig
4f103b6b45 v6.16.3 2022-01-25 21:53:47 +07:00
XMRig
39609c9183 Merge branch 'dev' 2022-01-25 21:53:19 +07:00
xmrig
2adb7b2b74 Update CHANGELOG.md 2022-01-25 20:57:06 +07:00
SChernykh
3673137df6 Fixed armv7 compilation
Fix for error `Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A.`
2022-01-25 12:37:41 +01:00
xmrig
faa7095865 Merge pull request #2893 from SChernykh/dev
KawPow OpenCL: use separate UV loop for building programs
2022-01-24 19:30:24 +07:00
SChernykh
e0701f9dad KawPow OpenCL: build next period only when it's not in cache 2022-01-24 13:28:58 +01:00
SChernykh
14aacf8636 KawPow OpenCL: use separate UV loop for building programs
Fixes #2890: uv_default_loop() can't be used there because UV loops are not thread safe.
2022-01-24 13:20:04 +01:00
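A compact illustration of the "separate UV loop" idea, again not the real OclKawPow code: a private `uv_loop_t` is created, run, and closed entirely by one builder thread, so nothing ever touches `uv_default_loop()` from a foreign thread.

```
#include <uv.h>
#include <cstdio>
#include <thread>

// Work that must run on a UV loop, but must not race with uv_default_loop():
// give it a private loop driven by a private thread.
static void build_programs()
{
    uv_loop_t loop;
    uv_loop_init(&loop);

    uv_timer_t timer;
    uv_timer_init(&loop, &timer);
    uv_timer_start(&timer, [](uv_timer_t *t) {
        std::printf("building KawPow period on the builder loop\n");
        uv_close(reinterpret_cast<uv_handle_t *>(t), nullptr);
    }, 0, 0);

    uv_run(&loop, UV_RUN_DEFAULT);   // only this thread ever drives this loop
    uv_loop_close(&loop);
}

int main()
{
    std::thread builder(build_programs);
    builder.join();
    return 0;
}
```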
xmrig
c764441337 Update CHANGELOG.md 2022-01-22 00:05:54 +07:00
xmrig
05fae12a63 Merge pull request #2882 from benthetechguy/armv7-gcc
armv7 compilation fix
2022-01-21 23:52:59 +07:00
XMRig
8059ce67f9 Add missing DNS options to config example. 2022-01-21 20:17:00 +07:00
benthetechguy
10111fd7f9 armv7 compilation fix
Compilation fails for armv7 on gcc 11, and updating the version in that line fixes it.
2022-01-20 20:47:26 -05:00
xmrig
2d25bec2df Merge pull request #2873 from SChernykh/dev
Fixed GhostRider benchmark on single-core systems
2022-01-18 10:13:16 +07:00
SChernykh
cab244d468 Fixed GhostRider benchmark on single-core systems
Fixes #2871
2022-01-17 19:59:21 +01:00
xmrig
4001488888 Merge pull request #2856 from SChernykh/dev
Fix for short responses from some Raptoreum pools
2022-01-05 10:22:39 +07:00
SChernykh
9bec1521b8 Fix for short responses from some Raptoreum pools 2022-01-04 23:54:59 +01:00
xmrig
7bde3ed5f7 Merge pull request #2848 from Spudz76/dev-addClientReconnect
Add support for client.reconnect method
2021-12-30 20:44:22 +07:00
Tony Butler
2e738509bb Add support for client.reconnect method 2021-12-30 06:26:52 -07:00
xmrig
f5447088cb Merge pull request #2837 from SChernykh/dev
RandomX: don't restart mining threads when the seed changes
2021-12-26 18:03:48 +07:00
SChernykh
7f2f50a8d9 RandomX: don't restart mining threads when the seed changes
It helps to not lose huge pages when the seed changes (every 2048 blocks, ~2.8 days).
2021-12-25 13:39:15 +01:00
xmrig
5747ccfafc Merge pull request #2827 from SChernykh/dev
GhostRider: set correct priority for helper threads
2021-12-20 18:07:23 +07:00
SChernykh
93081eb1f6 GhostRider: set correct priority for helper threads
Fixes #2825
2021-12-20 12:05:17 +01:00
xmrig
4bf65c8669 Update README.md 2021-12-19 22:26:50 +07:00
xmrig
1a6fc3a665 Merge pull request #2815 from SChernykh/dev
Fixed cn-heavy in 32-bit builds
2021-12-16 10:44:16 +07:00
SChernykh
8dede14ac8 Fixed cn-heavy in 32-bit builds 2021-12-15 21:17:25 +00:00
xmrig
20687a397e Merge pull request #2782 from SChernykh/dev
Updated GhostRider documentation
2021-12-03 22:16:33 +07:00
SChernykh
454f97fa0f Updated GhostRider documentation
Added examples for SSL port command line that don't use #1 pool.
2021-12-03 12:30:09 +01:00
xmrig
8149fc7dcb Merge pull request #2778 from SChernykh/dev
Fixed "READY threads X/X" display after algo switching
2021-12-03 15:02:58 +07:00
SChernykh
a39ab89236 Fixed "READY threads X/X" display after algo switching 2021-12-03 07:44:23 +01:00
XMRig
5b8501fb57 v6.16.3-dev 2021-12-02 22:10:57 +07:00
XMRig
039be2ab75 Merge branch 'master' into dev 2021-12-02 22:10:23 +07:00
XMRig
718c7e0fc1 v6.16.2 2021-12-02 20:55:27 +07:00
XMRig
ef7951b91d Merge branch 'dev' 2021-12-02 20:54:50 +07:00
xmrig
214b1f021b Update CHANGELOG.md 2021-12-02 20:52:53 +07:00
XMRig
81b18c0741 #2771 Fixed environment variables support in EthStratumClient. 2021-12-02 19:36:51 +07:00
xmrig
8e83f72456 Merge pull request #2772 from SChernykh/dev
Compilation fixes
2021-12-02 19:36:09 +07:00
SChernykh
c2ae625032 Compilation fixes 2021-12-02 13:34:24 +01:00
xmrig
60566dc84c Merge pull request #2769 from SChernykh/compiler_fix
Performance fixes
2021-12-02 10:00:29 +07:00
SChernykh
4ea8fe694d GhostRider benchmark: added 20 more possible rounds 2021-12-01 20:26:41 +01:00
SChernykh
669d1ab008 Updated changelog and GhostRider readme 2021-12-01 18:14:01 +01:00
SChernykh
e87d5111a2 Compiler fix 2021-12-01 17:08:40 +01:00
xmrig
56158779de Merge pull request #2761 from SChernykh/dev
Refactored Chrono::highResolutionMSecs()
2021-11-30 19:13:24 +07:00
SChernykh
efb322df66 Refactored Chrono::highResolutionMSecs()
Improved precision
2021-11-30 08:11:09 +01:00
xmrig
e673d541c1 Merge pull request #2751 from SChernykh/dev
VAES crash fixes
2021-11-30 09:49:37 +07:00
SChernykh
a98db529fb Explicitly use QueryPerformanceCounter() on Windows 2021-11-29 21:58:24 +01:00
SChernykh
1a9eaaad8f VAES crash fixes 2021-11-29 21:05:51 +01:00
XMRig
be5fbca9b6 v6.16.2-dev 2021-11-29 21:35:42 +07:00
XMRig
2feb264375 Merge branch 'master' into dev 2021-11-29 21:35:02 +07:00
XMRig
00990f2649 v6.16.1 2021-11-29 20:43:17 +07:00
XMRig
d78713be48 Merge branch 'dev' 2021-11-29 20:42:32 +07:00
XMRig
77367abe13 Fixed Clang build. 2021-11-29 16:01:16 +07:00
xmrig
cd046f6fd0 Merge pull request #2747 from SChernykh/dev
Disable VAES in 32-bit builds
2021-11-29 15:50:17 +07:00
SChernykh
63b7ec2887 Check compiler support for VAES 2021-11-29 09:48:15 +01:00
xmrig
a1e8f1c3e5 Merge pull request #2746 from Spudz76/dev-fixVAESCompile
Fix compile for VAES support with GCC<10
2021-11-29 15:38:35 +07:00
SChernykh
6db480a1ab Disable VAES in 32-bit builds 2021-11-29 09:32:00 +01:00
Tony Butler
a7acd9de6d Fix compile for VAES support with GCC<10 2021-11-28 22:11:42 -07:00
XMRig
a64f4d1870 v6.16.1-dev 2021-11-29 09:29:24 +07:00
XMRig
9bfe59b630 Merge branch 'master' into dev 2021-11-29 09:28:43 +07:00
xmrig
1a4bf16521 Merge pull request #2740 from SChernykh/dev
Added VAES support for Cryptonight variants
2021-11-29 09:26:45 +07:00
SChernykh
a4d5d0a75a Added VAES support for Cryptonight variants 2021-11-28 20:49:54 +01:00
xmrig
c40f1f9f66 Merge pull request #2738 from SChernykh/dev
More GhostRider fixes
2021-11-28 18:19:08 +07:00
SChernykh
15e5052dd0 More GhostRider fixes
- Fixed "difficulty is not a number" when diff is high on some pools
- Fixed GhostRider compilation when WITH_KAWPOW=OFF
2021-11-28 12:11:08 +01:00
xmrig
f9f7963453 Merge pull request #2734 from Spudz76/dev-nitpickWhitespace
Slash and burn EOL whitespace everywhere
2021-11-28 10:51:45 +07:00
Tony Butler
02240eff8c Slash and burn EOL whitespace everywhere 2021-11-27 17:59:40 -07:00
xmrig
d64c963e5e Merge pull request #2729 from SChernykh/dev
GhostRider hotfixes
2021-11-27 18:31:19 +07:00
SChernykh
c6292ce9ee GhostRider hotfixes
- Added average hashrate display
- Fixed the number of threads shown at startup
- Fixed `--threads` or `-t` command line option (but `--cpu-max-threads-hint` is recommended instead)
2021-11-27 12:27:26 +01:00
XMRig
cd652e2644 v6.16.0 2021-11-26 18:57:07 +07:00
XMRig
6f5ef0fe0f Merge branch 'dev' 2021-11-26 18:51:53 +07:00
xmrig
01fa968763 Update CHANGELOG.md 2021-11-26 18:50:34 +07:00
xmrig
8e6f3ad99e Merge pull request #2719 from SChernykh/dev
Added GhostRider release notes
2021-11-25 19:21:50 +07:00
SChernykh
b1f2479ec1 Added GhostRider release notes 2021-11-25 13:19:01 +01:00
XMRig
ecceba8ecd Add GhostRider support for AutoClient. 2021-11-25 17:44:36 +07:00
xmrig
cb5f4a9c17 Merge pull request #2716 from Spudz76/dev-initGR
Only initGhostRider() when job is in the family
2021-11-25 09:09:16 +07:00
Tony Butler
3a8ebfdcb6 Only initGhostRider() when job is in the family 2021-11-24 13:04:03 -07:00
xmrig
0dcafeb571 Merge pull request #2715 from SChernykh/dev
Benchmark support for GhostRider (offline only)
2021-11-24 22:05:44 +07:00
SChernykh
a1d7ee4c6b Benchmark support for GhostRider (offline only)
Command line:
```
./xmrig --bench=250K -a gr --rotation 15
```
Where `rotation` is an integer between 0 and 19 (inclusive).
2021-11-24 15:54:09 +01:00
XMRig
03e70ba2ed v6.16.0-dev 2021-11-24 19:49:21 +07:00
xmrig
19ef8c5d65 Merge pull request #2714 from SChernykh/gh3
GhostRider: fixed invalid hashes on ARMv8
2021-11-24 19:40:48 +07:00
SChernykh
63baa9e263 GhostRider: fixed invalid hashes on ARMv8 2021-11-24 13:39:55 +01:00
xmrig
1248bd5859 Merge pull request #2713 from SChernykh/gh3
Optimized quad hash for Ryzens
2021-11-24 18:21:01 +07:00
SChernykh
5c951ddb8a Optimized quad hash for Ryzens 2021-11-24 08:16:41 +01:00
xmrig
4ab0ad928d Merge pull request #2712 from SChernykh/gh3
GhostRider algorithm (Raptoreum) support
2021-11-24 09:49:34 +07:00
SChernykh
e67eb47796 Faster quad hash for GhostRider algos (Ryzen CPUs) 2021-11-23 22:14:46 +01:00
SChernykh
a6656a8c49 Fixed broken difficulty adjustment on some Raptoreum pools 2021-11-23 18:02:58 +01:00
SChernykh
a903d0a5bd Fixed compilation error 2021-11-23 08:52:30 +01:00
SChernykh
ceaebfd877 GhostRider algorithm (Raptoreum) support 2021-11-23 08:14:01 +01:00
xmrig
5156ff11a8 Merge pull request #2684 from SChernykh/fix-183
MSR mod: fix for error 183
2021-11-11 17:50:08 +07:00
xmrig
e0143a92a8 Merge pull request #2682 from SChernykh/dev
Fix: use cn-heavy optimization only for Vermeer CPUs
2021-11-11 17:49:51 +07:00
SChernykh
f682d9a2e9 MSR mod: fix for error 183
When the WinRing0 driver starts but some other version has already created "\\.\WinRing0_1_2_0", it returns error 183, ERROR_ALREADY_EXISTS: "Cannot create a file when that file already exists."
2021-11-11 10:26:38 +01:00
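A hedged Windows-only sketch of that situation (the real loader goes through the service control manager; `shouldReuseExistingDriver` and the simulated error are invented for illustration): error 183 means the device name already exists, so reuse it instead of treating it as fatal.

```
#ifdef _WIN32
#include <windows.h>
#include <cstdio>

// Decide how to react to a failed driver install/start step.
static bool shouldReuseExistingDriver(DWORD err)
{
    // 183: "Cannot create a file when that file already exists."
    // Some other WinRing0 build already created \\.\WinRing0_1_2_0,
    // so the device is usable -- open it instead of reporting a fatal error.
    return err == ERROR_ALREADY_EXISTS;
}

int main()
{
    const DWORD simulatedError = ERROR_ALREADY_EXISTS;   // stand-in for GetLastError()

    if (shouldReuseExistingDriver(simulatedError)) {
        HANDLE h = CreateFileA("\\\\.\\WinRing0_1_2_0",
                               GENERIC_READ | GENERIC_WRITE,
                               0, nullptr, OPEN_EXISTING, 0, nullptr);
        std::printf("reusing existing driver: %s\n",
                    h != INVALID_HANDLE_VALUE ? "ok" : "open failed");
        if (h != INVALID_HANDLE_VALUE) {
            CloseHandle(h);
        }
    }
    return 0;
}
#else
int main() { return 0; }   // Windows-only illustration
#endif
```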
SChernykh
3bece0ff40 Fix: use cn-heavy optimization only for Vermeer CPUs
Fixes #2680
2021-11-11 07:57:05 +01:00
XMRig
e6c456a970 v6.15.4-dev 2021-11-02 18:26:44 +07:00
XMRig
923d1d712f Merge branch 'master' into dev 2021-11-02 18:26:12 +07:00
XMRig
ae8459bd35 v6.15.3 2021-11-01 19:59:05 +07:00
XMRig
3a7be07c62 Merge branch 'dev' 2021-11-01 19:58:30 +07:00
xmrig
e1cc0000c6 Update CHANGELOG.md 2021-11-01 12:27:10 +07:00
xmrig
1210e8e95c Merge pull request #2644 from Spudz76/dev-fixMemleaks
Patch a couple minor leaks
2021-10-25 20:33:10 +07:00
xmrig
a45fbd9cae Merge pull request #2646 from SChernykh/dev
Fix MSVC compilation error
2021-10-25 20:31:12 +07:00
Tony Butler
f6d45f7990 Fix various memory leaks 2021-10-25 04:06:49 -06:00
SChernykh
b9464f993b Fix MSVC compilation error 2021-10-25 10:26:44 +02:00
xmrig
f8f73b0cd7 Merge pull request #2641 from SChernykh/dev
AstroBWT: fixed rare incorrect hashes
2021-10-20 07:24:15 +07:00
SChernykh
df6ab2edd8 AstroBWT: fixed rare incorrect hashes 2021-10-19 19:08:56 +02:00
xmrig
8bf7600154 Merge pull request #2639 from SChernykh/dev
AstroBWT even bigger speedup (up to +35%)
2021-10-19 22:50:07 +07:00
SChernykh
a30501956f AstroBWT even bigger speedup 2021-10-19 17:37:45 +02:00
xmrig
c287a40a20 Merge pull request #2636 from SChernykh/dev
AstroBWT speedup (up to +7%)
2021-10-19 07:58:24 +07:00
SChernykh
04f50c24e2 AstroBWT speedup 2021-10-18 18:05:51 +02:00
xmrig
7627b23212 Merge pull request #2614 from Spudz76/dev-fixAppleOpenCL
OpenCL fixes for non-AMD platforms
2021-10-13 06:20:53 +07:00
XMRig
e90e7febfb Merge branch 'StriderDM-merge_mining_tag_fix' into dev 2021-10-13 05:43:27 +07:00
XMRig
733b85a132 Code cleanup. 2021-10-13 05:43:05 +07:00
XMRig
35ba786e63 Merge branch 'merge_mining_tag_fix' of https://github.com/StriderDM/xmrig into StriderDM-merge_mining_tag_fix 2021-10-13 05:33:34 +07:00
David Main
446810a837 fix: expand validation of tx_extra for merge mining tag 2021-10-12 11:17:37 +02:00
Tony Butler
c6a68c3e51 Cap max threads to 4096 with nVidia OpenCL 2021-10-11 04:17:01 -06:00
Tony Butler
ca8bef3ade Adjust API version logic 2021-10-11 04:17:01 -06:00
Tony Butler
d735caa334 Adjust definitions and replace literal 0x4038 2021-10-11 04:17:01 -06:00
Tony Butler
eb54cc0e0f Revert amd_bitalign/amd_bfe polyfills 2021-10-11 04:17:01 -06:00
Tony Butler
84c67c37cd Apply "no-static-without-amd" fixes 2021-10-11 04:17:01 -06:00
Tony Butler
b44f38a362 Attempt repair of cn/r output-array access problem 2021-10-11 04:17:01 -06:00
Tony Butler
8ed4088d0a Second try at fixing cn/r atomic_inc() call 2021-10-11 04:17:01 -06:00
Tony Butler
cdcea2a4f9 Attempt fix for cn/r on Apple-AMD 2021-10-11 04:17:01 -06:00
Tony Butler
f0d80326ec Add Ellesmere correctly (still just a Polaris alias) 2021-10-11 04:17:01 -06:00
Tony Butler
cb8fc26cbe Add every Apple AMD GPU type 2021-10-11 04:17:01 -06:00
Tony Butler
5ec5b5ed00 Possibly fix problem with clGetProgramInfo crash 2021-10-11 04:17:01 -06:00
Tony Butler
67e29c1af1 Readjust OclDevice logic and add OCL_VENDOR_APPLE 2021-10-11 04:17:01 -06:00
xmrig
4bd94a79a4 Merge pull request #2623 from Spudz76/dev-fixWithoutKawpow
Fix #2583 compiling without kawpow (string ref is nonexistent then)
2021-10-11 16:41:44 +07:00
Tony Butler
80e597d951 Fix #2583 compiling without kawpow (string ref is nonexistent then) 2021-10-11 03:31:28 -06:00
XMRig
2e269f5b8c v6.15.3-dev 2021-10-06 02:01:29 +07:00
XMRig
57b8e35903 Merge branch 'master' into dev 2021-10-06 02:00:49 +07:00
XMRig
53be5765e6 v6.15.2 2021-10-05 23:28:29 +07:00
XMRig
68741c925b Merge branch 'dev' 2021-10-05 23:28:06 +07:00
xmrig
9ce207e667 Update CHANGELOG.md 2021-10-05 22:24:58 +07:00
XMRig
07e0966517 Added "--versions" alias. 2021-10-05 21:49:03 +07:00
XMRig
a9d4c2a923 Removed uv_os_gethostname call for all OS. 2021-09-28 23:56:33 +07:00
xmrig
dc02e1feaa Merge pull request #2606 from SChernykh/dev
Fix: AstroBWT auto-config ignored max-threads-hint
2021-09-26 18:51:47 +07:00
SChernykh
7daff331dc Fix: AstroBWT auto-config ignored max-threads-hint 2021-09-26 12:22:58 +02:00
XMRig
058a2fb0f4 v6.15.2-dev 2021-09-22 19:13:07 +07:00
XMRig
4fff3b946e Merge branch 'master' into dev 2021-09-22 19:12:38 +07:00
XMRig
f7aa5e781b v6.15.1 2021-09-22 13:08:00 +07:00
XMRig
298c5cccfa Merge branch 'dev' 2021-09-22 13:05:36 +07:00
xmrig
2985571620 Update CHANGELOG.md 2021-09-21 18:59:47 +07:00
xmrig
279d29cd7f Merge pull request #2594 from SChernykh/dev
Added Windows taskbar icon colors
2021-09-20 23:07:00 +07:00
SChernykh
387320ad6d Added Windows taskbar icon colors
- Red when there's no connection to any pool
- Yellow when mining is paused
- No color during normal mining
2021-09-20 18:03:22 +02:00
XMRig
76cd83edb2 Merge branch 'Spudz76-dev-fixAsteriskProfiling' into dev 2021-09-20 20:56:11 +07:00
XMRig
7f4d667351 Remove unnecessary string. 2021-09-20 20:53:36 +07:00
Tony Butler
8027716264 Fix --threads generates "*" profile without "kawpow":false to negate it. 2021-09-20 06:49:17 -06:00
xmrig
a459dd7741 Merge pull request #2591 from Spudz76/dev-fixCompileNoRX
Fix compile warning/crash when WITH_RANDOMX=OFF
2021-09-20 10:50:00 +07:00
Tony Butler
ef6011ac12 Fix compile warning when WITH_RANDOMX=OFF 2021-09-19 18:12:46 -06:00
xmrig
6d66051d92 Merge pull request #2586 from SChernykh/dev
Fixed Windows 7 compatibility
2021-09-17 17:11:09 +07:00
SChernykh
b2cc2ef0d7 Fixed Windows 7 compatibility
Fixes #2585
2021-09-17 12:05:37 +02:00
xmrig
9805320517 Merge pull request #2582 from Spudz76/dev-fixupRXnaming
Fixup RandomX naming consistency
2021-09-17 08:03:03 +07:00
Tony Butler
582d17bb84 Fixup RandomX naming consistency 2021-09-16 08:24:37 -06:00
XMRig
9e5f5b35a6 v6.15.1-dev 2021-08-31 18:57:08 +07:00
XMRig
9a9c69ff50 Merge branch 'master' into dev 2021-08-31 18:56:31 +07:00
XMRig
5c1f3f395c v6.15.0 2021-08-31 14:42:43 +07:00
XMRig
23cefffe43 Merge branch 'dev' 2021-08-31 14:41:47 +07:00
XMRig
d048d5a639 Fixed class/struct inconsistency. 2021-08-31 03:32:36 +07:00
xmrig
9a6f773dea Update CHANGELOG.md 2021-08-29 20:19:41 +07:00
XMRig
cd7c7902a9 Fixed clang build. 2021-08-29 18:52:11 +07:00
xmrig
fd3dad920d Merge pull request #2565 from SChernykh/dev
AstroBWT: add AVX2 Salsa20 implementation
2021-08-29 15:42:00 +07:00
SChernykh
3dc192f63e AstroBWT: add AVX2 Salsa20 implementation
+4.5% speedup on Ryzen 5 5600X
2021-08-29 10:35:43 +02:00
XMRig
123c7ab140 Added support for new CUDA plugin API. 2021-08-29 14:22:19 +07:00
XMRig
838996a0fc v6.15.0-dev 2021-08-28 19:53:28 +07:00
XMRig
6e4fea34a4 #2555 Update deps. 2021-08-28 13:10:48 +07:00
XMRig
b52c289931 Increase RANDOMX_PROGRAM_MAX_SIZE 2021-08-28 12:32:57 +07:00
XMRig
4dbb5b89da Update hwloc for MSVC. 2021-08-28 12:16:41 +07:00
XMRig
84d0212e79 Merge branch 'pr2563' into dev 2021-08-28 11:54:11 +07:00
XMRig
35acb3f00b Merge branch 'GraftRandomX' of https://github.com/Stardock2018/xmrig into pr2563 2021-08-28 11:50:17 +07:00
Chris
7f2771b466 Fixed Algorithm id
Algorithm id should be 0x72151267; the second and third bytes encode the L3 and L2 sizes.
0x72 = 'r'
0x15 = 1 << 0x15 (L3 size)
0x12 = 1 << 0x12 (L2 size)
0x67 = 'g'
2021-08-27 10:31:36 -06:00
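A tiny check of that byte layout, plain arithmetic rather than xmrig code: the four bytes of `0x72151267` decode to 'r', a log2 L3 size, a log2 L2 size, and 'g'.

```
#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t id = 0x72151267;

    const char     tagHi = static_cast<char>(id >> 24);    // 0x72 = 'r'
    const uint32_t l3    = 1u << ((id >> 16) & 0xFF);      // 1 << 0x15 = 2 MiB
    const uint32_t l2    = 1u << ((id >> 8)  & 0xFF);      // 1 << 0x12 = 256 KiB
    const char     tagLo = static_cast<char>(id & 0xFF);   // 0x67 = 'g'

    std::printf("%c / L3 = %u bytes / L2 = %u bytes / %c\n", tagHi, l3, l2, tagLo);
    return 0;
}
```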
Chris
5fdf5516ff Added Graft RandomX 2021-08-27 08:19:54 -06:00
XMRig
234de96784 Update rapidjson. 2021-08-27 18:51:59 +07:00
XMRig
df4532d9a1 Cleanup ARM code. 2021-08-27 12:36:08 +07:00
XMRig
c27f535768 Fixed build on Linux. 2021-08-25 18:52:54 +07:00
XMRig
c7ac314110 Code cleanup based on Clang-Tidy. 2021-08-25 18:45:15 +07:00
XMRig
3215403815 Add missing files. 2021-08-23 18:43:14 +07:00
XMRig
bea2a6cf5b Update BlockTemplate class. 2021-08-23 18:32:58 +07:00
xmrig
a28f411339 Merge pull request #2548 from xmrig/feature-auto-coin
Added automatic coin detection for daemon mining
2021-08-19 15:35:37 +07:00
XMRig
460d9c75c5 Add global wallet address parser for DaemonClient. 2021-08-18 13:36:50 +07:00
XMRig
d1033abbe5 Update Coin, BlobReader and WalletAddress. 2021-08-17 08:17:21 +07:00
XMRig
9eac9dd30a v6.14.2-dev 2021-08-15 02:12:33 +07:00
XMRig
8d7b6adf98 Merge branch 'master' into dev 2021-08-15 02:11:29 +07:00
XMRig
230ff87634 v6.14.1 2021-08-15 00:42:47 +07:00
XMRig
19adf2630a Merge branch 'dev' 2021-08-15 00:42:12 +07:00
xmrig
3de4b16117 Update CHANGELOG.md 2021-08-15 00:37:23 +07:00
XMRig
602e3a7587 Fix algorithms order. 2021-08-14 05:48:37 +07:00
XMRig
4f6ffb67c1 Cleanup. 2021-08-14 04:58:01 +07:00
xmrig
a0194ddd18 Merge pull request #2537 from SChernykh/dev
Fixed Termux build
2021-08-13 21:14:35 +07:00
SChernykh
30f7e876a2 Update CnHash.cpp 2021-08-13 16:03:15 +02:00
SChernykh
5958490c23 Fixed Termux build 2021-08-13 12:02:03 +02:00
XMRig
f92ad4423d Fix Job::getNumTransactions. 2021-08-12 22:30:47 +07:00
XMRig
e0749a82c2 Fix cn-pico name. 2021-08-12 17:52:52 +07:00
xmrig
440aa003af Merge pull request #2532 from xmrig/feature-stable-algo-id
Refactoring: Stable (persistent) algorithms IDs.
2021-08-12 02:03:39 +07:00
XMRig
9580f5395f Removed shortName. 2021-08-11 22:26:34 +07:00
XMRig
e9ae4deb91 Removed duplicate strings. 2021-08-11 22:07:43 +07:00
XMRig
aee0762424 Fix typo. 2021-08-11 16:27:36 +07:00
XMRig
e6332eff2b Implemented stable algorithm ids. 2021-08-11 03:46:34 +07:00
XMRig
d0a632f557 Optimize CnHash storage. 2021-08-10 14:54:35 +07:00
XMRig
f4cdc527b0 #2527 Fix narrowing conversion. 2021-08-10 01:40:36 +07:00
XMRig
661dc515ab namespace cleanup. 2021-08-09 23:51:07 +07:00
XMRig
6d9bafe068 v6.14.1-dev 2021-08-09 17:20:15 +07:00
XMRig
202c8aaee8 Merge branch 'master' into dev 2021-08-09 17:19:48 +07:00
510 changed files with 100642 additions and 23068 deletions

View File: CHANGELOG.md

@@ -1,3 +1,155 @@
# v6.21.1
- [#3391](https://github.com/xmrig/xmrig/pull/3391) Added support for townforge (monero fork using randomx).
- [#3399](https://github.com/xmrig/xmrig/pull/3399) Fixed Zephyr mining (OpenCL).
- [#3420](https://github.com/xmrig/xmrig/pull/3420) Fixed segfault in HTTP API rebind.
# v6.21.0
- [#3302](https://github.com/xmrig/xmrig/pull/3302) [#3312](https://github.com/xmrig/xmrig/pull/3312) Enabled keepalive for Windows (>= Vista).
- [#3320](https://github.com/xmrig/xmrig/pull/3320) Added "built for OS/architecture/bits" to "ABOUT".
- [#3339](https://github.com/xmrig/xmrig/pull/3339) Added SNI option for TLS connections.
- [#3342](https://github.com/xmrig/xmrig/pull/3342) Update `cn_main_loop.asm`.
- [#3346](https://github.com/xmrig/xmrig/pull/3346) ARM64 JIT: don't use `x18` register.
- [#3348](https://github.com/xmrig/xmrig/pull/3348) Update to latest `sse2neon.h`.
- [#3356](https://github.com/xmrig/xmrig/pull/3356) Updated pricing record size for **Zephyr** solo mining.
- [#3358](https://github.com/xmrig/xmrig/pull/3358) **Zephyr** solo mining: handle multiple outputs.
# v6.20.0
- Added new ARM CPU names.
- [#2394](https://github.com/xmrig/xmrig/pull/2394) Added new CMake options `ARM_V8` and `ARM_V7`.
- [#2830](https://github.com/xmrig/xmrig/pull/2830) Added API rebind polling.
- [#2927](https://github.com/xmrig/xmrig/pull/2927) Fixed compatibility with hwloc 1.11.x.
- [#3060](https://github.com/xmrig/xmrig/pull/3060) Added x86 to `README.md`.
- [#3236](https://github.com/xmrig/xmrig/pull/3236) Fixed: receive CUDA loader error on Linux too.
- [#3290](https://github.com/xmrig/xmrig/pull/3290) Added [Zephyr](https://www.zephyrprotocol.com/) coin support for solo mining.
# v6.19.3
- [#3245](https://github.com/xmrig/xmrig/issues/3245) Improved algorithm negotiation for donation rounds by sending extra information about current mining job.
- [#3254](https://github.com/xmrig/xmrig/pull/3254) Tweaked auto-tuning for Intel CPUs.
- [#3271](https://github.com/xmrig/xmrig/pull/3271) RandomX: optimized program generation.
- [#3273](https://github.com/xmrig/xmrig/pull/3273) RandomX: fixed undefined behavior.
- [#3275](https://github.com/xmrig/xmrig/pull/3275) RandomX: fixed `jccErratum` list.
- [#3280](https://github.com/xmrig/xmrig/pull/3280) Updated example scripts.
# v6.19.2
- [#3230](https://github.com/xmrig/xmrig/pull/3230) Fixed parsing of `TX_EXTRA_MERGE_MINING_TAG`.
- [#3232](https://github.com/xmrig/xmrig/pull/3232) Added new `X-Hash-Difficulty` HTTP header.
- [#3240](https://github.com/xmrig/xmrig/pull/3240) Improved .cmd files when run by shortcuts on another drive.
- [#3241](https://github.com/xmrig/xmrig/pull/3241) Added view tag calculation (fixes Wownero solo mining issue).
# v6.19.1
- Resolved deprecated methods warnings with OpenSSL 3.0.
- [#3213](https://github.com/xmrig/xmrig/pull/3213) Fixed build with 32-bit clang 15.
- [#3218](https://github.com/xmrig/xmrig/pull/3218) Fixed: `--randomx-wrmsr=-1` worked only on Intel.
- [#3228](https://github.com/xmrig/xmrig/pull/3228) Fixed build with gcc 13.
# v6.19.0
- [#3144](https://github.com/xmrig/xmrig/pull/3144) Update to latest `sse2neon.h`.
- [#3161](https://github.com/xmrig/xmrig/pull/3161) MSVC build: enabled parallel compilation.
- [#3163](https://github.com/xmrig/xmrig/pull/3163) Improved Zen 3 MSR mod.
- [#3176](https://github.com/xmrig/xmrig/pull/3176) Update cmake required version to 3.1.
- [#3182](https://github.com/xmrig/xmrig/pull/3182) DragonflyBSD compilation fixes.
- [#3196](https://github.com/xmrig/xmrig/pull/3196) Show IP address for failed connections.
- [#3185](https://github.com/xmrig/xmrig/issues/3185) Fixed macOS DMI reader.
- [#3198](https://github.com/xmrig/xmrig/pull/3198) Fixed broken RandomX light mode mining.
- [#3202](https://github.com/xmrig/xmrig/pull/3202) Solo mining: added job timeout (default is 15 seconds).
# v6.18.1
- [#3129](https://github.com/xmrig/xmrig/pull/3129) Fix: protectRX flushed CPU cache only on MacOS/iOS.
- [#3126](https://github.com/xmrig/xmrig/pull/3126) Don't reset when pool sends the same job blob.
- [#3120](https://github.com/xmrig/xmrig/pull/3120) RandomX: optimized `CFROUND` elimination.
- [#3109](https://github.com/xmrig/xmrig/pull/3109) RandomX: added Blake2 AVX2 version.
- [#3082](https://github.com/xmrig/xmrig/pull/3082) Fixed GCC 12 warnings.
- [#3075](https://github.com/xmrig/xmrig/pull/3075) Recognize `armv7ve` as valid ARMv7 target.
- [#3132](https://github.com/xmrig/xmrig/pull/3132) RandomX: added MSR mod for Zen 4.
- [#3134](https://github.com/xmrig/xmrig/pull/3134) Added Zen4 to `randomx_boost.sh`.
# v6.18.0
- [#3067](https://github.com/xmrig/xmrig/pull/3067) Monero v15 network upgrade support and more housekeeping.
  - Removed deprecated AstroBWTv1 and v2.
  - Fixed debug GhostRider build.
  - Monero v15 network upgrade support.
  - Fixed ZMQ debug log.
  - Improved daemon ZMQ mining stability.
- [#3054](https://github.com/xmrig/xmrig/pull/3054) Fixes for 32-bit ARM.
- [#3042](https://github.com/xmrig/xmrig/pull/3042) Fixed being unable to resume from `pause-on-battery`.
- [#3031](https://github.com/xmrig/xmrig/pull/3031) Fixed `--cpu-priority` not working sometimes.
- [#3020](https://github.com/xmrig/xmrig/pull/3020) Removed old AstroBWT algorithm.
# v6.17.0
- [#2954](https://github.com/xmrig/xmrig/pull/2954) **Dero HE fork support (`astrobwt/v2` algorithm).**
- [#2961](https://github.com/xmrig/xmrig/pull/2961) Dero HE (`astrobwt/v2`) CUDA config generator.
- [#2969](https://github.com/xmrig/xmrig/pull/2969) Dero HE (`astrobwt/v2`) OpenCL support.
- Fixed displayed DMI memory information for empty slots.
- [#2932](https://github.com/xmrig/xmrig/pull/2932) Fixed GhostRider with hwloc disabled.
# v6.16.4
- [#2904](https://github.com/xmrig/xmrig/pull/2904) Fixed unaligned memory accesses.
- [#2908](https://github.com/xmrig/xmrig/pull/2908) Added MSVC/2022 to `version.h`.
- [#2910](https://github.com/xmrig/xmrig/issues/2910) Fixed donation for GhostRider/RTM.
# v6.16.3
- [#2778](https://github.com/xmrig/xmrig/pull/2778) Fixed `READY threads X/X` display after algorithm switching.
- [#2782](https://github.com/xmrig/xmrig/pull/2782) Updated GhostRider documentation.
- [#2815](https://github.com/xmrig/xmrig/pull/2815) Fixed `cn-heavy` in 32-bit builds.
- [#2827](https://github.com/xmrig/xmrig/pull/2827) GhostRider: set correct priority for helper threads.
- [#2837](https://github.com/xmrig/xmrig/pull/2837) RandomX: don't restart mining threads when the seed changes.
- [#2848](https://github.com/xmrig/xmrig/pull/2848) GhostRider: added support for `client.reconnect` method.
- [#2856](https://github.com/xmrig/xmrig/pull/2856) Fix for short responses from some Raptoreum pools.
- [#2873](https://github.com/xmrig/xmrig/pull/2873) Fixed GhostRider benchmark on single-core systems.
- [#2882](https://github.com/xmrig/xmrig/pull/2882) Fixed ARMv7 compilation.
- [#2893](https://github.com/xmrig/xmrig/pull/2893) KawPow OpenCL: use separate UV loop for building programs.
# v6.16.2
- [#2751](https://github.com/xmrig/xmrig/pull/2751) Fixed crash on CPUs supporting VAES and running GCC-compiled xmrig.
- [#2761](https://github.com/xmrig/xmrig/pull/2761) Fixed broken auto-tuning in GCC Windows build.
- [#2771](https://github.com/xmrig/xmrig/issues/2771) Fixed environment variables support for GhostRider and KawPow.
- [#2769](https://github.com/xmrig/xmrig/pull/2769) Performance fixes:
  - Fixed several performance bottlenecks introduced in v6.16.1.
  - Fixed overall GCC-compiled build performance; it's the same speed as the MSVC build now.
  - **Linux builds are up to 10% faster now compared to v6.16.0 GCC build.**
  - **Windows builds are up to 5% faster now compared to v6.16.0 MSVC build.**
# v6.16.1
- [#2729](https://github.com/xmrig/xmrig/pull/2729) GhostRider fixes:
  - Added average hashrate display.
  - Fixed the number of threads shown at startup.
  - Fixed `--threads` or `-t` command line option (but `--cpu-max-threads-hint` is recommended instead).
- [#2738](https://github.com/xmrig/xmrig/pull/2738) GhostRider fixes:
  - Fixed "difficulty is not a number" error when diff is high on some pools.
  - Fixed GhostRider compilation when `WITH_KAWPOW=OFF`.
- [#2740](https://github.com/xmrig/xmrig/pull/2740) Added VAES support for Cryptonight variants **+4% speedup on Zen3**.
  - VAES instructions are available on Intel Ice Lake/AMD Zen3 and newer CPUs.
  - +4% speedup on Ryzen 5 5600X.
# v6.16.0
- [#2712](https://github.com/xmrig/xmrig/pull/2712) **GhostRider algorithm (Raptoreum) support**: read the [RELEASE NOTES](src/crypto/ghostrider/README.md) for quick start guide and performance comparisons.
- [#2682](https://github.com/xmrig/xmrig/pull/2682) Fixed: use cn-heavy optimization only for Vermeer CPUs.
- [#2684](https://github.com/xmrig/xmrig/pull/2684) MSR mod: fix for error 183.
# v6.15.3
- [#2614](https://github.com/xmrig/xmrig/pull/2614) OpenCL fixes for non-AMD platforms.
- [#2623](https://github.com/xmrig/xmrig/pull/2623) Fixed compiling without kawpow.
- [#2636](https://github.com/xmrig/xmrig/pull/2636) [#2639](https://github.com/xmrig/xmrig/pull/2639) AstroBWT speedup (up to +35%).
- [#2646](https://github.com/xmrig/xmrig/pull/2646) Fixed MSVC compilation error.
# v6.15.2
- [#2606](https://github.com/xmrig/xmrig/pull/2606) Fixed: AstroBWT auto-config ignored `max-threads-hint`.
- Fixed possible crash on Windows (regression in v6.15.1).
# v6.15.1
- [#2586](https://github.com/xmrig/xmrig/pull/2586) Fixed Windows 7 compatibility.
- [#2594](https://github.com/xmrig/xmrig/pull/2594) Added Windows taskbar icon colors.
# v6.15.0
- [#2548](https://github.com/xmrig/xmrig/pull/2548) Added automatic coin detection for daemon mining.
- [#2563](https://github.com/xmrig/xmrig/pull/2563) Added new algorithm RandomX Graft (`rx/graft`).
- [#2565](https://github.com/xmrig/xmrig/pull/2565) AstroBWT: added AVX2 Salsa20 implementation.
- Added support for new CUDA plugin API (previous API still supported).
# v6.14.1
- [#2532](https://github.com/xmrig/xmrig/pull/2532) Refactoring: stable (persistent) algorithms IDs.
- [#2537](https://github.com/xmrig/xmrig/pull/2537) Fixed Termux build.
# v6.14.0
- [#2484](https://github.com/xmrig/xmrig/pull/2484) Added ZeroMQ support for solo mining.
- [#2476](https://github.com/xmrig/xmrig/issues/2476) Fixed crash in DMI memory reader.

View File: CMakeLists.txt

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 2.8.12)
cmake_minimum_required(VERSION 3.1)
project(xmrig)
option(WITH_HWLOC "Enable hwloc support" ON)
@@ -8,8 +8,8 @@ option(WITH_CN_PICO "Enable CryptoNight-Pico algorithm" ON)
option(WITH_CN_FEMTO "Enable CryptoNight-UPX2 algorithm" ON)
option(WITH_RANDOMX "Enable RandomX algorithms family" ON)
option(WITH_ARGON2 "Enable Argon2 algorithms family" ON)
option(WITH_ASTROBWT "Enable AstroBWT algorithms family" ON)
option(WITH_KAWPOW "Enable KawPow algorithms family" ON)
option(WITH_GHOSTRIDER "Enable GhostRider algorithm" ON)
option(WITH_HTTP "Enable HTTP protocol support (client/server)" ON)
option(WITH_DEBUG_LOG "Enable debug log output" OFF)
option(WITH_TLS "Enable OpenSSL support" ON)
@@ -18,6 +18,8 @@ option(WITH_MSR "Enable MSR mod & 1st-gen Ryzen fix" ON)
option(WITH_ENV_VARS "Enable environment variables support in config file" ON)
option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF)
option(WITH_OPENCL "Enable OpenCL backend" ON)
set(WITH_OPENCL_VERSION 200 CACHE STRING "Target OpenCL version")
set_property(CACHE WITH_OPENCL_VERSION PROPERTY STRINGS 120 200 210 220)
option(WITH_CUDA "Enable CUDA backend" ON)
option(WITH_NVML "Enable NVML (NVIDIA Management Library) support (only if CUDA backend enabled)" ON)
option(WITH_ADL "Enable ADL (AMD Display Library) or sysfs support (only if OpenCL backend enabled)" ON)
@@ -25,12 +27,15 @@ option(WITH_STRICT_CACHE "Enable strict checks for OpenCL cache" ON)
option(WITH_INTERLEAVE_DEBUG_LOG "Enable debug log for threads interleave" OFF)
option(WITH_PROFILING "Enable profiling for developers" OFF)
option(WITH_SSE4_1 "Enable SSE 4.1 for Blake2" ON)
option(WITH_AVX2 "Enable AVX2 for Blake2" ON)
option(WITH_VAES "Enable VAES instructions for Cryptonight" ON)
option(WITH_BENCHMARK "Enable builtin RandomX benchmark and stress test" ON)
option(WITH_SECURE_JIT "Enable secure access to JIT memory" OFF)
option(WITH_DMI "Enable DMI/SMBIOS reader" ON)
option(BUILD_STATIC "Build static binary" OFF)
option(ARM_TARGET "Force use specific ARM target 8 or 7" 0)
option(ARM_V8 "Force ARMv8 (64 bit) architecture, use with caution if automatic detection fails, but you sure it may work" OFF)
option(ARM_V7 "Force ARMv7 (32 bit) architecture, use with caution if automatic detection fails, but you sure it may work" OFF)
option(HWLOC_DEBUG "Enable hwloc debug helpers and log" OFF)
@@ -56,6 +61,7 @@ set(HEADERS
src/core/config/usage.h
src/core/Controller.h
src/core/Miner.h
src/core/Taskbar.h
src/net/interfaces/IJobResultListener.h
src/net/JobResult.h
src/net/JobResults.h
@@ -104,6 +110,7 @@ set(SOURCES
src/core/config/ConfigTransform.cpp
src/core/Controller.cpp
src/core/Miner.cpp
src/core/Taskbar.cpp
src/net/JobResults.cpp
src/net/Network.cpp
src/net/strategies/DonateStrategy.cpp
@@ -124,6 +131,19 @@ set(SOURCES_CRYPTO
src/crypto/common/VirtualMemory.cpp
)
if (CMAKE_C_COMPILER_ID MATCHES GNU)
set_source_files_properties(src/crypto/cn/CnHash.cpp PROPERTIES COMPILE_FLAGS "-Ofast -fno-tree-vectorize")
endif()
if (WITH_VAES)
add_definitions(-DXMRIG_VAES)
set(HEADERS_CRYPTO "${HEADERS_CRYPTO}" src/crypto/cn/CryptoNight_x86_vaes.h)
set(SOURCES_CRYPTO "${SOURCES_CRYPTO}" src/crypto/cn/CryptoNight_x86_vaes.cpp)
if (CMAKE_C_COMPILER_ID MATCHES GNU OR CMAKE_C_COMPILER_ID MATCHES Clang)
set_source_files_properties(src/crypto/cn/CryptoNight_x86_vaes.cpp PROPERTIES COMPILE_FLAGS "-Ofast -fno-tree-vectorize -mavx2 -mvaes")
endif()
endif()
if (WITH_HWLOC)
list(APPEND HEADERS_CRYPTO
src/crypto/common/NUMAMemoryPool.h
@@ -180,8 +200,8 @@ find_package(UV REQUIRED)
include(cmake/flags.cmake)
include(cmake/randomx.cmake)
include(cmake/argon2.cmake)
include(cmake/astrobwt.cmake)
include(cmake/kawpow.cmake)
include(cmake/ghostrider.cmake)
include(cmake/OpenSSL.cmake)
include(cmake/asm.cmake)
@@ -217,7 +237,7 @@ if (WITH_DEBUG_LOG)
endif()
add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES})
target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${ARGON2_LIBRARY} ${ETHASH_LIBRARY})
target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${ARGON2_LIBRARY} ${ETHASH_LIBRARY} ${GHOSTRIDER_LIBRARY})
if (WIN32)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/bin/WinRing0/WinRing0x64.sys" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
@@ -225,6 +245,7 @@ if (WIN32)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/benchmark_10M.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/pool_mine_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/solo_mine_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
add_custom_command(TARGET ${CMAKE_PROJECT_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CMAKE_SOURCE_DIR}/scripts/rtm_ghostrider_example.cmd" $<TARGET_FILE_DIR:${CMAKE_PROJECT_NAME}>)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_BUILD_TYPE STREQUAL Release AND NOT CMAKE_GENERATOR STREQUAL Xcode)

View File

@@ -7,10 +7,10 @@
[![GitHub stars](https://img.shields.io/github/stars/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/stargazers)
[![GitHub forks](https://img.shields.io/github/forks/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/network)
XMRig is a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and [RandomX benchmark](https://xmrig.com/benchmark). Official binaries are available for Windows, Linux, macOS and FreeBSD.
XMRig is a high performance, open source, cross platform RandomX, KawPow, CryptoNight and [GhostRider](https://github.com/xmrig/xmrig/tree/master/src/crypto/ghostrider#readme) unified CPU/GPU miner and [RandomX benchmark](https://xmrig.com/benchmark). Official binaries are available for Windows, Linux, macOS and FreeBSD.
## Mining backends
- **CPU** (x64/ARMv8)
- **CPU** (x86/x64/ARMv7/ARMv8)
- **OpenCL** for AMD GPUs.
- **CUDA** for NVIDIA GPUs via external [CUDA plugin](https://github.com/xmrig/xmrig-cuda).

View File

@@ -1,45 +0,0 @@
if (WITH_ASTROBWT)
add_definitions(/DXMRIG_ALGO_ASTROBWT)
list(APPEND HEADERS_CRYPTO
src/crypto/astrobwt/AstroBWT.h
)
list(APPEND SOURCES_CRYPTO
src/crypto/astrobwt/AstroBWT.cpp
)
if (XMRIG_ARM)
list(APPEND HEADERS_CRYPTO
src/crypto/astrobwt/salsa20_ref/ecrypt-config.h
src/crypto/astrobwt/salsa20_ref/ecrypt-machine.h
src/crypto/astrobwt/salsa20_ref/ecrypt-portable.h
src/crypto/astrobwt/salsa20_ref/ecrypt-sync.h
)
list(APPEND SOURCES_CRYPTO
src/crypto/astrobwt/salsa20_ref/salsa20.c
)
else()
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
add_definitions(/DASTROBWT_AVX2)
if (CMAKE_C_COMPILER_ID MATCHES MSVC)
enable_language(ASM_MASM)
list(APPEND SOURCES_CRYPTO src/crypto/astrobwt/sha3_256_avx2.asm)
else()
enable_language(ASM)
list(APPEND SOURCES_CRYPTO src/crypto/astrobwt/sha3_256_avx2.S)
endif()
endif()
list(APPEND HEADERS_CRYPTO
src/crypto/astrobwt/Salsa20.hpp
)
list(APPEND SOURCES_CRYPTO
src/crypto/astrobwt/Salsa20.cpp
)
endif()
else()
remove_definitions(/DXMRIG_ALGO_ASTROBWT)
endif()

View File

@@ -1,47 +1,70 @@
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(XMRIG_64_BIT ON)
add_definitions(-DXMRIG_64_BIT)
else()
set(XMRIG_64_BIT OFF)
endif()
if (NOT CMAKE_SYSTEM_PROCESSOR)
message(WARNING "CMAKE_SYSTEM_PROCESSOR not defined")
endif()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64)$" AND CMAKE_SIZEOF_VOID_P EQUAL 8)
add_definitions(/DRAPIDJSON_SSE2)
include(CheckCXXCompilerFlag)
if (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(VAES_SUPPORTED ON)
else()
CHECK_CXX_COMPILER_FLAG("-mavx2 -mvaes" VAES_SUPPORTED)
endif()
if (NOT VAES_SUPPORTED)
set(WITH_VAES OFF)
endif()
if (XMRIG_64_BIT AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64)$")
add_definitions(-DRAPIDJSON_SSE2)
else()
set(WITH_SSE4_1 OFF)
set(WITH_AVX2 OFF)
set(WITH_VAES OFF)
endif()
if (ARM_V8)
set(ARM_TARGET 8)
elseif (ARM_V7)
set(ARM_TARGET 7)
endif()
if (NOT ARM_TARGET)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64|armv8-a)$")
set(ARM_TARGET 8)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7|armv7f|armv7s|armv7k|armv7-a|armv7l)$")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv7|armv7f|armv7s|armv7k|armv7-a|armv7l|armv7ve)$")
set(ARM_TARGET 7)
endif()
endif()
if (ARM_TARGET AND ARM_TARGET GREATER 6)
set(XMRIG_ARM ON)
add_definitions(/DXMRIG_ARM)
set(XMRIG_ARM ON)
add_definitions(-DXMRIG_ARM=${ARM_TARGET})
message(STATUS "Use ARM_TARGET=${ARM_TARGET} (${CMAKE_SYSTEM_PROCESSOR})")
include(CheckCXXCompilerFlag)
if (ARM_TARGET EQUAL 8)
set(XMRIG_ARMv8 ON)
add_definitions(/DXMRIG_ARMv8)
CHECK_CXX_COMPILER_FLAG(-march=armv8-a+crypto XMRIG_ARM_CRYPTO)
if (XMRIG_ARM_CRYPTO)
add_definitions(/DXMRIG_ARM_CRYPTO)
add_definitions(-DXMRIG_ARM_CRYPTO)
set(ARM8_CXX_FLAGS "-march=armv8-a+crypto")
else()
set(ARM8_CXX_FLAGS "-march=armv8-a")
endif()
elseif (ARM_TARGET EQUAL 7)
set(XMRIG_ARMv7 ON)
add_definitions(/DXMRIG_ARMv7)
endif()
endif()
if (WITH_SSE4_1)
add_definitions(/DXMRIG_FEATURE_SSE4_1)
add_definitions(-DXMRIG_FEATURE_SSE4_1)
endif()
if (WITH_AVX2)
add_definitions(-DXMRIG_FEATURE_AVX2)
endif()

View File

@@ -10,7 +10,7 @@ if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
endif()
if (CMAKE_BUILD_TYPE STREQUAL "Release")
add_definitions(/DNDEBUG)
add_definitions(-DNDEBUG)
endif()
include(CheckSymbolExists)
@@ -22,17 +22,17 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti -Wno-strict-aliasing -Wno-class-memaccess")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast -s")
if (XMRIG_ARMv8)
if (ARM_TARGET EQUAL 8)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ARM8_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARM8_CXX_FLAGS} -flax-vector-conversions")
elseif (XMRIG_ARMv7)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -flax-vector-conversions")
elseif (ARM_TARGET EQUAL 7)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a -mfpu=neon")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv7-a -mfpu=neon -flax-vector-conversions")
else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maes")
add_definitions(/DHAVE_ROTR)
add_definitions(-DHAVE_ROTR)
endif()
if (WIN32)
@@ -49,28 +49,16 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static")
endif()
add_definitions(/D_GNU_SOURCE)
if (${CMAKE_VERSION} VERSION_LESS "3.1.0")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
#set(CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -gdwarf-2")
add_definitions(/DHAVE_BUILTIN_CLEAR_CACHE)
add_definitions(-D_GNU_SOURCE -DHAVE_BUILTIN_CLEAR_CACHE)
elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(CMAKE_C_FLAGS_RELEASE "/MT /O2 /Oi /DNDEBUG /GL")
set(CMAKE_CXX_FLAGS_RELEASE "/MT /O2 /Oi /DNDEBUG /GL")
set(CMAKE_C_FLAGS_RELEASE "/MP /MT /O2 /Oi /DNDEBUG /GL")
set(CMAKE_CXX_FLAGS_RELEASE "/MP /MT /O2 /Oi /DNDEBUG /GL")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "/Ob1 /Zi /DRELWITHDEBINFO")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Ob1 /Zi /DRELWITHDEBINFO")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "/MP /Ob1 /Zi /DRELWITHDEBINFO")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/MP /Ob1 /Zi /DRELWITHDEBINFO")
add_definitions(/D_CRT_SECURE_NO_WARNINGS)
add_definitions(/D_CRT_NONSTDC_NO_WARNINGS)
add_definitions(/DNOMINMAX)
add_definitions(/DHAVE_ROTR)
add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_WARNINGS -DNOMINMAX -DHAVE_ROTR)
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
@@ -80,10 +68,10 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti -Wno-missing-braces")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast -funroll-loops -fmerge-all-constants")
if (XMRIG_ARMv8)
if (ARM_TARGET EQUAL 8)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ARM8_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARM8_CXX_FLAGS}")
elseif (XMRIG_ARMv7)
elseif (ARM_TARGET EQUAL 7)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -march=${CMAKE_SYSTEM_PROCESSOR}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -march=${CMAKE_SYSTEM_PROCESSOR}")
else()
@@ -92,7 +80,7 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
check_symbol_exists("_rotr" "x86intrin.h" HAVE_ROTR)
if (HAVE_ROTR)
add_definitions(/DHAVE_ROTR)
add_definitions(-DHAVE_ROTR)
endif()
endif()
@@ -105,6 +93,6 @@ endif()
if (NOT WIN32)
check_symbol_exists("__builtin___clear_cache" "stdlib.h" HAVE_BUILTIN_CLEAR_CACHE)
if (HAVE_BUILTIN_CLEAR_CACHE)
add_definitions(/DHAVE_BUILTIN_CLEAR_CACHE)
add_definitions(-DHAVE_BUILTIN_CLEAR_CACHE)
endif()
endif()

cmake/ghostrider.cmake (new file, 8 lines)
View File

@@ -0,0 +1,8 @@
if (WITH_GHOSTRIDER)
add_definitions(/DXMRIG_ALGO_GHOSTRIDER)
add_subdirectory(src/crypto/ghostrider)
set(GHOSTRIDER_LIBRARY ghostrider)
else()
remove_definitions(/DXMRIG_ALGO_GHOSTRIDER)
set(GHOSTRIDER_LIBRARY "")
endif()

View File

@@ -1,7 +1,3 @@
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
add_definitions(/DXMRIG_64_BIT)
endif()
if (WIN32)
set(XMRIG_OS_WIN ON)
elseif (APPLE)
@@ -19,39 +15,38 @@ else()
set(XMRIG_OS_ANDROID ON)
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
set(XMRIG_OS_LINUX ON)
elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR CMAKE_SYSTEM_NAME STREQUAL DragonFly)
set(XMRIG_OS_FREEBSD ON)
endif()
endif()
if (XMRIG_OS_WIN)
add_definitions(/DWIN32)
add_definitions(/DXMRIG_OS_WIN)
add_definitions(-DWIN32 -DXMRIG_OS_WIN)
elseif(XMRIG_OS_APPLE)
add_definitions(/DXMRIG_OS_APPLE)
add_definitions(-DXMRIG_OS_APPLE)
if (XMRIG_OS_IOS)
add_definitions(/DXMRIG_OS_IOS)
add_definitions(-DXMRIG_OS_IOS)
else()
add_definitions(/DXMRIG_OS_MACOS)
add_definitions(-DXMRIG_OS_MACOS)
endif()
if (XMRIG_ARM)
set(WITH_SECURE_JIT ON)
endif()
elseif(XMRIG_OS_UNIX)
add_definitions(/DXMRIG_OS_UNIX)
add_definitions(-DXMRIG_OS_UNIX)
if (XMRIG_OS_ANDROID)
add_definitions(/DXMRIG_OS_ANDROID)
add_definitions(-DXMRIG_OS_ANDROID)
elseif (XMRIG_OS_LINUX)
add_definitions(/DXMRIG_OS_LINUX)
add_definitions(-DXMRIG_OS_LINUX)
elseif (XMRIG_OS_FREEBSD)
add_definitions(/DXMRIG_OS_FREEBSD)
add_definitions(-DXMRIG_OS_FREEBSD)
endif()
endif()
if (WITH_SECURE_JIT)
add_definitions(/DXMRIG_SECURE_JIT)
add_definitions(-DXMRIG_SECURE_JIT)
endif()

View File

@@ -76,7 +76,15 @@ if (WITH_RANDOMX)
list(APPEND SOURCES_CRYPTO src/crypto/randomx/blake2/blake2b_sse41.c)
if (CMAKE_C_COMPILER_ID MATCHES GNU OR CMAKE_C_COMPILER_ID MATCHES Clang)
set_source_files_properties(src/crypto/randomx/blake2/blake2b_sse41.c PROPERTIES COMPILE_FLAGS -msse4.1)
set_source_files_properties(src/crypto/randomx/blake2/blake2b_sse41.c PROPERTIES COMPILE_FLAGS "-Ofast -msse4.1")
endif()
endif()
if (WITH_AVX2)
list(APPEND SOURCES_CRYPTO src/crypto/randomx/blake2/avx2/blake2b_avx2.c)
if (CMAKE_C_COMPILER_ID MATCHES GNU OR CMAKE_C_COMPILER_ID MATCHES Clang)
set_source_files_properties(src/crypto/randomx/blake2/avx2/blake2b_avx2.c PROPERTIES COMPILE_FLAGS "-Ofast -mavx2")
endif()
endif()

View File

@@ -1,4 +1,4 @@
@echo off
cd %~dp0
cd /d "%~dp0"
xmrig.exe --bench=10M --submit
pause

View File

@@ -1,4 +1,4 @@
@echo off
cd %~dp0
cd /d "%~dp0"
xmrig.exe --bench=1M --submit
pause

View File

@@ -1,6 +1,10 @@
#!/bin/bash -e
HWLOC_VERSION="2.4.1"
HWLOC_VERSION_MAJOR="2"
HWLOC_VERSION_MINOR="9"
HWLOC_VERSION_PATCH="0"
HWLOC_VERSION="${HWLOC_VERSION_MAJOR}.${HWLOC_VERSION_MINOR}.${HWLOC_VERSION_PATCH}"
mkdir -p deps
mkdir -p deps/include
@@ -8,7 +12,7 @@ mkdir -p deps/lib
mkdir -p build && cd build
wget https://download.open-mpi.org/release/hwloc/v2.4/hwloc-${HWLOC_VERSION}.tar.gz -O hwloc-${HWLOC_VERSION}.tar.gz
wget https://download.open-mpi.org/release/hwloc/v${HWLOC_VERSION_MAJOR}.${HWLOC_VERSION_MINOR}/hwloc-${HWLOC_VERSION}.tar.gz -O hwloc-${HWLOC_VERSION}.tar.gz
tar -xzf hwloc-${HWLOC_VERSION}.tar.gz
cd hwloc-${HWLOC_VERSION}
@@ -16,4 +20,4 @@ cd hwloc-${HWLOC_VERSION}
make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)
cp -fr include ../../deps
cp hwloc/.libs/libhwloc.a ../../deps/lib
cd ..
cd ..

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
LIBRESSL_VERSION="3.0.2"
LIBRESSL_VERSION="3.5.2"
mkdir -p deps
mkdir -p deps/include
@@ -17,4 +17,4 @@ make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)
cp -fr include ../../deps
cp crypto/.libs/libcrypto.a ../../deps/lib
cp ssl/.libs/libssl.a ../../deps/lib
cd ..
cd ..

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
OPENSSL_VERSION="1.1.1k"
OPENSSL_VERSION="1.1.1s"
mkdir -p deps
mkdir -p deps/include

scripts/build.openssl3.sh (new executable file, 20 lines)
View File

@@ -0,0 +1,20 @@
#!/bin/bash -e
OPENSSL_VERSION="3.0.7"
mkdir -p deps
mkdir -p deps/include
mkdir -p deps/lib
mkdir -p build && cd build
wget https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz -O openssl-${OPENSSL_VERSION}.tar.gz
tar -xzf openssl-${OPENSSL_VERSION}.tar.gz
cd openssl-${OPENSSL_VERSION}
./config -no-shared -no-asm -no-zlib -no-comp -no-dgram -no-filenames -no-cms
make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)
cp -fr include ../../deps
cp libcrypto.a ../../deps/lib
cp libssl.a ../../deps/lib
cd ..

View File

@@ -1,6 +1,6 @@
#!/bin/bash -e
UV_VERSION="1.41.0"
UV_VERSION="1.44.2"
mkdir -p deps
mkdir -p deps/include
@@ -17,4 +17,4 @@ sh autogen.sh
make -j$(nproc || sysctl -n hw.ncpu || sysctl -n hw.logicalcpu)
cp -fr include ../../deps
cp .libs/libuv.a ../../deps/lib
cd ..
cd ..

View File

@@ -51,6 +51,7 @@ function rx()
'randomx_constants_wow.h',
'randomx_constants_arqma.h',
'randomx_constants_keva.h',
'randomx_constants_graft.h',
'aes.cl',
'blake2b.cl',
'randomx_vm.cl',
@@ -66,15 +67,6 @@ function rx()
}
function astrobwt()
{
const astrobwt = opencl_minify(addIncludes('astrobwt.cl', [ 'BWT.cl', 'salsa20.cl', 'sha3.cl' ]));
// fs.writeFileSync('astrobwt_gen.cl', astrobwt);
fs.writeFileSync('astrobwt_cl.h', text2h(astrobwt, 'xmrig', 'astrobwt_cl'));
}
function kawpow()
{
const kawpow = opencl_minify(addIncludes('kawpow.cl', [ 'defs.h' ]));
@@ -96,11 +88,6 @@ process.chdir(path.resolve('src/backend/opencl/cl/rx'));
rx();
process.chdir(cwd);
process.chdir(path.resolve('src/backend/opencl/cl/astrobwt'));
astrobwt();
process.chdir(cwd);
process.chdir(path.resolve('src/backend/opencl/cl/kawpow'));

View File

@@ -15,6 +15,6 @@
:: Choose pools outside of top 5 to help Monero network be more decentralized!
:: Smaller pools also often have smaller fees/payout limits.
cd %~dp0
xmrig.exe -o pool.hashvault.pro:3333 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD -p x
cd /d "%~dp0"
xmrig.exe -o xmrpool.eu:3333 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD -p x
pause

View File

@@ -10,14 +10,24 @@ fi
if grep -E 'AMD Ryzen|AMD EPYC' /proc/cpuinfo > /dev/null;
then
if grep "cpu family[[:space:]]:[[:space:]]25" /proc/cpuinfo > /dev/null;
if grep "cpu family[[:space:]]\{1,\}:[[:space:]]25" /proc/cpuinfo > /dev/null;
then
echo "Detected Zen3 CPU"
wrmsr -a 0xc0011020 0x4480000000000
wrmsr -a 0xc0011021 0x1c000200000040
wrmsr -a 0xc0011022 0xc000000401500000
wrmsr -a 0xc001102b 0x2000cc14
echo "MSR register values for Zen3 applied"
if grep "model[[:space:]]\{1,\}:[[:space:]]97" /proc/cpuinfo > /dev/null;
then
echo "Detected Zen4 CPU"
wrmsr -a 0xc0011020 0x4400000000000
wrmsr -a 0xc0011021 0x4000000000040
wrmsr -a 0xc0011022 0x8680000401570000
wrmsr -a 0xc001102b 0x2040cc10
echo "MSR register values for Zen4 applied"
else
echo "Detected Zen3 CPU"
wrmsr -a 0xc0011020 0x4480000000000
wrmsr -a 0xc0011021 0x1c000200000040
wrmsr -a 0xc0011022 0xc000000401570000
wrmsr -a 0xc001102b 0x2000cc10
echo "MSR register values for Zen3 applied"
fi
else
echo "Detected Zen1/Zen2 CPU"
wrmsr -a 0xc0011020 0
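
Not part of the script above: to confirm that the wrmsr writes took effect, the Linux msr driver exposes each register at its address offset in /dev/cpu/N/msr, so the value can be read back directly. The sketch below is a hypothetical verification helper (register 0xc0011020 is simply the first one the script writes; root privileges and a loaded msr kernel module are assumed):

// Hedged sketch: read back MSR 0xc0011020 on CPU 0 via the Linux msr driver.
// Assumes the msr kernel module is loaded and the process runs as root.
#include <cstdint>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main() {
    const std::uint64_t msr = 0xc0011020;            // first register written by the script above
    const int fd = open("/dev/cpu/0/msr", O_RDONLY);
    if (fd < 0) { std::perror("open /dev/cpu/0/msr"); return 1; }

    std::uint64_t value = 0;
    // The msr driver maps the MSR address to the file offset, so pread() at `msr` reads that register.
    if (pread(fd, &value, sizeof(value), static_cast<off_t>(msr)) != static_cast<ssize_t>(sizeof(value))) {
        std::perror("pread");
        close(fd);
        return 1;
    }
    std::printf("MSR 0x%llx = 0x%llx\n",
                static_cast<unsigned long long>(msr),
                static_cast<unsigned long long>(value));
    close(fd);
    return 0;
}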

View File

@@ -0,0 +1,23 @@
:: Example batch file for mining Raptoreum at a pool
::
:: Format:
:: xmrig.exe -a gr -o <pool address>:<pool port> -u <pool username/wallet> -p <pool password>
::
:: Fields:
:: pool address The host name of the pool stratum or its IP address, for example raptoreumemporium.com
:: pool port The port of the pool's stratum to connect to, for example 3333. Check your pool's getting started page.
:: pool username/wallet For most pools, this is the wallet address you want to mine to. Some pools require a username
:: pool password For most pools this can be just 'x'. For pools using usernames, you may need to provide a password as configured on the pool.
::
:: List of Raptoreum mining pools:
:: https://miningpoolstats.stream/raptoreum
::
:: Choose pools outside of top 5 to help Raptoreum network be more decentralized!
:: Smaller pools also often have smaller fees/payout limits.
cd /d "%~dp0"
:: Use this command line to connect to non-SSL port
xmrig.exe -a gr -o raptoreumemporium.com:3008 -u WALLET_ADDRESS -p x
:: Or use this command line to connect to an SSL port
:: xmrig.exe -a gr -o rtm.suprnova.cc:4273 --tls -u WALLET_ADDRESS -p x
pause

View File

@@ -11,6 +11,6 @@
:: Mining solo is the best way to help Monero network be more decentralized!
:: But you will only get a payout when you find a block which can take more than a year for a single low-end PC.
cd %~dp0
xmrig.exe -o node.xmr.to:18081 -a rx/0 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD --daemon
cd /d "%~dp0"
xmrig.exe -o YOUR_NODE_IP:18081 -a rx/0 -u 48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD --daemon
pause

View File

@@ -44,7 +44,7 @@ extern "C" {
typedef cl_uint cl_dx9_media_adapter_type_khr;
typedef cl_uint cl_dx9_media_adapter_set_khr;
#if defined(_WIN32)
#include <d3d9.h>
typedef struct _cl_dx9_surface_info_khr
@@ -105,7 +105,7 @@ typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromDX9MediaSurfaceKHR_fn)(
cl_mem_flags flags,
cl_dx9_media_adapter_type_khr adapter_type,
void * surface_info,
cl_uint plane,
cl_uint plane,
cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireDX9MediaSurfacesKHR_fn)(

View File

@@ -35,7 +35,7 @@ extern "C" {
#include <CL/cl_gl.h>
/*
/*
* cl_khr_gl_event extension
*/
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D

View File

@@ -1471,7 +1471,7 @@ typedef enum _ADLProfilePropertyType
#define ADL_HDR_FREESYNC_HDR 0x0004 ///< FreeSync HDR supported
/// @}
/// \defgroup define_FreesyncFlags ADLDDCInfo2 Freesync HDR flags
/// \defgroup define_FreesyncFlags ADLDDCInfo2 Freesync HDR flags
/// @{
/// defines for iFreesyncFlags in ADLDDCInfo2
#define ADL_HDR_FREESYNC_BACKLIGHT_SUPPORT 0x0001 ///< Global backlight control supported
@@ -1738,7 +1738,7 @@ enum ADLODNDPMMaskType
ADL_ODN_DPM_MASK = 1 << 2,
};
//ODN features Bits for ADLODNCapabilitiesX2
//ODN features Bits for ADLODNCapabilitiesX2
enum ADLODNFeatureControl
{
ADL_ODN_SCLK_DPM = 1 << 0,
@@ -1764,7 +1764,7 @@ enum ADLODNFeatureControl
//If any new feature is added, PPLIB only needs to add ext feature ID and Item ID(Seeting ID). These IDs should match the drive defined in CWDDEPM.h
enum ADLODNExtFeatureControl
{
{
ADL_ODN_EXT_FEATURE_MEMORY_TIMING_TUNE = 1 << 0,
ADL_ODN_EXT_FEATURE_FAN_ZERO_RPM_CONTROL = 1 << 1,
ADL_ODN_EXT_FEATURE_AUTO_UV_ENGINE = 1 << 2, //Auto under voltage
@@ -1794,7 +1794,7 @@ enum ADLODNExtSettingId
ADL_ODN_PARAMETER_FAN_CURVE_SPEED_5,
ADL_ODN_POWERGAUGE,
ODN_COUNT
} ;
//OD8 Capability features bits
@@ -1811,7 +1811,7 @@ enum ADLOD8FeatureControl
ADL_OD8_MEMORY_TIMING_TUNE = 1 << 8,
ADL_OD8_FAN_ZERO_RPM_CONTROL = 1 << 9 ,
ADL_OD8_AUTO_UV_ENGINE = 1 << 10, //Auto under voltage
ADL_OD8_AUTO_OC_ENGINE = 1 << 11, //Auto overclock engine
ADL_OD8_AUTO_OC_ENGINE = 1 << 11, //Auto overclock engine
ADL_OD8_AUTO_OC_MEMORY = 1 << 12, //Auto overclock memory
ADL_OD8_FAN_CURVE = 1 << 13, //Fan curve
ADL_OD8_WS_AUTO_FAN_ACOUSTIC_LIMIT = 1 << 14, //Workstation Manual Fan controller
@@ -1888,7 +1888,7 @@ typedef enum _ADLSensorType
PMLOG_TEMPERATURE_VRSOC = 24,
PMLOG_TEMPERATURE_VRMVDD0 = 25,
PMLOG_TEMPERATURE_VRMVDD1 = 26,
PMLOG_TEMPERATURE_HOTSPOT = 27,
PMLOG_TEMPERATURE_HOTSPOT = 27,
PMLOG_TEMPERATURE_GFX = 28,
PMLOG_TEMPERATURE_SOC = 29,
PMLOG_GFX_POWER = 30,

View File

@@ -37,7 +37,7 @@
#define __stdcall
#endif /* (LINUX) */
/// Memory Allocation Call back
/// Memory Allocation Call back
typedef void* ( __stdcall *ADL_MAIN_MALLOC_CALLBACK )( int );

View File

@@ -1753,7 +1753,7 @@ typedef struct ADLPXConfigCaps
///\brief Enum containing PX or HG type
///
/// This enum is used to get PX or hG type
///
///
/// \nosubgrouping
//////////////////////////////////////////////////////////////////////////////////////////
enum ADLPxType

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 2.8.12)
cmake_minimum_required(VERSION 3.1)
project(argon2 C)
set(CMAKE_C_STANDARD 99)

src/3rdparty/epee/LICENSE.txt (new vendored file, 25 lines)
View File

@@ -0,0 +1,25 @@
Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Andrey N. Sabelnikov nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Andrey N. Sabelnikov BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

src/3rdparty/epee/README.md (new vendored file, 1 line)
View File

@@ -0,0 +1 @@
epee is a small library of helpers, wrappers, tools and so on, used to make my life easier.

src/3rdparty/epee/span.h (new vendored file, 176 lines)
View File

@@ -0,0 +1,176 @@
// Copyright (c) 2017-2020, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <type_traits>
namespace epee
{
/*!
\brief Non-owning sequence of data. Does not deep copy
Inspired by `gsl::span` and/or `boost::iterator_range`. This class is
intended to be used as a parameter type for functions that need to take a
writable or read-only sequence of data. Most common cases are `span<char>`
and `span<std::uint8_t>`. Using as a class member is only recommended if
clearly documented as not doing a deep-copy. C-arrays are easily convertible
to this type.
\note Conversion from C string literal to `span<const char>` will include
the NULL-terminator.
\note Never allows derived-to-base pointer conversion; an array of derived
types is not an array of base types.
*/
template<typename T>
class span
{
template<typename U>
static constexpr bool safe_conversion() noexcept
{
// Allow exact matches or `T*` -> `const T*`.
using with_const = typename std::add_const<U>::type;
return std::is_same<T, U>() ||
(std::is_const<T>() && std::is_same<T, with_const>());
}
public:
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
using iterator = pointer;
using const_iterator = const_pointer;
constexpr span() noexcept : ptr(nullptr), len(0) {}
constexpr span(std::nullptr_t) noexcept : span() {}
//! Prevent derived-to-base conversions; invalid in this context.
template<typename U, typename = typename std::enable_if<safe_conversion<U>()>::type>
constexpr span(U* const src_ptr, const std::size_t count) noexcept
: ptr(src_ptr), len(count) {}
//! Conversion from C-array. Prevents common bugs with sizeof + arrays.
template<std::size_t N>
constexpr span(T (&src)[N]) noexcept : span(src, N) {}
constexpr span(const span&) noexcept = default;
span& operator=(const span&) noexcept = default;
/*! Try to remove `amount` elements from beginning of span.
\return Number of elements removed. */
std::size_t remove_prefix(std::size_t amount) noexcept
{
amount = std::min(len, amount);
ptr += amount;
len -= amount;
return amount;
}
constexpr iterator begin() const noexcept { return ptr; }
constexpr const_iterator cbegin() const noexcept { return ptr; }
constexpr iterator end() const noexcept { return begin() + size(); }
constexpr const_iterator cend() const noexcept { return cbegin() + size(); }
constexpr bool empty() const noexcept { return size() == 0; }
constexpr pointer data() const noexcept { return ptr; }
constexpr std::size_t size() const noexcept { return len; }
constexpr std::size_t size_bytes() const noexcept { return size() * sizeof(value_type); }
T &operator[](size_t idx) noexcept { return ptr[idx]; }
const T &operator[](size_t idx) const noexcept { return ptr[idx]; }
private:
T* ptr;
std::size_t len;
};
//! \return `span<const T::value_type>` from a STL compatible `src`.
template<typename T>
constexpr span<const typename T::value_type> to_span(const T& src)
{
// compiler provides diagnostic if size() is not size_t.
return {src.data(), src.size()};
}
//! \return `span<T::value_type>` from a STL compatible `src`.
template<typename T>
constexpr span<typename T::value_type> to_mut_span(T& src)
{
// compiler provides diagnostic if size() is not size_t.
return {src.data(), src.size()};
}
template<typename T>
constexpr bool has_padding() noexcept
{
return !std::is_standard_layout<T>() || alignof(T) != 1;
}
//! \return Cast data from `src` as `span<const std::uint8_t>`.
template<typename T>
span<const std::uint8_t> to_byte_span(const span<const T> src) noexcept
{
static_assert(!has_padding<T>(), "source type may have padding");
return {reinterpret_cast<const std::uint8_t*>(src.data()), src.size_bytes()};
}
//! \return `span<const std::uint8_t>` which represents the bytes at `&src`.
template<typename T>
span<const std::uint8_t> as_byte_span(const T& src) noexcept
{
static_assert(!std::is_empty<T>(), "empty types will not work -> sizeof == 1");
static_assert(!has_padding<T>(), "source type may have padding");
return {reinterpret_cast<const std::uint8_t*>(std::addressof(src)), sizeof(T)};
}
//! \return `span<std::uint8_t>` which represents the bytes at `&src`.
template<typename T>
span<std::uint8_t> as_mut_byte_span(T& src) noexcept
{
static_assert(!std::is_empty<T>(), "empty types will not work -> sizeof == 1");
static_assert(!has_padding<T>(), "source type may have padding");
return {reinterpret_cast<std::uint8_t*>(std::addressof(src)), sizeof(T)};
}
//! make a span from a std::string
template<typename T>
span<const T> strspan(const std::string &s) noexcept
{
static_assert(std::is_same<T, char>() || std::is_same<T, unsigned char>() || std::is_same<T, int8_t>() || std::is_same<T, uint8_t>(), "Unexpected type");
return {reinterpret_cast<const T*>(s.data()), s.size()};
}
}
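
For orientation only (not part of the vendored header), a minimal usage sketch of epee::span, assuming the header above is reachable as "span.h"; the buffer and struct names below are made up for illustration:

// Hedged usage sketch for epee::span; all identifiers other than the epee API are invented.
#include <cstdint>
#include <string>
#include <vector>

#include "span.h"

int main() {
    std::vector<std::uint8_t> buffer = {0xde, 0xad, 0xbe, 0xef};

    // Non-owning view over the vector; no copy is made.
    epee::span<const std::uint8_t> view = epee::to_span(buffer);
    view.remove_prefix(2);                       // drops the first two bytes, size() is now 2

    // as_byte_span() requires a padding-free, alignment-1 type, e.g. a struct of chars.
    struct tag { char id[4]; };
    const tag t = {{'x', 'm', 'r', 'g'}};
    epee::span<const std::uint8_t> bytes = epee::as_byte_span(t);

    // View the characters of a std::string without copying.
    const std::string blob = "payload";
    epee::span<const char> chars = epee::strspan<char>(blob);

    return static_cast<int>(view.size() + bytes.size() + chars.size());
}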

View File

@@ -81,7 +81,7 @@ Examples
.. code:: c++
#include <fmt/core.h>
int main() {
fmt::print("Hello, world!\n");
}
@@ -293,11 +293,11 @@ Projects using this library
An open-source library for mathematical programming
* `Aseprite <https://github.com/aseprite/aseprite>`_:
Animated sprite editor & pixel art tool
Animated sprite editor & pixel art tool
* `AvioBook <https://www.aviobook.aero/en>`_: A comprehensive aircraft
operations suite
* `Celestia <https://celestia.space/>`_: Real-time 3D visualization of space
* `Ceph <https://ceph.com/>`_: A scalable distributed storage system
@@ -351,7 +351,7 @@ Projects using this library
* `quasardb <https://www.quasardb.net/>`_: A distributed, high-performance,
associative database
* `Quill <https://github.com/odygrd/quill>`_: Asynchronous low-latency logging library
* `QKW <https://github.com/ravijanjam/qkw>`_: Generalizing aliasing to simplify

View File

@@ -3,9 +3,9 @@
* DISCLAIMER
* This file is part of the mingw-w64 runtime package.
*
* The mingw-w64 runtime package and its code is distributed in the hope that it
* will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR
* IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to
* The mingw-w64 runtime package and its code is distributed in the hope that it
* will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR
* IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to
* warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
@@ -109,11 +109,7 @@ char *optarg; /* argument associated with option */
extern char __declspec(dllimport) *__progname;
#endif
#ifdef __CYGWIN__
static char EMSG[] = "";
#else
#define EMSG ""
#endif
static int getopt_internal(int, char * const *, const char *,
const struct option *, int *, int);

View File

@@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 2.8.12)
cmake_minimum_required(VERSION 3.1)
project (hwloc C)
include_directories(include)

View File

@@ -1,5 +1,5 @@
Copyright © 2009 CNRS
Copyright © 2009-2020 Inria. All rights reserved.
Copyright © 2009-2022 Inria. All rights reserved.
Copyright © 2009-2013 Université Bordeaux
Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
Copyright © 2020 Hewlett Packard Enterprise. All rights reserved.
@@ -17,6 +17,202 @@ bug fixes (and other actions) for each version of hwloc since version
0.9.
Version 2.9.0
-------------
* Backends
+ Expose the memory size of CXL memory devices (Type 3) on Linux.
+ The LevelZero backend now reports the "XeLinkBandwidth" distance
matrix between L0 devices (and subdevices) when available.
+ Add support for CUDA compute capability up to 9.0.
* Tools
+ lstopo now switches to console mode when its output is redirected.
Graphical window mode may be forced back with --of window.
+ hwloc-calc now accepts "numa" in -H, and I/O subtypes such as "gpu"
in -I and -N.
Version 2.8.0
-------------
* API
+ Add HWLOC_TOPOLOGY_FLAG_NO_DISTANCES, _NO_MEMATTRS and _NO_CPUKINDS
to reduce the overhead when unneeded.
+ Add separate Read/Write Bandwidth/Latency memory attributes and
implement them on Linux.
* Backends
+ NUMA nodes may now have a subtype such as DRAM, HBM, SPM, or NVM
on heterogeneous memory platforms on Linux.
- Add DAXType and DAXParent attributes on Linux to tell where a
DAX device or its corresponding NUMA node come from (SPM for
Specific-Purpose or NVM for Non-Volatile Memory).
+ Detect heterogeneous caches in hybrid CPUs on MacOS X,
thanks to Paul Bone for the help.
+ Max frequencies are not ignored in Linux cpukinds anymore (they were
ignored in hwloc 2.7.0), but they may be slightly adjusted to avoid
reporting hybrid CPUs because of Intel Turbo Boost Max 3.0.
- See the documentation of environment variable HWLOC_CPUKINDS_MAXFREQ.
+ Hardwire the PCI locality of HPE Cray EX235a nodes.
* Tools
+ lstopo and other tools may now load Linux and x86 cpuid topology files
from a tarball.
+ lstopo may now replace the P# and L# index prefixes with custom strings
thanks to --os-index-prefix and --logical-index-prefix options.
* Misc
+ Add --disable-readme to avoid regenerating the top-level hwloc README
file from the documentation.
Version 2.7.1
-------------
* Workaround crashes when virtual machines report incoherent x86 CPUID
information about numbers of cores and threads.
Thanks to Peter Bense for the report.
* Use setenv() instead of putenv() when trying to force enable oneAPI L0
support, to avoid issues with applications that touch the environment,
thanks to Josh Hursey for the patch.
* Add some warnings at the end of configure when GPU libraries are
missing on the system or their path is missing in the environment.
Version 2.7.0
-------------
* Backends
+ Add support for NUMA nodes and caches with more than 64 PUs across
multiple processor groups on Windows 11 and Windows Server 2022.
+ Group objects are not created for Windows processor groups anymore,
except if HWLOC_WINDOWS_PROCESSOR_GROUP_OBJS=1 in the environment.
+ Expose "Cluster" group objects on Linux kernel 5.16+ for CPUs
that share some internal cache or bus. This can be equivalent
to the L2 Cache level on some platforms (e.g. x86) or a specific
level between L2 and L3 on others (e.g. ARM Kungpeng 920).
Thanks to Jonathan Cameron for the help.
- HWLOC_DONT_MERGE_CLUSTER_GROUPS=1 may be set in the environment
to prevent these groups from being merged with identical caches, etc.
+ Improve the oneAPI LevelZero backend:
- Expose subdevices such as "ze0.1" inside root OS devices ("ze0")
when the hardware contains multiple subdevices.
- Add many new attributes to describe device type, and the
numbers of slices, subslices, execution units and threads.
- Expose the memory information as LevelZeroHBM/DDR/MemorySize infos.
+ Ignore the max frequencies of cores in Linux cpukinds when the
base frequencies are available (to avoid exposing hybrid CPUs
when Intel Turbo Boost Max 3.0 gives slightly different max
frequencies to CPU cores).
- May be reverted by setting HWLOC_CPUKINDS_MAXFREQ=1 in the environment.
* Tools
+ Add --grey and --palette options to switch lstopo to greyscale or
white-background-only graphics, or to tune individual colors.
* Build
+ Windows CMake builds now support non-MSVC compilers, detect several
features at build time, can build/run tests, etc.
Thanks to Michael Hirsch and Alexander Neumann .
Version 2.6.0
-------------
* Backends
+ Expose two cpukinds for energy-efficient cores (icestorm) and
high-performance cores (firestorm) on Apple M1 on Mac OS X.
+ Use sysfs CPU "capacity" to rank hybrid cores by efficiency
on Linux when available (mostly on recent ARM platforms for now).
+ Improve HWLOC_MEMBIND_BIND (without the STRICT flag) on Linux kernel
>= 5.15: If more than one node is given, the kernel may now use all
of them instead of only the first one before falling back to others.
+ Expose cache os_index when available on Linux, it may be needed
when using resctrl to configure cache partitioning, memory bandwidth
monitoring, etc.
+ Add a "XGMIHops" distances matrix in the RSMI backend for AMD GPU
interconnected through XGMI links.
+ Expose AMD GPU memory information (VRAM and GTT) in the RSMI backend.
+ Add OS devices such as "bxi0" for Atos/Bull BXI HCAs on Linux.
* Tools
+ lstopo has a better placement algorithm with respect to I/O
objects, see --children-order in the manpage for details.
+ hwloc-annotate may now change object subtypes and cache or memory
sizes.
* Build
+ Allow to specify the ROCm installation for building the RSMI backend:
- Use a custom installation path if specified with --with-rocm=<dir>.
- Use /opt/rocm-<version> if specified with --with-rocm-version=<version>
or the ROCM_VERSION environment variable.
- Try /opt/rocm if it exists.
- See "How do I enable ROCm SMI and select which version to use?"
in the FAQ for details.
+ Add a CMakeLists for Windows under contrib/windows-cmake/ .
* Documentation
+ Add FAQ entry "How do I create a custom heterogeneous and
asymmetric topology?"
Version 2.5.0
-------------
* API
+ Add hwloc/windows.h to query Windows processor groups.
+ Add hwloc_get_obj_with_same_locality() to convert between objects
with same locality, for instance NUMA nodes and Packages,
or OS devices within a PCI device.
+ Add hwloc_distances_transform() to modify distances structures.
- hwloc-annotate and lstopo have new distances-transform options.
+ hwloc_distances_add() is replaced with _add_create() followed by
_add_values() and _add_commit(). See hwloc/distances.h for details.
+ Add topology flags to mitigate binding modifications during
hwloc discovery, especially on Windows:
- HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING and _MEMBINDING
restrict discovery to PUs and NUMA nodes inside the binding.
- HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING prevents from ever
changing the binding during discovery.
* Backends
+ Add a levelzero backend for oneAPI L0 devices, exposed as OS devices
of subtype "LevelZero" and name such as "ze0".
- Add hwloc/levelzero.h for interoperability between converting
between L0 API devices and hwloc cpusets or OS devices.
+ Expose NEC Vector Engine cards on Linux as OS devices of subtype
"VectorEngine" and name "ve0", etc.
Thanks to Anara Kozhokanova, Tim Cramer and Erich Focht for the help.
+ Add a NVLinkBandwidth distances structure between NVIDIA GPUs
(and POWER processor or NVSwitches) in the NVML backend,
and a XGMIBandwidth distances structure between AMD GPUs
in the RSMI backends.
- See "Topology Attributes: Distances, Memory Attributes and CPU Kinds"
in the documentation for details about these new distances.
+ Add support for NUMA node 0 being offline in Linux, thanks to Jirka Hladky.
* Build
+ Add --with-cuda-version=<version> or look at the CUDA_VERSION
environment variable to find the appropriate CUDA pkg-config files.
Thanks to Stephen Herbein for the suggestion.
- Also add --with-cuda=<dir> to specify the CUDA installation path
manually (and its NVML and OpenCL components).
Thanks to Andrea Bocci for the suggestion.
- See "How do I enable CUDA and select which CUDA version to use?"
in the FAQ for details.
* Tools
+ lstopo now has a --windows-processor-groups option on Windows.
+ hwloc-ps now has a --short-name option to avoid long/truncated
command path.
+ hwloc-ps now has a --single-ancestor option to return a single
(possibly too large) object where a process is bound.
+ hwloc-ps --pid-cmd may now query environment variables,
including MPI-specific variables to find out process ranks.
Version 2.4.1
-------------
* Fix AMD OpenCL device locality when PCI bus or device number >= 128.
Thanks to Edgar Leon for reporting the issue.
+ Applications using any of the following inline functions must
be recompiled to get the fix: hwloc_opencl_get_device_pci_busid()
hwloc_opencl_get_device_cpuset(), hwloc_opencl_get_device_osdev().
* Fix the ranking of cpukinds on non-Windows systems,
thanks to Ivan Kochin for the report.
* Fix the insertion of custom Groups after loading the topology,
thanks to Scott Hicks.
* Add support for CPU0 being offline in Linux, thanks to Garrett Clay.
* Fix missing x86 Package and Core objects FreeBSD/NetBSD.
Thanks to Thibault Payet and Yuri Victorovich for the report.
* Fix the import of very large distances with heterogeneous object types.
* Fix a memory leak in the Linux backend,
thanks to Perceval Anichini.
Version 2.4.0
-------------
* API

View File

@@ -78,7 +78,7 @@ debug and report issues.
Questions may be sent to the users or developers mailing lists (https://
www.open-mpi.org/community/lists/hwloc.php).
There is also a #hwloc IRC channel on Freenode (irc.freenode.net).
There is also a #hwloc IRC channel on Libera Chat (irc.libera.chat).

View File

@@ -8,7 +8,7 @@
# Please update HWLOC_VERSION* in contrib/windows/hwloc_config.h too.
major=2
minor=4
minor=9
release=0
# greek is used for alpha or beta release tags. If it is non-empty,
@@ -22,7 +22,7 @@ greek=
# The date when this release was created
date="Nov 26, 2020"
date="Dec 14, 2022"
# If snapshot=1, then use the value from snapshot_version as the
# entire hwloc version (i.e., ignore major, minor, release, and
@@ -41,7 +41,7 @@ snapshot_version=${major}.${minor}.${release}${greek}-git
# 2. Version numbers are described in the Libtool current:revision:age
# format.
libhwloc_so_version=19:0:4
libhwloc_so_version=21:1:6
libnetloc_so_version=0:0:0
# Please also update the <TargetName> lines in contrib/windows/libhwloc.vcxproj

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2020 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -29,7 +29,7 @@
* THAT IS IN THE PDF/HTML THAT IS ***NOT*** IN hwloc.h!
*
* There are entire paragraph-length descriptions, discussions, and
* pretty prictures to explain subtle corner cases, provide concrete
* pretty pictures to explain subtle corner cases, provide concrete
* examples, etc.
*
* Please, go read the documentation. :-)
@@ -93,7 +93,7 @@ extern "C" {
* Two stable releases of the same series usually have the same ::HWLOC_API_VERSION
* even if their HWLOC_VERSION are different.
*/
#define HWLOC_API_VERSION 0x00020400
#define HWLOC_API_VERSION 0x00020800
/** \brief Indicate at runtime which hwloc API version was used at build time.
*
@@ -346,7 +346,8 @@ typedef enum hwloc_obj_osdev_type_e {
* For instance the "eth0" interface on Linux. */
HWLOC_OBJ_OSDEV_OPENFABRICS, /**< \brief Operating system openfabrics device.
* For instance the "mlx4_0" InfiniBand HCA,
* or "hfi1_0" Omni-Path interface on Linux. */
* "hfi1_0" Omni-Path interface,
* or "bxi0" Atos/Bull BXI HCA on Linux. */
HWLOC_OBJ_OSDEV_DMA, /**< \brief Operating system dma engine device.
* For instance the "dma0chan0" DMA channel on Linux. */
HWLOC_OBJ_OSDEV_COPROC /**< \brief Operating system co-processor device.
@@ -516,7 +517,7 @@ struct hwloc_obj {
* objects).
*
* If the ::HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED configuration flag is set,
* some of these CPUs may not be allowed for binding,
* some of these CPUs may be online but not allowed for binding,
* see hwloc_topology_get_allowed_cpuset().
*
* \note All objects have non-NULL CPU and node sets except Misc and I/O objects.
@@ -548,7 +549,7 @@ struct hwloc_obj {
* nodes more precisely.
*
* If the ::HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED configuration flag is set,
* some of these nodes may not be allowed for allocation,
* some of these nodes may be online but not allowed for allocation,
* see hwloc_topology_get_allowed_nodeset().
*
* If there are no NUMA nodes in the machine, all the memory is close to this
@@ -641,7 +642,7 @@ union hwloc_obj_attr_u {
unsigned char revision;
float linkspeed; /* in GB/s */
} pcidev;
/** \brief Bridge specific Object Attribues */
/** \brief Bridge specific Object Attributes */
struct hwloc_bridge_attr_s {
union {
struct hwloc_pcidev_attr_s pci;
@@ -970,7 +971,7 @@ HWLOC_DECLSPEC const char * hwloc_obj_type_string (hwloc_obj_type_t type) __hwlo
*
* If \p size is 0, \p string may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
HWLOC_DECLSPEC int hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_t size,
@@ -985,7 +986,7 @@ HWLOC_DECLSPEC int hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_
*
* If \p size is 0, \p string may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
HWLOC_DECLSPEC int hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size,
@@ -1088,7 +1089,7 @@ HWLOC_DECLSPEC int hwloc_obj_add_info(hwloc_obj_t obj, const char *name, const c
*
* Some operating systems only support binding threads or processes to a single PU.
* Others allow binding to larger sets such as entire Cores or Packages or
* even random sets of invididual PUs. In such operating system, the scheduler
* even random sets of individual PUs. In such operating system, the scheduler
* is free to run the task on one of these PU, then migrate it to another PU, etc.
* It is often useful to call hwloc_bitmap_singlify() on the target CPU set before
* passing it to the binding function to avoid these expensive migrations.
@@ -1166,7 +1167,7 @@ typedef enum {
* CPUs are idle, operating systems may execute the thread/process
* on those other CPUs instead of the designated CPUs, to let them
* progress anyway. Strict binding means that the thread/process
* will _never_ execute on other cpus than the designated CPUs, even
* will _never_ execute on other CPUs than the designated CPUs, even
* when those are busy with other tasks and other CPUs are idle.
*
* \note Depending on the operating system, strict binding may not
@@ -1203,7 +1204,7 @@ typedef enum {
HWLOC_CPUBIND_NOMEMBIND = (1<<3)
} hwloc_cpubind_flags_t;
/** \brief Bind current process or thread on cpus given in physical bitmap \p set.
/** \brief Bind current process or thread on CPUs given in physical bitmap \p set.
*
* \return -1 with errno set to ENOSYS if the action is not supported
* \return -1 with errno set to EXDEV if the binding cannot be enforced
@@ -1212,12 +1213,13 @@ HWLOC_DECLSPEC int hwloc_set_cpubind(hwloc_topology_t topology, hwloc_const_cpus
/** \brief Get current process or thread binding.
*
* Writes into \p set the physical cpuset which the process or thread (according to \e
* flags) was last bound to.
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the process or
* thread (according to \e flags) was last bound to.
*/
HWLOC_DECLSPEC int hwloc_get_cpubind(hwloc_topology_t topology, hwloc_cpuset_t set, int flags);
/** \brief Bind a process \p pid on cpus given in physical bitmap \p set.
/** \brief Bind a process \p pid on CPUs given in physical bitmap \p set.
*
* \note \p hwloc_pid_t is \p pid_t on Unix platforms,
* and \p HANDLE on native Windows platforms.
@@ -1231,6 +1233,10 @@ HWLOC_DECLSPEC int hwloc_get_cpubind(hwloc_topology_t topology, hwloc_cpuset_t s
HWLOC_DECLSPEC int hwloc_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_cpuset_t set, int flags);
/** \brief Get the current physical binding of process \p pid.
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the process
* was last bound to.
*
* \note \p hwloc_pid_t is \p pid_t on Unix platforms,
* and \p HANDLE on native Windows platforms.
@@ -1244,7 +1250,7 @@ HWLOC_DECLSPEC int hwloc_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t
HWLOC_DECLSPEC int hwloc_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags);
#ifdef hwloc_thread_t
/** \brief Bind a thread \p thread on cpus given in physical bitmap \p set.
/** \brief Bind a thread \p thread on CPUs given in physical bitmap \p set.
*
* \note \p hwloc_thread_t is \p pthread_t on Unix platforms,
* and \p HANDLE on native Windows platforms.
@@ -1256,6 +1262,10 @@ HWLOC_DECLSPEC int hwloc_set_thread_cpubind(hwloc_topology_t topology, hwloc_thr
#ifdef hwloc_thread_t
/** \brief Get the current physical binding of thread \p tid.
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the thread
* was last bound to.
*
* \note \p hwloc_thread_t is \p pthread_t on Unix platforms,
* and \p HANDLE on native Windows platforms.
@@ -1266,6 +1276,10 @@ HWLOC_DECLSPEC int hwloc_get_thread_cpubind(hwloc_topology_t topology, hwloc_thr
#endif
/** \brief Get the last physical CPU where the current process or thread ran.
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the process or
* thread (according to \e flags) last ran on.
*
* The operating system may move some tasks from one processor
* to another at any time according to their binding,
@@ -1281,6 +1295,10 @@ HWLOC_DECLSPEC int hwloc_get_thread_cpubind(hwloc_topology_t topology, hwloc_thr
HWLOC_DECLSPEC int hwloc_get_last_cpu_location(hwloc_topology_t topology, hwloc_cpuset_t set, int flags);
/** \brief Get the last physical CPU where a process ran.
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the process
* last ran on.
*
* The operating system may move some tasks from one processor
* to another at any time according to their binding,
@@ -1511,6 +1529,9 @@ HWLOC_DECLSPEC int hwloc_set_membind(hwloc_topology_t topology, hwloc_const_bitm
/** \brief Query the default memory binding policy and physical locality of the
* current process or thread.
*
* The bitmap \p set (previously allocated by the caller)
* is filled with the process or thread memory binding.
*
* This function has two output parameters: \p set and \p policy.
* The values returned in these parameters depend on both the \p flags
* passed in and the current memory binding policies and nodesets in
@@ -1571,6 +1592,9 @@ HWLOC_DECLSPEC int hwloc_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t
/** \brief Query the default memory binding policy and physical locality of the
* specified process.
*
* The bitmap \p set (previously allocated by the caller)
* is filled with the process memory binding.
*
* This function has two output parameters: \p set and \p policy.
* The values returned in these parameters depend on both the \p flags
* passed in and the current memory binding policies and nodesets in
@@ -1624,6 +1648,9 @@ HWLOC_DECLSPEC int hwloc_set_area_membind(hwloc_topology_t topology, const void
/** \brief Query the CPUs near the physical NUMA node(s) and binding policy of
* the memory identified by (\p addr, \p len ).
*
* The bitmap \p set (previously allocated by the caller)
* is filled with the memory area binding.
*
* This function has two output parameters: \p set and \p policy.
* The values returned in these parameters depend on both the \p flags
* passed in and the memory binding policies and nodesets of the pages
@@ -1652,7 +1679,8 @@ HWLOC_DECLSPEC int hwloc_get_area_membind(hwloc_topology_t topology, const void
/** \brief Get the NUMA nodes where memory identified by (\p addr, \p len ) is physically allocated.
*
* Fills \p set according to the NUMA nodes where the memory area pages
* The bitmap \p set (previously allocated by the caller)
* is filled according to the NUMA nodes where the memory area pages
* are physically allocated. If no page is actually allocated yet,
* \p set may be empty.
*
@@ -1698,9 +1726,12 @@ HWLOC_DECLSPEC void *hwloc_alloc_membind(hwloc_topology_t topology, size_t len,
/** \brief Allocate some memory on NUMA memory nodes specified by \p set
*
* This is similar to hwloc_alloc_membind_nodeset() except that it is allowed to change
* the current memory binding policy, thus providing more binding support, at
* the expense of changing the current state.
* First, try to allocate properly with hwloc_alloc_membind().
* On failure, the current process or thread memory binding policy
* is changed with hwloc_set_membind() before allocating memory.
* Thus this function works in more cases, at the expense of changing
* the current state (possibly affecting future allocations that
* would not specify any policy).
*
* If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset.
* Otherwise it's a cpuset.
@@ -1883,8 +1914,9 @@ HWLOC_DECLSPEC int hwloc_topology_set_components(hwloc_topology_t __hwloc_restri
enum hwloc_topology_flags_e {
/** \brief Detect the whole system, ignore reservations, include disallowed objects.
*
* Gather all resources, even if some were disabled by the administrator.
* Gather all online resources, even if some were disabled by the administrator.
* For instance, ignore Linux Cgroup/Cpusets and gather all processors and memory nodes.
* However offline PUs and NUMA nodes are still ignored.
*
* When this flag is not set, PUs and NUMA nodes that are disallowed are not added to the topology.
* Parent objects (package, core, cache, etc.) are added only if some of their children are allowed.
@@ -1966,17 +1998,100 @@ enum hwloc_topology_flags_e {
* hwloc and machine support.
*
*/
HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT = (1UL<<3)
HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT = (1UL<<3),
/** \brief Do not consider resources outside of the process CPU binding.
*
* If the binding of the process is limited to a subset of cores,
* ignore the other cores during discovery.
*
* The resulting topology is identical to what a call to hwloc_topology_restrict()
* would generate, but this flag also prevents hwloc from ever touching other
* resources during the discovery.
*
* This flag especially tells the x86 backend to never temporarily
* rebind a thread on any excluded core. This is useful on Windows
* because such temporary rebinding can change the process binding.
* Another use-case is to avoid cores that would not be able to
* perform the hwloc discovery anytime soon because they are busy
* executing some high-priority real-time tasks.
*
* If process CPU binding is not supported,
* the thread CPU binding is considered instead if supported,
* or the flag is ignored.
*
* This flag requires ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM as well
* since binding support is required.
*/
HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING = (1UL<<4),
/** \brief Do not consider resources outside of the process memory binding.
*
* If the binding of the process is limited to a subset of NUMA nodes,
* ignore the other NUMA nodes during discovery.
*
* The resulting topology is identical to what a call to hwloc_topology_restrict()
* would generate, but this flag also prevents hwloc from ever touching other
* resources during the discovery.
*
* This flag is meant to be used together with
* ::HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING when both cores
* and NUMA nodes should be ignored outside of the process binding.
*
* If process memory binding is not supported,
* the thread memory binding is considered instead if supported,
* or the flag is ignored.
*
* This flag requires ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM as well
* since binding support is required.
*/
HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING = (1UL<<5),
/** \brief Do not ever modify the process or thread binding during discovery.
*
* This flag disables all hwloc discovery steps that require a change of
* the process or thread binding. This currently only affects the x86
* backend which gets entirely disabled.
*
* This is useful when hwloc_topology_load() is called while the
* application also creates additional threads or modifies the binding.
*
* This flag is also a strict way to make sure the process binding will
 * not change due to thread binding changes on Windows
* (see ::HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING).
*/
HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING = (1UL<<6),
/** \brief Ignore distances.
*
* Ignore distance information from the operating systems (and from XML)
* and hence do not use distances for grouping.
*/
HWLOC_TOPOLOGY_FLAG_NO_DISTANCES = (1UL<<7),
/** \brief Ignore memory attributes.
*
 * Ignore memory attributes from the operating systems (and from XML).
*/
HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS = (1UL<<8),
/** \brief Ignore CPU Kinds.
*
* Ignore CPU kind information from the operating systems (and from XML).
*/
HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS = (1UL<<9)
};
/** \brief Set OR'ed flags to non-yet-loaded topology.
*
 * Set an OR'ed set of ::hwloc_topology_flags_e onto a topology that was not yet loaded.
*
* If this function is called multiple times, the last invokation will erase
* If this function is called multiple times, the last invocation will erase
* and replace the set of flags that was previously set.
*
* The flags set in a topology may be retrieved with hwloc_topology_get_flags()
* By default, no flags are set (\c 0).
*
* The flags set in a topology may be retrieved with hwloc_topology_get_flags().
*/
HWLOC_DECLSPEC int hwloc_topology_set_flags (hwloc_topology_t topology, unsigned long flags);
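For instance, a short sketch setting several of the flags declared above before loading (the particular combination is chosen only for illustration):
hwloc_topology_t topo;
hwloc_topology_init(&topo);
hwloc_topology_set_flags(topo, HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING
                             | HWLOC_TOPOLOGY_FLAG_NO_DISTANCES
                             | HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS);
hwloc_topology_load(topo);
/* ... use the topology ... */
hwloc_topology_destroy(topo);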
@@ -1984,6 +2099,9 @@ HWLOC_DECLSPEC int hwloc_topology_set_flags (hwloc_topology_t topology, unsigned
*
* Get the OR'ed set of ::hwloc_topology_flags_e of a topology.
*
* If hwloc_topology_set_flags() was not called earlier,
* no flags are set (\c 0 is returned).
*
* \return the flags previously set with hwloc_topology_set_flags().
*/
HWLOC_DECLSPEC unsigned long hwloc_topology_get_flags (hwloc_topology_t topology);

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -11,10 +11,10 @@
#ifndef HWLOC_CONFIG_H
#define HWLOC_CONFIG_H
#define HWLOC_VERSION "2.4.1"
#define HWLOC_VERSION "2.9.0"
#define HWLOC_VERSION_MAJOR 2
#define HWLOC_VERSION_MINOR 4
#define HWLOC_VERSION_RELEASE 1
#define HWLOC_VERSION_MINOR 9
#define HWLOC_VERSION_RELEASE 0
#define HWLOC_VERSION_GREEK ""
#define __hwloc_restrict

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -112,7 +112,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_copy(hwloc_bitmap_t dst, hwloc_const_bitmap_t sr
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
HWLOC_DECLSPEC int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
@@ -137,7 +137,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_sscanf(hwloc_bitmap_t bitmap, const char * __hwl
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
HWLOC_DECLSPEC int hwloc_bitmap_list_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
@@ -161,7 +161,7 @@ HWLOC_DECLSPEC int hwloc_bitmap_list_sscanf(hwloc_bitmap_t bitmap, const char *
*
* If \p buflen is 0, \p buf may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
HWLOC_DECLSPEC int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap);
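A short sketch of the three formatting variants above, assuming obj is any hwloc_obj_t with a cpuset:
char buf[256];
hwloc_bitmap_snprintf(buf, sizeof(buf), obj->cpuset);         /* e.g. "0x000000ff" */
printf("%s cpuset: %s\n", hwloc_obj_type_string(obj->type), buf);
hwloc_bitmap_list_snprintf(buf, sizeof(buf), obj->cpuset);    /* e.g. "0-7" */
hwloc_bitmap_taskset_snprintf(buf, sizeof(buf), obj->cpuset); /* taskset(1)-compatible */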
@@ -357,11 +357,11 @@ HWLOC_DECLSPEC int hwloc_bitmap_last_unset(hwloc_const_bitmap_t bitmap) __hwloc_
* The loop must start with hwloc_bitmap_foreach_begin() and end
* with hwloc_bitmap_foreach_end() followed by a terminating ';'.
*
* \p index is the loop variable; it should be an unsigned int. The
* first iteration will set \p index to the lowest index in the bitmap.
* \p id is the loop variable; it should be an unsigned int. The
* first iteration will set \p id to the lowest index in the bitmap.
* Successive iterations will iterate through, in order, all remaining
* indexes set in the bitmap. To be specific: each iteration will return a
* value for \p index such that hwloc_bitmap_isset(bitmap, index) is true.
* value for \p id such that hwloc_bitmap_isset(bitmap, id) is true.
*
* The assert prevents the loop from being infinite if the bitmap is infinitely set.
*

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2020 Inria. All rights reserved.
* Copyright © 2020-2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -42,18 +42,23 @@ extern "C" {
* (for instance the "CoreType" and "FrequencyMaxMHz",
* see \ref topoattrs_cpukinds).
*
* A higher efficiency value means intrinsic greater performance
* A higher efficiency value means greater intrinsic performance
* (and possibly less performance/power efficiency).
* Kinds with lower efficiency are ranked first:
* Kinds with lower efficiency values are ranked first:
* Passing 0 as \p kind_index to hwloc_cpukinds_get_info() will
* return information about the less efficient CPU kind.
* return information about the CPU kind with lower performance
* but higher energy-efficiency.
* Higher \p kind_index values would rather return information
* about power-hungry high-performance cores.
*
* When available, efficiency values are gathered from the operating
* system (when \p cpukind_efficiency is set in the
* struct hwloc_topology_discovery_support array, only on Windows 10 for now).
* Otherwise hwloc tries to compute efficiencies
* by comparing CPU kinds using frequencies (on ARM),
* or core types and frequencies (on other architectures).
* When available, efficiency values are gathered from the operating system.
* If so, \p cpukind_efficiency is set in the struct hwloc_topology_discovery_support array.
* This is currently available on Windows 10, Mac OS X (Darwin),
* and on some Linux platforms where core "capacity" is exposed in sysfs.
*
* If the operating system does not expose core efficiencies natively,
* hwloc tries to compute efficiencies by comparing CPU kinds using
* frequencies (on ARM), or core types and frequencies (on other architectures).
* The environment variable HWLOC_CPUKINDS_RANKING may be used
 * to change this heuristic, see \ref envvar.
*
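A sketch of iterating over the ranked kinds with hwloc_cpukinds_get_nr() and hwloc_cpukinds_get_info(), assuming a loaded topology topo:
int nr = hwloc_cpukinds_get_nr(topo, 0);
for (int i = 0; i < nr; i++) {
  hwloc_bitmap_t cpus = hwloc_bitmap_alloc();
  struct hwloc_info_s *infos;
  unsigned nr_infos;
  int efficiency;
  if (!hwloc_cpukinds_get_info(topo, i, cpus, &efficiency, &nr_infos, &infos, 0))
    printf("kind #%d (efficiency %d) has %d PUs\n",
           i, efficiency, hwloc_bitmap_weight(cpus));
  hwloc_bitmap_free(cpus);
}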

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -75,7 +75,7 @@ hwloc_cuda_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused
/** \brief Get the CPU set of processors that are physically
* close to device \p cudevice.
*
* Return the CPU set describing the locality of the CUDA device \p cudevice.
* Store in \p set the CPU-set describing the locality of the CUDA device \p cudevice.
*
* Topology \p topology and device \p cudevice must match the local machine.
* I/O devices detection and the CUDA component are not needed in the topology.
@@ -120,8 +120,8 @@ hwloc_cuda_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
/** \brief Get the hwloc PCI device object corresponding to the
* CUDA device \p cudevice.
*
* Return the PCI device object describing the CUDA device \p cudevice.
* Return NULL if there is none.
* \return The hwloc PCI device object describing the CUDA device \p cudevice.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p cudevice must match the local machine.
* I/O devices detection must be enabled in topology \p topology.
@@ -140,8 +140,8 @@ hwloc_cuda_get_device_pcidev(hwloc_topology_t topology, CUdevice cudevice)
/** \brief Get the hwloc OS device object corresponding to CUDA device \p cudevice.
*
* Return the hwloc OS device object that describes the given
* CUDA device \p cudevice. Return NULL if there is none.
* \return The hwloc OS device object that describes the given CUDA device \p cudevice.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p cudevice must match the local machine.
* I/O devices detection and the CUDA component must be enabled in the topology.
@@ -183,8 +183,8 @@ hwloc_cuda_get_device_osdev(hwloc_topology_t topology, CUdevice cudevice)
/** \brief Get the hwloc OS device object corresponding to the
* CUDA device whose index is \p idx.
*
* Return the OS device object describing the CUDA device whose
* index is \p idx. Return NULL if there is none.
* \return The hwloc OS device object describing the CUDA device whose index is \p idx.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2011 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -72,7 +72,7 @@ hwloc_cudart_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unus
/** \brief Get the CPU set of processors that are physically
* close to device \p idx.
*
* Return the CPU set describing the locality of the CUDA device
* Store in \p set the CPU-set describing the locality of the CUDA device
* whose index is \p idx.
*
* Topology \p topology and device \p idx must match the local machine.
@@ -117,8 +117,8 @@ hwloc_cudart_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unuse
/** \brief Get the hwloc PCI device object corresponding to the
* CUDA device whose index is \p idx.
*
* Return the PCI device object describing the CUDA device whose
* index is \p idx. Return NULL if there is none.
* \return The hwloc PCI device object describing the CUDA device whose index is \p idx.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p idx must match the local machine.
* I/O devices detection must be enabled in topology \p topology.
@@ -138,8 +138,8 @@ hwloc_cudart_get_device_pcidev(hwloc_topology_t topology, int idx)
/** \brief Get the hwloc OS device object corresponding to the
* CUDA device whose index is \p idx.
*
* Return the OS device object describing the CUDA device whose
* index is \p idx. Return NULL if there is none.
* \return The hwloc OS device object describing the CUDA device whose index is \p idx.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2018 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -30,6 +30,15 @@ extern "C" {
/* backward compat with v1.10 before Node->NUMANode clarification */
#define HWLOC_OBJ_NODE HWLOC_OBJ_NUMANODE
/** \brief Add a distances structure.
*
* Superseded by hwloc_distances_add_create()+hwloc_distances_add_values()+hwloc_distances_add_commit()
* in v2.5.
*/
HWLOC_DECLSPEC int hwloc_distances_add(hwloc_topology_t topology,
unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values,
unsigned long kind, unsigned long flags) __hwloc_attribute_deprecated;
/** \brief Insert a misc object by parent.
*
* Identical to hwloc_topology_insert_misc_object().
@@ -46,7 +55,7 @@ hwloc_topology_insert_misc_object_by_parent(hwloc_topology_t topology, hwloc_obj
*
* If \p size is 0, \p string may safely be \c NULL.
*
* \return the number of character that were actually written if not truncating,
* \return the number of characters that were actually written if not truncating,
* or that would have been written (not including the ending \\0).
*/
static __hwloc_inline int

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -35,9 +35,20 @@ extern "C" {
* from a core in another node.
* The corresponding kind is ::HWLOC_DISTANCES_KIND_FROM_OS | ::HWLOC_DISTANCES_KIND_FROM_USER.
* The name of this distances structure is "NUMALatency".
 * Other distances structures include "XGMIBandwidth", "XGMIHops",
 * "XeLinkBandwidth" and "NVLinkBandwidth".
*
* The matrix may also contain bandwidths between random sets of objects,
* possibly provided by the user, as specified in the \p kind attribute.
*
* Pointers \p objs and \p values should not be replaced, reallocated, freed, etc.
* However callers are allowed to modify \p kind as well as the contents
* of \p objs and \p values arrays.
* For instance, if there is a single NUMA node per Package,
* hwloc_get_obj_with_same_locality() may be used to convert between them
* and replace NUMA nodes in the \p objs array with the corresponding Packages.
* See also hwloc_distances_transform() for applying some transformations
* to the structure.
*/
struct hwloc_distances_s {
unsigned nbobjs; /**< \brief Number of objects described by the distance matrix. */
@@ -91,6 +102,8 @@ enum hwloc_distances_kind_e {
HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH = (1UL<<3),
/** \brief This distances structure covers objects of different types.
 * This may apply to the "NVLinkBandwidth" structure in the presence
 * of an NVSwitch or POWER processor NVLink port.
* \hideinitializer
*/
HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES = (1UL<<4)
@@ -147,6 +160,8 @@ hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
* Usually only one distances structure may match a given name.
*
* The name of the most common structure is "NUMALatency".
* Others include "XGMIBandwidth", "XGMIHops", "XeLinkBandwidth",
* and "NVLinkBandwidth".
*/
HWLOC_DECLSPEC int
hwloc_distances_get_by_name(hwloc_topology_t topology, const char *name,
@@ -168,6 +183,85 @@ hwloc_distances_get_name(hwloc_topology_t topology, struct hwloc_distances_s *di
HWLOC_DECLSPEC void
hwloc_distances_release(hwloc_topology_t topology, struct hwloc_distances_s *distances);
/** \brief Transformations of distances structures. */
enum hwloc_distances_transform_e {
/** \brief Remove \c NULL objects from the distances structure.
*
* Every object that was replaced with \c NULL in the \p objs array
* is removed and the \p values array is updated accordingly.
*
* At least \c 2 objects must remain, otherwise hwloc_distances_transform()
* will return \c -1 with \p errno set to \c EINVAL.
*
* \p kind will be updated with or without ::HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES
* according to the remaining objects.
*
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL = 0,
/** \brief Replace bandwidth values with a number of links.
*
* Usually all values will be either \c 0 (no link) or \c 1 (one link).
* However some matrices could get larger values if some pairs of
* peers are connected by different numbers of links.
*
* Values on the diagonal are set to \c 0.
*
* This transformation only applies to bandwidth matrices.
*
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_LINKS = 1,
/** \brief Merge switches with multiple ports into a single object.
 * This currently only applies to NVSwitches where GPUs appear connected to
 * separate switch ports in the NVLinkBandwidth matrix. This transformation will
* replace all of them with the same port connected to all GPUs.
* Other ports are removed by applying ::HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL internally.
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS = 2,
/** \brief Apply a transitive closure to the matrix to connect objects across switches.
* This currently only applies to GPUs and NVSwitches in the NVLinkBandwidth matrix.
* All pairs of GPUs will be reported as directly connected.
* \hideinitializer
*/
HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE = 3
};
/** \brief Apply a transformation to a distances structure.
*
* Modify a distances structure that was previously obtained with
* hwloc_distances_get() or one of its variants.
*
* This modifies the local copy of the distances structures but does
* not modify the distances information stored inside the topology
* (retrieved by another call to hwloc_distances_get() or exported to XML).
* To do so, one should add a new distances structure with same
* name, kind, objects and values (see \ref hwlocality_distances_add)
* and then remove this old one with hwloc_distances_release_remove().
*
* \p transform must be one of the transformations listed
* in ::hwloc_distances_transform_e.
*
* These transformations may modify the contents of the \p objs or \p values arrays.
*
* \p transform_attr must be \c NULL for now.
*
* \p flags must be \c 0 for now.
*
* \note Objects in distances array \p objs may be directly modified
* in place without using hwloc_distances_transform().
* One may use hwloc_get_obj_with_same_locality() to easily convert
* between similar objects of different types.
*/
HWLOC_DECLSPEC int hwloc_distances_transform(hwloc_topology_t topology, struct hwloc_distances_s *distances,
enum hwloc_distances_transform_e transform,
void *transform_attr,
unsigned long flags);
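For example, the two NVLink-related transformations above could be chained as follows (sketch; assumes topo is loaded and the "NVLinkBandwidth" matrix exists):
struct hwloc_distances_s *dist;
unsigned nr = 1;
if (!hwloc_distances_get_by_name(topo, "NVLinkBandwidth", &nr, &dist, 0) && nr == 1) {
  hwloc_distances_transform(topo, dist, HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS, NULL, 0);
  hwloc_distances_transform(topo, dist, HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE, NULL, 0);
  /* ... inspect dist->objs / dist->values ... */
  hwloc_distances_release(topo, dist);
}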
/** @} */
@@ -215,13 +309,84 @@ hwloc_distances_obj_pair_values(struct hwloc_distances_s *distances,
/** \defgroup hwlocality_distances_add Add or remove distances between objects
/** \defgroup hwlocality_distances_add Add distances between objects
*
* The usual way to add distances is:
* \code
* hwloc_distances_add_handle_t handle;
* int err = -1;
* handle = hwloc_distances_add_create(topology, "name", kind, 0);
* if (handle) {
* err = hwloc_distances_add_values(topology, handle, nbobjs, objs, values, 0);
* if (!err)
* err = hwloc_distances_add_commit(topology, handle, flags);
* }
* \endcode
* If \p err is \c 0 at the end, then addition was successful.
*
* @{
*/
/** \brief Handle to a new distances structure during its addition to the topology. */
typedef void * hwloc_distances_add_handle_t;
/** \brief Create a new empty distances structure.
*
* Create an empty distances structure
* to be filled with hwloc_distances_add_values()
* and then committed with hwloc_distances_add_commit().
*
* Parameter \p name is optional, it may be \c NULL.
* Otherwise, it will be copied internally and may later be freed by the caller.
*
 * \p kind specifies the kind of distance as an OR'ed set of ::hwloc_distances_kind_e.
* Kind ::HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES will be automatically set
* according to objects having different types in hwloc_distances_add_values().
*
* \p flags must be \c 0 for now.
*
* \return A hwloc_distances_add_handle_t that should then be passed
* to hwloc_distances_add_values() and hwloc_distances_add_commit().
*
* \return \c NULL on error.
*/
HWLOC_DECLSPEC hwloc_distances_add_handle_t
hwloc_distances_add_create(hwloc_topology_t topology,
const char *name, unsigned long kind,
unsigned long flags);
/** \brief Specify the objects and values in a new empty distances structure.
*
* Specify the objects and values for a new distances structure
* that was returned as a handle by hwloc_distances_add_create().
* The structure must then be committed with hwloc_distances_add_commit().
*
* The number of objects is \p nbobjs and the array of objects is \p objs.
* Distance values are stored as a one-dimension array in \p values.
* The distance from object i to object j is in slot i*nbobjs+j.
*
* \p nbobjs must be at least 2.
*
* Arrays \p objs and \p values will be copied internally,
* they may later be freed by the caller.
*
* On error, the temporary distances structure and its content are destroyed.
*
* \p flags must be \c 0 for now.
*
* \return \c 0 on success.
* \return \c -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_add_values(hwloc_topology_t topology,
hwloc_distances_add_handle_t handle,
unsigned nbobjs, hwloc_obj_t *objs,
hwloc_uint64_t *values,
unsigned long flags);
/** \brief Flags for adding a new distances to a topology. */
enum hwloc_distances_add_flag_e {
/** \brief Try to group objects based on the newly provided distance information.
* This is ignored for distances between objects of different types.
* \hideinitializer
*/
HWLOC_DISTANCES_ADD_FLAG_GROUP = (1UL<<0),
@@ -233,23 +398,33 @@ enum hwloc_distances_add_flag_e {
HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE = (1UL<<1)
};
/** \brief Provide a new distance matrix.
/** \brief Commit a new distances structure.
*
* Provide the matrix of distances between a set of objects given by \p nbobjs
* and the \p objs array. \p nbobjs must be at least 2.
* The distances are stored as a one-dimension array in \p values.
* The distance from object i to object j is in slot i*nbobjs+j.
 * This function finalizes the distances structure and inserts it in the topology.
*
* \p kind specifies the kind of distance as a OR'ed set of ::hwloc_distances_kind_e.
* Kind ::HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES will be automatically added
* if objects of different types are given.
* Parameter \p handle was previously returned by hwloc_distances_add_create().
* Then objects and values were specified with hwloc_distances_add_values().
*
* \p flags configures the behavior of the function using an optional OR'ed set of
* ::hwloc_distances_add_flag_e.
* It may be used to request the grouping of existing objects based on distances.
*
* On error, the temporary distances structure and its content are destroyed.
*
* \return \c 0 on success.
* \return \c -1 on error.
*/
HWLOC_DECLSPEC int hwloc_distances_add_commit(hwloc_topology_t topology,
hwloc_distances_add_handle_t handle,
unsigned long flags);
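Putting the three calls together, a sketch adding a user-provided 2x2 bandwidth matrix between the first two NUMA nodes (the name "MyBandwidth" and the values are purely illustrative):
hwloc_obj_t objs[2] = {
  hwloc_get_obj_by_type(topo, HWLOC_OBJ_NUMANODE, 0),
  hwloc_get_obj_by_type(topo, HWLOC_OBJ_NUMANODE, 1)
};
hwloc_uint64_t values[4] = { 0, 100, 100, 0 };
hwloc_distances_add_handle_t handle =
  hwloc_distances_add_create(topo, "MyBandwidth",
                             HWLOC_DISTANCES_KIND_FROM_USER | HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH, 0);
if (handle && !hwloc_distances_add_values(topo, handle, 2, objs, values, 0))
  hwloc_distances_add_commit(topo, handle, HWLOC_DISTANCES_ADD_FLAG_GROUP);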
/** @} */
/** \defgroup hwlocality_distances_remove Remove distances between objects
* @{
*/
HWLOC_DECLSPEC int hwloc_distances_add(hwloc_topology_t topology,
unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values,
unsigned long kind, unsigned long flags);
/** \brief Remove all distance matrices from a topology.
*

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2012 Blue Brain Project, EPFL. All rights reserved.
* Copyright © 2012-2013 Inria. All rights reserved.
* Copyright © 2012-2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -39,9 +39,9 @@ extern "C" {
/** \brief Get the hwloc OS device object corresponding to the
* OpenGL display given by port and device index.
*
* Return the OS device object describing the OpenGL display
* \return The hwloc OS device object describing the OpenGL display
* whose port (server) is \p port and device (screen) is \p device.
* Return NULL if there is none.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
@@ -70,9 +70,9 @@ hwloc_gl_get_display_osdev_by_port_device(hwloc_topology_t topology,
/** \brief Get the hwloc OS device object corresponding to the
* OpenGL display given by name.
*
* Return the OS device object describing the OpenGL display
* \return The hwloc OS device object describing the OpenGL display
* whose name is \p name, built as ":port.device" such as ":0.0" .
* Return NULL if there is none.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
@@ -99,9 +99,10 @@ hwloc_gl_get_display_osdev_by_name(hwloc_topology_t topology,
/** \brief Get the OpenGL display port and device corresponding
* to the given hwloc OS object.
*
* Return the OpenGL display port (server) in \p port and device (screen)
* Retrieves the OpenGL display port (server) in \p port and device (screen)
* in \p screen that correspond to the given hwloc OS device object.
* Return \c -1 if there is none.
*
* \return \c -1 if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012 Université Bordeaux
* Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -807,6 +807,49 @@ hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_
return obj;
}
/** \brief Return an object of a different type with same locality.
*
* If the source object \p src is a normal or memory type,
* this function returns an object of type \p type with same
* CPU and node sets, either below or above in the hierarchy.
*
* If the source object \p src is a PCI or an OS device within a PCI
* device, the function may either return that PCI device, or another
* OS device in the same PCI parent.
 * This may for instance be useful for converting OS devices
 * such as "nvml0" or "rsmi1" used in distance structures into
 * the PCI device, or into the CUDA or OpenCL OS device,
 * corresponding to the same physical card.
*
 * If not \c NULL, parameter \p subtype only selects objects whose
* subtype attribute exists and is \p subtype (case-insensitively),
* for instance "OpenCL" or "CUDA".
*
* If not \c NULL, parameter \p nameprefix only selects objects whose
* name attribute exists and starts with \p nameprefix (case-insensitively),
* for instance "rsmi" for matching "rsmi0".
*
* If multiple objects match, the first one is returned.
*
* This function will not walk the hierarchy across bridges since
* the PCI locality may become different.
 * This function also cannot convert between normal/memory objects
* and I/O or Misc objects.
*
* \p flags must be \c 0 for now.
*
* \return An object with identical locality,
* matching \p subtype and \p nameprefix if any.
*
* \return \c NULL if no matching object could be found,
* or if the source object and target type are incompatible,
* for instance if converting between CPU and I/O objects.
*/
HWLOC_DECLSPEC hwloc_obj_t
hwloc_get_obj_with_same_locality(hwloc_topology_t topology, hwloc_obj_t src,
hwloc_obj_type_t type, const char *subtype, const char *nameprefix,
unsigned long flags);
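A sketch of the conversions mentioned above, assuming nvml_osdev is the "nvml0" OS device found in a distances matrix:
hwloc_obj_t cuda = hwloc_get_obj_with_same_locality(topo, nvml_osdev,
                                                    HWLOC_OBJ_OS_DEVICE, "CUDA", NULL, 0);
hwloc_obj_t pci  = hwloc_get_obj_with_same_locality(topo, nvml_osdev,
                                                    HWLOC_OBJ_PCI_DEVICE, NULL, NULL, 0);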
/** @} */
@@ -843,9 +886,6 @@ enum hwloc_distrib_flags_e {
 * \p flags should be 0 or an OR'ed set of ::hwloc_distrib_flags_e.
*
* \note This function requires the \p roots objects to have a CPU set.
*
* \note This function replaces the now deprecated hwloc_distribute()
* and hwloc_distributev() functions.
*/
static __hwloc_inline int
hwloc_distrib(hwloc_topology_t topology,

View File

@@ -1,136 +0,0 @@
/*
* Copyright © 2013-2016 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
/** \file
* \brief Macros to help interaction between hwloc and Intel Xeon Phi (MIC).
*
* Applications that use both hwloc and Intel Xeon Phi (MIC) may want to
* include this file so as to get topology information for MIC devices.
*/
#ifndef HWLOC_INTEL_MIC_H
#define HWLOC_INTEL_MIC_H
#include "hwloc.h"
#include "hwloc/autogen/config.h"
#include "hwloc/helper.h"
#ifdef HWLOC_LINUX_SYS
#include "hwloc/linux.h"
#include <dirent.h>
#include <string.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup hwlocality_intel_mic Interoperability with Intel Xeon Phi (MIC)
*
* This interface offers ways to retrieve topology information about
* Intel Xeon Phi (MIC) devices.
*
* @{
*/
/** \brief Get the CPU set of logical processors that are physically
* close to MIC device whose index is \p idx.
*
* Return the CPU set describing the locality of the MIC device whose index is \p idx.
*
* Topology \p topology and device index \p idx must match the local machine.
* I/O devices detection is not needed in the topology.
*
* The function only returns the locality of the device.
* If more information about the device is needed, OS objects should
* be used instead, see hwloc_intel_mic_get_device_osdev_by_index().
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*/
static __hwloc_inline int
hwloc_intel_mic_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
int idx __hwloc_attribute_unused,
hwloc_cpuset_t set)
{
#ifdef HWLOC_LINUX_SYS
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
#define HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX 128
char path[HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX];
DIR *sysdir = NULL;
struct dirent *dirent;
unsigned pcibus, pcidev, pcifunc;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return -1;
}
sprintf(path, "/sys/class/mic/mic%d", idx);
sysdir = opendir(path);
if (!sysdir)
return -1;
while ((dirent = readdir(sysdir)) != NULL) {
if (sscanf(dirent->d_name, "pci_%02x:%02x.%02x", &pcibus, &pcidev, &pcifunc) == 3) {
sprintf(path, "/sys/class/mic/mic%d/pci_%02x:%02x.%02x/local_cpus", idx, pcibus, pcidev, pcifunc);
if (hwloc_linux_read_path_as_cpumask(path, set) < 0
|| hwloc_bitmap_iszero(set))
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
break;
}
}
closedir(sysdir);
#else
/* Non-Linux systems simply get a full cpuset */
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#endif
return 0;
}
/** \brief Get the hwloc OS device object corresponding to the
* MIC device for the given index.
*
* Return the OS device object describing the MIC device whose index is \p idx.
* Return NULL if there is none.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
* I/O devices detection must be enabled in the topology.
*
* \note The corresponding PCI device object can be obtained by looking
* at the OS device parent object.
*/
static __hwloc_inline hwloc_obj_t
hwloc_intel_mic_get_device_osdev_by_index(hwloc_topology_t topology,
unsigned idx)
{
hwloc_obj_t osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type
&& osdev->name
&& !strncmp("mic", osdev->name, 3)
&& atoi(osdev->name + 3) == (int) idx)
return osdev;
}
return NULL;
}
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_INTEL_MIC_H */

View File

@@ -0,0 +1,157 @@
/*
* Copyright © 2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
/** \file
* \brief Macros to help interaction between hwloc and the oneAPI Level Zero interface.
*
* Applications that use both hwloc and Level Zero may want to
* include this file so as to get topology information for L0 devices.
*/
#ifndef HWLOC_LEVELZERO_H
#define HWLOC_LEVELZERO_H
#include "hwloc.h"
#include "hwloc/autogen/config.h"
#include "hwloc/helper.h"
#ifdef HWLOC_LINUX_SYS
#include "hwloc/linux.h"
#endif
#include <level_zero/ze_api.h>
#include <level_zero/zes_api.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup hwlocality_levelzero Interoperability with the oneAPI Level Zero interface.
*
* This interface offers ways to retrieve topology information about
* devices managed by the Level Zero API.
*
* @{
*/
/** \brief Get the CPU set of logical processors that are physically
* close to the Level Zero device \p device
*
* Store in \p set the CPU-set describing the locality of
* the Level Zero device \p device.
*
* Topology \p topology and device \p device must match the local machine.
* The Level Zero must have been initialized with Sysman enabled
* (ZES_ENABLE_SYSMAN=1 in the environment).
* I/O devices detection and the Level Zero component are not needed in the
* topology.
*
* The function only returns the locality of the device.
* If more information about the device is needed, OS objects should
* be used instead, see hwloc_levelzero_get_device_osdev().
*
* This function is currently only implemented in a meaningful way for
* Linux; other systems will simply get a full cpuset.
*/
static __hwloc_inline int
hwloc_levelzero_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
ze_device_handle_t device, hwloc_cpuset_t set)
{
#ifdef HWLOC_LINUX_SYS
/* If we're on Linux, use the sysfs mechanism to get the local cpus */
#define HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX 128
char path[HWLOC_LEVELZERO_DEVICE_SYSFS_PATH_MAX];
zes_pci_properties_t pci;
zes_device_handle_t sdevice = device;
ze_result_t res;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return -1;
}
res = zesDevicePciGetProperties(sdevice, &pci);
if (res != ZE_RESULT_SUCCESS) {
errno = EINVAL;
return -1;
}
sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/local_cpus",
pci.address.domain, pci.address.bus, pci.address.device, pci.address.function);
if (hwloc_linux_read_path_as_cpumask(path, set) < 0
|| hwloc_bitmap_iszero(set))
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#else
/* Non-Linux systems simply get a full cpuset */
hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology));
#endif
return 0;
}
/** \brief Get the hwloc OS device object corresponding to Level Zero device
* \p device.
*
* \return The hwloc OS device object that describes the given Level Zero device \p device.
* \return \c NULL if none could be found.
*
 * Topology \p topology and device \p device must match the local machine.
* I/O devices detection and the Level Zero component must be enabled in the
* topology. If not, the locality of the object may still be found using
* hwloc_levelzero_get_device_cpuset().
*
* \note The corresponding hwloc PCI device may be found by looking
* at the result parent pointer (unless PCI devices are filtered out).
*/
static __hwloc_inline hwloc_obj_t
hwloc_levelzero_get_device_osdev(hwloc_topology_t topology, ze_device_handle_t device)
{
zes_device_handle_t sdevice = device;
zes_pci_properties_t pci;
ze_result_t res;
hwloc_obj_t osdev;
if (!hwloc_topology_is_thissystem(topology)) {
errno = EINVAL;
return NULL;
}
res = zesDevicePciGetProperties(sdevice, &pci);
if (res != ZE_RESULT_SUCCESS) {
/* L0 was likely initialized without sysman, don't bother */
errno = EINVAL;
return NULL;
}
osdev = NULL;
while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) {
hwloc_obj_t pcidev = osdev->parent;
if (strncmp(osdev->name, "ze", 2))
continue;
if (pcidev
&& pcidev->type == HWLOC_OBJ_PCI_DEVICE
&& pcidev->attr->pcidev.domain == pci.address.domain
&& pcidev->attr->pcidev.bus == pci.address.bus
&& pcidev->attr->pcidev.dev == pci.address.device
&& pcidev->attr->pcidev.func == pci.address.function)
return osdev;
/* FIXME: when we'll have serialnumber, try it in case PCI is filtered-out */
}
return NULL;
}
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_LEVELZERO_H */
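A sketch of locating the hwloc OS device of the first Level Zero device (assumes ZES_ENABLE_SYSMAN=1 is set in the environment as required above; return codes are not checked):
ze_driver_handle_t drv;
ze_device_handle_t dev;
uint32_t n = 1;
zeInit(0);
zeDriverGet(&n, &drv);       /* first driver only */
n = 1;
zeDeviceGet(drv, &n, &dev);  /* first device only */
hwloc_obj_t osdev = hwloc_levelzero_get_device_osdev(topo, dev);
if (osdev && osdev->parent && osdev->parent->type == HWLOC_OBJ_PCI_DEVICE)
  printf("ze device sits on PCI %04x:%02x:%02x.%01x\n",
         osdev->parent->attr->pcidev.domain, osdev->parent->attr->pcidev.bus,
         osdev->parent->attr->pcidev.dev, osdev->parent->attr->pcidev.func);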

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2016 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2011 Université Bordeaux
* See COPYING in top-level directory.
*/
@@ -44,6 +44,10 @@ extern "C" {
HWLOC_DECLSPEC int hwloc_linux_set_tid_cpubind(hwloc_topology_t topology, pid_t tid, hwloc_const_cpuset_t set);
/** \brief Get the current binding of thread \p tid
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the list of PUs which the thread
* was last bound to.
*
* The behavior is exactly the same as the Linux sched_getaffinity system call,
* but uses a hwloc cpuset.
@@ -54,6 +58,9 @@ HWLOC_DECLSPEC int hwloc_linux_set_tid_cpubind(hwloc_topology_t topology, pid_t
HWLOC_DECLSPEC int hwloc_linux_get_tid_cpubind(hwloc_topology_t topology, pid_t tid, hwloc_cpuset_t set);
/** \brief Get the last physical CPU where thread \p tid ran.
*
* The CPU-set \p set (previously allocated by the caller)
* is filled with the PU which the thread last ran on.
*
* \note This is equivalent to calling hwloc_get_proc_last_cpu_location() with
* ::HWLOC_CPUBIND_THREAD as flags.
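A sketch of querying a thread's current binding with hwloc_linux_get_tid_cpubind(), assuming tid is the Linux thread id of interest:
hwloc_cpuset_t set = hwloc_bitmap_alloc();
if (!hwloc_linux_get_tid_cpubind(topo, tid, set)) {
  char buf[128];
  hwloc_bitmap_list_snprintf(buf, sizeof(buf), set);
  printf("thread %ld is bound to PUs %s\n", (long) tid, buf);
}
hwloc_bitmap_free(set);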

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2019-2020 Inria. All rights reserved.
* Copyright © 2019-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -54,6 +54,8 @@ extern "C" {
* Attribute values for these nodes, if any, may then be obtained with
* hwloc_memattr_get_value() and manually compared with the desired criteria.
*
* \sa An example is available in doc/examples/memory-attributes.c in the source tree.
*
* \note The API also supports specific objects as initiator,
* but it is currently not used internally by hwloc.
* Users may for instance use it to provide custom performance
@@ -65,19 +67,19 @@ extern "C" {
/** \brief Memory node attributes. */
enum hwloc_memattr_id_e {
/** \brief "Capacity".
* The capacity is returned in bytes
* (local_memory attribute in objects).
/** \brief
* The \"Capacity\" is returned in bytes (local_memory attribute in objects).
*
* Best capacity nodes are nodes with <b>higher capacity</b>.
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_CAPACITY = 0,
/** \brief "Locality".
* The locality is returned as the number of PUs in that locality
/** \brief
* The \"Locality\" is returned as the number of PUs in that locality
* (e.g. the weight of its cpuset).
*
* Best locality nodes are nodes with <b>smaller locality</b>
@@ -87,26 +89,87 @@ enum hwloc_memattr_id_e {
*
* No initiator is involved when looking at this attribute.
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_LOCALITY = 1,
/** \brief "Bandwidth".
* The bandwidth is returned in MiB/s, as seen from the given initiator location.
/** \brief
* The \"Bandwidth\" is returned in MiB/s, as seen from the given initiator location.
*
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
*
* This is the average bandwidth for read and write accesses. If the platform
* provides individual read and write bandwidths but no explicit average value,
* hwloc computes and returns the average.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_BANDWIDTH = 2,
/** \brief "Latency".
* The latency is returned as nanoseconds, as seen from the given initiator location.
/** \brief
* The \"ReadBandwidth\" is returned in MiB/s, as seen from the given initiator location.
*
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_READ_BANDWIDTH = 4,
/** \brief
* The \"WriteBandwidth\" is returned in MiB/s, as seen from the given initiator location.
*
* Best bandwidth nodes are nodes with <b>higher bandwidth</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_HIGHER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_WRITE_BANDWIDTH = 5,
/** \brief
* The \"Latency\" is returned as nanoseconds, as seen from the given initiator location.
*
* Best latency nodes are nodes with <b>smaller latency</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
*
* This is the average latency for read and write accesses. If the platform
* provides individual read and write latencies but no explicit average value,
* hwloc computes and returns the average.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_LATENCY = 3
HWLOC_MEMATTR_ID_LATENCY = 3,
/* TODO read vs write, persistence? */
/** \brief
* The \"ReadLatency\" is returned as nanoseconds, as seen from the given initiator location.
*
* Best latency nodes are nodes with <b>smaller latency</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_READ_LATENCY = 6,
/** \brief
* The \"WriteLatency\" is returned as nanoseconds, as seen from the given initiator location.
*
* Best latency nodes are nodes with <b>smaller latency</b>.
*
* The corresponding attribute flags are ::HWLOC_MEMATTR_FLAG_LOWER_FIRST
* and ::HWLOC_MEMATTR_FLAG_NEED_INITIATOR.
* \hideinitializer
*/
HWLOC_MEMATTR_ID_WRITE_LATENCY = 7,
/* TODO persistence? */
HWLOC_MEMATTR_ID_MAX /**< \private Sentinel value */
};
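A sketch of querying one of these attributes with hwloc_memattr_get_value(), assuming node is a NUMA node object and using the first PU as initiator:
struct hwloc_location initiator;
hwloc_uint64_t bw;
initiator.type = HWLOC_LOCATION_TYPE_CPUSET;
initiator.location.cpuset = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, 0)->cpuset;
if (!hwloc_memattr_get_value(topo, HWLOC_MEMATTR_ID_BANDWIDTH, node, &initiator, 0, &bw))
  printf("bandwidth from PU#0 to node L#%u: %llu MiB/s\n",
         node->logical_index, (unsigned long long) bw);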
/** \brief A memory attribute identifier.
@@ -354,7 +417,7 @@ hwloc_memattr_register(hwloc_topology_t topology,
* \p flags must be \c 0 for now.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when refering to accesses performed by CPU cores.
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
@@ -398,7 +461,7 @@ hwloc_memattr_set_value(hwloc_topology_t topology,
* values.
*
* \note The initiator \p initiator should be of type ::HWLOC_LOCATION_TYPE_CPUSET
* when refering to accesses performed by CPU cores.
* when referring to accesses performed by CPU cores.
* ::HWLOC_LOCATION_TYPE_OBJECT is currently unused internally by hwloc,
* but users may for instance use it to provide custom information about
* host memory accesses performed by GPUs.
@@ -408,7 +471,7 @@ hwloc_memattr_get_targets(hwloc_topology_t topology,
hwloc_memattr_id_t attribute,
struct hwloc_location *initiator,
unsigned long flags,
unsigned *nrp, hwloc_obj_t *targets, hwloc_uint64_t *values);
unsigned *nr, hwloc_obj_t *targets, hwloc_uint64_t *values);
/** \brief Return the initiators that have values for a given attribute for a specific target NUMA node.
*

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2012-2020 Inria. All rights reserved.
* Copyright © 2012-2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -39,7 +39,7 @@ extern "C" {
/** \brief Get the CPU set of processors that are physically
* close to NVML device \p device.
*
* Return the CPU set describing the locality of the NVML device \p device.
* Store in \p set the CPU-set describing the locality of the NVML device \p device.
*
* Topology \p topology and device \p device must match the local machine.
* I/O devices detection and the NVML component are not needed in the topology.
@@ -88,8 +88,8 @@ hwloc_nvml_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
/** \brief Get the hwloc OS device object corresponding to the
* NVML device whose index is \p idx.
*
* Return the OS device object describing the NVML device whose
* index is \p idx. Returns NULL if there is none.
* \return The hwloc OS device object describing the NVML device whose index is \p idx.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
@@ -114,8 +114,8 @@ hwloc_nvml_get_device_osdev_by_index(hwloc_topology_t topology, unsigned idx)
/** \brief Get the hwloc OS device object corresponding to NVML device \p device.
*
* Return the hwloc OS device object that describes the given
* NVML device \p device. Return NULL if there is none.
* \return The hwloc OS device object that describes the given NVML device \p device.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p device must match the local machine.
* I/O devices detection and the NVML component must be enabled in the topology.

View File

@@ -113,7 +113,7 @@ hwloc_opencl_get_device_pci_busid(cl_device_id device,
/** \brief Get the CPU set of processors that are physically
* close to OpenCL device \p device.
*
* Return the CPU set describing the locality of the OpenCL device \p device.
* Store in \p set the CPU-set describing the locality of the OpenCL device \p device.
*
* Topology \p topology and device \p device must match the local machine.
* I/O devices detection and the OpenCL component are not needed in the topology.
@@ -162,10 +162,10 @@ hwloc_opencl_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unuse
/** \brief Get the hwloc OS device object corresponding to the
* OpenCL device for the given indexes.
*
* Return the OS device object describing the OpenCL device
* \return The hwloc OS device object describing the OpenCL device
* whose platform index is \p platform_index,
 * and whose device index within this platform is \p device_index.
* Return NULL if there is none.
* \return \c NULL if there is none.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
@@ -192,8 +192,9 @@ hwloc_opencl_get_device_osdev_by_index(hwloc_topology_t topology,
/** \brief Get the hwloc OS device object corresponding to OpenCL device \p deviceX.
*
* Use OpenCL device attributes to find the corresponding hwloc OS device object.
* Return NULL if there is none or if useful attributes are not available.
* \return The hwloc OS device object corresponding to the given OpenCL device \p device.
* \return \c NULL if none could be found, for instance
* if required OpenCL attributes are not available.
*
* This function currently only works on AMD and NVIDIA OpenCL devices that support
* relevant OpenCL extensions. hwloc_opencl_get_device_osdev_by_index()

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -44,7 +44,7 @@ extern "C" {
/** \brief Get the CPU set of processors that are physically
* close to device \p ibdev.
*
* Return the CPU set describing the locality of the OpenFabrics
* Store in \p set the CPU-set describing the locality of the OpenFabrics
* device \p ibdev (InfiniBand, etc).
*
* Topology \p topology and device \p ibdev must match the local machine.
@@ -88,10 +88,11 @@ hwloc_ibv_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
/** \brief Get the hwloc OS device object corresponding to the OpenFabrics
* device named \p ibname.
*
* Return the OS device object describing the OpenFabrics device
* \return The hwloc OS device object describing the OpenFabrics device
* (InfiniBand, Omni-Path, usNIC, etc) whose name is \p ibname
* (mlx5_0, hfi1_0, usnic_0, qib0, etc).
* Returns NULL if there is none.
* \return \c NULL if none could be found.
*
* The name \p ibname is usually obtained from ibv_get_device_name().
*
* The topology \p topology does not necessarily have to match the current
@@ -117,8 +118,9 @@ hwloc_ibv_get_device_osdev_by_name(hwloc_topology_t topology,
/** \brief Get the hwloc OS device object corresponding to the OpenFabrics
* device \p ibdev.
*
* Return the OS device object describing the OpenFabrics device \p ibdev
* (InfiniBand, etc). Returns NULL if there is none.
* \return The hwloc OS device object describing the OpenFabrics
* device \p ibdev (InfiniBand, etc).
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p ibdev must match the local machine.
* I/O devices detection must be enabled in the topology.

View File

@@ -1,5 +1,5 @@
/*
* Copyright © 2013-2020 Inria. All rights reserved.
* Copyright © 2013-2022 Inria. All rights reserved.
* Copyright © 2016 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -27,6 +27,9 @@ struct hwloc_backend;
/** \defgroup hwlocality_disc_components Components and Plugins: Discovery components
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
@@ -93,6 +96,9 @@ struct hwloc_disc_component {
/** \defgroup hwlocality_disc_backends Components and Plugins: Discovery backends
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
@@ -241,6 +247,9 @@ HWLOC_DECLSPEC int hwloc_backend_enable(struct hwloc_backend *backend);
/** \defgroup hwlocality_generic_components Components and Plugins: Generic components
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
@@ -310,12 +319,34 @@ struct hwloc_component {
/** \defgroup hwlocality_components_core_funcs Components and Plugins: Core functions to be used by components
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
/** \brief Check whether insertion errors are hidden */
/** \brief Check whether error messages are hidden.
*
* Callers should print critical error messages
* (e.g. invalid hw topo info, invalid config)
* only if this function returns strictly less than 2.
*
* Callers should print non-critical error messages
* (e.g. failure to initialize CUDA)
* if this function returns 0.
*
 * This function returns 1 by default (show critical only),
* 0 in lstopo (show all),
* or anything set in HWLOC_HIDE_ERRORS in the environment.
*
* Use macros HWLOC_SHOW_CRITICAL_ERRORS() and HWLOC_SHOW_ALL_ERRORS()
* for clarity.
*/
HWLOC_DECLSPEC int hwloc_hide_errors(void);
#define HWLOC_SHOW_CRITICAL_ERRORS() (hwloc_hide_errors() < 2)
#define HWLOC_SHOW_ALL_ERRORS() (hwloc_hide_errors() == 0)
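A sketch of how a discovery component might use these macros (the component name and condition are illustrative):
if (parse_failed) {
  if (HWLOC_SHOW_CRITICAL_ERRORS())
    fprintf(stderr, "hwloc/mycomponent: invalid topology information, ignoring.\n");
  return -1;
}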
/** \brief Add an object to the topology.
*
* Insert new object \p obj in the topology starting under existing object \p root
@@ -455,6 +486,9 @@ hwloc_plugin_check_namespace(const char *pluginname __hwloc_attribute_unused, co
/** \defgroup hwlocality_components_filtering Components and Plugins: Filtering objects
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
@@ -469,9 +503,12 @@ hwloc_filter_check_pcidev_subtype_important(unsigned classid)
return (baseclass == 0x03 /* PCI_BASE_CLASS_DISPLAY */
|| baseclass == 0x02 /* PCI_BASE_CLASS_NETWORK */
|| baseclass == 0x01 /* PCI_BASE_CLASS_STORAGE */
|| baseclass == 0x00 /* Unclassified, for Atos/Bull BXI */
|| baseclass == 0x0b /* PCI_BASE_CLASS_PROCESSOR */
|| classid == 0x0c04 /* PCI_CLASS_SERIAL_FIBER */
|| classid == 0x0c06 /* PCI_CLASS_SERIAL_INFINIBAND */
|| classid == 0x0502 /* PCI_CLASS_MEMORY_CXL */
|| baseclass == 0x06 /* PCI_BASE_CLASS_BRIDGE with non-PCI downstream. the core will drop the useless ones later */
|| baseclass == 0x12 /* Processing Accelerators */);
}
@@ -527,6 +564,9 @@ hwloc_filter_check_keep_object(hwloc_topology_t topology, hwloc_obj_t obj)
/** \defgroup hwlocality_components_pcidisc Components and Plugins: helpers for PCI discovery
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
@@ -578,18 +618,76 @@ HWLOC_DECLSPEC int hwloc_pcidisc_tree_attach(struct hwloc_topology *topology, st
/** \defgroup hwlocality_components_pcifind Components and Plugins: finding PCI objects during other discoveries
*
* \note These structures and functions may change when ::HWLOC_COMPONENT_ABI is modified.
*
* @{
*/
/** \brief Find the normal parent of a PCI bus ID.
/** \brief Find the object or a parent of a PCI bus ID.
*
* Look at PCI affinity to find out where the given PCI bus ID should be attached.
* When attaching a new object (typically an OS device) whose locality
* is specified by PCI bus ID, this function returns the PCI object
* to use as a parent for attaching.
*
* This function should be used to attach an I/O device under the corresponding
* PCI object (if any), or under a normal (non-I/O) object with same locality.
* If the exact PCI device with this bus ID exists, it is returned.
* Otherwise (for instance if it was filtered out), the function returns
* another object with similar locality (for instance a parent bridge,
* or the local CPU Package).
*/
HWLOC_DECLSPEC struct hwloc_obj * hwloc_pci_find_parent_by_busid(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func);
/** \brief Find the PCI device or bridge matching a PCI bus ID exactly.
*
* This is useful for adding specific information about some objects
* based on their PCI id. When it comes to attaching objects based on
* PCI locality, hwloc_pci_find_parent_by_busid() should be preferred.
*/
HWLOC_DECLSPEC struct hwloc_obj * hwloc_pci_find_by_busid(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func);
/** \brief Handle to a new distances structure during its addition to the topology. */
typedef void * hwloc_backend_distances_add_handle_t;
/** \brief Create a new empty distances structure.
*
* This is identical to hwloc_distances_add_create()
* but this variant is designed for backend inserting
* distances during topology discovery.
*/
HWLOC_DECLSPEC hwloc_backend_distances_add_handle_t
hwloc_backend_distances_add_create(hwloc_topology_t topology,
const char *name, unsigned long kind,
unsigned long flags);
/** \brief Specify the objects and values in a new empty distances structure.
*
* This is similar to hwloc_distances_add_values()
* but this variant is designed for backend inserting
* distances during topology discovery.
*
 * The only semantic difference is that \p objs and \p values
* are not duplicated, but directly attached to the topology.
* On success, these arrays are given to the core and should not
* ever be freed by the caller anymore.
*/
HWLOC_DECLSPEC int
hwloc_backend_distances_add_values(hwloc_topology_t topology,
hwloc_backend_distances_add_handle_t handle,
unsigned nbobjs, hwloc_obj_t *objs,
hwloc_uint64_t *values,
unsigned long flags);
/** \brief Commit a new distances structure.
*
* This is similar to hwloc_distances_add_commit()
* but this variant is designed for backend inserting
* distances during topology discovery.
*/
HWLOC_DECLSPEC int
hwloc_backend_distances_add_commit(hwloc_topology_t topology,
hwloc_backend_distances_add_handle_t handle,
unsigned long flags);
/** @} */

View File

@@ -1,6 +1,6 @@
/*
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -120,6 +120,12 @@ extern "C" {
#define HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM HWLOC_NAME_CAPS(TOPOLOGY_FLAG_IS_THISSYSTEM)
#define HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES HWLOC_NAME_CAPS(TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)
#define HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT HWLOC_NAME_CAPS(TOPOLOGY_FLAG_IMPORT_SUPPORT)
#define HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING)
#define HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING)
#define HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING HWLOC_NAME_CAPS(TOPOLOGY_FLAG_DONT_CHANGE_BINDING)
#define HWLOC_TOPOLOGY_FLAG_NO_DISTANCES HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_DISTANCES)
#define HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_MEMATTRS)
#define HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS HWLOC_NAME_CAPS(TOPOLOGY_FLAG_NO_CPUKINDS)
#define hwloc_topology_set_pid HWLOC_NAME(topology_set_pid)
#define hwloc_topology_set_synthetic HWLOC_NAME(topology_set_synthetic)
@@ -356,6 +362,7 @@ extern "C" {
#define hwloc_get_closest_objs HWLOC_NAME(get_closest_objs)
#define hwloc_get_obj_below_by_type HWLOC_NAME(get_obj_below_by_type)
#define hwloc_get_obj_below_array_by_type HWLOC_NAME(get_obj_below_array_by_type)
#define hwloc_get_obj_with_same_locality HWLOC_NAME(get_obj_with_same_locality)
#define hwloc_distrib_flags_e HWLOC_NAME(distrib_flags_e)
#define HWLOC_DISTRIB_FLAG_REVERSE HWLOC_NAME_CAPS(DISTRIB_FLAG_REVERSE)
#define hwloc_distrib HWLOC_NAME(distrib)
@@ -377,6 +384,11 @@ extern "C" {
#define HWLOC_MEMATTR_ID_LOCALITY HWLOC_NAME_CAPS(MEMATTR_ID_LOCALITY)
#define HWLOC_MEMATTR_ID_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_BANDWIDTH)
#define HWLOC_MEMATTR_ID_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_LATENCY)
#define HWLOC_MEMATTR_ID_READ_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_READ_BANDWIDTH)
#define HWLOC_MEMATTR_ID_WRITE_BANDWIDTH HWLOC_NAME_CAPS(MEMATTR_ID_WRITE_BANDWIDTH)
#define HWLOC_MEMATTR_ID_READ_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_READ_LATENCY)
#define HWLOC_MEMATTR_ID_WRITE_LATENCY HWLOC_NAME_CAPS(MEMATTR_ID_WRITE_LATENCY)
#define HWLOC_MEMATTR_ID_MAX HWLOC_NAME_CAPS(MEMATTR_ID_MAX)
#define hwloc_memattr_id_t HWLOC_NAME(memattr_id_t)
#define hwloc_memattr_get_by_name HWLOC_NAME(memattr_get_by_name)
@@ -454,11 +466,22 @@ extern "C" {
#define hwloc_distances_obj_index HWLOC_NAME(distances_obj_index)
#define hwloc_distances_obj_pair_values HWLOC_NAME(distances_pair_values)
#define hwloc_distances_transform_e HWLOC_NAME(distances_transform_e)
#define HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL HWLOC_NAME_CAPS(DISTANCES_TRANSFORM_REMOVE_NULL)
#define HWLOC_DISTANCES_TRANSFORM_LINKS HWLOC_NAME_CAPS(DISTANCES_TRANSFORM_LINKS)
#define HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS HWLOC_NAME_CAPS(DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS)
#define HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE HWLOC_NAME_CAPS(DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE)
#define hwloc_distances_transform HWLOC_NAME(distances_transform)
#define hwloc_distances_add_flag_e HWLOC_NAME(distances_add_flag_e)
#define HWLOC_DISTANCES_ADD_FLAG_GROUP HWLOC_NAME_CAPS(DISTANCES_ADD_FLAG_GROUP)
#define HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE HWLOC_NAME_CAPS(DISTANCES_ADD_FLAG_GROUP_INACCURATE)
#define hwloc_distances_add HWLOC_NAME(distances_add)
#define hwloc_distances_add_handle_t HWLOC_NAME(distances_add_handle_t)
#define hwloc_distances_add_create HWLOC_NAME(distances_add_create)
#define hwloc_distances_add_values HWLOC_NAME(distances_add_values)
#define hwloc_distances_add_commit HWLOC_NAME(distances_add_commit)
#define hwloc_distances_remove HWLOC_NAME(distances_remove)
#define hwloc_distances_remove_by_depth HWLOC_NAME(distances_remove_by_depth)
#define hwloc_distances_remove_by_type HWLOC_NAME(distances_remove_by_type)
@@ -523,6 +546,11 @@ extern "C" {
#define hwloc_linux_get_tid_last_cpu_location HWLOC_NAME(linux_get_tid_last_cpu_location)
#define hwloc_linux_read_path_as_cpumask HWLOC_NAME(linux_read_file_cpumask)
/* windows.h */
#define hwloc_windows_get_nr_processor_groups HWLOC_NAME(windows_get_nr_processor_groups)
#define hwloc_windows_get_processor_group_cpuset HWLOC_NAME(windows_get_processor_group_cpuset)
/* openfabrics-verbs.h */
#define hwloc_ibv_get_device_cpuset HWLOC_NAME(ibv_get_device_cpuset)
@@ -564,6 +592,11 @@ extern "C" {
#define hwloc_rsmi_get_device_osdev HWLOC_NAME(rsmi_get_device_osdev)
#define hwloc_rsmi_get_device_osdev_by_index HWLOC_NAME(rsmi_get_device_osdev_by_index)
/* levelzero.h */
#define hwloc_levelzero_get_device_cpuset HWLOC_NAME(levelzero_get_device_cpuset)
#define hwloc_levelzero_get_device_osdev HWLOC_NAME(levelzero_get_device_osdev)
/* gl.h */
#define hwloc_gl_get_display_osdev_by_port_device HWLOC_NAME(gl_get_display_osdev_by_port_device)
@@ -620,10 +653,18 @@ extern "C" {
#define hwloc_pcidisc_tree_insert_by_busid HWLOC_NAME(pcidisc_tree_insert_by_busid)
#define hwloc_pcidisc_tree_attach HWLOC_NAME(pcidisc_tree_attach)
#define hwloc_pci_find_by_busid HWLOC_NAME(pcidisc_find_by_busid)
#define hwloc_pci_find_parent_by_busid HWLOC_NAME(pcidisc_find_busid_parent)
#define hwloc_backend_distances_add_handle_t HWLOC_NAME(backend_distances_add_handle_t)
#define hwloc_backend_distances_add_create HWLOC_NAME(backend_distances_add_create)
#define hwloc_backend_distances_add_values HWLOC_NAME(backend_distances_add_values)
#define hwloc_backend_distances_add_commit HWLOC_NAME(backend_distances_add_commit)
/* hwloc/deprecated.h */
#define hwloc_distances_add HWLOC_NAME(distances_add)
#define hwloc_topology_insert_misc_object_by_parent HWLOC_NAME(topology_insert_misc_object_by_parent)
#define hwloc_obj_cpuset_snprintf HWLOC_NAME(obj_cpuset_snprintf)
#define hwloc_obj_type_sscanf HWLOC_NAME(obj_type_sscanf)
@@ -733,6 +774,7 @@ extern "C" {
#define hwloc_cuda_component HWLOC_NAME(cuda_component)
#define hwloc_gl_component HWLOC_NAME(gl_component)
#define hwloc_levelzero_component HWLOC_NAME(levelzero_component)
#define hwloc_nvml_component HWLOC_NAME(nvml_component)
#define hwloc_rsmi_component HWLOC_NAME(rsmi_component)
#define hwloc_opencl_component HWLOC_NAME(opencl_component)
@@ -772,7 +814,6 @@ extern "C" {
#define hwloc_pci_discovery_init HWLOC_NAME(pci_discovery_init)
#define hwloc_pci_discovery_prepare HWLOC_NAME(pci_discovery_prepare)
#define hwloc_pci_discovery_exit HWLOC_NAME(pci_discovery_exit)
#define hwloc_pci_find_by_busid HWLOC_NAME(pcidisc_find_by_busid)
#define hwloc_find_insert_io_parent_by_complete_cpuset HWLOC_NAME(hwloc_find_insert_io_parent_by_complete_cpuset)
#define hwloc__add_info HWLOC_NAME(_add_info)
@@ -816,7 +857,6 @@ extern "C" {
#define hwloc_internal_distances_dup HWLOC_NAME(internal_distances_dup)
#define hwloc_internal_distances_refresh HWLOC_NAME(internal_distances_refresh)
#define hwloc_internal_distances_destroy HWLOC_NAME(internal_distances_destroy)
#define hwloc_internal_distances_add HWLOC_NAME(internal_distances_add)
#define hwloc_internal_distances_add_by_index HWLOC_NAME(internal_distances_add_by_index)
#define hwloc_internal_distances_invalidate_cached_objs HWLOC_NAME(hwloc_internal_distances_invalidate_cached_objs)
@@ -830,6 +870,7 @@ extern "C" {
#define hwloc_internal_memattrs_destroy HWLOC_NAME(internal_memattrs_destroy)
#define hwloc_internal_memattrs_need_refresh HWLOC_NAME(internal_memattrs_need_refresh)
#define hwloc_internal_memattrs_refresh HWLOC_NAME(internal_memattrs_refresh)
#define hwloc_internal_memattrs_guess_memory_tiers HWLOC_NAME(internal_memattrs_guess_memory_tiers)
#define hwloc_internal_cpukind_s HWLOC_NAME(internal_cpukind_s)
#define hwloc_internal_cpukinds_init HWLOC_NAME(internal_cpukinds_init)


@@ -1,5 +1,5 @@
/*
* Copyright © 2012-2020 Inria. All rights reserved.
* Copyright © 2012-2021 Inria. All rights reserved.
* Copyright (c) 2020, Advanced Micro Devices, Inc. All rights reserved.
* Written by Advanced Micro Devices,
* See COPYING in top-level directory.
@@ -41,7 +41,7 @@ extern "C" {
/** \brief Get the CPU set of logical processors that are physically
* close to AMD GPU device whose index is \p dv_ind.
*
* Return the CPU set describing the locality of the AMD GPU device
* Store in \p set the CPU-set describing the locality of the AMD GPU device
* whose index is \p dv_ind.
*
* Topology \p topology and device \p dv_ind must match the local machine.
@@ -96,8 +96,9 @@ hwloc_rsmi_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused,
/** \brief Get the hwloc OS device object corresponding to the
* AMD GPU device whose index is \p dv_ind.
*
* Return the OS device object describing the AMD GPU device whose
* index is \p dv_ind. Returns NULL if there is none.
* \return The hwloc OS device object describing the AMD GPU device whose
* index is \p dv_ind.
* \return \c NULL if none could be found.
*
* The topology \p topology does not necessarily have to match the current
* machine. For instance the topology may be an XML import of a remote host.
@@ -124,8 +125,9 @@ hwloc_rsmi_get_device_osdev_by_index(hwloc_topology_t topology, uint32_t dv_ind)
/** \brief Get the hwloc OS device object corresponding to AMD GPU device,
* whose index is \p dv_ind.
*
* Return the hwloc OS device object that describes the given
* AMD GPU, whose index is \p dv_ind Return NULL if there is none.
* \return The hwloc OS device object that describes the given
* AMD GPU, whose index is \p dv_ind.
* \return \c NULL if none could be found.
*
* Topology \p topology and device \p dv_ind must match the local machine.
* I/O devices detection and the ROCm SMI component must be enabled in the


@@ -0,0 +1,76 @@
/*
* Copyright © 2021 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
/** \file
* \brief Macros to help interaction between hwloc and Windows.
*
* Applications that use hwloc on Windows may want to include this file
* for Windows specific hwloc features.
*/
#ifndef HWLOC_WINDOWS_H
#define HWLOC_WINDOWS_H
#include "hwloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup hwlocality_windows Windows-specific helpers
*
* These functions query Windows processor groups.
* These groups partition the operating system into virtual sets
* of up to 64 neighbor PUs.
* Threads and processes may only be bound inside a single group.
* Although Windows processor groups may be exposed in the hwloc
* hierarchy as hwloc Groups, they are also often merged into
* existing hwloc objects such as NUMA nodes or Packages.
* This API provides explicit information about Windows processor
* groups so that applications know whether binding to a large
* set of PUs may fail because it spans over multiple Windows
* processor groups.
*
* @{
*/
/** \brief Get the number of Windows processor groups
*
* \p flags must be 0 for now.
*
* \return at least \c 1 on success.
* \return -1 on error, for instance if the topology does not match
* the current system (e.g. loaded from another machine through XML).
*/
HWLOC_DECLSPEC int hwloc_windows_get_nr_processor_groups(hwloc_topology_t topology, unsigned long flags);
/** \brief Get the CPU-set of a Windows processor group.
*
* Get the set of PU included in the processor group specified
* by \p pg_index.
* \p pg_index must be between \c 0 and the value returned
* by hwloc_windows_get_nr_processor_groups() minus 1.
*
* \p flags must be 0 for now.
*
* \return \c 0 on success.
* \return \c -1 on error, for instance if \p pg_index is invalid,
* or if the topology does not match the current system (e.g. loaded
* from another machine through XML).
*/
HWLOC_DECLSPEC int hwloc_windows_get_processor_group_cpuset(hwloc_topology_t topology, unsigned pg_index, hwloc_cpuset_t cpuset, unsigned long flags);
/** @} */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* HWLOC_WINDOWS_H */
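As a usage illustration for the two helpers above, a minimal sketch, assuming a topology loaded on the local Windows system (the function name print_processor_groups() is ours, not hwloc's):
#include "hwloc.h"
#include "hwloc/windows.h"
#include <stdio.h>
#include <stdlib.h>
/* Sketch: list every Windows processor group and the PUs it contains. */
static void print_processor_groups(hwloc_topology_t topology)
{
  int nr = hwloc_windows_get_nr_processor_groups(topology, 0);
  int i;
  if (nr < 0)
    return; /* e.g. the topology was loaded from another machine through XML */
  for (i = 0; i < nr; i++) {
    hwloc_cpuset_t set = hwloc_bitmap_alloc();
    if (set && hwloc_windows_get_processor_group_cpuset(topology, (unsigned) i, set, 0) == 0) {
      char *s;
      if (hwloc_bitmap_asprintf(&s, set) >= 0) {
        printf("processor group #%d contains PUs %s\n", i, s);
        free(s);
      }
    }
    hwloc_bitmap_free(set);
  }
}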


@@ -1,6 +1,6 @@
/*
* Copyright © 2009, 2011, 2012 CNRS. All rights reserved.
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009, 2011, 2012, 2015 Université Bordeaux. All rights reserved.
* Copyright © 2009-2020 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
@@ -290,10 +290,6 @@
/* Define to '1' if sysctlbyname is present and usable */
/* #undef HAVE_SYSCTLBYNAME */
/* Define to 1 if the system has the type
`SYSTEM_LOGICAL_PROCESSOR_INFORMATION'. */
#define HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION 1
/* Define to 1 if the system has the type
`SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX'. */
#define HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX 1


@@ -1,5 +1,5 @@
/*
* Copyright © 2018-2019 Inria. All rights reserved.
* Copyright © 2018-2020 Inria. All rights reserved.
*
* See COPYING in top-level directory.
*/
@@ -31,6 +31,7 @@ HWLOC_DECLSPEC extern const struct hwloc_component hwloc_cuda_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_gl_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_nvml_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_rsmi_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_levelzero_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_opencl_component;
HWLOC_DECLSPEC extern const struct hwloc_component hwloc_pci_component;


@@ -504,7 +504,7 @@ hwloc__obj_type_is_icache(hwloc_obj_type_t type)
} \
} while(0)
#else /* HAVE_USELOCALE */
#if __HWLOC_HAVE_ATTRIBUTE_UNUSED
#if HWLOC_HAVE_ATTRIBUTE_UNUSED
#define hwloc_localeswitch_declare int __dummy_nolocale __hwloc_attribute_unused
#define hwloc_localeswitch_init()
#else


@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
*
@@ -166,6 +166,7 @@ struct hwloc_topology {
unsigned long kind;
#define HWLOC_INTERNAL_DIST_FLAG_OBJS_VALID (1U<<0) /* if the objs array is valid below */
#define HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED (1U<<1) /* if the distances isn't in the list yet */
unsigned iflags;
/* objects are currently stored in physical_index order */
@@ -258,6 +259,7 @@ struct hwloc_topology {
unsigned bus_first, bus_last;
hwloc_bitmap_t cpuset;
} * pci_forced_locality;
hwloc_uint64_t pci_locality_quirks;
/* component blacklisting */
unsigned nr_blacklisted_components;
@@ -304,11 +306,6 @@ extern void hwloc_pci_discovery_init(struct hwloc_topology *topology);
extern void hwloc_pci_discovery_prepare(struct hwloc_topology *topology);
extern void hwloc_pci_discovery_exit(struct hwloc_topology *topology);
/* Look for an object matching the given domain/bus/func,
* either exactly or return the smallest container bridge
*/
extern struct hwloc_obj * hwloc_pci_find_by_busid(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func);
/* Look for an object matching complete cpuset exactly, or insert one.
* Return NULL on failure.
* Return a good fallback (object above) on failure to insert.
@@ -408,10 +405,14 @@ extern void hwloc_internal_distances_prepare(hwloc_topology_t topology);
extern void hwloc_internal_distances_destroy(hwloc_topology_t topology);
extern int hwloc_internal_distances_dup(hwloc_topology_t new, hwloc_topology_t old);
extern void hwloc_internal_distances_refresh(hwloc_topology_t topology);
extern int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name, unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values, unsigned long kind, unsigned long flags);
extern int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name, hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values, unsigned long kind, unsigned long flags);
extern void hwloc_internal_distances_invalidate_cached_objs(hwloc_topology_t topology);
/* these distances_add() functions are higher-level than those in hwloc/plugins.h
* but they may change in the future, hence they are not exported to plugins.
*/
extern int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name, hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values, unsigned long kind, unsigned long flags);
extern int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name, unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values, unsigned long kind, unsigned long flags);
extern void hwloc_internal_memattrs_init(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_prepare(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_destroy(hwloc_topology_t topology);
@@ -419,6 +420,7 @@ extern void hwloc_internal_memattrs_need_refresh(hwloc_topology_t topology);
extern void hwloc_internal_memattrs_refresh(hwloc_topology_t topology);
extern int hwloc_internal_memattrs_dup(hwloc_topology_t new, hwloc_topology_t old);
extern int hwloc_internal_memattr_set_value(hwloc_topology_t topology, hwloc_memattr_id_t id, hwloc_obj_type_t target_type, hwloc_uint64_t target_gp_index, unsigned target_os_index, struct hwloc_internal_location_s *initiator, hwloc_uint64_t value);
extern int hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology);
extern void hwloc_internal_cpukinds_init(hwloc_topology_t topology);
extern int hwloc_internal_cpukinds_rank(hwloc_topology_t topology);
@@ -480,6 +482,7 @@ extern char * hwloc_progname(struct hwloc_topology *topology);
#define HWLOC_GROUP_KIND_AIX_SDL_UNKNOWN 210 /* subkind is SDL level */
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220 /* no subkind */
#define HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN 221 /* no subkind */
#define HWLOC_GROUP_KIND_LINUX_CLUSTER 222 /* no subkind */
/* distance groups */
#define HWLOC_GROUP_KIND_DISTANCE 900 /* subkind is round of adding these groups during distance based grouping */
/* finally, hwloc-specific groups required to insert something else, should disappear as soon as possible */


@@ -0,0 +1,30 @@
/*
* Copyright © 2009 Université Bordeaux
* Copyright © 2020-2022 Inria. All rights reserved.
*
* See COPYING in top-level directory.
*/
#ifndef HWLOC_PRIVATE_WINDOWS_H
#define HWLOC_PRIVATE_WINDOWS_H
#ifndef _ANONYMOUS_UNION
#ifdef __GNUC__
#define _ANONYMOUS_UNION __extension__
#else
#define _ANONYMOUS_UNION
#endif /* __GNUC__ */
#endif /* _ANONYMOUS_UNION */
#ifndef _ANONYMOUS_STRUCT
#ifdef __GNUC__
#define _ANONYMOUS_STRUCT __extension__
#else
#define _ANONYMOUS_STRUCT
#endif /* __GNUC__ */
#endif /* _ANONYMOUS_STRUCT */
#define DUMMYUNIONNAME
#define DUMMYSTRUCTNAME
#endif /* HWLOC_PRIVATE_WINDOWS_H */


@@ -1,5 +1,5 @@
/*
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2012 Université Bordeaux
* See COPYING in top-level directory.
*/
@@ -124,7 +124,7 @@ hwloc_dlforeachfile(const char *_paths,
*colon = '\0';
if (hwloc_plugins_verbose)
fprintf(stderr, " Looking under %s\n", path);
fprintf(stderr, "hwloc: Looking under %s\n", path);
dir = opendir(path);
if (!dir)
@@ -198,7 +198,7 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
char *componentsymbolname;
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin dlforeach found `%s'\n", filename);
fprintf(stderr, "hwloc: Plugin dlforeach found `%s'\n", filename);
basename = strrchr(filename, '/');
if (!basename)
@@ -208,7 +208,7 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
if (hwloc_plugins_blacklist && strstr(hwloc_plugins_blacklist, basename)) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin `%s' is blacklisted in the environment\n", basename);
fprintf(stderr, "hwloc: Plugin `%s' is blacklisted in the environment\n", basename);
goto out;
}
@@ -216,14 +216,14 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
handle = hwloc_dlopenext(filename);
if (!handle) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Failed to load plugin: %s\n", hwloc_dlerror());
fprintf(stderr, "hwloc: Failed to load plugin: %s\n", hwloc_dlerror());
goto out;
}
componentsymbolname = malloc(strlen(basename)+10+1);
if (!componentsymbolname) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Failed to allocation component `%s' symbol\n",
fprintf(stderr, "hwloc: Failed to allocation component `%s' symbol\n",
basename);
goto out_with_handle;
}
@@ -231,38 +231,38 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
component = hwloc_dlsym(handle, componentsymbolname);
if (!component) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Failed to find component symbol `%s'\n",
fprintf(stderr, "hwloc: Failed to find component symbol `%s'\n",
componentsymbolname);
free(componentsymbolname);
goto out_with_handle;
}
if (component->abi != HWLOC_COMPONENT_ABI) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin symbol ABI %u instead of %d\n",
fprintf(stderr, "hwloc: Plugin symbol ABI %u instead of %d\n",
component->abi, HWLOC_COMPONENT_ABI);
free(componentsymbolname);
goto out_with_handle;
}
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin contains expected symbol `%s'\n",
fprintf(stderr, "hwloc: Plugin contains expected symbol `%s'\n",
componentsymbolname);
free(componentsymbolname);
if (HWLOC_COMPONENT_TYPE_DISC == component->type) {
if (strncmp(basename, "hwloc_", 6)) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin name `%s' doesn't match its type DISCOVERY\n", basename);
fprintf(stderr, "hwloc: Plugin name `%s' doesn't match its type DISCOVERY\n", basename);
goto out_with_handle;
}
} else if (HWLOC_COMPONENT_TYPE_XML == component->type) {
if (strncmp(basename, "hwloc_xml_", 10)) {
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin name `%s' doesn't match its type XML\n", basename);
fprintf(stderr, "hwloc: Plugin name `%s' doesn't match its type XML\n", basename);
goto out_with_handle;
}
} else {
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin name `%s' has invalid type %u\n",
fprintf(stderr, "hwloc: Plugin name `%s' has invalid type %u\n",
basename, (unsigned) component->type);
goto out_with_handle;
}
@@ -277,7 +277,7 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
desc->handle = handle;
desc->next = NULL;
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin descriptor `%s' ready\n", basename);
fprintf(stderr, "hwloc: Plugin descriptor `%s' ready\n", basename);
/* append to the list */
prevdesc = &hwloc_plugins;
@@ -285,7 +285,7 @@ hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused)
prevdesc = &((*prevdesc)->next);
*prevdesc = desc;
if (hwloc_plugins_verbose)
fprintf(stderr, "Plugin descriptor `%s' queued\n", basename);
fprintf(stderr, "hwloc: Plugin descriptor `%s' queued\n", basename);
return 0;
out_with_handle:
@@ -300,7 +300,7 @@ hwloc_plugins_exit(void)
struct hwloc__plugin_desc *desc, *next;
if (hwloc_plugins_verbose)
fprintf(stderr, "Closing all plugins\n");
fprintf(stderr, "hwloc: Closing all plugins\n");
desc = hwloc_plugins;
while (desc) {
@@ -340,7 +340,7 @@ hwloc_plugins_init(void)
hwloc_plugins = NULL;
if (hwloc_plugins_verbose)
fprintf(stderr, "Starting plugin dlforeach in %s\n", path);
fprintf(stderr, "hwloc: Starting plugin dlforeach in %s\n", path);
err = hwloc_dlforeachfile(path, hwloc__dlforeach_cb, NULL);
if (err)
goto out_with_init;
@@ -364,14 +364,14 @@ hwloc_disc_component_register(struct hwloc_disc_component *component,
/* check that the component name is valid */
if (!strcmp(component->name, HWLOC_COMPONENT_STOP_NAME)) {
if (hwloc_components_verbose)
fprintf(stderr, "Cannot register discovery component with reserved name `" HWLOC_COMPONENT_STOP_NAME "'\n");
fprintf(stderr, "hwloc: Cannot register discovery component with reserved name `" HWLOC_COMPONENT_STOP_NAME "'\n");
return -1;
}
if (strchr(component->name, HWLOC_COMPONENT_EXCLUDE_CHAR)
|| strchr(component->name, HWLOC_COMPONENT_PHASESEP_CHAR)
|| strcspn(component->name, HWLOC_COMPONENT_SEPS) != strlen(component->name)) {
if (hwloc_components_verbose)
fprintf(stderr, "Cannot register discovery component with name `%s' containing reserved characters `%c" HWLOC_COMPONENT_SEPS "'\n",
fprintf(stderr, "hwloc: Cannot register discovery component with name `%s' containing reserved characters `%c" HWLOC_COMPONENT_SEPS "'\n",
component->name, HWLOC_COMPONENT_EXCLUDE_CHAR);
return -1;
}
@@ -386,8 +386,9 @@ hwloc_disc_component_register(struct hwloc_disc_component *component,
|HWLOC_DISC_PHASE_MISC
|HWLOC_DISC_PHASE_ANNOTATE
|HWLOC_DISC_PHASE_TWEAK))) {
fprintf(stderr, "Cannot register discovery component `%s' with invalid phases 0x%x\n",
component->name, component->phases);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Cannot register discovery component `%s' with invalid phases 0x%x\n",
component->name, component->phases);
return -1;
}
@@ -398,13 +399,13 @@ hwloc_disc_component_register(struct hwloc_disc_component *component,
if ((*prev)->priority < component->priority) {
/* drop the existing component */
if (hwloc_components_verbose)
fprintf(stderr, "Dropping previously registered discovery component `%s', priority %u lower than new one %u\n",
fprintf(stderr, "hwloc: Dropping previously registered discovery component `%s', priority %u lower than new one %u\n",
(*prev)->name, (*prev)->priority, component->priority);
*prev = (*prev)->next;
} else {
/* drop the new one */
if (hwloc_components_verbose)
fprintf(stderr, "Ignoring new discovery component `%s', priority %u lower than previously registered one %u\n",
fprintf(stderr, "hwloc: Ignoring new discovery component `%s', priority %u lower than previously registered one %u\n",
component->name, component->priority, (*prev)->priority);
return -1;
}
@@ -412,7 +413,7 @@ hwloc_disc_component_register(struct hwloc_disc_component *component,
prev = &((*prev)->next);
}
if (hwloc_components_verbose)
fprintf(stderr, "Registered discovery component `%s' phases 0x%x with priority %u (%s%s)\n",
fprintf(stderr, "hwloc: Registered discovery component `%s' phases 0x%x with priority %u (%s%s)\n",
component->name, component->phases, component->priority,
filename ? "from plugin " : "statically build", filename ? filename : "");
@@ -475,15 +476,16 @@ hwloc_components_init(void)
/* hwloc_static_components is created by configure in static-components.h */
for(i=0; NULL != hwloc_static_components[i]; i++) {
if (hwloc_static_components[i]->flags) {
fprintf(stderr, "Ignoring static component with invalid flags %lx\n",
hwloc_static_components[i]->flags);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Ignoring static component with invalid flags %lx\n",
hwloc_static_components[i]->flags);
continue;
}
/* initialize the component */
if (hwloc_static_components[i]->init && hwloc_static_components[i]->init(0) < 0) {
if (hwloc_components_verbose)
fprintf(stderr, "Ignoring static component, failed to initialize\n");
fprintf(stderr, "hwloc: Ignoring static component, failed to initialize\n");
continue;
}
/* queue ->finalize() callback if any */
@@ -503,15 +505,16 @@ hwloc_components_init(void)
#ifdef HWLOC_HAVE_PLUGINS
for(desc = hwloc_plugins; NULL != desc; desc = desc->next) {
if (desc->component->flags) {
fprintf(stderr, "Ignoring plugin `%s' component with invalid flags %lx\n",
desc->name, desc->component->flags);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Ignoring plugin `%s' component with invalid flags %lx\n",
desc->name, desc->component->flags);
continue;
}
/* initialize the component */
if (desc->component->init && desc->component->init(0) < 0) {
if (hwloc_components_verbose)
fprintf(stderr, "Ignoring plugin `%s', failed to initialize\n", desc->name);
fprintf(stderr, "hwloc: Ignoring plugin `%s', failed to initialize\n", desc->name);
continue;
}
/* queue ->finalize() callback if any */
@@ -608,7 +611,7 @@ hwloc_disc_component_blacklist_one(struct hwloc_topology *topology,
/* replace linuxpci and linuxio with linux (with IO phases)
* for backward compatibility with pre-v2.0 and v2.0 respectively */
if (hwloc_components_verbose)
fprintf(stderr, "Replacing deprecated component `%s' with `linux' IO phases in blacklisting\n", name);
fprintf(stderr, "hwloc: Replacing deprecated component `%s' with `linux' IO phases in blacklisting\n", name);
comp = hwloc_disc_component_find("linux", NULL);
phases = HWLOC_DISC_PHASE_PCI | HWLOC_DISC_PHASE_IO | HWLOC_DISC_PHASE_MISC | HWLOC_DISC_PHASE_ANNOTATE;
@@ -624,7 +627,7 @@ hwloc_disc_component_blacklist_one(struct hwloc_topology *topology,
}
if (hwloc_components_verbose)
fprintf(stderr, "Blacklisting component `%s` phases 0x%x\n", comp->name, phases);
fprintf(stderr, "hwloc: Blacklisting component `%s` phases 0x%x\n", comp->name, phases);
for(i=0; i<topology->nr_blacklisted_components; i++) {
if (topology->blacklisted_components[i].component == comp) {
@@ -727,7 +730,7 @@ hwloc_disc_component_try_enable(struct hwloc_topology *topology,
if (hwloc_components_verbose)
/* do not warn if envvar_forced since system-wide HWLOC_COMPONENTS must be silently ignored after set_xml() etc.
*/
fprintf(stderr, "Excluding discovery component `%s' phases 0x%x, conflicts with excludes 0x%x\n",
fprintf(stderr, "hwloc: Excluding discovery component `%s' phases 0x%x, conflicts with excludes 0x%x\n",
comp->name, comp->phases, topology->backend_excluded_phases);
return -1;
}
@@ -735,8 +738,8 @@ hwloc_disc_component_try_enable(struct hwloc_topology *topology,
backend = comp->instantiate(topology, comp, topology->backend_excluded_phases | blacklisted_phases,
NULL, NULL, NULL);
if (!backend) {
if (hwloc_components_verbose || envvar_forced)
fprintf(stderr, "Failed to instantiate discovery component `%s'\n", comp->name);
if (hwloc_components_verbose || (envvar_forced && HWLOC_SHOW_CRITICAL_ERRORS()))
fprintf(stderr, "hwloc: Failed to instantiate discovery component `%s'\n", comp->name);
return -1;
}
@@ -817,7 +820,7 @@ hwloc_disc_components_enable_others(struct hwloc_topology *topology)
name = curenv;
if (!strcmp(name, "linuxpci") || !strcmp(name, "linuxio")) {
if (hwloc_components_verbose)
fprintf(stderr, "Replacing deprecated component `%s' with `linux' in envvar forcing\n", name);
fprintf(stderr, "hwloc: Replacing deprecated component `%s' with `linux' in envvar forcing\n", name);
name = "linux";
}
@@ -832,7 +835,8 @@ hwloc_disc_components_enable_others(struct hwloc_topology *topology)
if (comp->phases & ~blacklisted_phases)
hwloc_disc_component_try_enable(topology, comp, 1 /* envvar forced */, blacklisted_phases);
} else {
fprintf(stderr, "Cannot find discovery component `%s'\n", name);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Cannot find discovery component `%s'\n", name);
}
/* restore chars (the second loop below needs env to be unmodified) */
@@ -864,7 +868,7 @@ hwloc_disc_components_enable_others(struct hwloc_topology *topology)
if (!(comp->phases & ~blacklisted_phases)) {
if (hwloc_components_verbose)
fprintf(stderr, "Excluding blacklisted discovery component `%s' phases 0x%x\n",
fprintf(stderr, "hwloc: Excluding blacklisted discovery component `%s' phases 0x%x\n",
comp->name, comp->phases);
goto nextcomp;
}
@@ -879,7 +883,7 @@ nextcomp:
/* print a summary */
int first = 1;
backend = topology->backends;
fprintf(stderr, "Final list of enabled discovery components: ");
fprintf(stderr, "hwloc: Final list of enabled discovery components: ");
while (backend != NULL) {
fprintf(stderr, "%s%s(0x%x)", first ? "" : ",", backend->component->name, backend->phases);
backend = backend->next;
@@ -935,7 +939,7 @@ hwloc_backend_alloc(struct hwloc_topology *topology,
/* filter-out component phases that are excluded */
backend->phases = component->phases & ~topology->backend_excluded_phases;
if (backend->phases != component->phases && hwloc_components_verbose)
fprintf(stderr, "Trying discovery component `%s' with phases 0x%x instead of 0x%x\n",
fprintf(stderr, "hwloc: Trying discovery component `%s' with phases 0x%x instead of 0x%x\n",
component->name, backend->phases, component->phases);
backend->flags = 0;
backend->discover = NULL;
@@ -963,8 +967,9 @@ hwloc_backend_enable(struct hwloc_backend *backend)
/* check backend flags */
if (backend->flags) {
fprintf(stderr, "Cannot enable discovery component `%s' phases 0x%x with unknown flags %lx\n",
backend->component->name, backend->component->phases, backend->flags);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Cannot enable discovery component `%s' phases 0x%x with unknown flags %lx\n",
backend->component->name, backend->component->phases, backend->flags);
return -1;
}
@@ -973,7 +978,7 @@ hwloc_backend_enable(struct hwloc_backend *backend)
while (NULL != *pprev) {
if ((*pprev)->component == backend->component) {
if (hwloc_components_verbose)
fprintf(stderr, "Cannot enable discovery component `%s' phases 0x%x twice\n",
fprintf(stderr, "hwloc: Cannot enable discovery component `%s' phases 0x%x twice\n",
backend->component->name, backend->component->phases);
hwloc_backend_disable(backend);
errno = EBUSY;
@@ -983,7 +988,7 @@ hwloc_backend_enable(struct hwloc_backend *backend)
}
if (hwloc_components_verbose)
fprintf(stderr, "Enabling discovery component `%s' with phases 0x%x (among 0x%x)\n",
fprintf(stderr, "hwloc: Enabling discovery component `%s' with phases 0x%x (among 0x%x)\n",
backend->component->name, backend->phases, backend->component->phases);
/* enqueue at the end */
@@ -1067,7 +1072,7 @@ hwloc_backends_disable_all(struct hwloc_topology *topology)
while (NULL != (backend = topology->backends)) {
struct hwloc_backend *next = backend->next;
if (hwloc_components_verbose)
fprintf(stderr, "Disabling discovery component `%s'\n",
fprintf(stderr, "hwloc: Disabling discovery component `%s'\n",
backend->component->name);
hwloc_backend_disable(backend);
topology->backends = next;


@@ -1,5 +1,5 @@
/*
* Copyright © 2020-2021 Inria. All rights reserved.
* Copyright © 2020-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -42,6 +42,9 @@ hwloc_internal_cpukinds_dup(hwloc_topology_t new, hwloc_topology_t old)
struct hwloc_internal_cpukind_s *kinds;
unsigned i;
if (!old->nr_cpukinds)
return 0;
kinds = hwloc_tma_malloc(tma, old->nr_cpukinds * sizeof(*kinds));
if (!kinds)
return -1;
@@ -343,7 +346,8 @@ enum hwloc_cpukinds_ranking {
HWLOC_CPUKINDS_RANKING_DEFAULT, /* forced + frequency on ARM, forced + coretype_frequency otherwise */
HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY, /* default without forced */
HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY,
HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY,
HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY, /* either coretype or frequency or both */
HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY_STRICT, /* both coretype and frequency are required */
HWLOC_CPUKINDS_RANKING_CORETYPE,
HWLOC_CPUKINDS_RANKING_FREQUENCY,
HWLOC_CPUKINDS_RANKING_FREQUENCY_MAX,
@@ -358,9 +362,9 @@ hwloc__cpukinds_try_rank_by_info(struct hwloc_topology *topology,
{
unsigned i;
if (HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype+frequency...\n");
/* we need intel_core_type + (base or max freq) for all kinds */
if (HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY_STRICT == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype+frequency_strict...\n");
/* we need intel_core_type AND (base or max freq) for all kinds */
if (!summary->have_intel_core_type
|| (!summary->have_max_freq && !summary->have_base_freq))
return -1;
@@ -373,6 +377,21 @@ hwloc__cpukinds_try_rank_by_info(struct hwloc_topology *topology,
kind->ranking_value = (summary->summaries[i].intel_core_type << 20) + summary->summaries[i].max_freq;
}
} else if (HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype+frequency...\n");
/* we need intel_core_type OR (base or max freq) for all kinds */
if (!summary->have_intel_core_type
&& (!summary->have_max_freq && !summary->have_base_freq))
return -1;
/* rank first by coretype (Core>>Atom) then by frequency, base if available, max otherwise */
for(i=0; i<topology->nr_cpukinds; i++) {
struct hwloc_internal_cpukind_s *kind = &topology->cpukinds[i];
if (summary->have_base_freq)
kind->ranking_value = (summary->summaries[i].intel_core_type << 20) + summary->summaries[i].base_freq;
else
kind->ranking_value = (summary->summaries[i].intel_core_type << 20) + summary->summaries[i].max_freq;
}
} else if (HWLOC_CPUKINDS_RANKING_CORETYPE == heuristics) {
hwloc_debug("Trying to rank cpukinds by coretype...\n");
/* we need intel_core_type */
@@ -429,7 +448,9 @@ static int hwloc__cpukinds_compare_ranking_values(const void *_a, const void *_b
{
const struct hwloc_internal_cpukind_s *a = _a;
const struct hwloc_internal_cpukind_s *b = _b;
return a->ranking_value - b->ranking_value;
uint64_t arv = a->ranking_value;
uint64_t brv = b->ranking_value;
return arv < brv ? -1 : arv > brv ? 1 : 0;
}
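The comparator rewrite above matters because ranking_value is a 64-bit quantity: returning a->ranking_value - b->ranking_value converts an unsigned 64-bit difference to int, so, for instance, values 0 and 2^32 would compare as equal (the low 32 bits of their difference are all zero), and large differences can even come back with the wrong sign. The explicit three-way comparison yields a correct -1/0/1 regardless of magnitude.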
/* this function requires ranking values to be unique */
@@ -469,6 +490,8 @@ hwloc_internal_cpukinds_rank(struct hwloc_topology *topology)
heuristics = HWLOC_CPUKINDS_RANKING_NONE;
else if (!strcmp(env, "coretype+frequency"))
heuristics = HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY;
else if (!strcmp(env, "coretype+frequency_strict"))
heuristics = HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY_STRICT;
else if (!strcmp(env, "coretype"))
heuristics = HWLOC_CPUKINDS_RANKING_CORETYPE;
else if (!strcmp(env, "frequency"))
@@ -481,16 +504,14 @@ hwloc_internal_cpukinds_rank(struct hwloc_topology *topology)
heuristics = HWLOC_CPUKINDS_RANKING_FORCED_EFFICIENCY;
else if (!strcmp(env, "no_forced_efficiency"))
heuristics = HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY;
else if (!hwloc_hide_errors())
fprintf(stderr, "Failed to recognize HWLOC_CPUKINDS_RANKING value %s\n", env);
else if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Failed to recognize HWLOC_CPUKINDS_RANKING value %s\n", env);
}
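As a usage note, the new strict heuristic can be requested explicitly through this environment variable, e.g. with a hypothetical command line such as HWLOC_CPUKINDS_RANKING=coretype+frequency_strict ./myapp, while leaving the variable unset keeps the default strategy handled below.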
if (heuristics == HWLOC_CPUKINDS_RANKING_DEFAULT
|| heuristics == HWLOC_CPUKINDS_RANKING_NO_FORCED_EFFICIENCY) {
/* default is forced_efficiency first */
struct hwloc_cpukinds_info_summary summary;
enum hwloc_cpukinds_ranking subheuristics;
const char *arch;
if (heuristics == HWLOC_CPUKINDS_RANKING_DEFAULT)
hwloc_debug("Using default ranking strategy...\n");
@@ -508,16 +529,7 @@ hwloc_internal_cpukinds_rank(struct hwloc_topology *topology)
goto failed;
hwloc__cpukinds_summarize_info(topology, &summary);
arch = hwloc_obj_get_info_by_name(topology->levels[0][0], "Architecture");
/* TODO: rather coretype_frequency only on x86/Intel? */
if (arch && (!strncmp(arch, "arm", 3) || !strncmp(arch, "aarch", 5)))
/* then frequency on ARM */
subheuristics = HWLOC_CPUKINDS_RANKING_FREQUENCY;
else
/* or coretype+frequency otherwise */
subheuristics = HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY;
err = hwloc__cpukinds_try_rank_by_info(topology, subheuristics, &summary);
err = hwloc__cpukinds_try_rank_by_info(topology, HWLOC_CPUKINDS_RANKING_CORETYPE_FREQUENCY, &summary);
free(summary.summaries);
if (!err)
goto ready;


@@ -1,5 +1,5 @@
/*
* Copyright © 2013-2020 Inria. All rights reserved.
* Copyright © 2013-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -218,7 +218,7 @@ hwloc_diff_trees(hwloc_topology_t topo1, hwloc_obj_t obj1,
struct hwloc_info_s *info1 = &obj1->infos[i], *info2 = &obj2->infos[i];
if (strcmp(info1->name, info2->name))
goto out_too_complex;
if (strcmp(obj1->infos[i].value, obj2->infos[i].value)) {
if (strcmp(info1->value, info2->value)) {
err = hwloc_append_diff_obj_attr_string(obj1,
HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO,
info1->name,


@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2020 Inria. All rights reserved.
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2011-2012 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -17,6 +17,37 @@
static struct hwloc_internal_distances_s *
hwloc__internal_distances_from_public(hwloc_topology_t topology, struct hwloc_distances_s *distances);
static void
hwloc__groups_by_distances(struct hwloc_topology *topology, unsigned nbobjs, struct hwloc_obj **objs, uint64_t *values, unsigned long kind, unsigned nbaccuracies, float *accuracies, int needcheck);
static void
hwloc_internal_distances_restrict(hwloc_obj_t *objs,
uint64_t *indexes,
hwloc_obj_type_t *different_types,
uint64_t *values,
unsigned nbobjs, unsigned disappeared);
static void
hwloc_internal_distances_print_matrix(struct hwloc_internal_distances_s *dist)
{
unsigned nbobjs = dist->nbobjs;
hwloc_obj_t *objs = dist->objs;
hwloc_uint64_t *values = dist->values;
int gp = !HWLOC_DIST_TYPE_USE_OS_INDEX(dist->unique_type);
unsigned i, j;
fprintf(stderr, "%s", gp ? "gp_index" : "os_index");
for(j=0; j<nbobjs; j++)
fprintf(stderr, " % 5d", (int)(gp ? objs[j]->gp_index : objs[j]->os_index));
fprintf(stderr, "\n");
for(i=0; i<nbobjs; i++) {
fprintf(stderr, " % 5d", (int)(gp ? objs[i]->gp_index : objs[i]->os_index));
for(j=0; j<nbobjs; j++)
fprintf(stderr, " % 5lld", (long long) values[i*nbobjs + j]);
fprintf(stderr, "\n");
}
}
/******************************************************
* Global init, prepare, destroy, dup
*/
@@ -244,27 +275,33 @@ int hwloc_distances_release_remove(hwloc_topology_t topology,
return 0;
}
/******************************************************
* Add distances to the topology
/*********************************************************
* Backend functions for adding distances to the topology
*/
/* cancel a distances handle. only needed internally for now */
static void
hwloc__groups_by_distances(struct hwloc_topology *topology, unsigned nbobjs, struct hwloc_obj **objs, uint64_t *values, unsigned long kind, unsigned nbaccuracies, float *accuracies, int needcheck);
hwloc_backend_distances_add__cancel(struct hwloc_internal_distances_s *dist)
{
/* everything is set to NULL in hwloc_backend_distances_add_create() */
free(dist->name);
free(dist->indexes);
free(dist->objs);
free(dist->different_types);
free(dist->values);
free(dist);
}
/* insert a distance matrix in the topology.
* the caller gives us the distances and objs pointers, we'll free them later.
/* prepare a distances handle for later commit in the topology.
* we duplicate the caller's name.
*/
static int
hwloc_internal_distances__add(hwloc_topology_t topology, const char *name,
hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types,
unsigned nbobjs, hwloc_obj_t *objs, uint64_t *indexes, uint64_t *values,
unsigned long kind, unsigned iflags)
hwloc_backend_distances_add_handle_t
hwloc_backend_distances_add_create(hwloc_topology_t topology,
const char *name, unsigned long kind, unsigned long flags)
{
struct hwloc_internal_distances_s *dist;
if (different_types) {
kind |= HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES; /* the user isn't forced to give it */
} else if (kind & HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES) {
if (flags) {
errno = EINVAL;
goto err;
}
@@ -273,110 +310,54 @@ hwloc_internal_distances__add(hwloc_topology_t topology, const char *name,
if (!dist)
goto err;
if (name)
if (name) {
dist->name = strdup(name); /* ignore failure */
dist->unique_type = unique_type;
dist->different_types = different_types;
dist->nbobjs = nbobjs;
dist->kind = kind;
dist->iflags = iflags;
assert(!!(iflags & HWLOC_INTERNAL_DIST_FLAG_OBJS_VALID) == !!objs);
if (!objs) {
assert(indexes);
/* we only have indexes, we'll refresh objs from there */
dist->indexes = indexes;
dist->objs = calloc(nbobjs, sizeof(hwloc_obj_t));
if (!dist->objs)
if (!dist->name)
goto err_with_dist;
} else {
unsigned i;
assert(!indexes);
/* we only have objs, generate the indexes arrays so that we can refresh objs later */
dist->objs = objs;
dist->indexes = malloc(nbobjs * sizeof(*dist->indexes));
if (!dist->indexes)
goto err_with_dist;
if (HWLOC_DIST_TYPE_USE_OS_INDEX(dist->unique_type)) {
for(i=0; i<nbobjs; i++)
dist->indexes[i] = objs[i]->os_index;
} else {
for(i=0; i<nbobjs; i++)
dist->indexes[i] = objs[i]->gp_index;
}
}
dist->values = values;
dist->kind = kind;
dist->iflags = HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED;
dist->unique_type = HWLOC_OBJ_TYPE_NONE;
dist->different_types = NULL;
dist->nbobjs = 0;
dist->indexes = NULL;
dist->objs = NULL;
dist->values = NULL;
dist->id = topology->next_dist_id++;
if (topology->last_dist)
topology->last_dist->next = dist;
else
topology->first_dist = dist;
dist->prev = topology->last_dist;
dist->next = NULL;
topology->last_dist = dist;
return 0;
return dist;
err_with_dist:
if (name)
free(dist->name);
free(dist);
hwloc_backend_distances_add__cancel(dist);
err:
free(different_types);
free(objs);
free(indexes);
free(values);
return -1;
return NULL;
}
int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name,
hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values,
unsigned long kind, unsigned long flags)
/* attach objects and values to a distances handle.
* on success, objs and values arrays are attached and will be freed with the distances.
* on failure, the handle is freed.
*/
int
hwloc_backend_distances_add_values(hwloc_topology_t topology __hwloc_attribute_unused,
hwloc_backend_distances_add_handle_t handle,
unsigned nbobjs, hwloc_obj_t *objs,
hwloc_uint64_t *values,
unsigned long flags)
{
unsigned iflags = 0; /* objs not valid */
if (nbobjs < 2) {
errno = EINVAL;
goto err;
}
/* cannot group without objects,
* and we don't group from XML anyway since the hwloc that generated the XML should have grouped already.
*/
if (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP) {
errno = EINVAL;
goto err;
}
return hwloc_internal_distances__add(topology, name, unique_type, different_types, nbobjs, NULL, indexes, values, kind, iflags);
err:
free(indexes);
free(values);
free(different_types);
return -1;
}
static void
hwloc_internal_distances_restrict(hwloc_obj_t *objs,
uint64_t *indexes,
uint64_t *values,
unsigned nbobjs, unsigned disappeared);
int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values,
unsigned long kind, unsigned long flags)
{
hwloc_obj_type_t unique_type, *different_types;
struct hwloc_internal_distances_s *dist = handle;
hwloc_obj_type_t unique_type, *different_types = NULL;
hwloc_uint64_t *indexes = NULL;
unsigned i, disappeared = 0;
unsigned iflags = HWLOC_INTERNAL_DIST_FLAG_OBJS_VALID;
if (nbobjs < 2) {
if (dist->nbobjs || !(dist->iflags & HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED)) {
/* target distances is already set */
errno = EINVAL;
goto err;
}
if (flags || nbobjs < 2 || !objs || !values) {
errno = EINVAL;
goto err;
}
@@ -389,15 +370,18 @@ int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
/* some objects are NULL */
if (disappeared == nbobjs) {
/* nothing left, drop the matrix */
free(objs);
free(values);
return 0;
errno = ENOENT;
goto err;
}
/* restrict the matrix */
hwloc_internal_distances_restrict(objs, NULL, values, nbobjs, disappeared);
hwloc_internal_distances_restrict(objs, NULL, NULL, values, nbobjs, disappeared);
nbobjs -= disappeared;
}
indexes = malloc(nbobjs * sizeof(*indexes));
if (!indexes)
goto err;
unique_type = objs[0]->type;
for(i=1; i<nbobjs; i++)
if (objs[i]->type != unique_type) {
@@ -408,16 +392,108 @@ int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
/* heterogeneous types */
different_types = malloc(nbobjs * sizeof(*different_types));
if (!different_types)
goto err;
goto err_with_indexes;
for(i=0; i<nbobjs; i++)
different_types[i] = objs[i]->type;
} else {
/* homogeneous types */
different_types = NULL;
}
if (topology->grouping && (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP) && !different_types) {
dist->nbobjs = nbobjs;
dist->objs = objs;
dist->iflags |= HWLOC_INTERNAL_DIST_FLAG_OBJS_VALID;
dist->indexes = indexes;
dist->unique_type = unique_type;
dist->different_types = different_types;
dist->values = values;
if (different_types)
dist->kind |= HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES;
if (HWLOC_DIST_TYPE_USE_OS_INDEX(dist->unique_type)) {
for(i=0; i<nbobjs; i++)
dist->indexes[i] = objs[i]->os_index;
} else {
for(i=0; i<nbobjs; i++)
dist->indexes[i] = objs[i]->gp_index;
}
return 0;
err_with_indexes:
free(indexes);
err:
hwloc_backend_distances_add__cancel(dist);
return -1;
}
/* attach objects and values to a distance handle.
* on success, objs and values arrays are attached and will be freed with the distances.
* on failure, the handle is freed.
*/
static int
hwloc_backend_distances_add_values_by_index(hwloc_topology_t topology __hwloc_attribute_unused,
hwloc_backend_distances_add_handle_t handle,
unsigned nbobjs, hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, hwloc_uint64_t *indexes,
hwloc_uint64_t *values)
{
struct hwloc_internal_distances_s *dist = handle;
hwloc_obj_t *objs;
if (dist->nbobjs || !(dist->iflags & HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED)) {
/* target distances is already set */
errno = EINVAL;
goto err;
}
if (nbobjs < 2 || !indexes || !values || (unique_type == HWLOC_OBJ_TYPE_NONE && !different_types)) {
errno = EINVAL;
goto err;
}
objs = malloc(nbobjs * sizeof(*objs));
if (!objs)
goto err;
dist->nbobjs = nbobjs;
dist->objs = objs;
dist->indexes = indexes;
dist->unique_type = unique_type;
dist->different_types = different_types;
dist->values = values;
if (different_types)
dist->kind |= HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES;
return 0;
err:
hwloc_backend_distances_add__cancel(dist);
return -1;
}
/* commit a distances handle.
* on failure, the handle is freed with its objects and values arrays.
*/
int
hwloc_backend_distances_add_commit(hwloc_topology_t topology,
hwloc_backend_distances_add_handle_t handle,
unsigned long flags)
{
struct hwloc_internal_distances_s *dist = handle;
if (!dist->nbobjs || !(dist->iflags & HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED)) {
/* target distances not ready for commit */
errno = EINVAL;
goto err;
}
if ((flags & HWLOC_DISTANCES_ADD_FLAG_GROUP) && !dist->objs) {
/* cannot group without objects,
* and we don't group from XML anyway since the hwloc that generated the XML should have grouped already.
*/
errno = EINVAL;
goto err;
}
if (topology->grouping && (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP) && !dist->different_types) {
float full_accuracy = 0.f;
float *accuracies;
unsigned nbaccuracies;
@@ -431,26 +507,94 @@ int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
}
if (topology->grouping_verbose) {
unsigned j;
int gp = !HWLOC_DIST_TYPE_USE_OS_INDEX(unique_type);
fprintf(stderr, "Trying to group objects using distance matrix:\n");
fprintf(stderr, "%s", gp ? "gp_index" : "os_index");
for(j=0; j<nbobjs; j++)
fprintf(stderr, " % 5d", (int)(gp ? objs[j]->gp_index : objs[j]->os_index));
fprintf(stderr, "\n");
for(i=0; i<nbobjs; i++) {
fprintf(stderr, " % 5d", (int)(gp ? objs[i]->gp_index : objs[i]->os_index));
for(j=0; j<nbobjs; j++)
fprintf(stderr, " % 5lld", (long long) values[i*nbobjs + j]);
fprintf(stderr, "\n");
}
hwloc_internal_distances_print_matrix(dist);
}
hwloc__groups_by_distances(topology, nbobjs, objs, values,
kind, nbaccuracies, accuracies, 1 /* check the first matrice */);
hwloc__groups_by_distances(topology, dist->nbobjs, dist->objs, dist->values,
dist->kind, nbaccuracies, accuracies, 1 /* check the first matrix */);
}
return hwloc_internal_distances__add(topology, name, unique_type, different_types, nbobjs, objs, NULL, values, kind, iflags);
if (topology->last_dist)
topology->last_dist->next = dist;
else
topology->first_dist = dist;
dist->prev = topology->last_dist;
dist->next = NULL;
topology->last_dist = dist;
dist->iflags &= ~HWLOC_INTERNAL_DIST_FLAG_NOT_COMMITTED;
return 0;
err:
hwloc_backend_distances_add__cancel(dist);
return -1;
}
/* all-in-one backend function not exported to plugins, only used by XML for now */
int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, const char *name,
hwloc_obj_type_t unique_type, hwloc_obj_type_t *different_types, unsigned nbobjs, uint64_t *indexes, uint64_t *values,
unsigned long kind, unsigned long flags)
{
hwloc_backend_distances_add_handle_t handle;
int err;
handle = hwloc_backend_distances_add_create(topology, name, kind, 0);
if (!handle)
goto err;
err = hwloc_backend_distances_add_values_by_index(topology, handle,
nbobjs, unique_type, different_types, indexes,
values);
if (err < 0)
goto err;
/* arrays are now attached to the handle */
indexes = NULL;
different_types = NULL;
values = NULL;
err = hwloc_backend_distances_add_commit(topology, handle, flags);
if (err < 0)
goto err;
return 0;
err:
free(indexes);
free(different_types);
free(values);
return -1;
}
/* all-in-one backend function not exported to plugins, used by OS backends */
int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values,
unsigned long kind, unsigned long flags)
{
hwloc_backend_distances_add_handle_t handle;
int err;
handle = hwloc_backend_distances_add_create(topology, name, kind, 0);
if (!handle)
goto err;
err = hwloc_backend_distances_add_values(topology, handle,
nbobjs, objs,
values,
0);
if (err < 0)
goto err;
/* arrays are now attached to the handle */
objs = NULL;
values = NULL;
err = hwloc_backend_distances_add_commit(topology, handle, flags);
if (err < 0)
goto err;
return 0;
err:
free(objs);
@@ -458,44 +602,54 @@ int hwloc_internal_distances_add(hwloc_topology_t topology, const char *name,
return -1;
}
/********************************
* User API for adding distances
*/
#define HWLOC_DISTANCES_KIND_FROM_ALL (HWLOC_DISTANCES_KIND_FROM_OS|HWLOC_DISTANCES_KIND_FROM_USER)
#define HWLOC_DISTANCES_KIND_MEANS_ALL (HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH)
#define HWLOC_DISTANCES_KIND_ALL (HWLOC_DISTANCES_KIND_FROM_ALL|HWLOC_DISTANCES_KIND_MEANS_ALL)
#define HWLOC_DISTANCES_KIND_ALL (HWLOC_DISTANCES_KIND_FROM_ALL|HWLOC_DISTANCES_KIND_MEANS_ALL|HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES)
#define HWLOC_DISTANCES_ADD_FLAG_ALL (HWLOC_DISTANCES_ADD_FLAG_GROUP|HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE)
/* The actual function exported to the user
*/
int hwloc_distances_add(hwloc_topology_t topology,
unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values,
unsigned long kind, unsigned long flags)
void * hwloc_distances_add_create(hwloc_topology_t topology,
const char *name, unsigned long kind,
unsigned long flags)
{
if (!topology->is_loaded) {
errno = EINVAL;
return NULL;
}
if (topology->adopted_shmem_addr) {
errno = EPERM;
return NULL;
}
if ((kind & ~HWLOC_DISTANCES_KIND_ALL)
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_FROM_ALL) != 1
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_MEANS_ALL) != 1) {
errno = EINVAL;
return NULL;
}
return hwloc_backend_distances_add_create(topology, name, kind, flags);
}
int hwloc_distances_add_values(hwloc_topology_t topology,
void *handle,
unsigned nbobjs, hwloc_obj_t *objs,
hwloc_uint64_t *values,
unsigned long flags)
{
unsigned i;
uint64_t *_values;
hwloc_obj_t *_objs;
int err;
if (nbobjs < 2 || !objs || !values || !topology->is_loaded) {
errno = EINVAL;
return -1;
}
if (topology->adopted_shmem_addr) {
errno = EPERM;
return -1;
}
if ((kind & ~HWLOC_DISTANCES_KIND_ALL)
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_FROM_ALL) != 1
|| hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_MEANS_ALL) != 1
|| (flags & ~HWLOC_DISTANCES_ADD_FLAG_ALL)) {
errno = EINVAL;
return -1;
}
/* no strict need to check for duplicates, things shouldn't break */
for(i=1; i<nbobjs; i++)
if (!objs[i]) {
errno = EINVAL;
return -1;
goto out;
}
/* copy the input arrays and give them to the topology */
@@ -506,22 +660,78 @@ int hwloc_distances_add(hwloc_topology_t topology,
memcpy(_objs, objs, nbobjs*sizeof(hwloc_obj_t));
memcpy(_values, values, nbobjs*nbobjs*sizeof(*_values));
err = hwloc_internal_distances_add(topology, NULL, nbobjs, _objs, _values, kind, flags);
if (err < 0)
goto out; /* _objs and _values freed in hwloc_internal_distances_add() */
err = hwloc_backend_distances_add_values(topology, handle, nbobjs, _objs, _values, flags);
if (err < 0) {
/* handle was canceled inside hwloc_backend_distances_add_values */
handle = NULL;
goto out_with_arrays;
}
return 0;
out_with_arrays:
free(_objs);
free(_values);
out:
if (handle)
hwloc_backend_distances_add__cancel(handle);
return -1;
}
int
hwloc_distances_add_commit(hwloc_topology_t topology,
void *handle,
unsigned long flags)
{
int err;
if (flags & ~HWLOC_DISTANCES_ADD_FLAG_ALL) {
errno = EINVAL;
goto out;
}
err = hwloc_backend_distances_add_commit(topology, handle, flags);
if (err < 0) {
/* handle was canceled inside hwloc_backend_distances_add_commit */
handle = NULL;
goto out;
}
/* in case we added some groups, see if we need to reconnect */
hwloc_topology_reconnect(topology, 0);
return 0;
out_with_arrays:
free(_values);
free(_objs);
out:
if (handle)
hwloc_backend_distances_add__cancel(handle);
return -1;
}
/* deprecated all-in-one user function */
int hwloc_distances_add(hwloc_topology_t topology,
unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values,
unsigned long kind, unsigned long flags)
{
void *handle;
int err;
handle = hwloc_distances_add_create(topology, NULL, kind, 0);
if (!handle)
return -1;
err = hwloc_distances_add_values(topology, handle, nbobjs, objs, values, 0);
if (err < 0)
return -1;
err = hwloc_distances_add_commit(topology, handle, flags);
if (err < 0)
return -1;
return 0;
}
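For comparison with the backend path, the same create / add-values / commit flow from an application's point of view might look like the sketch below; the "AppLatency" name, the 2x2 matrix and the helper add_user_latency() are illustrative assumptions (note that, unlike the backend variant, hwloc_distances_add_values() copies the caller's arrays here):
#include "hwloc.h"
#include "hwloc/distances.h"
/* Sketch: declare a user-provided latency matrix between the first two packages. */
static int add_user_latency(hwloc_topology_t topology)
{
  hwloc_obj_t objs[2];
  hwloc_uint64_t values[4] = { 10, 20, 20, 10 };
  void *handle;
  objs[0] = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PACKAGE, 0);
  objs[1] = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PACKAGE, 1);
  if (!objs[0] || !objs[1])
    return -1;
  handle = hwloc_distances_add_create(topology, "AppLatency",
                                      HWLOC_DISTANCES_KIND_FROM_USER
                                      | HWLOC_DISTANCES_KIND_MEANS_LATENCY,
                                      0);
  if (!handle)
    return -1;
  if (hwloc_distances_add_values(topology, handle, 2, objs, values, 0) < 0)
    return -1; /* the handle was canceled internally */
  return hwloc_distances_add_commit(topology, handle, 0);
}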
/******************************************************
* Refresh objects in distances
*/
@@ -529,6 +739,7 @@ int hwloc_distances_add(hwloc_topology_t topology,
static void
hwloc_internal_distances_restrict(hwloc_obj_t *objs,
uint64_t *indexes,
hwloc_obj_type_t *different_types,
uint64_t *values,
unsigned nbobjs, unsigned disappeared)
{
@@ -550,6 +761,8 @@ hwloc_internal_distances_restrict(hwloc_obj_t *objs,
objs[newi] = objs[i];
if (indexes)
indexes[newi] = indexes[i];
if (different_types)
different_types[newi] = different_types[i];
newi++;
}
}
@@ -594,7 +807,7 @@ hwloc_internal_distances_refresh_one(hwloc_topology_t topology,
return -1;
if (disappeared) {
hwloc_internal_distances_restrict(objs, dist->indexes, dist->values, nbobjs, disappeared);
hwloc_internal_distances_restrict(objs, dist->indexes, dist->different_types, dist->values, nbobjs, disappeared);
dist->nbobjs -= disappeared;
}
@@ -647,7 +860,7 @@ struct hwloc_distances_container_s {
struct hwloc_distances_s distances;
};
#define HWLOC_DISTANCES_CONTAINER_OFFSET ((char*)&((struct hwloc_distances_container_s*)NULL)->distances - (char*)NULL)
#define HWLOC_DISTANCES_CONTAINER_OFFSET ((uintptr_t)(&((struct hwloc_distances_container_s*)NULL)->distances) - (uintptr_t)NULL)
#define HWLOC_DISTANCES_CONTAINER(_d) (struct hwloc_distances_container_s *) ( ((char*)_d) - HWLOC_DISTANCES_CONTAINER_OFFSET )
static struct hwloc_internal_distances_s *
@@ -1087,3 +1300,210 @@ hwloc__groups_by_distances(struct hwloc_topology *topology,
out_with_groupids:
free(groupids);
}
static int
hwloc__distances_transform_remove_null(struct hwloc_distances_s *distances)
{
hwloc_uint64_t *values = distances->values;
hwloc_obj_t *objs = distances->objs;
unsigned i, nb, nbobjs = distances->nbobjs;
hwloc_obj_type_t unique_type;
for(i=0, nb=0; i<nbobjs; i++)
if (objs[i])
nb++;
if (nb < 2) {
errno = EINVAL;
return -1;
}
if (nb == nbobjs)
return 0;
hwloc_internal_distances_restrict(objs, NULL, NULL, values, nbobjs, nbobjs-nb);
distances->nbobjs = nb;
/* update HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES for convenience */
unique_type = objs[0]->type;
for(i=1; i<nb; i++)
if (objs[i]->type != unique_type) {
unique_type = HWLOC_OBJ_TYPE_NONE;
break;
}
if (unique_type == HWLOC_OBJ_TYPE_NONE)
distances->kind |= HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES;
else
distances->kind &= ~HWLOC_DISTANCES_KIND_HETEROGENEOUS_TYPES;
return 0;
}
static int
hwloc__distances_transform_links(struct hwloc_distances_s *distances)
{
/* FIXME: we should look for the greatest common divisor
* but we just use the smallest positive value, that's enough for current use-cases.
* We'll return -1 in other cases.
*/
hwloc_uint64_t divider, *values = distances->values;
unsigned i, nbobjs = distances->nbobjs;
if (!(distances->kind & HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH)) {
errno = EINVAL;
return -1;
}
for(i=0; i<nbobjs; i++)
values[i*nbobjs+i] = 0;
/* find the smallest positive value */
divider = 0;
for(i=0; i<nbobjs*nbobjs; i++)
if (values[i] && (!divider || values[i] < divider))
divider = values[i];
if (!divider)
/* only zeroes? do nothing */
return 0;
/* check it divides all values */
for(i=0; i<nbobjs*nbobjs; i++)
if (values[i]%divider) {
errno = ENOENT;
return -1;
}
/* ok, now divide for real */
for(i=0; i<nbobjs*nbobjs; i++)
values[i] /= divider;
return 0;
}
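/* Illustrative example (not from the hwloc sources): with 3 objects and
 * off-diagonal bandwidths
 *    0 25 50
 *   25  0 75
 *   50 75  0
 * the smallest positive value is 25 and it divides every entry, so the
 * matrix becomes link counts
 *    0 1 2
 *    1 0 3
 *    2 3 0
 * With entries 25 and 40 instead, 25 would not divide 40 and the transform
 * would fail with ENOENT, since only a common divisor of all values works.
 */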
static __hwloc_inline int is_nvswitch(hwloc_obj_t obj)
{
return obj && obj->subtype && !strcmp(obj->subtype, "NVSwitch");
}
static int
hwloc__distances_transform_merge_switch_ports(hwloc_topology_t topology,
struct hwloc_distances_s *distances)
{
struct hwloc_internal_distances_s *dist = hwloc__internal_distances_from_public(topology, distances);
hwloc_obj_t *objs = distances->objs;
hwloc_uint64_t *values = distances->values;
unsigned first, i, j, nbobjs = distances->nbobjs;
if (strcmp(dist->name, "NVLinkBandwidth")) {
errno = EINVAL;
return -1;
}
/* find the first port */
first = (unsigned) -1;
for(i=0; i<nbobjs; i++)
if (is_nvswitch(objs[i])) {
first = i;
break;
}
if (first == (unsigned)-1) {
errno = ENOENT;
return -1;
}
for(j=i+1; j<nbobjs; j++) {
if (is_nvswitch(objs[j])) {
/* another port, merge it */
unsigned k;
for(k=0; k<nbobjs; k++) {
if (k==i || k==j)
continue;
values[k*nbobjs+i] += values[k*nbobjs+j];
values[k*nbobjs+j] = 0;
values[i*nbobjs+k] += values[j*nbobjs+k];
values[j*nbobjs+k] = 0;
}
values[i*nbobjs+i] += values[j*nbobjs+j];
values[j*nbobjs+j] = 0;
}
/* the caller will also call REMOVE_NULL to remove other ports */
objs[j] = NULL;
}
return 0;
}
static int
hwloc__distances_transform_transitive_closure(hwloc_topology_t topology,
struct hwloc_distances_s *distances)
{
struct hwloc_internal_distances_s *dist = hwloc__internal_distances_from_public(topology, distances);
hwloc_obj_t *objs = distances->objs;
hwloc_uint64_t *values = distances->values;
unsigned nbobjs = distances->nbobjs;
unsigned i, j, k;
if (strcmp(dist->name, "NVLinkBandwidth")) {
errno = EINVAL;
return -1;
}
for(i=0; i<nbobjs; i++) {
hwloc_uint64_t bw_i2sw = 0;
if (is_nvswitch(objs[i]))
continue;
/* count our BW to the switch */
for(k=0; k<nbobjs; k++)
if (is_nvswitch(objs[k]))
bw_i2sw += values[i*nbobjs+k];
for(j=0; j<nbobjs; j++) {
hwloc_uint64_t bw_sw2j = 0;
if (i == j || is_nvswitch(objs[j]))
continue;
/* count our BW from the switch */
for(k=0; k<nbobjs; k++)
if (is_nvswitch(objs[k]))
bw_sw2j += values[k*nbobjs+j];
/* bandwidth from i to j is now min(i2sw,sw2j) */
values[i*nbobjs+j] = bw_i2sw > bw_sw2j ? bw_sw2j : bw_i2sw;
}
}
return 0;
}
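/* Illustrative example (not from the hwloc sources): if GPU0 sends a total
 * of bw_i2sw = 100 to the (merged) switch and GPU1 receives bw_sw2j = 80
 * from it, the closure sets the GPU0->GPU1 entry to min(100, 80) = 80,
 * i.e. the bottleneck of the two hops through the switch.
 */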
int
hwloc_distances_transform(hwloc_topology_t topology,
struct hwloc_distances_s *distances,
enum hwloc_distances_transform_e transform,
void *transform_attr,
unsigned long flags)
{
if (flags || transform_attr) {
errno = EINVAL;
return -1;
}
switch (transform) {
case HWLOC_DISTANCES_TRANSFORM_REMOVE_NULL:
return hwloc__distances_transform_remove_null(distances);
case HWLOC_DISTANCES_TRANSFORM_LINKS:
return hwloc__distances_transform_links(distances);
case HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS:
{
int err;
err = hwloc__distances_transform_merge_switch_ports(topology, distances);
if (!err)
err = hwloc__distances_transform_remove_null(distances);
return err;
}
case HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE:
return hwloc__distances_transform_transitive_closure(topology, distances);
default:
errno = EINVAL;
return -1;
}
}
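/* Illustrative usage sketch (not from the hwloc sources): a consumer of the
 * NVML "NVLinkBandwidth" matrix would typically merge switch ports and then
 * close the matrix over the switch, e.g.:
 *
 *   unsigned nr = 1;
 *   struct hwloc_distances_s *dist;
 *   if (!hwloc_distances_get_by_name(topology, "NVLinkBandwidth", &nr, &dist, 0) && nr) {
 *     hwloc_distances_transform(topology, dist, HWLOC_DISTANCES_TRANSFORM_MERGE_SWITCH_PORTS, NULL, 0);
 *     hwloc_distances_transform(topology, dist, HWLOC_DISTANCES_TRANSFORM_TRANSITIVE_CLOSURE, NULL, 0);
 *     hwloc_distances_release(topology, dist);
 *   }
 */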

@@ -1,11 +1,12 @@
/*
* Copyright © 2020 Inria. All rights reserved.
* Copyright © 2020-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
#include "private/autogen/config.h"
#include "hwloc.h"
#include "private/private.h"
#include "private/debug.h"
/*****************************
@@ -49,36 +50,51 @@ hwloc__setup_memattr(struct hwloc_internal_memattr_s *imattr,
void
hwloc_internal_memattrs_prepare(struct hwloc_topology *topology)
{
#define NR_DEFAULT_MEMATTRS 4
topology->memattrs = malloc(NR_DEFAULT_MEMATTRS * sizeof(*topology->memattrs));
topology->memattrs = malloc(HWLOC_MEMATTR_ID_MAX * sizeof(*topology->memattrs));
if (!topology->memattrs)
return;
assert(HWLOC_MEMATTR_ID_CAPACITY < NR_DEFAULT_MEMATTRS);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_CAPACITY],
(char *) "Capacity",
HWLOC_MEMATTR_FLAG_HIGHER_FIRST,
HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);
assert(HWLOC_MEMATTR_ID_LOCALITY < NR_DEFAULT_MEMATTRS);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LOCALITY],
(char *) "Locality",
HWLOC_MEMATTR_FLAG_LOWER_FIRST,
HWLOC_IMATTR_FLAG_STATIC_NAME|HWLOC_IMATTR_FLAG_CONVENIENCE);
assert(HWLOC_MEMATTR_ID_BANDWIDTH < NR_DEFAULT_MEMATTRS);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_BANDWIDTH],
(char *) "Bandwidth",
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
assert(HWLOC_MEMATTR_ID_LATENCY < NR_DEFAULT_MEMATTRS);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_READ_BANDWIDTH],
(char *) "ReadBandwidth",
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_WRITE_BANDWIDTH],
(char *) "WriteBandwidth",
HWLOC_MEMATTR_FLAG_HIGHER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_LATENCY],
(char *) "Latency",
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
topology->nr_memattrs = NR_DEFAULT_MEMATTRS;
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_READ_LATENCY],
(char *) "ReadLatency",
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
hwloc__setup_memattr(&topology->memattrs[HWLOC_MEMATTR_ID_WRITE_LATENCY],
(char *) "WriteLatency",
HWLOC_MEMATTR_FLAG_LOWER_FIRST|HWLOC_MEMATTR_FLAG_NEED_INITIATOR,
HWLOC_IMATTR_FLAG_STATIC_NAME);
topology->nr_memattrs = HWLOC_MEMATTR_ID_MAX;
}
static void
@@ -127,6 +143,8 @@ hwloc_internal_memattrs_dup(struct hwloc_topology *new, struct hwloc_topology *o
struct hwloc_internal_memattr_s *imattrs;
hwloc_memattr_id_t id;
/* old->nr_memattrs is always > 0 thanks to default memattrs */
imattrs = hwloc_tma_malloc(tma, old->nr_memattrs * sizeof(*imattrs));
if (!imattrs)
return -1;
@@ -1195,3 +1213,214 @@ hwloc_get_local_numanode_objs(hwloc_topology_t topology,
*nrp = i;
return 0;
}
/**************************************
* Using memattrs to identify HBM/DRAM
*/
struct hwloc_memory_tier_s {
hwloc_obj_t node;
uint64_t local_bw;
enum hwloc_memory_tier_type_e {
/* warning: the order is important for guess_memory_tiers() after qsort() */
HWLOC_MEMORY_TIER_UNKNOWN,
HWLOC_MEMORY_TIER_DRAM,
HWLOC_MEMORY_TIER_HBM,
HWLOC_MEMORY_TIER_SPM, /* Specific-Purpose Memory is usually HBM, we'll use BW to confirm */
HWLOC_MEMORY_TIER_NVM,
HWLOC_MEMORY_TIER_GPU,
} type;
};
static int compare_tiers(const void *_a, const void *_b)
{
const struct hwloc_memory_tier_s *a = _a, *b = _b;
/* sort by type of tier first */
if (a->type != b->type)
return a->type - b->type;
/* then by bandwidth */
if (a->local_bw > b->local_bw)
return -1;
else if (a->local_bw < b->local_bw)
return 1;
return 0;
}
int
hwloc_internal_memattrs_guess_memory_tiers(hwloc_topology_t topology)
{
struct hwloc_internal_memattr_s *imattr;
struct hwloc_memory_tier_s *tiers;
unsigned i, j, n;
const char *env;
int spm_is_hbm = -1; /* -1 will guess from BW, 0 no, 1 forced */
int mark_dram = 1;
unsigned first_spm, first_nvm;
hwloc_uint64_t max_unknown_bw, min_spm_bw;
env = getenv("HWLOC_MEMTIERS_GUESS");
if (env) {
if (!strcmp(env, "none")) {
return 0;
} else if (!strcmp(env, "default")) {
/* nothing */
} else if (!strcmp(env, "spm_is_hbm")) {
hwloc_debug("Assuming SPM-tier is HBM, ignore bandwidth\n");
spm_is_hbm = 1;
} else if (HWLOC_SHOW_CRITICAL_ERRORS()) {
fprintf(stderr, "hwloc: Failed to recognize HWLOC_MEMTIERS_GUESS value %s\n", env);
}
}
imattr = &topology->memattrs[HWLOC_MEMATTR_ID_BANDWIDTH];
if (!(imattr->iflags & HWLOC_IMATTR_FLAG_CACHE_VALID))
hwloc__imattr_refresh(topology, imattr);
n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
assert(n);
tiers = malloc(n * sizeof(*tiers));
if (!tiers)
return -1;
for(i=0; i<n; i++) {
hwloc_obj_t node;
const char *daxtype;
struct hwloc_internal_location_s iloc;
struct hwloc_internal_memattr_target_s *imtg = NULL;
struct hwloc_internal_memattr_initiator_s *imi;
node = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
assert(node);
tiers[i].node = node;
/* defaults */
tiers[i].type = HWLOC_MEMORY_TIER_UNKNOWN;
tiers[i].local_bw = 0; /* unknown */
daxtype = hwloc_obj_get_info_by_name(node, "DAXType");
/* mark NVM, SPM and GPU nodes */
if (daxtype && !strcmp(daxtype, "NVM"))
tiers[i].type = HWLOC_MEMORY_TIER_NVM;
if (daxtype && !strcmp(daxtype, "SPM"))
tiers[i].type = HWLOC_MEMORY_TIER_SPM;
if (node->subtype && !strcmp(node->subtype, "GPUMemory"))
tiers[i].type = HWLOC_MEMORY_TIER_GPU;
if (spm_is_hbm == -1) {
for(j=0; j<imattr->nr_targets; j++)
if (imattr->targets[j].obj == node) {
imtg = &imattr->targets[j];
break;
}
if (imtg && !hwloc_bitmap_iszero(node->cpuset)) {
iloc.type = HWLOC_LOCATION_TYPE_CPUSET;
iloc.location.cpuset = node->cpuset;
imi = hwloc__memattr_target_get_initiator(imtg, &iloc, 0);
if (imi)
tiers[i].local_bw = imi->value;
}
}
}
/* sort tiers */
qsort(tiers, n, sizeof(*tiers), compare_tiers);
hwloc_debug("Sorting memory tiers...\n");
for(i=0; i<n; i++)
hwloc_debug(" tier %u = node L#%u P#%u with tier type %d and local BW #%llu\n",
i,
tiers[i].node->logical_index, tiers[i].node->os_index,
tiers[i].type, (unsigned long long) tiers[i].local_bw);
/* now we have UNKNOWN tiers (sorted by BW), then SPM tiers (sorted by BW), then NVM, then GPU */
/* iterate over UNKNOWN tiers, and find their BW */
for(i=0; i<n; i++) {
if (tiers[i].type > HWLOC_MEMORY_TIER_UNKNOWN)
break;
}
first_spm = i;
/* get max BW from first */
if (first_spm > 0)
max_unknown_bw = tiers[0].local_bw;
else
max_unknown_bw = 0;
/* there are no DRAM or HBM tiers yet */
/* iterate over SPM tiers, and find their BW */
for(i=first_spm; i<n; i++) {
if (tiers[i].type > HWLOC_MEMORY_TIER_SPM)
break;
}
first_nvm = i;
/* get min BW from last */
if (first_nvm > first_spm)
min_spm_bw = tiers[first_nvm-1].local_bw;
else
min_spm_bw = 0;
/* FIXME: if there's more than 10% between some sets of nodes inside a tier, split it? */
/* FIXME: if there are cpuset-intersecting nodes in same tier, abort? */
if (spm_is_hbm == -1) {
/* if we have BW for all SPM and UNKNOWN nodes,
* and all SPM BW are at least 2x higher than all UNKNOWN BW,
* assume SPM is HBM and UNKNOWN is DRAM.
*/
hwloc_debug("UNKNOWN-memory-tier max bandwidth %llu\n", (unsigned long long) max_unknown_bw);
hwloc_debug("SPM-memory-tier min bandwidth %llu\n", (unsigned long long) min_spm_bw);
if (max_unknown_bw > 0 && min_spm_bw > 0 && max_unknown_bw*2 < min_spm_bw) {
hwloc_debug("assuming SPM means HBM and !SPM means DRAM since bandwidths are very different\n");
spm_is_hbm = 1;
} else {
hwloc_debug("cannot assume SPM means HBM\n");
spm_is_hbm = 0;
}
}
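/* Illustrative numbers (not from the hwloc sources): with UNKNOWN (DRAM-like)
 * nodes measured around 100 (max_unknown_bw = 100) and SPM nodes around 800
 * (min_spm_bw = 800), 100*2 < 800 holds and SPM is assumed to be HBM.
 * With SPM at only 150, 100*2 < 150 fails and no assumption is made.
 */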
if (spm_is_hbm) {
for(i=0; i<first_spm; i++)
tiers[i].type = HWLOC_MEMORY_TIER_DRAM;
for(i=first_spm; i<first_nvm; i++)
tiers[i].type = HWLOC_MEMORY_TIER_HBM;
}
if (first_spm == n)
mark_dram = 0;
/* now apply subtypes */
for(i=0; i<n; i++) {
const char *type = NULL;
if (tiers[i].node->subtype) /* don't overwrite the existing subtype */
continue;
switch (tiers[i].type) {
case HWLOC_MEMORY_TIER_DRAM:
if (mark_dram)
type = "DRAM";
break;
case HWLOC_MEMORY_TIER_HBM:
type = "HBM";
break;
case HWLOC_MEMORY_TIER_SPM:
type = "SPM";
break;
case HWLOC_MEMORY_TIER_NVM:
type = "NVM";
break;
default:
/* GPU memory is already marked with subtype="GPUMemory",
* UNKNOWN doesn't deserve any subtype
*/
break;
}
if (type) {
hwloc_debug("Marking node L#%u P#%u as %s\n", tiers[i].node->logical_index, tiers[i].node->os_index, type);
tiers[i].node->subtype = strdup(type);
}
}
free(tiers);
return 0;
}

@@ -1,5 +1,5 @@
/*
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -119,6 +119,13 @@ hwloc_pci_discovery_init(struct hwloc_topology *topology)
topology->pci_forced_locality = NULL;
topology->first_pci_locality = topology->last_pci_locality = NULL;
#define HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A (1ULL<<0)
#define HWLOC_PCI_LOCALITY_QUIRK_FAKE (1ULL<<62)
topology->pci_locality_quirks = (uint64_t) -1;
/* -1 is unknown, 0 is disabled, >0 is bitmask of enabled quirks.
* bit 63 should remain unused so that -1 is inaccessible as a bitmask.
*/
}
void
@@ -146,8 +153,9 @@ hwloc_pci_discovery_prepare(struct hwloc_topology *topology)
}
free(buffer);
} else {
fprintf(stderr, "Ignoring HWLOC_PCI_LOCALITY file `%s' too large (%lu bytes)\n",
env, (unsigned long) st.st_size);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc/pci: Ignoring HWLOC_PCI_LOCALITY file `%s' too large (%lu bytes)\n",
env, (unsigned long) st.st_size);
}
}
close(fd);
@@ -206,8 +214,11 @@ hwloc_pci_traverse_print_cb(void * cbdata __hwloc_attribute_unused,
else
hwloc_debug("%s Bridge [%04x:%04x]", busid,
pcidev->attr->pcidev.vendor_id, pcidev->attr->pcidev.device_id);
hwloc_debug(" to %04x:[%02x:%02x]\n",
pcidev->attr->bridge.downstream.pci.domain, pcidev->attr->bridge.downstream.pci.secondary_bus, pcidev->attr->bridge.downstream.pci.subordinate_bus);
if (pcidev->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI)
hwloc_debug(" to %04x:[%02x:%02x]\n",
pcidev->attr->bridge.downstream.pci.domain, pcidev->attr->bridge.downstream.pci.secondary_bus, pcidev->attr->bridge.downstream.pci.subordinate_bus);
else
assert(0);
} else
hwloc_debug("%s Device [%04x:%04x (%04x:%04x) rev=%02x class=%04x]\n", busid,
pcidev->attr->pcidev.vendor_id, pcidev->attr->pcidev.device_id,
@@ -251,11 +262,11 @@ hwloc_pci_compare_busids(struct hwloc_obj *a, struct hwloc_obj *b)
if (a->attr->pcidev.domain > b->attr->pcidev.domain)
return HWLOC_PCI_BUSID_HIGHER;
if (a->type == HWLOC_OBJ_BRIDGE
if (a->type == HWLOC_OBJ_BRIDGE && a->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
&& b->attr->pcidev.bus >= a->attr->bridge.downstream.pci.secondary_bus
&& b->attr->pcidev.bus <= a->attr->bridge.downstream.pci.subordinate_bus)
return HWLOC_PCI_BUSID_SUPERSET;
if (b->type == HWLOC_OBJ_BRIDGE
if (b->type == HWLOC_OBJ_BRIDGE && b->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
&& a->attr->pcidev.bus >= b->attr->bridge.downstream.pci.secondary_bus
&& a->attr->pcidev.bus <= b->attr->bridge.downstream.pci.subordinate_bus)
return HWLOC_PCI_BUSID_INCLUDED;
@@ -302,7 +313,7 @@ hwloc_pci_add_object(struct hwloc_obj *parent, struct hwloc_obj **parent_io_firs
new->next_sibling = *curp;
*curp = new;
new->parent = parent;
if (new->type == HWLOC_OBJ_BRIDGE) {
if (new->type == HWLOC_OBJ_BRIDGE && new->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI) {
/* look at remaining siblings and move some below new */
childp = &new->io_first_child;
curp = &new->next_sibling;
@@ -329,7 +340,7 @@ hwloc_pci_add_object(struct hwloc_obj *parent, struct hwloc_obj **parent_io_firs
}
case HWLOC_PCI_BUSID_EQUAL: {
static int reported = 0;
if (!reported && !hwloc_hide_errors()) {
if (!reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
fprintf(stderr, "*********************************************************\n");
fprintf(stderr, "* hwloc %s received invalid PCI information.\n", HWLOC_VERSION);
fprintf(stderr, "*\n");
@@ -411,7 +422,7 @@ hwloc_pcidisc_add_hostbridges(struct hwloc_topology *topology,
dstnextp = &child->next_sibling;
/* compute hostbridge secondary/subordinate buses */
if (child->type == HWLOC_OBJ_BRIDGE
if (child->type == HWLOC_OBJ_BRIDGE && child->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
&& child->attr->bridge.downstream.pci.subordinate_bus > current_subordinate)
current_subordinate = child->attr->bridge.downstream.pci.subordinate_bus;
@@ -438,13 +449,90 @@ hwloc_pcidisc_add_hostbridges(struct hwloc_topology *topology,
return new;
}
static struct hwloc_obj *
hwloc_pci_fixup_busid_parent(struct hwloc_topology *topology __hwloc_attribute_unused,
struct hwloc_pcidev_attr_s *busid __hwloc_attribute_unused,
struct hwloc_obj *parent __hwloc_attribute_unused)
/* return 1 if a quirk was applied */
static int
hwloc__pci_find_busid_parent_quirk(struct hwloc_topology *topology,
struct hwloc_pcidev_attr_s *busid,
hwloc_cpuset_t cpuset)
{
/* no quirk for now */
return parent;
if (topology->pci_locality_quirks == (uint64_t)-1 /* unknown */) {
const char *dmi_board_name, *env;
/* first invocation, detect which quirks are needed */
topology->pci_locality_quirks = 0; /* no quirk yet */
dmi_board_name = hwloc_obj_get_info_by_name(hwloc_get_root_obj(topology), "DMIBoardName");
if (dmi_board_name && !strcmp(dmi_board_name, "HPE CRAY EX235A")) {
hwloc_debug("enabling for PCI locality quirk for HPE Cray EX235A\n");
topology->pci_locality_quirks |= HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A;
}
env = getenv("HWLOC_PCI_LOCALITY_QUIRK_FAKE");
if (env && atoi(env)) {
hwloc_debug("enabling for PCI locality fake quirk (attaching everything to last PU)\n");
topology->pci_locality_quirks |= HWLOC_PCI_LOCALITY_QUIRK_FAKE;
}
}
if (topology->pci_locality_quirks & HWLOC_PCI_LOCALITY_QUIRK_FAKE) {
unsigned last = hwloc_bitmap_last(hwloc_topology_get_topology_cpuset(topology));
hwloc_bitmap_set(cpuset, last);
return 1;
}
if (topology->pci_locality_quirks & HWLOC_PCI_LOCALITY_QUIRK_CRAY_EX235A) {
/* AMD Trento has xGMI ports connected to individual CCDs (8 cores + L3)
* instead of NUMA nodes (pairs of CCDs within Trento) as is usual in AMD EPYC CPUs.
* This is not described by the ACPI tables, hence we need to manually hardwire
* the xGMI locality for the (currently single) server that uses that CPU.
* It's not clear if ACPI tables can/will ever be fixed (would require one initiator
* proximity domain per CCD), or if Linux can/will work around the issue.
*/
if (busid->domain == 0) {
if (busid->bus >= 0xd0 && busid->bus <= 0xd1) {
hwloc_bitmap_set_range(cpuset, 0, 7);
hwloc_bitmap_set_range(cpuset, 64, 71);
return 1;
}
if (busid->bus >= 0xd4 && busid->bus <= 0xd6) {
hwloc_bitmap_set_range(cpuset, 8, 15);
hwloc_bitmap_set_range(cpuset, 72, 79);
return 1;
}
if (busid->bus >= 0xc8 && busid->bus <= 0xc9) {
hwloc_bitmap_set_range(cpuset, 16, 23);
hwloc_bitmap_set_range(cpuset, 80, 87);
return 1;
}
if (busid->bus >= 0xcc && busid->bus <= 0xce) {
hwloc_bitmap_set_range(cpuset, 24, 31);
hwloc_bitmap_set_range(cpuset, 88, 95);
return 1;
}
if (busid->bus >= 0xd8 && busid->bus <= 0xd9) {
hwloc_bitmap_set_range(cpuset, 32, 39);
hwloc_bitmap_set_range(cpuset, 96, 103);
return 1;
}
if (busid->bus >= 0xdc && busid->bus <= 0xde) {
hwloc_bitmap_set_range(cpuset, 40, 47);
hwloc_bitmap_set_range(cpuset, 104, 111);
return 1;
}
if (busid->bus >= 0xc0 && busid->bus <= 0xc1) {
hwloc_bitmap_set_range(cpuset, 48, 55);
hwloc_bitmap_set_range(cpuset, 112, 119);
return 1;
}
if (busid->bus >= 0xc4 && busid->bus <= 0xc6) {
hwloc_bitmap_set_range(cpuset, 56, 63);
hwloc_bitmap_set_range(cpuset, 120, 127);
return 1;
}
}
}
return 0;
}
static struct hwloc_obj *
@@ -453,7 +541,7 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
hwloc_obj_t parent;
int forced = 0;
int noquirks = 0;
int noquirks = 0, got_quirked = 0;
unsigned i;
int err;
@@ -486,7 +574,8 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
if (env) {
static int reported = 0;
if (!topology->pci_has_forced_locality && !reported) {
fprintf(stderr, "Environment variable %s is deprecated, please use HWLOC_PCI_LOCALITY instead.\n", env);
if (HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc/pci: Environment variable %s is deprecated, please use HWLOC_PCI_LOCALITY instead.\n", env);
reported = 1;
}
if (*env) {
@@ -500,7 +589,13 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
}
}
if (!forced) {
if (!forced && !noquirks && topology->pci_locality_quirks /* either quirks are unknown yet, or some are enabled */) {
err = hwloc__pci_find_busid_parent_quirk(topology, busid, cpuset);
if (err > 0)
got_quirked = 1;
}
if (!forced && !got_quirked) {
/* get the cpuset by asking the backend that provides the relevant hook, if any. */
struct hwloc_backend *backend = topology->get_pci_busid_cpuset_backend;
if (backend)
@@ -515,11 +610,7 @@ hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcide
hwloc_debug_bitmap(" will attach PCI bus to cpuset %s\n", cpuset);
parent = hwloc_find_insert_io_parent_by_complete_cpuset(topology, cpuset);
if (parent) {
if (!noquirks)
/* We found a valid parent. Check that the OS didn't report invalid locality */
parent = hwloc_pci_fixup_busid_parent(topology, busid, parent);
} else {
if (!parent) {
/* Fallback to root */
parent = hwloc_get_root_obj(topology);
}
@@ -565,7 +656,7 @@ hwloc_pcidisc_tree_attach(struct hwloc_topology *topology, struct hwloc_obj *tre
assert(pciobj->type == HWLOC_OBJ_PCI_DEVICE
|| (pciobj->type == HWLOC_OBJ_BRIDGE && pciobj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI));
if (obj->type == HWLOC_OBJ_BRIDGE) {
if (obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI) {
domain = obj->attr->bridge.downstream.pci.domain;
bus_min = obj->attr->bridge.downstream.pci.secondary_bus;
bus_max = obj->attr->bridge.downstream.pci.subordinate_bus;
@@ -800,18 +891,28 @@ hwloc_pcidisc_find_linkspeed(const unsigned char *config,
memcpy(&linksta, &config[offset + HWLOC_PCI_EXP_LNKSTA], 4);
speed = linksta & HWLOC_PCI_EXP_LNKSTA_SPEED; /* PCIe generation */
width = (linksta & HWLOC_PCI_EXP_LNKSTA_WIDTH) >> 4; /* how many lanes */
/* PCIe Gen1 = 2.5GT/s signal-rate per lane with 8/10 encoding = 0.25GB/s data-rate per lane
* PCIe Gen2 = 5 GT/s signal-rate per lane with 8/10 encoding = 0.5 GB/s data-rate per lane
* PCIe Gen3 = 8 GT/s signal-rate per lane with 128/130 encoding = 1 GB/s data-rate per lane
* PCIe Gen4 = 16 GT/s signal-rate per lane with 128/130 encoding = 2 GB/s data-rate per lane
* PCIe Gen5 = 32 GT/s signal-rate per lane with 128/130 encoding = 4 GB/s data-rate per lane
/*
* These are single-direction bandwidths only.
*
* Gen1 used NRZ with 8/10 encoding.
* PCIe Gen1 = 2.5GT/s signal-rate per lane x 8/10 = 0.25GB/s data-rate per lane
* PCIe Gen2 = 5 GT/s signal-rate per lane x 8/10 = 0.5 GB/s data-rate per lane
* Gen3 switched to NRZ with 128/130 encoding.
* PCIe Gen3 = 8 GT/s signal-rate per lane x 128/130 = 1 GB/s data-rate per lane
* PCIe Gen4 = 16 GT/s signal-rate per lane x 128/130 = 2 GB/s data-rate per lane
* PCIe Gen5 = 32 GT/s signal-rate per lane x 128/130 = 4 GB/s data-rate per lane
* Gen6 switched to PAM4 signaling with 242/256 FLIT encoding (242B payload protected by 8B CRC + 6B FEC).
* PCIe Gen6 = 64 GT/s signal-rate per lane x 242/256 = 8 GB/s data-rate per lane
* PCIe Gen7 = 128GT/s signal-rate per lane x 242/256 = 16 GB/s data-rate per lane
*/
/* lanespeed in Gbit/s */
if (speed <= 2)
lanespeed = 2.5f * speed * 0.8f;
else if (speed <= 5)
lanespeed = 8.0f * (1<<(speed-3)) * 128/130;
else
lanespeed = 8.0f * (1<<(speed-3)) * 128/130; /* assume Gen6 will be 64 GT/s and so on */
lanespeed = 8.0f * (1<<(speed-3)) * 242/256; /* assume Gen8 will be 256 GT/s and so on */
/* linkspeed in GB/s */
*linkspeed = lanespeed * width / 8;
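/* Illustrative computation (not from the hwloc sources): for a Gen4 x16 link,
 * speed=4 and width=16, so lanespeed = 8.0 * (1<<1) * 128/130 ~= 15.75 Gbit/s
 * per lane and *linkspeed = 15.75 * 16 / 8 ~= 31.5 GB/s, matching the
 * ~2 GB/s-per-lane figure in the table above times 16 lanes.
 */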
@@ -938,6 +1039,7 @@ hwloc_pci_class_string(unsigned short class_id)
switch (class_id) {
case 0x0500: return "RAM";
case 0x0501: return "Flash";
case 0x0502: return "CXLMem";
}
return "Memory";
case 0x06:

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2010 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -323,17 +323,29 @@ hwloc_synthetic_parse_memory_attr(const char *attr, const char **endp)
hwloc_uint64_t size;
size = strtoull(attr, (char **) &endptr, 0);
if (!hwloc_strncasecmp(endptr, "TB", 2)) {
size *= 1000ULL*1000ULL*1000ULL*1000ULL;
endptr += 2;
} else if (!hwloc_strncasecmp(endptr, "TiB", 3)) {
size <<= 40;
endptr += 2;
endptr += 3;
} else if (!hwloc_strncasecmp(endptr, "GB", 2)) {
size *= 1000ULL*1000ULL*1000ULL;
endptr += 2;
} else if (!hwloc_strncasecmp(endptr, "GiB", 3)) {
size <<= 30;
endptr += 2;
endptr += 3;
} else if (!hwloc_strncasecmp(endptr, "MB", 2)) {
size *= 1000ULL*1000ULL;
endptr += 2;
} else if (!hwloc_strncasecmp(endptr, "MiB", 3)) {
size <<= 20;
endptr += 2;
endptr += 3;
} else if (!hwloc_strncasecmp(endptr, "kB", 2)) {
size <<= 10;
size *= 1000ULL;
endptr += 2;
} else if (!hwloc_strncasecmp(endptr, "kiB", 3)) {
size <<= 10;
endptr += 3;
}
*endp = endptr;
return size;
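/* Illustrative examples (not from the hwloc sources): with the units above,
 * "2GB" parses to 2*1000^3 = 2000000000 bytes while "2GiB" parses to
 * 2<<30 = 2147483648 bytes, and "512kB" is 512000 bytes while "512kiB" is
 * 524288 bytes; values without a recognized suffix are taken as bytes.
 */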
@@ -802,15 +814,15 @@ hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data,
} else if (hwloc__obj_type_is_cache(type)) {
if (!curlevel->attr.memorysize) {
if (1 == curlevel->attr.depth)
/* 32Kb in L1 */
/* 32KiB in L1 */
curlevel->attr.memorysize = 32*1024;
else
/* *4 at each level, starting from 1MB for L2, unified */
/* *4 at each level, starting from 1MiB for L2, unified */
curlevel->attr.memorysize = 256ULL*1024 << (2*curlevel->attr.depth);
}
} else if (type == HWLOC_OBJ_NUMANODE && !curlevel->attr.memorysize) {
/* 1GB in memory nodes. */
/* 1GiB in memory nodes. */
curlevel->attr.memorysize = 1024*1024*1024;
}

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -11,7 +11,9 @@
#include "private/autogen/config.h"
#include "hwloc.h"
#include "hwloc/windows.h"
#include "private/private.h"
#include "private/windows.h" /* must be before windows.h */
#include "private/debug.h"
#include <windows.h>
@@ -64,26 +66,6 @@ typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP {
# endif /* HAVE_RELATIONPROCESSORPACKAGE */
#endif /* HAVE_LOGICAL_PROCESSOR_RELATIONSHIP */
#ifndef HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION
typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
ULONG_PTR ProcessorMask;
LOGICAL_PROCESSOR_RELATIONSHIP Relationship;
_ANONYMOUS_UNION
union {
struct {
BYTE flags;
} ProcessorCore;
struct {
DWORD NodeNumber;
} NumaNode;
CACHE_DESCRIPTOR Cache;
ULONGLONG Reserved[2];
} DUMMYUNIONNAME;
} SYSTEM_LOGICAL_PROCESSOR_INFORMATION, *PSYSTEM_LOGICAL_PROCESSOR_INFORMATION;
#endif
/* Extended interface, for group support */
#ifndef HAVE_GROUP_AFFINITY
typedef struct _GROUP_AFFINITY {
KAFFINITY Mask;
@@ -92,35 +74,40 @@ typedef struct _GROUP_AFFINITY {
} GROUP_AFFINITY, *PGROUP_AFFINITY;
#endif
#ifndef HAVE_PROCESSOR_RELATIONSHIP
/* always use our own structure because the EfficiencyClass field didn't exist before Win10 */
typedef struct HWLOC_PROCESSOR_RELATIONSHIP {
BYTE Flags;
BYTE EfficiencyClass; /* for RelationProcessorCore, higher means greater performance but less efficiency, only available in Win10+ */
BYTE EfficiencyClass; /* for RelationProcessorCore, higher means greater performance but less efficiency */
BYTE Reserved[20];
WORD GroupCount;
GROUP_AFFINITY GroupMask[ANYSIZE_ARRAY];
} PROCESSOR_RELATIONSHIP, *PPROCESSOR_RELATIONSHIP;
#endif
} HWLOC_PROCESSOR_RELATIONSHIP;
#ifndef HAVE_NUMA_NODE_RELATIONSHIP
typedef struct _NUMA_NODE_RELATIONSHIP {
/* always use our own structure because the GroupCount and GroupMasks fields didn't exist in some Win10 */
typedef struct HWLOC_NUMA_NODE_RELATIONSHIP {
DWORD NodeNumber;
BYTE Reserved[20];
GROUP_AFFINITY GroupMask;
} NUMA_NODE_RELATIONSHIP, *PNUMA_NODE_RELATIONSHIP;
#endif
BYTE Reserved[18];
WORD GroupCount;
_ANONYMOUS_UNION
union {
GROUP_AFFINITY GroupMask;
GROUP_AFFINITY GroupMasks[ANYSIZE_ARRAY];
} DUMMYUNIONNAME;
} HWLOC_NUMA_NODE_RELATIONSHIP;
#ifndef HAVE_CACHE_RELATIONSHIP
typedef struct _CACHE_RELATIONSHIP {
typedef struct HWLOC_CACHE_RELATIONSHIP {
BYTE Level;
BYTE Associativity;
WORD LineSize;
DWORD CacheSize;
PROCESSOR_CACHE_TYPE Type;
BYTE Reserved[20];
GROUP_AFFINITY GroupMask;
} CACHE_RELATIONSHIP, *PCACHE_RELATIONSHIP;
#endif
BYTE Reserved[18];
WORD GroupCount;
union {
GROUP_AFFINITY GroupMask;
GROUP_AFFINITY GroupMasks[ANYSIZE_ARRAY];
} DUMMYUNIONNAME;
} HWLOC_CACHE_RELATIONSHIP;
#ifndef HAVE_PROCESSOR_GROUP_INFO
typedef struct _PROCESSOR_GROUP_INFO {
@@ -140,20 +127,19 @@ typedef struct _GROUP_RELATIONSHIP {
} GROUP_RELATIONSHIP, *PGROUP_RELATIONSHIP;
#endif
#ifndef HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX {
/* always use our own structure because we need our own HWLOC_PROCESSOR/CACHE/NUMA_NODE_RELATIONSHIP */
typedef struct HWLOC_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX {
LOGICAL_PROCESSOR_RELATIONSHIP Relationship;
DWORD Size;
_ANONYMOUS_UNION
union {
PROCESSOR_RELATIONSHIP Processor;
NUMA_NODE_RELATIONSHIP NumaNode;
CACHE_RELATIONSHIP Cache;
HWLOC_PROCESSOR_RELATIONSHIP Processor;
HWLOC_NUMA_NODE_RELATIONSHIP NumaNode;
HWLOC_CACHE_RELATIONSHIP Cache;
GROUP_RELATIONSHIP Group;
/* Odd: no member to tell the cpu mask of the package... */
} DUMMYUNIONNAME;
} SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, *PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX;
#endif
} HWLOC_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX;
#ifndef HAVE_PSAPI_WORKING_SET_EX_BLOCK
typedef union _PSAPI_WORKING_SET_EX_BLOCK {
@@ -190,9 +176,6 @@ typedef struct _PROCESSOR_NUMBER {
typedef WORD (WINAPI *PFN_GETACTIVEPROCESSORGROUPCOUNT)(void);
static PFN_GETACTIVEPROCESSORGROUPCOUNT GetActiveProcessorGroupCountProc;
static unsigned long nr_processor_groups = 1;
static unsigned long max_numanode_index = 0;
typedef WORD (WINAPI *PFN_GETACTIVEPROCESSORCOUNT)(WORD);
static PFN_GETACTIVEPROCESSORCOUNT GetActiveProcessorCountProc;
@@ -202,10 +185,7 @@ static PFN_GETCURRENTPROCESSORNUMBER GetCurrentProcessorNumberProc;
typedef VOID (WINAPI *PFN_GETCURRENTPROCESSORNUMBEREX)(PPROCESSOR_NUMBER);
static PFN_GETCURRENTPROCESSORNUMBEREX GetCurrentProcessorNumberExProc;
typedef BOOL (WINAPI *PFN_GETLOGICALPROCESSORINFORMATION)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION Buffer, PDWORD ReturnLength);
static PFN_GETLOGICALPROCESSORINFORMATION GetLogicalProcessorInformationProc;
typedef BOOL (WINAPI *PFN_GETLOGICALPROCESSORINFORMATIONEX)(LOGICAL_PROCESSOR_RELATIONSHIP relationship, PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX Buffer, PDWORD ReturnLength);
typedef BOOL (WINAPI *PFN_GETLOGICALPROCESSORINFORMATIONEX)(LOGICAL_PROCESSOR_RELATIONSHIP relationship, HWLOC_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *Buffer, PDWORD ReturnLength);
static PFN_GETLOGICALPROCESSORINFORMATIONEX GetLogicalProcessorInformationExProc;
typedef BOOL (WINAPI *PFN_SETTHREADGROUPAFFINITY)(HANDLE hThread, const GROUP_AFFINITY *GroupAffinity, PGROUP_AFFINITY PreviousGroupAffinity);
@@ -246,8 +226,6 @@ static void hwloc_win_get_function_ptrs(void)
(PFN_GETACTIVEPROCESSORGROUPCOUNT) GetProcAddress(kernel32, "GetActiveProcessorGroupCount");
GetActiveProcessorCountProc =
(PFN_GETACTIVEPROCESSORCOUNT) GetProcAddress(kernel32, "GetActiveProcessorCount");
GetLogicalProcessorInformationProc =
(PFN_GETLOGICALPROCESSORINFORMATION) GetProcAddress(kernel32, "GetLogicalProcessorInformation");
GetCurrentProcessorNumberProc =
(PFN_GETCURRENTPROCESSORNUMBER) GetProcAddress(kernel32, "GetCurrentProcessorNumber");
GetCurrentProcessorNumberExProc =
@@ -270,9 +248,6 @@ static void hwloc_win_get_function_ptrs(void)
(PFN_VIRTUALFREEEX) GetProcAddress(kernel32, "VirtualFreeEx");
}
if (GetActiveProcessorGroupCountProc)
nr_processor_groups = GetActiveProcessorGroupCountProc();
if (!QueryWorkingSetExProc) {
HMODULE psapi = LoadLibrary("psapi.dll");
if (psapi)
@@ -363,6 +338,173 @@ static int hwloc_bitmap_to_single_ULONG_PTR(hwloc_const_bitmap_t set, unsigned *
return 0;
}
/**********************
* Processor Groups
*/
static unsigned long max_numanode_index = 0;
static unsigned long nr_processor_groups = 1;
static hwloc_cpuset_t * processor_group_cpusets = NULL;
static void
hwloc_win_get_processor_groups(void)
{
HWLOC_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *procInfoTotal, *tmpprocInfoTotal, *procInfo;
DWORD length;
unsigned i;
hwloc_debug("querying windows processor groups\n");
if (!GetLogicalProcessorInformationExProc)
goto error;
nr_processor_groups = GetActiveProcessorGroupCountProc();
if (!nr_processor_groups)
goto error;
hwloc_debug("found %lu windows processor groups\n", nr_processor_groups);
if (nr_processor_groups > 1 && SIZEOF_VOID_P == 4) {
if (HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc: multiple processor groups found on 32bits Windows, topology may be invalid/incomplete.\n");
}
length = 0;
procInfoTotal = NULL;
while (1) {
if (GetLogicalProcessorInformationExProc(RelationGroup, procInfoTotal, &length))
break;
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
goto error;
tmpprocInfoTotal = realloc(procInfoTotal, length);
if (!tmpprocInfoTotal)
goto error_with_procinfo;
procInfoTotal = tmpprocInfoTotal;
}
processor_group_cpusets = calloc(nr_processor_groups, sizeof(*processor_group_cpusets));
if (!processor_group_cpusets)
goto error_with_procinfo;
for (procInfo = procInfoTotal;
(void*) procInfo < (void*) ((uintptr_t) procInfoTotal + length);
procInfo = (void*) ((uintptr_t) procInfo + procInfo->Size)) {
unsigned id;
assert(procInfo->Relationship == RelationGroup);
hwloc_debug("Found %u active windows processor groups\n",
(unsigned) procInfo->Group.ActiveGroupCount);
for (id = 0; id < procInfo->Group.ActiveGroupCount; id++) {
KAFFINITY mask;
hwloc_bitmap_t set;
set = hwloc_bitmap_alloc();
if (!set)
goto error_with_cpusets;
mask = procInfo->Group.GroupInfo[id].ActiveProcessorMask;
hwloc_debug("group %u with %u cpus mask 0x%llx\n", id,
(unsigned) procInfo->Group.GroupInfo[id].ActiveProcessorCount, (unsigned long long) mask);
/* KAFFINITY is ULONG_PTR */
hwloc_bitmap_set_ith_ULONG_PTR(set, id, mask);
/* FIXME: what if running 32bits on a 64bits windows with 64-processor groups?
* ULONG_PTR is 32bits, so half the group is invisible?
* maybe scale id to id*8/sizeof(ULONG_PTR) so that groups are 64-PU aligned?
*/
hwloc_debug_2args_bitmap("group %u %d bitmap %s\n", id, procInfo->Group.GroupInfo[id].ActiveProcessorCount, set);
processor_group_cpusets[id] = set;
}
}
free(procInfoTotal);
return;
error_with_cpusets:
for(i=0; i<nr_processor_groups; i++) {
if (processor_group_cpusets[i])
hwloc_bitmap_free(processor_group_cpusets[i]);
}
free(processor_group_cpusets);
processor_group_cpusets = NULL;
error_with_procinfo:
free(procInfoTotal);
error:
/* on error set nr to 1 and keep cpusets NULL. We'll use the topology cpuset whenever needed */
nr_processor_groups = 1;
}
static void
hwloc_win_free_processor_groups(void)
{
unsigned i;
for(i=0; i<nr_processor_groups; i++) {
if (processor_group_cpusets[i])
hwloc_bitmap_free(processor_group_cpusets[i]);
}
free(processor_group_cpusets);
processor_group_cpusets = NULL;
nr_processor_groups = 1;
}
int
hwloc_windows_get_nr_processor_groups(hwloc_topology_t topology, unsigned long flags)
{
if (!topology->is_loaded || !topology->is_thissystem) {
errno = EINVAL;
return -1;
}
if (flags) {
errno = EINVAL;
return -1;
}
return nr_processor_groups;
}
int
hwloc_windows_get_processor_group_cpuset(hwloc_topology_t topology, unsigned pg_index, hwloc_cpuset_t cpuset, unsigned long flags)
{
if (!topology->is_loaded || !topology->is_thissystem) {
errno = EINVAL;
return -1;
}
if (!cpuset) {
errno = EINVAL;
return -1;
}
if (flags) {
errno = EINVAL;
return -1;
}
if (pg_index >= nr_processor_groups) {
errno = ENOENT;
return -1;
}
if (!processor_group_cpusets) {
assert(nr_processor_groups == 1);
/* we found no processor groups, return the entire topology as a single one */
hwloc_bitmap_copy(cpuset, topology->levels[0][0]->cpuset);
return 0;
}
if (!processor_group_cpusets[pg_index]) {
errno = ENOENT;
return -1;
}
hwloc_bitmap_copy(cpuset, processor_group_cpusets[pg_index]);
return 0;
}
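/* Illustrative usage sketch (not from the hwloc sources): the two public
 * helpers above are typically used together, e.g.:
 *
 *   int nr = hwloc_windows_get_nr_processor_groups(topology, 0);
 *   hwloc_bitmap_t set = hwloc_bitmap_alloc();
 *   for (int pg = 0; pg < nr; pg++)
 *     if (!hwloc_windows_get_processor_group_cpuset(topology, pg, set, 0)) {
 *       char *s;
 *       hwloc_bitmap_asprintf(&s, set);
 *       printf("group %d covers PUs %s\n", pg, s);
 *       free(s);
 *     }
 *   hwloc_bitmap_free(set);
 */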
/**************************************************************
* hwloc PU numbering with respect to Windows processor groups
*
@@ -848,6 +990,8 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
unsigned hostname_size = sizeof(hostname);
int has_efficiencyclass = 0;
struct hwloc_win_efficiency_classes eclasses;
char *env = getenv("HWLOC_WINDOWS_PROCESSOR_GROUP_OBJS");
int keep_pgroup_objs = (env && atoi(env));
assert(dstatus->phase == HWLOC_DISC_PHASE_CPU);
@@ -878,137 +1022,8 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
GetSystemInfo(&SystemInfo);
if (!GetLogicalProcessorInformationExProc && GetLogicalProcessorInformationProc) {
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION procInfo, tmpprocInfo;
unsigned id;
unsigned i;
struct hwloc_obj *obj;
hwloc_obj_type_t type;
length = 0;
procInfo = NULL;
while (1) {
if (GetLogicalProcessorInformationProc(procInfo, &length))
break;
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
return -1;
tmpprocInfo = realloc(procInfo, length);
if (!tmpprocInfo) {
free(procInfo);
goto out;
}
procInfo = tmpprocInfo;
}
assert(!length || procInfo);
for (i = 0; i < length / sizeof(*procInfo); i++) {
/* Ignore unknown caches */
if (procInfo->Relationship == RelationCache
&& procInfo->Cache.Type != CacheUnified
&& procInfo->Cache.Type != CacheData
&& procInfo->Cache.Type != CacheInstruction)
continue;
id = HWLOC_UNKNOWN_INDEX;
switch (procInfo[i].Relationship) {
case RelationNumaNode:
type = HWLOC_OBJ_NUMANODE;
id = procInfo[i].NumaNode.NodeNumber;
gotnuma++;
if (id > max_numanode_index)
max_numanode_index = id;
break;
case RelationProcessorPackage:
type = HWLOC_OBJ_PACKAGE;
break;
case RelationCache:
type = (procInfo[i].Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo[i].Cache.Level - 1;
break;
case RelationProcessorCore:
type = HWLOC_OBJ_CORE;
break;
case RelationGroup:
default:
type = HWLOC_OBJ_GROUP;
break;
}
if (!hwloc_filter_check_keep_object_type(topology, type))
continue;
obj = hwloc_alloc_setup_object(topology, type, id);
obj->cpuset = hwloc_bitmap_alloc();
hwloc_debug("%s#%u mask %llx\n", hwloc_obj_type_string(type), id, (unsigned long long) procInfo[i].ProcessorMask);
/* ProcessorMask is a ULONG_PTR */
hwloc_bitmap_set_ith_ULONG_PTR(obj->cpuset, 0, procInfo[i].ProcessorMask);
hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_obj_type_string(type), id, obj->cpuset);
switch (type) {
case HWLOC_OBJ_NUMANODE:
{
ULONGLONG avail;
obj->nodeset = hwloc_bitmap_alloc();
hwloc_bitmap_set(obj->nodeset, id);
if ((GetNumaAvailableMemoryNodeExProc && GetNumaAvailableMemoryNodeExProc(id, &avail))
|| (GetNumaAvailableMemoryNodeProc && GetNumaAvailableMemoryNodeProc(id, &avail))) {
obj->attr->numanode.local_memory = avail;
gotnumamemory++;
}
obj->attr->numanode.page_types_len = 2;
obj->attr->numanode.page_types = malloc(2 * sizeof(*obj->attr->numanode.page_types));
memset(obj->attr->numanode.page_types, 0, 2 * sizeof(*obj->attr->numanode.page_types));
obj->attr->numanode.page_types_len = 1;
obj->attr->numanode.page_types[0].size = SystemInfo.dwPageSize;
#if HAVE_DECL__SC_LARGE_PAGESIZE
obj->attr->numanode.page_types_len++;
obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
break;
}
case HWLOC_OBJ_L1CACHE:
case HWLOC_OBJ_L2CACHE:
case HWLOC_OBJ_L3CACHE:
case HWLOC_OBJ_L4CACHE:
case HWLOC_OBJ_L5CACHE:
case HWLOC_OBJ_L1ICACHE:
case HWLOC_OBJ_L2ICACHE:
case HWLOC_OBJ_L3ICACHE:
obj->attr->cache.size = procInfo[i].Cache.Size;
obj->attr->cache.associativity = procInfo[i].Cache.Associativity == CACHE_FULLY_ASSOCIATIVE ? -1 : procInfo[i].Cache.Associativity ;
obj->attr->cache.linesize = procInfo[i].Cache.LineSize;
obj->attr->cache.depth = procInfo[i].Cache.Level;
switch (procInfo->Cache.Type) {
case CacheUnified:
obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
break;
case CacheData:
obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
break;
case CacheInstruction:
obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
break;
default:
hwloc_free_unlinked_object(obj);
continue;
}
break;
case HWLOC_OBJ_GROUP:
obj->attr->group.kind = procInfo[i].Relationship == RelationGroup ? HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP : HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN;
break;
default:
break;
}
hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformation");
}
free(procInfo);
}
if (GetLogicalProcessorInformationExProc) {
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX procInfoTotal, tmpprocInfoTotal, procInfo;
HWLOC_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *procInfoTotal, *tmpprocInfoTotal, *procInfo;
unsigned id;
struct hwloc_obj *obj;
hwloc_obj_type_t type;
@@ -1047,8 +1062,16 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
switch (procInfo->Relationship) {
case RelationNumaNode:
type = HWLOC_OBJ_NUMANODE;
num = 1;
GroupMask = &procInfo->NumaNode.GroupMask;
/* Starting with Windows 11 and Server 2022, the GroupCount field is valid and >=1
* and we may read GroupMasks[]. Older releases have GroupCount==0 and we must read GroupMask.
*/
if (procInfo->NumaNode.GroupCount) {
num = procInfo->NumaNode.GroupCount;
GroupMask = procInfo->NumaNode.GroupMasks;
} else {
num = 1;
GroupMask = &procInfo->NumaNode.GroupMask;
}
id = procInfo->NumaNode.NodeNumber;
gotnuma++;
if (id > max_numanode_index)
@@ -1061,18 +1084,20 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
break;
case RelationCache:
type = (procInfo->Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo->Cache.Level - 1;
num = 1;
GroupMask = &procInfo->Cache.GroupMask;
/* Cache.GroupCount was added to Windows at roughly the same time as NumaNode.GroupCount above */
if (procInfo->Cache.GroupCount) {
num = procInfo->Cache.GroupCount;
GroupMask = procInfo->Cache.GroupMasks;
} else {
num = 1;
GroupMask = &procInfo->Cache.GroupMask;
}
break;
case RelationProcessorCore:
type = HWLOC_OBJ_CORE;
num = procInfo->Processor.GroupCount;
GroupMask = procInfo->Processor.GroupMask;
if (has_efficiencyclass)
/* the EfficiencyClass field didn't exist before Windows10 and recent MSVC headers,
* so just access it manually instead of trying to detect it.
*/
efficiency_class = * ((&procInfo->Processor.Flags) + 1);
efficiency_class = procInfo->Processor.EfficiencyClass;
break;
case RelationGroup:
/* So strange an interface... */
@@ -1097,11 +1122,12 @@ hwloc_look_windows(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
groups_pu_set = hwloc_bitmap_alloc();
hwloc_bitmap_or(groups_pu_set, groups_pu_set, set);
if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) {
/* Ignore processor groups unless requested and filtered-in */
if (keep_pgroup_objs && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) {
obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, id);
obj->cpuset = set;
obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP;
hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformation:ProcessorGroup");
hwloc__insert_object_by_cpuset(topology, NULL, obj, "windows:GetLogicalProcessorInformationEx:ProcessorGroup");
} else
hwloc_bitmap_free(set);
}
@@ -1328,11 +1354,13 @@ hwloc_set_windows_hooks(struct hwloc_binding_hooks *hooks,
static int hwloc_windows_component_init(unsigned long flags __hwloc_attribute_unused)
{
hwloc_win_get_function_ptrs();
hwloc_win_get_processor_groups();
return 0;
}
static void hwloc_windows_component_finalize(unsigned long flags __hwloc_attribute_unused)
{
hwloc_win_free_processor_groups();
}
static struct hwloc_backend *

@@ -1,5 +1,5 @@
/*
* Copyright © 2010-2021 Inria. All rights reserved.
* Copyright © 2010-2022 Inria. All rights reserved.
* Copyright © 2010-2013 Université Bordeaux
* Copyright © 2010-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -7,11 +7,14 @@
*
* This backend is only used when the operating system does not export
* the necessary hardware topology information to user-space applications.
* Currently, only the FreeBSD backend relies on this x86 backend.
* Currently, FreeBSD and NetBSD only add PUs and then fallback to this
* backend for CPU/Cache discovery.
*
* Other backends such as Linux have their own way to retrieve various
* pieces of hardware topology information from the operating system
* on various architectures, without having to use this x86-specific code.
* But this backend is still used after them to annotate some objects with
* additional details (CPU info in Package, Inclusiveness in Caches).
*/
#include "private/autogen/config.h"
@@ -497,7 +500,8 @@ static void read_amd_cores_topoext(struct procinfo *infos, unsigned long flags,
nodes_per_proc = ((ecx >> 8) & 7) + 1;
}
if ((infos->cpufamilynumber == 0x15 && nodes_per_proc > 2)
|| ((infos->cpufamilynumber == 0x17 || infos->cpufamilynumber == 0x18) && nodes_per_proc > 4)) {
|| ((infos->cpufamilynumber == 0x17 || infos->cpufamilynumber == 0x18) && nodes_per_proc > 4)
|| (infos->cpufamilynumber == 0x19 && nodes_per_proc > 1)) {
hwloc_debug("warning: undefined nodes_per_proc value %u, assuming it means %u\n", nodes_per_proc, nodes_per_proc);
}
}
@@ -610,10 +614,13 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
eax = 0x01;
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
infos->apicid = ebx >> 24;
if (edx & (1 << 28))
if (edx & (1 << 28)) {
legacy_max_log_proc = 1 << hwloc_flsl(((ebx >> 16) & 0xff) - 1);
else
} else {
hwloc_debug("HTT bit not set in CPUID 0x01.edx, assuming legacy_max_log_proc = 1\n");
legacy_max_log_proc = 1;
}
hwloc_debug("APIC ID 0x%02x legacy_max_log_proc %u\n", infos->apicid, legacy_max_log_proc);
infos->ids[PKG] = infos->apicid / legacy_max_log_proc;
legacy_log_proc_id = infos->apicid % legacy_max_log_proc;
@@ -676,12 +683,23 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
unsigned max_nbcores;
unsigned max_nbthreads;
unsigned threadid __hwloc_attribute_unused;
hwloc_debug("Trying to get core/thread IDs from 0x04...\n");
max_nbcores = ((eax >> 26) & 0x3f) + 1;
max_nbthreads = legacy_max_log_proc / max_nbcores;
hwloc_debug("thus %u threads\n", max_nbthreads);
threadid = legacy_log_proc_id % max_nbthreads;
infos->ids[CORE] = legacy_log_proc_id / max_nbthreads;
hwloc_debug("this is thread %u of core %u\n", threadid, infos->ids[CORE]);
hwloc_debug("found %u cores max\n", max_nbcores);
/* some VMs (e.g. issue#525) don't report valid information, check things before dividing by 0. */
if (!max_nbcores) {
hwloc_debug("cannot detect core/thread IDs from 0x04 without a valid max of cores\n");
} else {
max_nbthreads = legacy_max_log_proc / max_nbcores;
hwloc_debug("found %u threads max\n", max_nbthreads);
if (!max_nbthreads) {
hwloc_debug("cannot detect core/thread IDs from 0x04 without a valid max of threads\n");
} else {
threadid = legacy_log_proc_id % max_nbthreads;
infos->ids[CORE] = legacy_log_proc_id / max_nbthreads;
hwloc_debug("this is thread %u of core %u\n", threadid, infos->ids[CORE]);
}
}
}
}
@@ -772,13 +790,19 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
} else if (cpuid_type == amd) {
/* AMD quirks */
if (infos->cpufamilynumber == 0x17
&& cache->level == 3 && cache->nbthreads_sharing == 6) {
/* AMD family 0x17 always shares L3 between 8 APIC ids,
* even when only 6 APIC ids are enabled and reported in nbthreads_sharing
* (on 24-core CPUs).
if (infos->cpufamilynumber >= 0x17 && cache->level == 3) {
/* AMD family 0x19 always shares L3 between 16 APIC ids (8 HT cores),
* while family 0x17 shares between 8 APIC ids (4 HT cores).
* But many models have fewer APIC ids enabled and reported in nbthreads_sharing.
* It means we must round up nbthreads_sharing to the nearest power of 2
* before computing cacheid.
*/
cache->cacheid = infos->apicid / 8;
unsigned nbapics_sharing = cache->nbthreads_sharing;
if (nbapics_sharing & (nbapics_sharing-1))
/* not a power of two, round-up */
nbapics_sharing = 1U<<(1+hwloc_ffsl(nbapics_sharing));
cache->cacheid = infos->apicid / nbapics_sharing;
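/* Illustrative example (not from the hwloc sources): on a family 0x17 part
 * reporting nbthreads_sharing == 6 for L3, 6 is not a power of two and is
 * rounded up to 8, so APIC ids 0-7 map to cacheid 0, 8-15 to cacheid 1, etc.,
 * reproducing the old apicid/8 behaviour as a special case.
 */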
} else if (infos->cpufamilynumber== 0x10 && infos->cpumodelnumber == 0x9
&& cache->level == 3
@@ -804,7 +828,7 @@ static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, uns
} else if (infos->cpufamilynumber == 0x15
&& (infos->cpumodelnumber == 0x1 /* Bulldozer */ || infos->cpumodelnumber == 0x2 /* Piledriver */)
&& cache->level == 3 && cache->nbthreads_sharing == 6) {
/* AMD Bulldozer and Piledriver 12-core processors have same APIC ids as Magny-Cours below,
/* AMD Bulldozer and Piledriver 12-core processors have same APIC ids as Magny-Cours above,
* but we can't merge the checks because the original nbthreads_sharing must be exactly 6 here.
*/
cache->cacheid = (infos->apicid % legacy_max_log_proc) / cache->nbthreads_sharing /* cacheid within the package */
@@ -1228,6 +1252,18 @@ static void summarize(struct hwloc_backend *backend, struct procinfo *infos, uns
}
}
cache = hwloc_alloc_setup_object(topology, otype, HWLOC_UNKNOWN_INDEX);
/* We don't specify the os_index of caches because we want to be
* 100% sure they are identical to what the Linux kernel reports
* (so that things like resctrl work).
* However, vendor/model-specific quirks in the x86 code above
* make this difficult.
*
* Caveat: if the x86 backend is used on Linux to avoid kernel bugs,
* IDs won't be available to resctrl users. But resctrl heavily
* relies on the kernel x86 discovery being non-buggy anyway.
*
* TODO: make this optional? or only disable it on Linux?
*/
cache->attr->cache.depth = level;
cache->attr->cache.size = infos[i].cache[l].size;
cache->attr->cache.linesize = infos[i].cache[l].linesize;
@@ -1257,7 +1293,8 @@ static int
look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long flags,
unsigned highest_cpuid, unsigned highest_ext_cpuid, unsigned *features, enum cpuid_type cpuid_type,
int (*get_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags),
int (*set_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags))
int (*set_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags),
hwloc_bitmap_t restrict_set)
{
struct hwloc_x86_backend_data_s *data = backend->private_data;
struct hwloc_topology *topology = backend->topology;
@@ -1277,6 +1314,12 @@ look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long
for (i = 0; i < nbprocs; i++) {
struct cpuiddump *src_cpuiddump = NULL;
if (restrict_set && !hwloc_bitmap_isset(restrict_set, i)) {
/* skip this CPU outside of the binding mask */
continue;
}
if (data->src_cpuiddump_path) {
src_cpuiddump = cpuiddump_read(data->src_cpuiddump_path, i);
if (!src_cpuiddump)
@@ -1306,7 +1349,7 @@ look_procs(struct hwloc_backend *backend, struct procinfo *infos, unsigned long
if (data->apicid_unique) {
summarize(backend, infos, flags);
if (has_hybrid(features)) {
if (has_hybrid(features) && !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
/* use hybrid info for cpukinds */
hwloc_bitmap_t atomset = hwloc_bitmap_alloc();
hwloc_bitmap_t coreset = hwloc_bitmap_alloc();
@@ -1410,6 +1453,7 @@ static
int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
{
struct hwloc_x86_backend_data_s *data = backend->private_data;
struct hwloc_topology *topology = backend->topology;
unsigned nbprocs = data->nbprocs;
unsigned eax, ebx, ecx = 0, edx;
unsigned i;
@@ -1425,9 +1469,21 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
struct hwloc_topology_membind_support memsupport __hwloc_attribute_unused;
int (*get_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags) = NULL;
int (*set_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags) = NULL;
hwloc_bitmap_t restrict_set = NULL;
struct cpuiddump *src_cpuiddump = NULL;
int ret = -1;
/* check if binding works */
memset(&hooks, 0, sizeof(hooks));
support.membind = &memsupport;
/* We could just copy the main hooks (except in some corner cases),
* but the current overhead is negligible, so just always reget them.
*/
hwloc_set_native_binding_hooks(&hooks, &support);
/* in theory, those are only needed if !data->src_cpuiddump_path || HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_BINDING
* but that's the vast majority of cases anyway, and the overhead is very small.
*/
if (data->src_cpuiddump_path) {
/* Just read cpuid from the dump (implies !topology->is_thissystem by default) */
src_cpuiddump = cpuiddump_read(data->src_cpuiddump_path, 0);
@@ -1440,13 +1496,6 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
* we may still force use this backend when debugging with !thissystem.
*/
/* check if binding works */
memset(&hooks, 0, sizeof(hooks));
support.membind = &memsupport;
/* We could just copy the main hooks (except in some corner cases),
* but the current overhead is negligible, so just always reget them.
*/
hwloc_set_native_binding_hooks(&hooks, &support);
if (hooks.get_thisthread_cpubind && hooks.set_thisthread_cpubind) {
get_cpubind = hooks.get_thisthread_cpubind;
set_cpubind = hooks.set_thisthread_cpubind;
@@ -1466,6 +1515,20 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
}
}
if (topology->flags & HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING) {
restrict_set = hwloc_bitmap_alloc();
if (!restrict_set)
goto out;
if (hooks.get_thisproc_cpubind)
hooks.get_thisproc_cpubind(topology, restrict_set, 0);
else if (hooks.get_thisthread_cpubind)
hooks.get_thisthread_cpubind(topology, restrict_set, 0);
if (hwloc_bitmap_iszero(restrict_set)) {
hwloc_bitmap_free(restrict_set);
restrict_set = NULL;
}
}
if (!src_cpuiddump && !hwloc_have_x86_cpuid())
goto out;
@@ -1530,7 +1593,7 @@ int hwloc_look_x86(struct hwloc_backend *backend, unsigned long flags)
ret = look_procs(backend, infos, flags,
highest_cpuid, highest_ext_cpuid, features, cpuid_type,
get_cpubind, set_cpubind);
get_cpubind, set_cpubind, restrict_set);
if (!ret)
/* success, we're done */
goto out_with_os_state;
@@ -1555,6 +1618,7 @@ out_with_infos:
}
out:
hwloc_bitmap_free(restrict_set);
if (src_cpuiddump)
cpuiddump_free(src_cpuiddump);
return ret;
@@ -1571,6 +1635,11 @@ hwloc_x86_discover(struct hwloc_backend *backend, struct hwloc_disc_status *dsta
assert(dstatus->phase == HWLOC_DISC_PHASE_CPU);
if (topology->flags & HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING) {
/* TODO: Things would work if there's a single PU, no need to rebind */
return 0;
}
if (getenv("HWLOC_X86_TOPOEXT_NUMANODES")) {
flags |= HWLOC_X86_DISC_FLAG_TOPOEXT_NUMANODES;
}

@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2011, 2020 Université Bordeaux
* Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -123,6 +123,17 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
fprintf(stderr, "%s: unexpected zero gp_index, topology may be invalid\n", state->global->msgprefix);
if (obj->gp_index >= topology->next_gp_index)
topology->next_gp_index = obj->gp_index + 1;
} else if (!strcmp(name, "id")) { /* forward compat */
if (!strncmp(value, "obj", 3)) {
obj->gp_index = strtoull(value+3, NULL, 10);
if (!obj->gp_index && hwloc__xml_verbose())
fprintf(stderr, "%s: unexpected zero id, topology may be invalid\n", state->global->msgprefix);
if (obj->gp_index >= topology->next_gp_index)
topology->next_gp_index = obj->gp_index + 1;
} else {
if (hwloc__xml_verbose())
fprintf(stderr, "%s: unexpected id `%s' not-starting with `obj', ignoring\n", state->global->msgprefix, value);
}
} else if (!strcmp(name, "cpuset")) {
if (!obj->cpuset)
obj->cpuset = hwloc_bitmap_alloc();
@@ -192,8 +203,9 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
|| lvalue == HWLOC_OBJ_CACHE_INSTRUCTION)
obj->attr->cache.type = (hwloc_obj_cache_type_t) lvalue;
else
fprintf(stderr, "%s: ignoring invalid cache_type attribute %lu\n",
state->global->msgprefix, lvalue);
if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring invalid cache_type attribute %lu\n",
state->global->msgprefix, lvalue);
} else if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring cache_type attribute for non-cache object type\n",
state->global->msgprefix);
@@ -242,7 +254,7 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
else if (!strcmp(name, "dont_merge")) {
unsigned long lvalue = strtoul(value, NULL, 10);
if (obj->type == HWLOC_OBJ_GROUP)
obj->attr->group.dont_merge = lvalue;
obj->attr->group.dont_merge = (unsigned char) lvalue;
else if (hwloc__xml_verbose())
fprintf(stderr, "%s: ignoring dont_merge attribute for non-group object type\n",
state->global->msgprefix);
@@ -262,8 +274,8 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
#ifndef HWLOC_HAVE_32BITS_PCI_DOMAIN
} else if (domain > 0xffff) {
static int warned = 0;
if (!warned && !hwloc_hide_errors())
fprintf(stderr, "Ignoring PCI device with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
if (!warned && HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc/xml: Ignoring PCI device with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
warned = 1;
*ignore = 1;
#endif
@@ -337,6 +349,7 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
} else {
obj->attr->bridge.upstream_type = (hwloc_obj_bridge_type_t) upstream_type;
obj->attr->bridge.downstream_type = (hwloc_obj_bridge_type_t) downstream_type;
/* FIXME verify that upstream/downstream type is valid */
};
break;
}
@@ -361,12 +374,13 @@ hwloc__xml_import_object_attr(struct hwloc_topology *topology,
#ifndef HWLOC_HAVE_32BITS_PCI_DOMAIN
} else if (domain > 0xffff) {
static int warned = 0;
if (!warned && !hwloc_hide_errors())
fprintf(stderr, "Ignoring bridge to PCI with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
if (!warned && HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc/xml: Ignoring bridge to PCI with non-16bit domain.\nPass --enable-32bits-pci-domain to configure to support such devices\n(warning: it would break the library ABI, don't enable unless really needed).\n");
warned = 1;
*ignore = 1;
#endif
} else {
/* FIXME verify that downstream type vs pci info are valid */
obj->attr->bridge.downstream.pci.domain = domain;
obj->attr->bridge.downstream.pci.secondary_bus = secbus;
obj->attr->bridge.downstream.pci.subordinate_bus = subbus;
@@ -1232,7 +1246,7 @@ hwloc__xml_import_object(hwloc_topology_t topology,
/* next should be before cur */
if (!childrengotignored) {
static int reported = 0;
if (!reported && !hwloc_hide_errors()) {
if (!reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
hwloc__xml_import_report_outoforder(topology, next, cur);
reported = 1;
}
@@ -1565,7 +1579,10 @@ hwloc__xml_v2import_distances(hwloc_topology_t topology,
}
}
hwloc_internal_distances_add_by_index(topology, name, unique_type, different_types, nbobjs, indexes, u64values, kind, 0);
if (topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES)
goto out_ignore;
hwloc_internal_distances_add_by_index(topology, name, unique_type, different_types, nbobjs, indexes, u64values, kind, 0 /* assume grouping was applied when this matrix was discovered before exporting to XML */);
/* prevent freeing below */
indexes = NULL;
@@ -1719,7 +1736,8 @@ hwloc__xml_import_memattr(hwloc_topology_t topology,
}
}
if (name && flags != (unsigned long) -1) {
if (name && flags != (unsigned long) -1
&& !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS)) {
hwloc_memattr_id_t _id;
ret = hwloc_memattr_get_by_name(topology, name, &_id);
@@ -1830,7 +1848,13 @@ hwloc__xml_import_cpukind(hwloc_topology_t topology,
goto error;
}
hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
if (topology->flags & HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS) {
hwloc__free_infos(infos, nr_infos);
hwloc_bitmap_free(cpuset);
} else {
hwloc_internal_cpukinds_register(topology, cpuset, forced_efficiency, infos, nr_infos, HWLOC_CPUKINDS_REGISTER_FLAG_OVERWRITE_FORCED_EFFICIENCY);
hwloc__free_infos(infos, nr_infos);
}
return state->global->close_tag(state);
@@ -2165,7 +2189,8 @@ done:
* but it would require to have those objects in the original XML order (like the first_numanode cousin-list).
* because the topology order can be different if some parents are ignored during load.
*/
if (nbobjs == data->nbnumanodes) {
if (nbobjs == data->nbnumanodes
&& !(topology->flags & HWLOC_TOPOLOGY_FLAG_NO_DISTANCES)) {
hwloc_obj_t *objs = malloc(nbobjs*sizeof(hwloc_obj_t));
uint64_t *values = malloc(nbobjs*nbobjs*sizeof(*values));
assert(data->nbnumanodes > 0); /* v1dist->nbobjs is >0 after import */
@@ -2647,7 +2672,8 @@ hwloc__xml_export_object_contents (hwloc__xml_export_state_t state, hwloc_topolo
logical_to_v2array = malloc(nbobjs * sizeof(*logical_to_v2array));
if (!logical_to_v2array) {
fprintf(stderr, "xml/export/v1: failed to allocated logical_to_v2array\n");
if (HWLOC_SHOW_ALL_ERRORS())
fprintf(stderr, "hwloc/xml/export/v1: failed to allocated logical_to_v2array\n");
continue;
}
@@ -2821,6 +2847,7 @@ hwloc__xml_v1export_object_with_memory(hwloc__xml_export_state_t parentstate, hw
/* child has sibling, we must add a Group around those memory children */
hwloc_obj_t group = parentstate->global->v1_memory_group;
parentstate->new_child(parentstate, &gstate, "object");
group->parent = obj->parent;
group->cpuset = obj->cpuset;
group->complete_cpuset = obj->complete_cpuset;
group->nodeset = obj->nodeset;
@@ -3119,9 +3146,11 @@ hwloc__xml_export_memattrs(hwloc__xml_export_state_t state, hwloc_topology_t top
continue;
imattr = &topology->memattrs[id];
if ((id == HWLOC_MEMATTR_ID_LATENCY || id == HWLOC_MEMATTR_ID_BANDWIDTH)
&& !imattr->nr_targets)
/* no need to export target-less attributes for initial attributes, no release support attributes without those definitions */
if (id < HWLOC_MEMATTR_ID_MAX && !imattr->nr_targets)
/* no need to export standard attributes without any target,
* their definition is now standardized,
* the old hwloc importing this XML may recreate these attributes just like it would for a non-imported topology.
*/
continue;
state->new_child(state, &mstate, "memattr");


@@ -1,8 +1,9 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2022 Inria. All rights reserved.
* Copyright © 2009-2012, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* Copyright © 2022 IBM Corporation. All rights reserved.
* See COPYING in top-level directory.
*/
@@ -52,6 +53,57 @@
#include <windows.h>
#endif
#ifdef HWLOC_HAVE_LEVELZERO
/*
* Define ZES_ENABLE_SYSMAN=1 early so that the LevelZero backend gets Sysman enabled.
*
* Only if the levelzero was enabled in this build so that we don't enable sysman
* for external levelzero users when hwloc doesn't need it. If somebody ever loads
* an external levelzero plugin in a hwloc library built without levelzero (unlikely),
* he may have to manually set ZES_ENABLE_SYSMAN=1.
*
* Use the constructor if supported and/or the Windows DllMain callback.
* Do it in the main hwloc library instead of the levelzero component because
* the latter could be loaded later as a plugin.
*
* L0 seems to be using getenv() to check this variable on Windows
* (at least in the Intel Compute-Runtime of March 2021),
* but setenv() doesn't seem to exist on Windows, hence use putenv() to set the variable.
*
* For the record, Get/SetEnvironmentVariable() is not exactly the same as getenv/putenv():
* - getenv() doesn't see what was set with SetEnvironmentVariable()
* - GetEnvironmentVariable() doesn't see putenv() in cygwin (while it does in MSVC and MinGW).
* Hence, if L0 ever switches from getenv() to GetEnvironmentVariable(),
* it will break in cygwin, we'll have to use both putenv() and SetEnvironmentVariable().
* Hopefully L0 will provide a way to enable Sysman without env vars before it happens.
*/
#if HWLOC_HAVE_ATTRIBUTE_CONSTRUCTOR
static void hwloc_constructor(void) __attribute__((constructor));
static void hwloc_constructor(void)
{
if (!getenv("ZES_ENABLE_SYSMAN"))
#ifdef HWLOC_WIN_SYS
putenv("ZES_ENABLE_SYSMAN=1");
#else
setenv("ZES_ENABLE_SYSMAN", "1", 1);
#endif
}
#endif
#ifdef HWLOC_WIN_SYS
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved)
{
if (fdwReason == DLL_PROCESS_ATTACH) {
if (!getenv("ZES_ENABLE_SYSMAN"))
/* Windows does not have a setenv, so use putenv. */
putenv((char *) "ZES_ENABLE_SYSMAN=1");
}
return TRUE;
}
#endif
#endif /* HWLOC_HAVE_LEVELZERO */
unsigned hwloc_get_api_version(void)
{
return HWLOC_API_VERSION;
@@ -62,14 +114,25 @@ int hwloc_topology_abi_check(hwloc_topology_t topology)
return topology->topology_abi != HWLOC_TOPOLOGY_ABI ? -1 : 0;
}
/* callers should rather use wrappers HWLOC_SHOW_ALL_ERRORS() and HWLOC_SHOW_CRITICAL_ERRORS() for clarity */
int hwloc_hide_errors(void)
{
static int hide = 0;
static int hide = 1; /* only show critical errors by default. lstopo will show others */
static int checked = 0;
if (!checked) {
const char *envvar = getenv("HWLOC_HIDE_ERRORS");
if (envvar)
if (envvar) {
hide = atoi(envvar);
#ifdef HWLOC_DEBUG
} else {
/* if debug is enabled and HWLOC_DEBUG_VERBOSE isn't forced to 0,
* show all errors just like we show all debug messages.
*/
envvar = getenv("HWLOC_DEBUG_VERBOSE");
if (!envvar || atoi(envvar))
hide = 0;
#endif
}
checked = 1;
}
return hide;
@@ -106,7 +169,7 @@ static void report_insert_error(hwloc_obj_t new, hwloc_obj_t old, const char *ms
{
static int reported = 0;
if (reason && !reported && !hwloc_hide_errors()) {
if (reason && !reported && HWLOC_SHOW_CRITICAL_ERRORS()) {
char newstr[512];
char oldstr[512];
report_insert_error_format_obj(newstr, sizeof(newstr), new);
@@ -1865,6 +1928,9 @@ hwloc_topology_alloc_group_object(struct hwloc_topology *topology)
static void hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root);
static void propagate_total_memory(hwloc_obj_t obj);
static void hwloc_set_group_depth(hwloc_topology_t topology);
static void hwloc_connect_children(hwloc_obj_t parent);
static int hwloc_connect_levels(hwloc_topology_t topology);
static int hwloc_connect_special_levels(hwloc_topology_t topology);
hwloc_obj_t
hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t obj)
@@ -2307,9 +2373,15 @@ hwloc__filter_bridges(hwloc_topology_t topology, hwloc_obj_t root, unsigned dept
child->attr->bridge.depth = depth;
if (child->type == HWLOC_OBJ_BRIDGE
&& filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT
&& !child->io_first_child) {
/* remove bridges that have no child,
* and pci-to-non-pci bridges (pcidev) that have no child either.
* keep NVSwitch since they may be used in NVLink matrices.
*/
if (filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT
&& !child->io_first_child
&& (child->type == HWLOC_OBJ_BRIDGE
|| (child->type == HWLOC_OBJ_PCI_DEVICE && (child->attr->pcidev.class_id >> 8) == 0x06
&& (!child->subtype || strcmp(child->subtype, "NVSwitch"))))) {
unlink_and_free_single_object(pchild);
topology->modified = 1;
}
@@ -2432,13 +2504,26 @@ hwloc_compare_levels_structure(hwloc_topology_t topology, unsigned i)
return 0;
}
/* return > 0 if any level was removed, which means reconnect is needed */
static void
/* return > 0 if any level was removed.
* performs its own reconnect internally if needed
*/
static int
hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
{
unsigned i, j;
int res = 0;
if (topology->modified) {
/* WARNING: hwloc_topology_reconnect() is duplicated partially here
* and at the end of this function:
* - we need normal levels before merging.
* - and we'll need to update special levels after merging.
*/
hwloc_connect_children(topology->levels[0][0]);
if (hwloc_connect_levels(topology) < 0)
return -1;
}
/* start from the bottom since we'll remove intermediate levels */
for(i=topology->nb_levels-1; i>0; i--) {
int replacechild = 0, replaceparent = 0;
@@ -2604,6 +2689,22 @@ hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
topology->type_depth[type] = HWLOC_TYPE_DEPTH_MULTIPLE;
}
}
if (res > 0 || topology->modified) {
/* WARNING: hwloc_topology_reconnect() is duplicated partially here
* and at the beginning of this function.
* If we merged some levels, some child+parent special children lists
* may have been merged, hence special levels might need reordering,
* so reconnect special levels only here at the end
* (it's not needed at the beginning of this function).
*/
if (hwloc_connect_special_levels(topology) < 0)
return -1;
topology->modified = 0;
}
return 0;
}
static void
@@ -2921,9 +3022,9 @@ hwloc_list_special_objects(hwloc_topology_t topology, hwloc_obj_t obj)
}
}
/* Build I/O levels */
/* Build Memory, I/O and Misc levels */
static int
hwloc_connect_io_misc_levels(hwloc_topology_t topology)
hwloc_connect_special_levels(hwloc_topology_t topology)
{
unsigned i;
@@ -3088,7 +3189,8 @@ hwloc_connect_levels(hwloc_topology_t topology)
tmpnbobjs = realloc(topology->level_nbobjects,
2 * topology->nb_levels_allocated * sizeof(*topology->level_nbobjects));
if (!tmplevels || !tmpnbobjs) {
fprintf(stderr, "hwloc failed to realloc level arrays to %u\n", topology->nb_levels_allocated * 2);
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: failed to realloc level arrays to %u\n", topology->nb_levels_allocated * 2);
/* if one realloc succeeded, make sure the caller will free the new buffer */
if (tmplevels)
@@ -3133,6 +3235,10 @@ hwloc_connect_levels(hwloc_topology_t topology)
int
hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags)
{
/* WARNING: when updating this function, the replicated code must
* also be updated inside hwloc_filter_levels_keep_structure()
*/
if (flags) {
errno = EINVAL;
return -1;
@@ -3145,7 +3251,7 @@ hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags)
if (hwloc_connect_levels(topology) < 0)
return -1;
if (hwloc_connect_io_misc_levels(topology) < 0)
if (hwloc_connect_special_levels(topology) < 0)
return -1;
topology->modified = 0;
@@ -3441,6 +3547,8 @@ hwloc_discover(struct hwloc_topology *topology,
/*
* Additional discovery
*/
hwloc_pci_discovery_prepare(topology);
if (topology->backend_phases & HWLOC_DISC_PHASE_PCI) {
dstatus->phase = HWLOC_DISC_PHASE_PCI;
hwloc_discover_by_phase(topology, dstatus, "PCI");
@@ -3458,6 +3566,8 @@ hwloc_discover(struct hwloc_topology *topology,
hwloc_discover_by_phase(topology, dstatus, "ANNOTATE");
}
hwloc_pci_discovery_exit(topology); /* pci needed up to annotate */
if (getenv("HWLOC_DEBUG_SORT_CHILDREN"))
hwloc_debug_sort_children(topology->levels[0][0]);
@@ -3470,28 +3580,28 @@ hwloc_discover(struct hwloc_topology *topology,
hwloc_debug("%s", "\nRemoving empty objects\n");
remove_empty(topology, &topology->levels[0][0]);
if (!topology->levels[0][0]) {
fprintf(stderr, "Topology became empty, aborting!\n");
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Topology became empty, aborting!\n");
return -1;
}
if (hwloc_bitmap_iszero(topology->levels[0][0]->cpuset)) {
fprintf(stderr, "Topology does not contain any PU, aborting!\n");
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Topology does not contain any PU, aborting!\n");
return -1;
}
if (hwloc_bitmap_iszero(topology->levels[0][0]->nodeset)) {
fprintf(stderr, "Topology does not contain any NUMA node, aborting!\n");
if (HWLOC_SHOW_CRITICAL_ERRORS())
fprintf(stderr, "hwloc: Topology does not contain any NUMA node, aborting!\n");
return -1;
}
hwloc_debug_print_objects(0, topology->levels[0][0]);
/* Reconnect things after all these changes.
* Often needed because of Groups inserted for I/Os.
* And required for KEEP_STRUCTURE below.
*/
if (hwloc_topology_reconnect(topology, 0) < 0)
return -1;
hwloc_debug("%s", "\nRemoving levels with HWLOC_TYPE_FILTER_KEEP_STRUCTURE\n");
hwloc_filter_levels_keep_structure(topology);
if (hwloc_filter_levels_keep_structure(topology) < 0)
return -1;
/* takes care of reconnecting children/levels internally,
* because it needs normal levels.
* and it's often needed below because of Groups inserted for I/Os anyway */
hwloc_debug_print_objects(0, topology->levels[0][0]);
/* accumulate children memory in total_memory fields (only once parent is set) */
@@ -3716,7 +3826,27 @@ hwloc_topology_set_flags (struct hwloc_topology *topology, unsigned long flags)
return -1;
}
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES|HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT)) {
if (flags & ~(HWLOC_TOPOLOGY_FLAG_INCLUDE_DISALLOWED
|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM
|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES
|HWLOC_TOPOLOGY_FLAG_IMPORT_SUPPORT
|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING
|HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING
|HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING
|HWLOC_TOPOLOGY_FLAG_NO_DISTANCES
|HWLOC_TOPOLOGY_FLAG_NO_MEMATTRS
|HWLOC_TOPOLOGY_FLAG_NO_CPUKINDS)) {
errno = EINVAL;
return -1;
}
if ((flags & (HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM)) == HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING) {
/* RESTRICT_TO_CPUBINDING requires THISSYSTEM for binding */
errno = EINVAL;
return -1;
}
if ((flags & (HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM)) == HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING) {
/* RESTRICT_TO_MEMBINDING requires THISSYSTEM for binding */
errno = EINVAL;
return -1;
}
@@ -3970,15 +4100,11 @@ hwloc_topology_load (struct hwloc_topology *topology)
*/
hwloc_set_binding_hooks(topology);
hwloc_pci_discovery_prepare(topology);
/* actual topology discovery */
err = hwloc_discover(topology, &dstatus);
if (err < 0)
goto out;
hwloc_pci_discovery_exit(topology);
#ifndef HWLOC_DEBUG
if (getenv("HWLOC_DEBUG_CHECK"))
#endif
@@ -4000,9 +4126,35 @@ hwloc_topology_load (struct hwloc_topology *topology)
/* Same for memattrs */
hwloc_internal_memattrs_need_refresh(topology);
hwloc_internal_memattrs_refresh(topology);
hwloc_internal_memattrs_guess_memory_tiers(topology);
topology->is_loaded = 1;
if (topology->flags & HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING) {
/* FIXME: filter directly in backends during the discovery.
* Only x86 does it because binding may cause issues on Windows.
*/
hwloc_bitmap_t set = hwloc_bitmap_alloc();
if (set) {
err = hwloc_get_cpubind(topology, set, HWLOC_CPUBIND_STRICT);
if (!err)
hwloc_topology_restrict(topology, set, 0);
hwloc_bitmap_free(set);
}
}
if (topology->flags & HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_MEMBINDING) {
/* FIXME: filter directly in backends during the discovery.
*/
hwloc_bitmap_t set = hwloc_bitmap_alloc();
hwloc_membind_policy_t policy;
if (set) {
err = hwloc_get_membind(topology, set, &policy, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_BYNODESET);
if (!err)
hwloc_topology_restrict(topology, set, HWLOC_RESTRICT_FLAG_BYNODESET);
hwloc_bitmap_free(set);
}
}
if (topology->backend_phases & HWLOC_DISC_PHASE_TWEAK) {
dstatus.phase = HWLOC_DISC_PHASE_TWEAK;
hwloc_discover_by_phase(topology, &dstatus, "TWEAK");
@@ -4278,14 +4430,13 @@ hwloc_topology_restrict(struct hwloc_topology *topology, hwloc_const_bitmap_t se
hwloc_bitmap_free(droppedcpuset);
hwloc_bitmap_free(droppednodeset);
if (hwloc_topology_reconnect(topology, 0) < 0)
if (hwloc_filter_levels_keep_structure(topology) < 0) /* takes care of reconnecting internally */
goto out;
/* some objects may have disappeared, we need to update distances objs arrays */
hwloc_internal_distances_invalidate_cached_objs(topology);
hwloc_internal_memattrs_need_refresh(topology);
hwloc_filter_levels_keep_structure(topology);
hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
propagate_total_memory(topology->levels[0][0]);
hwloc_internal_cpukinds_restrict(topology);
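Several hunks above replace bare hwloc_hide_errors() checks with the HWLOC_SHOW_ALL_ERRORS() and HWLOC_SHOW_CRITICAL_ERRORS() wrappers. Those wrappers live in hwloc's private headers and are not shown in this diff; the lines below are only an assumed sketch of the three-level semantics implied by the new default (hide = 1):

/* Assumed sketch, not copied from hwloc: HWLOC_HIDE_ERRORS=0 shows everything,
 * 1 (the new default) shows only critical errors, 2 hides everything. */
#define HWLOC_SHOW_ALL_ERRORS()      (hwloc_hide_errors() == 0)
#define HWLOC_SHOW_CRITICAL_ERRORS() (hwloc_hide_errors() < 2)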


@@ -1,6 +1,6 @@
/*
* Copyright © 2009 CNRS
* Copyright © 2009-2020 Inria. All rights reserved.
* Copyright © 2009-2021 Inria. All rights reserved.
* Copyright © 2009-2010, 2020 Université Bordeaux
* Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved.
* See COPYING in top-level directory.
@@ -395,6 +395,8 @@ hwloc_type_sscanf(const char *string, hwloc_obj_type_t *typep,
} else if (hwloc__type_match(string, "pcibridge", 5)) {
type = HWLOC_OBJ_BRIDGE;
ubtype = HWLOC_OBJ_BRIDGE_PCI;
/* if downstream_type can ever be non-PCI, we'll have to make strings more precise,
* or relax the hwloc_type_sscanf test */
} else if (hwloc__type_match(string, "pcidev", 3)) {
type = HWLOC_OBJ_PCI_DEVICE;
@@ -448,7 +450,9 @@ hwloc_type_sscanf(const char *string, hwloc_obj_type_t *typep,
attrp->group.depth = depthattr;
} else if (type == HWLOC_OBJ_BRIDGE && attrsize >= sizeof(attrp->bridge)) {
attrp->bridge.upstream_type = ubtype;
attrp->bridge.downstream_type = HWLOC_OBJ_BRIDGE_PCI; /* nothing else so far */
attrp->bridge.downstream_type = HWLOC_OBJ_BRIDGE_PCI;
/* if downstream_type can ever be non-PCI, we'll have to make strings more precise,
* or relax the hwloc_type_sscanf test */
} else if (type == HWLOC_OBJ_OS_DEVICE && attrsize >= sizeof(attrp->osdev)) {
attrp->osdev.type = ostype;
}
@@ -531,6 +535,9 @@ hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t
else
return hwloc_snprintf(string, size, "%s", hwloc_obj_type_string(type));
case HWLOC_OBJ_BRIDGE:
/* if downstream_type can ever be non-PCI, we'll have to make strings more precise,
* or relax the hwloc_type_sscanf test */
assert(obj->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI);
return hwloc_snprintf(string, size, obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI ? "PCIBridge" : "HostBridge");
case HWLOC_OBJ_PCI_DEVICE:
return hwloc_snprintf(string, size, "PCI");
@@ -648,8 +655,11 @@ hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t
} else
*up = '\0';
/* downstream is_PCI */
snprintf(down, sizeof(down), "buses=%04x:[%02x-%02x]",
obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
if (obj->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI) {
snprintf(down, sizeof(down), "buses=%04x:[%02x-%02x]",
obj->attr->bridge.downstream.pci.domain, obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus);
} else
assert(0);
if (*up)
res = hwloc_snprintf(string, size, "%s%s%s", up, separator, down);
else
@@ -736,3 +746,92 @@ int hwloc_bitmap_singlify_per_core(hwloc_topology_t topology, hwloc_bitmap_t cpu
}
return 0;
}
hwloc_obj_t
hwloc_get_obj_with_same_locality(hwloc_topology_t topology, hwloc_obj_t src,
hwloc_obj_type_t type, const char *subtype, const char *nameprefix,
unsigned long flags)
{
if (flags) {
errno = EINVAL;
return NULL;
}
if (hwloc_obj_type_is_normal(src->type) || hwloc_obj_type_is_memory(src->type)) {
/* normal/memory type, look for normal/memory type with same sets */
hwloc_obj_t obj;
if (!hwloc_obj_type_is_normal(type) && !hwloc_obj_type_is_memory(type)) {
errno = EINVAL;
return NULL;
}
obj = NULL;
while ((obj = hwloc_get_next_obj_by_type(topology, type, obj)) != NULL) {
if (!hwloc_bitmap_isequal(src->cpuset, obj->cpuset)
|| !hwloc_bitmap_isequal(src->nodeset, obj->nodeset))
continue;
if (subtype && (!obj->subtype || strcasecmp(subtype, obj->subtype)))
continue;
if (nameprefix && (!obj->name || hwloc_strncasecmp(nameprefix, obj->name, strlen(nameprefix))))
continue;
return obj;
}
errno = ENOENT;
return NULL;
} else if (hwloc_obj_type_is_io(src->type)) {
/* I/O device, look for PCI/OS in same PCI */
hwloc_obj_t pci;
if ((src->type != HWLOC_OBJ_OS_DEVICE && src->type != HWLOC_OBJ_PCI_DEVICE)
|| (type != HWLOC_OBJ_OS_DEVICE && type != HWLOC_OBJ_PCI_DEVICE)) {
errno = EINVAL;
return NULL;
}
/* walk up to find the container */
pci = src;
while (pci->type == HWLOC_OBJ_OS_DEVICE)
pci = pci->parent;
if (type == HWLOC_OBJ_PCI_DEVICE) {
if (pci->type != HWLOC_OBJ_PCI_DEVICE) {
errno = ENOENT;
return NULL;
}
if (subtype && (!pci->subtype || strcasecmp(subtype, pci->subtype))) {
errno = ENOENT;
return NULL;
}
if (nameprefix && (!pci->name || hwloc_strncasecmp(nameprefix, pci->name, strlen(nameprefix)))) {
errno = ENOENT;
return NULL;
}
return pci;
} else {
/* find a matching osdev child */
assert(type == HWLOC_OBJ_OS_DEVICE);
/* FIXME: won't work if we ever store osdevs in osdevs */
hwloc_obj_t child;
for(child = pci->io_first_child; child; child = child->next_sibling) {
if (child->type != HWLOC_OBJ_OS_DEVICE)
/* FIXME: should never occur currently */
continue;
if (subtype && (!child->subtype || strcasecmp(subtype, child->subtype)))
continue;
if (nameprefix && (!child->name || hwloc_strncasecmp(nameprefix, child->name, strlen(nameprefix))))
continue;
return child;
}
}
errno = ENOENT;
return NULL;
} else {
/* nothing for Misc */
errno = EINVAL;
return NULL;
}
}
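The new hwloc_get_obj_with_same_locality() above maps an object to another object type sharing the same locality. A usage sketch based only on the signature and errno behavior shown in the hunk; the DRM-to-"CUDA" pairing is a hypothetical example:

#include <hwloc.h>

/* Sketch: find the OS device with subtype "CUDA" that shares its PCI parent
 * with a given DRM OS device. Returns NULL with errno set to ENOENT when
 * nothing matches, EINVAL for unsupported type/flags. */
static hwloc_obj_t find_cuda_for_drm(hwloc_topology_t topology, hwloc_obj_t drm_osdev)
{
    return hwloc_get_obj_with_same_locality(topology, drm_osdev,
                                            HWLOC_OBJ_OS_DEVICE,
                                            "CUDA", /* subtype */
                                            NULL,   /* nameprefix: any name */
                                            0);     /* flags must be 0 */
}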


@@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 2.8.12)
cmake_minimum_required(VERSION 3.1)
project (ethash C)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os")

File diff suppressed because it is too large.


@@ -1,21 +1,28 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ALLOCATORS_H_
#define RAPIDJSON_ALLOCATORS_H_
#include "rapidjson.h"
#include "internal/meta.h"
#include <memory>
#if RAPIDJSON_HAS_CXX11
#include <type_traits>
#endif
RAPIDJSON_NAMESPACE_BEGIN
@@ -24,10 +31,10 @@ RAPIDJSON_NAMESPACE_BEGIN
/*! \class rapidjson::Allocator
\brief Concept for allocating, resizing and freeing memory block.
Note that Malloc() and Realloc() are non-static but Free() is static.
So if an allocator need to support Free(), it needs to put its pointer in
So if an allocator need to support Free(), it needs to put its pointer in
the header of memory block.
\code
@@ -75,28 +82,35 @@ concept Allocator {
class CrtAllocator {
public:
static const bool kNeedFree = true;
void* Malloc(size_t size) {
void* Malloc(size_t size) {
if (size) // behavior of malloc(0) is implementation defined.
return std::malloc(size);
return RAPIDJSON_MALLOC(size);
else
return NULL; // standardize to returning NULL.
}
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
(void)originalSize;
if (newSize == 0) {
std::free(originalPtr);
RAPIDJSON_FREE(originalPtr);
return NULL;
}
return std::realloc(originalPtr, newSize);
return RAPIDJSON_REALLOC(originalPtr, newSize);
}
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }
bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return true;
}
bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return false;
}
static void Free(void *ptr) { std::free(ptr); }
};
///////////////////////////////////////////////////////////////////////////////
// MemoryPoolAllocator
//! Default memory allocator used by the parser and DOM.
/*! This allocator allocate memory blocks from pre-allocated memory chunks.
/*! This allocator allocate memory blocks from pre-allocated memory chunks.
It does not free memory blocks. And Realloc() only allocate new memory.
@@ -113,16 +127,64 @@ public:
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
//! Chunk header for perpending to each chunk.
/*! Chunks are stored as a singly linked list.
*/
struct ChunkHeader {
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
size_t size; //!< Current size of allocated memory in bytes.
ChunkHeader *next; //!< Next chunk in the linked list.
};
struct SharedData {
ChunkHeader *chunkHead; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
size_t refcount;
bool ownBuffer;
};
static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));
static inline ChunkHeader *GetChunkHead(SharedData *shared)
{
return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
}
static inline uint8_t *GetChunkBuffer(SharedData *shared)
{
return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
}
static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
public:
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
static const bool kRefCounted = true; //!< Tell users that this allocator is reference counted on copy
//! Constructor with chunkSize.
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
\param baseAllocator The allocator for allocating memory chunks.
*/
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
explicit
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
{
RAPIDJSON_ASSERT(baseAllocator_ != 0);
RAPIDJSON_ASSERT(shared_ != 0);
if (baseAllocator) {
shared_->ownBaseAllocator = 0;
}
else {
shared_->ownBaseAllocator = baseAllocator_;
}
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = 0;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBuffer = true;
shared_->refcount = 1;
}
//! Constructor with user-supplied buffer.
@@ -136,41 +198,101 @@ public:
\param baseAllocator The allocator for allocating memory chunks.
*/
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator),
shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
{
RAPIDJSON_ASSERT(buffer != 0);
RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
chunkHead_->capacity = size - sizeof(ChunkHeader);
chunkHead_->size = 0;
chunkHead_->next = 0;
RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBaseAllocator = 0;
shared_->ownBuffer = false;
shared_->refcount = 1;
}
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
chunk_capacity_(rhs.chunk_capacity_),
baseAllocator_(rhs.baseAllocator_),
shared_(rhs.shared_)
{
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
++shared_->refcount;
}
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
{
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
++rhs.shared_->refcount;
this->~MemoryPoolAllocator();
baseAllocator_ = rhs.baseAllocator_;
chunk_capacity_ = rhs.chunk_capacity_;
shared_ = rhs.shared_;
return *this;
}
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
MemoryPoolAllocator(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT :
chunk_capacity_(rhs.chunk_capacity_),
baseAllocator_(rhs.baseAllocator_),
shared_(rhs.shared_)
{
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
rhs.shared_ = 0;
}
MemoryPoolAllocator& operator=(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT
{
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
this->~MemoryPoolAllocator();
baseAllocator_ = rhs.baseAllocator_;
chunk_capacity_ = rhs.chunk_capacity_;
shared_ = rhs.shared_;
rhs.shared_ = 0;
return *this;
}
#endif
//! Destructor.
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
*/
~MemoryPoolAllocator() {
~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
if (!shared_) {
// do nothing if moved
return;
}
if (shared_->refcount > 1) {
--shared_->refcount;
return;
}
Clear();
RAPIDJSON_DELETE(ownBaseAllocator_);
BaseAllocator *a = shared_->ownBaseAllocator;
if (shared_->ownBuffer) {
baseAllocator_->Free(shared_);
}
RAPIDJSON_DELETE(a);
}
//! Deallocates all memory chunks, excluding the user-supplied buffer.
void Clear() {
while (chunkHead_ && chunkHead_ != userBuffer_) {
ChunkHeader* next = chunkHead_->next;
baseAllocator_->Free(chunkHead_);
chunkHead_ = next;
//! Deallocates all memory chunks, excluding the first/user one.
void Clear() RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
for (;;) {
ChunkHeader* c = shared_->chunkHead;
if (!c->next) {
break;
}
shared_->chunkHead = c->next;
baseAllocator_->Free(c);
}
if (chunkHead_ && chunkHead_ == userBuffer_)
chunkHead_->size = 0; // Clear user buffer
shared_->chunkHead->size = 0;
}
//! Computes the total capacity of allocated memory chunks.
/*! \return total capacity in bytes.
*/
size_t Capacity() const {
size_t Capacity() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t capacity = 0;
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
capacity += c->capacity;
return capacity;
}
@@ -178,25 +300,35 @@ public:
//! Computes the memory blocks allocated.
/*! \return total used bytes.
*/
size_t Size() const {
size_t Size() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t size = 0;
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
size += c->size;
return size;
}
//! Whether the allocator is shared.
/*! \return true or false.
*/
bool Shared() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
return shared_->refcount > 1;
}
//! Allocates a memory block. (concept Allocator)
void* Malloc(size_t size) {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (!size)
return NULL;
size = RAPIDJSON_ALIGN(size);
if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
return NULL;
void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
chunkHead_->size += size;
void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
shared_->chunkHead->size += size;
return buffer;
}
@@ -205,6 +337,7 @@ public:
if (originalPtr == 0)
return Malloc(newSize);
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (newSize == 0)
return NULL;
@@ -216,10 +349,10 @@ public:
return originalPtr;
// Simply expand it if it is the last allocation and there is sufficient space
if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
size_t increment = static_cast<size_t>(newSize - originalSize);
if (chunkHead_->size + increment <= chunkHead_->capacity) {
chunkHead_->size += increment;
if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
shared_->chunkHead->size += increment;
return originalPtr;
}
}
@@ -235,50 +368,325 @@ public:
}
//! Frees a memory block (concept Allocator)
static void Free(void *ptr) { (void)ptr; } // Do nothing
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing
//! Compare (equality) with another MemoryPoolAllocator
bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
return shared_ == rhs.shared_;
}
//! Compare (inequality) with another MemoryPoolAllocator
bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
return !operator==(rhs);
}
private:
//! Copy constructor is not permitted.
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
//! Copy assignment operator is not permitted.
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;
//! Creates a new chunk.
/*! \param capacity Capacity of the chunk in bytes.
\return true if success.
*/
bool AddChunk(size_t capacity) {
if (!baseAllocator_)
ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
chunk->capacity = capacity;
chunk->size = 0;
chunk->next = chunkHead_;
chunkHead_ = chunk;
chunk->next = shared_->chunkHead;
shared_->chunkHead = chunk;
return true;
}
else
return false;
}
static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
static inline void* AlignBuffer(void* buf, size_t &size)
{
RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
const uintptr_t mask = sizeof(void*) - 1;
const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
const uintptr_t abuf = (ubuf + mask) & ~mask;
RAPIDJSON_ASSERT(size >= abuf - ubuf);
buf = reinterpret_cast<void*>(abuf);
size -= abuf - ubuf;
}
return buf;
}
//! Chunk header for perpending to each chunk.
/*! Chunks are stored as a singly linked list.
*/
struct ChunkHeader {
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
size_t size; //!< Current size of allocated memory in bytes.
ChunkHeader *next; //!< Next chunk in the linked list.
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
SharedData *shared_; //!< The shared data of the allocator
};
namespace internal {
template<typename, typename = void>
struct IsRefCounted :
public FalseType
{ };
template<typename T>
struct IsRefCounted<T, typename internal::EnableIfCond<T::kRefCounted>::Type> :
public TrueType
{ };
}
template<typename T, typename A>
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
{
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= SIZE_MAX / sizeof(T) && new_n <= SIZE_MAX / sizeof(T));
return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
}
template<typename T, typename A>
inline T *Malloc(A& a, size_t n = 1)
{
return Realloc<T, A>(a, NULL, 0, n);
}
template<typename T, typename A>
inline void Free(A& a, T *p, size_t n = 1)
{
static_cast<void>(Realloc<T, A>(a, p, n, 0));
}
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
#endif
template <typename T, typename BaseAllocator = CrtAllocator>
class StdAllocator :
public std::allocator<T>
{
typedef std::allocator<T> allocator_type;
#if RAPIDJSON_HAS_CXX11
typedef std::allocator_traits<allocator_type> traits_type;
#else
typedef allocator_type traits_type;
#endif
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
StdAllocator(StdAllocator&& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(std::move(rhs)),
baseAllocator_(std::move(rhs.baseAllocator_))
{ }
#endif
#if RAPIDJSON_HAS_CXX11
using propagate_on_container_move_assignment = std::true_type;
using propagate_on_container_swap = std::true_type;
#endif
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
void *userBuffer_; //!< User supplied buffer.
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object.
typedef typename traits_type::size_type size_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
typedef typename traits_type::pointer pointer;
typedef typename traits_type::const_pointer const_pointer;
#if RAPIDJSON_HAS_CXX11
typedef typename std::add_lvalue_reference<value_type>::type &reference;
typedef typename std::add_lvalue_reference<typename std::add_const<value_type>::type>::type &const_reference;
pointer address(reference r) const RAPIDJSON_NOEXCEPT
{
return std::addressof(r);
}
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
{
return std::addressof(r);
}
size_type max_size() const RAPIDJSON_NOEXCEPT
{
return traits_type::max_size(*this);
}
template <typename ...Args>
void construct(pointer p, Args&&... args)
{
traits_type::construct(*this, p, std::forward<Args>(args)...);
}
void destroy(pointer p)
{
traits_type::destroy(*this, p);
}
#else // !RAPIDJSON_HAS_CXX11
typedef typename allocator_type::reference reference;
typedef typename allocator_type::const_reference const_reference;
pointer address(reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
size_type max_size() const RAPIDJSON_NOEXCEPT
{
return allocator_type::max_size();
}
void construct(pointer p, const_reference r)
{
allocator_type::construct(p, r);
}
void destroy(pointer p)
{
allocator_type::destroy(p);
}
#endif // !RAPIDJSON_HAS_CXX11
template <typename U>
U* allocate(size_type n = 1, const void* = 0)
{
return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
}
template <typename U>
void deallocate(U* p, size_type n = 1)
{
RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
}
pointer allocate(size_type n = 1, const void* = 0)
{
return allocate<value_type>(n);
}
void deallocate(pointer p, size_type n = 1)
{
deallocate<value_type>(p, n);
}
#if RAPIDJSON_HAS_CXX11
using is_always_equal = std::is_empty<BaseAllocator>;
#endif
template<typename U>
bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return baseAllocator_ == rhs.baseAllocator_;
}
template<typename U>
bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return !operator==(rhs);
}
//! rapidjson Allocator concept
static const bool kNeedFree = BaseAllocator::kNeedFree;
static const bool kRefCounted = internal::IsRefCounted<BaseAllocator>::Value;
void* Malloc(size_t size)
{
return baseAllocator_.Malloc(size);
}
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
{
return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
}
static void Free(void *ptr) RAPIDJSON_NOEXCEPT
{
BaseAllocator::Free(ptr);
}
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
#if !RAPIDJSON_HAS_CXX17 // std::allocator<void> deprecated in C++17
template <typename BaseAllocator>
class StdAllocator<void, BaseAllocator> :
public std::allocator<void>
{
typedef std::allocator<void> allocator_type;
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
/* implicit */
StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(baseAllocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
typedef typename allocator_type::value_type value_type;
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
#endif
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ENCODINGS_H_
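The allocator rework above makes MemoryPoolAllocator copyable: copies share one SharedData/chunk list and only the last owner releases it. A minimal usage sketch relying only on members visible in this diff (the default constructor, Malloc, Shared, Free and the copy constructor):

#include "rapidjson/allocators.h"

void pool_sharing_demo() {
    rapidjson::MemoryPoolAllocator<> a;          // allocates SharedData plus the head chunk
    void *p = a.Malloc(64);                      // served from the head chunk
    {
        rapidjson::MemoryPoolAllocator<> b(a);   // copy: refcount becomes 2, chunks are shared
        (void)b;                                 // a.Shared() and b.Shared() both return true here
    }                                            // b's destructor only decrements the refcount
    rapidjson::MemoryPoolAllocator<>::Free(p);   // no-op: pool memory is reclaimed in bulk
}                                                // a's destructor frees all chunks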


@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at

File diff suppressed because it is too large.


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ENCODEDSTREAM_H_
@@ -41,7 +41,7 @@ class EncodedInputStream {
public:
typedef typename Encoding::Ch Ch;
EncodedInputStream(InputByteStream& is) : is_(is) {
EncodedInputStream(InputByteStream& is) : is_(is) {
current_ = Encoding::TakeBOM(is_);
}
@@ -51,7 +51,7 @@ public:
// Not implemented
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
@@ -80,7 +80,7 @@ public:
// Not implemented
void Put(Ch) {}
void Flush() {}
void Flush() {}
Ch* PutBegin() { return 0; }
size_t PutEnd(Ch*) { return 0; }
@@ -102,7 +102,7 @@ class EncodedOutputStream {
public:
typedef typename Encoding::Ch Ch;
EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
if (putBOM)
Encoding::PutBOM(os_);
}
@@ -143,7 +143,7 @@ public:
\param type UTF encoding type if it is not detected from the stream.
*/
AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
DetectType();
static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) };
takeFunc_ = f[type_];
@@ -159,7 +159,7 @@ public:
// Not implemented
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
@@ -258,7 +258,7 @@ public:
UTFType GetType() const { return type_; }
void Put(Ch c) { putFunc_(*os_, c); }
void Flush() { os_->Flush(); }
void Flush() { os_->Flush(); }
// Not implemented
Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
@@ -271,7 +271,7 @@ private:
AutoUTFOutputStream(const AutoUTFOutputStream&);
AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);
void PutBOM() {
void PutBOM() {
typedef void (*PutBOMFunc)(OutputByteStream&);
static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
f[type_](*os_);


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ENCODINGS_H_
@@ -100,7 +100,7 @@ struct UTF8 {
template<typename OutputStream>
static void Encode(OutputStream& os, unsigned codepoint) {
if (codepoint <= 0x7F)
if (codepoint <= 0x7F)
os.Put(static_cast<Ch>(codepoint & 0xFF));
else if (codepoint <= 0x7FF) {
os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
@@ -122,7 +122,7 @@ struct UTF8 {
template<typename OutputStream>
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
if (codepoint <= 0x7F)
if (codepoint <= 0x7F)
PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
else if (codepoint <= 0x7FF) {
PutUnsafe(os, static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
@@ -276,7 +276,7 @@ struct UTF16 {
static void Encode(OutputStream& os, unsigned codepoint) {
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
if (codepoint <= 0xFFFF) {
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
os.Put(static_cast<typename OutputStream::Ch>(codepoint));
}
else {
@@ -292,7 +292,7 @@ struct UTF16 {
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
if (codepoint <= 0xFFFF) {
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
PutUnsafe(os, static_cast<typename OutputStream::Ch>(codepoint));
}
else {
@@ -406,7 +406,7 @@ struct UTF16BE : UTF16<CharType> {
///////////////////////////////////////////////////////////////////////////////
// UTF32
//! UTF-32 encoding.
//! UTF-32 encoding.
/*! http://en.wikipedia.org/wiki/UTF-32
\tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead.
\note implements Encoding concept
@@ -498,7 +498,7 @@ struct UTF32BE : UTF32<CharType> {
static CharType TakeBOM(InputByteStream& is) {
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
CharType c = Take(is);
return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
}
template <typename InputByteStream>
@@ -694,13 +694,13 @@ struct Transcoder<Encoding, Encoding> {
os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class.
return true;
}
template<typename InputStream, typename OutputStream>
static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class.
return true;
}
template<typename InputStream, typename OutputStream>
static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) {
return Encoding::Validate(is, os); // source/target encoding are the same


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_EN_H_
@@ -39,13 +39,13 @@ inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErro
case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");
case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");
case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");
case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");
case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
@@ -65,6 +65,54 @@ inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErro
}
}
//! Maps error code of validation into error message.
/*!
\ingroup RAPIDJSON_ERRORS
\param validateErrorCode Error code obtained from validator.
\return the error message.
\note User can make a copy of this function for localization.
Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetValidateError_En(ValidateErrorCode validateErrorCode) {
switch (validateErrorCode) {
case kValidateErrors: return RAPIDJSON_ERROR_STRING("One or more validation errors have occurred");
case kValidateErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
case kValidateErrorMultipleOf: return RAPIDJSON_ERROR_STRING("Number '%actual' is not a multiple of the 'multipleOf' value '%expected'.");
case kValidateErrorMaximum: return RAPIDJSON_ERROR_STRING("Number '%actual' is greater than the 'maximum' value '%expected'.");
case kValidateErrorExclusiveMaximum: return RAPIDJSON_ERROR_STRING("Number '%actual' is greater than or equal to the 'exclusiveMaximum' value '%expected'.");
case kValidateErrorMinimum: return RAPIDJSON_ERROR_STRING("Number '%actual' is less than the 'minimum' value '%expected'.");
case kValidateErrorExclusiveMinimum: return RAPIDJSON_ERROR_STRING("Number '%actual' is less than or equal to the 'exclusiveMinimum' value '%expected'.");
case kValidateErrorMaxLength: return RAPIDJSON_ERROR_STRING("String '%actual' is longer than the 'maxLength' value '%expected'.");
case kValidateErrorMinLength: return RAPIDJSON_ERROR_STRING("String '%actual' is shorter than the 'minLength' value '%expected'.");
case kValidateErrorPattern: return RAPIDJSON_ERROR_STRING("String '%actual' does not match the 'pattern' regular expression.");
case kValidateErrorMaxItems: return RAPIDJSON_ERROR_STRING("Array of length '%actual' is longer than the 'maxItems' value '%expected'.");
case kValidateErrorMinItems: return RAPIDJSON_ERROR_STRING("Array of length '%actual' is shorter than the 'minItems' value '%expected'.");
case kValidateErrorUniqueItems: return RAPIDJSON_ERROR_STRING("Array has duplicate items at indices '%duplicates' but 'uniqueItems' is true.");
case kValidateErrorAdditionalItems: return RAPIDJSON_ERROR_STRING("Array has an additional item at index '%disallowed' that is not allowed by the schema.");
case kValidateErrorMaxProperties: return RAPIDJSON_ERROR_STRING("Object has '%actual' members which is more than 'maxProperties' value '%expected'.");
case kValidateErrorMinProperties: return RAPIDJSON_ERROR_STRING("Object has '%actual' members which is less than 'minProperties' value '%expected'.");
case kValidateErrorRequired: return RAPIDJSON_ERROR_STRING("Object is missing the following members required by the schema: '%missing'.");
case kValidateErrorAdditionalProperties: return RAPIDJSON_ERROR_STRING("Object has an additional member '%disallowed' that is not allowed by the schema.");
case kValidateErrorPatternProperties: return RAPIDJSON_ERROR_STRING("Object has 'patternProperties' that are not allowed by the schema.");
case kValidateErrorDependencies: return RAPIDJSON_ERROR_STRING("Object has missing property or schema dependencies, refer to following errors.");
case kValidateErrorEnum: return RAPIDJSON_ERROR_STRING("Property has a value that is not one of its allowed enumerated values.");
case kValidateErrorType: return RAPIDJSON_ERROR_STRING("Property has a type '%actual' that is not in the following list: '%expected'.");
case kValidateErrorOneOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'oneOf', refer to following errors.");
case kValidateErrorOneOfMatch: return RAPIDJSON_ERROR_STRING("Property matched more than one of the sub-schemas specified by 'oneOf'.");
case kValidateErrorAllOf: return RAPIDJSON_ERROR_STRING("Property did not match all of the sub-schemas specified by 'allOf', refer to following errors.");
case kValidateErrorAnyOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'anyOf', refer to following errors.");
case kValidateErrorNot: return RAPIDJSON_ERROR_STRING("Property matched the sub-schema specified by 'not'.");
default: return RAPIDJSON_ERROR_STRING("Unknown error.");
}
}
RAPIDJSON_NAMESPACE_END
#ifdef __clang__
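GetValidateError_En() above pairs with the schema validator from the same RapidJSON update; a hedged sketch of the intended usage, assuming the standard schema.h public API (function name and message text are illustrative):

    #include "rapidjson/document.h"
    #include "rapidjson/schema.h"
    #include "rapidjson/error/en.h"
    #include <cstdio>

    bool ValidateAgainstSchema(const rapidjson::Document& schemaJson, const rapidjson::Document& doc) {
        rapidjson::SchemaDocument schema(schemaJson);      // compile the schema once
        rapidjson::SchemaValidator validator(schema);
        if (doc.Accept(validator))
            return true;                                   // document satisfies the schema
        // GetInvalidSchemaCode() returns the ValidateErrorCode consumed by GetValidateError_En().
        std::printf("validation failed: %s (keyword '%s')\n",
                    rapidjson::GetValidateError_En(validator.GetInvalidSchemaCode()),
                    validator.GetInvalidSchemaKeyword());
        return false;
    }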


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_ERROR_H_
@@ -152,6 +152,61 @@ private:
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);
///////////////////////////////////////////////////////////////////////////////
// ValidateErrorCode
//! Error codes when validating.
/*! \ingroup RAPIDJSON_ERRORS
\see GenericSchemaValidator
*/
enum ValidateErrorCode {
kValidateErrors = -1, //!< Top level error code when kValidateContinueOnErrorsFlag set.
kValidateErrorNone = 0, //!< No error.
kValidateErrorMultipleOf, //!< Number is not a multiple of the 'multipleOf' value.
kValidateErrorMaximum, //!< Number is greater than the 'maximum' value.
kValidateErrorExclusiveMaximum, //!< Number is greater than or equal to the 'maximum' value.
kValidateErrorMinimum, //!< Number is less than the 'minimum' value.
kValidateErrorExclusiveMinimum, //!< Number is less than or equal to the 'minimum' value.
kValidateErrorMaxLength, //!< String is longer than the 'maxLength' value.
kValidateErrorMinLength, //!< String is longer than the 'maxLength' value.
kValidateErrorPattern, //!< String does not match the 'pattern' regular expression.
kValidateErrorMaxItems, //!< Array is longer than the 'maxItems' value.
kValidateErrorMinItems, //!< Array is shorter than the 'minItems' value.
kValidateErrorUniqueItems, //!< Array has duplicate items but 'uniqueItems' is true.
kValidateErrorAdditionalItems, //!< Array has additional items that are not allowed by the schema.
kValidateErrorMaxProperties, //!< Object has more members than 'maxProperties' value.
kValidateErrorMinProperties, //!< Object has less members than 'minProperties' value.
kValidateErrorRequired, //!< Object is missing one or more members required by the schema.
kValidateErrorAdditionalProperties, //!< Object has additional members that are not allowed by the schema.
kValidateErrorPatternProperties, //!< See other errors.
kValidateErrorDependencies, //!< Object has missing property or schema dependencies.
kValidateErrorEnum, //!< Property has a value that is not one of its allowed enumerated values
kValidateErrorType, //!< Property has a type that is not allowed by the schema..
kValidateErrorOneOf, //!< Property did not match any of the sub-schemas specified by 'oneOf'.
kValidateErrorOneOfMatch, //!< Property matched more than one of the sub-schemas specified by 'oneOf'.
kValidateErrorAllOf, //!< Property did not match all of the sub-schemas specified by 'allOf'.
kValidateErrorAnyOf, //!< Property did not match any of the sub-schemas specified by 'anyOf'.
kValidateErrorNot //!< Property matched the sub-schema specified by 'not'.
};
//! Function pointer type of GetValidateError().
/*! \ingroup RAPIDJSON_ERRORS
This is the prototype for \c GetValidateError_X(), where \c X is a locale.
User can dynamically change locale in runtime, e.g.:
\code
GetValidateErrorFunc GetValidateError = GetValidateError_En; // or whatever
const RAPIDJSON_ERROR_CHARTYPE* s = GetValidateError(validator.GetInvalidSchemaCode());
\endcode
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetValidateErrorFunc)(ValidateErrorCode);
RAPIDJSON_NAMESPACE_END
#ifdef __clang__


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_FILEREADSTREAM_H_
@@ -41,7 +41,7 @@ public:
\param buffer user-supplied buffer.
\param bufferSize size of buffer in bytes. Must >=4 bytes.
*/
FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
RAPIDJSON_ASSERT(fp_ != 0);
RAPIDJSON_ASSERT(bufferSize >= 4);
Read();
@@ -53,7 +53,7 @@ public:
// Not implemented
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
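The FileReadStream hunks above are whitespace-only; as a reminder of the constraint called out in the doc comment (user buffer of at least 4 bytes), typical usage looks roughly like this (file name is illustrative, error handling omitted):

    #include "rapidjson/document.h"
    #include "rapidjson/filereadstream.h"
    #include <cstdio>

    std::FILE* fp = std::fopen("config.json", "rb");   // "rb" avoids newline translation on Windows
    char readBuffer[65536];                            // must be >= 4 bytes
    rapidjson::FileReadStream is(fp, readBuffer, sizeof(readBuffer));
    rapidjson::Document d;
    d.ParseStream(is);
    std::fclose(fp);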


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_FILEWRITESTREAM_H_
@@ -33,11 +33,11 @@ class FileWriteStream {
public:
typedef char Ch; //!< Character type. Only support char.
FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
RAPIDJSON_ASSERT(fp_ != 0);
}
void Put(char c) {
if (current_ >= bufferEnd_)
Flush();
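Likewise for FileWriteStream: the changes above are cosmetic, and the stream is normally paired with a Writer. A sketch (file name and buffer size are illustrative, and d stands for a rapidjson::Document built elsewhere):

    #include "rapidjson/filewritestream.h"
    #include "rapidjson/writer.h"
    #include <cstdio>

    std::FILE* fp = std::fopen("out.json", "wb");
    char writeBuffer[65536];
    rapidjson::FileWriteStream os(fp, writeBuffer, sizeof(writeBuffer));
    rapidjson::Writer<rapidjson::FileWriteStream> writer(os);
    d.Accept(writer);      // serialize the DOM into the buffered stream
    os.Flush();            // explicit flush before closing does no harm
    std::fclose(fp);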


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_FWD_H_
@@ -101,8 +101,8 @@ class PrettyWriter;
// document.h
template <typename Encoding, typename Allocator>
struct GenericMember;
template <typename Encoding, typename Allocator>
class GenericMember;
template <bool Const, typename Encoding, typename Allocator>
class GenericMemberIterator;
@@ -110,7 +110,7 @@ class GenericMemberIterator;
template<typename CharType>
struct GenericStringRef;
template <typename Encoding, typename Allocator>
class GenericValue;
typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_BIGINTEGER_H_
@@ -17,7 +17,7 @@
#include "../rapidjson.h"
#if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64)
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_M_AMD64)
#include <intrin.h> // for _umul128
#pragma intrinsic(_umul128)
#endif
@@ -37,7 +37,8 @@ public:
digits_[0] = u;
}
BigInteger(const char* decimals, size_t length) : count_(1) {
template<typename Ch>
BigInteger(const Ch* decimals, size_t length) : count_(1) {
RAPIDJSON_ASSERT(length > 0);
digits_[0] = 0;
size_t i = 0;
@@ -51,7 +52,7 @@ public:
if (length > 0)
AppendDecimal64(decimals + i, decimals + i + length);
}
BigInteger& operator=(const BigInteger &rhs)
{
if (this != &rhs) {
@@ -60,9 +61,9 @@ public:
}
return *this;
}
BigInteger& operator=(uint64_t u) {
digits_[0] = u;
count_ = 1;
return *this;
}
@@ -95,7 +96,7 @@ public:
digits_[i] = MulAdd64(digits_[i], u, k, &hi);
k = hi;
}
if (k > 0)
PushBack(k);
@@ -118,7 +119,7 @@ public:
digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
k = p1 >> 32;
}
if (k > 0)
PushBack(k);
@@ -221,7 +222,8 @@ public:
bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
private:
void AppendDecimal64(const char* begin, const char* end) {
template<typename Ch>
void AppendDecimal64(const Ch* begin, const Ch* end) {
uint64_t u = ParseUint64(begin, end);
if (IsZero())
*this = u;
@@ -236,11 +238,12 @@ private:
digits_[count_++] = digit;
}
static uint64_t ParseUint64(const char* begin, const char* end) {
template<typename Ch>
static uint64_t ParseUint64(const Ch* begin, const Ch* end) {
uint64_t r = 0;
for (const char* p = begin; p != end; ++p) {
RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
r = r * 10u + static_cast<unsigned>(*p - '0');
for (const Ch* p = begin; p != end; ++p) {
RAPIDJSON_ASSERT(*p >= Ch('0') && *p <= Ch('9'));
r = r * 10u + static_cast<unsigned>(*p - Ch('0'));
}
return r;
}
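The BigInteger changes above only widen the digit pointer to a template character type; the class itself is an internal helper reached through the strtod path rather than called directly. A small sketch of the interface as it appears in this header (internal namespace; the include path is an assumption about the vendored layout):

    #include "rapidjson/internal/biginteger.h"

    using rapidjson::internal::BigInteger;

    const char digits[] = "18446744073709551616";   // 2^64, too large for uint64_t
    BigInteger big(digits, sizeof(digits) - 1);     // templated ctor accepts char or wchar_t digits
    bool zero = big.IsZero();                       // false
    big = 7u;                                       // operator=(uint64_t) collapses it to a single digit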

src/3rdparty/rapidjson/internal/clzll.h (new vendored file, 71 lines added)

@@ -0,0 +1,71 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_CLZLL_H_
#define RAPIDJSON_CLZLL_H_
#include "../rapidjson.h"
#if defined(_MSC_VER) && !defined(UNDER_CE)
#include <intrin.h>
#if defined(_WIN64)
#pragma intrinsic(_BitScanReverse64)
#else
#pragma intrinsic(_BitScanReverse)
#endif
#endif
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
inline uint32_t clzll(uint64_t x) {
// Passing 0 to __builtin_clzll is UB in GCC and results in an
// infinite loop in the software implementation.
RAPIDJSON_ASSERT(x != 0);
#if defined(_MSC_VER) && !defined(UNDER_CE)
unsigned long r = 0;
#if defined(_WIN64)
_BitScanReverse64(&r, x);
#else
// Scan the high 32 bits.
if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
return 63 - (r + 32);
// Scan the low 32 bits.
_BitScanReverse(&r, static_cast<uint32_t>(x & 0xFFFFFFFF));
#endif // _WIN64
return 63 - r;
#elif (defined(__GNUC__) && __GNUC__ >= 4) || RAPIDJSON_HAS_BUILTIN(__builtin_clzll)
// __builtin_clzll wrapper
return static_cast<uint32_t>(__builtin_clzll(x));
#else
// naive version
uint32_t r = 0;
while (!(x & (static_cast<uint64_t>(1) << 63))) {
x <<= 1;
++r;
}
return r;
#endif // _MSC_VER
}
#define RAPIDJSON_CLZLL RAPIDJSON_NAMESPACE::internal::clzll
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_CLZLL_H_
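clzll.h is the one genuinely new file in this group: it centralizes the count-leading-zeros helper that diyfp.h previously open-coded (see the Normalize() hunk further down). Its contract is simply "leading zero bits of a non-zero 64-bit value"; a quick illustration, assuming the header above is on the include path:

    #include "rapidjson/internal/clzll.h"   // the header shown above; path assumed
    #include <cstdint>

    uint64_t x = 0x100000000ULL;                    // bit 32 is the highest set bit
    uint32_t lz = rapidjson::internal::clzll(x);    // 63 - 32 = 31
    // RAPIDJSON_CLZLL is the macro alias defined at the end of the header;
    // passing 0 is forbidden, as the assert above enforces.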


@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -20,11 +20,11 @@
#define RAPIDJSON_DIYFP_H_
#include "../rapidjson.h"
#include "clzll.h"
#include <limits>
#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_umul128)
#endif
@@ -100,22 +100,8 @@ struct DiyFp {
}
DiyFp Normalize() const {
RAPIDJSON_ASSERT(f != 0); // https://stackoverflow.com/a/26809183/291737
#if defined(_MSC_VER) && defined(_M_AMD64)
unsigned long index;
_BitScanReverse64(&index, f);
return DiyFp(f << (63 - index), e - (63 - index));
#elif defined(__GNUC__) && __GNUC__ >= 4
int s = __builtin_clzll(f);
int s = static_cast<int>(clzll(f));
return DiyFp(f << s, e - s);
#else
DiyFp res = *this;
while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
res.f <<= 1;
res.e--;
}
return res;
#endif
}
DiyFp NormalizeBoundary() const {


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
@@ -58,7 +58,11 @@ inline int CountDecimalDigit32(uint32_t n) {
}
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
static const uint64_t kPow10[] = { 1U, 10U, 100U, 1000U, 10000U, 100000U, 1000000U, 10000000U, 100000000U,
1000000000U, 10000000000U, 100000000000U, 1000000000000U,
10000000000000U, 100000000000000U, 1000000000000000U,
10000000000000000U, 100000000000000000U, 1000000000000000000U,
10000000000000000000U };
const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
const DiyFp wp_w = Mp - W;
uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
@@ -86,7 +90,7 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff
uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
if (tmp <= delta) {
*K += kappa;
GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
GrisuRound(buffer, *len, delta, tmp, kPow10[kappa] << -one.e, wp_w.f);
return;
}
}
@@ -103,7 +107,7 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff
if (p2 < delta) {
*K += kappa;
int index = -kappa;
GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[index] : 0));
GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 20 ? kPow10[index] : 0));
return;
}
}


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_IEEE754_


@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_META_H_


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
@@ -27,8 +27,8 @@ namespace internal {
*/
inline double Pow10(int n) {
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_REGEX_H_
@@ -23,7 +23,6 @@
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
RAPIDJSON_DIAG_OFF(switch-enum)
RAPIDJSON_DIAG_OFF(implicit-fallthrough)
#elif defined(_MSC_VER)
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
@@ -32,9 +31,6 @@ RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#if __GNUC__ >= 7
RAPIDJSON_DIAG_OFF(implicit-fallthrough)
#endif
#endif
#ifndef RAPIDJSON_REGEX_VERBOSE
@@ -106,9 +102,9 @@ class GenericRegexSearch;
- \c \\t Tab (U+0009)
- \c \\v Vertical tab (U+000B)
\note This is a Thompson NFA engine, implemented with reference to
Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
https://swtch.com/~rsc/regexp/regexp1.html
*/
template <typename Encoding, typename Allocator = CrtAllocator>
class GenericRegex {
@@ -117,9 +113,9 @@ public:
typedef typename Encoding::Ch Ch;
template <typename, typename> friend class GenericRegexSearch;
GenericRegex(const Ch* source, Allocator* allocator = 0) :
ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? allocator : ownAllocator_),
states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
anchorBegin_(), anchorEnd_()
{
GenericStringStream<Encoding> ss(source);
@@ -151,7 +147,7 @@ private:
static const unsigned kRangeNegationFlag = 0x80000000;
struct Range {
unsigned start; //
unsigned end;
SizeType next;
};
@@ -291,6 +287,7 @@ private:
if (!CharacterEscape(ds, &codepoint))
return; // Unsupported escape character
// fall through to default
RAPIDJSON_DELIBERATE_FALLTHROUGH;
default: // Pattern character
PushOperand(operandStack, codepoint);
@@ -405,7 +402,7 @@ private:
}
return false;
default:
// syntax error (e.g. unclosed kLeftParenthesis)
return false;
}
@@ -520,6 +517,7 @@ private:
else if (!CharacterEscape(ds, &codepoint))
return false;
// fall through to default
RAPIDJSON_DELIBERATE_FALLTHROUGH;
default:
switch (step) {
@@ -529,6 +527,7 @@ private:
break;
}
// fall through to step 0 for other characters
RAPIDJSON_DELIBERATE_FALLTHROUGH;
case 0:
{
@@ -551,7 +550,7 @@ private:
}
return false;
}
SizeType NewRange(unsigned codepoint) {
Range* r = ranges_.template Push<Range>();
r->start = r->end = codepoint;
@@ -609,7 +608,7 @@ public:
typedef typename RegexType::EncodingType Encoding;
typedef typename Encoding::Ch Ch;
GenericRegexSearch(const RegexType& regex, Allocator* allocator = 0) :
regex_(regex), allocator_(allocator), ownAllocator_(0),
state0_(allocator, 0), state1_(allocator, 0), stateSet_()
{
@@ -668,7 +667,7 @@ private:
for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
const State& sr = regex_.GetState(*s);
if (sr.codepoint == codepoint ||
sr.codepoint == RegexType::kAnyCharacterClass ||
(sr.codepoint == RegexType::kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
{
matched = AddState(*next, sr.out) || matched;
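The regex hunks above mostly add RAPIDJSON_DELIBERATE_FALLTHROUGH annotations; the engine itself is the internal Thompson-NFA matcher used for the schema 'pattern' keyword. A hedged sketch of how it is driven, assuming the Match()/Search() overloads of GenericRegexSearch and an include path matching the vendored layout:

    #include "rapidjson/internal/regex.h"

    typedef rapidjson::internal::GenericRegex<rapidjson::UTF8<> > Regex;
    typedef rapidjson::internal::GenericRegexSearch<Regex> RegexSearch;

    Regex re("a[0-9]+z");                    // compiled in the constructor
    if (re.IsValid()) {
        RegexSearch rs(re);
        bool full = rs.Match("a2024z");      // anchored match of the whole input
        bool part = rs.Search("xx a7z yy");  // unanchored substring search
    }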


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_STACK_H_
@@ -98,7 +98,7 @@ public:
void Clear() { stackTop_ = stack_; }
void ShrinkToFit() {
if (Empty()) {
// If the stack is empty, completely deallocate the memory.
Allocator::Free(stack_); // NOLINT (+clang-analyzer-unix.Malloc)
@@ -142,7 +142,7 @@ public:
}
template<typename T>
T* Top() {
RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
return reinterpret_cast<T*>(stackTop_ - sizeof(T));
}


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
@@ -24,7 +24,7 @@ namespace internal {
//! Custom strlen() which works on different character types.
/*! \tparam Ch Character type (e.g. char, wchar_t, short)
\param s Null-terminated input string.
\return Number of characters in the string.
\note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
*/
template <typename Ch>
@@ -45,6 +45,20 @@ inline SizeType StrLen(const wchar_t* s) {
return SizeType(std::wcslen(s));
}
//! Custom strcmpn() which works on different character types.
/*! \tparam Ch Character type (e.g. char, wchar_t, short)
\param s1 Null-terminated input string.
\param s2 Null-terminated input string.
\return 0 if equal
*/
template<typename Ch>
inline int StrCmp(const Ch* s1, const Ch* s2) {
RAPIDJSON_ASSERT(s1 != 0);
RAPIDJSON_ASSERT(s2 != 0);
while(*s1 && (*s1 == *s2)) { s1++; s2++; }
return static_cast<unsigned>(*s1) < static_cast<unsigned>(*s2) ? -1 : static_cast<unsigned>(*s1) > static_cast<unsigned>(*s2);
}
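StrCmp() above is a new internal helper (the upstream doc comment calls it "strcmpn"); its return convention mirrors strcmp: 0 for equal, otherwise -1 or 1, and it works for any character type. A tiny illustration, with the include path assumed from the vendored layout:

    #include "rapidjson/internal/strfunc.h"

    using rapidjson::internal::StrCmp;

    int a = StrCmp("abc", "abc");    // 0: equal
    int b = StrCmp("abc", "abd");    // -1: first differing unit is smaller
    int c = StrCmp(L"b", L"a");      // 1: wchar_t works through the same template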
//! Returns number of code points in a encoded string.
template<typename Encoding>
bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_STRTOD_
@@ -128,17 +128,18 @@ inline bool StrtodFast(double d, int p, double* result) {
}
// Compute an approximation and see if it is within 1/2 ULP
inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
template<typename Ch>
inline bool StrtodDiyFp(const Ch* decimals, int dLen, int dExp, double* result) {
uint64_t significand = 0;
int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > Ch('5')))
break;
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
significand = significand * 10u + static_cast<unsigned>(decimals[i] - Ch('0'));
}
if (i < dLen && decimals[i] >= '5') // Rounding
if (i < dLen && decimals[i] >= Ch('5')) // Rounding
significand++;
int remaining = dLen - i;
@@ -184,7 +185,7 @@ inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result
if (precisionSize + kUlpShift >= 64) {
int scaleExp = (precisionSize + kUlpShift) - 63;
v.f >>= scaleExp;
v.e += scaleExp;
error = (error >> scaleExp) + 1 + kUlp;
precisionSize -= scaleExp;
}
@@ -205,7 +206,8 @@ inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
template<typename Ch>
inline double StrtodBigInteger(double approx, const Ch* decimals, int dLen, int dExp) {
RAPIDJSON_ASSERT(dLen >= 0);
const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
Double a(approx);
@@ -223,7 +225,8 @@ inline double StrtodBigInteger(double approx, const char* decimals, int dLen, in
return a.NextPositiveDouble();
}
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
template<typename Ch>
inline double StrtodFullPrecision(double d, int p, const Ch* decimals, size_t length, size_t decimalPosition, int exp) {
RAPIDJSON_ASSERT(d >= 0.0);
RAPIDJSON_ASSERT(length >= 1);


@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at


@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ISTREAMWRAPPER_H_
@@ -44,7 +44,7 @@ RAPIDJSON_NAMESPACE_BEGIN
\tparam StreamType Class derived from \c std::basic_istream.
*/
template <typename StreamType>
class BasicIStreamWrapper {
public:
@@ -54,7 +54,7 @@ public:
/*!
\param stream stream opened for read.
*/
BasicIStreamWrapper(StreamType &stream) : stream_(stream), buffer_(peekBuffer_), bufferSize_(4), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
Read();
}
@@ -64,7 +64,7 @@ public:
\param buffer user-supplied buffer.
\param bufferSize size of buffer in bytes. Must >=4 bytes.
*/
BasicIStreamWrapper(StreamType &stream, char* buffer, size_t bufferSize) : stream_(stream), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
RAPIDJSON_ASSERT(bufferSize >= 4);
Read();
}
@@ -75,7 +75,7 @@ public:
// Not implemented
void Put(Ch) { RAPIDJSON_ASSERT(false); }
void Flush() { RAPIDJSON_ASSERT(false); }
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
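BasicIStreamWrapper above is the template behind the public IStreamWrapper typedef; parsing straight from a std::istream usually looks like this (standard public API, a sketch not taken from the diff; the file name is illustrative):

    #include "rapidjson/document.h"
    #include "rapidjson/istreamwrapper.h"
    #include <fstream>

    std::ifstream ifs("config.json");
    rapidjson::IStreamWrapper isw(ifs);   // unbuffered form; the (char*, size) overload adds a user buffer
    rapidjson::Document d;
    d.ParseStream(isw);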

Some files were not shown because too many files have changed in this diff.