 * Hardware Resource Pool
 */
- // TODOs
- // * Resource reference counting.
- // * Need to deal with resource unavailability.
- // * Split and attach devices properly.
- // * Have bitmap scratch pad that is initialized once, then destroyed? This
- //   approach may be a nice allocation optimization, but in heavily threaded
- //   code may be a bottleneck.
-
-
- // Notes:
- // * Does it make sense attempting resource exclusivity? Why not just let the
- // users get what they ask for and hope that the abstractions that we provide do
- // a good enough job most of the time. Making the user deal with resource
- // exhaustion and retries (which will eventually be the case with
- // QV_RES_UNAVAILABLE) is error prone and often frustrating.
- //
- // * Reference Counting: we should probably still implement a rudimentary
- // reference counting system, but perhaps not for enforcing resource
- // exclusivity. Rather we could use this information to guide a collection of
- // resource allocators that would use resource availability for their pool
- // management strategies.
-
- // A Straightforward Reference Counting Approach: Maintain an array of integers
- // with length number of cpuset bits. As each resource (bitmap) is obtained,
- // increment the internal counter of each corresponding position. When a
- // resource is released, decrement in a similar way. If a location in the array
- // is zero, then the resource is not in use. For devices, we can take a similar
- // approach using the device IDs instead of the bit positions.
-
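For context, here is a minimal sketch of the reference-counting approach the
deleted comment above describes: an array of per-bit counters, incremented when
a bitmap is obtained and decremented when it is released. The type and member
names (qvi_refcounts_s, retain, release, in_use) are hypothetical
illustrations, not part of this change; the bitmap iteration uses the standard
hwloc API.

#include <hwloc.h>
#include <vector>

struct qvi_refcounts_s {
    // One counter per cpuset bit position; zero means the bit is not in use.
    std::vector<int> counts;

    explicit qvi_refcounts_s(size_t nbits) : counts(nbits, 0) { }

    // Increment the counter of every bit set in an obtained bitmap.
    void retain(hwloc_const_bitmap_t obtained) {
        for (int bit = hwloc_bitmap_first(obtained); bit != -1;
             bit = hwloc_bitmap_next(obtained, bit)) {
            counts.at(bit) += 1;
        }
    }

    // Decrement in a similar way when a bitmap is released.
    void release(hwloc_const_bitmap_t released) {
        for (int bit = hwloc_bitmap_first(released); bit != -1;
             bit = hwloc_bitmap_next(released, bit)) {
            counts.at(bit) -= 1;
        }
    }

    // A resource is in use while its counter is nonzero. Devices could be
    // tracked the same way, keyed by device ID instead of bit position.
    bool in_use(size_t bit) const {
        return counts.at(bit) != 0;
    }
};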
#include "qvi-hwpool.h"
#include "qvi-bbuff-rmi.h"
#include "qvi-utils.h"

- #if 0
- /**
-  *
-  */
- static int
- cpus_available(
-     hwloc_const_cpuset_t which,
-     hwloc_const_cpuset_t from,
-     bool *avail
- ) {
-     // TODO(skg) Cache storage for calculation?
-     hwloc_cpuset_t tcpus = nullptr;
-     int rc = qvi_hwloc_bitmap_calloc(&tcpus);
-     if (rc != QV_SUCCESS) return rc;
-
-     int hrc = hwloc_bitmap_and(tcpus, which, from);
-     if (hrc != 0) {
-         rc = QV_ERR_HWLOC;
-     }
-     if (rc == QV_SUCCESS) {
-         *avail = cpusets_equal(tcpus, which);
-     }
-     else {
-         *avail = false;
-     }
-     qvi_hwloc_bitmap_free(&tcpus);
-     return rc;
- }
- #endif
-
- /**
-  * Example:
-  * obcpuset  0110 0101
-  * request   1000 1010
-  * obcpuset' 1110 1111
-  */
- #if 0
- static int
- pool_obtain_cpus_by_cpuset(
-     qvi_hwpool_s *pool,
-     hwloc_const_cpuset_t request
- ) {
- #if 0
-     int hwrc = hwloc_bitmap_or(
-         pool->obcpuset,
-         pool->obcpuset,
-         request
-     );
-     return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC);
- #endif
-     QVI_UNUSED(pool);
-     QVI_UNUSED(request);
-     return QV_SUCCESS;
- }
- #endif
-
- /**
-  * Example:
-  * obcpuset  0110 0101
-  * release   0100 0100
-  * obcpuset' 0010 0001
-  */
- #if 0
- static int
- pool_release_cpus_by_cpuset(
-     qvi_hwpool_s *pool,
-     hwloc_const_cpuset_t release
- ) {
-     int hwrc = hwloc_bitmap_andnot(
-         pool->obcpuset,
-         pool->obcpuset,
-         release
-     );
-     return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC);
- }
- #endif
-
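For reference, a small standalone program (an illustrative sketch, not part of
this change) showing that hwloc's OR and ANDNOT operations reproduce the
obtain/release arithmetic in the deleted comments above; here the release step
is applied to the result of the obtain step.

#include <assert.h>
#include <hwloc.h>

int main(void) {
    hwloc_bitmap_t obcpuset = hwloc_bitmap_alloc();
    hwloc_bitmap_t request  = hwloc_bitmap_alloc();
    hwloc_bitmap_t release  = hwloc_bitmap_alloc();
    // obcpuset = 0110 0101 (bits 0, 2, 5, 6).
    hwloc_bitmap_set(obcpuset, 0);
    hwloc_bitmap_set(obcpuset, 2);
    hwloc_bitmap_set(obcpuset, 5);
    hwloc_bitmap_set(obcpuset, 6);
    // request = 1000 1010 (bits 1, 3, 7).
    hwloc_bitmap_set(request, 1);
    hwloc_bitmap_set(request, 3);
    hwloc_bitmap_set(request, 7);
    // Obtain: obcpuset' = obcpuset | request = 1110 1111.
    hwloc_bitmap_or(obcpuset, obcpuset, request);
    assert(hwloc_bitmap_weight(obcpuset) == 7);
    // Release bits 2 and 6: obcpuset'' = obcpuset' & ~(0100 0100) = 1010 1011.
    hwloc_bitmap_set(release, 2);
    hwloc_bitmap_set(release, 6);
    hwloc_bitmap_andnot(obcpuset, obcpuset, release);
    assert(!hwloc_bitmap_isset(obcpuset, 2) && !hwloc_bitmap_isset(obcpuset, 6));
    hwloc_bitmap_free(obcpuset);
    hwloc_bitmap_free(request);
    hwloc_bitmap_free(release);
    return 0;
}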
qv_scope_create_hints_t
qvi_hwpool_res_s::hints(void)
{
    return m_hints;
}

qvi_hwloc_bitmap_s &
- qvi_hwpool_cpu_s::cpuset(void)
+ qvi_hwpool_res_s::affinity(void)
{
-     return m_cpuset;
+     return m_affinity;
}

const qvi_hwloc_bitmap_s &
- qvi_hwpool_cpu_s::cpuset(void) const
+ qvi_hwpool_res_s::affinity(void) const
{
-     return m_cpuset;
+     return m_affinity;
}

int
@@ -149,7 +43,7 @@ qvi_hwpool_cpu_s::packinto(
    const int rc = qvi_bbuff_rmi_pack_item(buff, m_hints);
    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
    // Pack cpuset.
-     return qvi_bbuff_rmi_pack_item(buff, m_cpuset);
+     return qvi_bbuff_rmi_pack_item(buff, m_affinity);
}

int
@@ -168,7 +62,7 @@ qvi_hwpool_cpu_s::unpack(
    buffpos += bw;
    // Unpack bitmap.
    rc = qvi_bbuff_rmi_unpack_item(
-         cpu.m_cpuset, buffpos, &bw
+         cpu.m_affinity, buffpos, &bw
    );
    if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
    total_bw += bw;
@@ -209,7 +103,7 @@
qvi_hwpool_dev_s::id(
    qv_device_id_type_t format,
    char **result
- ) {
+ ) const {
    int rc = QV_SUCCESS, nw = 0;
    switch (format) {
        case (QV_DEVICE_ID_UUID):
@@ -233,12 +127,6 @@ qvi_hwpool_dev_s::id(
    return rc;
}

- const qvi_hwloc_bitmap_s &
- qvi_hwpool_dev_s::affinity(void) const
- {
-     return m_affinity;
- }
-
int
qvi_hwpool_dev_s::packinto(
    qvi_bbuff_t *buff
@@ -327,7 +215,7 @@ qvi_hwpool_s::add_devices_with_affinity(
    for (const auto devt : qvi_hwloc_supported_devices()) {
        qvi_hwloc_dev_list_t devs;
        rc = qvi_hwloc_get_devices_in_bitmap(
-             hwloc, devt, m_cpu.cpuset(), devs
+             hwloc, devt, m_cpu.affinity(), devs
        );
        if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
        for (const auto &dev : devs) {
@@ -339,7 +227,7 @@ qvi_hwpool_s::add_devices_with_affinity(
}

int
- qvi_hwpool_s::new_hwpool(
+ qvi_hwpool_s::create(
    qvi_hwloc_t *hwloc,
    hwloc_const_cpuset_t cpuset,
    qvi_hwpool_s **opool
@@ -362,7 +250,7 @@ qvi_hwpool_s::initialize(
    qvi_hwloc_t *hwloc,
    hwloc_const_bitmap_t cpuset
) {
-     const int rc = m_cpu.cpuset().set(cpuset);
+     const int rc = m_cpu.affinity().set(cpuset);
    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
    // Add devices with affinity to the hardware pool.
    return add_devices_with_affinity(hwloc);
@@ -371,7 +259,7 @@ qvi_hwpool_s::initialize(
const qvi_hwloc_bitmap_s &
qvi_hwpool_s::cpuset(void) const
{
-     return m_cpu.cpuset();
+     return m_cpu.affinity();
}

const qvi_hwpool_devs_t &
@@ -388,7 +276,7 @@ qvi_hwpool_s::nobjects(
) {
    if (qvi_hwloc_obj_type_is_host_resource(obj_type)) {
        return qvi_hwloc_get_nobjs_in_cpuset(
-             hwloc, obj_type, m_cpu.cpuset().cdata(), result
+             hwloc, obj_type, m_cpu.affinity().cdata(), result
        );
    }
    *result = m_devs.count(obj_type);