Skip to content

Commit f102e75

Browse files
Cleanup scope and hardware pool code. (#265)
Signed-off-by: Samuel K. Gutierrez <[email protected]>
1 parent d33610b commit f102e75

8 files changed: +173 additions, -186 deletions

src/quo-vadis-pthread.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ qv_pthread_scopes_free(
129129
return QV_ERR_INVLD_ARG;
130130
}
131131
try {
132-
qv_scope_s::thdel(&scopes, nscopes);
132+
qv_scope_s::thdestroy(&scopes, nscopes);
133133
return QV_SUCCESS;
134134
}
135135
qvi_catch_and_return();

src/quo-vadis.cc

+3-3
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ qv_scope_free(
8282
return QV_ERR_INVLD_ARG;
8383
}
8484
try {
85-
qv_scope_s::del(&scope);
85+
qv_scope_s::destroy(&scope);
8686
return QV_SUCCESS;
8787
}
8888
qvi_catch_and_return();
@@ -98,7 +98,7 @@ qv_scope_nobjs(
9898
return QV_ERR_INVLD_ARG;
9999
}
100100
try {
101-
*nobjs = scope->nobjects(obj);
101+
*nobjs = scope->hwpool_nobjects(obj);
102102
return QV_SUCCESS;
103103
}
104104
qvi_catch_and_return();
@@ -142,7 +142,7 @@ qv_scope_barrier(
142142
return QV_ERR_INVLD_ARG;
143143
}
144144
try {
145-
return scope->barrier();
145+
return scope->group_barrier();
146146
}
147147
qvi_catch_and_return();
148148
}

src/qvi-hwpool.cc

+12-124
Original file line numberDiff line numberDiff line change
@@ -13,132 +13,26 @@
1313
* Hardware Resource Pool
1414
*/
1515

16-
// TODOs
17-
// * Resource reference counting.
18-
// * Need to deal with resource unavailability.
19-
// * Split and attach devices properly.
20-
// * Have bitmap scratch pad that is initialized once, then destroyed? This
21-
// approach may be an nice allocation optimization, but in heavily threaded
22-
// code may be a bottleneck.
23-
24-
25-
// Notes:
26-
// * Does it make sense attempting resource exclusivity? Why not just let the
27-
// users get what they ask for and hope that the abstractions that we provide do
28-
// a good enough job most of the time. Making the user deal with resource
29-
// exhaustion and retries (which will eventually be the case with
30-
// QV_RES_UNAVAILABLE) is error prone and often frustrating.
31-
//
32-
// * Reference Counting: we should probably still implement a rudimentary
33-
// reference counting system, but perhaps not for enforcing resource
34-
// exclusivity. Rather we could use this information to guide a collection of
35-
// resource allocators that would use resource availability for their pool
36-
// management strategies.
37-
38-
// A Straightforward Reference Counting Approach: Maintain an array of integers
39-
// with length number of cpuset bits. As each resource (bitmap) is obtained,
40-
// increment the internal counter of each corresponding position. When a
41-
// resource is released, decrement in a similar way. If a location in the array
42-
// is zero, then the resource is not in use. For devices, we can take a similar
43-
// approach using the device IDs instead of the bit positions.
44-
4516
#include "qvi-hwpool.h"
4617
#include "qvi-bbuff-rmi.h"
4718
#include "qvi-utils.h"
4819

49-
#if 0
50-
/**
51-
*
52-
*/
53-
static int
54-
cpus_available(
55-
hwloc_const_cpuset_t which,
56-
hwloc_const_cpuset_t from,
57-
bool *avail
58-
) {
59-
// TODO(skg) Cache storage for calculation?
60-
hwloc_cpuset_t tcpus = nullptr;
61-
int rc = qvi_hwloc_bitmap_calloc(&tcpus);
62-
if (rc != QV_SUCCESS) return rc;
63-
64-
int hrc = hwloc_bitmap_and(tcpus, which, from);
65-
if (hrc != 0) {
66-
rc = QV_ERR_HWLOC;
67-
}
68-
if (rc == QV_SUCCESS) {
69-
*avail = cpusets_equal(tcpus, which);
70-
}
71-
else {
72-
*avail = false;
73-
}
74-
qvi_hwloc_bitmap_free(&tcpus);
75-
return rc;
76-
}
77-
#endif
78-
79-
/**
80-
* Example:
81-
* obcpuset 0110 0101
82-
* request 1000 1010
83-
* obcpuset' 1110 1111
84-
*/
85-
#if 0
86-
static int
87-
pool_obtain_cpus_by_cpuset(
88-
qvi_hwpool_s *pool,
89-
hwloc_const_cpuset_t request
90-
) {
91-
#if 0
92-
int hwrc = hwloc_bitmap_or(
93-
pool->obcpuset,
94-
pool->obcpuset,
95-
request
96-
);
97-
return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC);
98-
#endif
99-
QVI_UNUSED(pool);
100-
QVI_UNUSED(request);
101-
return QV_SUCCESS;
102-
}
103-
#endif
104-
105-
/**
106-
* Example:
107-
* obcpuset 0110 0101
108-
* release 0100 0100
109-
* obcpuset' 0010 0001
110-
*/
111-
#if 0
112-
static int
113-
pool_release_cpus_by_cpuset(
114-
qvi_hwpool_s *pool,
115-
hwloc_const_cpuset_t release
116-
) {
117-
int hwrc = hwloc_bitmap_andnot(
118-
pool->obcpuset,
119-
pool->obcpuset,
120-
release
121-
);
122-
return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC);
123-
}
124-
#endif
125-
12620
qv_scope_create_hints_t
12721
qvi_hwpool_res_s::hints(void)
12822
{
12923
return m_hints;
13024
}
13125

13226
qvi_hwloc_bitmap_s &
133-
qvi_hwpool_cpu_s::cpuset(void)
27+
qvi_hwpool_res_s::affinity(void)
13428
{
135-
return m_cpuset;
29+
return m_affinity;
13630
}
13731

13832
const qvi_hwloc_bitmap_s &
139-
qvi_hwpool_cpu_s::cpuset(void) const
33+
qvi_hwpool_res_s::affinity(void) const
14034
{
141-
return m_cpuset;
35+
return m_affinity;
14236
}
14337

14438
int
@@ -149,7 +43,7 @@ qvi_hwpool_cpu_s::packinto(
14943
const int rc = qvi_bbuff_rmi_pack_item(buff, m_hints);
15044
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
15145
// Pack cpuset.
152-
return qvi_bbuff_rmi_pack_item(buff, m_cpuset);
46+
return qvi_bbuff_rmi_pack_item(buff, m_affinity);
15347
}
15448

15549
int
@@ -168,7 +62,7 @@ qvi_hwpool_cpu_s::unpack(
16862
buffpos += bw;
16963
// Unpack bitmap.
17064
rc = qvi_bbuff_rmi_unpack_item(
171-
cpu.m_cpuset, buffpos, &bw
65+
cpu.m_affinity, buffpos, &bw
17266
);
17367
if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
17468
total_bw += bw;
@@ -209,7 +103,7 @@ int
209103
qvi_hwpool_dev_s::id(
210104
qv_device_id_type_t format,
211105
char **result
212-
) {
106+
) const {
213107
int rc = QV_SUCCESS, nw = 0;
214108
switch (format) {
215109
case (QV_DEVICE_ID_UUID):
@@ -233,12 +127,6 @@ qvi_hwpool_dev_s::id(
233127
return rc;
234128
}
235129

236-
const qvi_hwloc_bitmap_s &
237-
qvi_hwpool_dev_s::affinity(void) const
238-
{
239-
return m_affinity;
240-
}
241-
242130
int
243131
qvi_hwpool_dev_s::packinto(
244132
qvi_bbuff_t *buff
@@ -327,7 +215,7 @@ qvi_hwpool_s::add_devices_with_affinity(
327215
for (const auto devt : qvi_hwloc_supported_devices()) {
328216
qvi_hwloc_dev_list_t devs;
329217
rc = qvi_hwloc_get_devices_in_bitmap(
330-
hwloc, devt, m_cpu.cpuset(), devs
218+
hwloc, devt, m_cpu.affinity(), devs
331219
);
332220
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
333221
for (const auto &dev : devs) {
@@ -339,7 +227,7 @@ qvi_hwpool_s::add_devices_with_affinity(
339227
}
340228

341229
int
342-
qvi_hwpool_s::new_hwpool(
230+
qvi_hwpool_s::create(
343231
qvi_hwloc_t *hwloc,
344232
hwloc_const_cpuset_t cpuset,
345233
qvi_hwpool_s **opool
@@ -362,7 +250,7 @@ qvi_hwpool_s::initialize(
362250
qvi_hwloc_t *hwloc,
363251
hwloc_const_bitmap_t cpuset
364252
) {
365-
const int rc = m_cpu.cpuset().set(cpuset);
253+
const int rc = m_cpu.affinity().set(cpuset);
366254
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
367255
// Add devices with affinity to the hardware pool.
368256
return add_devices_with_affinity(hwloc);
@@ -371,7 +259,7 @@ qvi_hwpool_s::initialize(
371259
const qvi_hwloc_bitmap_s &
372260
qvi_hwpool_s::cpuset(void) const
373261
{
374-
return m_cpu.cpuset();
262+
return m_cpu.affinity();
375263
}
376264

377265
const qvi_hwpool_devs_t &
@@ -388,7 +276,7 @@ qvi_hwpool_s::nobjects(
388276
) {
389277
if (qvi_hwloc_obj_type_is_host_resource(obj_type)) {
390278
return qvi_hwloc_get_nobjs_in_cpuset(
391-
hwloc, obj_type, m_cpu.cpuset().cdata(), result
279+
hwloc, obj_type, m_cpu.affinity().cdata(), result
392280
);
393281
}
394282
*result = m_devs.count(obj_type);

src/qvi-hwpool.h

+15-25
Original file line numberDiff line numberDiff line change
@@ -26,33 +26,29 @@ struct qvi_hwpool_res_s {
2626
protected:
2727
/** Resource hint flags. */
2828
qv_scope_create_hints_t m_hints = QV_SCOPE_CREATE_HINT_NONE;
29+
/** The resource's affinity encoded as a bitmap. */
30+
qvi_hwloc_bitmap_s m_affinity;
2931
public:
3032
/** Returns the resource's create hints. */
3133
qv_scope_create_hints_t
3234
hints(void);
33-
};
34-
35-
/**
36-
* Defines a hardware pool CPU. A CPU here may have multiple
37-
* processing units (PUs), which are defined in the CPU's cpuset.
38-
*/
39-
struct qvi_hwpool_cpu_s : qvi_hwpool_res_s {
40-
private:
41-
/** The cpuset of the CPU's PUs. */
42-
qvi_hwloc_bitmap_s m_cpuset;
43-
public:
4435
/**
45-
* Returns a reference to the
46-
* CPU's resources encoded by a bitmap.
36+
* Returns a reference to the resource's affinity encoded by a bitmap.
4737
*/
4838
qvi_hwloc_bitmap_s &
49-
cpuset(void);
39+
affinity(void);
5040
/**
51-
* Returns a const reference to the
52-
* CPU's resources encoded by a bitmap.
41+
* Returns a const reference to the resource's affinity encoded by a bitmap.
5342
*/
5443
const qvi_hwloc_bitmap_s &
55-
cpuset(void) const;
44+
affinity(void) const;
45+
};
46+
47+
/**
48+
* Defines a hardware pool CPU. A CPU here may have multiple
49+
* processing units (PUs), which are defined as the CPU's affinity.
50+
*/
51+
struct qvi_hwpool_cpu_s : qvi_hwpool_res_s {
5652
/** Packs the instance into the provided buffer. */
5753
int
5854
packinto(
@@ -109,13 +105,7 @@ struct qvi_hwpool_dev_s : qvi_hwpool_res_s {
109105
id(
110106
qv_device_id_type_t format,
111107
char **result
112-
);
113-
/**
114-
* Returns a const reference to the
115-
* device's affinity encoded by a bitmap.
116-
*/
117-
const qvi_hwloc_bitmap_s &
118-
affinity(void) const;
108+
) const;
119109
/** Packs the instance into the provided buffer. */
120110
int
121111
packinto(
@@ -157,7 +147,7 @@ struct qvi_hwpool_s {
157147
* on the affinity encoded in the provided cpuset.
158148
*/
159149
static int
160-
new_hwpool(
150+
create(
161151
qvi_hwloc_t *hwloc,
162152
hwloc_const_cpuset_t cpuset,
163153
qvi_hwpool_s **opool

0 commit comments

Comments (0)