@@ -44,6 +44,7 @@ static umf_result_t CTL_READ_HANDLER(name)(void *ctx,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;

     if (arg == NULL) {
+        LOG_ERR("arg is NULL");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }

@@ -64,6 +65,7 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
     (void)source, (void)indexes, (void)size;
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
     if (arg == NULL) {
+        LOG_ERR("arg is NULL");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }

@@ -81,6 +83,7 @@ CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;

     if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }

@@ -119,6 +122,7 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source,
     disjoint_pool_t *pool = (disjoint_pool_t *)ctx;

     if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
     }

@@ -148,12 +152,178 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source,
     return UMF_RESULT_SUCCESS;
 }

-static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(used_memory),
-                                                 CTL_LEAF_RO(reserved_memory)};
+static umf_result_t CTL_READ_HANDLER(count)(void *ctx,
+                                            umf_ctl_query_source_t source,
+                                            void *arg, size_t size,
+                                            umf_ctl_index_utlist_t *indexes) {
+    (void)source;
+
+    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;
+    if (arg == NULL || size != sizeof(size_t)) {
+        LOG_ERR("arg is NULL or size is not sizeof(size_t)");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    if (*(size_t *)indexes->arg != SIZE_MAX) {
+        LOG_ERR("to read the bucket count, call it without a bucket id");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    assert(pool);
+    *(size_t *)arg = pool->buckets_num;
+
+    return UMF_RESULT_SUCCESS;
+}
+
+#define DEFINE_STATS_HANDLER(NAME, MEMBER) \
+    static umf_result_t CTL_READ_HANDLER(NAME)( \
+        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \
+        umf_ctl_index_utlist_t *indexes) { \
+        (void)source; \
+        (void)indexes; \
+        disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \
+        \
+        if (arg == NULL || size != sizeof(size_t)) { \
+            LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+        \
+        if (!pool->params.pool_trace) { \
+            LOG_ERR("pool trace is disabled, cannot read " #NAME); \
+            return UMF_RESULT_ERROR_NOT_SUPPORTED; \
+        } \
+        \
+        size_t total = 0; \
+        for (size_t i = 0; i < pool->buckets_num; ++i) { \
+            bucket_t *bucket = pool->buckets[i]; \
+            utils_mutex_lock(&bucket->bucket_lock); \
+            total += bucket->MEMBER; \
+            utils_mutex_unlock(&bucket->bucket_lock); \
+        } \
+        \
+        *(size_t *)arg = total; \
+        return UMF_RESULT_SUCCESS; \
+    }
+
+DEFINE_STATS_HANDLER(alloc_num, alloc_count)
+DEFINE_STATS_HANDLER(alloc_pool_num, alloc_pool_count)
+DEFINE_STATS_HANDLER(free_num, free_count)
+DEFINE_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
+DEFINE_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
+DEFINE_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
+DEFINE_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)
+
+static const umf_ctl_node_t CTL_NODE(stats)[] = {
+    CTL_LEAF_RO(used_memory),        CTL_LEAF_RO(reserved_memory),
+    CTL_LEAF_RO(alloc_num),          CTL_LEAF_RO(alloc_pool_num),
+    CTL_LEAF_RO(free_num),           CTL_LEAF_RO(curr_slabs_in_use),
+    CTL_LEAF_RO(curr_slabs_in_pool), CTL_LEAF_RO(max_slabs_in_use),
+    CTL_LEAF_RO(max_slabs_in_pool),  CTL_NODE_END,
+};
+
+#undef DEFINE_STATS_HANDLER
+
+#ifdef UMF_DEVELOPER_MODE
+#define VALIDATE_BUCKETS_NAME(indexes) \
+    if (strcmp("buckets", indexes->name) != 0) { \
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+    }
+#else
+#define VALIDATE_BUCKETS_NAME(indexes) \
+    do { \
+    } while (0);
+#endif
+
+#define DEFINE_BUCKET_STATS_HANDLER(NAME, MEMBER) \
+    static umf_result_t CTL_READ_HANDLER(NAME, perBucket)( \
+        void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \
+        umf_ctl_index_utlist_t *indexes) { \
+        (void)source; \
+        \
+        disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \
+        if (arg == NULL || size != sizeof(size_t)) { \
+            LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+        \
+        VALIDATE_BUCKETS_NAME(indexes); \
+        if (strcmp(#MEMBER, "size") != 0 && !pool->params.pool_trace) { \
+            LOG_ERR("pool trace is disabled, cannot read " #NAME); \
+            return UMF_RESULT_ERROR_NOT_SUPPORTED; \
+        } \
+        \
+        size_t idx; \
+        idx = *(size_t *)indexes->arg; \
+        \
+        if (idx >= pool->buckets_num) { \
+            LOG_ERR("bucket id %zu is out of range [0, %zu)", idx, \
+                    pool->buckets_num); \
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT; \
+        } \
+        \
+        bucket_t *bucket = pool->buckets[idx]; \
+        *(size_t *)arg = bucket->MEMBER; \
+        \
+        return UMF_RESULT_SUCCESS; \
+    }
+
+DEFINE_BUCKET_STATS_HANDLER(alloc_num, alloc_count)
+DEFINE_BUCKET_STATS_HANDLER(alloc_pool_num, alloc_pool_count)
+DEFINE_BUCKET_STATS_HANDLER(free_num, free_count)
+DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use)
+DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool)
+DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use)
+DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool)
+
+static const umf_ctl_node_t CTL_NODE(stats, perBucket)[] = {
+    CTL_LEAF_RO(alloc_num, perBucket),
+    CTL_LEAF_RO(alloc_pool_num, perBucket),
+    CTL_LEAF_RO(free_num, perBucket),
+    CTL_LEAF_RO(curr_slabs_in_use, perBucket),
+    CTL_LEAF_RO(curr_slabs_in_pool, perBucket),
+    CTL_LEAF_RO(max_slabs_in_use, perBucket),
+    CTL_LEAF_RO(max_slabs_in_pool, perBucket),
+    CTL_NODE_END,
+};
+
+// Not a counter, but it is read exactly like the other per-bucket stats, so we can reuse the macro.
+DEFINE_BUCKET_STATS_HANDLER(size, size)
+
+#undef DEFINE_BUCKET_STATS_HANDLER
+
+static const umf_ctl_node_t CTL_NODE(buckets)[] = {
+    CTL_LEAF_RO(count), CTL_LEAF_RO(size, perBucket),
+    CTL_CHILD(stats, perBucket), CTL_NODE_END};
+
+static int bucket_id_parser(const void *arg, void *dest, size_t dest_size) {
+    size_t *out = (size_t *)dest;
+    assert(out);
+
+    if (arg == NULL) {
+        *out = SIZE_MAX;
+        return 1; // node n
+    }
+
+    int ret = ctl_arg_unsigned(arg, dest, dest_size);
+    if (ret) {
+        *out = SIZE_MAX;
+        return 1;
+    }
+
+    return 0;
+}
+
+static const struct ctl_argument CTL_ARG(buckets) = {
+    sizeof(size_t),
+    {{0, sizeof(size_t), CTL_ARG_TYPE_UNSIGNED_LONG_LONG, bucket_id_parser},
+     CTL_ARG_PARSER_END}};

 static void initialize_disjoint_ctl(void) {
     CTL_REGISTER_MODULE(&disjoint_ctl_root, stats);
-    // CTL_REGISTER_MODULE(&disjoint_ctl_root, name);
+    CTL_REGISTER_MODULE(&disjoint_ctl_root, buckets);
+    // TODO: this is a hack; we need a way to register a module as a node with an argument.
+    disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg =
+        &CTL_ARG(buckets);
 }

 umf_result_t disjoint_pool_ctl(void *hPool,
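
The macro-generated handlers above are easier to follow in isolation. The standalone sketch below (plain C with pthreads; every demo_* name is invented for illustration and is not a UMF API) shows the same pattern DEFINE_STATS_HANDLER relies on: a single template macro stamps out one reader per statistic by token-pasting the field name into bucket->MEMBER and summing that field across all buckets while holding each bucket's lock.

/* Standalone sketch, not UMF code: all demo_* names are invented here. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    size_t alloc_count;
    size_t free_count;
} demo_bucket_t;

typedef struct {
    demo_bucket_t *buckets;
    size_t buckets_num;
} demo_pool_t;

/* Stamp out one aggregation function per statistic; MEMBER names the struct
 * field that gets summed, NAME names the generated function. */
#define DEMO_DEFINE_STATS_READER(NAME, MEMBER) \
    static size_t demo_read_##NAME(demo_pool_t *pool) { \
        size_t total = 0; \
        for (size_t i = 0; i < pool->buckets_num; ++i) { \
            demo_bucket_t *bucket = &pool->buckets[i]; \
            pthread_mutex_lock(&bucket->lock); \
            total += bucket->MEMBER; \
            pthread_mutex_unlock(&bucket->lock); \
        } \
        return total; \
    }

DEMO_DEFINE_STATS_READER(alloc_num, alloc_count)
DEMO_DEFINE_STATS_READER(free_num, free_count)

#undef DEMO_DEFINE_STATS_READER

int main(void) {
    demo_bucket_t buckets[2] = {
        {PTHREAD_MUTEX_INITIALIZER, 3, 1},
        {PTHREAD_MUTEX_INITIALIZER, 5, 2},
    };
    demo_pool_t pool = {buckets, 2};

    printf("alloc_num=%zu free_num=%zu\n", demo_read_alloc_num(&pool),
           demo_read_free_num(&pool));
    return 0;
}

The same trade-off drives the diff: one macro instead of seven nearly identical handlers, with #NAME stringification keeping each error message specific to the statistic being read.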