Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-17 09:55:53

0001 /* Copyright (C) 2003,2004 Andi Kleen, SuSE Labs.
0002 
0003    libnuma is free software; you can redistribute it and/or
0004    modify it under the terms of the GNU Lesser General Public
0005    License as published by the Free Software Foundation; version
0006    2.1.
0007 
0008    libnuma is distributed in the hope that it will be useful,
0009    but WITHOUT ANY WARRANTY; without even the implied warranty of
0010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
0011    Lesser General Public License for more details.
0012 
0013    You should find a copy of v2.1 of the GNU Lesser General Public License
0014    somewhere on your Linux system; if not, write to the Free Software
0015    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
0016 
0017 #ifndef _NUMA_H
0018 #define _NUMA_H 1
0019 
0020 /* allow an application to test for the current programming interface: */
0021 #define LIBNUMA_API_VERSION 2
0022 
0023 /* Simple NUMA policy library */
0024 
0025 #include <stddef.h>
0026 #include <string.h>
0027 #include <sys/types.h>
0028 #include <stdlib.h>
0029 
0030 #if defined(__x86_64__) || defined(__i386__)
0031 #define NUMA_NUM_NODES  128
0032 #else
0033 #define NUMA_NUM_NODES  2048
0034 #endif
0035 
0036 #ifdef __cplusplus
0037 extern "C" {
0038 #endif
0039 
0040 typedef struct {
0041         unsigned long n[NUMA_NUM_NODES/(sizeof(unsigned long)*8)];
0042 } nodemask_t;
0043 
0044 struct bitmask {
0045     unsigned long size; /* number of bits in the map */
0046     unsigned long *maskp;
0047 };
0048 
0049 /* operations on struct bitmask */
0050 int numa_bitmask_isbitset(const struct bitmask *, unsigned int);
0051 struct bitmask *numa_bitmask_setall(struct bitmask *);
0052 struct bitmask *numa_bitmask_clearall(struct bitmask *);
0053 struct bitmask *numa_bitmask_setbit(struct bitmask *, unsigned int);
0054 struct bitmask *numa_bitmask_clearbit(struct bitmask *, unsigned int);
0055 unsigned int numa_bitmask_nbytes(struct bitmask *);
0056 unsigned int numa_bitmask_weight(const struct bitmask *);
0057 struct bitmask *numa_bitmask_alloc(unsigned int);
0058 void numa_bitmask_free(struct bitmask *);
0059 int numa_bitmask_equal(const struct bitmask *, const struct bitmask *);
0060 void copy_nodemask_to_bitmask(nodemask_t *, struct bitmask *);
0061 void copy_bitmask_to_nodemask(struct bitmask *, nodemask_t *);
0062 void copy_bitmask_to_bitmask(struct bitmask *, struct bitmask *);
0063 
0064 /* compatibility for codes that used them: */
0065 
0066 static inline void nodemask_zero(nodemask_t *mask)
0067 {
0068     struct bitmask tmp;
0069 
0070     tmp.maskp = (unsigned long *)mask;
0071     tmp.size = sizeof(nodemask_t) * 8;
0072     numa_bitmask_clearall(&tmp);
0073 }
0074 
0075 static inline void nodemask_zero_compat(nodemask_t *mask)
0076 {
0077     struct bitmask tmp;
0078 
0079     tmp.maskp = (unsigned long *)mask;
0080     tmp.size = sizeof(nodemask_t) * 8;
0081     numa_bitmask_clearall(&tmp);
0082 }
0083 
0084 static inline void nodemask_set_compat(nodemask_t *mask, int node)
0085 {
0086     mask->n[node / (8*sizeof(unsigned long))] |=
0087         (1UL<<(node%(8*sizeof(unsigned long))));
0088 }
0089 
0090 static inline void nodemask_clr_compat(nodemask_t *mask, int node)
0091 {
0092     mask->n[node / (8*sizeof(unsigned long))] &=
0093         ~(1UL<<(node%(8*sizeof(unsigned long))));
0094 }
0095 
0096 static inline int nodemask_isset_compat(const nodemask_t *mask, int node)
0097 {
0098     if ((unsigned)node >= NUMA_NUM_NODES)
0099         return 0;
0100     if (mask->n[node / (8*sizeof(unsigned long))] &
0101         (1UL<<(node%(8*sizeof(unsigned long)))))
0102         return 1;
0103     return 0;
0104 }
0105 
0106 static inline int nodemask_equal(const nodemask_t *a, const nodemask_t *b)
0107 {
0108     struct bitmask tmp_a, tmp_b;
0109 
0110     tmp_a.maskp = (unsigned long *)a;
0111     tmp_a.size = sizeof(nodemask_t) * 8;
0112 
0113     tmp_b.maskp = (unsigned long *)b;
0114     tmp_b.size = sizeof(nodemask_t) * 8;
0115 
0116     return numa_bitmask_equal(&tmp_a, &tmp_b);
0117 }
0118 
0119 static inline int nodemask_equal_compat(const nodemask_t *a, const nodemask_t *b)
0120 {
0121     struct bitmask tmp_a, tmp_b;
0122 
0123     tmp_a.maskp = (unsigned long *)a;
0124     tmp_a.size = sizeof(nodemask_t) * 8;
0125 
0126     tmp_b.maskp = (unsigned long *)b;
0127     tmp_b.size = sizeof(nodemask_t) * 8;
0128 
0129     return numa_bitmask_equal(&tmp_a, &tmp_b);
0130 }
0131 
0132 /* NUMA support available. If this returns a negative value, all other
0133    functions in this library are undefined. */
0134 int numa_available(void);
0135 
0136 /* Basic NUMA state */
0137 
0138 /* Get max available node */
0139 int numa_max_node(void);
0140 int numa_max_possible_node(void);
0141 /* Return preferred node */
0142 int numa_preferred(void);
0143 
0144 /* Return node size and free memory */
0145 long long numa_node_size64(int node, long long *freep);
0146 long numa_node_size(int node, long *freep);
0147 
0148 int numa_pagesize(void);
0149 
0150 /* Set with all nodes from which the calling process may allocate memory.
0151    Only valid after numa_available. */
0152 extern struct bitmask *numa_all_nodes_ptr;
0153 
0154 /* Set with all nodes the kernel has exposed to userspace */
0155 extern struct bitmask *numa_nodes_ptr;
0156 
0157 /* For source compatibility */
0158 extern nodemask_t numa_all_nodes;
0159 
0160 /* Set with all cpus. */
0161 extern struct bitmask *numa_all_cpus_ptr;
0162 
0163 /* Set with no nodes */
0164 extern struct bitmask *numa_no_nodes_ptr;
0165 
0166 /* Source compatibility */
0167 extern nodemask_t numa_no_nodes;
0168 
0169 /* Only run and allocate memory from a specific set of nodes. */
0170 void numa_bind(struct bitmask *nodes);
0171 
0172 /* Set the NUMA node interleaving mask. 0 to turn off interleaving */
0173 void numa_set_interleave_mask(struct bitmask *nodemask);
0174 
0175 /* Return the current interleaving mask */
0176 struct bitmask *numa_get_interleave_mask(void);
0177 
0178 /* allocate a bitmask big enough for all nodes */
0179 struct bitmask *numa_allocate_nodemask(void);
0180 
/* Release a node bitmask previously obtained from
   numa_allocate_nodemask(); convenience alias for numa_bitmask_free(). */
static inline void numa_free_nodemask(struct bitmask *b)
{
    numa_bitmask_free(b);
}
0185 
0186 /* Some node to preferably allocate memory from for task. */
0187 void numa_set_preferred(int node);
0188 
0189 /* Returns whether or not the platform supports MPOL_PREFERRED_MANY */
0190 int numa_has_preferred_many(void);
0191 
0192 /* Set of nodes to preferably allocate memory from for task. */
0193 void numa_set_preferred_many(struct bitmask *bitmask);
0194 
0195 /* Return preferred nodes */
0196 struct bitmask *numa_preferred_many(void);
0197 
0198 /* Set local memory allocation policy for task */
0199 void numa_set_localalloc(void);
0200 
0201 /* Only allocate memory from the nodes set in mask. 0 to turn off */
0202 void numa_set_membind(struct bitmask *nodemask);
0203 
0204 /* Only allocate memory from the nodes set in mask. Optimize page
0205    placement with Linux kernel NUMA balancing if possible. 0 to turn off */
0206 void numa_set_membind_balancing(struct bitmask *bmp);
0207 
0208 /* Return current membind */
0209 struct bitmask *numa_get_membind(void);
0210 
0211 /* Return allowed memories [nodes] */
0212 struct bitmask *numa_get_mems_allowed(void);
0213 
0214 int numa_get_interleave_node(void);
0215 
0216 /* NUMA memory allocation. These functions always round to page size
0217    and are relatively slow. */
0218 
0219 /* Alloc memory page interleaved on nodes in mask */
0220 void *numa_alloc_interleaved_subset(size_t size, struct bitmask *nodemask);
0221 /* Alloc memory page interleaved on all nodes. */
0222 void *numa_alloc_interleaved(size_t size);
0223 /* Alloc memory located on node */
0224 void *numa_alloc_onnode(size_t size, int node);
0225 /* Alloc memory on local node */
0226 void *numa_alloc_local(size_t size);
0227 /* Allocation with current policy */
0228 void *numa_alloc(size_t size);
0229 /* Change the size of a memory area preserving the memory policy */
0230 void *numa_realloc(void *old_addr, size_t old_size, size_t new_size);
0231 /* Free memory allocated by the functions above */
0232 void numa_free(void *mem, size_t size);
0233 
0234 /* Low level functions, primarily for shared memory. All memory
0235    processed by these must not be touched yet */
0236 
0237 /* Interleave a memory area. */
0238 void numa_interleave_memory(void *mem, size_t size, struct bitmask *mask);
0239 
0240 /* Allocate a memory area on a specific node. */
0241 void numa_tonode_memory(void *start, size_t size, int node);
0242 
0243 /* Allocate memory on a mask of nodes. */
0244 void numa_tonodemask_memory(void *mem, size_t size, struct bitmask *mask);
0245 
0246 /* Allocate a memory area on the current node. */
0247 void numa_setlocal_memory(void *start, size_t size);
0248 
0249 /* Allocate memory area with current memory policy */
0250 void numa_police_memory(void *start, size_t size);
0251 
0252 /* Run current task only on nodes in mask */
0253 int numa_run_on_node_mask(struct bitmask *mask);
0254 /* Run current task on nodes in mask without any cpuset awareness */
0255 int numa_run_on_node_mask_all(struct bitmask *mask);
0256 /* Run current task only on node */
0257 int numa_run_on_node(int node);
0258 /* Return current mask of nodes the task can run on */
0259 struct bitmask * numa_get_run_node_mask(void);
0260 
0261 /* When strict, fail allocations that cannot be satisfied on the target node(s). */
0262 void numa_set_bind_policy(int strict);
0263 
0264 /* Fail when existing memory has incompatible policy */
0265 void numa_set_strict(int flag);
0266 
0267 /* maximum nodes (size of kernel nodemask_t) */
0268 int numa_num_possible_nodes(void);
0269 
0270 /* maximum cpus (size of kernel cpumask_t) */
0271 int numa_num_possible_cpus(void);
0272 
0273 /* nodes in the system */
0274 int numa_num_configured_nodes(void);
0275 
0276 /* maximum cpus */
0277 int numa_num_configured_cpus(void);
0278 
0279 /* maximum cpus allowed to current task */
0280 int numa_num_task_cpus(void);
0281 int numa_num_thread_cpus(void); /* backward compatibility */
0282 
0283 /* maximum nodes allowed to current task */
0284 int numa_num_task_nodes(void);
0285 int numa_num_thread_nodes(void); /* backward compatibility */
0286 
0287 /* allocate a bitmask the size of the kernel cpumask_t */
0288 struct bitmask *numa_allocate_cpumask(void);
0289 
/* Release a cpu bitmask previously obtained from
   numa_allocate_cpumask(); convenience alias for numa_bitmask_free(). */
static inline void numa_free_cpumask(struct bitmask *b)
{
    numa_bitmask_free(b);
}
0294 
0295 /* Convert node to CPU mask. -1/errno on failure, otherwise 0. */
0296 int numa_node_to_cpus(int, struct bitmask *);
0297 
0298 void numa_node_to_cpu_update(void);
0299 
0300 /* report the node of the specified cpu. -1/errno on invalid cpu. */
0301 int numa_node_of_cpu(int cpu);
0302 
0303 /* Report distance of node1 from node2. 0 on error.*/
0304 int numa_distance(int node1, int node2);
0305 
0306 /* Error handling. */
0307 /* This is an internal function in libnuma that can be overwritten by a user
0308    program. Default is to print an error to stderr and exit if numa_exit_on_error
0309    is true. */
0310 void numa_error(char *where);
0311 
0312 /* When true exit the program when a NUMA system call (except numa_available)
0313    fails */
0314 extern int numa_exit_on_error;
0315 /* Warning function. Can also be overwritten. Default is to print on stderr
0316    once. */
0317 void numa_warn(int num, char *fmt, ...);
0318 
0319 /* When true exit the program on a numa_warn() call */
0320 extern int numa_exit_on_warn;
0321 
0322 int numa_migrate_pages(int pid, struct bitmask *from, struct bitmask *to);
0323 
0324 int numa_move_pages(int pid, unsigned long count, void **pages,
0325         const int *nodes, int *status, int flags);
0326 
0327 int numa_sched_getaffinity(pid_t, struct bitmask *);
0328 int numa_sched_setaffinity(pid_t, struct bitmask *);
0329 
0330 /* Convert an ascii list of nodes to a bitmask */
0331 struct bitmask *numa_parse_nodestring(const char *);
0332 
0333 /* Convert an ascii list of nodes to a bitmask without current nodeset
0334  * dependency */
0335 struct bitmask *numa_parse_nodestring_all(const char *);
0336 
0337 /* Convert an ascii list of cpu to a bitmask */
0338 struct bitmask *numa_parse_cpustring(const char *);
0339 
0340 /* Convert an ascii list of cpu to a bitmask without current taskset
0341  * dependency */
0342 struct bitmask *numa_parse_cpustring_all(const char *);
0343 
0344 /* Returns whether or not the system supports setting home_node for mbind
0345  * and preferred_many.
0346  */
0347 int numa_has_home_node(void);
0348 
0349 /* set the home node for a VMA policy present in the task's address range */
0350 int numa_set_mempolicy_home_node(void *start, unsigned long len,
0351         int home_node, int flags);
0352 
0353 /*
0354  * The following functions are for source code compatibility
0355  * with releases prior to version 2.
0356  * Such codes should be compiled with NUMA_VERSION1_COMPATIBILITY defined.
0357  */
0358 
0359 static inline void numa_set_interleave_mask_compat(nodemask_t *nodemask)
0360 {
0361     struct bitmask tmp;
0362 
0363     tmp.maskp = (unsigned long *)nodemask;
0364     tmp.size = sizeof(nodemask_t) * 8;
0365     numa_set_interleave_mask(&tmp);
0366 }
0367 
0368 static inline nodemask_t numa_get_interleave_mask_compat(void)
0369 {
0370     struct bitmask *tp;
0371     nodemask_t mask;
0372 
0373     tp = numa_get_interleave_mask();
0374     copy_bitmask_to_nodemask(tp, &mask);
0375     numa_bitmask_free(tp);
0376     return mask;
0377 }
0378 
0379 static inline void numa_bind_compat(nodemask_t *mask)
0380 {
0381     struct bitmask *tp;
0382 
0383     tp = numa_allocate_nodemask();
0384     copy_nodemask_to_bitmask(mask, tp);
0385     numa_bind(tp);
0386     numa_bitmask_free(tp);
0387 }
0388 
0389 static inline void numa_set_membind_compat(nodemask_t *mask)
0390 {
0391     struct bitmask tmp;
0392 
0393     tmp.maskp = (unsigned long *)mask;
0394     tmp.size = sizeof(nodemask_t) * 8;
0395     numa_set_membind(&tmp);
0396 }
0397 
0398 static inline nodemask_t numa_get_membind_compat(void)
0399 {
0400     struct bitmask *tp;
0401     nodemask_t mask;
0402 
0403     tp = numa_get_membind();
0404     copy_bitmask_to_nodemask(tp, &mask);
0405     numa_bitmask_free(tp);
0406     return mask;
0407 }
0408 
0409 static inline void *numa_alloc_interleaved_subset_compat(size_t size,
0410                     const nodemask_t *mask)
0411 {
0412     struct bitmask tmp;
0413 
0414     tmp.maskp = (unsigned long *)mask;
0415     tmp.size = sizeof(nodemask_t) * 8;
0416     return numa_alloc_interleaved_subset(size, &tmp);
0417 }
0418 
0419 static inline int numa_run_on_node_mask_compat(const nodemask_t *mask)
0420 {
0421     struct bitmask tmp;
0422 
0423     tmp.maskp = (unsigned long *)mask;
0424     tmp.size = sizeof(nodemask_t) * 8;
0425     return numa_run_on_node_mask(&tmp);
0426 }
0427 
0428 static inline nodemask_t numa_get_run_node_mask_compat(void)
0429 {
0430     struct bitmask *tp;
0431     nodemask_t mask;
0432 
0433     tp = numa_get_run_node_mask();
0434     copy_bitmask_to_nodemask(tp, &mask);
0435     numa_bitmask_free(tp);
0436     return mask;
0437 }
0438 
0439 static inline void numa_interleave_memory_compat(void *mem, size_t size,
0440                         const nodemask_t *mask)
0441 {
0442     struct bitmask tmp;
0443 
0444     tmp.maskp = (unsigned long *)mask;
0445     tmp.size = sizeof(nodemask_t) * 8;
0446     numa_interleave_memory(mem, size, &tmp);
0447 }
0448 
0449 static inline void numa_tonodemask_memory_compat(void *mem, size_t size,
0450                         const nodemask_t *mask)
0451 {
0452     struct bitmask tmp;
0453 
0454     tmp.maskp = (unsigned long *)mask;
0455     tmp.size = sizeof(nodemask_t) * 8;
0456     numa_tonodemask_memory(mem, size, &tmp);
0457 }
0458 
0459 static inline int numa_sched_getaffinity_compat(pid_t pid, unsigned len,
0460                         unsigned long *mask)
0461 {
0462     struct bitmask tmp;
0463 
0464     tmp.maskp = (unsigned long *)mask;
0465     tmp.size = len * 8;
0466     return numa_sched_getaffinity(pid, &tmp);
0467 }
0468 
0469 static inline int numa_sched_setaffinity_compat(pid_t pid, unsigned len,
0470                         unsigned long *mask)
0471 {
0472     struct bitmask tmp;
0473 
0474     tmp.maskp = (unsigned long *)mask;
0475     tmp.size = len * 8;
0476     return numa_sched_setaffinity(pid, &tmp);
0477 }
0478 
0479 static inline int numa_node_to_cpus_compat(int node, unsigned long *buffer,
0480                             int buffer_len)
0481 {
0482     struct bitmask tmp;
0483 
0484     tmp.maskp = (unsigned long *)buffer;
0485     tmp.size = buffer_len * 8;
0486     return numa_node_to_cpus(node, &tmp);
0487 }
0488 
0489 /* end of version 1 compatibility functions */
0490 
0491 /*
0492  * To compile an application that uses libnuma version 1:
0493  *   add -DNUMA_VERSION1_COMPATIBILITY to your Makefile's CFLAGS
0494  */
0495 #ifdef NUMA_VERSION1_COMPATIBILITY
0496 #include <numacompat1.h>
0497 #endif
0498 
0499 #ifdef __cplusplus
0500 }
0501 #endif
0502 
0503 #endif