 * . NUMA allocators and CPU affinity threads are built over CPU partitions,
 * instead of over HW CPUs or HW nodes.
*
- * . By default, Lustre modules should refer to the global cfs_cpt_table,
+ * . By default, Lustre modules should refer to the global cfs_cpt_tab,
* instead of accessing HW CPUs directly, so concurrency of Lustre can be
- * configured by cpu_npartitions of the global cfs_cpt_table
+ * configured by cpu_npartitions of the global cfs_cpt_tab
*
 * . If cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
 * same way as in 2.2 and earlier versions.
}
#endif /* CONFIG_SMP */
-extern struct cfs_cpt_table *cfs_cpt_table;
+extern struct cfs_cpt_table *cfs_cpt_tab;
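The design notes above describe CPU partitions as the unit of concurrency. Purely as an illustrative sketch (not part of this patch), a module following that convention sizes itself from the renamed global table rather than from raw HW CPU counts. example_start_threads() and example_worker() are hypothetical names; cfs_cpt_tab, cfs_cpt_number(), cfs_cpt_bind() and CWARN() come from the hunks in this series, and kthread_run() is the standard kernel API.

/* Sketch only: example_worker()/example_start_threads() are hypothetical. */
static int example_worker(void *arg)
{
	int cpt = (long)arg;

	/* restrict this worker to the CPUs of its partition */
	if (cfs_cpt_bind(cfs_cpt_tab, cpt) != 0)
		CWARN("Failed to bind worker to CPT %d\n", cpt);

	/* ... per-partition work would run here ... */
	return 0;
}

static int example_start_threads(void)
{
	/* one worker per CPU partition (cpu_npartitions), not per HW CPU */
	int ncpts = cfs_cpt_number(cfs_cpt_tab);
	int cpt;

	for (cpt = 0; cpt < ncpts; cpt++) {
		struct task_struct *task;

		task = kthread_run(example_worker, (void *)(long)cpt,
				   "example_%02d", cpt);
		if (IS_ERR(task))
			return PTR_ERR(task);
	}
	return 0;
}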
/**
* destroy a CPU partition table
#include <linux/libcfs/libcfs.h>
/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly;
-EXPORT_SYMBOL(cfs_cpt_table);
+struct cfs_cpt_table *cfs_cpt_tab __read_mostly;
+EXPORT_SYMBOL(cfs_cpt_tab);
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/cpu.h>
void
cfs_cpu_fini(void)
{
- if (cfs_cpt_table)
- cfs_cpt_table_free(cfs_cpt_table);
+ if (cfs_cpt_tab)
+ cfs_cpt_table_free(cfs_cpt_tab);
#ifdef CONFIG_HOTPLUG_CPU
if (lustre_cpu_online > 0)
{
int ret = 0;
- LASSERT(!cfs_cpt_table);
+ LASSERT(!cfs_cpt_tab);
memset(&cpt_data, 0, sizeof(cpt_data));
goto failed;
}
- cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern_dup);
+ cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern_dup);
kfree(cpu_pattern_dup);
- if (!cfs_cpt_table) {
+ if (!cfs_cpt_tab) {
CERROR("Failed to create cptab from pattern %s\n",
cpu_pattern);
goto failed;
}
} else {
- cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
- if (!cfs_cpt_table) {
+ cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions);
+ if (!cfs_cpt_tab) {
CERROR("Failed to create ptable with npartitions %d\n",
cpu_npartitions);
goto failed;
}
spin_lock(&cpt_data.cpt_lock);
- if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
+ if (cfs_cpt_tab->ctb_version != cpt_data.cpt_version) {
spin_unlock(&cpt_data.cpt_lock);
CERROR("CPU hotplug/unplug during setup\n");
goto failed;
LCONSOLE(0, "HW nodes: %d, HW CPU cores: %d, npartitions: %d\n",
num_online_nodes(), num_online_cpus(),
- cfs_cpt_number(cfs_cpt_table));
+ cfs_cpt_number(cfs_cpt_tab));
return 0;
failed:
void
cfs_cpu_fini(void)
{
- if (cfs_cpt_table) {
- cfs_cpt_table_free(cfs_cpt_table);
- cfs_cpt_table = NULL;
+ if (cfs_cpt_tab) {
+ cfs_cpt_table_free(cfs_cpt_tab);
+ cfs_cpt_tab = NULL;
}
}
int
cfs_cpu_init(void)
{
- cfs_cpt_table = cfs_cpt_table_alloc(1);
+ cfs_cpt_tab = cfs_cpt_table_alloc(1);
- return cfs_cpt_table ? 0 : -1;
+ return cfs_cpt_tab ? 0 : -1;
}
#endif /* CONFIG_SMP */
if (write)
return -EPERM;
- LASSERT(cfs_cpt_table);
+ LASSERT(cfs_cpt_tab);
while (1) {
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
+ rc = cfs_cpt_table_print(cfs_cpt_tab, buf, len);
if (rc >= 0)
break;
memset(&the_lnet, 0, sizeof(the_lnet));
- /* refer to global cfs_cpt_table for now */
- the_lnet.ln_cpt_table = cfs_cpt_table;
- the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
+ /* refer to global cfs_cpt_tab for now */
+ the_lnet.ln_cpt_table = cfs_cpt_tab;
+ the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
LASSERT(the_lnet.ln_cpt_number > 0);
if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
nbuf * (srpc_serv_is_framework(svc) ?
- 2 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
+ 2 : cfs_cpt_number(cfs_cpt_tab)), svc->sv_name);
return 0;
}
struct ptlrpc_request_set *set;
int cpt;
- cpt = cfs_cpt_current(cfs_cpt_table, 0);
+ cpt = cfs_cpt_current(cfs_cpt_tab, 0);
set = kzalloc_node(sizeof(*set), GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
+ cfs_cpt_spread_node(cfs_cpt_tab, cpt));
if (!set)
return NULL;
atomic_set(&set->set_refcount, 1);
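The hunk above shows the allocation idiom the rename leaves intact: cfs_cpt_current(cfs_cpt_tab, 0) identifies the caller's partition and cfs_cpt_spread_node() maps it to a NUMA node for kzalloc_node(), so the request set lands on memory local to the CPUs that will service it.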
if (req && req->rq_send_state != LUSTRE_IMP_FULL)
return &ptlrpcd_rcv;
- cpt = cfs_cpt_current(cfs_cpt_table, 1);
+ cpt = cfs_cpt_current(cfs_cpt_tab, 1);
if (!ptlrpcds_cpt_idx)
idx = cpt;
else
int exit = 0;
unshare_fs_struct();
- if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
+ if (cfs_cpt_bind(cfs_cpt_tab, pc->pc_cpt) != 0)
CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
/*
size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
pc->pc_partners = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table,
+ cfs_cpt_spread_node(cfs_cpt_tab,
pc->pc_cpt));
if (!pc->pc_partners) {
pc->pc_npartners = 0;
/*
* Determine the CPTs that ptlrpcd threads will run on.
*/
- cptable = cfs_cpt_table;
+ cptable = cfs_cpt_tab;
ncpts = cfs_cpt_number(cptable);
if (ptlrpcd_cpts) {
struct cfs_expr_list *el;
size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
pd = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
+ cfs_cpt_spread_node(cfs_cpt_tab, cpt));
if (!pd) {
rc = -ENOMEM;
goto out;
#define HRT_STOPPING 1
struct ptlrpc_hr_service {
- /* CPU partition table, it's just cfs_cpt_table for now */
+ /* CPU partition table, it's just cfs_cpt_tab for now */
struct cfs_cpt_table *hr_cpt_table;
/** controller sleep waitq */
wait_queue_head_t hr_waitq;
cptable = cconf->cc_cptable;
if (!cptable)
- cptable = cfs_cpt_table;
+ cptable = cfs_cpt_tab;
if (!conf->psc_thr.tc_cpu_affinity) {
ncpts = 1;
int weight;
memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
- ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
+ ptlrpc_hr.hr_cpt_table = cfs_cpt_tab;
ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
sizeof(*hrp));
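A final hedged sketch, assuming cfs_percpt_alloc() behaves as its use just above suggests (one zeroed element per partition of the table passed in, indexable by CPT number). All example_* names below are hypothetical; cfs_cpt_tab, cfs_percpt_alloc() and cfs_cpt_current() are the symbols used by this patch.

/* Sketch only: all example_* names are hypothetical. */
struct example_part {
	long	ep_count;	/* per-partition counter, no global lock */
};

static struct example_part **example_parts;

static int example_init(void)
{
	/* one struct example_part per partition of cfs_cpt_tab */
	example_parts = cfs_percpt_alloc(cfs_cpt_tab, sizeof(**example_parts));
	return example_parts ? 0 : -ENOMEM;
}

static struct example_part *example_current_part(void)
{
	/* index by the caller's partition, as the ptlrpc code above does */
	return example_parts[cfs_cpt_current(cfs_cpt_tab, 0)];
}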