memset(stats, 0, sizeof(*stats));
- cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
[...]
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC); /* May be called via */
- if (!cb0) { /* ehea_neq_tasklet() */
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
+ if (!cb0) { /* ehea_neq_tasklet() */
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
[...]
u64 hret;
int ret = 0;
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
[...]
struct hcp_ehea_port_cb0 *cb0;
ret = -ENOMEM;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0)
goto out;
[...]
goto out;
}
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
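Every hunk above makes the same one-line change: each hypervisor control block is now allocated as a full kernel page instead of an H_CB_ALIGNMENT-sized buffer, keeping the allocation page-sized on both 4K and 64K kernels. As a hedged sketch only (ehea_cb_alloc is a hypothetical helper, not part of the driver), the allocate-log-bail pattern repeated in these hunks could be factored like this:

/* Hypothetical helper, not in the driver: allocate a zeroed,
 * page-sized control block and log on failure. */
static void *ehea_cb_alloc(const char *name, gfp_t flags)
{
        void *cb = kzalloc(PAGE_SIZE, flags);

        if (!cb)
                ehea_error("no mem for %s", name);
        return cb;
}

Each call site would then reduce to, e.g., cb0 = ehea_cb_alloc("cb0", GFP_ATOMIC), with the GFP flags still chosen per context (GFP_ATOMIC where the caller may run from ehea_neq_tasklet()).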
[...]
if ((enable && port->promisc) || (!enable && !port->promisc))
return;
- cb7 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC);
+ cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!cb7) {
ehea_error("no mem for cb7");
goto out;
[...]
port->vgrp = grp;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
[...]
int index;
u64 hret;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
[...]
if (port->vgrp)
port->vgrp->vlan_devices[vid] = NULL;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
[...]
u64 dummy64 = 0;
struct hcp_modify_qp_cb0* cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
[...]
u64 hret;
int ret;
- cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb) {
ret = -ENOMEM;
goto out;
[...]
goto out;
/* Enable Jumbo frames */
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
} else {
[...]
static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
u64 paddr_user)
{
- epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE);
+ /* To support 64k pages we must round to 64k page boundary */
+ epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
+ (paddr_kernel & ~PAGE_MASK);
epas->user.addr = paddr_user;
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap(epas->kernel.addr);
+ iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
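The ctor/dtor pair above encodes one trick: ioremap() must be given a page-aligned physical address, and with 64K base pages paddr_kernel is not guaranteed to sit on a 64K boundary. The ctor therefore maps from the rounded-down page boundary and re-applies the sub-page offset to the returned virtual address, and the dtor masks that offset back off, since iounmap() must receive exactly what ioremap() returned. A minimal sketch of the same pattern (map_unaligned/unmap_unaligned are illustrative names, not driver functions):

/* Map a physical range whose start may not be page aligned. */
static void __iomem *map_unaligned(u64 paddr, unsigned long len)
{
        u64 offset = paddr & ~PAGE_MASK;        /* sub-page offset */

        /* Map from the page boundary, then re-apply the offset. */
        return ioremap(paddr & PAGE_MASK, offset + len) + offset;
}

static void unmap_unaligned(void __iomem *vaddr)
{
        /* Strip the offset: iounmap() wants ioremap()'s return value. */
        iounmap((void __iomem *)((unsigned long)vaddr & PAGE_MASK));
}

On a 64K-page kernel, for example, a register page at physical 0x10035000 becomes ioremap(0x10030000, PAGE_SIZE) + 0x5000.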
[...]
start = KERNELBASE;
end = (u64)high_memory;
- nr_pages = (end - start) / PAGE_SIZE;
+ nr_pages = (end - start) / EHEA_PAGESIZE;
pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!pt) {
[...]
if (nr_pages > 1) {
u64 num_pages = min(nr_pages, (u64)512);
for (i = 0; i < num_pages; i++)
- pt[i] = virt_to_abs((void*)(((u64)start)
- + ((k++) *
- PAGE_SIZE)));
+ pt[i] = virt_to_abs((void*)(((u64)start) +
+ ((k++) *
+ EHEA_PAGESIZE)));
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
0, virt_to_abs(pt),
num_pages);
nr_pages -= num_pages;
} else {
- u64 abs_adr = virt_to_abs((void*)(((u64)start)
- + (k * PAGE_SIZE)));
+ u64 abs_adr = virt_to_abs((void*)(((u64)start) +
+ (k * EHEA_PAGESIZE)));
+
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
0, abs_adr, 1);
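This last hunk carries the substance of the 64K support: the adapter registers memory with the hypervisor in its own fixed page size (EHEA_PAGESIZE), independent of the kernel's PAGE_SIZE. Counting and striding by PAGE_SIZE happened to work while both were 4K, but on a 64K-page kernel only the first 4K of every 64K kernel page would have been registered. A worked sketch of the arithmetic, assuming EHEA_PAGESIZE == 4096:

/*
 * 64K-page kernel, 4K adapter pages (assumed values):
 *
 *   PAGE_SIZE     = 0x10000  (64K)
 *   EHEA_PAGESIZE = 0x1000   (4K)
 *
 *   nr_pages = (end - start) / EHEA_PAGESIZE
 *            = 16 * ((end - start) / PAGE_SIZE)
 *
 * Each 64K kernel page is thus handed to the hypervisor as 16
 * consecutive 4K registrations, batched up to 512 addresses per
 * ehea_h_register_rpage_mr() call through the pt[] page table.
 */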