author:    Jan-Bernd Themann <ossthema@de.ibm.com>  2007-10-01 16:33:18 +0200
committer: David S. Miller <davem@sunset.davemloft.net>  2007-10-10 16:54:05 -0700
commit:    2c69448bbcedebeb8409ddb05fbc7d3fe1cfbda7 (patch)
tree:      143d29f88c4983f0437b9114c0784469f59a956a /drivers/net/ehea/ehea_qmr.c
parent:    31a5bb04d59931eb4657826213a439d37d12d4a9 (diff)
ehea: DLPAR memory add fix
Due to stability issues in high-load situations, the HW queue handling
has to be changed. The HW queues are now stopped and restarted instead
of being destroyed and reallocated.
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ehea/ehea_qmr.c')
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  20
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index c82e24596074..329a25248d75 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -563,8 +563,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 int ehea_create_busmap( void )
 {
 	u64 vaddr = EHEA_BUSMAP_START;
-	unsigned long abs_max_pfn = 0;
-	unsigned long sec_max_pfn;
+	unsigned long high_section_index = 0;
 	int i;
 
 	/*
@@ -574,14 +573,10 @@ int ehea_create_busmap( void )
 	ehea_bmap.valid_sections = 0;
 
 	for (i = 0; i < NR_MEM_SECTIONS; i++)
-		if (valid_section_nr(i)) {
-			sec_max_pfn = section_nr_to_pfn(i);
-			if (sec_max_pfn > abs_max_pfn)
-				abs_max_pfn = sec_max_pfn;
-			ehea_bmap.valid_sections++;
-		}
+		if (valid_section_nr(i))
+			high_section_index = i;
 
-	ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+	ehea_bmap.entries = high_section_index + 1;
 
 	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
 	if (!ehea_bmap.vaddr)
@@ -593,6 +588,7 @@ int ehea_create_busmap( void )
 		if (pfn_valid(pfn)) {
 			ehea_bmap.vaddr[i] = vaddr;
 			vaddr += EHEA_SECTSIZE;
+			ehea_bmap.valid_sections++;
 		} else
 			ehea_bmap.vaddr[i] = 0;
 	}
@@ -637,7 +633,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 
 	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-	pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
+	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
@@ -660,8 +656,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 		void *sectbase = __va(i << SECTION_SIZE_BITS);
 		unsigned long k = 0;
 
-		for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
-		     j++) {
+		for (j = 0; j < (EHEA_PAGES_PER_SECTION /
+				 EHEA_MAX_RPAGE); j++) {
 
 			for (m = 0; m < EHEA_MAX_RPAGE; m++) {
 				pg = sectbase + ((k++) * EHEA_PAGESIZE);
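
To see the bus-map sizing change from the diff above in isolation: the driver now sizes the map by the highest valid memory section index instead of deriving it from the highest PFN, and it counts valid_sections while filling the map rather than during the scan. The following is a minimal, self-contained userspace sketch of that two-pass approach; NR_SECTIONS, SECTSIZE, BUSMAP_START and section_is_valid() are illustrative stand-ins for the kernel's NR_MEM_SECTIONS, EHEA_SECTSIZE, EHEA_BUSMAP_START and valid_section_nr()/pfn_valid(), and the sparse memory layout is invented for the example.

/*
 * Sketch of the bus-map sizing logic: size by the highest valid
 * section index, assign bus addresses and count valid sections in
 * the fill pass.  Not the ehea driver code; names are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NR_SECTIONS   16           /* stand-in for NR_MEM_SECTIONS */
#define SECTSIZE      0x1000000UL  /* stand-in for EHEA_SECTSIZE   */
#define BUSMAP_START  0x8000000000000000ULL

/* Hypothetical validity test standing in for valid_section_nr()/pfn_valid(). */
static int section_is_valid(int i)
{
	return i < 3 || i == 7;    /* example: sparse memory layout */
}

int main(void)
{
	uint64_t vaddr = BUSMAP_START;
	unsigned long high_section_index = 0;
	unsigned long entries, valid_sections = 0;
	uint64_t *map;
	int i;

	/* First pass: only the highest valid index matters for sizing. */
	for (i = 0; i < NR_SECTIONS; i++)
		if (section_is_valid(i))
			high_section_index = i;

	entries = high_section_index + 1;
	map = calloc(entries, sizeof(*map));
	if (!map)
		return 1;

	/* Second pass: assign bus addresses and count valid sections here. */
	for (i = 0; i < (int)entries; i++) {
		if (section_is_valid(i)) {
			map[i] = vaddr;
			vaddr += SECTSIZE;
			valid_sections++;
		} else {
			map[i] = 0;
		}
	}

	printf("entries=%lu valid_sections=%lu\n", entries, valid_sections);
	free(map);
	return 0;
}

The map stays indexable by section number, and valid_sections is incremented only for sections that actually receive a bus address, which is what the added ehea_bmap.valid_sections++ line in the diff accomplishes.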