#ifndef _LINUX_IOMMU_HELPER_H
#define _LINUX_IOMMU_HELPER_H
#include <linux/kernel.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
						   unsigned long offset,
						   u64 dma_mask)
{
	if (size + offset > dma_mask)
		return dma_mask - offset + 1;
	else
		return size;
}
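A quick worked example of the clamp, with illustrative values rather than
any real device's: a request of size 0x10000 at offset 0xF000 against a
16-bit DMA mask overshoots the mask, so the usable index space is trimmed
to what the device can actually address past the offset.

	/* Illustrative values only: size + offset = 0x1F000 > 0xFFFF,
	 * so the helper clamps the index space. */
	unsigned long max = iommu_device_max_index(0x10000, 0xF000, 0xFFFF);
	/* max == 0xFFFF - 0xF000 + 1 == 0x1000 */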
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
				  unsigned long shift,
				  unsigned long boundary_size);
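The definition lives in lib/iommu-helper.c; a minimal sketch of the check it
performs (under the same assumption the kernel asserts, that boundary_size
is a power of two) looks like this:

	/* Sketch of the test: does the run of nr entries starting at
	 * index, offset by shift, cross a boundary_size-aligned
	 * boundary? boundary_size must be a power of two. */
	static int span_crosses_boundary(unsigned int index, unsigned int nr,
					 unsigned long shift,
					 unsigned long boundary_size)
	{
		shift = (shift + index) & (boundary_size - 1);
		return shift + nr > boundary_size;
	}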
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
				      unsigned long start, unsigned int nr,
				      unsigned long shift,
				      unsigned long boundary_size,
				      unsigned long align_mask);
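A hypothetical caller (tbl_map, tbl_size, hint and npages are illustrative
names, not from a real driver) shows the usual pattern: search the bitmap
from a rotating hint first, then wrap to the start of the table before
giving up. iommu_area_alloc() returns the first index of a free run of nr
entries that does not cross a boundary, or -1 on failure.

	unsigned long n;

	n = iommu_area_alloc(tbl_map, tbl_size, hint, npages,
			     0, boundary_npages, align_mask);
	if (n == -1)	/* wrap the hint and retry from the start */
		n = iommu_area_alloc(tbl_map, tbl_size, 0, npages,
				     0, boundary_npages, align_mask);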
From the commit "iommu: inline iommu_num_pages", which moved the helper below into this header:
A profile of a network benchmark showed iommu_num_pages rather high up:
0.52% iommu_num_pages
Looking at the profile, an integer divide is taking almost all of the time:
%
: c000000000376ea4 <.iommu_num_pages>:
1.93 : c000000000376ea4: fb e1 ff f8 std r31,-8(r1)
0.00 : c000000000376ea8: f8 21 ff c1 stdu r1,-64(r1)
0.00 : c000000000376eac: 7c 3f 0b 78 mr r31,r1
3.86 : c000000000376eb0: 38 84 ff ff addi r4,r4,-1
0.00 : c000000000376eb4: 38 05 ff ff addi r0,r5,-1
0.00 : c000000000376eb8: 7c 84 2a 14 add r4,r4,r5
46.95 : c000000000376ebc: 7c 00 18 38 and r0,r0,r3
45.66 : c000000000376ec0: 7c 84 02 14 add r4,r4,r0
0.00 : c000000000376ec4: 7c 64 2b 92 divdu r3,r4,r5
0.00 : c000000000376ec8: 38 3f 00 40 addi r1,r31,64
0.00 : c000000000376ecc: eb e1 ff f8 ld r31,-8(r1)
1.61 : c000000000376ed0: 4e 80 00 20 blr
Since every caller of iommu_num_pages passes in a constant power of two
we can inline this such that the divide is replaced by a shift. The
entire function is only a few instructions once optimised, so it is
a good candidate for inlining overall.
Signed-off-by: Anton Blanchard <anton@samba.org>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static inline unsigned long iommu_num_pages(unsigned long addr,
					    unsigned long len,
					    unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return DIV_ROUND_UP(size, io_page_size);
}
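Because io_page_size is a compile-time power of two at every call site, the
inlined DIV_ROUND_UP folds into a shift, which is exactly what the commit
above is after. A minimal illustration, assuming 4 KB IOMMU pages:

	/* With io_page_size == 4096 fixed at compile time, the inlined
	 * body (size + 4096 - 1) / 4096 is emitted as (size + 4095) >> 12,
	 * so the divdu seen in the profile disappears. */
	unsigned long npages = iommu_num_pages(addr, len, 4096);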
#endif