fix tests

This commit is contained in:
Jerome Kieffer 2017-05-11 15:48:33 +02:00
parent 80d0cef2c1
commit 5a5eb9e0b3
16 changed files with 756 additions and 959 deletions

View File

@ -1051,8 +1051,8 @@ class AzimuthalIntegrator(Geometry):
int2d = True
else:
int2d = False
pos0 = self.array_from_unit(shape, "center", unit)
dpos0 = self.array_from_unit(shape, "delta", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
dpos0 = self.array_from_unit(shape, "delta", unit, scale=False)
if (pos1_range is None) and (not int2d):
pos1 = None
dpos1 = None
@ -1147,13 +1147,13 @@ class AzimuthalIntegrator(Geometry):
else:
int2d = False
if split == "full":
pos = self.array_from_unit(shape, "corner", unit)
pos = self.array_from_unit(shape, "corner", unit, scale=False)
else:
pos0 = self.array_from_unit(shape, "center", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
if split == "no":
dpos0 = None
else:
dpos0 = self.array_from_unit(shape, "delta", unit)
dpos0 = self.array_from_unit(shape, "delta", unit, scale=False)
if (pos1_range is None) and (not int2d):
pos1 = None
dpos1 = None
@ -2515,7 +2515,7 @@ class AzimuthalIntegrator(Geometry):
method = self.DEFAULT_METHOD
else:
logger.debug("integrate1d uses SplitPixel implementation")
pos = self.array_from_unit(shape, "corner", unit)
pos = self.array_from_unit(shape, "corner", unit, scale=False)
qAxis, I, sum_, count = splitPixel.fullSplit1D(pos=pos,
weights=data,
bins=npt,
@ -2559,8 +2559,8 @@ class AzimuthalIntegrator(Geometry):
dchi = self.deltaChi(shape)
else:
dchi = None
pos0 = self.array_from_unit(shape, "center", unit)
dpos0 = self.array_from_unit(shape, "delta", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
dpos0 = self.array_from_unit(shape, "delta", unit, scale=False)
qAxis, I, sum_, count = splitBBox.histoBBox1d(weights=data,
pos0=pos0,
delta_pos0=dpos0,
@ -2600,7 +2600,7 @@ class AzimuthalIntegrator(Geometry):
# Common part for Numpy and Cython
data = data.astype(numpy.float32)
mask = self.create_mask(data, mask, dummy, delta_dummy, mode="numpy")
pos0 = self.array_from_unit(shape, "center", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
if radial_range is not None:
mask *= (pos0 >= min(radial_range))
mask *= (pos0 <= max(radial_range))
@ -3013,7 +3013,7 @@ class AzimuthalIntegrator(Geometry):
method = self.DEFAULT_METHOD
else:
logger.debug("integrate2d uses SplitPixel implementation")
pos = self.array_from_unit(shape, "corner", unit)
pos = self.array_from_unit(shape, "corner", unit, scale=False)
I, bins_rad, bins_azim, sum_, count = splitPixel.fullSplit2D(pos=pos,
weights=data,
bins=(npt_rad, npt_azim),
@ -3036,8 +3036,8 @@ class AzimuthalIntegrator(Geometry):
logger.debug("integrate2d uses BBox implementation")
chi = self.chiArray(shape)
dchi = self.deltaChi(shape)
pos0 = self.array_from_unit(shape, "center", unit)
dpos0 = self.array_from_unit(shape, "delta", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
dpos0 = self.array_from_unit(shape, "delta", unit, scale=False)
I, bins_rad, bins_azim, sum_, count = splitBBox.histoBBox2d(weights=data,
pos0=pos0,
delta_pos0=dpos0,
@ -3060,7 +3060,7 @@ class AzimuthalIntegrator(Geometry):
data = data.astype(numpy.float32) # it is important to make a copy see issue #88
mask = self.create_mask(data, mask, dummy, delta_dummy,
mode="numpy")
pos0 = self.array_from_unit(shape, "center", unit)
pos0 = self.array_from_unit(shape, "center", unit, scale=False)
pos1 = self.chiArray(shape)
if radial_range is not None:

File diff suppressed because it is too large Load Diff

View File

@ -3801,20 +3801,20 @@ static __Pyx_memviewslice __pyx_fuse_0_4__pyx_f_5pyFAI_3ext_7preproc_c1_preproc(
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_19, __pyx_t_12, __pyx_t_13, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_11, __pyx_t_22, __pyx_t_10, __pyx_t_15)
#pragma omp parallel private(__pyx_t_13, __pyx_t_10, __pyx_t_14, __pyx_t_20, __pyx_t_22, __pyx_t_21, __pyx_t_11, __pyx_t_12, __pyx_t_15, __pyx_t_17, __pyx_t_19, __pyx_t_16, __pyx_t_18)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_one_flat) lastprivate(__pyx_v_one_den) lastprivate(__pyx_v_one_num) lastprivate(__pyx_v_is_valid) schedule(static)
#pragma omp for lastprivate(__pyx_v_is_valid) lastprivate(__pyx_v_one_flat) lastprivate(__pyx_v_one_num) lastprivate(__pyx_v_one_den) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = 0 + 1 * __pyx_t_8;
/* Initialize private variables to invalid values */
__pyx_v_one_flat = ((float)__PYX_NAN());
__pyx_v_one_den = ((float)__PYX_NAN());
__pyx_v_one_num = ((float)__PYX_NAN());
__pyx_v_is_valid = ((int)0xbad0bad0);
__pyx_v_one_flat = ((float)__PYX_NAN());
__pyx_v_one_num = ((float)__PYX_NAN());
__pyx_v_one_den = ((float)__PYX_NAN());
/* "pyFAI/ext/preproc.pyx":105
*
@ -4560,20 +4560,20 @@ static __Pyx_memviewslice __pyx_fuse_0_4__pyx_f_5pyFAI_3ext_7preproc_c2_preproc(
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_19, __pyx_t_12, __pyx_t_13, __pyx_t_18, __pyx_t_20, __pyx_t_22, __pyx_t_17, __pyx_t_16, __pyx_t_14, __pyx_t_11, __pyx_t_23, __pyx_t_21, __pyx_t_10, __pyx_t_15)
#pragma omp parallel private(__pyx_t_13, __pyx_t_10, __pyx_t_14, __pyx_t_20, __pyx_t_23, __pyx_t_22, __pyx_t_11, __pyx_t_21, __pyx_t_12, __pyx_t_15, __pyx_t_17, __pyx_t_19, __pyx_t_16, __pyx_t_18)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_one_num) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_one_den) lastprivate(__pyx_v_is_valid) lastprivate(__pyx_v_one_flat) schedule(static)
#pragma omp for lastprivate(__pyx_v_one_num) lastprivate(__pyx_v_is_valid) lastprivate(__pyx_v_one_flat) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_one_den) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = 0 + 1 * __pyx_t_8;
/* Initialize private variables to invalid values */
__pyx_v_one_num = ((float)__PYX_NAN());
__pyx_v_one_den = ((float)__PYX_NAN());
__pyx_v_is_valid = ((int)0xbad0bad0);
__pyx_v_one_flat = ((float)__PYX_NAN());
__pyx_v_one_den = ((float)__PYX_NAN());
/* "pyFAI/ext/preproc.pyx":196
*
@ -5350,21 +5350,21 @@ static __Pyx_memviewslice __pyx_fuse_0_4__pyx_f_5pyFAI_3ext_7preproc_cp_preproc(
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_13, __pyx_t_12, __pyx_t_18, __pyx_t_26, __pyx_t_20, __pyx_t_27, __pyx_t_17, __pyx_t_11, __pyx_t_16, __pyx_t_25, __pyx_t_21, __pyx_t_24, __pyx_t_14, __pyx_t_19, __pyx_t_23, __pyx_t_22, __pyx_t_10, __pyx_t_15)
#pragma omp parallel private(__pyx_t_10, __pyx_t_27, __pyx_t_14, __pyx_t_20, __pyx_t_23, __pyx_t_22, __pyx_t_21, __pyx_t_13, __pyx_t_26, __pyx_t_12, __pyx_t_15, __pyx_t_17, __pyx_t_19, __pyx_t_16, __pyx_t_18, __pyx_t_24, __pyx_t_25, __pyx_t_11)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_one_den) lastprivate(__pyx_v_one_var) lastprivate(__pyx_v_is_valid) lastprivate(__pyx_v_one_num) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_one_flat) schedule(static)
#pragma omp for lastprivate(__pyx_v_one_var) lastprivate(__pyx_v_one_flat) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_one_num) lastprivate(__pyx_v_one_den) lastprivate(__pyx_v_is_valid) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = 0 + 1 * __pyx_t_8;
/* Initialize private variables to invalid values */
__pyx_v_one_den = ((float)__PYX_NAN());
__pyx_v_one_var = ((float)__PYX_NAN());
__pyx_v_is_valid = ((int)0xbad0bad0);
__pyx_v_one_num = ((float)__PYX_NAN());
__pyx_v_one_flat = ((float)__PYX_NAN());
__pyx_v_one_num = ((float)__PYX_NAN());
__pyx_v_one_den = ((float)__PYX_NAN());
__pyx_v_is_valid = ((int)0xbad0bad0);
/* "pyFAI/ext/preproc.pyx":290
*
@ -6216,7 +6216,7 @@ static __Pyx_memviewslice __pyx_fuse_0_4__pyx_f_5pyFAI_3ext_7preproc_c3_preproc(
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_13, __pyx_t_19, __pyx_t_18, __pyx_t_12, __pyx_t_26, __pyx_t_20, __pyx_t_27, __pyx_t_17, __pyx_t_16, __pyx_t_25, __pyx_t_21, __pyx_t_24, __pyx_t_14, __pyx_t_11, __pyx_t_23, __pyx_t_22, __pyx_t_10, __pyx_t_15)
#pragma omp parallel private(__pyx_t_10, __pyx_t_12, __pyx_t_14, __pyx_t_20, __pyx_t_23, __pyx_t_22, __pyx_t_21, __pyx_t_11, __pyx_t_13, __pyx_t_26, __pyx_t_15, __pyx_t_17, __pyx_t_19, __pyx_t_16, __pyx_t_27, __pyx_t_18, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP

View File

@ -984,20 +984,16 @@ class Geometry(object):
if meth_name and meth_name in dir(Geometry):
# fast path may be available
out = Geometry.__dict__[meth_name](self, shape)
if scale and unit:
out = out * unit.scale
else:
# fast path is definitely not available, use the generic formula
if typ == "center":
out = self.center_array(shape, unit, scale=False)
out = self.center_array(shape, unit, scale=scale)
elif typ == "corner":
out = self.corner_array(shape, unit, scale=False)
out = self.corner_array(shape, unit, scale=scale)
else: # typ == "delta":
out = self.delta_array(shape, unit, scale=False)
if scale and unit:
return out * unit.scale
else:
return out
out = self.delta_array(shape, unit, scale=scale)
return out
def cosIncidance(self, d1, d2, path="cython"):

View File

@ -86,6 +86,8 @@ def plot1d(result, calibrant=None, label=None, ax=None):
"""Display the powder diffraction pattern in the jupyter notebook
:param result: instance of Integrate1dResult
:param calibrant: Calibrant instance to overlay diffraction lines
:param label: (str) name of the curve
:param ax: subplot object to display in, if None, a new one is created.
:return: Matplotlib subplot
"""
@ -124,17 +126,28 @@ def plot1d(result, calibrant=None, label=None, ax=None):
return ax
# TODO: 2D integration results.
# img = res2.intensity
# pos_rad = res2.radial
# pos_azim = res2.azimuthal
# self.ax_xrpd_2d.imshow(numpy.log(img - img.min() + 1e-3), origin="lower",
# extent=[pos_rad.min(), pos_rad.max(), pos_azim.min(), pos_azim.max()],
# aspect="auto")
# self.ax_xrpd_2d.set_title("2D regrouping")
# self.ax_xrpd_2d.set_xlabel(self.unit.label)
# self.ax_xrpd_2d.set_ylabel(r"Azimuthal angle $\chi$ ($^{o}$)")
# if not gui_utils.main_loop:
# self.fig_integrate.show()
# update_fig(self.fig_integrate)
# """
def plot2d(result, label=None, ax=None):
    """Display the caked (2D azimuthally regrouped) image in the jupyter notebook.

    :param result: instance of Integrate2dResult
    :param label: (str) title of the plot; defaults to "2D regrouping" when not provided
    :param ax: subplot object to display in, if None, a new one is created.
    :return: Matplotlib subplot
    """
    img = result.intensity
    pos_rad = result.radial
    pos_azim = result.azimuthal
    if ax is None:
        _fig, ax = subplots()
    # arcsinh compresses the dynamic range like log but is defined at 0 and below
    ax.imshow(numpy.arcsinh(img),
              origin="lower",
              extent=[pos_rad.min(), pos_rad.max(), pos_azim.min(), pos_azim.max()],
              aspect="auto",
              cmap="inferno")
    # Bug fix: the branches were inverted — a user-provided label was discarded
    # and the else-branch called set_title(None).
    if label:
        ax.set_title(label)
    else:
        ax.set_title("2D regrouping")
    ax.set_xlabel(result.unit.label)
    ax.set_ylabel(r"Azimuthal angle $\chi$ ($^{o}$)")
    return ax

View File

@ -15,7 +15,7 @@ from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import OCLFullSplit
#logger = utilstest.getLogger("profile")
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -24,16 +24,16 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
foo = OCLFullSplit.OCLFullSplit1d(pos,bins)
foo = OCLFullSplit.OCLFullSplit1d(pos, bins)
print(foo.pos0Range)
print(foo.pos1Range)
print(foo.pos1Range)

View File

@ -59,7 +59,7 @@ ref = ai.xrpd_LUT(data, 1000)[1]
# ref = ai.integrate1d(data, 1000, method="ocl_csr", unit="2th_deg")[0]
pos = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos, 1000, unit="2th_deg")
boo = foo.integrate(data)[1]

View File

@ -20,9 +20,9 @@ print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -31,21 +31,21 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4 * workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
@ -57,7 +57,7 @@ print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
@ -66,7 +66,7 @@ min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
@ -74,7 +74,7 @@ print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
@ -83,17 +83,17 @@ global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_idx_ptr = cl.array.empty(queue, (bins + 1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
@ -101,34 +101,34 @@ print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
# d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
outMax_2 = numpy.copy(d_outMax)
indices = ndarray(lut_size, dtype=numpy.int32)
indices = ndarray(lut_size, dtype=numpy.int32)
data_lut = ndarray(lut_size, dtype=numpy.float32)
idx_ptr = ndarray(bins+1, dtype=numpy.int32)
idx_ptr = ndarray(bins + 1, dtype=numpy.int32)
cl.enqueue_copy(queue,indices, d_indices.data)
cl.enqueue_copy(queue,data_lut, d_data.data)
cl.enqueue_copy(queue,idx_ptr, d_idx_ptr.data)
cl.enqueue_copy(queue, indices, d_indices.data)
cl.enqueue_copy(queue, data_lut, d_data.data)
cl.enqueue_copy(queue, idx_ptr, d_idx_ptr.data)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
# check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
# cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
@ -139,45 +139,45 @@ program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outC
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
program.csr_integrate(queue, (bins * workgroup_size,), (workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.copy(d_outData)
#outCount = numpy.copy(d_outCount)
#outMerge = numpy.copy(d_outMerge)
# outData = numpy.copy(d_outData)
# outCount = numpy.copy(d_outCount)
# outMerge = numpy.copy(d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue,outData, d_outData.data)
cl.enqueue_copy(queue,outCount, d_outCount.data)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
cl.enqueue_copy(queue, outData, d_outData.data)
cl.enqueue_copy(queue, outCount, d_outCount.data)
cl.enqueue_copy(queue, outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
# program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
# cl.enqueue_copy(queue,outMerge, d_outMerge)
ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
#foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
# foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
# assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
# plot(ref[0],outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0], outCount, label="ocl_lut_count")
# plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
@ -185,20 +185,20 @@ show()
six.moves.input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
# aaa = 0
# bbb = 0
# for i in range(bins):
# ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
# ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
# sort1 = numpy.argsort(ind_tmp1)
# sort2 = numpy.argsort(ind_tmp2)
# data_1 = data_tmp1[sort1]
# data_2 = data_tmp2[sort2]
# for j in range(data_1.size):
# aaa += 1
# if not numpy.allclose(data_1[j],data_2[j]):
# bbb += 1

View File

@ -36,7 +36,7 @@ data = fabio.open("halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)

View File

@ -35,7 +35,7 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)

View File

@ -20,9 +20,9 @@ print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
# from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxCSR
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -31,29 +31,29 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos, bins, unit="2th_deg")
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4 * workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
@ -65,7 +65,7 @@ print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
@ -74,7 +74,7 @@ min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
@ -82,22 +82,22 @@ print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
#d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
# d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
#program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
# program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
#program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
# program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
#outMax_1 = numpy.copy(d_outMax)
# outMax_1 = numpy.copy(d_outMax)
outMax = ref.outMax
idx_ptr = numpy.ndarray(bins+1, dtype=numpy.int32)
idx_ptr = numpy.ndarray(bins + 1, dtype=numpy.int32)
idx_ptr[0] = 0
idx_ptr[1:] = outMax.cumsum()
@ -105,38 +105,38 @@ idx_ptr[1:] = outMax.cumsum()
d_idx_ptr = cl.array.to_device(queue, idx_ptr)
#d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
# d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
#program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
# program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
#lutsize = numpy.ndarray(1, dtype=numpy.int32)
# lutsize = numpy.ndarray(1, dtype=numpy.int32)
#cl.enqueue_copy(queue, lutsize, d_lutsize.data)
# cl.enqueue_copy(queue, lutsize, d_lutsize.data)
lut_size = int(idx_ptr[-1])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
# d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
outMax_2 = numpy.copy(d_outMax)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
# check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
# cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
@ -147,42 +147,42 @@ program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outC
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
program.csr_integrate(queue, (bins * workgroup_size,), (workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.ndarray(bins, dtype=numpy.float32)
#outCount = numpy.ndarray(bins, dtype=numpy.float32)
# outData = numpy.ndarray(bins, dtype=numpy.float32)
# outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
# program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
# cl.enqueue_copy(queue,outMerge, d_outMerge)
ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
ref = ai.integrate1d(data, bins, unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
#assert(numpy.allclose(ref,outMerge))
# assert(numpy.allclose(ref,outMerge))
plot(ref[0],outMerge, label="ocl_lut_merge")
##plot(ref[0],outData, label="ocl_lut_data")
##plot(ref[0],outCount, label="ocl_lut_count")
plot(ref[0], outMerge, label="ocl_lut_merge")
# #plot(ref[0],outData, label="ocl_lut_data")
# #plot(ref[0],outCount, label="ocl_lut_count")
plot(*ref, label="ref_merge")
##plot(ref[0], ref[2], label="ref_data")
##plot(ref[0], ref[3], label="ref_count")
###plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
# #plot(ref[0], ref[2], label="ref_data")
# #plot(ref[0], ref[3], label="ref_count")
# ##plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
six.moves.input()

View File

@ -20,9 +20,9 @@ try:
from pyFAI.third_party import six
except (ImportError, Exception):
import six
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
# from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxCSR
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -31,21 +31,21 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4 * workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
@ -57,7 +57,7 @@ print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
@ -66,7 +66,7 @@ min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
@ -74,7 +74,7 @@ print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
@ -83,17 +83,17 @@ global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_idx_ptr = cl.array.empty(queue, (bins + 1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
@ -101,26 +101,26 @@ print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
# d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
outMax_2 = numpy.copy(d_outMax)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
# check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
# cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
@ -131,43 +131,43 @@ program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outC
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
program.csr_integrate(queue, (bins * workgroup_size,), (workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.ndarray(bins, dtype=numpy.float32)
#outCount = numpy.ndarray(bins, dtype=numpy.float32)
# outData = numpy.ndarray(bins, dtype=numpy.float32)
# outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
# program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
# cl.enqueue_copy(queue,outMerge, d_outMerge)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos, bins, unit="2th_deg")
#assert(numpy.allclose(ref,outMerge))
# assert(numpy.allclose(ref,outMerge))
##plot(ref[0],outMerge, label="ocl_lut_merge")
###plot(ref[0],outData, label="ocl_lut_data")
###plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[1], label="ref_merge")
###plot(ref[0], ref[2], label="ref_data")
###plot(ref[0], ref[3], label="ref_count")
# #plot(ref[0],outMerge, label="ocl_lut_merge")
# ##plot(ref[0],outData, label="ocl_lut_data")
# ##plot(ref[0],outCount, label="ocl_lut_count")
# plot(ref[1], label="ref_merge")
# ##plot(ref[0], ref[2], label="ref_data")
# ##plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
#legend()
#show()
#six.moves.input()
# legend()
# show()
# six.moves.input()

View File

@ -20,10 +20,10 @@ print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
#logger = utilstest.getLogger("profile")
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -32,21 +32,21 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4 * workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
@ -58,7 +58,7 @@ print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
@ -67,7 +67,7 @@ min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
@ -75,7 +75,7 @@ print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
@ -84,17 +84,17 @@ global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_idx_ptr = cl.array.empty(queue, (bins + 1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
@ -102,34 +102,34 @@ print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
# d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
outMax_2 = numpy.copy(d_outMax)
indices = ndarray(lut_size, dtype=numpy.int32)
indices = ndarray(lut_size, dtype=numpy.int32)
data_lut = ndarray(lut_size, dtype=numpy.float32)
idx_ptr = ndarray(bins+1, dtype=numpy.int32)
idx_ptr = ndarray(bins + 1, dtype=numpy.int32)
cl.enqueue_copy(queue,indices, d_indices.data)
cl.enqueue_copy(queue,data_lut, d_data.data)
cl.enqueue_copy(queue,idx_ptr, d_idx_ptr.data)
cl.enqueue_copy(queue, indices, d_indices.data)
cl.enqueue_copy(queue, data_lut, d_data.data)
cl.enqueue_copy(queue, idx_ptr, d_idx_ptr.data)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
# check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
# cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
@ -140,48 +140,48 @@ program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outC
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
program.csr_integrate(queue, (bins * workgroup_size,), (workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.copy(d_outData)
#outCount = numpy.copy(d_outCount)
#outMerge = numpy.copy(d_outMerge)
# outData = numpy.copy(d_outData)
# outCount = numpy.copy(d_outCount)
# outMerge = numpy.copy(d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue,outData, d_outData.data)
cl.enqueue_copy(queue,outCount, d_outCount.data)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
cl.enqueue_copy(queue, outData, d_outData.data)
cl.enqueue_copy(queue, outCount, d_outCount.data)
cl.enqueue_copy(queue, outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
# program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
# cl.enqueue_copy(queue,outMerge, d_outMerge)
ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos, bins, unit="2th_deg")
# foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
# ref = splitPixelFull.fullSplit1D(pos, data, bins)
ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
# assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
# plot(ref[0],outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0], outCount, label="ocl_lut_count")
# plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
@ -189,18 +189,18 @@ show()
six.moves.input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
# aaa = 0
# bbb = 0
# for i in range(bins):
# ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
# ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
# sort1 = numpy.argsort(ind_tmp1)
# sort2 = numpy.argsort(ind_tmp2)
# data_1 = data_tmp1[sort1]
# data_2 = data_tmp2[sort2]
# for j in range(data_1.size):
# aaa += 1
# if not numpy.allclose(data_1[j],data_2[j]):
# bbb += 1

View File

@ -17,7 +17,7 @@ pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import splitPixelFullLUT_float32
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
try:
@ -25,7 +25,7 @@ try:
except (ImportError, Exception):
import six
import scipy
#logger = utilstest.getLogger("profile")
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -34,37 +34,37 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
boo = splitPixelFullLUT_float32.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
boo = splitPixelFullLUT_float32.HistoLUT1dFullSplit(pos, bins, unit="2th_deg")
matrix_32 = scipy.sparse.csr_matrix((boo.data,boo.indices,boo.indptr), shape=(bins,data.size))
matrix_32 = scipy.sparse.csr_matrix((boo.data, boo.indices, boo.indptr), shape=(bins, data.size))
mat32d = matrix_32.todense()
#mat32d.shape = (mat32d.size,)
#out = boo.integrate(data)
# mat32d.shape = (mat32d.size,)
# out = boo.integrate(data)
#ai.xrpd_LUT(data, 1000)
# ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos, bins, unit="2th_deg")
matrix_64 = scipy.sparse.csr_matrix((foo.data,foo.indices,foo.indptr), shape=(bins,data.size))
matrix_64 = scipy.sparse.csr_matrix((foo.data, foo.indices, foo.indptr), shape=(bins, data.size))
mat64d = matrix_64.todense()
#mat64d.shape = (mat64d.size,)
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
# mat64d.shape = (mat64d.size,)
# foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
bools_bad = (abs(mat32d - mat64d) > 0.000001)
#bools_good = (abs(mat32d - mat64d) <= 0.000001)
# bools_good = (abs(mat32d - mat64d) <= 0.000001)
del mat32d
del mat64d
@ -75,9 +75,9 @@ tmp = numpy.where(bools_bad)[1].ravel()
pixels_bad = numpy.copy(tmp)
pixels_bad.sort()
#tmp = numpy.where(bools_good)[1]
#pixels_good = numpy.copy(tmp)
#pixels_good.sort()
# tmp = numpy.where(bools_good)[1]
# pixels_good = numpy.copy(tmp)
# pixels_good.sort()
@ -85,40 +85,40 @@ pixels_bad.sort()
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
# ref = splitPixelFull.fullSplit1D(pos, data, bins)
#ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
# ref = foo.integrate(data)
# assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
#plot(ref[0],outCount, label="ocl_lut_count")
# plot(ref[0],outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
# plot(ref[0],outCount, label="ocl_lut_count")
#plot(out[0], out[1], label="ocl_lut_merge")
#plot(out[0], out[2], label="ocl_lut_data")
#plot(out[0], out[3], label="ocl_lut_count")
# plot(out[0], out[1], label="ocl_lut_merge")
# plot(out[0], out[2], label="ocl_lut_data")
# plot(out[0], out[3], label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
#plot(ref[0], ref[3], label="ref_count")
# plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
# plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
#legend()
#show()
#six.moves.input()
# legend()
# show()
# six.moves.input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
# aaa = 0
# bbb = 0
# for i in range(bins):
# ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
# ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
# sort1 = numpy.argsort(ind_tmp1)
# sort2 = numpy.argsort(ind_tmp2)
# data_1 = data_tmp1[sort1]
# data_2 = data_tmp2[sort2]
# for j in range(data_1.size):
# aaa += 1
# if not numpy.allclose(data_1[j],data_2[j]):
# bbb += 1

View File

@ -20,11 +20,11 @@ print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
import scipy
#logger = utilstest.getLogger("profile")
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
@ -33,93 +33,93 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size/8,4,2)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
#size = data.size
size = pos_size/8
# size = data.size
size = pos_size / 8
boo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg") #1101814
boo = splitPixelFullLUT.HistoLUT1dFullSplit(pos, bins, unit="2th_deg") # 1101814
#matrix_32 = scipy.sparse.csr_matrix((boo.data,boo.indices,boo.indptr), shape=(bins,data.size))
#mat32d = matrix_32.todense()
#mat32d.shape = (mat32d.size,)
# matrix_32 = scipy.sparse.csr_matrix((boo.data,boo.indices,boo.indptr), shape=(bins,data.size))
# mat32d = matrix_32.todense()
# mat32d.shape = (mat32d.size,)
out = boo.integrate(data)
#ai.xrpd_LUT(data, 1000)
# ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
#foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg", bad_pixel=None)
# foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg", bad_pixel=None)
#ref = foo.integrate(data)
#matrix_64 = scipy.sparse.csr_matrix((foo.data,foo.indices,foo.indptr), shape=(bins,data.size))
#mat64d = matrix_64.todense()
#mat64d.shape = (mat64d.size,)
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
# ref = foo.integrate(data)
# matrix_64 = scipy.sparse.csr_matrix((foo.data,foo.indices,foo.indptr), shape=(bins,data.size))
# mat64d = matrix_64.todense()
# mat64d.shape = (mat64d.size,)
# foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
#bools_bad = (abs(mat32d - mat64d) > 0.000001)
#bools_good = (abs(mat32d - mat64d) <= 0.000001)
# bools_bad = (abs(mat32d - mat64d) > 0.000001)
# bools_good = (abs(mat32d - mat64d) <= 0.000001)
#del mat32d
#del mat64d
#del matrix_32
#del matrix_64
# del mat32d
# del mat64d
# del matrix_32
# del matrix_64
#tmp = numpy.where(bools_bad)[1].ravel()
#pixels_bad = numpy.copy(tmp)
#pixels_bad.sort()
# tmp = numpy.where(bools_bad)[1].ravel()
# pixels_bad = numpy.copy(tmp)
# pixels_bad.sort()
#tmp = numpy.where(bools_good)[1]
#pixels_good = numpy.copy(tmp)
#pixels_good.sort()
# tmp = numpy.where(bools_good)[1]
# pixels_good = numpy.copy(tmp)
# pixels_good.sort()
#pff_ind: (matrix([[856]]), matrix([[1101814]]))
# pff_ind: (matrix([[856]]), matrix([[1101814]]))
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
# ref = splitPixelFull.fullSplit1D(pos, data, bins)
#ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
# ref = foo.integrate(data)
# assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
#plot(ref[0],outCount, label="ocl_lut_count")
# plot(ref[0],outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
# plot(ref[0],outCount, label="ocl_lut_count")
plot(out[0], out[1], label="ocl_lut_merge")
#plot(out[0], out[2], label="ocl_lut_data")
#plot(out[0], out[3], label="ocl_lut_count")
# plot(out[0], out[2], label="ocl_lut_data")
# plot(out[0], out[3], label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
#plot(ref[0], ref[3], label="ref_count")
# plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
# plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
six.moves.input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
# aaa = 0
# bbb = 0
# for i in range(bins):
# ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
# ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
# sort1 = numpy.argsort(ind_tmp1)
# sort2 = numpy.argsort(ind_tmp2)
# data_1 = data_tmp1[sort1]
# data_2 = data_tmp2[sort2]
# for j in range(data_1.size):
# aaa += 1
# if not numpy.allclose(data_1[j],data_2[j]):
# bbb += 1

View File

@ -23,7 +23,7 @@ data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = (100, 36)
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size / 8, 4, 2)