@@ -446,16 +446,16 @@ cdef gpucontext *kernel_context(GpuKernel k) except NULL:
         raise GpuArrayException, "Invalid kernel or destroyed context"
     return res

-cdef int kernel_sched(GpuKernel k, size_t n, size_t *ls, size_t *gs) except -1:
+cdef int kernel_sched(GpuKernel k, size_t n, size_t *gs, size_t *ls) except -1:
     cdef int err
-    err = GpuKernel_sched(&k.k, n, ls, gs)
+    err = GpuKernel_sched(&k.k, n, gs, ls)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)

-cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *ls,
-                     const size_t *gs, size_t shared, void **args) except -1:
+cdef int kernel_call(GpuKernel k, unsigned int n, const size_t *gs,
+                     const size_t *ls, size_t shared, void **args) except -1:
     cdef int err
-    err = GpuKernel_call(&k.k, n, ls, gs, shared, args)
+    err = GpuKernel_call(&k.k, n, gs, ls, shared, args)
     if err != GA_NO_ERROR:
         raise get_exc(err), kernel_error(k, err)

@@ -2113,10 +2113,10 @@ cdef class GpuKernel:
         sure to test against the size of your data.

         If you want more control over thread allocation you can use the
-        `ls` and `gs` parameters like so::
+        `gs` and `ls` parameters like so::

             k = GpuKernel(...)
-            k(param1, param2, ls=ls, gs=gs)
+            k(param1, param2, gs=gs, ls=ls)

         If you choose to use this interface, make sure to stay within the
         limits of `k.maxlsize` and `ctx.maxgsize` or the call will fail.
@@ -2200,12 +2200,12 @@ cdef class GpuKernel:
         finally:
             free(_types)

-    def __call__(self, *args, n=None, ls=None, gs=None, shared=0):
+    def __call__(self, *args, n=None, gs=None, ls=None, shared=0):
         if n == None and (ls == None or gs == None):
             raise ValueError, "Must specify size (n) or both gs and ls"
-        self.do_call(n, ls, gs, args, shared)
+        self.do_call(n, gs, ls, args, shared)

-    cdef do_call(self, py_n, py_ls, py_gs, py_args, size_t shared):
+    cdef do_call(self, py_n, py_gs, py_ls, py_args, size_t shared):
         cdef size_t n
         cdef size_t gs[3]
         cdef size_t ls[3]
@@ -2272,8 +2272,8 @@ cdef class GpuKernel:
             if nd != 1:
                 raise ValueError, "n is specified and nd != 1"
             n = py_n
-            kernel_sched(self, n, &ls[0], &gs[0])
-        kernel_call(self, nd, ls, gs, shared, self.callbuf)
+            kernel_sched(self, n, &gs[0], &ls[0])
+        kernel_call(self, nd, gs, ls, shared, self.callbuf)

     cdef _setarg(self, unsigned int index, int typecode, object o):
         if typecode == GA_BUFFER:
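
For context, a minimal usage sketch of the keyword order this change settles on. The kernel object `k`, its arguments `dst`, `src`, `n`, and the chosen sizes are illustrative assumptions, not part of this commit; only the `n`, `gs`, `ls`, and `shared` keywords and the `k.maxlsize` limit come from the code above.

    # Sketch: launching an already-built 1-D GpuKernel under the new convention,
    # where the global size (gs) is named before the local size (ls).
    n = 1 << 20
    ls = 256                    # work-group / block size; keep it <= k.maxlsize
    gs = (n + ls - 1) // ls     # enough groups to cover all n elements

    # Explicit sizes: pass gs and ls directly.
    k(dst, src, n, gs=gs, ls=ls)

    # Or give only the element count and let kernel_sched pick gs and ls.
    k(dst, src, n, n=n)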