Introduction to TOPI

Author: Ehsan M. Kermani

This is an introductory tutorial to TVM Operator Inventory (TOPI). TOPI provides numpy-style generic operations and schedules at a higher level of abstraction than TVM's compute/schedule primitives. In this tutorial, we will see how TOPI can save us from writing boilerplate code in TVM.

from __future__ import absolute_import, print_function

import tvm
import topi
import numpy as np

Basic example

Let’s revisit the row-sum operation (equivalent to B = numpy.sum(A, axis=1)). To compute the sum of the rows of a two-dimensional TVM tensor A, we have to specify the symbolic operation as well as a schedule, as follows

n = tvm.var("n")
m = tvm.var("m")
A = tvm.placeholder((n, m), name='A')
k = tvm.reduce_axis((0, m), "k")
B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B")
s = tvm.create_schedule(B.op)

and to examine the IR code in human-readable format, we can do

print(tvm.lower(s, [A], simple_mode=True))

Out:

// attr [B] storage_scope = "global"
allocate B[float32 * n]
produce B {
  for (i, 0, n) {
    B[i] = 0f
    for (k, 0, m) {
      B[i] = (B[i] + A[((i*stride) + (k*stride))])
    }
  }
}

However, for such a common operation we had to define the reduce axis ourselves, as well as write the explicit computation with tvm.compute. Imagine how much detail we would need to provide for more complicated operations. Fortunately, we can replace those two lines with a simple call to topi.sum, much like numpy.sum

C = topi.sum(A, axis=1)
ts = tvm.create_schedule(C.op)
print(tvm.lower(ts, [A], simple_mode=True))

Out:

// attr [A_red] storage_scope = "global"
allocate A_red[float32 * n]
produce A_red {
  for (ax0, 0, n) {
    A_red[ax0] = 0f
    for (k1, 0, m) {
      A_red[ax0] = (A_red[ax0] + A[((ax0*stride) + (k1*stride))])
    }
  }
}

Numpy-style operator overloading

We can add two tensors with broadcastable shapes using topi.broadcast_add. Even shorter, TOPI provides operator overloading for such common operations. For example,

x, y = 100, 10
a = tvm.placeholder((x, y, y), name="a")
b = tvm.placeholder((y, y), name="b")
c = a + b  # same as topi.broadcast_add
d = a * b  # same as topi.broadcast_mul

With the same overloaded syntax, TOPI handles broadcasting a primitive (int, float) against a tensor, e.g. d - 3.14.
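
For instance, a minimal sketch (reusing the tensors defined above; the names h and sh are illustrative only) lowers the broadcast against a scalar just like the other operations

h = d - 3.14  # broadcast the scalar 3.14 over every element of d
sh = tvm.create_schedule(h.op)
print(tvm.lower(sh, [a, b], simple_mode=True))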

Generic schedules and fusing operations

Up to now, we have seen an example of how TOPI can save us from writing explicit computations in the lower-level API. But it does not stop there: we still wrote the schedule ourselves, as before. TOPI also provides higher-level scheduling recipes depending on a given context. For example, for CUDA, we can schedule the following series of operations, ending with topi.sum, using only topi.generic.schedule_reduce

e = topi.elemwise_sum([c, d])
f = e / 2.0
g = topi.sum(f)
with tvm.target.cuda():
    sg = topi.generic.schedule_reduce(g)
    print(tvm.lower(sg, [a, b], simple_mode=True))

Out:

// attr [T_divide_red] storage_scope = "global"
allocate T_divide_red[float32 * 1]
produce T_divide_red {
  // attr [iter_var(threadIdx.x, range(min=0, ext=1024), threadIdx.x)] thread_extent = 1024
  // attr [T_divide_red.rf] storage_scope = "local"
  allocate T_divide_red.rf[float32 * 1]
  // attr [reduce_temp0] storage_scope = "local"
  allocate reduce_temp0[float32 * 1]
  produce T_divide_red.rf {
    T_divide_red.rf[0] = 0f
    for (k0.k1.fused.k2.fused.outer, 0, 10) {
      if (likely((((((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x) < 10000) && (((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x) < 10000)) && (((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x) < 10000)))) {
        T_divide_red.rf[0] = (T_divide_red.rf[0] + (((a[((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x)] + b[floormod(((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x), 100)]) + (a[((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x)]*b[floormod(((k0.k1.fused.k2.fused.outer*1024) + threadIdx.x), 100)]))*0.5f))
      }
    }
  }
  // attr [comm_reducer(result=[(x + y)], lhs=[x], rhs=[y], identity_element=[0f])] reduce_scope = reinterpret((uint64)0)
  tvm_thread_allreduce((uint32)1, T_divide_red.rf[0], (bool)1, reduce_temp0, threadIdx.x)
  if ((threadIdx.x == 0)) {
    T_divide_red[0] = reduce_temp0[0]
  }
}

As you can see, the scheduled stages of the computation have been accumulated, and we can examine them by

print(sg.stages)

Out:

[stage(a, 0x18e0a4ce0), stage(b, 0x194d3fa60), stage(T_add, 0x1964b6260), stage(T_multiply, 0x194dba830), stage(T_elemwise_sum, 0x1964f1810), stage(T_divide, 0x194d9d430), stage(T_divide_red.rf, 0x18ba89e10), stage(T_divide_red, 0x194d9d560)]

We can test the correctness by comparing with the numpy result as follows

func = tvm.build(sg, [a, b, g], 'cuda')
ctx = tvm.gpu(0)
a_np = np.random.uniform(size=(x, y, y)).astype(a.dtype)
b_np = np.random.uniform(size=(y, y)).astype(b.dtype)
g_np = np.sum(np.add(a_np + b_np, a_np * b_np) / 2.0)
a_nd = tvm.nd.array(a_np, ctx)
b_nd = tvm.nd.array(b_np, ctx)
g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), ctx)
func(a_nd, b_nd, g_nd)
tvm.testing.assert_allclose(g_nd.asnumpy(), g_np, rtol=1e-5)

TOPI also provides common neural network operations, such as softmax, with optimized schedules

tarray = tvm.placeholder((512, 512), name="tarray")
softmax_topi = topi.nn.softmax(tarray)
with tvm.target.create("cuda"):
    sst = topi.generic.schedule_softmax(softmax_topi)
    print(tvm.lower(sst, [tarray], simple_mode=True))

Out:

// attr [T_softmax_maxelem] storage_scope = "global"
allocate T_softmax_maxelem[float32 * 512]
// attr [T_softmax_exp] storage_scope = "global"
allocate T_softmax_exp[float32 * 262144]
produce T_softmax_maxelem {
  // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 512
  T_softmax_maxelem[blockIdx.x] = -3.40282e+38f
  for (k, 0, 512) {
    T_softmax_maxelem[blockIdx.x] = max(T_softmax_maxelem[blockIdx.x], tarray[((blockIdx.x*512) + k)])
  }
}
produce T_softmax_exp {
  // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 512
  for (i1, 0, 512) {
    T_softmax_exp[((blockIdx.x*512) + i1)] = exp((tarray[((blockIdx.x*512) + i1)] - T_softmax_maxelem[blockIdx.x]))
  }
}
produce T_softmax_expsum {
  // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 512
  // attr [T_softmax_expsum.rf] storage_scope = "local"
  allocate T_softmax_expsum.rf[float32 * 1]
  // attr [reduce_temp0] storage_scope = "local"
  allocate reduce_temp0[float32 * 1]
  // attr [iter_var(threadIdx.x, range(min=0, ext=64), threadIdx.x)] thread_extent = 64
  produce T_softmax_expsum.rf {
    T_softmax_expsum.rf[0] = 0f
    for (k.outer, 0, 8) {
      T_softmax_expsum.rf[0] = (T_softmax_expsum.rf[0] + T_softmax_exp[(((blockIdx.x*512) + (k.outer*64)) + threadIdx.x)])
    }
  }
  // attr [comm_reducer(result=[(x + y)], lhs=[x], rhs=[y], identity_element=[0f])] reduce_scope = reinterpret((uint64)0)
  tvm_thread_allreduce((uint32)1, T_softmax_expsum.rf[0], (bool)1, reduce_temp0, threadIdx.x)
  if ((threadIdx.x == 0)) {
    T_softmax_maxelem[blockIdx.x] = reduce_temp0[0]
  }
}
produce T_softmax_norm {
  // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 512
  // attr [iter_var(threadIdx.x, range(min=0, ext=64), threadIdx.x)] thread_extent = 64
  for (i1.inner, 0, 8) {
    T_softmax_exp[(((blockIdx.x*512) + (threadIdx.x*8)) + i1.inner)] = (T_softmax_exp[(((blockIdx.x*512) + (threadIdx.x*8)) + i1.inner)]/T_softmax_maxelem[blockIdx.x])
  }
}
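
As with the reduction example, we can build the kernel and verify it against numpy. A minimal sketch, assuming a CUDA-enabled device; the reference computes softmax along the last axis, which is the default for topi.nn.softmax, and the variable names are illustrative only

softmax_func = tvm.build(sst, [tarray, softmax_topi], 'cuda')
ctx = tvm.gpu(0)
t_np = np.random.uniform(size=(512, 512)).astype(tarray.dtype)
# numpy reference: softmax along the last axis
exp_np = np.exp(t_np - np.max(t_np, axis=-1, keepdims=True))
ref_np = exp_np / np.sum(exp_np, axis=-1, keepdims=True)
t_nd = tvm.nd.array(t_np, ctx)
out_nd = tvm.nd.array(np.zeros_like(t_np), ctx)
softmax_func(t_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), ref_np, rtol=1e-5)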

Fusing convolutions

We can fuse topi.nn.conv2d and topi.nn.relu together.

Note

TOPI functions are all generic functions. They have different implementations for different backends to optimize performance. For each backend, they need to be called under a target scope for both compute declaration and schedule, so that TVM can choose the right implementation based on the target information.
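
As a minimal illustration of this dispatch (a sketch, assuming the llvm CPU backend is enabled in your TVM build), the reduction g from the previous section can be scheduled for a CPU simply by switching the target scope

with tvm.target.create("llvm"):
    sg_cpu = topi.generic.schedule_reduce(g)  # dispatches to the CPU schedule
    print(tvm.lower(sg_cpu, [a, b], simple_mode=True))

The fused convolution example below follows the same pattern under a CUDA target scope.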

data = tvm.placeholder((1, 3, 224, 224))
kernel = tvm.placeholder((10, 3, 5, 5))

with tvm.target.create("cuda"):
    conv = topi.nn.conv2d(data, kernel, strides=1, padding=2, dilation=1)
    out = topi.nn.relu(conv)
    sconv = topi.generic.nn.schedule_conv2d_nchw([out])
    print(tvm.lower(sconv, [data, kernel], simple_mode=True))

Out:

// attr [compute] storage_scope = "global"
allocate compute[float32 * 501760]
produce compute {
  // attr [iter_var(blockIdx.z, , blockIdx.z)] thread_extent = 10
  // attr [compute] storage_scope = "local"
  allocate compute[float32 * 1]
  // attr [pad_temp.shared] storage_scope = "shared"
  allocate pad_temp.shared[float32 * 1]
  // attr [placeholder.shared] storage_scope = "shared"
  allocate placeholder.shared[float32 * 1]
  // attr [iter_var(blockIdx.y, , blockIdx.y)] thread_extent = 224
  // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 224
  // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 1
  // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
  // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 1
  produce compute {
    compute[0] = 0f
    for (rc.outer, 0, 3) {
      for (ry.outer, 0, 5) {
        for (rx.outer, 0, 5) {
          produce pad_temp.shared {
            // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 1
            // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
            // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 1
            pad_temp.shared[0] = tvm_if_then_else(((((2 <= (blockIdx.y + ry.outer)) && ((blockIdx.y + ry.outer) < 226)) && (2 <= (blockIdx.x + rx.outer))) && ((blockIdx.x + rx.outer) < 226)), placeholder[((((((rc.outer*50176) + (blockIdx.y*224)) + (ry.outer*224)) + blockIdx.x) + rx.outer) - 450)], 0f)
          }
          produce placeholder.shared {
            // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 1
            // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
            // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 1
            placeholder.shared[0] = placeholder[((((blockIdx.z*75) + (rc.outer*25)) + (ry.outer*5)) + rx.outer)]
          }
          compute[0] = (compute[0] + (pad_temp.shared[0]*placeholder.shared[0]))
        }
      }
    }
  }
  compute[(((blockIdx.z*50176) + (blockIdx.y*224)) + blockIdx.x)] = max(compute[0], 0f)
}
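
Following the same pattern as the reduction example, the fused kernel can be built and run. A minimal sketch, assuming a CUDA-enabled device; the array names and the explicit output shape are spelled out only for illustration

conv_func = tvm.build(sconv, [data, kernel, out], 'cuda')
ctx = tvm.gpu(0)
data_np = np.random.uniform(size=(1, 3, 224, 224)).astype(data.dtype)
kernel_np = np.random.uniform(size=(10, 3, 5, 5)).astype(kernel.dtype)
data_nd = tvm.nd.array(data_np, ctx)
kernel_nd = tvm.nd.array(kernel_np, ctx)
# 5x5 kernels with padding=2 and stride 1 preserve the 224x224 spatial size
out_nd = tvm.nd.array(np.zeros((1, 10, 224, 224), dtype=out.dtype), ctx)
conv_func(data_nd, kernel_nd, out_nd)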

Summary

In this tutorial, we have seen

  • How to use the TOPI API for common operations with numpy-style operators.

  • How TOPI facilitates generic scheduling and operator fusion for a given context, generating optimized kernel code.
