def load_test_case(pagelocked_buffer, img):
    # Flatten the image and copy it into the page-locked host input buffer.
    copy_size = img.ravel().size
    np.copyto(pagelocked_buffer[:int(copy_size)], img.ravel())
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    print('max_batch_size', engine.max_batch_size)
    for binding in engine:
        print('binding', binding, engine.get_binding_shape(binding), engine.get_binding_dtype(binding))
        # Drop the (dynamic) batch dimension and size the buffer for the largest batch.
        size = trt.volume(engine.get_binding_shape(binding)[1:]) * engine.max_batch_size
        print(size)
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers.
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream
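# The `engine` used below is assumed to come from an earlier part of this document
# (e.g. built from an ONNX model). As a hedged sketch only, a previously serialized
# engine could also be loaded from disk; the file name "model.trt" and the helper
# name are placeholders, not part of the original code. The snippet assumes the
# usual imports (numpy as np, tensorrt as trt, pycuda.driver as cuda, pycuda.autoinit).
def load_engine(trt_logger, plan_path="model.trt"):
    # Deserialize a serialized TensorRT engine (plan file) from disk.
    with open(plan_path, "rb") as f, trt.Runtime(trt_logger) as runtime:
        return runtime.deserialize_cuda_engine(f.read())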
inputs, outputs, bindings, stream = allocate_buffers(engine)
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream.
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
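# Note: for engines built with the explicit-batch flag and dynamic shapes (which the
# set_binding_shape calls below imply), recent TensorRT releases expect execute_async_v2,
# which takes no batch_size argument because the batch is part of the binding shapes.
# This variant is a hedged sketch of that path, not part of the original snippet.
def do_inference_v2(context, bindings, inputs, outputs, stream):
    # Host -> device copies for every input binding.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Enqueue inference; the batch size is already baked into the binding shapes.
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Device -> host copies for every output binding.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    stream.synchronize()
    return [out.host for out in outputs]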
def test_tensorrt(engine, test_loader):
    test_loss = 0
    correct = 0
    lossLayer = torch.nn.CrossEntropyLoss(reduction='sum')
    with engine.create_execution_context() as context:
        context.set_optimization_profile_async(0, stream.handle)
        for data, target in test_loader:
            data = data.numpy()
            # Bind the actual batch size of this iteration to the input binding.
            input_shape = engine.get_binding_shape(0)
            input_shape[0] = data.shape[0]
            context.set_binding_shape(0, input_shape)
            if not context.all_binding_shapes_specified:
                raise RuntimeError("Not all input dimensions are specified for the execution context")
            load_test_case(inputs[0].host, data)
            # =======================================
            # The common do_inference function will return a list of outputs - we only have one in this case.
            pred = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size=data.shape[0])
            # The host buffer is sized for max_batch_size; keep only the rows for this batch.
            output = torch.as_tensor(pred[0]).view(-1, 1000)[:data.shape[0]]
            test_loss += lossLayer(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
        # Delete the context if it will not be reused.
        del context
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {:.3f}%\n'.format(
        test_loss, 100. * correct / len(test_loader.dataset)
    ))
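# A hedged usage sketch: the 1000-way output above suggests an ImageNet-style
# classifier, so a plausible test_loader could look like the one below. The dataset
# path, crop size, normalization constants, and helper name are assumptions for
# illustration, not taken from the original.
def make_imagenet_val_loader(val_dir="imagenet/val", batch_size=32):
    import torchvision
    from torchvision import transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    dataset = torchvision.datasets.ImageFolder(val_dir, transform=preprocess)
    # batch_size should not exceed engine.max_batch_size.
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)

# test_loader = make_imagenet_val_loader()
# test_tensorrt(engine, test_loader)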
import time
def test_tensorrt_for_test(engine):
    test_loss = 0
    correct = 0
    lossLayer = torch.nn.CrossEntropyLoss(reduction='sum')
    i = 0
    total_time_span = 0
    with engine.create_execution_context() as context:
        context.set_optimization_profile_async(0, stream.handle)
        # Run at the largest batch size the engine supports.
        input_shape = engine.get_binding_shape(0)
        input_shape[0] = engine.max_batch_size
        context.set_binding_shape(0, input_shape)
        if not context.all_binding_shapes_specified:
            raise RuntimeError("Not all input dimensions are specified for the execution context")
        # Warm up with random data before timing.
        print('input_shape', input_shape)
        data = np.random.rand(*input_shape).astype(np.float32)
        load_test_case(inputs[0].host, data)
        for i in range(10):
            pred = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size=engine.max_batch_size)
        for i in range(100):
            # data = np.random.rand(*input_shape).astype(np.float32)
            # load_test_case(inputs[0].host, data)
            # =======================================
            # The common do_inference function will return a list of outputs - we only have one in this case.
            start_time = time.time()
            pred = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size=engine.max_batch_size)
            time_span = time.time() - start_time
            total_time_span += time_span
        # Average latency per batch over the 100 timed runs.
        total_time_span /= 100.0
        print('total_time_span', total_time_span)
        # Delete the context if it will not be reused.
        del context
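# A hedged usage sketch, not part of the original: run the benchmark and turn the
# printed average per-batch latency into an approximate throughput figure. The
# helper name and the arithmetic are additions for illustration.
def throughput_from_latency(avg_batch_latency_s, batch_size):
    # Images processed per second at the given batch size.
    return batch_size / avg_batch_latency_s

# test_tensorrt_for_test(engine)
# throughput_from_latency(avg_latency, engine.max_batch_size)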