You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
I am using FINN to implement a ResNet-18 trained with Brevitas on the CIFAR-10 dataset. The training code is as follows:
import onnx
import torch
import torch.nn as nn
from torch.nn import functional as F
import brevitas.nn as qnn
from torch import nn, optim
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import brevitas.onnx as bo
def forward(self, x):
    """Forward pass: quantized stem -> four residual stages -> class logits.

    Args:
        x: input image batch; the export call later in the post uses
           shape (N, 3, 32, 32).

    Returns:
        Logits of shape (N, num_classes) from the final quantized linear layer.
    """
    out = self.quant_inp(x)
    # Stem: 7x7 quantized conv, batch norm, quantized ReLU, 3x3 max pool.
    out = self.conv1(out)
    out = self.bn1(out)
    out = self.relu1(out)
    out = self.maxpool1(out)
    # Four residual stages.
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.layer4(out)
    out = self.avgpool(out)
    # Flatten once. The original applied both .view(...) and .reshape(...)
    # back to back; the second call was a redundant no-op, so only one
    # flatten is kept (reshape, which also handles non-contiguous tensors).
    out = out.reshape(out.shape[0], -1)
    out = self.fc(out)
    return out
import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg
import os
import shutil
# Input model and output location for the FINN dataflow build.
model_file = "resnet18_quant.onnx"
estimates_output_dir = "output_estimates_only"

# Start from a clean slate: drop any results a previous run left behind.
stale_results = estimates_output_dir
if os.path.exists(stale_results):
    shutil.rmtree(stale_results)
    print("Previous run results deleted!")
And I got the error:
Traceback (most recent call last):
File "/workspace/finn/src/finn/builder/build_dataflow.py", line 128, in build_dataflow_cfg
model = transform_step(model, cfg)
File "/workspace/finn/src/finn/builder/build_dataflow_steps.py", line 168, in step_streamline
model = model.transform(Streamline())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn/src/finn/transformation/streamline/init.py", line 102, in apply
model = model.transform(RemoveIdentityOps())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn/src/finn/transformation/streamline/remove.py", line 90, in apply
model = model.transform(InferShapes())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 86, in apply
hidden_ops = _hide_finn_ops(model)
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 60, in _hide_finn_ops
new_node = _make_shape_compatible_op(node, model)
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 45, in _make_shape_compatible_op
return inst.make_shape_compatible_op(model)
File "/workspace/finn-base/src/finn/custom_op/general/im2col.py", line 175, in make_shape_compatible_op
of dimensions (N, 1, W, C)"
AssertionError: Unexpected kernel shape and padding for input image of dimensions (N, 1, W, C)
/workspace/finn-base/src/finn/custom_op/general/im2col.py(175)make_shape_compatible_op()
-> of dimensions (N, 1, W, C)"
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
I am using FINN to implement a ResNet-18 trained with Brevitas on the CIFAR-10 dataset. The training code is as follows:
import onnx
import torch
import torch.nn as nn
from torch.nn import functional as F
import brevitas.nn as qnn
from torch import nn, optim
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import brevitas.onnx as bo
# Flag passed as return_quant_tensor to every Brevitas layer below, so each
# layer emits a QuantTensor rather than a plain torch.Tensor.
return_quant = True
# Quantization bit widths: 8-bit input quantization, 2-bit weights
# (bit_width_weight_b is the variant used by the residual-block convs),
# and 2-bit activations.
bit_width_input = 8
bit_width_weight = 2
bit_width_weight_b = 2
bit_width_act = 2
class RestNetBasicBlock(nn.Module):
    """Residual basic block: two quantized 3x3 convs, each with BN and
    quantized ReLU.

    NOTE(review): the original post rendered ``__init__`` as ``init`` —
    markdown ate the double underscores — which would leave the class
    without a constructor. Restored here. The ``forward`` method is not
    shown in this snippet.
    """

    def __init__(self, in_channels, out_channels, stride):
        super(RestNetBasicBlock, self).__init__()
        self.conv1 = qnn.QuantConv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False, weight_bit_width=bit_width_weight_b, return_quant_tensor=return_quant)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu1 = qnn.QuantReLU(bit_width=bit_width_act, return_quant_tensor=return_quant)
        # NOTE(review): conv2 reuses `stride`; a standard ResNet basic block
        # uses stride=1 for its second conv. Harmless here because this class
        # is only ever instantiated with stride=1, but worth confirming.
        self.conv2 = qnn.QuantConv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False, weight_bit_width=bit_width_weight_b, return_quant_tensor=return_quant)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = qnn.QuantReLU(bit_width=bit_width_act, return_quant_tensor=return_quant)
class RestNetDownBlock(nn.Module):
    """Downsampling residual block: two quantized 3x3 convs plus a 1x1
    quantized conv shortcut (``self.extra``).

    ``stride`` is a two-element sequence: ``stride[0]`` is used by conv1 and
    the shortcut, ``stride[1]`` by conv2.

    NOTE(review): ``__init__`` restored from the markdown-mangled ``init``;
    the ``forward`` method is not shown in this snippet.
    """

    def __init__(self, in_channels, out_channels, stride):
        super(RestNetDownBlock, self).__init__()
        self.conv1 = qnn.QuantConv2d(in_channels, out_channels, kernel_size=3, stride=stride[0], padding=1, bias=False, weight_bit_width=bit_width_weight_b, return_quant_tensor=return_quant)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu1 = qnn.QuantReLU(bit_width=bit_width_act, return_quant_tensor=return_quant)
        self.conv2 = qnn.QuantConv2d(out_channels, out_channels, kernel_size=3, stride=stride[1], padding=1, bias=False, weight_bit_width=bit_width_weight_b, return_quant_tensor=return_quant)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = qnn.QuantReLU(bit_width=bit_width_act, return_quant_tensor=return_quant)
        # 1x1 projection shortcut matching the spatial/channel change.
        self.extra = nn.Sequential(
            qnn.QuantConv2d(in_channels, out_channels, kernel_size=1, stride=stride[0], padding=0, bias=False, weight_bit_width=bit_width_weight, return_quant_tensor=return_quant),
            nn.BatchNorm2d(out_channels)
        )
class RestNet18(nn.Module):
    """Quantized ResNet-18 for CIFAR-10 built from Brevitas layers.

    NOTE(review): ``__init__`` restored from the markdown-mangled ``init``.
    A stray top-level line ``out = out.view(out.size(0), -1)`` followed this
    class in the original paste; it belongs to the ``forward`` method quoted
    near the top of the post and has been dropped here. The ``forward``
    method itself is not part of this snippet.
    """

    def __init__(self):
        super(RestNet18, self).__init__()
        self.quant_inp = qnn.QuantIdentity(bit_width=bit_width_input, return_quant_tensor=return_quant)
        # Stem: 7x7 stride-2 quantized conv, BN, quantized ReLU, 3x3 max pool.
        self.conv1 = qnn.QuantConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False, weight_bit_width=bit_width_weight, return_quant_tensor=return_quant)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = qnn.QuantReLU(bit_width=bit_width_act, return_quant_tensor=return_quant)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages: 64 -> 128 -> 256 -> 512 channels.
        self.layer1 = nn.Sequential(RestNetBasicBlock(64, 64, 1), RestNetBasicBlock(64, 64, 1))
        self.layer2 = nn.Sequential(RestNetDownBlock(64, 128, [2, 1]), RestNetBasicBlock(128, 128, 1))
        self.layer3 = nn.Sequential(RestNetDownBlock(128, 256, [2, 1]), RestNetBasicBlock(256, 256, 1))
        self.layer4 = nn.Sequential(RestNetDownBlock(256, 512, [2, 1]), RestNetBasicBlock(512, 512, 1))
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        # 10-way classifier head for CIFAR-10.
        self.fc = qnn.QuantLinear(512, 10, bias=False, weight_bit_width=bit_width_weight)
# CIFAR-10 data pipeline: resize to 32x32, normalize with ImageNet statistics.
batchsz = 128
cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
]), download=True)
cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)
cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
]), download=True)
cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)

# Sanity-check one batch. BUG FIX: Python 3 iterators have no .next()
# method; `iter(cifar_train).next()` raises AttributeError. Use the
# builtin next() instead.
x, label = next(iter(cifar_train))
print('x:', x.shape, 'label:', label.shape)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = RestNet18().to(device)
criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)

# NOTE(review): the training-loop body was omitted in the original post and
# the paste lost its indentation; with range(1) the loop runs exactly once,
# so whether export sits inside or outside the loop does not change behavior.
for epoch in range(1):
    model.cpu()

# Export the CPU-resident model to FINN-ONNX at the CIFAR input shape.
bo.export_finn_onnx(model, (1, 3, 32, 32), "resnet18_quant.onnx", opset_version=12)
The onnx file I got is as follows:
resnet18_quant.zip
Then, I build it on Finn with code:
import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg
import os
import shutil
# The exported Brevitas model to build, and where estimate reports land.
model_file = "resnet18_quant.onnx"
estimates_output_dir = "output_estimates_only"

# Remove results left over from a previous run, if any (EAFP: attempt the
# removal and treat a missing directory as "nothing to clean up").
try:
    shutil.rmtree(estimates_output_dir)
except FileNotFoundError:
    pass
else:
    print("Previous run results deleted!")
# Estimate-only FINN dataflow build: produce resource/performance estimate
# reports without running synthesis (only ESTIMATE_REPORTS is generated).
cfg_estimates = build.DataflowBuildConfig(
output_dir = estimates_output_dir,
mvau_wwidth_max = 10000,
target_fps = 10000,
# 5.0 ns clock period -> 200 MHz target clock.
synth_clk_period_ns = 5.0,
fpga_part = "xcu200-fsgd2104-2-e",
# Restrict the flow to the steps needed for estimates only.
steps = build_cfg.estimate_only_dataflow_steps,
generate_outputs=[
build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
]
)
# NOTE(review): %%time is an IPython/Jupyter cell magic — this snippet was
# pasted from a notebook cell and is not valid plain-Python syntax.
%%time
build.build_dataflow_cfg(model_file, cfg_estimates)
And I got the error:
Traceback (most recent call last):
File "/workspace/finn/src/finn/builder/build_dataflow.py", line 128, in build_dataflow_cfg
model = transform_step(model, cfg)
File "/workspace/finn/src/finn/builder/build_dataflow_steps.py", line 168, in step_streamline
model = model.transform(Streamline())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn/src/finn/transformation/streamline/init.py", line 102, in apply
model = model.transform(RemoveIdentityOps())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn/src/finn/transformation/streamline/remove.py", line 90, in apply
model = model.transform(InferShapes())
File "/workspace/finn-base/src/finn/core/modelwrapper.py", line 140, in transform
transformed_model
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 86, in apply
hidden_ops = _hide_finn_ops(model)
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 60, in _hide_finn_ops
new_node = _make_shape_compatible_op(node, model)
File "/workspace/finn-base/src/finn/transformation/infer_shapes.py", line 45, in _make_shape_compatible_op
return inst.make_shape_compatible_op(model)
File "/workspace/finn-base/src/finn/custom_op/general/im2col.py", line 175, in make_shape_compatible_op
of dimensions (N, 1, W, C)"
AssertionError: Unexpected kernel shape and padding for input image of dimensions (N, 1, W, C)
Does anyone have any suggestions about this?
Thank you.
Beta Was this translation helpful? Give feedback.
All reactions