4 changes: 4 additions & 0 deletions CMakeLists.txt
@@ -136,3 +136,7 @@ if (BUILD_TESTS)
  enable_testing()
  add_subdirectory(tests)
endif()

if (BUILD_COMPARISONS)
  add_subdirectory(comparison)
endif()
13 changes: 13 additions & 0 deletions README.md
@@ -129,3 +129,16 @@ $ cmake --build . --
$ ninja test
```

## Comparisons

```
$ cd buddy-benchmark
$ mkdir build && cd build
$ cmake -G Ninja .. \
    -DBUILD_COMPARISONS=ON \
    -DOpenCV_DIR=/PATH/TO/OPENCV/BUILD/ \
    -DBUDDY_OPT_BUILD_DIR=/PATH/TO/BUDDY-MLIR/BUILD/
$ ninja boost_gil_conv2d
$ cd bin && ./boost_gil_conv2d <image_path> <output_image_name>
```
Ex. `./boost_gil_conv2d ../../benchmarks/ImageProcessing/Images/gil_sample.png gil_output.png`
Binary file added benchmarks/ImageProcessing/Images/gil_sample.png
16 changes: 16 additions & 0 deletions comparison/CMakeLists.txt
@@ -0,0 +1,16 @@
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

add_executable(boost_gil_conv2d boost_gil_conv2d.cpp)
find_package(Boost)
if(Boost_FOUND)
  message(STATUS "Boost include path: ${Boost_INCLUDE_DIRS}")
  include_directories(${Boost_INCLUDE_DIRS})
else()
  message(WARNING "Boost not found.")
endif()
find_package(PNG REQUIRED)
target_link_libraries(boost_gil_conv2d
  PNG::PNG
  ${Boost_LIBRARIES}
  ${OpenCV_LIBS})
70 changes: 70 additions & 0 deletions comparison/README.md
@@ -0,0 +1,70 @@
# Convolution Comparison

## Generate comparison build files

```
$ cd buddy-benchmark
$ mkdir build && cd build
$ cmake -G Ninja .. \
    -DBUILD_COMPARISONS=ON
```

## Install dependencies for comparison examples

- Create and activate a separate virtual environment

```
sudo apt install python3.8-venv
python3 -m venv /path/to/new/virtual/environment
source /path/to/new/virtual/environment/bin/activate
```

- Install dependencies

```
pip install tensorflow
pip install opencv-python
pip install torch
pip install onnx
pip install onnxruntime
```

> **_NOTE:_** Running boost_gil_conv2d.cpp requires the Boost distribution and its related
> dependencies. Boost installation instructions: https://www.boost.org/doc/libs/1_77_0/more/getting_started/unix-variants.html
> More information on external dependencies: https://github.com/boostorg/gil/blob/develop/CONTRIBUTING.md#install-dependencies

## Run Comparison

Please make sure TensorFlow, OpenCV, PyTorch, ONNX, and ONNX Runtime are installed in your environment.

- TensorFlow

```
$ cd buddy-benchmark/comparison/
$ python3 tf-conv2d.py
```
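
The `tf-conv2d.py` script itself is not included in this change, so the following is only a rough sketch of what it is assumed to look like, modeled on `pytorch-conv2d.py`: a single `tf.nn.conv2d` call over the same grayscale YuTu1024.png input with the 3x3 Sobel kernel, timed the same way. Treat the script name, kernel handling, and output file name as assumptions.

```
# Hypothetical sketch of tf-conv2d.py (not part of this change); mirrors pytorch-conv2d.py.
import tensorflow as tf
import numpy as np
import cv2
import time

# 3x3 Sobel kernel, same values as sobel_3x3 in pytorch-conv2d.py.
sobel_3x3 = np.array([[1, 0, -1],
                      [2, 0, -2],
                      [1, 0, -1]], dtype='float32')

def test_conv2d(img, kernel):
    # tf.nn.conv2d expects NHWC input and HWIO filters.
    weight = tf.constant(kernel.reshape((kernel.shape[0], kernel.shape[1], 1, 1)))
    start = time.time()
    edge_detect = tf.nn.conv2d(img, weight, strides=[1, 1, 1, 1], padding='VALID')
    end = time.time()
    print(end - start)
    return edge_detect.numpy().squeeze()

def main():
    img = cv2.imread('../benchmarks/ImageProcessing/Images/YuTu1024.png', cv2.IMREAD_GRAYSCALE)
    # NHWC layout: batch of 1, single channel.
    img = np.array(img, dtype='float32').reshape((1, img.shape[0], img.shape[1], 1))
    edge_detect = test_conv2d(img, sobel_3x3)
    cv2.imwrite("./tf-conv2d.png", edge_detect)

if __name__ == "__main__":
    main()
```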

- PyTorch

```
$ cd buddy-benchmark/comparison/
$ python3 pytorch-conv2d.py
```

- ONNX Runtime

```
$ cd buddy-benchmark/comparison/
$ python3 gen-conv-models.py
$ python3 onnxruntime-conv2d.py
```

- Boost GIL

```
$ cd buddy-benchmark/build/
$ ninja boost_gil_conv2d
$ ./boost_gil_conv2d <input_image_path> <output_image_name>
```

Ex. `./boost_gil_conv2d ../../benchmarks/ImageProcessing/Images/gil_sample.png gil_output.png`
64 changes: 64 additions & 0 deletions comparison/boost_gil_conv2d.cpp
@@ -0,0 +1,64 @@
//===- boost_gil_conv2d.cpp -----------------------------------------------===//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//===----------------------------------------------------------------------===//
//
// This file implements an example for depicting the use of Boost GIL's 2D
// Convolution API.
//
//===----------------------------------------------------------------------===//

#include "ImageProcessing/Kernels.h"
#include <boost/gil.hpp>
#include <boost/gil/extension/io/png.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <time.h>

namespace gil = boost::gil;

void fill_gil_view_with_opencv_mat(cv::Mat opencv_mat,
                                   gil::gray8_view_t gil_view) {
  for (int i = 0; i < opencv_mat.rows; i++)
    for (int j = 0; j < opencv_mat.cols; j++)
      gil_view(j, i) = opencv_mat.at<uchar>(i, j);
}

int main(int argc, char *argv[]) {
  if (argc < 3) {
    std::cerr << "Usage: " << argv[0]
              << " <input_image_path> <output_image_name>" << std::endl;
    return 1;
  }

  // Read input image using opencv's imread()
  cv::Mat opencv_image = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
  if (opencv_image.empty()) {
    std::cerr << "Cannot read image: " << argv[1] << std::endl;
    return 1;
  }

  // Declare input image
  gil::gray8_image_t image(opencv_image.cols, opencv_image.rows);

  // Fill GIL image view with image read using opencv's imread()
  fill_gil_view_with_opencv_mat(opencv_image, gil::view(image));

  // Declare output image
  gil::gray8_image_t output(image.dimensions());

  // Create a 2D GIL kernel
  gil::detail::kernel_2d<float> kernel(sobel3x3KernelAlign, 9, 1, 1);

  clock_t start, end;
  start = clock();
  // Apply 2D convolution between input image and kernel
  gil::detail::convolve_2d(gil::view(image), kernel, gil::view(output));
  end = clock();
  std::cout << "Execution time: " << (double)(end - start) / CLOCKS_PER_SEC
            << " s" << std::endl;

  // Save obtained image
  gil::write_view(argv[2], gil::view(output), gil::png_tag{});

  return 0;
}
89 changes: 89 additions & 0 deletions comparison/gen-conv-models.py
@@ -0,0 +1,89 @@
import onnx
from onnx import numpy_helper
import numpy as np

# Filter
sobel = {
3: np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]], dtype='float32'),
5: np.array([[2, 1, 0, -1, -2],
[3, 2, 0, -2, -3],
[4, 3, 0, -3, -4],
[3, 2, 0, -2, -3],
[2, 1, 0, -1, -2]], dtype='float32'),
7: np.array([[3, 2, 1, 0, -1, -2, -3],
[4, 3, 2, 0, -2, -3, -4],
[5, 4, 3, 0, -3, -4, -5],
[6, 5, 4, 0, -4, -5, -6],
[5, 4, 3, 0, -3, -4, -5],
[4, 3, 2, 0, -2, -3, -4],
[3, 2, 1, 0, -1, -2, -3]], dtype='float32'),
9: np.array([[4, 3, 2, 1, 0, -1, -2, -3, -4],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[8, 7, 6, 5, 0, -5, -6, -7, -8],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[4, 3, 2, 1, 0, -1, -2, -3, -4]], dtype='float32')
}

def get_output_shape(i):
    if i == 3:
        return [1, 1, 1022, 1022]
    elif i == 5:
        return [1, 1, 1020, 1020]
    elif i == 7:
        return [1, 1, 1018, 1018]
    elif i == 9:
        return [1, 1, 1016, 1016]

def main():
    for i in range(3, 10, 2):
        # Filter
        w = sobel[i].reshape((1, 1, i, i))

        # Input
        x = np.random.rand(1, 1, 1024, 1024).astype('float32')

        # Initializer of the weight
        initializer_w = numpy_helper.from_array(w, 'w')

        tensor_w = onnx.helper.make_tensor_value_info('w', onnx.TensorProto.FLOAT, [1, 1, i, i])
        tensor_x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [1, 1, 1024, 1024])
        tensor_y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, get_output_shape(i))

        # Create a node
        node_def = onnx.helper.make_node(
            'Conv',
            inputs=['x', 'w'],
            outputs=['y'],
            kernel_shape=[i, i]
        )

        # Create the graph
        graph_def = onnx.helper.make_graph(
            [node_def],
            f'conv_{i}x{i}',
            [tensor_x],
            [tensor_y],
            [initializer_w]
        )

        # Create the model
        model_def = onnx.helper.make_model(graph_def,
                                           producer_name='python_script',
                                           ir_version=6
                                           )
        model_def.opset_import[0].version = 10

        # Check the model
        onnx.checker.check_model(model_def)

        # Save the model
        onnx.save(model_def, f'conv_{i}x{i}.onnx')

if __name__ == "__main__":
    main()
33 changes: 33 additions & 0 deletions comparison/onnxruntime-conv2d.py
@@ -0,0 +1,33 @@
import numpy as np
import cv2
import onnxruntime
import time

def test_conv2d(img, filter_size):
    start = time.time()
    # Load the model
    model_path = f'conv_{filter_size}x{filter_size}.onnx'
    ort_session = onnxruntime.InferenceSession(model_path)
    # Run inference
    ort_inputs = {ort_session.get_inputs()[0].name: img}
    ort_outs = ort_session.run(None, ort_inputs)
    edge_detect = ort_outs[0]
    edge_detect = edge_detect.squeeze()
    end = time.time()
    print(f'conv {filter_size}x{filter_size} : {end - start}')
    return edge_detect

def main():
    img = cv2.imread('../benchmarks/ImageProcessing/Images/YuTu1024.png', cv2.IMREAD_GRAYSCALE)
    # Convert the image to numpy array.
    img = np.array(img, dtype='float32')
    img = img.reshape((1, 1, img.shape[0], img.shape[1]))
    '''
    Perform the edge detection.
    '''
    for i in range(3, 10, 2):
        edge_detect = test_conv2d(img, i)
        cv2.imwrite(f'./onnxruntime-conv2d_{i}.png', edge_detect)

if __name__ == "__main__":
    main()
68 changes: 68 additions & 0 deletions comparison/pytorch-conv2d.py
@@ -0,0 +1,68 @@
import torch
import numpy as np
import cv2
import time
from torch.autograd import Variable
import torch.nn.functional as F

sobel_3x3 = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]], dtype='float32')

sobel_5x5 = np.array([[2, 1, 0, -1, -2],
[3, 2, 0, -2, -3],
[4, 3, 0, -3, -4],
[3, 2, 0, -2, -3],
[2, 1, 0, -1, -2]], dtype='float32')

sobel_7x7 = np.array([[3, 2, 1, 0, -1, -2, -3],
[4, 3, 2, 0, -2, -3, -4],
[5, 4, 3, 0, -3, -4, -5],
[6, 5, 4, 0, -4, -5, -6],
[5, 4, 3, 0, -3, -4, -5],
[4, 3, 2, 0, -2, -3, -4],
[3, 2, 1, 0, -1, -2, -3]], dtype='float32')

sobel_9x9 = np.array([[4, 3, 2, 1, 0, -1, -2, -3, -4],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[8, 7, 6, 5, 0, -5, -6, -7, -8],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[4, 3, 2, 1, 0, -1, -2, -3, -4]], dtype='float32')

sobel_3x3_filter = sobel_3x3.reshape((1, 1, 3, 3))
sobel_5x5_filter = sobel_5x5.reshape((1, 1, 5, 5))
sobel_7x7_filter = sobel_7x7.reshape((1, 1, 7, 7))
sobel_9x9_filter = sobel_9x9.reshape((1, 1, 9, 9))

def test_conv2d(img, kernel):
    weight = Variable(torch.from_numpy(kernel))
    start = time.time()
    edge_detect = F.conv2d(Variable(img), weight)
    end = time.time()
    print(end - start)
    edge_detect = edge_detect.squeeze().detach().numpy()
    return edge_detect

def main():
    img = cv2.imread('../benchmarks/ImageProcessing/Images/YuTu1024.png', cv2.IMREAD_GRAYSCALE)
    # Convert the image to numpy array.
    img = np.array(img, dtype='float32')
    # Convert the numpy array to torch tensor.
    img = torch.from_numpy(img.reshape((1, 1, img.shape[0], img.shape[1])))
    '''
    Perform the edge detection.
    Uncomment the line for the kernel size you want to test;
    only one kernel size is used at a time.
    '''
    edge_detect = test_conv2d(img, sobel_3x3_filter)
    # edge_detect = test_conv2d(img, sobel_5x5_filter)
    # edge_detect = test_conv2d(img, sobel_7x7_filter)
    # edge_detect = test_conv2d(img, sobel_9x9_filter)
    cv2.imwrite("./pytorch-conv2d.png", edge_detect)

if __name__ == "__main__":
    main()