import cv2
import numpy as np


class Utilities:

    @staticmethod
    def invert(image):
        return cv2.bitwise_not(image)

    @staticmethod
    def saturate(image, saturation_scale=1.5):
        """
        Increase the saturation of an image while keeping the number of channels the same.

        Parameters:
            image (numpy.ndarray): Input image in BGR format.
            saturation_scale (float): Scale factor for saturation. Default is 1.5.

        Returns:
            numpy.ndarray: Image with increased saturation.
        """
        # Check that the input image is valid
        if image is None:
            raise ValueError("The input image is not valid")
        # Convert the image from BGR to HSV color space
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # Split the HSV image into its channels
        h, s, v = cv2.split(hsv_image)
        # Scale the saturation channel and clip it back into the 8-bit range
        s = np.clip(s * saturation_scale, 0, 255).astype(np.uint8)
        # Merge the channels back together
        saturated_hsv_image = cv2.merge([h, s, v])
        # Convert the HSV image back to BGR format
        saturated_bgr_image = cv2.cvtColor(saturated_hsv_image, cv2.COLOR_HSV2BGR)
        return saturated_bgr_image

    @staticmethod
    def rgb_to_gray(image):
        # Only convert when the image actually has three channels;
        # indexing shape[2] directly would fail for single-channel input
        if len(image.shape) == 3 and image.shape[2] == 3:
            return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return image

    @staticmethod
    def gray_to_rgb(image):
        # Already a 3-channel image: return it unchanged
        if len(image.shape) == 3 and image.shape[2] == 3:
            return image
        return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    @staticmethod
    def fourier_transform(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Apply the Fourier Transform and shift the zero-frequency component to the center
        f = np.fft.fft2(gray)
        fshift = np.fft.fftshift(f)
        return fshift

    @staticmethod
    def butterworth_lowpass_filter(image, cutoff_frequency, order):
        # Build a Butterworth low-pass mask with the same spatial size as the image
        if len(image.shape) == 2:
            rows, cols = image.shape
        else:
            rows, cols = image.shape[:2]
        # Frequency grid centered on zero, matching the fftshifted spectrum
        x = np.linspace(-0.5, 0.5, cols) * cols
        y = np.linspace(-0.5, 0.5, rows) * rows
        X, Y = np.meshgrid(x, y)
        radius = np.sqrt(X ** 2 + Y ** 2)
        filter_array = 1 / (1.0 + (radius / cutoff_frequency) ** (2 * order))
        return filter_array

    @staticmethod
    def apply_lowpass_filter(image, cutoff_frequency, order):
        fshift = Utilities.fourier_transform(image)
        low_pass_filter = Utilities.butterworth_lowpass_filter(image, cutoff_frequency, order)
        # Apply the filter to the Fourier transform of the image
        filtered_image = fshift * low_pass_filter
        # Apply the inverse Fourier transform
        img_back = np.fft.ifftshift(filtered_image)
        img_back = np.fft.ifft2(img_back)
        img_back = np.abs(img_back)
        return img_back

    @staticmethod
    def butterworth_highpass_filter(image, cutoff_frequency, order):
        return 1 - Utilities.butterworth_lowpass_filter(image, cutoff_frequency, order)

    @staticmethod
    def apply_highpass_filter(image, cutoff_frequency, order):
        fshift = Utilities.fourier_transform(image)
        high_pass_filter = Utilities.butterworth_highpass_filter(image, cutoff_frequency, order)
        # Apply the filter to the Fourier transform of the image
        filtered_image = fshift * high_pass_filter
        # Apply the inverse Fourier transform
        img_back = np.fft.ifftshift(filtered_image)
        img_back = np.fft.ifft2(img_back)
        img_back = np.abs(img_back)
        return img_back

    @staticmethod
    def blur_image(image, kernel_size):
        return cv2.blur(image, (kernel_size, kernel_size))

    @staticmethod
    def sharpen_image(image):
        # Standard 3x3 sharpening kernel (identity minus the Laplacian):
        # boosts edges while preserving the overall brightness
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        return cv2.filter2D(image, -1, kernel)

    @staticmethod
    def remove_gaussian_noise(image, kernel_size):
        return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)

    @staticmethod
    def remove_salt_pepper_noise(image, kernel_size):
        return cv2.medianBlur(image, kernel_size)

    @staticmethod
    def sobel_filter(image, kernel_size):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)
        return np.hypot(sobelx, sobely)

    @staticmethod
    def prewitt_filter(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
        kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
        # Use a float output depth so negative gradient responses are not clipped
        img_prewittx = cv2.filter2D(gray, cv2.CV_64F, kernelx)
        img_prewitty = cv2.filter2D(gray, cv2.CV_64F, kernely)
        return np.hypot(img_prewittx, img_prewitty)

    @staticmethod
    def roberts_filter(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kernelx = np.array([[1, 0], [0, -1]])
        kernely = np.array([[0, 1], [-1, 0]])
        # Use a float output depth so negative gradient responses are not clipped
        img_robertsx = cv2.filter2D(gray, cv2.CV_64F, kernelx)
        img_robertsy = cv2.filter2D(gray, cv2.CV_64F, kernely)
        return np.hypot(img_robertsx, img_robertsy)

    @staticmethod
    def canny_edge_filter(image, threshold1, threshold2):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.Canny(gray, threshold1, threshold2)

    @staticmethod
    def hough_transform(image, rho, theta, threshold):
        # Create a copy of the original image to draw lines on
        image_with_lines = np.copy(image)
        # Convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Perform edge detection using the Canny edge detector
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        # Apply the Hough Transform to detect lines
        lines = cv2.HoughLines(edges, rho, theta, threshold)
        # Draw the detected lines on the image copy
        if lines is not None:
            for line in lines:
                # Local names avoid shadowing the rho/theta parameters
                line_rho, line_theta = line[0]
                a = np.cos(line_theta)
                b = np.sin(line_theta)
                x0 = a * line_rho
                y0 = b * line_rho
                x1 = int(x0 + 1000 * (-b))
                y1 = int(y0 + 1000 * (a))
                x2 = int(x0 - 1000 * (-b))
                y2 = int(y0 - 1000 * (a))
                cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255), 2)
        return image_with_lines

    @staticmethod
    def harris_corner_detection(image, blockSize=2, ksize=3, k=0.04):
        # Convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Apply Harris corner detection
        corners = cv2.cornerHarris(gray, blockSize, ksize, k)
        # Dilate the response so corner markers are easier to see
        corners = cv2.dilate(corners, None)
        # Mark corners in red; the 0.01 threshold is a heuristic and may need tuning per image
        output_image = np.copy(image)
        output_image[corners > 0.01 * corners.max()] = [0, 0, 255]
        return output_image

    @staticmethod
    def binary_threshold(image, threshold_value):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)
        return thresh

    @staticmethod
    def otsu_threshold(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return thresh

    @staticmethod
    def gaussian_threshold(image, threshold_value):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        # With THRESH_OTSU set, OpenCV picks the threshold automatically,
        # so threshold_value acts only as a nominal starting value
        ret, thresh = cv2.threshold(blur, threshold_value, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return thresh

    @staticmethod
    def mean_adaptive_threshold(image, block_size=11, constant=2):
        """
        Apply mean adaptive thresholding to an image.

        Parameters:
            image: The input image (numpy array).
            block_size: Size of the pixel neighborhood used to calculate the threshold value.
                Must be odd and greater than 1.
            constant: A constant value that is subtracted from the mean or weighted mean.

        Returns:
            thresholded_image: The thresholded image (same number of channels as the input).
        """
        # Ensure block_size is odd and greater than 1
        if block_size % 2 == 0 or block_size <= 1:
            raise ValueError("block_size must be an odd number greater than 1.")
        # Convert to grayscale if the image has more than one channel
        if len(image.shape) == 3 and image.shape[2] == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image
        # Apply adaptive thresholding
        thresh = cv2.adaptiveThreshold(
            gray,
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            block_size,
            constant
        )
        # Convert back to 3 channels if the original image had 3 channels
        if len(image.shape) == 3 and image.shape[2] == 3:
            thresholded_image = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        else:
            thresholded_image = thresh
        return thresholded_image

    @staticmethod
    def gaussian_adaptive_threshold(image, block_size, constant):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
                                       block_size, constant)
        return thresh
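

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes an input file named "sample.jpg" sits next to this script; the
# file name, output names, and parameter values below are placeholder
# assumptions chosen just to show how the Utilities methods can be chained.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    img = cv2.imread("sample.jpg")  # hypothetical input path
    if img is None:
        raise SystemExit("sample.jpg not found - adjust the path to run the demo")

    # Spatial-domain operations return images that can be written directly
    cv2.imwrite("inverted.jpg", Utilities.invert(img))
    cv2.imwrite("vivid.jpg", Utilities.saturate(img, saturation_scale=1.8))
    cv2.imwrite("edges.jpg", Utilities.canny_edge_filter(img, 100, 200))
    cv2.imwrite("adaptive.jpg", Utilities.mean_adaptive_threshold(img, block_size=11, constant=2))

    # The frequency-domain filters return a float magnitude image,
    # so clip and convert it before saving
    lowpass = Utilities.apply_lowpass_filter(img, cutoff_frequency=30, order=2)
    cv2.imwrite("lowpass.jpg", np.clip(lowpass, 0, 255).astype(np.uint8))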