
Commit c1e0613

Add : Project Added
1 parent d72c047 commit c1e0613


61 files changed (+2879, -0 lines)

Case Study/543470.jpg (46 KB)
Case Study/9.png (1.43 MB)
Case Study/Capture.JPG (46.6 KB)
Case Study/car.jpg (110 KB)
Case Study/car2.jpg (149 KB)
Case Study/car3.jpg (262 KB)
Case Study/car4.jpg (53.4 KB)

Case Study/classes/custom.names

+1
@@ -0,0 +1 @@
License_Plate
(6 binary files not shown; two listed at 847 Bytes and 10.5 KB)

Case Study/core/backbone.py

+165
@@ -0,0 +1,165 @@
import tensorflow as tf
import core.common as common


def darknet53(input_data):

    input_data = common.convolutional(input_data, (3, 3, 3, 32))
    input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)

    for i in range(1):
        input_data = common.residual_block(input_data, 64, 32, 64)

    input_data = common.convolutional(input_data, (3, 3, 64, 128), downsample=True)

    for i in range(2):
        input_data = common.residual_block(input_data, 128, 64, 128)

    input_data = common.convolutional(input_data, (3, 3, 128, 256), downsample=True)

    for i in range(8):
        input_data = common.residual_block(input_data, 256, 128, 256)

    route_1 = input_data
    input_data = common.convolutional(input_data, (3, 3, 256, 512), downsample=True)

    for i in range(8):
        input_data = common.residual_block(input_data, 512, 256, 512)

    route_2 = input_data
    input_data = common.convolutional(input_data, (3, 3, 512, 1024), downsample=True)

    for i in range(4):
        input_data = common.residual_block(input_data, 1024, 512, 1024)

    return route_1, route_2, input_data


def cspdarknet53(input_data):

    input_data = common.convolutional(input_data, (3, 3, 3, 32), activate_type="mish")
    input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True, activate_type="mish")

    route = input_data
    route = common.convolutional(route, (1, 1, 64, 64), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 64, 64), activate_type="mish")
    for i in range(1):
        input_data = common.residual_block(input_data, 64, 32, 64, activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 64, 64), activate_type="mish")

    input_data = tf.concat([input_data, route], axis=-1)
    input_data = common.convolutional(input_data, (1, 1, 128, 64), activate_type="mish")
    input_data = common.convolutional(input_data, (3, 3, 64, 128), downsample=True, activate_type="mish")
    route = input_data
    route = common.convolutional(route, (1, 1, 128, 64), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 128, 64), activate_type="mish")
    for i in range(2):
        input_data = common.residual_block(input_data, 64, 64, 64, activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 64, 64), activate_type="mish")
    input_data = tf.concat([input_data, route], axis=-1)

    input_data = common.convolutional(input_data, (1, 1, 128, 128), activate_type="mish")
    input_data = common.convolutional(input_data, (3, 3, 128, 256), downsample=True, activate_type="mish")
    route = input_data
    route = common.convolutional(route, (1, 1, 256, 128), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 256, 128), activate_type="mish")
    for i in range(8):
        input_data = common.residual_block(input_data, 128, 128, 128, activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 128, 128), activate_type="mish")
    input_data = tf.concat([input_data, route], axis=-1)

    input_data = common.convolutional(input_data, (1, 1, 256, 256), activate_type="mish")
    route_1 = input_data
    input_data = common.convolutional(input_data, (3, 3, 256, 512), downsample=True, activate_type="mish")
    route = input_data
    route = common.convolutional(route, (1, 1, 512, 256), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 512, 256), activate_type="mish")
    for i in range(8):
        input_data = common.residual_block(input_data, 256, 256, 256, activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 256, 256), activate_type="mish")
    input_data = tf.concat([input_data, route], axis=-1)

    input_data = common.convolutional(input_data, (1, 1, 512, 512), activate_type="mish")
    route_2 = input_data
    input_data = common.convolutional(input_data, (3, 3, 512, 1024), downsample=True, activate_type="mish")
    route = input_data
    route = common.convolutional(route, (1, 1, 1024, 512), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 1024, 512), activate_type="mish")
    for i in range(4):
        input_data = common.residual_block(input_data, 512, 512, 512, activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 512, 512), activate_type="mish")
    input_data = tf.concat([input_data, route], axis=-1)

    input_data = common.convolutional(input_data, (1, 1, 1024, 1024), activate_type="mish")
    input_data = common.convolutional(input_data, (1, 1, 1024, 512))
    input_data = common.convolutional(input_data, (3, 3, 512, 1024))
    input_data = common.convolutional(input_data, (1, 1, 1024, 512))

    input_data = tf.concat([tf.nn.max_pool(input_data, ksize=13, padding='SAME', strides=1),
                            tf.nn.max_pool(input_data, ksize=9, padding='SAME', strides=1),
                            tf.nn.max_pool(input_data, ksize=5, padding='SAME', strides=1),
                            input_data], axis=-1)
    input_data = common.convolutional(input_data, (1, 1, 2048, 512))
    input_data = common.convolutional(input_data, (3, 3, 512, 1024))
    input_data = common.convolutional(input_data, (1, 1, 1024, 512))

    return route_1, route_2, input_data


def cspdarknet53_tiny(input_data):
    input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)
    input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)
    input_data = common.convolutional(input_data, (3, 3, 64, 64))

    route = input_data
    input_data = common.route_group(input_data, 2, 1)
    input_data = common.convolutional(input_data, (3, 3, 32, 32))
    route_1 = input_data
    input_data = common.convolutional(input_data, (3, 3, 32, 32))
    input_data = tf.concat([input_data, route_1], axis=-1)
    input_data = common.convolutional(input_data, (1, 1, 32, 64))
    input_data = tf.concat([route, input_data], axis=-1)
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)

    input_data = common.convolutional(input_data, (3, 3, 64, 128))
    route = input_data
    input_data = common.route_group(input_data, 2, 1)
    input_data = common.convolutional(input_data, (3, 3, 64, 64))
    route_1 = input_data
    input_data = common.convolutional(input_data, (3, 3, 64, 64))
    input_data = tf.concat([input_data, route_1], axis=-1)
    input_data = common.convolutional(input_data, (1, 1, 64, 128))
    input_data = tf.concat([route, input_data], axis=-1)
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)

    input_data = common.convolutional(input_data, (3, 3, 128, 256))
    route = input_data
    input_data = common.route_group(input_data, 2, 1)
    input_data = common.convolutional(input_data, (3, 3, 128, 128))
    route_1 = input_data
    input_data = common.convolutional(input_data, (3, 3, 128, 128))
    input_data = tf.concat([input_data, route_1], axis=-1)
    input_data = common.convolutional(input_data, (1, 1, 128, 256))
    route_1 = input_data
    input_data = tf.concat([route, input_data], axis=-1)
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)

    input_data = common.convolutional(input_data, (3, 3, 512, 512))

    return route_1, input_data


def darknet53_tiny(input_data):
    input_data = common.convolutional(input_data, (3, 3, 3, 16))
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 16, 32))
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 32, 64))
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 64, 128))
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 128, 256))
    route_1 = input_data
    input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 256, 512))
    input_data = tf.keras.layers.MaxPool2D(2, 1, 'same')(input_data)
    input_data = common.convolutional(input_data, (3, 3, 512, 1024))

    return route_1, input_data

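The file above defines four interchangeable backbones (darknet53, cspdarknet53 and their tiny variants); each returns intermediate route feature maps plus the final stride-32 tensor that a YOLO detection head consumes. As a minimal smoke-test sketch (assuming TensorFlow 2.x is installed and the Case Study folder is on the Python path so that core.backbone and core.common import cleanly), the CSPDarknet-53 backbone can be attached to a 416x416 Keras input to check its three output scales:

import tensorflow as tf
from core.backbone import cspdarknet53

inputs = tf.keras.layers.Input([416, 416, 3])   # 416x416 RGB image, dynamic batch size
route_1, route_2, conv = cspdarknet53(inputs)

print(route_1.shape)   # (None, 52, 52, 256)  -> stride-8 feature map
print(route_2.shape)   # (None, 26, 26, 512)  -> stride-16 feature map
print(conv.shape)      # (None, 13, 13, 512)  -> stride-32 feature map after the SPP block

The 8/16/32 strides line up with __C.YOLO.STRIDES in config.py further down.
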
Case Study/core/common.py

+60
@@ -0,0 +1,60 @@
#! /usr/bin/env python
# coding=utf-8

import tensorflow as tf


class BatchNormalization(tf.keras.layers.BatchNormalization):
    """
    "Frozen state" and "inference mode" are two separate concepts.
    `layer.trainable = False` freezes the layer, so the layer uses the stored
    moving `var` and `mean` in "inference mode", and neither `gamma` nor
    `beta` is updated.
    """
    def call(self, x, training=False):
        if not training:
            training = tf.constant(False)
        training = tf.logical_and(training, self.trainable)
        return super().call(x, training)


def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky'):
    if downsample:
        input_layer = tf.keras.layers.ZeroPadding2D(((1, 0), (1, 0)))(input_layer)
        padding = 'valid'
        strides = 2
    else:
        strides = 1
        padding = 'same'

    conv = tf.keras.layers.Conv2D(filters=filters_shape[-1], kernel_size=filters_shape[0], strides=strides,
                                  padding=padding, use_bias=not bn,
                                  kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                                  kernel_initializer=tf.random_normal_initializer(stddev=0.01),
                                  bias_initializer=tf.constant_initializer(0.))(input_layer)

    if bn:
        conv = BatchNormalization()(conv)
    if activate:
        if activate_type == "leaky":
            conv = tf.nn.leaky_relu(conv, alpha=0.1)
        elif activate_type == "mish":
            conv = mish(conv)
    return conv


def mish(x):
    return x * tf.math.tanh(tf.math.softplus(x))


def residual_block(input_layer, input_channel, filter_num1, filter_num2, activate_type='leaky'):
    short_cut = input_layer
    conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)
    conv = convolutional(conv, filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)

    residual_output = short_cut + conv
    return residual_output


def route_group(input_layer, groups, group_id):
    convs = tf.split(input_layer, num_or_size_splits=groups, axis=-1)
    return convs[group_id]


def upsample(input_layer):
    return tf.image.resize(input_layer, (input_layer.shape[1] * 2, input_layer.shape[2] * 2), method='bilinear')

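common.py supplies the building blocks the backbones are composed of: convolutional (Conv2D with optional batch norm and a leaky-ReLU or Mish activation), residual_block, route_group (the channel split used by the CSP-tiny backbone), upsample, and a BatchNormalization subclass that stays in inference mode while frozen. A small sketch of how the pieces compose, under the same import assumptions as above (the shapes in the comments follow from the stride-2 downsampling path and the 2x bilinear upsample):

import tensorflow as tf
from core.common import convolutional, residual_block, upsample

x = tf.keras.layers.Input([52, 52, 64])
y = convolutional(x, (3, 3, 64, 128), downsample=True)      # zero-pad + stride-2 conv -> (None, 26, 26, 128)
y = residual_block(y, 128, 64, 128, activate_type="mish")   # 1x1 then 3x3 conv plus skip connection; shape unchanged
y = upsample(y)                                             # bilinear resize back to (None, 52, 52, 128)
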
Case Study/core/config.py

+25
@@ -0,0 +1,25 @@
from easydict import EasyDict as edict


__C = edict()

cfg = __C

# YOLO options
__C.YOLO = edict()

__C.YOLO.CLASSES = "./classes/custom.names"
__C.YOLO.ANCHORS = [12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401]
__C.YOLO.ANCHORS_V3 = [10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326]
__C.YOLO.ANCHORS_TINY = [23,27, 37,58, 81,82, 81,82, 135,169, 344,319]
__C.YOLO.STRIDES = [8, 16, 32]
__C.YOLO.STRIDES_TINY = [16, 32]
__C.YOLO.XYSCALE = [1.2, 1.1, 1.05]
__C.YOLO.XYSCALE_TINY = [1.05, 1.05]
__C.YOLO.ANCHOR_PER_SCALE = 3
__C.YOLO.IOU_LOSS_THRESH = 0.5

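config.py exposes a single cfg EasyDict that the rest of the project reads its settings from: CLASSES points at the one-line custom.names file added above (so the detector has exactly one class, License_Plate), and the nine ANCHORS values pair up into three (w, h) anchor boxes for each of the three strides (ANCHOR_PER_SCALE = 3). A minimal reading sketch (assuming easydict and numpy are installed and the process runs from the Case Study folder, since the class-file path is relative):

import numpy as np
from core.config import cfg

# one class name per line -> ['License_Plate']
with open(cfg.YOLO.CLASSES) as f:
    class_names = [line.strip() for line in f if line.strip()]

# nine (w, h) pairs -> 3 anchors for each of the strides 8, 16, 32
anchors = np.array(cfg.YOLO.ANCHORS, dtype=np.float32).reshape(3, 3, 2)

print(class_names)        # ['License_Plate']
print(cfg.YOLO.STRIDES)   # [8, 16, 32]
print(anchors[0])         # anchors applied at the smallest (stride-8) scale, in the usual YOLO ordering
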
0 commit comments
