Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Belyaeva Oksana
object-detection-DLA
Commits
c83b7ae2
Commit
c83b7ae2
authored
Jan 24, 2020
by
Oksana Belyaeva
Browse files
Added support for working with the generated dataset
parent
db24ed83
Changes
8
Hide whitespace changes
Inline
Side-by-side
evaluate_model.py
View file @
c83b7ae2
...
...
@@ -48,10 +48,11 @@ def predict_test_from_coco_dataset(data_dir: str = '/home/ox/PycharmProjects/TF_
detection_graph
=
tf
.
Graph
()
# setting parameters
PATH_TO_CKPT
=
'object_detection_graph_pln_4cl/frozen_inference_graph.pb'
PATH_TO_LABELS
=
'configs/label_map_4cl.pbtxt'
NUM_CLASSES
=
4
PATH_TO_TEST_RES_IMAGES_DIR
=
"/home/ox/work/pablaynet/test_res_4cl"
PATH_TO_CKPT
=
'object_detection_graph/frozen_inference_graph.pb'
PATH_TO_LABELS
=
'configs/label_map_3cl.pbtxt'
NUM_CLASSES
=
3
PATH_TO_TEST_RES_IMAGES_DIR
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/images/test_res'
#"/home/ox/work/pablaynet/test_res_3cl"
batch_size
=
1
#'/home/ox/PycharmProjects/TF_ObjectDetection_API/CLAWDATA/images/test_res' # 'images/lep_res'#tz/gost-23118-99_res'#'tz/GOST_31937-2011_res'#'PubLayNet/images/examples_res'
label_map
=
label_map_util
.
load_labelmap
(
PATH_TO_LABELS
)
...
...
@@ -78,55 +79,71 @@ def predict_test_from_coco_dataset(data_dir: str = '/home/ox/PycharmProjects/TF_
detection_classes
=
detection_graph
.
get_tensor_by_name
(
'detection_classes:0'
)
num_detections
=
detection_graph
.
get_tensor_by_name
(
'num_detections:0'
)
for
image_id
in
imgIds
:
img_name
=
coco
.
loadImgs
(
image_id
)[
0
][
'file_name'
]
height
=
coco
.
loadImgs
(
image_id
)[
0
][
'height'
]
width
=
coco
.
loadImgs
(
image_id
)[
0
][
'width'
]
image
=
cv2
.
imread
(
os
.
path
.
join
(
data_dir
,
img_name
))
image_np
=
cv2
.
cvtColor
(
image
,
cv2
.
COLOR_BGR2RGB
)
image_np_expanded
=
np
.
expand_dims
(
image_np
,
axis
=
0
)
if
len
(
imgIds
)
%
batch_size
==
0
:
imgIds_batch
=
list
(
np
.
reshape
(
imgIds
,
(
batch_size
,
-
1
)).
T
)
else
:
cnt_less
=
len
(
imgIds
)
%
batch_size
imgIds_batch
=
list
(
np
.
reshape
(
imgIds
[:
-
cnt_less
],
(
batch_size
,
-
1
)).
T
)
imgIds_batch
.
append
(
imgIds
[
-
cnt_less
:])
for
batch
in
imgIds_batch
:
print
(
batch
)
image_np_expanded
=
np
.
ndarray
(
shape
=
[
len
(
batch
)])
for
i
,
image_id
in
enumerate
(
batch
):
image_id
=
int
(
image_id
)
img_name
=
coco
.
loadImgs
(
image_id
)[
0
][
'file_name'
]
height
=
coco
.
loadImgs
(
image_id
)[
0
][
'height'
]
width
=
coco
.
loadImgs
(
image_id
)[
0
][
'width'
]
image
=
cv2
.
imread
(
os
.
path
.
join
(
data_dir
,
img_name
))
image_np
=
cv2
.
cvtColor
(
image
,
cv2
.
COLOR_BGR2RGB
)
#image_np_expanded(image_np)
image_np_expanded
=
np
.
expand_dims
(
image_np
,
axis
=
0
)
#image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(
boxes
,
scores
,
classes
,
num
)
=
sess
.
run
(
(
boxes
_b
,
scores
_b
,
classes
_b
,
num
_b
)
=
sess
.
run
(
[
detection_boxes
,
detection_scores
,
detection_classes
,
num_detections
],
feed_dict
=
{
image_tensor
:
image_np_expanded
})
scores
=
[
s
for
s
in
scores
[
0
]
if
s
>
0.6
]
num
=
len
(
scores
)
boxes
=
boxes
[
0
][:
num
]
# network return [ymin, xmin, ymax, xmax]
classes
=
classes
[
0
][:
num
]
for
i
in
range
(
0
,
int
(
num
)):
bbox
=
boxes
[
i
]
boxes
[
i
]
=
[
float
(
min
(
bbox
[
0
]
*
height
,
height
)),
# Post processing
for
index
in
range
(
0
,
batch_size
):
scores
=
[
s
for
s
in
scores_b
[
index
]
if
s
>
0.6
]
num
=
len
(
scores
)
boxes
=
boxes_b
[
index
][:
num
]
# network return [ymin, xmin, ymax, xmax]
classes
=
classes_b
[
index
][:
num
]
for
i
in
range
(
0
,
int
(
num
)):
bbox
=
boxes
[
i
]
boxes
[
i
]
=
[
float
(
min
(
bbox
[
0
]
*
height
,
height
)),
float
(
min
(
bbox
[
1
]
*
width
,
width
)),
float
(
min
((
bbox
[
2
])
*
height
,
height
)),
float
(
min
((
bbox
[
3
])
*
width
,
width
))]
# Visualization of the results of a detection.
res
=
vis_util
.
visualize_boxes_and_labels_on_image_array
(
image_np
,
boxes
,
classes
.
astype
(
np
.
int32
),
scores
,
category_index
,
use_normalized_coordinates
=
False
,
line_thickness
=
2
)
# Visualization of the results of a detection.
res
=
vis_util
.
visualize_boxes_and_labels_on_image_array
(
image_np
,
boxes
,
classes
.
astype
(
np
.
int32
),
scores
,
category_index
,
use_normalized_coordinates
=
False
,
line_thickness
=
2
)
cv2
.
imwrite
(
PATH_TO_TEST_RES_IMAGES_DIR
+
"/"
+
img_name
,
cv2
.
imwrite
(
PATH_TO_TEST_RES_IMAGES_DIR
+
"/"
+
img_name
,
cv2
.
cvtColor
(
res
,
cv2
.
COLOR_RGB2BGR
))
annIds
=
coco
.
getAnnIds
(
imgIds
=
image_id
)
anns
=
coco
.
loadAnns
(
annIds
)
annIds
=
coco
.
getAnnIds
(
imgIds
=
image_id
)
anns
=
coco
.
loadAnns
(
annIds
)
# write into file coco object detection output
for
i
in
range
(
0
,
int
(
num
)):
bbox
=
boxes
[
i
]
# converting from [ymin, xmin, ymax, xmax] to [xmin, ymin, width, height]
bbox
=
[
float
(
bbox
[
1
]),
float
(
bbox
[
0
]),
float
(
bbox
[
3
]
-
bbox
[
1
]),
float
(
bbox
[
2
]
-
bbox
[
0
])]
# write into file coco object detection output
for
i
in
range
(
0
,
int
(
num
)):
bbox
=
boxes
[
i
]
# converting from [ymin, xmin, ymax, xmax] to [xmin, ymin, width, height]
bbox
=
[
float
(
bbox
[
1
]),
float
(
bbox
[
0
]),
float
(
bbox
[
3
]
-
bbox
[
1
]),
float
(
bbox
[
2
]
-
bbox
[
0
])]
coco_output
.
append
(
init_coco_format
(
image_id
,
int
(
classes
[
i
]),
bbox
,
float
(
scores
[
i
])))
coco_output
.
append
(
init_coco_format
(
image_id
,
int
(
classes
[
i
]),
bbox
,
float
(
scores
[
i
])))
with
open
(
os
.
path
.
join
(
output_coco_predict_file
),
'w'
)
as
output_json_file
:
json
.
dump
(
coco_output
,
output_json_file
)
...
...
@@ -145,16 +162,46 @@ def evaluate(annotation_gt: str = '/home/ox/PycharmProjects/TF_ObjectDetection_A
cocoDt
=
cocoGt
.
loadRes
(
annotation_predict
)
# running evaluation
cocoEval
=
COCOeval
(
cocoGt
,
cocoDt
,
annType
)
# sum all categories
cocoEval
.
evaluate
()
cocoEval
.
accumulate
()
cocoEval
.
summarize
()
# text
print
(
"class TEXT: "
)
cocoEval
.
params
.
catIds
=
[
1
]
# 1 stands for the 'person' class, you can increase or decrease the category as needed
cocoEval
.
evaluate
()
cocoEval
.
accumulate
()
cocoEval
.
summarize
()
# table
print
(
"class TABLE"
)
cocoEval
.
params
.
catIds
=
[
2
]
# 1 stands for the 'person' class, you can increase or decrease the category as needed
cocoEval
.
evaluate
()
cocoEval
.
accumulate
()
cocoEval
.
summarize
()
# figure
print
(
"class PICTURE"
)
cocoEval
.
params
.
catIds
=
[
3
]
# 1 stands for the 'person' class, you can increase or decrease the category as needed
cocoEval
.
evaluate
()
cocoEval
.
accumulate
()
cocoEval
.
summarize
()
if
__name__
==
'__main__'
:
predict_test_from_coco_dataset
(
data_dir
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/images/test'
,
coco_ann_file_path
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/test_4cl.json'
,
output_coco_predict_file
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/predict_test.json'
)
evaluate
(
annotation_gt
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/test_4cl.json'
,
annotation_predict
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/predict_test.json'
)
\ No newline at end of file
if
__name__
==
'__main__'
:
'''predict_test_from_coco_dataset(data_dir='/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/images/test',
coco_ann_file_path='/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/test_3cl.json',
output_coco_predict_file='/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/predict_test_3cl.json')
evaluate(annotation_gt='/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/test_3cl.json',
annotation_predict='/home/ox/PycharmProjects/TF_ObjectDetection_API/PubLayNet/jsons/predict_test_3cl.json')
'''
'''predict_test_from_coco_dataset(data_dir='/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/images/test',
coco_ann_file_path='/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/jsons/test.json',
output_coco_predict_file='/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/jsons/predict_test.json')'''
evaluate
(
annotation_gt
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/jsons/test.json'
,
annotation_predict
=
'/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/jsons/predict_test.json'
)
#predict_test_from_coco_dataset()
#evaluate()
\ No newline at end of file
test_model.py
View file @
c83b7ae2
...
...
@@ -9,7 +9,7 @@ import utils.visualization_utils as vis_util
def
test
():
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT
=
'object_detection_graph/frozen_inference_graph.pb'
PATH_TO_CKPT
=
'object_detection_graph/frozen_inference_graph.pb'
#"object_detection_graph_mobilev2_3cl/frozen_inference_graph.pb"#'object_detection_graph/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
#PATH_TO_LABELS = 'configs/label_map.pbtxt'
...
...
@@ -18,8 +18,8 @@ def test():
PATH_TO_LABELS
=
'configs/label_map_3cl.pbtxt'
NUM_CLASSES
=
3
PATH_TO_TEST_RES_IMAGES_DIR
=
'
tz/gost-23118-99
_res'
#
'images/lep_res'#tz/gost-23118-99_res'#'tz/GOST_31937-2011_res'#'PubLayNet/images/examples_res'
PATH_TO_TEST_IMAGES_DIR
=
'
tz/gost-23118-99'
#
'images/lep'#'tz/gost-23118-99'#'tz/GOST_31937-2011_'#'PubLayNet/images/examples'
PATH_TO_TEST_RES_IMAGES_DIR
=
'
/home/ox/work/data_claw/from_ilya/images_good_res'
#'/home/ox/work/data_claw/from_ilya/images_good
_res''images/lep_res'#
'
tz/gost-23118-99_res'#'tz/GOST_31937-2011_res'#'PubLayNet/images/examples_res'
PATH_TO_TEST_IMAGES_DIR
=
'
/home/ox/work/data_claw/from_ilya/images_good'
#'/home/ox/work/data_claw/from_ilya/images_good'
'images/lep'#'tz/gost-23118-99'#'tz/GOST_31937-2011_'#'PubLayNet/images/examples'
TEST_IMAGE_PATHS
=
[
f
for
f
in
os
.
listdir
(
PATH_TO_TEST_IMAGES_DIR
)
if
os
.
path
.
isfile
(
os
.
path
.
join
(
PATH_TO_TEST_IMAGES_DIR
,
f
))]
detection_graph
=
tf
.
Graph
()
...
...
@@ -72,6 +72,5 @@ def test():
cv2
.
imwrite
(
PATH_TO_TEST_RES_IMAGES_DIR
+
"/"
+
image_path
.
split
(
'/'
)[
-
1
],
cv2
.
cvtColor
(
res
,
cv2
.
COLOR_RGB2BGR
))
if
__name__
==
'__main__'
:
test
()
utils_convert_draw_format/ConverterCLAW.py
View file @
c83b7ae2
...
...
@@ -8,9 +8,9 @@ class ConverterCLAW(ConverterAbstract):
def
__init__
(
self
):
self
.
PROJ_DIR
=
os
.
path
.
dirname
(
__file__
)
self
.
_RAW_DATA_DIR
=
"/home/ox/
claw_dat
a"
self
.
_WORK_DATA_DIR
=
self
.
PROJ_DIR
+
"/../CLAWDATA"
self
.
_DATASETNAME
=
"train
-0
"
self
.
_RAW_DATA_DIR
=
"/home/ox/
work/data_claw/from_ily
a"
self
.
_WORK_DATA_DIR
=
self
.
PROJ_DIR
+
"/../CLAWDATA
2
"
self
.
_DATASETNAME
=
"train"
@
property
def
RAW_DATA_DIR
(
self
):
...
...
@@ -34,7 +34,7 @@ class ConverterCLAW(ConverterAbstract):
return
1
if
row_label
==
'table'
:
return
2
if
row_label
==
'figure'
:
if
row_label
==
'figure'
or
row_label
==
'picture'
:
return
3
else
:
None
...
...
utils_convert_draw_format/ConverterGENERATE.py
View file @
c83b7ae2
import
datetime
from
utils_convert_draw_format.Converter
import
ConverterAbstract
import
os
class ConverterGENERATED(ConverterAbstract):
    """Converter for the generated (synthetic) document-layout dataset.

    Supplies the paths, dataset split name and COCO-style metadata
    (categories / info / licenses) used when converting the generated
    data into COCO annotation format.
    """

    def __init__(self):
        # Directory of this module; the work dir is resolved relative to it.
        self.PROJ_DIR = os.path.dirname(__file__)
        self._RAW_DATA_DIR = "/home/ox/work/data_claw/from_ilya"
        self._WORK_DATA_DIR = self.PROJ_DIR + "/../GENERATED"
        self._DATASETNAME = "test"

    @property
    def RAW_DATA_DIR(self):
        """Directory holding the raw (source) data."""
        return self._RAW_DATA_DIR

    @property
    def WORK_DATA_DIR(self):
        """Directory where converted data is written."""
        return self._WORK_DATA_DIR

    @property
    def DATASETNAME(self):
        """Name of the dataset split being converted (e.g. "test")."""
        return self._DATASETNAME

    @DATASETNAME.setter
    def DATASETNAME(self, name: str):
        self._DATASETNAME = name

    @staticmethod
    def class_text_to_int(row_label: str) -> int:
        """Map a class label to its numeric category id.

        Returns 1/2/3 for 'text'/'table'/'figure' and None for any other
        label.  (Bug fix: the original ended with a bare ``None``
        statement — a no-op — instead of an explicit return; the dict
        lookup below makes the None fall-through explicit.)
        """
        mapping = {'text': 1, 'table': 2, 'figure': 3}
        return mapping.get(row_label)

    @ConverterAbstract.CATEGORIES.getter
    def CATEGORIES(self):
        """COCO category descriptors for the three supported classes."""
        return [
            {
                'id': 1,
                'name': 'text',
                'supercategory': 'shape',
            },
            {
                'id': 2,
                'name': 'table',
                'supercategory': 'shape',
            },
            {
                'id': 3,
                'name': 'figure',
                'supercategory': 'shape',
            },
        ]

    @ConverterAbstract.INFO.getter
    def INFO(self):
        """COCO ``info`` section for the produced annotation file."""
        return {
            "description": "CLAW Dataset",
            "url": "",
            "version": "0.1.0",
            "year": 2019,
            "contributor": "belyaeva",
            "date_created": datetime.datetime.utcnow().isoformat(' ')
        }

    @ConverterAbstract.LICENSES.getter
    def LICENSES(self):
        """COCO ``licenses`` section for the produced annotation file."""
        return [
            {
                "id": 1,
                "name": "Attribution-NonCommercial-ShareAlike License",
                "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
            }
        ]
\ No newline at end of file
utils_convert_draw_format/convert_csv_to_coco.py
View file @
c83b7ae2
...
...
@@ -11,7 +11,8 @@ from pycococreatortools import pycococreatortools
import
pandas
as
pd
from
collections
import
namedtuple
from
config_project
import
CUR_DATA_DIR
from
utils_convert_draw_format.Converter
import
ConverterCLAW
,
ConverterAbstract
from
utils_convert_draw_format.Converter
import
ConverterAbstract
from
utils_convert_draw_format.ConverterGENERATE
import
ConverterGENERATED
def
split
(
df
,
group
):
data
=
namedtuple
(
'data'
,
[
'filename'
,
'object'
])
...
...
@@ -99,6 +100,6 @@ def convert_csv_to_coco(converter: ConverterAbstract):
if
__name__
==
"__main__"
:
converter
=
ConverterCLAW
()
converter
.
DATASETNAME
=
"train
-0
"
converter
=
ConverterGENERATED
()
#
ConverterCLAW()
converter
.
DATASETNAME
=
"train"
convert_csv_to_coco
(
converter
)
utils_convert_draw_format/convert_pascal_to_csv.py
View file @
c83b7ae2
...
...
@@ -5,6 +5,7 @@ from typing import List
import
pandas
as
pd
from
config_project
import
CUR_DATA_DIR
,
RAW_DATA_DIR
from
utils_convert_draw_format.ConverterCLAW
import
ConverterCLAW
from
utils_convert_draw_format.ConverterGENERATE
import
ConverterGENERATED
def
parse_xml
(
file_xml
:
str
)
->
List
:
...
...
@@ -49,6 +50,7 @@ def create_csv(path_labels: str, out_csv_file: str):
csv_list
.
extend
(
values
)
except
Exception
as
ex
:
print
(
ex
)
column_name
=
[
'filename'
,
'height'
,
'width'
,
'class'
,
'xmin'
,
'ymin'
,
'xmax'
,
'ymax'
]
xml_df
=
pd
.
DataFrame
(
csv_list
,
columns
=
column_name
)
xml_df
.
to_csv
(
out_csv_file
,
index
=
None
)
...
...
@@ -56,6 +58,7 @@ def create_csv(path_labels: str, out_csv_file: str):
if
__name__
==
"__main__"
:
converter
=
ConverterCLAW
()
#converter = ConverterGENERATED()
create_csv
(
os
.
path
.
join
(
converter
.
RAW_DATA_DIR
,
"/
labels"
),
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"csv/train
-0
.csv"
))
create_csv
(
os
.
path
.
join
(
converter
.
RAW_DATA_DIR
,
"/labels"
),
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"csv/test.csv"
))
create_csv
(
os
.
path
.
join
(
"/home/ox/work/data_claw/from_ilya/images_good_100_
labels"
),
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"csv/train.csv"
))
#
create_csv(os.path.join(converter.RAW_DATA_DIR, "/labels"), os.path.join(converter.WORK_DATA_DIR, "csv/test.csv"))
utils_convert_draw_format/draw_GT_coco.py
View file @
c83b7ae2
...
...
@@ -15,6 +15,7 @@ from config_project import CUR_DATA_DIR
# Define color code
from
utils_convert_draw_format.ConverterCLAW
import
ConverterCLAW
from
utils_convert_draw_format.ConverterPubLayNet
import
ConverterPubLayNet
from
utils_convert_draw_format.ConverterGENERATE
import
ConverterGENERATED
colors
=
{
'title'
:
(
255
,
0
,
0
),
'text'
:
(
0
,
255
,
0
),
...
...
@@ -33,7 +34,7 @@ def markup(image: PIL.Image, annotations: Dict, samples: Dict, font: FreeTypeFon
draw
.
polygon
(
annotation
[
'segmentation'
][
0
],
fill
=
colors
[
samples
[
'categories'
][
annotation
[
'category_id'
]
-
1
][
'name'
]]
+
(
64
,))
# Draw bbox
delta
=
5
delta
=
10
draw
.
rectangle
(
(
max
(
annotation
[
'bbox'
][
0
]
-
delta
,
0
),
max
(
annotation
[
'bbox'
][
1
]
-
delta
,
0
),
...
...
@@ -107,10 +108,11 @@ def draw_annotation(json_path: str, path_images: str, path_out: str):
if
__name__
==
"__main__"
:
converter
=
ConverterPubLayNet
()
draw_annotation
(
json_path
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"jsons/samples.json"
),
path_images
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"images/examples/"
),
path_out
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"images/examples_res/"
))
#converter = ConverterPubLayNet()
converter
=
ConverterGENERATED
()
draw_annotation
(
json_path
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"jsons/test.json"
),
path_images
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"images/test/"
),
path_out
=
os
.
path
.
join
(
converter
.
WORK_DATA_DIR
,
"images/test_labeled/"
))
'''
for CLAW
draw_annotation(json_path=os.path.join(converter.WORK_DATA_DIR, "jsons/train-0.json"),
...
...
utils_convert_draw_format/permilov_to_csv.py
View file @
c83b7ae2
import
json
from
PIL
import
Image
import
pandas
as
pd
import
os
from
typing
import
Dict
,
List
import
numpy
as
np
import
cv2
def convert_category(category_name: str) -> str:
    """Map a raw annotation category name to the unified label set.

    'colontituls', 'text', 'headers' and 'pagenumber' all collapse to
    'text'; 'tables' becomes 'table'.  Unknown categories yield None
    (same fall-through as the original if-chain), which the caller's
    best-effort exception handling will surface downstream.
    """
    mapping = {
        "colontituls": "text",
        "text": "text",
        "headers": "text",
        "pagenumber": "text",
        "tables": "table",
    }
    # dict.get returns None for unmapped names, matching the old behavior.
    return mapping.get(category_name)
def convert_one_image(filename: str, height: int, width: int, annotations: Dict) -> List:
    """Turn one image's annotations into CSV rows.

    Each row has the shape [filename, height, width, class, xmin, ymin,
    xmax, ymax].  Boxes are padded by 10 px on every side and clamped to
    the image bounds.
    """
    padding_pln = 10
    rows = []
    for ann in annotations:
        bbox = ann['bbox']
        rows.append([
            filename,
            height,
            width,
            convert_category(ann['type']),
            max(0, bbox[0] - padding_pln),       # xmin, clamped at 0
            max(0, bbox[1] - padding_pln),       # ymin, clamped at 0
            min(width, bbox[2] + padding_pln),   # xmax, clamped at width
            min(height, bbox[3] + padding_pln),  # ymax, clamped at height
        ])
    return rows
def create_csv_annotation(json_path: str, path_images: str, out_csv: str):
    """Build a CSV annotation file from a Permilov-style JSON dump.

    Loads the JSON list of samples, skips images that cannot be read,
    deletes images containing green markup (treated as spoiled), and
    writes one CSV row per annotation via ``convert_one_image``.

    :param json_path: path to the JSON file with the samples
    :param path_images: directory containing the referenced images
    :param out_csv: path of the CSV file to write
    """
    # Bug fix: the file handle was opened without ever being closed;
    # a context manager releases it even on error.
    with open(json_path, 'r') as fp:
        samples = json.load(fp)
    csv_list = []
    for i, sample in enumerate(samples):
        try:
            name = sample["path"].split('/')[-1]
            path_image = os.path.join(path_images, name)
            img = cv2.imread(path_image)
            if img is None:
                # Unreadable or missing image: skip it.
                continue
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # Mask of "green" pixels in HSV space (hue roughly 36-70).
            mask_green = cv2.inRange(hsv, (36, 25, 25), (70, 255, 255))
            if np.sum(mask_green / 255.0) > 2.0:
                # More than ~2 green pixels: treat the generated image as
                # spoiled — dump the mask for debugging, delete the image.
                cv2.imwrite('green.png', mask_green)
                os.remove(os.path.join(path_images, name))
                print("except image with green {}".format(name))
                continue
            values = convert_one_image(filename=name, height=img.shape[0], width=img.shape[1],
                                       annotations=sample['shapes'])
            csv_list.extend(values)
        except Exception as ex:
            # Best-effort conversion: report and keep processing other samples.
            print(ex)
    column_name = ['filename', 'height', 'width', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(csv_list, columns=column_name)
    xml_df.to_csv(out_csv, index=None)
if __name__ == "__main__":
    # Convert the generated test split from the raw JSON dump into CSV form.
    create_csv_annotation(
        json_path="/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/jsons_no_coco/test.json",
        path_images="/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/images/test",
        out_csv="/home/ox/PycharmProjects/TF_ObjectDetection_API/GENERATED/csv/test.csv",
    )
\ No newline at end of file
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment