Skip to content

Commit 711645e

Browse files
committed
fix default values
1 parent 7bbeea0 commit 711645e

4 files changed

Lines changed: 273 additions & 272 deletions

File tree

README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,12 @@ img = cv2.imread(img_path)
8383

8484
element_crops = MakeCropsDetectThem(
8585
image=img,
86-
model_path="yolov8m.pt",
86+
model_path="yolo11m.pt",
8787
segment=False,
8888
shape_x=640,
8989
shape_y=640,
90-
overlap_x=50,
91-
overlap_y=50,
90+
overlap_x=25,
91+
overlap_y=25,
9292
conf=0.5,
9393
iou=0.7,
9494
)
@@ -264,7 +264,7 @@ Possible arguments of the ```auto_calculate_crop_values``` function:
264264
|-----------------------|------------------------|--------------|----------------------------------------------------------------------------------------------------------------|
265265
| image | np.ndarray | | The input image in BGR format. |
266266
| mode | str | "network_based" | The type of analysis to perform. Can be "resolution_based" for Resolution-Based Analysis or "network_based" for Neural Network-Based Analysis.|
267-
| model | ultralytics model | YOLO("yolov8m.pt") | Pre-initialized model object for "network_based" mode. If not provided, the default YOLOv8m model will be used.|
267+
| model | ultralytics model | YOLO("yolo11m.pt") | Pre-initialized model object for "network_based" mode. If not provided, the default YOLO11m model will be used.|
268268
| classes_list | list | None | A list of class indices to consider for object detection in "network_based" mode. If None, all classes will be considered. |
269269
| conf | float | 0.25 | The confidence threshold for detection in "network_based" mode. |
270270

@@ -280,7 +280,7 @@ img = cv2.imread(img_path)
280280

281281
# Calculate the optimal crop size and overlap for an image
282282
shape_x, shape_y, overlap_x, overlap_y = auto_calculate_crop_values(
283-
image=img, mode="network_based", model=YOLO("yolov8m.pt")
283+
image=img, mode="network_based", model=YOLO("yolo11m.pt")
284284
)
285285
```
286286

examples/example_extra_functions.ipynb

Lines changed: 250 additions & 249 deletions
Large diffs are not rendered by default.

patched_yolo_infer/README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,12 @@ img = cv2.imread(img_path)
5959

6060
element_crops = MakeCropsDetectThem(
6161
image=img,
62-
model_path="yolov8m.pt",
62+
model_path="yolo11m.pt",
6363
segment=False,
6464
shape_x=640,
6565
shape_y=640,
66-
overlap_x=50,
67-
overlap_y=50,
66+
overlap_x=25,
67+
overlap_y=25,
6868
conf=0.5,
6969
iou=0.7,
7070
)
@@ -135,7 +135,7 @@ Visualizes custom results of object detection or segmentation on an image.
135135
- **thickness** (*int*): The thickness of bounding box and text. Default is 4.
136136
- **font**: The font type for class labels. Default is cv2.FONT_HERSHEY_SIMPLEX.
137137
- **font_scale** (*float*): The scale factor for font size. Default is 1.5.
138-
- **delta_colors** (*int*): The random seed offset for color variation. Default is seed=0.
138+
- **delta_colors** (*int*): The random seed offset for color variation. Default is seed=3.
139139
- **dpi** (*int*): Final visualization size (plot is bigger when dpi is higher). Default is 150.
140140
- **random_object_colors** (*bool*): If true, colors for each object are selected randomly. Default is False.
141141
- **show_confidences** (*bool*): If true and show_class=True, confidences near class are visualized. Default is False.
@@ -220,7 +220,7 @@ img = cv2.imread(img_path)
220220

221221
# Calculate the optimal crop size and overlap for an image
222222
shape_x, shape_y, overlap_x, overlap_y = auto_calculate_crop_values(
223-
image=img, mode="network_based", model=YOLO("yolov8m.pt")
223+
image=img, mode="network_based", model=YOLO("yolo11m.pt")
224224
)
225225
```
226226

patched_yolo_infer/functions_extra.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -179,18 +179,18 @@ def visualize_results_usual_yolo_inference(
179179

180180
def visualize_results_yolo_pose_inference(
181181
img,
182-
model,
182+
model=YOLO("yolo11m-pose.pt"),
183183
imgsz=640,
184184
conf=0.25,
185185
iou=0.7,
186-
show_boxes=True,
187-
show_class=True,
188-
color_class_background=(0, 0, 255),
189-
color_class_text=(255, 255, 255),
190186
thickness=4,
191187
point_radius=4,
192188
connection_schema=None,
193189
min_landmark_visibility=0.25,
190+
show_boxes=True,
191+
show_class=True,
192+
color_class_background=(0, 0, 255),
193+
color_class_text=(255, 255, 255),
194194
font=cv2.FONT_HERSHEY_SIMPLEX,
195195
font_scale=1.5,
196196
delta_colors=3,
@@ -208,20 +208,20 @@ def visualize_results_yolo_pose_inference(
208208
209209
Args:
210210
img (numpy.ndarray): The input image in BGR format.
211-
model: The object detection or segmentation model (yolov8).
211+
model: The yolo-pose model. Default is "yolo11m-pose.pt".
212212
imgsz (int): The input image size for the model. Default is 640.
213213
conf (float): The confidence threshold for detection. Default is 0.25.
214214
iou (float): The intersection over union threshold for detection. Default is 0.7.
215-
show_boxes (bool): Whether to show bounding boxes. Default is True.
216-
show_class (bool): Whether to show class labels. Default is True.
217-
color_class_background (tuple / list of tuple): The background BGR color for class labels. Default is (0, 0, 255) (red).
218-
color_class_text (tuple): The text BGR color for class labels. Default is (255, 255, 255) (white).
219-
thickness (int): The thickness of bounding box and text. Default is 4.
215+
thickness (int): The thickness of bounding box, text and skeleton connections. Default is 4.
220216
point_radius (int): The radius of the landmark points to be drawn on the image.
221217
connection_schema (list): A list of tuples defining how landmarks should be connected to form a skeleton.
222218
Each tuple contains two indices representing the landmarks to be connected.
223219
If None or empty, only landmarks will be drawn without any connections.
224220
min_landmark_visibility (float): The minimum confidence threshold for a landmark's visibility to be drawn.
221+
show_boxes (bool): Whether to show bounding boxes. Default is True.
222+
show_class (bool): Whether to show class labels. Default is True.
223+
color_class_background (tuple / list of tuple): The background BGR color for class labels. Default is (0, 0, 255) (red).
224+
color_class_text (tuple): The text BGR color for class labels. Default is (255, 255, 255) (white).
225225
font: The font type for class labels. Default is cv2.FONT_HERSHEY_SIMPLEX.
226226
font_scale (float): The scale factor for font size. Default is 1.5.
227227
delta_colors (int): The random seed offset for color variation. Default is 3.
@@ -680,7 +680,7 @@ def auto_calculate_crop_values(image, mode="network_based", model=None, classes_
680680
image (numpy.ndarray): The input BGR image.
681681
mode (str): The type of analysis to perform. Can be "resolution_based" or "network_based".
682682
Default is "network_based".
683-
model (YOLO): The YOLO model to use for object detection. If None, a default model yolov8m
683+
model (YOLO): The YOLO model to use for object detection. If None, a default model yolo11m
684684
will be loaded. Default is None.
685685
classes_list (list): A list of class indices to consider for object detection. If None, all classes
686686
will be considered. Default is None.
@@ -700,7 +700,7 @@ def auto_calculate_crop_values(image, mode="network_based", model=None, classes_
700700
else:
701701
# If no model is provided, load a default YOLO model
702702
if model is None:
703-
model = YOLO("yolov8m.pt")
703+
model = YOLO("yolo11m.pt")
704704

705705
# Perform object detection on the image
706706
result = model.predict(image, conf=conf, iou=0.75, classes=classes_list, verbose=False)

0 commit comments

Comments
 (0)