first commit
|
@@ -0,0 +1,3 @@
|
|||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
|
@@ -0,0 +1 @@
|
|||
split_json.py
|
|
@@ -0,0 +1,23 @@
|
|||
<component name="InspectionProjectProfileManager">
|
||||
<profile version="1.0">
|
||||
<option name="myName" value="Project Default" />
|
||||
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="ignoredPackages">
|
||||
<value>
|
||||
<list size="10">
|
||||
<item index="0" class="java.lang.String" itemvalue="scipy" />
|
||||
<item index="1" class="java.lang.String" itemvalue="python" />
|
||||
<item index="2" class="java.lang.String" itemvalue="scikit-learn" />
|
||||
<item index="3" class="java.lang.String" itemvalue="pytorch" />
|
||||
<item index="4" class="java.lang.String" itemvalue="numpy" />
|
||||
<item index="5" class="java.lang.String" itemvalue="scikit-image" />
|
||||
<item index="6" class="java.lang.String" itemvalue="opencv-python" />
|
||||
<item index="7" class="java.lang.String" itemvalue="pyaml" />
|
||||
<item index="8" class="java.lang.String" itemvalue="future" />
|
||||
<item index="9" class="java.lang.String" itemvalue="matplotlib" />
|
||||
</list>
|
||||
</value>
|
||||
</option>
|
||||
</inspection_tool>
|
||||
</profile>
|
||||
</component>
|
|
@@ -0,0 +1,6 @@
|
|||
<component name="InspectionProjectProfileManager">
|
||||
<settings>
|
||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
||||
<version value="1.0" />
|
||||
</settings>
|
||||
</component>
|
|
@@ -0,0 +1,7 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="Black">
|
||||
<option name="sdkName" value="Python 3.9 (pytorch)" />
|
||||
</component>
|
||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (pytorch)" project-jdk-type="Python SDK" />
|
||||
</project>
|
|
@@ -0,0 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/pythonProject1.iml" filepath="$PROJECT_DIR$/.idea/pythonProject1.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
|
@@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="PySciProjectComponent">
|
||||
<option name="PY_INTERACTIVE_PLOTS_SUGGESTED" value="true" />
|
||||
</component>
|
||||
</project>
|
|
@@ -0,0 +1,10 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="PYTHON_MODULE" version="4">
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$">
|
||||
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
||||
</content>
|
||||
<orderEntry type="jdk" jdkName="Python 3.9 (pytorch)" jdkType="Python SDK" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
|
@@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="" vcs="Git" />
|
||||
</component>
|
||||
</project>
|
[24 binary image files added, 50 KiB - 327 KiB each]
|
@@ -0,0 +1,237 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
car_measure.py
|
||||
--------------
|
||||
1. Read the saliency map -> threshold it into a pure-white binary mask
|
||||
2. Morphological closing -> denoise & fill holes
|
||||
3. Compute and draw the bounding rectangle (showing width/height in pixels)
|
||||
4. Hough circle detection -> draw only the circle centers, a connecting line, and a distance label
|
||||
All visualizations and result files are written to out_dir
|
||||
"""
|
||||
|
||||
import os
|
||||
import cv2
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from u2net_saliency import generate_saliency_map
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# -------- Helper: saliency-map enhancement & debug visualization (optional) --------
|
||||
# ----------------------------------------------------------------------
|
||||
def enhance_saliency_map(saliency_map):
|
||||
"""对显著性图做对比度增强、CLAHE、双边滤波——调参用,可删"""
|
||||
saliency_map = cv2.normalize(saliency_map, None, 0, 255, cv2.NORM_MINMAX)
|
||||
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
|
||||
saliency_map = clahe.apply(saliency_map)
|
||||
saliency_map = cv2.bilateralFilter(saliency_map, 9, 75, 75)
|
||||
return saliency_map
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# ------------------- Circle-center detection & distance labeling -------------------
|
||||
# ----------------------------------------------------------------------
|
|
||||
def detect_and_draw_circles(salient_path, original_path, output_dir):
|
||||
"""
|
||||
Hough circle detection:
|
||||
- On the original image: draw the centers, a connecting line, and a distance label
|
||||
- On the saliency image: draw the full circles and the centers
|
||||
Writes two output images:
|
||||
- detected_centers_salient.png
|
||||
- detected_centers_original.png
|
||||
"""
|
||||
salient_img = cv2.imread(salient_path, cv2.IMREAD_GRAYSCALE)
|
||||
original_img = cv2.imread(original_path)
|
||||
if salient_img is None or original_img is None:
|
||||
raise FileNotFoundError("Salient 或 original 图片路径有误")
|
||||
|
||||
# Blur + Hough circle detection
|
||||
blurred = cv2.GaussianBlur(salient_img, (9, 9), 2)
|
||||
circles = cv2.HoughCircles(
|
||||
blurred, cv2.HOUGH_GRADIENT,
|
||||
dp=1.2, minDist=290,
|
||||
param1=50, param2=17,
|
||||
minRadius=85, maxRadius=95
|
||||
)
|
||||
|
||||
output_salient = cv2.cvtColor(salient_img, cv2.COLOR_GRAY2BGR)
|
||||
output_original = original_img.copy()
|
||||
|
||||
if circles is not None:
|
||||
circles = np.uint16(np.around(circles))
|
||||
centers = sorted([(int(c[0]), int(c[1]), int(c[2])) for c in circles[0]],  # int() avoids uint16 wrap-around in later arithmetic
|
||||
key=lambda p: p[1], reverse=True)[:2]
|
||||
|
||||
# On the saliency image: draw the full circle + its center
|
||||
for (x, y, r) in centers:
|
||||
cv2.circle(output_salient, (x, y), r, (0, 255, 0), 2) # circle outline
|
||||
cv2.circle(output_salient, (x, y), 3, (0, 0, 255), -1) # circle center
|
||||
|
||||
# On the original image: centers + connecting line + distance label
|
||||
if len(centers) >= 2:
|
||||
(x1, y1, _), (x2, y2, _) = centers
|
||||
cv2.circle(output_original, (x1, y1), 3, (0, 255, 0), -1)
|
||||
cv2.circle(output_original, (x2, y2), 3, (0, 255, 0), -1)
|
||||
cv2.line(output_original, (x1, y1), (x2, y2), (0, 0, 255), 2)
|
||||
|
||||
dist = np.hypot(x1 - x2, y1 - y2)
|
||||
mid_pt = (int((x1 + x2) / 2), int((y1 + y2) / 2) - 10)
|
||||
cv2.putText(output_original, f"{dist:.1f}px", mid_pt,
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
|
||||
|
||||
print(f"[Circle] 两圆心距离:{dist:.2f} px")
|
||||
else:
|
||||
print("[Circle] 检测到的圆少于 2 个")
|
||||
else:
|
||||
print("[Circle] 未检测到圆")
|
||||
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
cv2.imwrite(os.path.join(output_dir, 'detected_centers_salient.png'), output_salient)
|
||||
cv2.imwrite(os.path.join(output_dir, 'detected_centers_original.png'), output_original)
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# ------------------- Bounding rectangle & pixel dimensions -------------------
|
||||
# ----------------------------------------------------------------------
|
||||
def _get_font(size):
|
||||
"""跨平台字体加载"""
|
||||
for path in ("/usr/share/fonts/truetype/ubuntu/Ubuntu-B.ttf", "arial.ttf"):
|
||||
try:
|
||||
return ImageFont.truetype(path, size)
|
||||
except IOError:
|
||||
continue
|
||||
return ImageFont.load_default()
|
||||
|
||||
def calculate_and_draw_bbox(mask_path,
|
||||
output_mask_path,
|
||||
original_path=None,
|
||||
output_original_path=None,
|
||||
display_width=None,
|
||||
display_height=None):
|
||||
"""
|
||||
Draw a single width line along the top edge and a single height line along the right edge, labeled with pixel dimensions
|
||||
"""
|
||||
# ---------- Get the bounding box ----------
|
||||
mask_img = Image.open(mask_path).convert("L")
|
||||
arr = np.array(mask_img)
|
||||
coords = np.argwhere(arr > 0)
|
||||
if coords.size == 0:
|
||||
raise RuntimeError("掩模为空,无法测量尺寸")
|
||||
|
||||
ymin, xmin = coords.min(axis=0)
|
||||
ymax, xmax = coords.max(axis=0)
|
||||
w_px = xmax - xmin + 1
|
||||
h_px = ymax - ymin + 1
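# +1 because min/max are inclusive pixel indices, so the extent spans (max - min + 1) pixels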
|
||||
|
||||
show_w = w_px if display_width is None else display_width
|
||||
show_h = h_px if display_height is None else display_height
|
||||
font = _get_font(34)
|
||||
|
||||
# ---------- Draw on the mask image ----------
|
||||
vis_mask = mask_img.convert("RGB")
|
||||
draw_m = ImageDraw.Draw(vis_mask)
|
||||
|
||||
# Horizontal line along the top edge
|
||||
draw_m.line([(xmin, ymin), (xmax, ymin)], fill="red", width=4)
|
||||
# Vertical line along the right edge
|
||||
draw_m.line([(xmax, ymin), (xmax, ymax)], fill="red", width=4)
|
||||
|
||||
# Text placement
|
||||
# Width label: above the midpoint of the top edge; if it would fall outside the image, place it below the line instead
|
||||
tx_w = int((xmin + xmax) / 2) - 40
|
||||
ty_w = ymin - 40
|
||||
if ty_w < 0:
|
||||
ty_w = ymin + 10
|
||||
w_text = f"W:{int(round(show_w))}px"
|
||||
draw_m.text((tx_w, ty_w), w_text, fill="yellow", font=font)
|
||||
|
||||
# Height label: 10 px to the right of the right edge's midpoint
|
||||
tx_h = xmax + 10
|
||||
ty_h = int((ymin + ymax) / 2) - 20
|
||||
h_text = f"H:{int(round(show_h))}px"
|
||||
draw_m.text((tx_h, ty_h), h_text, fill="yellow", font=font)
|
||||
|
||||
vis_mask.save(output_mask_path)
|
||||
print(f"[Size] 掩模可视化已保存: {output_mask_path}")
|
||||
|
||||
# ---------- Mirror the drawing onto the original image ----------
|
||||
if original_path and output_original_path:
|
||||
orig = Image.open(original_path).convert("RGB")
|
||||
draw_o = ImageDraw.Draw(orig)
|
||||
|
||||
draw_o.line([(xmin, ymin), (xmax, ymin)], fill="red", width=4)
|
||||
draw_o.line([(xmax, ymin), (xmax, ymax)], fill="red", width=4)
|
||||
draw_o.text((tx_w, ty_w), w_text, fill="yellow", font=font)
|
||||
draw_o.text((tx_h, ty_h), h_text, fill="yellow", font=font)
|
||||
|
||||
orig.save(output_original_path)
|
||||
print(f"[Size] 原图可视化已保存: {output_original_path}")
|
||||
|
||||
return w_px, h_px
|
||||
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# ------------------------------- Main -------------------------------
|
||||
# ----------------------------------------------------------------------
|
||||
if __name__ == '__main__':
|
||||
# ======================= Path configuration =======================
|
||||
triplets = [
|
||||
# (tag, original image path, saliency/mask path)
|
||||
('front', './image/front_2.jpg', './saliency/front_2.jpg'), # front view
|
||||
('rear', './image/rear_2.jpg', './saliency/rear_2.jpg'), # rear view
|
||||
('side', './image/side_2.jpg', './saliency/side_2.jpg'), # side view (circle detection)
|
||||
]
|
||||
|
||||
out_dir = './result2'
|
||||
thresh_dir = './thresh2'
|
||||
os.makedirs(out_dir, exist_ok=True)
|
||||
os.makedirs(thresh_dir, exist_ok=True)
|
||||
|
||||
for tag, orig_path, mask_src in triplets:
|
||||
# # ======================= Generate saliency maps (can be commented out; also produced by u2net_saliency) =======================
|
||||
print(f"处理 {tag} 图像中...")
|
||||
generate_saliency_map(orig_path, mask_src)
|
||||
# # ==========================================================================================
|
||||
|
||||
# # ======================= Thresholding =======================
|
||||
print(f'\n===== Processing {tag} =====')
|
||||
|
||||
# ---------- 1) Threshold the mask ----------
|
||||
gray = cv2.imread(mask_src, cv2.IMREAD_GRAYSCALE)
|
||||
if gray is None:
|
||||
raise FileNotFoundError(mask_src)
|
||||
|
||||
# Otsu automatic threshold + optional offset (suggested offset magnitude: 0-20)
|
||||
offset = -10 # a negative offset lowers the threshold, keeping more of the region
|
||||
otsu_val, _ = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
final_thresh = max(0, min(255, otsu_val + offset))
|
||||
_, mask_bin = cv2.threshold(gray, final_thresh, 255, cv2.THRESH_BINARY)
|
||||
print(f'[Mask-{tag}] Otsu threshold={otsu_val:.1f}, final threshold={final_thresh}')
|
||||
|
||||
# Optional light closing (smooths small holes without destroying detail)
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
|
||||
mask_bin = cv2.morphologyEx(mask_bin, cv2.MORPH_CLOSE, kernel, iterations=1)
|
||||
|
||||
# Save the thresholded result
|
||||
mask_path = os.path.join(thresh_dir, f'{tag}_1_mask_thresh.png')
|
||||
cv2.imwrite(mask_path, mask_bin)
|
||||
print(f'[Mask-{tag}] Thresholded mask saved: {mask_path}')
|
||||
|
||||
# ---------- 2) Draw width/height lines and label pixel dimensions ----------
|
||||
mask_vis_path = os.path.join(out_dir, f'{tag}_size_lines_mask.png')
|
||||
orig_vis_path = os.path.join(out_dir, f'{tag}_size_lines_orig.png')
|
||||
calculate_and_draw_bbox(
|
||||
mask_path, # binary mask
|
||||
mask_vis_path, # annotated mask output
|
||||
orig_path, # original image
|
||||
orig_vis_path # annotated original-image output
|
||||
)
|
||||
|
||||
# ---------- 3) Circle-center detection for 'side' only ----------
|
||||
if tag == 'side':
|
||||
detect_and_draw_circles(mask_src, orig_path, out_dir)
|
|
@@ -0,0 +1,525 @@
|
|||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
class REBNCONV(nn.Module):
|
||||
def __init__(self,in_ch=3,out_ch=3,dirate=1):
|
||||
super(REBNCONV,self).__init__()
|
||||
|
||||
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
|
||||
self.bn_s1 = nn.BatchNorm2d(out_ch)
|
||||
self.relu_s1 = nn.ReLU(inplace=True)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
|
||||
|
||||
return xout
|
||||
|
||||
## upsample tensor 'src' to the same spatial size as tensor 'tar'
|
||||
def _upsample_like(src,tar):
|
||||
|
||||
src = F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=False)  # F.upsample is deprecated in favor of F.interpolate
|
||||
|
||||
return src
|
||||
|
||||
|
||||
### RSU-7 ###
|
||||
class RSU7(nn.Module):#UNet07DRES(nn.Module):
|
||||
|
||||
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
|
||||
super(RSU7,self).__init__()
|
||||
|
||||
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
|
||||
|
||||
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
|
||||
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
|
||||
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
|
||||
|
||||
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
hxin = self.rebnconvin(hx)
|
||||
|
||||
hx1 = self.rebnconv1(hxin)
|
||||
hx = self.pool1(hx1)
|
||||
|
||||
hx2 = self.rebnconv2(hx)
|
||||
hx = self.pool2(hx2)
|
||||
|
||||
hx3 = self.rebnconv3(hx)
|
||||
hx = self.pool3(hx3)
|
||||
|
||||
hx4 = self.rebnconv4(hx)
|
||||
hx = self.pool4(hx4)
|
||||
|
||||
hx5 = self.rebnconv5(hx)
|
||||
hx = self.pool5(hx5)
|
||||
|
||||
hx6 = self.rebnconv6(hx)
|
||||
|
||||
hx7 = self.rebnconv7(hx6)
|
||||
|
||||
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
|
||||
hx6dup = _upsample_like(hx6d,hx5)
|
||||
|
||||
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
|
||||
hx5dup = _upsample_like(hx5d,hx4)
|
||||
|
||||
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
|
||||
hx4dup = _upsample_like(hx4d,hx3)
|
||||
|
||||
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
return hx1d + hxin
|
||||
|
||||
### RSU-6 ###
|
||||
class RSU6(nn.Module):#UNet06DRES(nn.Module):
|
||||
|
||||
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
|
||||
super(RSU6,self).__init__()
|
||||
|
||||
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
|
||||
|
||||
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
|
||||
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
|
||||
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
|
||||
|
||||
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
hxin = self.rebnconvin(hx)
|
||||
|
||||
hx1 = self.rebnconv1(hxin)
|
||||
hx = self.pool1(hx1)
|
||||
|
||||
hx2 = self.rebnconv2(hx)
|
||||
hx = self.pool2(hx2)
|
||||
|
||||
hx3 = self.rebnconv3(hx)
|
||||
hx = self.pool3(hx3)
|
||||
|
||||
hx4 = self.rebnconv4(hx)
|
||||
hx = self.pool4(hx4)
|
||||
|
||||
hx5 = self.rebnconv5(hx)
|
||||
|
||||
hx6 = self.rebnconv6(hx5)
|
||||
|
||||
|
||||
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
|
||||
hx5dup = _upsample_like(hx5d,hx4)
|
||||
|
||||
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
|
||||
hx4dup = _upsample_like(hx4d,hx3)
|
||||
|
||||
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
return hx1d + hxin
|
||||
|
||||
### RSU-5 ###
|
||||
class RSU5(nn.Module):#UNet05DRES(nn.Module):
|
||||
|
||||
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
|
||||
super(RSU5,self).__init__()
|
||||
|
||||
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
|
||||
|
||||
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
|
||||
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
|
||||
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
|
||||
|
||||
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
hxin = self.rebnconvin(hx)
|
||||
|
||||
hx1 = self.rebnconv1(hxin)
|
||||
hx = self.pool1(hx1)
|
||||
|
||||
hx2 = self.rebnconv2(hx)
|
||||
hx = self.pool2(hx2)
|
||||
|
||||
hx3 = self.rebnconv3(hx)
|
||||
hx = self.pool3(hx3)
|
||||
|
||||
hx4 = self.rebnconv4(hx)
|
||||
|
||||
hx5 = self.rebnconv5(hx4)
|
||||
|
||||
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
|
||||
hx4dup = _upsample_like(hx4d,hx3)
|
||||
|
||||
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
return hx1d + hxin
|
||||
|
||||
### RSU-4 ###
|
||||
class RSU4(nn.Module):#UNet04DRES(nn.Module):
|
||||
|
||||
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
|
||||
super(RSU4,self).__init__()
|
||||
|
||||
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
|
||||
|
||||
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
|
||||
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
|
||||
|
||||
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
|
||||
|
||||
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
|
||||
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
hxin = self.rebnconvin(hx)
|
||||
|
||||
hx1 = self.rebnconv1(hxin)
|
||||
hx = self.pool1(hx1)
|
||||
|
||||
hx2 = self.rebnconv2(hx)
|
||||
hx = self.pool2(hx2)
|
||||
|
||||
hx3 = self.rebnconv3(hx)
|
||||
|
||||
hx4 = self.rebnconv4(hx3)
|
||||
|
||||
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
return hx1d + hxin
|
||||
|
||||
### RSU-4F ###
|
||||
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
|
||||
|
||||
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
|
||||
super(RSU4F,self).__init__()
|
||||
|
||||
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
|
||||
|
||||
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
|
||||
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
|
||||
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
|
||||
|
||||
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
|
||||
|
||||
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
|
||||
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
|
||||
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
hxin = self.rebnconvin(hx)
|
||||
|
||||
hx1 = self.rebnconv1(hxin)
|
||||
hx2 = self.rebnconv2(hx1)
|
||||
hx3 = self.rebnconv3(hx2)
|
||||
|
||||
hx4 = self.rebnconv4(hx3)
|
||||
|
||||
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
|
||||
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
|
||||
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
|
||||
|
||||
return hx1d + hxin
|
||||
|
||||
|
||||
##### U^2-Net ####
|
||||
class U2NET(nn.Module):
|
||||
|
||||
def __init__(self,in_ch=3,out_ch=1):
|
||||
super(U2NET,self).__init__()
|
||||
|
||||
self.stage1 = RSU7(in_ch,32,64)
|
||||
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage2 = RSU6(64,32,128)
|
||||
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage3 = RSU5(128,64,256)
|
||||
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage4 = RSU4(256,128,512)
|
||||
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage5 = RSU4F(512,256,512)
|
||||
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage6 = RSU4F(512,256,512)
|
||||
|
||||
# decoder
|
||||
self.stage5d = RSU4F(1024,256,512)
|
||||
self.stage4d = RSU4(1024,128,256)
|
||||
self.stage3d = RSU5(512,64,128)
|
||||
self.stage2d = RSU6(256,32,64)
|
||||
self.stage1d = RSU7(128,16,64)
|
||||
|
||||
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
|
||||
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
|
||||
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
|
||||
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
|
||||
|
||||
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
#stage 1
|
||||
hx1 = self.stage1(hx)
|
||||
hx = self.pool12(hx1)
|
||||
|
||||
#stage 2
|
||||
hx2 = self.stage2(hx)
|
||||
hx = self.pool23(hx2)
|
||||
|
||||
#stage 3
|
||||
hx3 = self.stage3(hx)
|
||||
hx = self.pool34(hx3)
|
||||
|
||||
#stage 4
|
||||
hx4 = self.stage4(hx)
|
||||
hx = self.pool45(hx4)
|
||||
|
||||
#stage 5
|
||||
hx5 = self.stage5(hx)
|
||||
hx = self.pool56(hx5)
|
||||
|
||||
#stage 6
|
||||
hx6 = self.stage6(hx)
|
||||
hx6up = _upsample_like(hx6,hx5)
|
||||
|
||||
#-------------------- decoder --------------------
|
||||
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
|
||||
hx5dup = _upsample_like(hx5d,hx4)
|
||||
|
||||
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
|
||||
hx4dup = _upsample_like(hx4d,hx3)
|
||||
|
||||
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
|
||||
#side output
|
||||
d1 = self.side1(hx1d)
|
||||
|
||||
d2 = self.side2(hx2d)
|
||||
d2 = _upsample_like(d2,d1)
|
||||
|
||||
d3 = self.side3(hx3d)
|
||||
d3 = _upsample_like(d3,d1)
|
||||
|
||||
d4 = self.side4(hx4d)
|
||||
d4 = _upsample_like(d4,d1)
|
||||
|
||||
d5 = self.side5(hx5d)
|
||||
d5 = _upsample_like(d5,d1)
|
||||
|
||||
d6 = self.side6(hx6)
|
||||
d6 = _upsample_like(d6,d1)
|
||||
|
||||
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
|
||||
|
||||
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)  # torch.sigmoid replaces the deprecated F.sigmoid
|
||||
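# Minimal usage sketch (illustrative shapes only):
#   net = U2NET(3, 1)
#   d0, d1, d2, d3, d4, d5, d6 = net(torch.randn(1, 3, 320, 320))
# d0 is the fused prediction; all seven outputs are sigmoid maps at the
# input resolution, here of shape (1, 1, 320, 320).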
|
||||
### U^2-Net small ###
|
||||
class U2NETP(nn.Module):
|
||||
|
||||
def __init__(self,in_ch=3,out_ch=1):
|
||||
super(U2NETP,self).__init__()
|
||||
|
||||
self.stage1 = RSU7(in_ch,16,64)
|
||||
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage2 = RSU6(64,16,64)
|
||||
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage3 = RSU5(64,16,64)
|
||||
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage4 = RSU4(64,16,64)
|
||||
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage5 = RSU4F(64,16,64)
|
||||
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
|
||||
|
||||
self.stage6 = RSU4F(64,16,64)
|
||||
|
||||
# decoder
|
||||
self.stage5d = RSU4F(128,16,64)
|
||||
self.stage4d = RSU4(128,16,64)
|
||||
self.stage3d = RSU5(128,16,64)
|
||||
self.stage2d = RSU6(128,16,64)
|
||||
self.stage1d = RSU7(128,16,64)
|
||||
|
||||
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
|
||||
|
||||
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
|
||||
|
||||
def forward(self,x):
|
||||
|
||||
hx = x
|
||||
|
||||
#stage 1
|
||||
hx1 = self.stage1(hx)
|
||||
hx = self.pool12(hx1)
|
||||
|
||||
#stage 2
|
||||
hx2 = self.stage2(hx)
|
||||
hx = self.pool23(hx2)
|
||||
|
||||
#stage 3
|
||||
hx3 = self.stage3(hx)
|
||||
hx = self.pool34(hx3)
|
||||
|
||||
#stage 4
|
||||
hx4 = self.stage4(hx)
|
||||
hx = self.pool45(hx4)
|
||||
|
||||
#stage 5
|
||||
hx5 = self.stage5(hx)
|
||||
hx = self.pool56(hx5)
|
||||
|
||||
#stage 6
|
||||
hx6 = self.stage6(hx)
|
||||
hx6up = _upsample_like(hx6,hx5)
|
||||
|
||||
#decoder
|
||||
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
|
||||
hx5dup = _upsample_like(hx5d,hx4)
|
||||
|
||||
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
|
||||
hx4dup = _upsample_like(hx4d,hx3)
|
||||
|
||||
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
|
||||
hx3dup = _upsample_like(hx3d,hx2)
|
||||
|
||||
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
|
||||
hx2dup = _upsample_like(hx2d,hx1)
|
||||
|
||||
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
|
||||
|
||||
|
||||
#side output
|
||||
d1 = self.side1(hx1d)
|
||||
|
||||
d2 = self.side2(hx2d)
|
||||
d2 = _upsample_like(d2,d1)
|
||||
|
||||
d3 = self.side3(hx3d)
|
||||
d3 = _upsample_like(d3,d1)
|
||||
|
||||
d4 = self.side4(hx4d)
|
||||
d4 = _upsample_like(d4,d1)
|
||||
|
||||
d5 = self.side5(hx5d)
|
||||
d5 = _upsample_like(d5,d1)
|
||||
|
||||
d6 = self.side6(hx6)
|
||||
d6 = _upsample_like(d6,d1)
|
||||
|
||||
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
|
||||
|
||||
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)  # torch.sigmoid replaces the deprecated F.sigmoid
|
|
@@ -0,0 +1,540 @@
|
|||
import cv2
|
||||
import numpy as np
|
||||
import os
|
||||
# main.py
|
||||
# ───────────── Import helper functions from utils ─────────────
|
||||
from utils import *
|
||||
|
||||
# These can then be called directly, e.g.:
|
||||
# tl, tr, br, bl = get_bounding_box(mask_path)
|
||||
# top_line, right_line, bottom_line, left_line = calculate_lines(tl, tr, br, bl)
|
||||
|
||||
def resize_image(image, target_size=(1000, 600)):
|
||||
"""
|
||||
Resize the image to the target size
|
||||
"""
|
||||
return cv2.resize(image, target_size, interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
def save_eroded(mask_color, out_path, kernel_size=(5, 5), erode_iter=10, dilate_iter=10):
|
||||
"""
|
||||
Save the mask after erosion followed by dilation (default 10 iterations each), for easier visualization
|
||||
Returns the processed image
|
||||
"""
|
||||
gray = cv2.cvtColor(mask_color, cv2.COLOR_BGR2GRAY)
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
||||
eroded = cv2.erode(gray, kernel, iterations=erode_iter)
|
||||
morphed = cv2.dilate(eroded, kernel, iterations=dilate_iter)
|
||||
morphed_bgr = cv2.cvtColor(morphed, cv2.COLOR_GRAY2BGR)
|
||||
cv2.imwrite(out_path, morphed_bgr)
|
||||
return morphed_bgr
|
||||
|
||||
def detect_circles(image_input, min_radius=50, max_radius=60, min_dist=200, param1=50, param2=17):
|
||||
"""
|
||||
Run Hough circle detection on the given image; returns circle centers and radii
|
||||
|
||||
Parameters
|
||||
----
|
||||
image_input : image file path or image array
|
||||
"""
|
||||
# Read the image and convert to grayscale
|
||||
if isinstance(image_input, str):
|
||||
# Input is a file path
|
||||
image = cv2.imread(image_input, cv2.IMREAD_GRAYSCALE)
|
||||
if image is None:
|
||||
raise FileNotFoundError(f"图像文件 {image_input} 未找到")
|
||||
else:
|
||||
# Input is an image array
|
||||
image = cv2.cvtColor(image_input, cv2.COLOR_BGR2GRAY) if image_input.ndim == 3 else image_input.copy()
|
||||
|
||||
# Blur the image to reduce noise
|
||||
blurred = cv2.GaussianBlur(image, (9, 9), 2)
|
||||
|
||||
# Hough circle transform
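# dp: inverse ratio of accumulator to image resolution; param1: upper Canny
# edge threshold; param2: accumulator threshold (lower -> more candidate circles)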
|
||||
circles = cv2.HoughCircles(
|
||||
blurred, cv2.HOUGH_GRADIENT, dp=1.2, minDist=min_dist,
|
||||
param1=param1, param2=param2, minRadius=min_radius, maxRadius=max_radius
|
||||
)
|
||||
|
||||
# If circles were detected
|
||||
if circles is not None:
|
||||
# Convert coordinates and radii to integers and return
|
||||
circles = np.uint16(np.around(circles))
|
||||
centers = [(int(c[0]), int(c[1]), int(c[2])) for c in circles[0]]  # int() avoids uint16 wrap-around in downstream arithmetic
|
||||
return centers # [(x1, y1, r1), (x2, y2, r2), ...]
|
||||
else:
|
||||
print("[Circle] 未检测到圆")
|
||||
return [] # empty list: no circles detected
|
||||
|
||||
def draw_line_equation(image, line_info, color=(0, 255, 0), thickness=2):
|
||||
"""
|
||||
Draw a line segment (not an infinite line) from its line equation
|
||||
"""
|
||||
if line_info[0] is None:
|
||||
# Vertical line x = const
|
||||
_, x, y1, y2 = line_info
|
||||
pt1 = (int(x), int(min(y1, y2)))
|
||||
pt2 = (int(x), int(max(y1, y2)))
|
||||
elif line_info[0] == 0:
|
||||
# Horizontal line y = const
|
||||
_, y, x1, x2 = line_info
|
||||
pt1 = (int(min(x1, x2)), int(y))
|
||||
pt2 = (int(max(x1, x2)), int(y))
|
||||
else:
|
||||
# Sloped line y = ax + b
|
||||
a, b, x1, x2 = line_info
|
||||
x1, x2 = int(x1), int(x2)
|
||||
pt1 = (x1, int(a * x1 + b))
|
||||
pt2 = (x2, int(a * x2 + b))
|
||||
|
||||
cv2.line(image, pt1, pt2, color, thickness)
|
||||
|
||||
|
||||
def draw_circles(image, circles, color=(0, 0, 255), thickness=2):
|
||||
"""
|
||||
Draw the circle centers (and optionally the circle outlines) on the image; outline drawing is currently commented out
|
||||
"""
|
||||
for (x, y, r) in circles:
|
||||
# Circle outline (disabled)
|
||||
# cv2.circle(image, (x, y), r, (0, 255, 0), 2)
|
||||
# Circle center
|
||||
cv2.circle(image, (x, y), 3, (0, 0, 255), -1)
|
||||
|
||||
|
||||
|
||||
# ------------------------- Find points A and B -------------------------
|
||||
def find_A_B(circles):
|
||||
"""
|
||||
circles : [(x, y, r), ...] from detect_circles()
|
||||
Returns (A, B) -- each is (x, y) or None
|
||||
Rule: the first detected center is A, the second is B
|
||||
"""
|
||||
if len(circles) == 0:
|
||||
return None, None
|
||||
elif len(circles) == 1:
|
||||
return (circles[0][0], circles[0][1]), None
|
||||
else:
|
||||
A = (circles[0][0], circles[0][1])
|
||||
B = (circles[1][0], circles[1][1])
|
||||
return A, B
|
||||
|
||||
|
||||
# ------------------------- Find point C -------------------------
|
||||
def find_C(image, line_info,
|
||||
thresh_val=250,
|
||||
do_morph=False,
|
||||
kernel_size=(5, 5),
|
||||
iterations=3,
|
||||
adj=-5):
|
||||
"""
|
||||
Point C: the first white pixel met when scanning the given line from bottom to top
|
||||
"""
|
||||
# Convert to grayscale
|
||||
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image.copy()
|
||||
|
||||
# Optional morphology
|
||||
if do_morph:
|
||||
gray = apply_morphology(gray,
|
||||
kernel_size=kernel_size,
|
||||
erode_iter=iterations,
|
||||
dilate_iter=iterations)
|
||||
|
||||
# Vertical line x = const
|
||||
if line_info[0] is None:
|
||||
_, x_const, y1, y2 = line_info
|
||||
x_const = int(x_const)
|
||||
for y in range(int(max(y1, y2)), int(min(y1, y2)) - 1, -1): # bottom -> top
|
||||
if gray[y, x_const] >= thresh_val:
|
||||
return x_const, y + adj
|
||||
|
||||
# Horizontal line y = const (rarely needed)
|
||||
elif line_info[0] == 0:
|
||||
_, y_const, x1, x2 = line_info
|
||||
y_const = int(y_const)
|
||||
for y in range(gray.shape[0] - 1, -1, -1): # bottom -> top
|
||||
if gray[y, int(x1)] >= thresh_val:
|
||||
return int(x1), y + adj
|
||||
|
||||
# Sloped line y = ax + b
|
||||
else:
|
||||
a, b, x1, x2 = line_info
|
||||
xs = np.linspace(x1, x2, num=abs(int(x2 - x1)) + 1, dtype=int)
|
||||
ys = (a * xs + b).astype(int)
|
||||
for x_i, y_i in sorted(zip(xs, ys), key=lambda p: -p[1]): # descending y
|
||||
if 0 <= x_i < gray.shape[1] and 0 <= y_i < gray.shape[0]:
|
||||
if gray[y_i, x_i] >= thresh_val:
|
||||
return x_i, y_i + adj
|
||||
return None
|
||||
|
||||
|
||||
# ------------------------- Find point D -------------------------
|
||||
def find_D(image, line_info,
|
||||
thresh_val=250,
|
||||
do_morph=False,
|
||||
kernel_size=(3, 3),
|
||||
iterations=9,
|
||||
adj=-5):
|
||||
"""
|
||||
The first white pixel where the rightmost line (or the given line_info) meets the image region,
|
||||
found with a bottom-up scan.
|
||||
|
||||
Parameters
|
||||
----
|
||||
image : BGR or grayscale image
|
||||
line_info : same format as returned by get_line_equation
|
||||
thresh_val : pixels with gray value >= thresh_val count as white
|
||||
do_morph : whether to apply morphological denoising
|
||||
kernel_size : structuring-element size
|
||||
iterations : erosion/dilation iteration count
|
||||
"""
|
||||
# 1) Convert to grayscale
|
||||
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image.copy()
|
||||
|
||||
|
||||
# 2) Morphology (optional)
|
||||
if do_morph:
|
||||
gray = apply_morphology(gray,
|
||||
kernel_size=kernel_size,
|
||||
erode_iter=iterations,
|
||||
dilate_iter=iterations+1)
|
||||
|
||||
# 3) Scan bottom-up
|
||||
if line_info[0] is None: # vertical line x = const
|
||||
_, x_const, y1, y2 = line_info
|
||||
x_const = int(x_const)
|
||||
for y in range(int(max(y1, y2)), int(min(y1, y2)) - 1, -1):
|
||||
if gray[y, x_const] >= thresh_val:
|
||||
return x_const, y + adj
|
||||
|
||||
elif line_info[0] == 0: # horizontal line y = const (rare)
|
||||
_, y_const, x1, x2 = line_info
|
||||
y_const = int(y_const)
|
||||
for y in range(gray.shape[0] - 1, -1, -1):
|
||||
if gray[y, int(x1)] >= thresh_val:
|
||||
return int(x1), y + adj
|
||||
|
||||
else: # sloped line y = ax + b
|
||||
a, b, x1, x2 = line_info
|
||||
xs = np.linspace(x1, x2, num=abs(int(x2 - x1)) + 1, dtype=int)
|
||||
ys = (a * xs + b).astype(int)
|
||||
for x_i, y_i in sorted(zip(xs, ys), key=lambda p: -p[1]): # y from largest to smallest
|
||||
if 0 <= x_i < gray.shape[1] and 0 <= y_i < gray.shape[0]:
|
||||
if gray[y_i, x_i] >= thresh_val:
|
||||
return x_i, y_i + adj
|
||||
return None
|
||||
|
||||
|
||||
def find_EFH(A, B, G, bottom_line):
|
||||
"""
|
||||
Parameters
|
||||
----
|
||||
A, B, G : three known point coordinates (x, y), or None
|
||||
bottom_line : line_info of the bounding rectangle's bottom edge (from calculate_lines)
|
||||
Returns
|
||||
----
|
||||
(E, F, H) : the three perpendicular feet; None where the corresponding input is None
|
||||
"""
|
||||
E = perpendicular_foot(A, bottom_line) if A else None
|
||||
F = perpendicular_foot(B, bottom_line) if B else None
|
||||
H = perpendicular_foot(G, bottom_line) if G else None
|
||||
return E, F, H
|
||||
# ------------------------- Find point G -------------------------
|
||||
def find_G(image, line_info,
|
||||
kernel_size=(7, 7),
|
||||
erode_iter=10,
|
||||
dilate_iter=11,
|
||||
adj=3):
|
||||
"""
|
||||
Highest point G of the car in the side view: the first white pixel met scanning the line left to right (or top to bottom).
|
||||
"""
|
||||
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
||||
|
||||
# Morphology (fixed-parameter version)
|
||||
gray = apply_morphology(gray,
|
||||
kernel_size=kernel_size,
|
||||
erode_iter=erode_iter,
|
||||
dilate_iter=dilate_iter)
|
||||
|
||||
if line_info[0] is None: # vertical line x = const
|
||||
_, x, y1, y2 = line_info
|
||||
for y in range(int(min(y1, y2)), int(max(y1, y2))):
|
||||
if gray[y, int(x)] == 255:
|
||||
return int(x), y + adj
|
||||
|
||||
elif line_info[0] == 0: # horizontal line y = const
|
||||
_, y, x1, x2 = line_info
|
||||
for x in range(int(min(x1, x2)), int(max(x1, x2))):
|
||||
if gray[int(y), x] == 255:
|
||||
return x, int(y) + adj
|
||||
|
||||
else: # sloped line y = ax + b
|
||||
a, b, x1, x2 = line_info
|
||||
for x in range(int(x1), int(x2) + 1):
|
||||
y = int(a * x + b)
|
||||
if 0 <= y < gray.shape[0] and gray[y, x] == 255:
|
||||
return x, y + adj
|
||||
return None
|
||||
|
||||
|
||||
def find_bottom_gap_midpoint(mask_color, bottom_line, gap=20, direction='left'):
|
||||
"""
|
||||
Walk the sampled points along the bottom edge, looking upward for a white pixel within gap pixels.
|
||||
The first point with dy <= gap is the left endpoint; the point before the first with dy > gap is the right endpoint.
|
||||
The L/M point is the center of that interval.
|
||||
"""
|
||||
gray = cv2.cvtColor(mask_color, cv2.COLOR_BGR2GRAY)
|
||||
h, w = gray.shape
|
||||
|
||||
# Sample points along the bottom edge
|
||||
if bottom_line[0] is None: # vertical line
|
||||
_, x_const, y1, y2 = bottom_line
|
||||
ys = np.linspace(y1, y2, abs(int(y2 - y1)) + 1, dtype=int)
|
||||
xs = np.full_like(ys, int(x_const))
|
||||
elif bottom_line[0] == 0: # horizontal line
|
||||
_, y_const, x1, x2 = bottom_line
|
||||
xs = np.linspace(x1, x2, abs(int(x2 - x1)) + 1, dtype=int)
|
||||
ys = np.full_like(xs, int(y_const))
|
||||
else: # sloped line
|
||||
a, b, x1, x2 = bottom_line
|
||||
xs = np.linspace(x1, x2, abs(int(x2 - x1)) + 1, dtype=int)
|
||||
ys = (a * xs + b).astype(int)
|
||||
|
||||
# Traversal order depends on direction
|
||||
if direction == 'left':
|
||||
idx_range = range(len(xs))
|
||||
else:
|
||||
idx_range = range(len(xs) - 1, -1, -1)
|
||||
|
||||
dy_list = [] # stores (x, y, dy)
|
||||
for i in idx_range:
|
||||
x, y = xs[i], ys[i]
|
||||
dy = None
|
||||
for d in range(1, gap + 2):
|
||||
yy = y - d
|
||||
if 0 <= x < w and 0 <= yy < h and gray[yy, x] >= 200:
|
||||
dy = d
|
||||
break
|
||||
dy_list.append((x, y, dy))
|
||||
|
||||
# Left endpoint: the first point with dy <= gap
|
||||
left_idx = None
|
||||
for i, (x, y, dy) in enumerate(dy_list):
|
||||
if dy is not None and dy <= gap:
|
||||
left_idx = i
|
||||
break
|
||||
if left_idx is None:
|
||||
return None
|
||||
# Right endpoint: the point before the first with dy > gap
|
||||
right_idx = None
|
||||
for i in range(left_idx, len(dy_list)):
|
||||
dy = dy_list[i][2]
|
||||
if dy is None or dy > gap:
|
||||
right_idx = i - 1
|
||||
break
|
||||
if right_idx is None:
|
||||
right_idx = len(dy_list) - 1
|
||||
# Center of the interval
|
||||
mid_idx = (left_idx + right_idx) // 2
|
||||
print(f"[find_bottom_gap_midpoint] direction={direction}, 左端点=({dy_list[left_idx][0]}, {dy_list[left_idx][1]}), 右端点=({dy_list[right_idx][0]}, {dy_list[right_idx][1]})")
|
||||
return (int(dy_list[mid_idx][0]), int(dy_list[mid_idx][1]))
|
||||
|
||||
def process_side(mask_path, rgb_path, out_dir):
|
||||
"""
|
||||
Process the side view: detect and annotate points A-H
|
||||
"""
|
||||
os.makedirs(out_dir, exist_ok=True)
|
||||
mask_color = cv2.imread(mask_path, cv2.IMREAD_COLOR)
|
||||
rgb_color = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
|
||||
if mask_color is None or rgb_color is None:
|
||||
raise FileNotFoundError("无法读取 mask 或 RGB 图像")
|
||||
|
||||
# Resize images to 1000x600 (optional)
|
||||
# mask_color = resize_image(mask_color, (1000, 600))
|
||||
# rgb_color = resize_image(rgb_color, (1000, 600))
|
||||
|
||||
circles = detect_circles(mask_color)
|
||||
top_left, top_right, bottom_right, bottom_left = get_bounding_box(mask_color)
|
||||
top_line, right_line, bottom_line, left_line = calculate_lines(
|
||||
top_left, top_right, bottom_right, bottom_left
|
||||
)
|
||||
A, B = find_A_B(circles)
|
||||
C = find_C(mask_color, left_line)
|
||||
D = find_D(mask_color, right_line)
|
||||
G = find_G(mask_color, top_line)
|
||||
E, F, H = find_EFH(A, B, G, bottom_line)
|
||||
|
||||
points = {'A': A, 'B': B, 'C': C, 'D': D, 'E': E, 'F': F, 'G': G, 'H': H}
|
||||
for canvas in (mask_color, rgb_color):
|
||||
draw_line_equation(canvas, top_line)
|
||||
draw_line_equation(canvas, right_line)
|
||||
draw_line_equation(canvas, bottom_line)
|
||||
draw_line_equation(canvas, left_line)
|
||||
for label, pt in points.items():
|
||||
if pt is not None:
|
||||
cv2.circle(canvas, pt, 3, (0, 0, 255), -1)
|
||||
cv2.putText(canvas, label, (pt[0]+5, pt[1]-5),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
|
||||
cv2.imwrite(os.path.join(out_dir, 'side_output_mask.png'), mask_color)
|
||||
cv2.imwrite(os.path.join(out_dir, 'side_output_rgb.png'), rgb_color)
|
||||
print("A =", A, " B =", B,"C =", C, " D =", D," G =", G , "E =", E ,"F =", F ,"H =", H )
|
||||
print(f"[side] 结果已保存到 {out_dir}")
|
||||
|
||||
def process_rear(mask_path, rgb_path, out_dir):
|
||||
"""
|
||||
Process the rear view: detect and annotate points P, N, O, Q, R
|
||||
"""
|
||||
os.makedirs(out_dir, exist_ok=True)
|
||||
mask_color = cv2.imread(mask_path, cv2.IMREAD_COLOR)
|
||||
rgb_color = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
|
||||
if mask_color is None or rgb_color is None:
|
||||
raise FileNotFoundError("无法读取 mask 或 RGB 图像")
|
||||
|
||||
# Prior-knowledge bounding rectangle (same approach as the front view)
|
||||
top_left, top_right, bottom_right, bottom_left = get_front_bounding_box_with_prior(mask_color)
|
||||
top_line, right_line, bottom_line, left_line = calculate_lines(
|
||||
top_left, top_right, bottom_right, bottom_left
|
||||
)
|
||||
|
||||
# Save the image with the bounding rectangle drawn
|
||||
mask_with_rect = mask_color.copy()
|
||||
draw_line_equation(mask_with_rect, top_line)
|
||||
draw_line_equation(mask_with_rect, right_line)
|
||||
draw_line_equation(mask_with_rect, bottom_line)
|
||||
draw_line_equation(mask_with_rect, left_line)
|
||||
cv2.imwrite(os.path.join(out_dir, 'rear_with_rect.png'), mask_with_rect)
|
||||
|
||||
# P: midpoint of the top edge
|
||||
P = ((top_left[0] + top_right[0]) // 2, (top_left[1] + top_right[1]) // 2)
|
||||
# N: midpoint of the left edge
|
||||
N = ((top_left[0] + bottom_left[0]) // 2, (top_left[1] + bottom_left[1]) // 2)
|
||||
# O: midpoint of the right edge
|
||||
O = ((top_right[0] + bottom_right[0]) // 2, (top_right[1] + bottom_right[1]) // 2)
|
||||
|
||||
R = find_bottom_gap_midpoint(mask_color, bottom_line, gap=8, direction='left')
|
||||
Q = find_bottom_gap_midpoint(mask_color, bottom_line, gap=8, direction='right')
|
||||
|
||||
points = {'P': P, 'N': N, 'O': O, 'Q': Q, 'R': R}
|
||||
for canvas in (mask_color, rgb_color):
|
||||
draw_line_equation(canvas, top_line)
|
||||
draw_line_equation(canvas, right_line)
|
||||
draw_line_equation(canvas, bottom_line)
|
||||
draw_line_equation(canvas, left_line)
|
||||
for label, pt in points.items():
|
||||
if pt is not None:
|
||||
cv2.circle(canvas, pt, 3, (0, 0, 255), -1)
|
||||
cv2.putText(canvas, label, (pt[0]+5, pt[1]-5),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
|
||||
cv2.imwrite(os.path.join(out_dir, 'rear_output_mask.png'), mask_color)
|
||||
cv2.imwrite(os.path.join(out_dir, 'rear_output_rgb.png'), rgb_color)
|
||||
print("P =", P, "N =", N, "O =", O, "Q =", Q, "R =", R)
|
||||
print(f"[rear] 结果已保存到 {out_dir}")
|
||||
|
||||
def get_front_bounding_box_with_prior(mask):
|
||||
"""
|
||||
Black out the top half of the mask and compute the left/right bounds from the white pixels in the remaining bottom half; the top/bottom bounds use the original mask.
|
||||
返回 (top_left, top_right, bottom_right, bottom_left)
|
||||
"""
|
||||
h, w = mask.shape[:2]
|
||||
# 1. Build the processed mask (top half black, bottom half kept)
|
||||
mask_proc = mask.copy()
|
||||
y_cut = h * 1 // 2
|
||||
mask_proc[:y_cut, :] = 0
|
||||
# 2. Compute the left/right bounds (bottom region only)
|
||||
# Only white pixels are considered
|
||||
gray_proc = cv2.cvtColor(mask_proc, cv2.COLOR_BGR2GRAY) if mask_proc.ndim == 3 else mask_proc
|
||||
coords = cv2.findNonZero((gray_proc > 127).astype(np.uint8))
|
||||
if coords is None:
|
||||
raise ValueError("处理后mask没有白色区域,无法计算左右边界")
|
||||
xs = coords[:, 0, 0]
|
||||
ys = coords[:, 0, 1]
|
||||
left_x = np.min(xs)
|
||||
right_x = np.max(xs)
|
||||
# 3. Compute the top/bottom bounds (original mask)
|
||||
gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) if mask.ndim == 3 else mask
|
||||
coords_full = cv2.findNonZero((gray > 127).astype(np.uint8))
|
||||
if coords_full is None:
|
||||
raise ValueError("原始mask没有白色区域,无法计算上下边界")
|
||||
top_y = np.min(coords_full[:, 0, 1])
|
||||
bottom_y = np.max(coords_full[:, 0, 1])
|
||||
# 4. Return the four corners
|
||||
top_left = (left_x, top_y)
|
||||
top_right = (right_x, top_y)
|
||||
bottom_right = (right_x, bottom_y)
|
||||
bottom_left = (left_x, bottom_y)
|
||||
return top_left, top_right, bottom_right, bottom_left
|
||||
|
||||
def process_front(mask_path, rgb_path, out_dir):
|
||||
"""
|
||||
Process the front view: detect and annotate points K, I, J, L, M
|
||||
"""
|
||||
os.makedirs(out_dir, exist_ok=True)
|
||||
mask_color = cv2.imread(mask_path, cv2.IMREAD_COLOR)
|
||||
rgb_color = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
|
||||
if mask_color is None or rgb_color is None:
|
||||
raise FileNotFoundError("无法读取 mask 或 RGB 图像")
|
||||
|
||||
# Optional erosion/dilation preprocessing
|
||||
#mask_processed = save_eroded(mask_color, os.path.join(out_dir, 'front_eroded_mask.png'),
|
||||
# kernel_size=(5, 5), erode_iter=8, dilate_iter=5)
|
||||
mask_processed = mask_color
|
||||
# Prior-knowledge bounding rectangle
|
||||
top_left, top_right, bottom_right, bottom_left = get_front_bounding_box_with_prior(mask_processed)
|
||||
top_line, right_line, bottom_line, left_line = calculate_lines(
|
||||
top_left, top_right, bottom_right, bottom_left
|
||||
)
|
||||
|
||||
# Save the image with the bounding rectangle drawn
|
||||
mask_with_rect = mask_processed.copy()
|
||||
draw_line_equation(mask_with_rect, top_line)
|
||||
draw_line_equation(mask_with_rect, right_line)
|
||||
draw_line_equation(mask_with_rect, bottom_line)
|
||||
draw_line_equation(mask_with_rect, left_line)
|
||||
cv2.imwrite(os.path.join(out_dir, 'front_with_rect.png'), mask_with_rect)
|
||||
|
||||
# K: midpoint of the top edge
|
||||
K = ((top_left[0] + top_right[0]) // 2, (top_left[1] + top_right[1]) // 2)
|
||||
# I: halfway up the left edge
|
||||
I = (left_line[1], (left_line[2] + left_line[3]) // 2) if left_line[0] is None else None
|
||||
# J: halfway up the right edge
|
||||
J = (right_line[1], (right_line[2] + right_line[3]) // 2) if right_line[0] is None else None
|
||||
M = find_bottom_gap_midpoint(mask_color, bottom_line, gap=8, direction='left')
|
||||
L = find_bottom_gap_midpoint(mask_color, bottom_line, gap=8, direction='right')
|
||||
|
||||
points = {'K': K, 'I': I, 'J': J, 'L': L, 'M': M}
|
||||
for canvas in (mask_color, rgb_color):
|
||||
draw_line_equation(canvas, top_line)
|
||||
draw_line_equation(canvas, right_line)
|
||||
draw_line_equation(canvas, bottom_line)
|
||||
draw_line_equation(canvas, left_line)
|
||||
for label, pt in points.items():
|
||||
if pt is not None:
|
||||
cv2.circle(canvas, pt, 3, (0, 0, 255), -1)
|
||||
cv2.putText(canvas, label, (pt[0]+5, pt[1]-5),
|
||||
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
|
||||
cv2.imwrite(os.path.join(out_dir, 'front_output_mask.png'), mask_color)
|
||||
cv2.imwrite(os.path.join(out_dir, 'front_output_rgb.png'), rgb_color)
|
||||
print("K =", K, "I =", I, "J =", J, "L =", L, "M =", M)
|
||||
print(f"[front] 结果已保存到 {out_dir}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
side_mask = '../segment-anything-main/output/2_mask.png'
|
||||
side_rgb = '../ultralytics-main/input/2.png'
|
||||
|
||||
front_mask = '../segment-anything-main/output/00213_mask.png'
|
||||
front_rgb = '../ultralytics-main/input/00213.jpg'
|
||||
|
||||
rear_mask = '../segment-anything-main/output/3_mask.png'
|
||||
rear_rgb = '../ultralytics-main/input/3.jpg'
|
||||
|
||||
out_dir = './result'
|
||||
|
||||
process_side(side_mask, side_rgb, out_dir)
|
||||
process_front(front_mask, front_rgb, out_dir)
|
||||
process_rear(rear_mask, rear_rgb, out_dir)
|
|
@@ -0,0 +1,43 @@
|
|||
## Environment setup
|
||||
|
||||
`pip install -r requirements.txt`
|
||||
|
||||
## Dataset location
|
||||
|
||||
`demo/image` contains three images, `front.jpg`, `rear.jpg`, and `side.jpg`, showing the car from the front, rear, and side
|
||||
|
||||
## Running the code
|
||||
|
||||
1. `python3 u2net_saliency.py` runs the model on the original images to generate saliency maps (see the sketch after this list)
|
||||
2. `python3 main.py` runs wheel-circle/wheelbase detection and length/width/height measurement on the saliency maps
|
||||
3. `python3 point.py` runs contact-point detection
|
||||
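For reference, step 1 can also be driven directly from Python. A minimal sketch using the repo's `generate_saliency_map` (paths are illustrative):

```python
from u2net_saliency import generate_saliency_map

# Generate a saliency map for each view (example paths)
for tag in ('front', 'rear', 'side'):
    generate_saliency_map(f'./image/{tag}.jpg', f'./saliency/{tag}.png')
```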
|
||||
## Results
|
||||
|
||||
#### Original images
|
||||
|
||||
<img src=".\image\front.jpg" alt="front" style="zoom:25%;" />
|
||||
|
||||
<img src=".\image\rear.jpg" alt="rear" style="zoom:25%;" />
|
||||
|
||||
<img src=".\image\side.jpg" alt="side" style="zoom:25%;" />
|
||||
|
||||
#### Final results
|
||||
|
||||
Bounding rectangle and wheelbase detection
|
||||
|
||||
<img src=".\result\detected_centers_original.png" alt="detected_centers_original" style="zoom:25%;" />
|
||||
|
||||
<img src=".\result\side_1_size_lines_orig.png" alt="front_size_lines_orig" style="zoom:25%;" />
|
||||
|
||||
<img src=".\result\rear_1_size_lines_orig.png" alt="side_size_lines_orig" style="zoom:25%;" />
|
||||
|
||||
<img src=".\result\front_1_size_lines_orig.png" alt="rear_size_lines_orig" style="zoom:25%;" />
|
||||
|
||||
Contact-point detection
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
|
@@ -0,0 +1,6 @@
|
|||
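# Note: the +cu118 wheels come from the PyTorch package index, e.g.
#   pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118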
torch==2.0.0+cu118
|
||||
torchvision==0.15.1+cu118
|
||||
opencv-python==4.11.0.86
|
||||
numpy==1.26.4
|
||||
Pillow==10.3.0
|
||||
matplotlib==3.9.0
|
[37 binary image files added, 1.2 KiB - 2.3 MiB each]
|
@@ -0,0 +1,40 @@
|
|||
import os
|
||||
import json
|
||||
|
||||
# Set your target folder path
|
||||
folder_path = "../../data/part5/精标"
|
||||
|
||||
# Label-replacement mapping (Chinese part names -> class ids)
|
||||
replace_dict = {
|
||||
"引擎盖": 2,
|
||||
"前保险杠": 1,
|
||||
"前挡风玻璃": 4,
|
||||
"背景": 0,
|
||||
"A柱": 3
|
||||
}
|
||||
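# Example effect (illustrative): {"label": "引擎盖"} -> {"label": 2}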
|
||||
def replace_labels_in_json(file_path):
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
|
||||
def replace(obj):
|
||||
if isinstance(obj, dict):
|
||||
return {k: replace(v) for k, v in obj.items()}
|
||||
elif isinstance(obj, list):
|
||||
return [replace(item) for item in obj]
|
||||
elif isinstance(obj, str) and obj in replace_dict:
|
||||
return replace_dict[obj]
|
||||
else:
|
||||
return obj
|
||||
|
||||
new_data = replace(data)
|
||||
|
||||
with open(file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(new_data, f, ensure_ascii=False, indent=2)
|
||||
|
||||
if __name__ == "__main__":
|
||||
for filename in os.listdir(folder_path):
|
||||
if filename.endswith('.json'):
|
||||
file_path = os.path.join(folder_path, filename)
|
||||
replace_labels_in_json(file_path)
|
||||
print("替换完成!")
|
|
@@ -0,0 +1,68 @@
|
|||
# u2net_saliency_only.py
|
||||
import torch
|
||||
import cv2
|
||||
import torchvision.transforms as transforms
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from model.u2net import U2NETP
|
||||
import os
|
||||
# Cache the model to avoid repeated loading
|
||||
_model = None
|
||||
|
||||
|
||||
def load_u2net_model(model_path="./saved_models/u2netp.pth"):
|
||||
global _model
|
||||
if _model is None:
|
||||
_model = U2NETP(3, 1)
|
||||
_model.load_state_dict(torch.load(model_path))
|
||||
_model.cuda()
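# NOTE: assumes a CUDA-capable GPU; for CPU-only inference, load with
# torch.load(model_path, map_location='cpu') and drop the .cuda() calls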
|
||||
_model.eval()
|
||||
return _model
|
||||
|
||||
|
||||
def preprocess(image):
|
||||
transform = transforms.Compose([
|
||||
transforms.Resize((320, 320)),
|
||||
transforms.ToTensor(),
|
||||
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
||||
std=[0.229, 0.224, 0.225])
|
||||
])
|
||||
return transform(image).unsqueeze(0)
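# The returned tensor has shape (1, 3, 320, 320): unsqueeze adds the batch dimension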
|
||||
|
||||
|
||||
def postprocess(output, original_size):
|
||||
output = output.squeeze().cpu().detach().numpy()
|
||||
output = (output * 255).astype(np.uint8)
|
||||
output = cv2.resize(output, original_size)
|
||||
return output
|
||||
|
||||
|
||||
def generate_saliency_map(input_image_path, output_image_path, model_path="./saved_models/u2netp.pth"):
|
||||
net = load_u2net_model(model_path)
|
||||
|
||||
original_image = Image.open(input_image_path).convert("RGB")
|
||||
original_size = original_image.size
|
||||
input_tensor = preprocess(original_image).cuda()
|
||||
|
||||
output = net(input_tensor)[0]
|
||||
saliency_map = postprocess(output, original_size)
|
||||
|
||||
cv2.imwrite(output_image_path, saliency_map)
|
||||
print(f"显著图已保存至: {output_image_path}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
triplets = [
|
||||
# (tag, original image path, saliency/mask path)
|
||||
('front', './image/front.jpg', './saliency/front.png'), # front view
|
||||
('rear', './image/rear.jpg', './saliency/rear.png'), # rear view
|
||||
('side', './image/side.jpg', './saliency/side.png'), # side view (used for circle detection)
|
||||
]
|
||||
|
||||
thresh_dir = './thresh'
|
||||
os.makedirs(thresh_dir, exist_ok=True)
|
||||
|
||||
# # ======================= Generate saliency maps =======================
|
||||
for tag, image_path, saliency_path in triplets:
|
||||
print(f"处理 {tag} 图像中...")
|
||||
generate_saliency_map(image_path, saliency_path)
|
|
@@ -0,0 +1,100 @@
|
|||
import cv2
|
||||
import numpy as np
|
||||
|
||||
|
||||
def get_bounding_box(image_input):
|
||||
"""
|
||||
Extract the minimal bounding rectangle from a thresholded image; returns the four corner coordinates (clockwise)
|
||||
|
||||
Parameters
|
||||
----
|
||||
image_input : image file path or image array
|
||||
"""
|
||||
if isinstance(image_input, str):
|
||||
# Input is a file path
|
||||
image = cv2.imread(image_input, cv2.IMREAD_GRAYSCALE)
|
||||
if image is None:
|
||||
raise FileNotFoundError(f"图像文件 {image_input} 未找到")
|
||||
else:
|
||||
# Input is an image array
|
||||
image = cv2.cvtColor(image_input, cv2.COLOR_BGR2GRAY) if image_input.ndim == 3 else image_input.copy()
|
||||
|
||||
coords = np.argwhere(image > 0)
|
||||
if coords.size == 0:
|
||||
raise ValueError("图像中没有非零像素,无法计算外接矩形")
|
||||
|
||||
ymin, xmin = coords.min(axis=0)
|
||||
ymax, xmax = coords.max(axis=0)
|
||||
|
||||
# Return the four corners (clockwise)
|
||||
return (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)
|
||||
|
||||
|
||||
def get_line_equation(p1, p2):
|
||||
"""
|
||||
Given two points (p1, p2), return the line equation:
|
||||
- vertical line: (None, x_const, y1, y2)
|
||||
- horizontal line: (0, y_const, x1, x2)
|
||||
- sloped line: (a, b, x1, x2) for y = ax + b over [x1, x2]
|
||||
"""
|
||||
if p2[0] == p1[0]: # vertical line
|
||||
return None, p1[0], p1[1], p2[1]
|
||||
elif p2[1] == p1[1]: # horizontal line
|
||||
return 0, p1[1], p1[0], p2[0]
|
||||
else:
|
||||
a = (p2[1] - p1[1]) / (p2[0] - p1[0])
|
||||
b = p1[1] - a * p1[0]
|
||||
return a, b, p1[0], p2[0]
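# Illustrative examples:
#   get_line_equation((0, 0), (0, 9))  -> (None, 0, 0, 9)    vertical x = 0
#   get_line_equation((0, 5), (9, 5))  -> (0, 5, 0, 9)       horizontal y = 5
#   get_line_equation((0, 0), (10, 5)) -> (0.5, 0.0, 0, 10)  y = 0.5x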
|
||||
|
||||
def calculate_lines(top_left, top_right, bottom_right, bottom_left):
|
||||
"""
|
||||
Return the line equations of the bounding rectangle's four edges
|
||||
"""
|
||||
top_line = get_line_equation(top_left, top_right)
|
||||
right_line = get_line_equation(top_right, bottom_right)
|
||||
bottom_line = get_line_equation(bottom_right, bottom_left)
|
||||
left_line = get_line_equation(bottom_left, top_left)
|
||||
return top_line, right_line, bottom_line, left_line
|
||||
|
||||
def apply_morphology(gray,
|
||||
kernel_size=(5, 5),
|
||||
erode_iter=3,
|
||||
dilate_iter=3):
|
||||
"""
|
||||
Apply erosion followed by dilation to a grayscale image to remove noise; returns the result.
|
||||
|
||||
Parameters
|
||||
----
|
||||
gray : np.ndarray, grayscale image (H, W)
|
||||
kernel_size : structuring-element size
|
||||
erode_iter : erosion iteration count
|
||||
dilate_iter : dilation iteration count
|
||||
"""
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
||||
out = cv2.erode(gray, kernel, iterations=erode_iter)
|
||||
out = cv2.dilate(out, kernel, iterations=dilate_iter)
|
||||
return out
|
||||
|
||||
|
||||
def perpendicular_foot(pt, line_info):
|
||||
"""
|
||||
Compute the foot of the perpendicular from point pt to the line line_info.
|
||||
pt : (x0, y0)
|
||||
line_info : return value of get_line_equation()
|
||||
"""
|
||||
x0, y0 = pt
|
||||
|
||||
if line_info[0] is None:  # vertical line x = const
|
||||
return int(line_info[1]), int(y0)
|
||||
|
||||
if line_info[0] == 0:  # horizontal line y = const
|
||||
return int(x0), int(line_info[1])
|
||||
|
||||
a, b, *_ = line_info  # sloped line y = ax + b
|
||||
xf = (x0 + a * (y0 - b)) / (1 + a**2)  # closed-form projection onto y = ax + b
|
||||
yf = a * xf + b
|
||||
return int(round(xf)), int(round(yf))
|
||||
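

# --- Illustrative usage (hypothetical example, not part of the pipeline) ---
# Assumes a binary mask image "mask.png" exists on disk.
if __name__ == "__main__":
    mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)
    clean = apply_morphology(mask)  # erode -> dilate to remove small specks
    tl, tr, br, bl = get_bounding_box(clean)
    top_line, right_line, bottom_line, left_line = calculate_lines(tl, tr, br, bl)
    # Foot of the perpendicular from an arbitrary point onto the top edge
    print(perpendicular_foot((50, 80), top_line))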
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
## Environment setup
|
||||
|
||||
```bash
|
||||
pip install ultralytics
|
||||
pip install opencv-python pycocotools matplotlib onnxruntime onnx torch torchvision
|
||||
```
|
||||
|
||||
## Weight files
|
||||
|
||||
* SAM weights (place in the `segment-anything-main` directory): https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
|
||||
* YOLO11 weights (place in the `ultralytics-main` directory): https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt
|
||||
|
||||
## Workflow
|
||||
|
||||
### 1. Run YOLO11 to get car bounding boxes as prompts
|
||||
|
||||
Place the car images in `ultralytics-main/input`:
|
||||
|
||||
```
|
||||
input/
|
||||
├── 00060.jpg
|
||||
├── 00213.jpg
|
||||
├── ...
|
||||
├── 00600.jpg
|
||||
```
|
||||
|
||||
Run `python3 test.py`
|
||||
|
||||
This produces `output/exp`, which saves the images with the detected cars boxed, and `output/car_boxes.txt`, which saves the top-left and bottom-right box coordinates for each image; a sketch of this step follows.
|
||||
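
For reference, a minimal sketch of what this step does, assuming the standard `ultralytics` API (the exact `test.py` logic and the `car_boxes.txt` line format are assumptions here):

```python
from pathlib import Path
from ultralytics import YOLO

model = YOLO("yolo11x.pt")
Path("output").mkdir(exist_ok=True)
with open("output/car_boxes.txt", "w") as f:
    for img in sorted(Path("input").glob("*.jpg")):
        result = model(str(img))[0]                 # run detection on one image
        for box, cls in zip(result.boxes.xyxy, result.boxes.cls):
            if result.names[int(cls)] == "car":     # keep car detections only
                x1, y1, x2, y2 = box.tolist()
                f.write(f"{img.name} {x1:.0f} {y1:.0f} {x2:.0f} {y2:.0f}\n")
```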
|
||||
### 2. Feed the box prompts and the images into the SAM model
|
||||
|
||||
Enter the `segment-anything-main` directory.
|
||||
|
||||
Run `python test_box.py --checkpoint sam_vit_h_4b8939.pth --model-type vit_h --input ../ultralytics-main/input --output ./output --box-file ../ultralytics-main/output/car_boxes.txt`
|
||||
|
||||
The car saliency maps appear in `segment-anything-main/output`; a sketch of the box-prompted prediction follows.
|
||||
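
For reference, the core of box-prompted prediction uses the standard `SamPredictor` API, roughly as below (a sketch with illustrative file names and box values, not the actual `test_box.py`):

```python
import cv2
import numpy as np
from segment_anything import SamPredictor, sam_model_registry

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

image = cv2.cvtColor(cv2.imread("../ultralytics-main/input/00060.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

box = np.array([100, 150, 900, 600])  # (x1, y1, x2, y2) read from car_boxes.txt
masks, scores, _ = predictor.predict(box=box, multimask_output=False)
cv2.imwrite("./output/00060_mask.png", (masks[0] * 255).astype(np.uint8))
```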
|
||||
### 3. Run bounding-rectangle and keypoint detection on the saliency maps
|
||||
|
||||
Enter the `demo` directory and edit the image paths used in the `main` function, as below:
|
||||
|
||||
```python
|
||||
side_mask = '../segment-anything-main/output/2_mask.png'
|
||||
side_rgb = '../ultralytics-main/input/2.png'
|
||||
out_dir = './result'
|
||||
process_side(side_mask, side_rgb, out_dir)
|
||||
```
|
||||
|
||||
Run `python3 point.py`
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
[flake8]
|
||||
ignore = W503, E203, E221, C901, C408, E741, C407, B017, F811, C101, EXE001, EXE002
|
||||
max-line-length = 100
|
||||
max-complexity = 18
|
||||
select = B,C,E,F,W,T4,B9
|
||||
per-file-ignores =
|
||||
**/__init__.py:F401,F403,E402
|
|
@ -0,0 +1,42 @@
|
|||
.nfs*
|
||||
|
||||
# compilation and distribution
|
||||
__pycache__
|
||||
_ext
|
||||
*.pyc
|
||||
*.pyd
|
||||
*.so
|
||||
*.dll
|
||||
*.egg-info/
|
||||
build/
|
||||
dist/
|
||||
wheels/
|
||||
|
||||
# pytorch/python/numpy formats
|
||||
*.pth
|
||||
*.pkl
|
||||
*.npy
|
||||
*.ts
|
||||
model_ts*.txt
|
||||
|
||||
# onnx models
|
||||
*.onnx
|
||||
|
||||
# ipython/jupyter notebooks
|
||||
**/.ipynb_checkpoints/
|
||||
|
||||
# Editor temporaries
|
||||
*.swn
|
||||
*.swo
|
||||
*.swp
|
||||
*~
|
||||
|
||||
# editor settings
|
||||
.idea
|
||||
.vscode
|
||||
_darcs
|
||||
|
||||
# demo
|
||||
**/node_modules
|
||||
yarn.lock
|
||||
package-lock.json
|
|
@ -0,0 +1,80 @@
|
|||
# Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to make participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all project spaces, and it also applies when
|
||||
an individual is representing the project or its community in public spaces.
|
||||
Examples of representing a project or community include using an official
|
||||
project e-mail address, posting via an official social media account, or acting
|
||||
as an appointed representative at an online or offline event. Representation of
|
||||
a project may be further defined and clarified by project maintainers.
|
||||
|
||||
This Code of Conduct also applies outside the project spaces when there is a
|
||||
reasonable belief that an individual's behavior may have a negative impact on
|
||||
the project or its community.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at <opensource-conduct@fb.com>. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
|
@ -0,0 +1,31 @@
|
|||
# Contributing to segment-anything
|
||||
We want to make contributing to this project as easy and transparent as
|
||||
possible.
|
||||
|
||||
## Pull Requests
|
||||
We actively welcome your pull requests.
|
||||
|
||||
1. Fork the repo and create your branch from `main`.
|
||||
2. If you've added code that should be tested, add tests.
|
||||
3. If you've changed APIs, update the documentation.
|
||||
4. Ensure the test suite passes.
|
||||
5. Make sure your code lints, using the `linter.sh` script in the project's root directory. Linting requires `black==23.*`, `isort==5.12.0`, `flake8`, and `mypy`.
|
||||
6. If you haven't already, complete the Contributor License Agreement ("CLA").
|
||||
|
||||
## Contributor License Agreement ("CLA")
|
||||
In order to accept your pull request, we need you to submit a CLA. You only need
|
||||
to do this once to work on any of Facebook's open source projects.
|
||||
|
||||
Complete your CLA here: <https://code.facebook.com/cla>
|
||||
|
||||
## Issues
|
||||
We use GitHub issues to track public bugs. Please ensure your description is
|
||||
clear and has sufficient instructions to be able to reproduce the issue.
|
||||
|
||||
Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
|
||||
disclosure of security bugs. In those cases, please go through the process
|
||||
outlined on that page and do not file a public issue.
|
||||
|
||||
## License
|
||||
By contributing to segment-anything, you agree that your contributions will be licensed
|
||||
under the LICENSE file in the root directory of this source tree.
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,183 @@
|
|||
## Latest updates -- SAM 2: Segment Anything in Images and Videos
|
||||
|
||||
Please check out our new release on [**Segment Anything Model 2 (SAM 2)**](https://github.com/facebookresearch/segment-anything-2).
|
||||
|
||||
* SAM 2 code: https://github.com/facebookresearch/segment-anything-2
|
||||
* SAM 2 demo: https://sam2.metademolab.com/
|
||||
* SAM 2 paper: https://arxiv.org/abs/2408.00714
|
||||
|
||||

|
||||
|
||||
**Segment Anything Model 2 (SAM 2)** is a foundation model towards solving promptable visual segmentation in images and videos. We extend SAM to video by considering images as a video with a single frame. The model design is a simple transformer architecture with streaming memory for real-time video processing. We build a model-in-the-loop data engine, which improves model and data via user interaction, to collect [**our SA-V dataset**](https://ai.meta.com/datasets/segment-anything-video), the largest video segmentation dataset to date. SAM 2 trained on our data provides strong performance across a wide range of tasks and visual domains.
|
||||
|
||||
# Segment Anything
|
||||
|
||||
**[Meta AI Research, FAIR](https://ai.facebook.com/research/)**
|
||||
|
||||
[Alexander Kirillov](https://alexander-kirillov.github.io/), [Eric Mintun](https://ericmintun.github.io/), [Nikhila Ravi](https://nikhilaravi.com/), [Hanzi Mao](https://hanzimao.me/), Chloe Rolland, Laura Gustafson, [Tete Xiao](https://tetexiao.com), [Spencer Whitehead](https://www.spencerwhitehead.com/), Alex Berg, Wan-Yen Lo, [Piotr Dollar](https://pdollar.github.io/), [Ross Girshick](https://www.rossgirshick.info/)
|
||||
|
||||
[[`Paper`](https://ai.facebook.com/research/publications/segment-anything/)] [[`Project`](https://segment-anything.com/)] [[`Demo`](https://segment-anything.com/demo)] [[`Dataset`](https://segment-anything.com/dataset/index.html)] [[`Blog`](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/)] [[`BibTeX`](#citing-segment-anything)]
|
||||
|
||||

|
||||
|
||||
The **Segment Anything Model (SAM)** produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. It has been trained on a [dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1 billion masks, and has strong zero-shot performance on a variety of segmentation tasks.
|
||||
|
||||
<p float="left">
|
||||
<img src="assets/masks1.png?raw=true" width="37.25%" />
|
||||
<img src="assets/masks2.jpg?raw=true" width="61.5%" />
|
||||
</p>
|
||||
|
||||
## Installation
|
||||
|
||||
The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended.
|
||||
|
||||
Install Segment Anything:
|
||||
|
||||
```
|
||||
pip install git+https://github.com/facebookresearch/segment-anything.git
|
||||
```
|
||||
|
||||
or clone the repository locally and install with
|
||||
|
||||
```
|
||||
git clone git@github.com:facebookresearch/segment-anything.git
|
||||
cd segment-anything; pip install -e .
|
||||
```
|
||||
|
||||
The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks.
|
||||
|
||||
```
|
||||
pip install opencv-python pycocotools matplotlib onnxruntime onnx
|
||||
```
|
||||
|
||||
## <a name="GettingStarted"></a>Getting Started
|
||||
|
||||
First download a [model checkpoint](#model-checkpoints). Then the model can be used in just a few lines to get masks from a given prompt:
|
||||
|
||||
```
|
||||
from segment_anything import SamPredictor, sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
predictor = SamPredictor(sam)
|
||||
predictor.set_image(<your_image>)
|
||||
masks, _, _ = predictor.predict(<input_prompts>)
|
||||
```
|
||||
|
||||
or generate masks for an entire image:
|
||||
|
||||
```
|
||||
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
mask_generator = SamAutomaticMaskGenerator(sam)
|
||||
masks = mask_generator.generate(<your_image>)
|
||||
```
|
||||
|
||||
Additionally, masks can be generated for images from the command line:
|
||||
|
||||
```
|
||||
python scripts/amg.py --checkpoint <path/to/checkpoint> --model-type <model_type> --input <image_or_folder> --output <path/to/output>
|
||||
```
|
||||
|
||||
See the examples notebooks on [using SAM with prompts](/notebooks/predictor_example.ipynb) and [automatically generating masks](/notebooks/automatic_mask_generator_example.ipynb) for more details.
|
||||
|
||||
<p float="left">
|
||||
<img src="assets/notebook1.png?raw=true" width="49.1%" />
|
||||
<img src="assets/notebook2.png?raw=true" width="48.9%" />
|
||||
</p>
|
||||
|
||||
## ONNX Export
|
||||
|
||||
SAM's lightweight mask decoder can be exported to ONNX format so that it can be run in any environment that supports ONNX runtime, such as in-browser as showcased in the [demo](https://segment-anything.com/demo). Export the model with
|
||||
|
||||
```
|
||||
python scripts/export_onnx_model.py --checkpoint <path/to/checkpoint> --model-type <model_type> --output <path/to/output>
|
||||
```
|
||||
|
||||
See the [example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb) for details on how to combine image preprocessing via SAM's backbone with mask prediction using the ONNX model. It is recommended to use the latest stable version of PyTorch for ONNX export.
|
||||
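
As a rough sketch of running the exported decoder with `onnxruntime` (input names follow `scripts/export_onnx_model.py`; the embedding file, click coordinates, and image size below are illustrative, and the exact coordinate transform and padding conventions are covered in the notebook):

```
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("sam_onnx_example.onnx")
image_embedding = np.load("image_embedding.npy")  # saved earlier via SamPredictor.get_image_embedding()

# One foreground click, already mapped into the model's 1024-long-side input space,
# plus the padding point (label -1) used by the notebook when no box is given
point_coords = np.array([[[512.0, 384.0], [0.0, 0.0]]], dtype=np.float32)
point_labels = np.array([[1.0, -1.0]], dtype=np.float32)

masks, scores, low_res_logits = session.run(None, {
    "image_embeddings": image_embedding,                           # (1, 256, 64, 64)
    "point_coords": point_coords,
    "point_labels": point_labels,
    "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
    "has_mask_input": np.zeros(1, dtype=np.float32),
    "orig_im_size": np.array([1200.0, 1800.0], dtype=np.float32),  # (H, W) of the original image
})
masks = masks > 0.0  # threshold mask logits to a binary mask
```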
|
||||
### Web demo
|
||||
|
||||
The `demo/` folder has a simple one page React app which shows how to run mask prediction with the exported ONNX model in a web browser with multithreading. Please see [`demo/README.md`](https://github.com/facebookresearch/segment-anything/blob/main/demo/README.md) for more details.
|
||||
|
||||
## <a name="Models"></a>Model Checkpoints
|
||||
|
||||
Three versions of the model are available, with different backbone sizes. These models can be instantiated by running
|
||||
|
||||
```
|
||||
from segment_anything import sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
```
|
||||
|
||||
Click the links below to download the checkpoint for the corresponding model type.
|
||||
|
||||
- **`default` or `vit_h`: [ViT-H SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)**
|
||||
- `vit_l`: [ViT-L SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth)
|
||||
- `vit_b`: [ViT-B SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth)
|
||||
|
||||
## Dataset
|
||||
|
||||
See [here](https://ai.facebook.com/datasets/segment-anything/) for an overview of the dataset. The dataset can be downloaded [here](https://ai.facebook.com/datasets/segment-anything-downloads/). By downloading the datasets you agree that you have read and accepted the terms of the SA-1B Dataset Research License.
|
||||
|
||||
We save masks per image as a JSON file. It can be loaded as a dictionary in Python in the format below.
|
||||
|
||||
```python
|
||||
{
|
||||
"image" : image_info,
|
||||
"annotations" : [annotation],
|
||||
}
|
||||
|
||||
image_info {
|
||||
"image_id" : int, # Image id
|
||||
"width" : int, # Image width
|
||||
"height" : int, # Image height
|
||||
"file_name" : str, # Image filename
|
||||
}
|
||||
|
||||
annotation {
|
||||
"id" : int, # Annotation id
|
||||
"segmentation" : dict, # Mask saved in COCO RLE format.
|
||||
"bbox" : [x, y, w, h], # The box around the mask, in XYWH format
|
||||
"area" : int, # The area in pixels of the mask
|
||||
"predicted_iou" : float, # The model's own prediction of the mask's quality
|
||||
"stability_score" : float, # A measure of the mask's quality
|
||||
"crop_box" : [x, y, w, h], # The crop of the image used to generate the mask, in XYWH format
|
||||
"point_coords" : [[x, y]], # The point coordinates input to the model to generate the mask
|
||||
}
|
||||
```
|
||||
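
For example, a single annotation file can be loaded with the standard library (the filename here is illustrative):

```python
import json

with open("sa_000001.json") as f:  # hypothetical filename
    data = json.load(f)
print(data["image"]["file_name"], len(data["annotations"]))
```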
|
||||
Image ids can be found in `sa_images_ids.txt`, which can also be downloaded using the above [link](https://ai.facebook.com/datasets/segment-anything-downloads/).
|
||||
|
||||
To decode a mask in COCO RLE format into binary:
|
||||
|
||||
```
|
||||
from pycocotools import mask as mask_utils
|
||||
mask = mask_utils.decode(annotation["segmentation"])
|
||||
```
|
||||
|
||||
See [here](https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py) for more instructions to manipulate masks stored in RLE format.
|
||||
|
||||
## License
|
||||
|
||||
The model is licensed under the [Apache 2.0 license](LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
|
||||
|
||||
## Contributors
|
||||
|
||||
The Segment Anything project was made possible with the help of many contributors (alphabetical):
|
||||
|
||||
Aaron Adcock, Vaibhav Aggarwal, Morteza Behrooz, Cheng-Yang Fu, Ashley Gabriel, Ahuva Goldstand, Allen Goodman, Sumanth Gurram, Jiabo Hu, Somya Jain, Devansh Kukreja, Robert Kuo, Joshua Lane, Yanghao Li, Lilian Luong, Jitendra Malik, Mallika Malhotra, William Ngan, Omkar Parkhi, Nikhil Raina, Dirk Rowe, Neil Sejoor, Vanessa Stark, Bala Varadarajan, Bram Wasti, Zachary Winstrom
|
||||
|
||||
## Citing Segment Anything
|
||||
|
||||
If you use SAM or SA-1B in your research, please use the following BibTeX entry.
|
||||
|
||||
```
|
||||
@article{kirillov2023segany,
|
||||
title={Segment Anything},
|
||||
author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross},
|
||||
journal={arXiv:2304.02643},
|
||||
year={2023}
|
||||
}
|
||||
```
|
After Width: | Height: | Size: 3.5 MiB |
After Width: | Height: | Size: 130 KiB |
After Width: | Height: | Size: 1.9 MiB |
After Width: | Height: | Size: 568 KiB |
After Width: | Height: | Size: 854 KiB |
After Width: | Height: | Size: 1.2 MiB |
|
@ -0,0 +1,126 @@
|
|||
## Segment Anything Simple Web demo
|
||||
|
||||
This **front-end only** React based web demo shows how to load a fixed image and corresponding `.npy` file of the SAM image embedding, and run the SAM ONNX model in the browser using Web Assembly with multithreading enabled by `SharedArrayBuffer`, Web Worker, and SIMD128.
|
||||
|
||||
<img src="https://github.com/facebookresearch/segment-anything/raw/main/assets/minidemo.gif" width="500"/>
|
||||
|
||||
## Run the app
|
||||
|
||||
Install Yarn
|
||||
|
||||
```
|
||||
npm install -g yarn
|
||||
```
|
||||
|
||||
Build and run:
|
||||
|
||||
```
|
||||
yarn && yarn start
|
||||
```
|
||||
|
||||
Navigate to [`http://localhost:8081/`](http://localhost:8081/)
|
||||
|
||||
Move your cursor around to see the mask prediction update in real time.
|
||||
|
||||
## Export the image embedding
|
||||
|
||||
In the [ONNX Model Example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb) upload the image of your choice, then generate and save the corresponding embedding.
|
||||
|
||||
Initialize the predictor:
|
||||
|
||||
```python
|
||||
checkpoint = "sam_vit_h_4b8939.pth"
|
||||
model_type = "vit_h"
|
||||
sam = sam_model_registry[model_type](checkpoint=checkpoint)
|
||||
sam.to(device='cuda')
|
||||
predictor = SamPredictor(sam)
|
||||
```
|
||||
|
||||
Set the new image and export the embedding:
|
||||
|
||||
```
|
||||
image = cv2.imread('src/assets/dogs.jpg')
|
||||
predictor.set_image(image)
|
||||
image_embedding = predictor.get_image_embedding().cpu().numpy()
|
||||
np.save("dogs_embedding.npy", image_embedding)
|
||||
```
|
||||
|
||||
Save the new image and embedding in `src/assets/data`.
|
||||
|
||||
## Export the ONNX model
|
||||
|
||||
You also need to export the quantized ONNX model from the [ONNX Model Example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb).
|
||||
|
||||
Run the cell in the notebook which saves the `sam_onnx_quantized_example.onnx` file, download it and copy it to the path `/model/sam_onnx_quantized_example.onnx`.
|
||||
|
||||
Here is a snippet of the export/quantization code:
|
||||
|
||||
```
|
||||
onnx_model_path = "sam_onnx_example.onnx"
|
||||
onnx_model_quantized_path = "sam_onnx_quantized_example.onnx"
|
||||
quantize_dynamic(
|
||||
model_input=onnx_model_path,
|
||||
model_output=onnx_model_quantized_path,
|
||||
optimize_model=True,
|
||||
per_channel=False,
|
||||
reduce_range=False,
|
||||
weight_type=QuantType.QUInt8,
|
||||
)
|
||||
```
|
||||
|
||||
**NOTE: if you change the ONNX model by using a new checkpoint you need to also re-export the embedding.**
|
||||
|
||||
## Update the image, embedding, model in the app
|
||||
|
||||
Update the following file paths at the top of `App.tsx`:
|
||||
|
||||
```py
|
||||
const IMAGE_PATH = "/assets/data/dogs.jpg";
|
||||
const IMAGE_EMBEDDING = "/assets/data/dogs_embedding.npy";
|
||||
const MODEL_DIR = "/model/sam_onnx_quantized_example.onnx";
|
||||
```
|
||||
|
||||
## ONNX multithreading with SharedArrayBuffer
|
||||
|
||||
To use multithreading, the appropriate headers need to be set to create a cross origin isolation state which will enable use of `SharedArrayBuffer` (see this [blog post](https://cloudblogs.microsoft.com/opensource/2021/09/02/onnx-runtime-web-running-your-machine-learning-model-in-browser/) for more details).
|
||||
|
||||
The headers below are set in `configs/webpack/dev.js`:
|
||||
|
||||
```js
|
||||
headers: {
|
||||
"Cross-Origin-Opener-Policy": "same-origin",
|
||||
"Cross-Origin-Embedder-Policy": "credentialless",
|
||||
}
|
||||
```
|
||||
|
||||
## Structure of the app
|
||||
|
||||
**`App.tsx`**
|
||||
|
||||
- Initializes ONNX model
|
||||
- Loads image embedding and image
|
||||
- Runs the ONNX model based on input prompts
|
||||
|
||||
**`Stage.tsx`**
|
||||
|
||||
- Handles mouse move interaction to update the ONNX model prompt
|
||||
|
||||
**`Tool.tsx`**
|
||||
|
||||
- Renders the image and the mask prediction
|
||||
|
||||
**`helpers/maskUtils.tsx`**
|
||||
|
||||
- Conversion of ONNX model output from array to an HTMLImageElement
|
||||
|
||||
**`helpers/onnxModelAPI.tsx`**
|
||||
|
||||
- Formats the inputs for the ONNX model
|
||||
|
||||
**`helpers/scaleHelper.tsx`**
|
||||
|
||||
- Handles image scaling logic for SAM (longest side 1024)
|
||||
|
||||
**`hooks/`**
|
||||
|
||||
- Handle shared state for the app
|
|
@ -0,0 +1,84 @@
|
|||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
const { resolve } = require("path");
|
||||
const HtmlWebpackPlugin = require("html-webpack-plugin");
|
||||
const FriendlyErrorsWebpackPlugin = require("friendly-errors-webpack-plugin");
|
||||
const CopyPlugin = require("copy-webpack-plugin");
|
||||
const webpack = require("webpack");
|
||||
|
||||
module.exports = {
|
||||
entry: "./src/index.tsx",
|
||||
resolve: {
|
||||
extensions: [".js", ".jsx", ".ts", ".tsx"],
|
||||
},
|
||||
output: {
|
||||
path: resolve(__dirname, "dist"),
|
||||
},
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.mjs$/,
|
||||
include: /node_modules/,
|
||||
type: "javascript/auto",
|
||||
resolve: {
|
||||
fullySpecified: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
test: [/\.jsx?$/, /\.tsx?$/],
|
||||
use: ["ts-loader"],
|
||||
exclude: /node_modules/,
|
||||
},
|
||||
{
|
||||
test: /\.css$/,
|
||||
use: ["style-loader", "css-loader"],
|
||||
},
|
||||
{
|
||||
test: /\.(scss|sass)$/,
|
||||
use: ["style-loader", "css-loader", "postcss-loader"],
|
||||
},
|
||||
{
|
||||
test: /\.(jpe?g|png|gif|svg)$/i,
|
||||
use: [
|
||||
"file-loader?hash=sha512&digest=hex&name=img/[contenthash].[ext]",
|
||||
"image-webpack-loader?bypassOnDebug&optipng.optimizationLevel=7&gifsicle.interlaced=false",
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /\.(woff|woff2|ttf)$/,
|
||||
use: {
|
||||
loader: "url-loader",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
plugins: [
|
||||
new CopyPlugin({
|
||||
patterns: [
|
||||
{
|
||||
from: "node_modules/onnxruntime-web/dist/*.wasm",
|
||||
to: "[name][ext]",
|
||||
},
|
||||
{
|
||||
from: "model",
|
||||
to: "model",
|
||||
},
|
||||
{
|
||||
from: "src/assets",
|
||||
to: "assets",
|
||||
},
|
||||
],
|
||||
}),
|
||||
new HtmlWebpackPlugin({
|
||||
template: "./src/assets/index.html",
|
||||
}),
|
||||
new FriendlyErrorsWebpackPlugin(),
|
||||
new webpack.ProvidePlugin({
|
||||
process: "process/browser",
|
||||
}),
|
||||
],
|
||||
};
|
|
@ -0,0 +1,25 @@
|
|||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
// development config
|
||||
const { merge } = require("webpack-merge");
|
||||
const commonConfig = require("./common");
|
||||
|
||||
module.exports = merge(commonConfig, {
|
||||
mode: "development",
|
||||
devServer: {
|
||||
hot: true, // enable HMR on the server
|
||||
open: true,
|
||||
// These headers enable the cross origin isolation state
|
||||
// needed to enable use of SharedArrayBuffer for ONNX
|
||||
// multithreading.
|
||||
headers: {
|
||||
"Cross-Origin-Opener-Policy": "same-origin",
|
||||
"Cross-Origin-Embedder-Policy": "credentialless",
|
||||
},
|
||||
},
|
||||
devtool: "cheap-module-source-map",
|
||||
});
|