# hirres_tractor_vision/lib/alg/image_processing_3d.py

import os
import cv2
import numpy as np
import ctypes
import math
import open3d as o3d
import time
# +++++++++++++++++++++++++++++++++++++++++++++++++++++ Save depth image +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def save_depth_image(image_data, width, height, sn, queue, image_count):
    # Convert the raw image buffer into an OpenCV-compatible array
    depth_image_array = ctypes.cast(image_data, ctypes.POINTER(ctypes.c_uint16 * (width.value * height.value))).contents
    depth_image_np = np.array(depth_image_array).reshape(height.value, width.value)
    # Multiply each pixel's depth value by 0.25 (not applied here)
    # Save the image with OpenCV
    image_name = f"{image_count}DepthImage_{sn}.png"
    # output_directory = "OutputDirectory/Linux/x64Release/Images"
    output_directory = "src/Python/src/ToolsFuncsDemo/ImagesTest/Trac_up_down02"
    os.makedirs(output_directory, exist_ok=True)
    output_path = os.path.join(output_directory, image_name)
    cv2.imwrite(output_path, depth_image_np)
    print(f"Depth image saved as {output_path} for camera {sn}")
    queue.put(f"Depth image saved as {output_path} for camera {sn}")
    return f"Depth image saved as {output_path} for camera {sn}"
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++ Obstacle detection ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++ Remove isolated points with statistical filtering ++++++++++++++++++++++++++++++++++++++
# def statistical_outlier_removal(point_cloud, nb_neighbors, std_ratio):
#     """
#     Remove isolated points with statistical filtering
#     :param point_cloud: Open3D point cloud object
#     :param nb_neighbors: number of neighbouring points considered for each point
#     :param std_ratio: standard-deviation multiplier used to decide whether a point is an outlier
#     :return: point cloud with isolated points removed
#     """
#     # Run radius-based outlier removal
#     cl, ind = point_cloud.remove_radius_outlier(20, 10)
#     # Keep only the filtered points
#     point_cloud_filtered = point_cloud.select_by_index(ind)
#     return point_cloud_filtered
def statistical_outlier_removal(point_cloud, nb_neighbors=20, std_ratio=2.0):
    cl, ind = point_cloud.remove_statistical_outlier(nb_neighbors=nb_neighbors, std_ratio=std_ratio)
    return cl
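# Usage sketch (not part of the original module): remove_statistical_outlier returns a
# (filtered_cloud, kept_indices) pair, so the wrapper above simply discards the indices.
# Selecting the kept indices explicitly yields the same points:
def _statistical_outlier_removal_explicit(point_cloud, nb_neighbors=20, std_ratio=2.0):
    filtered, kept = point_cloud.remove_statistical_outlier(nb_neighbors=nb_neighbors, std_ratio=std_ratio)
    return point_cloud.select_by_index(kept)  # same result as returning `filtered`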
#+++++++++++++++++++++++++++++++++++++++++++ Depth image -> point cloud ++++++++++++++++++++++++++++++++++++++
def depth_to_point_cloud(depth_image_data, x_image_np, y_image_np, width, height, min_depth, max_depth):
    """
    Convert a depth image to a point cloud, using pixel coordinates as x and y and
    keeping only points whose depth lies within the valid range.
    :param depth_image_data: depth image data (2-D array, each element is a depth value in millimetres)
    :param x_image_np: per-pixel x data (currently unused)
    :param y_image_np: per-pixel y data (currently unused)
    :param width: image width (integer)
    :param height: image height (integer)
    :param min_depth: minimum valid depth in millimetres, typically 500 mm (0.5 m)
    :param max_depth: maximum valid depth in millimetres, typically 6000 mm (6 m)
    :return: Open3D point cloud object
    """
    # Make sure width and height are integers
    if not isinstance(width, int) or not isinstance(height, int):
        raise ValueError("Width and height must be integers")
    # Create the point cloud object
    point_cloud = o3d.geometry.PointCloud()
    # Point cloud data
    points = []
    colors = []
    # Make sure the inputs are NumPy arrays
    depth_image_data = np.array(depth_image_data)
    x_image_data = np.array(x_image_np)
    y_image_data = np.array(y_image_np)
    # Camera centre (principal point assumed at the image centre)
    center_x = width / 2
    center_y = height / 2
    for v in range(height):
        for u in range(width):
            # Depth value at this pixel
            z = depth_image_data[v, u]
            # Skip depths outside the valid range
            if z < min_depth or z > max_depth:
                continue
            # Metric size of one pixel at this depth (mm/pixel), from the 69 x 51 degree FOV
            pixel_size_x_mm = 2 * z * np.tan(np.radians(69 / 2)) / width
            pixel_size_y_mm = 2 * z * np.tan(np.radians(51 / 2)) / height
            # Coordinates relative to the camera centre, with the axes adjusted
            x = (u - center_x)  # X axis relative to the camera centre
            y = (center_y - v)  # the camera's y+ points down
            x_mm = x * pixel_size_x_mm
            y_mm = y * pixel_size_y_mm
            points.append([x_mm, y_mm, z])
            colors.append([1.0, 0.0, 0.0])  # colour the point red
    # Convert the point data to NumPy arrays
    points = np.array(points)
    colors = np.array(colors)
    # Assign points and colours to the point cloud
    point_cloud.points = o3d.utility.Vector3dVector(points)
    point_cloud.colors = o3d.utility.Vector3dVector(colors)
    # # Visualise the point cloud
    # o3d.visualization.draw_geometries([point_cloud])
    # Remove noise (isolated points); tune the parameters on site
    start_time = time.perf_counter()  # start timing
    point_cloud = statistical_outlier_removal(point_cloud, 20, 1.0)
    end_time = time.perf_counter()  # stop timing
    runtime = end_time - start_time  # elapsed time
    # print(f"statistical_outlier_removal runtime: {runtime:.6f} s")
    return point_cloud
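# ---------------------------------------------------------------------------
# Vectorized sketch (not part of the original module): the same FOV-based projection
# as depth_to_point_cloud, done with NumPy broadcasting instead of a per-pixel Python
# loop. The 69 x 51 degree horizontal/vertical field of view is copied from the loop
# above; whether it matches the actual camera is an assumption.
def _depth_to_points_vectorized(depth_mm, min_depth, max_depth, fov_x_deg=69.0, fov_y_deg=51.0):
    depth_mm = np.asarray(depth_mm, dtype=np.float64)
    h, w = depth_mm.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))        # pixel column / row indices
    z = depth_mm
    valid = (z >= min_depth) & (z <= max_depth)            # keep only depths in range
    # Metric size of one pixel at depth z (mm/pixel), derived from the FOV
    pixel_size_x = 2.0 * z * np.tan(np.radians(fov_x_deg / 2)) / w
    pixel_size_y = 2.0 * z * np.tan(np.radians(fov_y_deg / 2)) / h
    x_mm = (u - w / 2.0) * pixel_size_x
    y_mm = (h / 2.0 - v) * pixel_size_y                    # camera y+ points down, so flip the row axis
    pts = np.stack([x_mm[valid], y_mm[valid], z[valid]], axis=1)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pts)
    return cloud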
#+++++++++++++++++++++++++++++++++++++++++++ Draw a 3-D bounding box and detect obstacles ++++++++++++++++++++++++++++++++++++++
def detect_obstacles_in_box(point_cloud, min_bound, max_bound, width, height, save_name="detected_obstacle.ply", visualize=False):
    # 1. Filter the cloud
    filtered_point_cloud = statistical_outlier_removal(point_cloud, 20, 1.0)
    # 2. Build the AABB detection box
    box = o3d.geometry.AxisAlignedBoundingBox(min_bound=min_bound, max_bound=max_bound)
    box.color = (0.0, 1.0, 0.0)  # green box edges
    # 3. Collect the points inside the box
    indices = box.get_point_indices_within_bounding_box(filtered_point_cloud.points)
    if len(indices) == 0:
        # print("No points detected inside the box")
        return None, filtered_point_cloud
    points = np.asarray(filtered_point_cloud.points)
    colors = np.asarray(filtered_point_cloud.colors)
    if len(colors) == 0:
        colors = np.ones((len(points), 3))  # white
        filtered_point_cloud.colors = o3d.utility.Vector3dVector(colors)
        colors = np.asarray(filtered_point_cloud.colors)
    # Points inside the box are red, points outside are green
    colors[indices] = [1.0, 0.0, 0.0]
    mask = np.ones(len(points), dtype=bool)
    mask[indices] = False
    colors[mask] = [0.0, 1.0, 0.0]
    # Closest point -> black
    reference_point = np.array([0.0, 0.0, 0.0])
    obstacle_points = points[indices]
    distances = np.linalg.norm(obstacle_points - reference_point, axis=1)
    closest_index = np.argmin(distances)
    closest_point = obstacle_points[closest_index]
    min_distance = distances[closest_index]
    global_closest_index = indices[closest_index]
    colors[global_closest_index] = [0.0, 0.0, 0.0]
    # Update the colours
    filtered_point_cloud.colors = o3d.utility.Vector3dVector(colors)
    # Visualisation
    if visualize:
        o3d.visualization.draw_geometries([filtered_point_cloud, box])
    # TODO: return a pixel coordinate and the distance
    return {
        'position': closest_point.tolist(),
        'distance': float(min_distance)
    }, filtered_point_cloud
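# ---------------------------------------------------------------------------
# End-to-end usage sketch (not part of the original module): load a previously saved
# depth PNG, convert it to a point cloud, and look for the nearest obstacle inside a
# box in front of the camera. The file name and the box bounds (in mm) are
# placeholders and need to be adapted to the actual setup.
if __name__ == "__main__":
    depth_png = "0DepthImage_CAM01.png"  # placeholder path
    depth = cv2.imread(depth_png, cv2.IMREAD_UNCHANGED)
    if depth is None:
        print(f"Could not read {depth_png}")
    else:
        h, w = depth.shape[:2]
        cloud = depth_to_point_cloud(depth, None, None, int(w), int(h), min_depth=500, max_depth=6000)
        result, colored_cloud = detect_obstacles_in_box(
            cloud,
            min_bound=[-500.0, -500.0, 500.0],  # placeholder box, mm
            max_bound=[500.0, 500.0, 5500.0],
            width=w, height=h,
            visualize=False)
        if result is None:
            print("No points inside the detection box")
        else:
            print(f"Closest obstacle at {result['position']} mm, distance {result['distance']:.1f} mm")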